func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
dnp3_al_process_object(tvbuff_t *tvb, packet_info *pinfo, int offset,
proto_tree *robj_tree, gboolean header_only,
guint16 *al_objtype, nstime_t *al_cto)
{
guint8 al_objq, al_objq_prefix, al_objq_range, al_oct_len = 0, bitindex;
guint16 al_obj, temp;
guint32 al_ptaddr = 0;
int num_items = 0;
int orig_offset, rangebytes = 0;
proto_item *object_item, *range_item;
proto_tree *object_tree, *qualifier_tree, *range_tree;
orig_offset = offset;
/* Application Layer Objects in this Message */
*al_objtype =
al_obj = tvb_get_ntohs(tvb, offset);
/* Special handling for Octet string objects as the variation is the length of the string */
temp = al_obj & 0xFF00;
if ((temp == AL_OBJ_OCT) || (temp == AL_OBJ_OCT_EVT )) {
al_oct_len = al_obj & 0xFF;
al_obj = temp;
}
/* Create Data Objects Detail Tree */
if (AL_OBJ_GROUP(al_obj) == 0x0) {
object_item = proto_tree_add_uint_format(robj_tree, hf_dnp3_al_obj, tvb, offset, 2, al_obj,
"Object(s): %s (0x%04x)",
val_to_str_ext_const(al_obj, &dnp3_al_obj_vals_ext, "Unknown group 0 Variation"),
al_obj);
if (try_val_to_str_ext(al_obj, &dnp3_al_obj_vals_ext) == NULL) {
expert_add_info(pinfo, object_item, &ei_dnp3_unknown_group0_variation);
}
}
else {
object_item = proto_tree_add_uint_format(robj_tree, hf_dnp3_al_obj, tvb, offset, 2, al_obj,
"Object(s): %s (0x%04x)",
val_to_str_ext_const(al_obj, &dnp3_al_obj_vals_ext, "Unknown Object\\Variation"),
al_obj);
if (try_val_to_str_ext(al_obj, &dnp3_al_obj_vals_ext) == NULL) {
expert_add_info(pinfo, object_item, &ei_dnp3_unknown_object);
}
}
object_tree = proto_item_add_subtree(object_item, ett_dnp3_al_obj);
offset += 2;
/* Object Qualifier */
al_objq = tvb_get_guint8(tvb, offset);
al_objq_prefix = al_objq & AL_OBJQ_PREFIX;
al_objq_prefix = al_objq_prefix >> 4;
al_objq_range = al_objq & AL_OBJQ_RANGE;
qualifier_tree = proto_tree_add_subtree_format(object_tree, tvb, offset, 1, ett_dnp3_al_obj_qualifier, NULL,
"Qualifier Field, Prefix: %s, Range: %s",
val_to_str_ext_const(al_objq_prefix, &dnp3_al_objq_prefix_vals_ext, "Unknown Prefix Type"),
val_to_str_ext_const(al_objq_range, &dnp3_al_objq_range_vals_ext, "Unknown Range Type"));
proto_tree_add_item(qualifier_tree, hf_dnp3_al_objq_prefix, tvb, offset, 1, ENC_BIG_ENDIAN);
proto_tree_add_item(qualifier_tree, hf_dnp3_al_objq_range, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
/* Create (possibly synthesized) number of items and range field tree */
range_tree = proto_tree_add_subtree(object_tree, tvb, offset, 0, ett_dnp3_al_obj_range, &range_item, "Number of Items: ");
switch (al_objq_range)
{
case AL_OBJQL_RANGE_SSI8: /* 8-bit Start and Stop Indices in Range Field */
num_items = ( tvb_get_guint8(tvb, offset+1) - tvb_get_guint8(tvb, offset) + 1);
proto_item_set_generated(range_item);
al_ptaddr = tvb_get_guint8(tvb, offset);
proto_tree_add_item(range_tree, hf_dnp3_al_range_start8, tvb, offset, 1, ENC_LITTLE_ENDIAN);
proto_tree_add_item(range_tree, hf_dnp3_al_range_stop8, tvb, offset + 1, 1, ENC_LITTLE_ENDIAN);
rangebytes = 2;
break;
case AL_OBJQL_RANGE_SSI16: /* 16-bit Start and Stop Indices in Range Field */
num_items = ( tvb_get_letohs(tvb, offset+2) - tvb_get_letohs(tvb, (offset)) + 1);
proto_item_set_generated(range_item);
al_ptaddr = tvb_get_letohs(tvb, offset);
proto_tree_add_item(range_tree, hf_dnp3_al_range_start16, tvb, offset, 2, ENC_LITTLE_ENDIAN);
proto_tree_add_item(range_tree, hf_dnp3_al_range_stop16, tvb, offset + 2, 2, ENC_LITTLE_ENDIAN);
rangebytes = 4;
break;
case AL_OBJQL_RANGE_SSI32: /* 32-bit Start and Stop Indices in Range Field */
num_items = ( tvb_get_letohl(tvb, offset+4) - tvb_get_letohl(tvb, offset) + 1);
proto_item_set_generated(range_item);
al_ptaddr = tvb_get_letohl(tvb, offset);
proto_tree_add_item(range_tree, hf_dnp3_al_range_start32, tvb, offset, 4, ENC_LITTLE_ENDIAN);
proto_tree_add_item(range_tree, hf_dnp3_al_range_stop32, tvb, offset + 4, 4, ENC_LITTLE_ENDIAN);
rangebytes = 8;
break;
case AL_OBJQL_RANGE_AA8: /* 8-bit Absolute Address in Range Field */
num_items = 1;
proto_item_set_generated(range_item);
al_ptaddr = tvb_get_guint8(tvb, offset);
proto_tree_add_item(range_tree, hf_dnp3_al_range_abs8, tvb, offset, 1, ENC_LITTLE_ENDIAN);
rangebytes = 1;
break;
case AL_OBJQL_RANGE_AA16: /* 16-bit Absolute Address in Range Field */
num_items = 1;
proto_item_set_generated(range_item);
al_ptaddr = tvb_get_letohs(tvb, offset);
proto_tree_add_item(range_tree, hf_dnp3_al_range_abs16, tvb, offset, 2, ENC_LITTLE_ENDIAN);
rangebytes = 2;
break;
case AL_OBJQL_RANGE_AA32: /* 32-bit Absolute Address in Range Field */
num_items = 1;
proto_item_set_generated(range_item);
al_ptaddr = tvb_get_letohl(tvb, offset);
proto_tree_add_item(range_tree, hf_dnp3_al_range_abs32, tvb, offset, 4, ENC_LITTLE_ENDIAN);
rangebytes = 4;
break;
case AL_OBJQL_RANGE_SF8: /* 8-bit Single Field Quantity in Range Field */
num_items = tvb_get_guint8(tvb, offset);
proto_tree_add_item(range_tree, hf_dnp3_al_range_quant8, tvb, offset, 1, ENC_LITTLE_ENDIAN);
rangebytes = 1;
proto_item_set_len(range_item, rangebytes);
break;
case AL_OBJQL_RANGE_SF16: /* 16-bit Single Field Quantity in Range Field */
num_items = tvb_get_letohs(tvb, offset);
proto_tree_add_item(range_tree, hf_dnp3_al_range_quant16, tvb, offset, 2, ENC_LITTLE_ENDIAN);
rangebytes = 2;
proto_item_set_len(range_item, rangebytes);
break;
case AL_OBJQL_RANGE_SF32: /* 32-bit Single Field Quantity in Range Field */
num_items = tvb_get_letohl(tvb, offset);
proto_tree_add_item(range_tree, hf_dnp3_al_range_quant32, tvb, offset, 4, ENC_LITTLE_ENDIAN);
rangebytes = 4;
proto_item_set_len(range_item, rangebytes);
break;
case AL_OBJQL_RANGE_FF: /* 8 bit object count in Range Field */
num_items = tvb_get_guint8(tvb, offset);
proto_tree_add_item(range_tree, hf_dnp3_al_range_quant8, tvb, offset, 1, ENC_LITTLE_ENDIAN);
rangebytes = 1;
proto_item_set_len(range_item, rangebytes);
}
if (num_items > 0) {
proto_item_append_text(object_item, ", %d point%s", num_items, plurality(num_items, "", "s"));
}
proto_item_append_text(range_item, "%d", num_items);
/* A negative number of items is an error */
if (num_items < 0) {
proto_item_append_text(range_item, " (bogus)");
expert_add_info(pinfo, range_item, &ei_dnp_num_items_neg);
return tvb_captured_length(tvb);
}
/* Move offset past any range field */
offset += rangebytes;
bitindex = 0; /* Temp variable for cycling through points when object values are encoded into
bits; primarily objects 0x0101, 0x0301 & 0x1001 */
/* Only process the point information for replies or items with point index lists */
if (!header_only || al_objq_prefix > 0) {
int item_num;
int start_offset;
start_offset = offset;
for (item_num = 0; item_num < num_items; item_num++)
{
proto_item *point_item;
proto_tree *point_tree;
guint data_pos;
int prefixbytes;
/* Create Point item and process prefix */
if (al_objq_prefix <= AL_OBJQL_PREFIX_4O) {
point_tree = proto_tree_add_subtree(object_tree, tvb, offset, -1, ett_dnp3_al_obj_point, &point_item, "Point Number");
}
else {
point_tree = proto_tree_add_subtree(object_tree, tvb, offset, -1, ett_dnp3_al_obj_point, &point_item, "Object: Size");
}
data_pos = offset;
prefixbytes = dnp3_al_obj_procprefix(tvb, offset, al_objq_prefix, &al_ptaddr, point_tree);
/* If this is an 'empty' object type and the num_items field is not equal to zero,
then the packet is potentially malicious */
if ((num_items != 0) && (dnp3_al_empty_obj(al_obj))) {
proto_item_append_text(range_item, " (bogus)");
expert_add_info(pinfo, range_item, &ei_dnp3_num_items_invalid);
num_items = 0;
}
proto_item_append_text(point_item, " %u", al_ptaddr);
proto_item_set_len(point_item, prefixbytes);
data_pos += prefixbytes;
if (!header_only || (AL_OBJQL_PREFIX_1OS <= al_objq_prefix && al_objq_prefix <= AL_OBJQL_PREFIX_4OS)) {
/* Process the object values */
guint8 al_2bit, al_ptflags, al_bi_val, al_tcc_code;
gint16 al_val_int16;
guint16 al_val_uint16, al_ctlobj_stat;
guint16 al_relms, al_filename_len, al_file_ctrl_mode;
gint32 al_val_int32;
guint32 al_val_uint32, file_data_size;
nstime_t al_reltime, al_abstime;
gboolean al_bit;
gfloat al_valflt;
gdouble al_valdbl;
const gchar *ctl_status_str;
/* Device Attributes (g0) all have a type code, use that rather than the individual variation */
if (AL_OBJ_GROUP(al_obj) == 0x0) {
guint32 data_type;
guint8 da_len;
/* Add and retrieve the data type */
proto_tree_add_item_ret_uint(point_tree, hf_dnp3_al_datatype, tvb, data_pos, 1, ENC_LITTLE_ENDIAN, &data_type);
data_pos++;
/* If a valid data type process it */
if (try_val_to_str(data_type, dnp3_al_data_type_vals) != NULL) {
switch(data_type) {
case AL_DATA_TYPE_NONE:
break;
case AL_DATA_TYPE_VSTR:
da_len = tvb_get_guint8(tvb, data_pos);
proto_tree_add_item(point_tree, hf_dnp3_al_da_length, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
data_pos++;
const guint8* da_value;
proto_tree_add_item_ret_string(point_tree, hf_dnp3_al_da_value, tvb, data_pos, da_len, ENC_ASCII|ENC_NA, wmem_packet_scope(), &da_value);
proto_item_append_text(object_item, ", Value: %s", da_value);
data_pos += da_len;
break;
case AL_DATA_TYPE_UINT:
da_len = tvb_get_guint8(tvb, data_pos);
proto_tree_add_item(point_tree, hf_dnp3_al_da_length, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
data_pos++;
if (da_len == 1) {
proto_tree_add_item(point_tree, hf_dnp3_al_da_uint8, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
proto_item_append_text(object_item, ", Value: %u", tvb_get_guint8(tvb, data_pos));
data_pos++;
}
else if (da_len == 2) {
proto_tree_add_item(point_tree, hf_dnp3_al_da_uint16, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
proto_item_append_text(object_item, ", Value: %u", tvb_get_letohs(tvb, data_pos));
data_pos += 2;
}
else if (da_len == 4) {
proto_tree_add_item(point_tree, hf_dnp3_al_da_uint32, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
proto_item_append_text(object_item, ", Value: %u", tvb_get_letohl(tvb, data_pos));
data_pos += 4;
}
break;
case AL_DATA_TYPE_INT:
da_len = tvb_get_guint8(tvb, data_pos);
proto_tree_add_item(point_tree, hf_dnp3_al_da_length, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
data_pos++;
if (da_len == 1) {
proto_tree_add_item(point_tree, hf_dnp3_al_da_int8, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
proto_item_append_text(object_item, ", Value: %d", tvb_get_guint8(tvb, data_pos));
data_pos++;
}
else if (da_len == 2) {
proto_tree_add_item(point_tree, hf_dnp3_al_da_int16, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
proto_item_append_text(object_item, ", Value: %d", tvb_get_letohs(tvb, data_pos));
data_pos += 2;
}
else if (da_len == 4) {
proto_tree_add_item(point_tree, hf_dnp3_al_da_int32, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
proto_item_append_text(object_item, ", Value: %d", tvb_get_letohl(tvb, data_pos));
data_pos += 4;
}
break;
case AL_DATA_TYPE_FLT:
da_len = tvb_get_guint8(tvb, data_pos);
proto_tree_add_item(point_tree, hf_dnp3_al_da_length, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
data_pos++;
if (da_len == 4) {
proto_tree_add_item(point_tree, hf_dnp3_al_da_flt, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
proto_item_append_text(object_item, ", Value: %g", tvb_get_letohieee_float(tvb, data_pos));
data_pos += 4;
}
else if (da_len == 8) {
proto_tree_add_item(point_tree, hf_dnp3_al_da_dbl, tvb, data_pos, 8, ENC_LITTLE_ENDIAN);
proto_item_append_text(object_item, ", Value: %g", tvb_get_letohieee_double(tvb, data_pos));
data_pos += 8;
}
break;
case AL_DATA_TYPE_OSTR:
break;
case AL_DATA_TYPE_BSTR:
break;
case AL_DATA_TYPE_TIME:
break;
case AL_DATA_TYPE_UNCD:
break;
case AL_DATA_TYPE_U8BS8LIST:
break;
case AL_DATA_TYPE_U8BS8EXLIST:
break;
}
}
offset = data_pos;
}
else {
/* All other objects are handled here, by their variations */
switch (al_obj)
{
/* There is nothing to handle for the default variations */
case AL_OBJ_BI_ALL: /* Binary Input Default Variation (Obj:01, Var:Default) */
case AL_OBJ_BIC_ALL: /* Binary Input Change Default Variation (Obj:02, Var:Default) */
case AL_OBJ_BOC_ALL: /* Binary Output Event Default Variation (Obj:11, Var:Default) */
case AL_OBJ_2BI_ALL: /* Double-bit Input Default Variation (Obj:03, Var:Default) */
case AL_OBJ_2BIC_ALL: /* Double-bit Input Change Default Variation (Obj:04, Var:Default) */
case AL_OBJ_CTR_ALL: /* Binary Counter Default Variation (Obj:20, Var:Default) */
case AL_OBJ_CTRC_ALL: /* Binary Counter Change Default Variation (Obj:22 Var:Default) */
case AL_OBJ_AI_ALL: /* Analog Input Default Variation (Obj:30, Var:Default) */
case AL_OBJ_AIC_ALL: /* Analog Input Change Default Variation (Obj:32 Var:Default) */
case AL_OBJ_AIDB_ALL: /* Analog Input Deadband Default Variation (Obj:34, Var:Default) */
case AL_OBJ_AOC_ALL: /* Analog Output Event Default Variation (Obj:42 Var:Default) */
offset = data_pos;
break;
/* Bit-based Data objects here */
case AL_OBJ_BI_1BIT: /* Single-Bit Binary Input (Obj:01, Var:01) */
case AL_OBJ_BO: /* Binary Output (Obj:10, Var:01) */
case AL_OBJ_CTL_PMASK: /* Pattern Mask (Obj:12, Var:03) */
case AL_OBJ_IIN: /* Internal Indications - IIN (Obj: 80, Var:01) */
/* Extract the bit from the packed byte */
al_bi_val = tvb_get_guint8(tvb, data_pos);
al_bit = (al_bi_val & 1) > 0;
if (al_obj == AL_OBJ_IIN) {
/* For an IIN bit, work out the IIN constant value for the bit position to get the name of the bit */
guint16 iin_bit = 0;
if (al_ptaddr < 8) {
iin_bit = 0x100 << al_ptaddr;
}
else {
iin_bit = 1 << (al_ptaddr - 8);
}
proto_item_append_text(point_item, " (%s), Value: %u",
val_to_str_const(iin_bit, dnp3_al_iin_vals, "Invalid IIN bit"), al_bit);
}
else
{
if (al_objq_prefix != AL_OBJQL_PREFIX_NI) {
/* Each item has an index prefix, in this case bump
the bitindex to force the correct offset adjustment */
bitindex = 7;
}
else {
/* Regular packed bits, get the value at the appropriate bit index */
al_bit = (al_bi_val & (1 << bitindex)) > 0;
}
proto_item_append_text(point_item, ", Value: %u", al_bit);
}
proto_tree_add_boolean(point_tree, hf_dnp3_al_bit, tvb, data_pos, 1, al_bit);
proto_item_set_len(point_item, prefixbytes + 1);
/* Increment the bit index for next cycle */
bitindex++;
/* If we have counted 8 bits or read the last item,
reset bit index and move onto the next byte */
if ((bitindex > 7) || (item_num == (num_items-1)))
{
bitindex = 0;
offset += (prefixbytes + 1);
}
break;
case AL_OBJ_2BI_NF: /* Double-bit Input No Flags (Obj:03, Var:01) */
if (bitindex > 3)
{
bitindex = 0;
offset += (prefixbytes + 1);
}
/* Extract the Double-bit from the packed byte */
al_bi_val = tvb_get_guint8(tvb, offset);
al_2bit = ((al_bi_val >> (bitindex << 1)) & 3);
proto_item_append_text(point_item, ", Value: %u", al_2bit);
proto_tree_add_uint(point_tree, hf_dnp3_al_2bit, tvb, offset, 1, al_2bit);
proto_item_set_len(point_item, prefixbytes + 1);
/* If we've read the last item, then move the offset past this byte */
if (item_num == (num_items-1))
{
offset += (prefixbytes + 1);
}
/* Increment the bit index for next cycle */
bitindex++;
break;
case AL_OBJ_BI_STAT: /* Binary Input With Status (Obj:01, Var:02) */
case AL_OBJ_BIC_NOTIME: /* Binary Input Change Without Time (Obj:02, Var:01) */
case AL_OBJ_BO_STAT: /* Binary Output Status (Obj:10, Var:02) */
case AL_OBJ_BOC_NOTIME: /* Binary Output Change Without Time (Obj:11, Var:01) */
/* Get Point Flags */
al_ptflags = tvb_get_guint8(tvb, data_pos);
switch (al_obj) {
case AL_OBJ_BI_STAT:
case AL_OBJ_BIC_NOTIME:
dnp3_al_obj_quality(tvb, data_pos, al_ptflags, point_tree, point_item, BIN_IN);
break;
case AL_OBJ_BO_STAT:
case AL_OBJ_BOC_NOTIME:
dnp3_al_obj_quality(tvb, data_pos, al_ptflags, point_tree, point_item, BIN_OUT);
break;
}
data_pos += 1;
al_bit = (al_ptflags & AL_OBJ_BI_FLAG7) > 0;
proto_item_append_text(point_item, ", Value: %u", al_bit);
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
case AL_OBJ_2BI_STAT: /* Double-bit Input With Status (Obj:03, Var:02) */
case AL_OBJ_2BIC_NOTIME: /* Double-bit Input Change Without Time (Obj:04, Var:01) */
/* Get Point Flags */
al_ptflags = tvb_get_guint8(tvb, data_pos);
dnp3_al_obj_quality(tvb, data_pos, al_ptflags, point_tree, point_item, BIN_IN);
data_pos += 1;
al_2bit = (al_ptflags >> 6) & 3;
proto_item_append_text(point_item, ", Value: %u", al_2bit);
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
case AL_OBJ_BIC_TIME: /* Binary Input Change w/ Time (Obj:02, Var:02) */
case AL_OBJ_BOC_TIME: /* Binary Output Change w/ Time (Obj:11, Var:02) */
/* Get Point Flags */
al_ptflags = tvb_get_guint8(tvb, data_pos);
switch (al_obj) {
case AL_OBJ_BIC_TIME:
dnp3_al_obj_quality(tvb, data_pos, al_ptflags, point_tree, point_item, BIN_IN);
break;
case AL_OBJ_BOC_TIME:
dnp3_al_obj_quality(tvb, data_pos, al_ptflags, point_tree, point_item, BIN_OUT);
break;
}
data_pos += 1;
/* Get timestamp */
dnp3_al_get_timestamp(&al_abstime, tvb, data_pos);
proto_tree_add_time(point_tree, hf_dnp3_al_timestamp, tvb, data_pos, 6, &al_abstime);
data_pos += 6;
al_bit = (al_ptflags & AL_OBJ_BI_FLAG7) >> 7; /* bit shift 1xxxxxxx -> xxxxxxx1 */
proto_item_append_text(point_item, ", Value: %u, Timestamp: %s",
al_bit, abs_time_to_str(wmem_packet_scope(), &al_abstime, ABSOLUTE_TIME_UTC, FALSE));
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
case AL_OBJ_2BIC_TIME: /* Double-bit Input Change w/ Time (Obj:04, Var:02) */
/* Get Point Flags */
al_ptflags = tvb_get_guint8(tvb, data_pos);
dnp3_al_obj_quality(tvb, (offset+prefixbytes), al_ptflags, point_tree, point_item, BIN_IN);
data_pos += 1;
/* Get timestamp */
dnp3_al_get_timestamp(&al_abstime, tvb, data_pos);
proto_tree_add_time(point_tree, hf_dnp3_al_timestamp, tvb, data_pos, 6, &al_abstime);
data_pos += 6;
al_2bit = (al_ptflags >> 6) & 3; /* bit shift 11xxxxxx -> 00000011 */
proto_item_append_text(point_item, ", Value: %u, Timestamp: %s",
al_2bit, abs_time_to_str(wmem_packet_scope(), &al_abstime, ABSOLUTE_TIME_UTC, FALSE));
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
case AL_OBJ_BIC_RTIME: /* Binary Input Change w/ Relative Time (Obj:02, Var:03) */
/* Get Point Flags */
al_ptflags = tvb_get_guint8(tvb, data_pos);
dnp3_al_obj_quality(tvb, data_pos, al_ptflags, point_tree, point_item, BIN_IN);
data_pos += 1;
/* Get relative time in ms, and convert to ns_time */
al_relms = tvb_get_letohs(tvb, data_pos);
al_reltime.secs = al_relms / 1000;
al_reltime.nsecs = (al_relms % 1000) * 1000000;
/* Now add to CTO time */
nstime_sum(&al_abstime, al_cto, &al_reltime);
proto_tree_add_time(point_tree, hf_dnp3_al_rel_timestamp, tvb, data_pos, 2, &al_reltime);
data_pos += 2;
al_bit = (al_ptflags & AL_OBJ_BI_FLAG7) >> 7; /* bit shift 1xxxxxxx -> xxxxxxx1 */
proto_item_append_text(point_item, ", Value: %u, Timestamp: %s",
al_bit, abs_time_to_str(wmem_packet_scope(), &al_abstime, ABSOLUTE_TIME_UTC, FALSE));
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
case AL_OBJ_CTLOP_BLK: /* Control Relay Output Block (Obj:12, Var:01) */
case AL_OBJ_CTL_PCB: /* Pattern Control Block (Obj:12, Var:02) */
{
proto_tree *tcc_tree;
/* Add a expand/collapse for TCC */
al_tcc_code = tvb_get_guint8(tvb, data_pos);
tcc_tree = proto_tree_add_subtree_format(point_tree, tvb, data_pos, 1,
ett_dnp3_al_obj_point_tcc, NULL, "Control Code [0x%02x]",al_tcc_code);
/* Add the Control Code to the Point number list for quick visual reference as to the operation */
proto_item_append_text(point_item, " [%s]", val_to_str_const((al_tcc_code & AL_OBJCTLC_CODE),
dnp3_al_ctlc_code_vals,
"Invalid Operation"));
/* Add Trip/Close qualifier (if applicable) to previously appended quick visual reference */
proto_item_append_text(point_item, " [%s]", val_to_str_const((al_tcc_code & AL_OBJCTLC_TC) >> 6,
dnp3_al_ctlc_tc_vals,
"Invalid Qualifier"));
/* Control Code 'Operation Type' */
proto_tree_add_item(tcc_tree, hf_dnp3_ctlobj_code_c, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
/* Control Code Misc Values */
proto_tree_add_item(tcc_tree, hf_dnp3_ctlobj_code_m, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
/* Control Code 'Trip Close Code' */
proto_tree_add_item(tcc_tree, hf_dnp3_ctlobj_code_tc, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
data_pos += 1;
/* Get "Count" Field */
proto_tree_add_item(point_tree, hf_dnp3_al_count, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
data_pos += 1;
/* Get "On Time" Field */
proto_tree_add_item(point_tree, hf_dnp3_al_on_time, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
/* Get "Off Time" Field */
proto_tree_add_item(point_tree, hf_dnp3_al_off_time, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
/* Get "Control Status" Field */
proto_tree_add_item(point_tree, hf_dnp3_al_ctrlstatus, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
data_pos += 1;
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
}
case AL_OBJ_BOE_NOTIME: /* Binary Command Event (Obj:13, Var:01) */
case AL_OBJ_BOE_TIME: /* Binary Command Event with time (Obj:13, Var:02) */
case AL_OBJ_AOC_32EVNT: /* 32-bit Analog Command Event (Obj:43, Var:01) */
case AL_OBJ_AOC_16EVNT: /* 16-bit Analog Command Event (Obj:43, Var:02) */
case AL_OBJ_AOC_32EVTT: /* 32-bit Analog Command Event with time (Obj:43, Var:03) */
case AL_OBJ_AOC_16EVTT: /* 16-bit Analog Command Event with time (Obj:43, Var:04) */
case AL_OBJ_AOC_FLTEVNT: /* 32-bit Floating Point Analog Command Event (Obj:43, Var:05) */
case AL_OBJ_AOC_DBLEVNT: /* 64-bit Floating Point Analog Command Event (Obj:43, Var:06) */
case AL_OBJ_AOC_FLTEVTT: /* 32-bit Floating Point Analog Command Event with time (Obj:43, Var:07) */
case AL_OBJ_AOC_DBLEVTT: /* 64-bit Floating Point Analog Command Event with time (Obj:43, Var:08) */
{
/* Get the status code */
al_ctlobj_stat = tvb_get_guint8(tvb, data_pos) & AL_OBJCTL_STATUS_MASK;
ctl_status_str = val_to_str_ext(al_ctlobj_stat, &dnp3_al_ctl_status_vals_ext, "Invalid Status (0x%02x)");
proto_item_append_text(point_item, " [Status: %s (0x%02x)]", ctl_status_str, al_ctlobj_stat);
proto_tree_add_item(point_tree, hf_dnp3_al_ctrlstatus, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
/* Get the command value */
switch(al_obj)
{
case AL_OBJ_BOE_NOTIME:
case AL_OBJ_BOE_TIME:
proto_tree_add_item(point_tree, hf_dnp3_bocs_bit, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
data_pos += 1;
break;
case AL_OBJ_AOC_32EVNT:
case AL_OBJ_AOC_32EVTT:
data_pos += 1; /* Step past status */
al_val_int32 = tvb_get_letohl(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %d", al_val_int32);
proto_tree_add_item(point_tree, hf_dnp3_al_anaout32, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
break;
case AL_OBJ_AOC_16EVNT:
case AL_OBJ_AOC_16EVTT:
data_pos += 1; /* Step past status */
al_val_int16 = tvb_get_letohs(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %d", al_val_int16);
proto_tree_add_item(point_tree, hf_dnp3_al_anaout16, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
data_pos += 2;
break;
case AL_OBJ_AOC_FLTEVNT:
case AL_OBJ_AOC_FLTEVTT:
data_pos += 1; /* Step past status */
al_valflt = tvb_get_letohieee_float(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %g", al_valflt);
proto_tree_add_item(point_tree, hf_dnp3_al_anaoutflt, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
break;
case AL_OBJ_AOC_DBLEVNT:
case AL_OBJ_AOC_DBLEVTT:
data_pos += 1; /* Step past status */
al_valdbl = tvb_get_letohieee_double(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %g", al_valdbl);
proto_tree_add_item(point_tree, hf_dnp3_al_anaoutdbl, tvb, data_pos, 8, ENC_LITTLE_ENDIAN);
data_pos += 8;
break;
}
/* Get the timestamp */
switch(al_obj)
{
case AL_OBJ_BOE_TIME: /* Binary Command Event with time (Obj:13, Var:02) */
case AL_OBJ_AOC_32EVTT: /* 32-bit Analog Command Event with time (Obj:43, Var:03) */
case AL_OBJ_AOC_16EVTT: /* 16-bit Analog Command Event with time (Obj:43, Var:04) */
case AL_OBJ_AOC_FLTEVTT: /* 32-bit Floating Point Analog Command Event with time (Obj:43, Var:07) */
case AL_OBJ_AOC_DBLEVTT: /* 64-bit Floating Point Analog Command Event with time (Obj:43, Var:08) */
dnp3_al_get_timestamp(&al_abstime, tvb, data_pos);
proto_item_append_text(point_item, ", Timestamp: %s", abs_time_to_str(wmem_packet_scope(), &al_abstime, ABSOLUTE_TIME_UTC, FALSE));
proto_tree_add_time(point_tree, hf_dnp3_al_timestamp, tvb, data_pos, 6, &al_abstime);
data_pos += 6;
break;
}
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
}
case AL_OBJ_AO_32OPB: /* 32-Bit Analog Output Block (Obj:41, Var:01) */
case AL_OBJ_AO_16OPB: /* 16-Bit Analog Output Block (Obj:41, Var:02) */
case AL_OBJ_AO_FLTOPB: /* 32-Bit Floating Point Output Block (Obj:41, Var:03) */
case AL_OBJ_AO_DBLOPB: /* 64-Bit Floating Point Output Block (Obj:41, Var:04) */
switch (al_obj)
{
case AL_OBJ_AO_32OPB:
al_val_int32 = tvb_get_letohl(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %d", al_val_int32);
proto_tree_add_item(point_tree, hf_dnp3_al_anaout32, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
break;
case AL_OBJ_AO_16OPB:
al_val_int16 = tvb_get_letohs(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %d", al_val_int16);
proto_tree_add_item(point_tree, hf_dnp3_al_anaout16, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
data_pos += 2;
break;
case AL_OBJ_AO_FLTOPB:
al_valflt = tvb_get_letohieee_float(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %g", al_valflt);
proto_tree_add_item(point_tree, hf_dnp3_al_anaoutflt, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
break;
case AL_OBJ_AO_DBLOPB:
al_valdbl = tvb_get_letohieee_double(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %g", al_valdbl);
proto_tree_add_item(point_tree, hf_dnp3_al_anaoutdbl, tvb, data_pos, 8, ENC_LITTLE_ENDIAN);
data_pos += 8;
break;
}
/* Get control status */
al_ctlobj_stat = tvb_get_guint8(tvb, data_pos) & AL_OBJCTL_STATUS_MASK;
ctl_status_str = val_to_str_ext(al_ctlobj_stat, &dnp3_al_ctl_status_vals_ext, "Invalid Status (0x%02x)");
proto_item_append_text(point_item, " [Status: %s (0x%02x)]", ctl_status_str, al_ctlobj_stat);
proto_tree_add_item(point_tree, hf_dnp3_al_ctrlstatus, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
data_pos += 1;
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
case AL_OBJ_CTR_32: /* 32-Bit Binary Counter (Obj:20, Var:01) */
case AL_OBJ_CTR_16: /* 16-Bit Binary Counter (Obj:20, Var:02) */
case AL_OBJ_DCTR_32: /* 32-Bit Binary Delta Counter (Obj:20, Var:03) */
case AL_OBJ_DCTR_16: /* 16-Bit Binary Delta Counter (Obj:20, Var:04) */
case AL_OBJ_CTR_32NF: /* 32-Bit Binary Counter Without Flag (Obj:20, Var:05) */
case AL_OBJ_CTR_16NF: /* 16-Bit Binary Counter Without Flag (Obj:20, Var:06) */
case AL_OBJ_DCTR_32NF: /* 32-Bit Binary Delta Counter Without Flag (Obj:20, Var:07) */
case AL_OBJ_DCTR_16NF: /* 16-Bit Binary Delta Counter Without Flag (Obj:20, Var:08) */
case AL_OBJ_FCTR_32: /* 32-Bit Frozen Counter (Obj:21, Var:01) */
case AL_OBJ_FCTR_16: /* 16-Bit Frozen Counter (Obj:21, Var:02) */
case AL_OBJ_FDCTR_32: /* 21 03 32-Bit Frozen Delta Counter */
case AL_OBJ_FDCTR_16: /* 21 04 16-Bit Frozen Delta Counter */
case AL_OBJ_FCTR_32T: /* 32-Bit Frozen Counter w/ Time of Freeze (Obj:21 Var:05 ) */
case AL_OBJ_FCTR_16T: /* 16-Bit Frozen Counter w/ Time of Freeze (Obj:21 Var:06) */
case AL_OBJ_FDCTR_32T: /* 32-Bit Frozen Delta Counter w/ Time of Freeze (Obj:21 Var:07) */
case AL_OBJ_FDCTR_16T: /* 16-Bit Frozen Delta Counter w/ Time of Freeze (Obj:21 Var:08) */
case AL_OBJ_FCTR_32NF: /* 32-Bit Frozen Counter Without Flag (Obj:21 Var:09) */
case AL_OBJ_FCTR_16NF: /* 16-Bit Frozen Counter Without Flag (Obj:21 Var:10) */
case AL_OBJ_FDCTR_32NF: /* 32-Bit Frozen Delta Counter Without Flag (Obj:21 Var:11) */
case AL_OBJ_FDCTR_16NF: /* 16-Bit Frozen Delta Counter Without Flag (Obj:21 Var:12) */
case AL_OBJ_CTRC_32: /* 32-Bit Counter Change Event w/o Time (Obj:22, Var:01) */
case AL_OBJ_CTRC_16: /* 16-Bit Counter Change Event w/o Time (Obj:22, Var:02) */
case AL_OBJ_DCTRC_32: /* 32-Bit Delta Counter Change Event w/o Time (Obj:22, Var:03) */
case AL_OBJ_DCTRC_16: /* 16-Bit Delta Counter Change Event w/o Time (Obj:22, Var:04) */
case AL_OBJ_CTRC_32T: /* 32-Bit Counter Change Event with Time (Obj:22, Var:05) */
case AL_OBJ_CTRC_16T: /* 16-Bit Counter Change Event with Time (Obj:22, Var:06) */
case AL_OBJ_DCTRC_32T: /* 32-Bit Delta Counter Change Event with Time (Obj:22, Var:07) */
case AL_OBJ_DCTRC_16T: /* 16-Bit Delta Counter Change Event with Time (Obj:22, Var:08) */
case AL_OBJ_FCTRC_32: /* 32-Bit Frozen Counter Change Event (Obj:23 Var:01) */
case AL_OBJ_FCTRC_16: /* 16-Bit Frozen Counter Change Event (Obj:23 Var:02) */
case AL_OBJ_FDCTRC_32: /* 32-Bit Frozen Delta Counter Change Event (Obj:23 Var:03) */
case AL_OBJ_FDCTRC_16: /* 16-Bit Frozen Delta Counter Change Event (Obj:23 Var:04) */
case AL_OBJ_FCTRC_32T: /* 32-Bit Frozen Counter Change Event w/ Time of Freeze (Obj:23 Var:05) */
case AL_OBJ_FCTRC_16T: /* 16-Bit Frozen Counter Change Event w/ Time of Freeze (Obj:23 Var:06) */
case AL_OBJ_FDCTRC_32T: /* 32-Bit Frozen Delta Counter Change Event w/ Time of Freeze (Obj:23 Var:07) */
case AL_OBJ_FDCTRC_16T: /* 16-Bit Frozen Delta Counter Change Event w/ Time of Freeze (Obj:23 Var:08) */
/* Get Point Flags for those types that have them, it's easier to block out those that don't have flags */
switch (al_obj)
{
case AL_OBJ_CTR_32NF:
case AL_OBJ_CTR_16NF:
case AL_OBJ_DCTR_32NF:
case AL_OBJ_DCTR_16NF:
case AL_OBJ_FCTR_32NF:
case AL_OBJ_FCTR_16NF:
case AL_OBJ_FDCTR_32NF:
case AL_OBJ_FDCTR_16NF:
break;
default:
al_ptflags = tvb_get_guint8(tvb, data_pos);
dnp3_al_obj_quality(tvb, data_pos, al_ptflags, point_tree, point_item, COUNTER);
data_pos += 1;
break;
}
/* Get Counter values */
switch (al_obj)
{
case AL_OBJ_CTR_32:
case AL_OBJ_DCTR_32:
case AL_OBJ_CTR_32NF:
case AL_OBJ_DCTR_32NF:
case AL_OBJ_FCTR_32:
case AL_OBJ_FDCTR_32:
case AL_OBJ_FCTR_32T:
case AL_OBJ_FDCTR_32T:
case AL_OBJ_FCTR_32NF:
case AL_OBJ_FDCTR_32NF:
case AL_OBJ_CTRC_32:
case AL_OBJ_DCTRC_32:
case AL_OBJ_CTRC_32T:
case AL_OBJ_DCTRC_32T:
case AL_OBJ_FCTRC_32:
case AL_OBJ_FDCTRC_32:
case AL_OBJ_FCTRC_32T:
case AL_OBJ_FDCTRC_32T:
al_val_uint32 = tvb_get_letohl(tvb, data_pos);
proto_item_append_text(point_item, ", Count: %u", al_val_uint32);
proto_tree_add_item(point_tree, hf_dnp3_al_cnt32, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
break;
case AL_OBJ_CTR_16:
case AL_OBJ_DCTR_16:
case AL_OBJ_CTR_16NF:
case AL_OBJ_DCTR_16NF:
case AL_OBJ_FCTR_16:
case AL_OBJ_FDCTR_16:
case AL_OBJ_FCTR_16T:
case AL_OBJ_FDCTR_16T:
case AL_OBJ_FCTR_16NF:
case AL_OBJ_FDCTR_16NF:
case AL_OBJ_CTRC_16:
case AL_OBJ_DCTRC_16:
case AL_OBJ_CTRC_16T:
case AL_OBJ_DCTRC_16T:
case AL_OBJ_FCTRC_16:
case AL_OBJ_FDCTRC_16:
case AL_OBJ_FCTRC_16T:
case AL_OBJ_FDCTRC_16T:
al_val_uint16 = tvb_get_letohs(tvb, data_pos);
proto_item_append_text(point_item, ", Count: %u", al_val_uint16);
proto_tree_add_item(point_tree, hf_dnp3_al_cnt16, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
data_pos += 2;
break;
}
/* Get the time for those points that have it */
switch (al_obj)
{
case AL_OBJ_FCTR_32T:
case AL_OBJ_FCTR_16T:
case AL_OBJ_FDCTR_32T:
case AL_OBJ_FDCTR_16T:
case AL_OBJ_CTRC_32T:
case AL_OBJ_CTRC_16T:
case AL_OBJ_DCTRC_32T:
case AL_OBJ_DCTRC_16T:
case AL_OBJ_FCTRC_32T:
case AL_OBJ_FCTRC_16T:
case AL_OBJ_FDCTRC_32T:
case AL_OBJ_FDCTRC_16T:
dnp3_al_get_timestamp(&al_abstime, tvb, data_pos);
proto_item_append_text(point_item, ", Timestamp: %s", abs_time_to_str(wmem_packet_scope(), &al_abstime, ABSOLUTE_TIME_UTC, FALSE));
proto_tree_add_time(point_tree, hf_dnp3_al_timestamp, tvb, data_pos, 6, &al_abstime);
data_pos += 6;
break;
}
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
case AL_OBJ_AI_32: /* 32-Bit Analog Input (Obj:30, Var:01) */
case AL_OBJ_AI_16: /* 16-Bit Analog Input (Obj:30, Var:02) */
case AL_OBJ_AI_32NF: /* 32-Bit Analog Input Without Flag (Obj:30, Var:03) */
case AL_OBJ_AI_16NF: /* 16-Bit Analog Input Without Flag (Obj:30, Var:04) */
case AL_OBJ_AI_FLT: /* 32-Bit Floating Point Input (Obj:30, Var:05) */
case AL_OBJ_AI_DBL: /* 64-Bit Floating Point Input (Obj:30, Var:06) */
case AL_OBJ_AIF_FLT: /* 32-Bit Frozen Floating Point Input (Obj:31, Var:07) */
case AL_OBJ_AIF_DBL: /* 64-Bit Frozen Floating Point Input (Obj:31, Var:08) */
case AL_OBJ_AIC_32NT: /* 32-Bit Analog Change Event w/o Time (Obj:32, Var:01) */
case AL_OBJ_AIC_16NT: /* 16-Bit Analog Change Event w/o Time (Obj:32, Var:02) */
case AL_OBJ_AIC_32T: /* 32-Bit Analog Change Event with Time (Obj:32, Var:03) */
case AL_OBJ_AIC_16T: /* 16-Bit Analog Change Event with Time (Obj:32, Var:04) */
case AL_OBJ_AIC_FLTNT: /* 32-Bit Floating Point Change Event w/o Time (Obj:32, Var:05) */
case AL_OBJ_AIC_DBLNT: /* 64-Bit Floating Point Change Event w/o Time (Obj:32, Var:06) */
case AL_OBJ_AIC_FLTT: /* 32-Bit Floating Point Change Event w/ Time (Obj:32, Var:07) */
case AL_OBJ_AIC_DBLT: /* 64-Bit Floating Point Change Event w/ Time (Obj:32, Var:08) */
case AL_OBJ_AIFC_FLTNT: /* 32-Bit Floating Point Frozen Change Event w/o Time (Obj:33, Var:05) */
case AL_OBJ_AIFC_DBLNT: /* 64-Bit Floating Point Frozen Change Event w/o Time (Obj:33, Var:06) */
case AL_OBJ_AIFC_FLTT: /* 32-Bit Floating Point Frozen Change Event w/ Time (Obj:33, Var:07) */
case AL_OBJ_AIFC_DBLT: /* 64-Bit Floating Point Frozen Change Event w/ Time (Obj:33, Var:08) */
case AL_OBJ_AIDB_16: /* 16-Bit Analog Input Deadband (Obj:34, Var:01) */
case AL_OBJ_AIDB_32: /* 32-Bit Analog Input Deadband (Obj:34, Var:02) */
case AL_OBJ_AIDB_FLT: /* 32-Bit Floating Point Analog Input Deadband (Obj:34, Var:03) */
/* Get Point Flags for those types that have them */
switch (al_obj)
{
case AL_OBJ_AI_32NF:
case AL_OBJ_AI_16NF:
case AL_OBJ_AIDB_16:
case AL_OBJ_AIDB_32:
case AL_OBJ_AIDB_FLT:
break;
default:
al_ptflags = tvb_get_guint8(tvb, data_pos);
dnp3_al_obj_quality(tvb, data_pos, al_ptflags, point_tree, point_item, ANA_IN);
data_pos += 1;
break;
}
switch (al_obj)
{
case AL_OBJ_AI_32:
case AL_OBJ_AI_32NF:
case AL_OBJ_AIC_32NT:
case AL_OBJ_AIC_32T:
case AL_OBJ_AIDB_32:
al_val_int32 = tvb_get_letohl(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %d", al_val_int32);
proto_tree_add_item(point_tree, hf_dnp3_al_ana32, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
break;
case AL_OBJ_AI_16:
case AL_OBJ_AI_16NF:
case AL_OBJ_AIC_16NT:
case AL_OBJ_AIC_16T:
case AL_OBJ_AIDB_16:
al_val_int16 = tvb_get_letohs(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %d", al_val_int16);
proto_tree_add_item(point_tree, hf_dnp3_al_ana16, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
data_pos += 2;
break;
case AL_OBJ_AI_FLT:
case AL_OBJ_AIF_FLT:
case AL_OBJ_AIC_FLTNT:
case AL_OBJ_AIC_FLTT:
case AL_OBJ_AIFC_FLTNT:
case AL_OBJ_AIFC_FLTT:
case AL_OBJ_AIDB_FLT:
al_valflt = tvb_get_letohieee_float(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %g", al_valflt);
proto_tree_add_item(point_tree, hf_dnp3_al_anaflt, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
break;
case AL_OBJ_AI_DBL:
case AL_OBJ_AIF_DBL:
case AL_OBJ_AIC_DBLNT:
case AL_OBJ_AIC_DBLT:
case AL_OBJ_AIFC_DBLNT:
case AL_OBJ_AIFC_DBLT:
al_valdbl = tvb_get_letohieee_double(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %g", al_valdbl);
proto_tree_add_item(point_tree, hf_dnp3_al_anadbl, tvb, data_pos, 8, ENC_LITTLE_ENDIAN);
data_pos += 8;
break;
}
/* Get timestamp */
switch (al_obj)
{
case AL_OBJ_AIC_32T:
case AL_OBJ_AIC_16T:
case AL_OBJ_AIC_FLTT:
case AL_OBJ_AIC_DBLT:
case AL_OBJ_AIFC_FLTT:
case AL_OBJ_AIFC_DBLT:
dnp3_al_get_timestamp(&al_abstime, tvb, data_pos);
proto_item_append_text(point_item, ", Timestamp: %s", abs_time_to_str(wmem_packet_scope(), &al_abstime, ABSOLUTE_TIME_UTC, FALSE));
proto_tree_add_time(point_tree, hf_dnp3_al_timestamp, tvb, data_pos, 6, &al_abstime);
data_pos += 6;
break;
}
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
case AL_OBJ_AO_32: /* 32-Bit Analog Output Status (Obj:40, Var:01) */
case AL_OBJ_AO_16: /* 16-Bit Analog Output Status (Obj:40, Var:02) */
case AL_OBJ_AO_FLT: /* 32-Bit Floating Point Output Status (Obj:40, Var:03) */
case AL_OBJ_AO_DBL: /* 64-Bit Floating Point Output Status (Obj:40, Var:04) */
case AL_OBJ_AOC_32NT: /* 32-Bit Analog Output Event w/o Time (Obj:42, Var:01) */
case AL_OBJ_AOC_16NT: /* 16-Bit Analog Output Event w/o Time (Obj:42, Var:02) */
case AL_OBJ_AOC_32T: /* 32-Bit Analog Output Event with Time (Obj:42, Var:03) */
case AL_OBJ_AOC_16T: /* 16-Bit Analog Output Event with Time (Obj:42, Var:04) */
case AL_OBJ_AOC_FLTNT: /* 32-Bit Floating Point Output Event w/o Time (Obj:42, Var:05) */
case AL_OBJ_AOC_DBLNT: /* 64-Bit Floating Point Output Event w/o Time (Obj:42, Var:06) */
case AL_OBJ_AOC_FLTT: /* 32-Bit Floating Point Output Event w/ Time (Obj:42, Var:07) */
case AL_OBJ_AOC_DBLT: /* 64-Bit Floating Point Output Event w/ Time (Obj:42, Var:08) */
/* Get Point Flags */
al_ptflags = tvb_get_guint8(tvb, data_pos);
dnp3_al_obj_quality(tvb, data_pos, al_ptflags, point_tree, point_item, ANA_OUT);
data_pos += 1;
switch (al_obj)
{
case AL_OBJ_AO_32: /* 32-Bit Analog Output Status (Obj:40, Var:01) */
case AL_OBJ_AOC_32NT: /* 32-Bit Analog Output Event w/o Time (Obj:42, Var:01) */
case AL_OBJ_AOC_32T: /* 32-Bit Analog Output Event with Time (Obj:42, Var:03) */
al_val_int32 = tvb_get_letohl(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %d", al_val_int32);
proto_tree_add_item(point_tree, hf_dnp3_al_anaout32, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
break;
case AL_OBJ_AO_16: /* 16-Bit Analog Output Status (Obj:40, Var:02) */
case AL_OBJ_AOC_16NT: /* 16-Bit Analog Output Event w/o Time (Obj:42, Var:02) */
case AL_OBJ_AOC_16T: /* 16-Bit Analog Output Event with Time (Obj:42, Var:04) */
al_val_int16 = tvb_get_letohs(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %d", al_val_int16);
proto_tree_add_item(point_tree, hf_dnp3_al_anaout16, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
data_pos += 2;
break;
case AL_OBJ_AO_FLT: /* 32-Bit Floating Point Output Status (Obj:40, Var:03) */
case AL_OBJ_AOC_FLTNT: /* 32-Bit Floating Point Output Event w/o Time (Obj:42, Var:05) */
case AL_OBJ_AOC_FLTT: /* 32-Bit Floating Point Output Event w/ Time (Obj:42, Var:07) */
al_valflt = tvb_get_letohieee_float(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %g", al_valflt);
proto_tree_add_item(point_tree, hf_dnp3_al_anaoutflt, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
break;
case AL_OBJ_AO_DBL: /* 64-Bit Floating Point Output Status (Obj:40, Var:04) */
case AL_OBJ_AOC_DBLNT: /* 64-Bit Floating Point Output Event w/o Time (Obj:42, Var:06) */
case AL_OBJ_AOC_DBLT: /* 64-Bit Floating Point Output Event w/ Time (Obj:42, Var:08) */
al_valdbl = tvb_get_letohieee_double(tvb, data_pos);
proto_item_append_text(point_item, ", Value: %g", al_valdbl);
proto_tree_add_item(point_tree, hf_dnp3_al_anaoutdbl, tvb, data_pos, 8, ENC_LITTLE_ENDIAN);
data_pos += 8;
break;
}
/* Get timestamp */
switch (al_obj)
{
case AL_OBJ_AOC_32T:
case AL_OBJ_AOC_16T:
case AL_OBJ_AOC_FLTT:
case AL_OBJ_AOC_DBLT:
dnp3_al_get_timestamp(&al_abstime, tvb, data_pos);
proto_item_append_text(point_item, ", Timestamp: %s", abs_time_to_str(wmem_packet_scope(), &al_abstime, ABSOLUTE_TIME_UTC, FALSE));
proto_tree_add_time(point_tree, hf_dnp3_al_timestamp, tvb, data_pos, 6, &al_abstime);
data_pos += 6;
break;
}
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
case AL_OBJ_TD: /* Time and Date (Obj:50, Var:01) */
case AL_OBJ_TDR: /* Time and Date at Last Recorded Time (Obj:50, Var:03) */
case AL_OBJ_TDCTO: /* Time and Date CTO (Obj:51, Var:01) */
dnp3_al_get_timestamp(&al_abstime, tvb, data_pos);
proto_tree_add_time(object_tree, hf_dnp3_al_timestamp, tvb, data_pos, 6, &al_abstime);
data_pos += 6;
proto_item_set_len(point_item, data_pos - offset);
if (al_obj == AL_OBJ_TDCTO) {
/* Copy the time object to the CTO for any other relative time objects in this response */
nstime_copy(al_cto, &al_abstime);
}
offset = data_pos;
break;
case AL_OBJ_TDELAYF: /* Time Delay - Fine (Obj:52, Var:02) */
proto_tree_add_item(object_tree, hf_dnp3_al_time_delay, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
data_pos += 2;
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
case AL_OBJ_CLASS0: /* Class Data Objects */
case AL_OBJ_CLASS1:
case AL_OBJ_CLASS2:
case AL_OBJ_CLASS3:
/* No data here */
offset = data_pos;
break;
case AL_OBJ_FILE_CMD: /* File Control - File Command (Obj:70, Var:03) */
/* File name offset and length */
proto_tree_add_item(point_tree, hf_dnp3_al_file_string_offset, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
data_pos += 2;
al_filename_len = tvb_get_letohs(tvb, data_pos);
proto_tree_add_item(point_tree, hf_dnp3_al_file_string_length, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
data_pos += 2;
/* Grab the mode as it determines if some of the following fields are relevant */
al_file_ctrl_mode = tvb_get_letohs(tvb, data_pos + 16);
/* Creation Time */
if (al_file_ctrl_mode == AL_OBJ_FILE_MODE_WRITE) {
dnp3_al_get_timestamp(&al_abstime, tvb, data_pos);
proto_tree_add_time(point_tree, hf_dnp3_al_timestamp, tvb, data_pos, 6, &al_abstime);
}
data_pos += 6;
/* Perms */
if (al_file_ctrl_mode == AL_OBJ_FILE_MODE_WRITE) {
proto_item *perms_item;
proto_tree *perms_tree;
perms_item = proto_tree_add_item(point_tree, hf_dnp3_al_file_perms, tvb, offset, 2, ENC_LITTLE_ENDIAN);
perms_tree = proto_item_add_subtree(perms_item, ett_dnp3_al_obj_point_perms);
proto_tree_add_item(perms_tree, hf_dnp3_al_file_perms_read_owner, tvb, offset, 2, ENC_LITTLE_ENDIAN);
proto_tree_add_item(perms_tree, hf_dnp3_al_file_perms_write_owner, tvb, offset, 2, ENC_LITTLE_ENDIAN);
proto_tree_add_item(perms_tree, hf_dnp3_al_file_perms_exec_owner, tvb, offset, 2, ENC_LITTLE_ENDIAN);
proto_tree_add_item(perms_tree, hf_dnp3_al_file_perms_read_group, tvb, offset, 2, ENC_LITTLE_ENDIAN);
proto_tree_add_item(perms_tree, hf_dnp3_al_file_perms_write_group, tvb, offset, 2, ENC_LITTLE_ENDIAN);
proto_tree_add_item(perms_tree, hf_dnp3_al_file_perms_exec_group, tvb, offset, 2, ENC_LITTLE_ENDIAN);
proto_tree_add_item(perms_tree, hf_dnp3_al_file_perms_read_world, tvb, offset, 2, ENC_LITTLE_ENDIAN);
proto_tree_add_item(perms_tree, hf_dnp3_al_file_perms_write_world, tvb, offset, 2, ENC_LITTLE_ENDIAN);
proto_tree_add_item(perms_tree, hf_dnp3_al_file_perms_exec_world, tvb, offset, 2, ENC_LITTLE_ENDIAN);
}
data_pos += 2;
/* Auth Key */
proto_tree_add_item(point_tree, hf_dnp3_al_file_auth, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
/* File Size */
if (al_file_ctrl_mode == AL_OBJ_FILE_MODE_WRITE || al_file_ctrl_mode == AL_OBJ_FILE_MODE_APPEND) {
proto_tree_add_item(point_tree, hf_dnp3_al_file_size, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
}
data_pos += 4;
/* Mode */
proto_tree_add_item(point_tree, hf_dnp3_al_file_mode, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
data_pos += 2;
/* Max Block Size */
proto_tree_add_item(point_tree, hf_dnp3_al_file_maxblk, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
data_pos += 2;
/* Request ID */
proto_tree_add_item(point_tree, hf_dnp3_al_file_reqID, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
data_pos += 2;
/* Filename */
if (al_filename_len > 0) {
proto_tree_add_item(point_tree, hf_dnp3_al_file_name, tvb, data_pos, al_filename_len, ENC_ASCII|ENC_NA);
}
data_pos += al_filename_len;
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
case AL_OBJ_FILE_STAT: /* File Control - File Status (Obj:70, Var:04) */
/* File Handle */
proto_tree_add_item(point_tree, hf_dnp3_al_file_handle, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
/* File Size */
proto_tree_add_item(point_tree, hf_dnp3_al_file_size, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
/* Max Block Size */
proto_tree_add_item(point_tree, hf_dnp3_al_file_maxblk, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
data_pos += 2;
/* Request ID */
proto_tree_add_item(point_tree, hf_dnp3_al_file_reqID, tvb, data_pos, 2, ENC_LITTLE_ENDIAN);
data_pos += 2;
/* Status code */
proto_tree_add_item(point_tree, hf_dnp3_al_file_status, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
data_pos += 1;
/* Optional text */
file_data_size = al_ptaddr - (data_pos - offset - prefixbytes);
if ((file_data_size) > 0) {
proto_tree_add_item(point_tree, hf_dnp3_al_file_data, tvb, data_pos, file_data_size, ENC_NA);
data_pos += file_data_size;
}
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
case AL_OBJ_FILE_TRANS: /* File Control - File Transport (Obj:70, Var:05) */
/* File Handle */
proto_tree_add_item(point_tree, hf_dnp3_al_file_handle, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
/* File block (bits 0 - 30) and last block flag (bit 31) */
proto_tree_add_item(point_tree, hf_dnp3_al_file_blocknum, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
proto_tree_add_item(point_tree, hf_dnp3_al_file_lastblock, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
/* File data */
file_data_size = al_ptaddr - (data_pos - offset - prefixbytes);
if ((file_data_size) > 0) {
proto_tree_add_item(point_tree, hf_dnp3_al_file_data, tvb, data_pos, file_data_size, ENC_NA);
data_pos += file_data_size;
}
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
case AL_OBJ_FILE_TRAN_ST: /* File Control Tansport Status (Obj:70, Var:06) */
/* File Handle */
proto_tree_add_item(point_tree, hf_dnp3_al_file_handle, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
/* File block (bits 0 - 30) and last block flag (bit 31) */
proto_tree_add_item(point_tree, hf_dnp3_al_file_blocknum, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
proto_tree_add_item(point_tree, hf_dnp3_al_file_lastblock, tvb, data_pos, 4, ENC_LITTLE_ENDIAN);
data_pos += 4;
/* Status code */
proto_tree_add_item(point_tree, hf_dnp3_al_file_status, tvb, data_pos, 1, ENC_LITTLE_ENDIAN);
data_pos += 1;
/* Optional text */
file_data_size = al_ptaddr - (data_pos - offset - prefixbytes);
if ((file_data_size) > 0) {
proto_tree_add_item(point_tree, hf_dnp3_al_file_data, tvb, data_pos, file_data_size, ENC_NA);
data_pos += file_data_size;
}
proto_item_set_len(point_item, data_pos - offset);
offset = data_pos;
break;
case AL_OBJ_OCT: /* Octet string */
case AL_OBJ_OCT_EVT: /* Octet string event */
/* read the number of bytes defined by the variation */
if (al_oct_len > 0) {
proto_tree_add_item(object_tree, hf_dnp3_al_octet_string, tvb, data_pos, al_oct_len, ENC_NA);
data_pos += al_oct_len;
proto_item_set_len(point_item, data_pos - offset);
}
offset = data_pos;
break;
default: /* In case of unknown object */
proto_tree_add_item(object_tree, hf_dnp3_unknown_data_chunk, tvb, offset, -1, ENC_NA);
offset = tvb_captured_length(tvb); /* Finish decoding if unknown object is encountered... */
break;
}
}
/* And increment the point address, may be overwritten by an index value */
al_ptaddr++;
}
else {
/* No objects, just prefixes, move past prefix values */
offset = data_pos;
}
if (start_offset > offset) {
expert_add_info(pinfo, point_item, &ei_dnp_invalid_length);
offset = tvb_captured_length(tvb); /* Finish decoding if unknown object is encountered... */
}
}
}
proto_item_set_len(object_item, offset - orig_offset);
return offset;
}
| 0 |
[
"CWE-835"
] |
wireshark
|
618661b22e34a59b21117db723d8ff91e064d4ba
| 166,107,750,976,274,360,000,000,000,000,000,000,000 | 1,249 |
dnp: plug a memory leak.
If we're throwing away the data, *throw away the data* - free it, as
we're not using it as the backing data for a tvbuff.
|
/*
 * ext4_punch_hole - deallocate the blocks backing [offset, offset + length)
 * @inode:  target inode (must be a regular file)
 * @offset: byte offset of the start of the hole
 * @length: length of the hole in bytes
 *
 * Zeroes the partial blocks at the edges of the range and removes the
 * whole blocks in between from the inode's mapping, leaving a hole.
 * Takes the inode lock, i_mmap_sem and i_data_sem (in that order) and
 * runs the block removal under a jbd2 truncate handle.
 *
 * Returns 0 on success or a negative errno.
 */
int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t first_block, stop_block;
	struct address_space *mapping = inode->i_mapping;
	loff_t first_block_offset, last_block_offset;
	handle_t *handle;
	unsigned int credits;
	int ret = 0;

	/* Hole punching is only defined for regular files. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	trace_ext4_punch_hole(inode, offset, length, 0);

	/*
	 * Write out all dirty pages to avoid race conditions
	 * Then release them.
	 */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		ret = filemap_write_and_wait_range(mapping, offset,
						   offset + length - 1);
		if (ret)
			return ret;
	}

	inode_lock(inode);

	/* No need to punch hole beyond i_size */
	if (offset >= inode->i_size)
		goto out_mutex;

	/*
	 * If the hole extends beyond i_size, set the hole
	 * to end after the page that contains i_size
	 */
	if (offset + length > inode->i_size) {
		length = inode->i_size +
		   PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
		   offset;
	}

	/*
	 * Range is not block-aligned on at least one end, so partial
	 * blocks will need zeroing below.
	 */
	if (offset & (sb->s_blocksize - 1) ||
	    (offset + length) & (sb->s_blocksize - 1)) {
		/*
		 * Attach jinode to inode for jbd2 if we do any zeroing of
		 * partial block
		 */
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			goto out_mutex;

	}

	/* Wait all existing dio workers, newcomers will block on i_mutex */
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released from
	 * page cache.
	 */
	down_write(&EXT4_I(inode)->i_mmap_sem);

	/* Block-aligned interior of the hole: only this part is truncated. */
	first_block_offset = round_up(offset, sb->s_blocksize);
	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;

	/* Now release the pages and zero block aligned part of pages*/
	if (last_block_offset > first_block_offset) {
		ret = ext4_update_disksize_before_punch(inode, offset, length);
		if (ret)
			goto out_dio;
		truncate_pagecache_range(inode, first_block_offset,
					 last_block_offset);
	}

	/* Credits depend on whether the inode is extent- or block-mapped. */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		credits = ext4_writepage_trans_blocks(inode);
	else
		credits = ext4_blocks_for_truncate(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		ext4_std_error(sb, ret);
		goto out_dio;
	}

	/* Zero the unaligned head/tail of the range inside the journal. */
	ret = ext4_zero_partial_blocks(handle, inode, offset,
				       length);
	if (ret)
		goto out_stop;

	/* Convert the aligned byte range to logical block numbers. */
	first_block = (offset + sb->s_blocksize - 1) >>
		EXT4_BLOCK_SIZE_BITS(sb);
	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);

	/* If there are no blocks to remove, return now */
	if (first_block >= stop_block)
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	/* Drop the extent-status cache for the punched range first. */
	ret = ext4_es_remove_extent(inode, first_block,
				    stop_block - first_block);
	if (ret) {
		up_write(&EXT4_I(inode)->i_data_sem);
		goto out_stop;
	}

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ret = ext4_ext_remove_space(inode, first_block,
					    stop_block - 1);
	else
		ret = ext4_ind_remove_space(handle, inode, first_block,
					    stop_block);
	up_write(&EXT4_I(inode)->i_data_sem);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	ext4_mark_inode_dirty(handle, inode);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
	ext4_journal_stop(handle);
out_dio:
	up_write(&EXT4_I(inode)->i_mmap_sem);
out_mutex:
	inode_unlock(inode);
	return ret;
}
| 0 |
[] |
linux
|
8e4b5eae5decd9dfe5a4ee369c22028f90ab4c44
| 22,050,165,216,352,595,000,000,000,000,000,000,000 | 131 |
ext4: fail ext4_iget for root directory if unallocated
If the root directory has an i_links_count of zero, then when the file
system is mounted, then when ext4_fill_super() notices the problem and
tries to call iput() the root directory in the error return path,
ext4_evict_inode() will try to free the inode on disk, before all of
the file system structures are set up, and this will result in an OOPS
caused by a NULL pointer dereference.
This issue has been assigned CVE-2018-1092.
https://bugzilla.kernel.org/show_bug.cgi?id=199179
https://bugzilla.redhat.com/show_bug.cgi?id=1560777
Reported-by: Wen Xu <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected]
|
/*
 * SAX character-data (pcdata) callback used for debug tracing.
 *
 * Increments the global callback counter and, unless quiet mode is
 * enabled, logs at most the first 20 bytes of the data together with
 * its full length to the SAXdebug stream.
 */
cdataBlockDebug(void *ctx ATTRIBUTE_UNUSED, const xmlChar *value, int len)
{
    callbacks++;
    if (!quiet)
        fprintf(SAXdebug, "SAX.pcdata(%.20s, %d)\n",
                (char *) value, len);
}
| 0 |
[
"CWE-125"
] |
libxml2
|
a820dbeac29d330bae4be05d9ecd939ad6b4aa33
| 114,596,024,469,954,750,000,000,000,000,000,000,000 | 8 |
Bug 758605: Heap-based buffer overread in xmlDictAddString <https://bugzilla.gnome.org/show_bug.cgi?id=758605>
Reviewed by David Kilzer.
* HTMLparser.c:
(htmlParseName): Add bounds check.
(htmlParseNameComplex): Ditto.
* result/HTML/758605.html: Added.
* result/HTML/758605.html.err: Added.
* result/HTML/758605.html.sax: Added.
* runtest.c:
(pushParseTest): The input for the new test case was so small
(4 bytes) that htmlParseChunk() was never called after
htmlCreatePushParserCtxt(), thereby creating a false positive
test failure. Fixed by using a do-while loop so we always call
htmlParseChunk() at least once.
* test/HTML/758605.html: Added.
|
/*
 * Undo the common vsock realize work for @vdev: stop the post-load
 * timer, delete the receive/transmit/event virtqueues and release the
 * VirtIO device state.
 */
void vhost_vsock_common_unrealize(VirtIODevice *vdev)
{
    VHostVSockCommon *common = VHOST_VSOCK_COMMON(vdev);

    /* Stop the post-load timer before the queues go away. */
    vhost_vsock_common_post_load_timer_cleanup(common);

    virtio_delete_queue(common->recv_vq);
    virtio_delete_queue(common->trans_vq);
    virtio_delete_queue(common->event_vq);

    virtio_cleanup(vdev);
}
| 0 |
[
"CWE-772"
] |
qemu
|
8d1b247f3748ac4078524130c6d7ae42b6140aaf
| 49,031,463,198,074,380,000,000,000,000,000,000,000 | 11 |
vhost-vsock: detach the virqueue element in case of error
In vhost_vsock_common_send_transport_reset(), if an element popped from
the virtqueue is invalid, we should call virtqueue_detach_element() to
detach it from the virtqueue before freeing its memory.
Fixes: fc0b9b0e1c ("vhost-vsock: add virtio sockets device")
Fixes: CVE-2022-26354
Cc: [email protected]
Reported-by: VictorV <[email protected]>
Signed-off-by: Stefano Garzarella <[email protected]>
Message-Id: <[email protected]>
Reviewed-by: Stefan Hajnoczi <[email protected]>
Reviewed-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
|
int lzxd_decompress(struct lzxd_stream *lzx, off_t out_bytes) {
/* bitstream and huffman reading variables */
register unsigned int bit_buffer;
register int bits_left, i=0;
unsigned char *i_ptr, *i_end;
register unsigned short sym;
int match_length, length_footer, extra, verbatim_bits, bytes_todo;
int this_run, main_element, aligned_bits, j;
unsigned char *window, *runsrc, *rundest, buf[12];
unsigned int frame_size=0, end_frame, match_offset, window_posn;
unsigned int R0, R1, R2;
/* easy answers */
if (!lzx || (out_bytes < 0)) return MSPACK_ERR_ARGS;
if (lzx->error) return lzx->error;
/* flush out any stored-up bytes before we begin */
i = lzx->o_end - lzx->o_ptr;
if ((off_t) i > out_bytes) i = (int) out_bytes;
if (i) {
if (lzx->sys->write(lzx->output, lzx->o_ptr, i) != i) {
return lzx->error = MSPACK_ERR_WRITE;
}
lzx->o_ptr += i;
lzx->offset += i;
out_bytes -= i;
}
if (out_bytes == 0) return MSPACK_ERR_OK;
/* restore local state */
RESTORE_BITS;
window = lzx->window;
window_posn = lzx->window_posn;
R0 = lzx->R0;
R1 = lzx->R1;
R2 = lzx->R2;
end_frame = (unsigned int)((lzx->offset + out_bytes) / LZX_FRAME_SIZE) + 1;
while (lzx->frame < end_frame) {
/* have we reached the reset interval? (if there is one?) */
if (lzx->reset_interval && ((lzx->frame % lzx->reset_interval) == 0)) {
if (lzx->block_remaining) {
D(("%d bytes remaining at reset interval", lzx->block_remaining))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* re-read the intel header and reset the huffman lengths */
lzxd_reset_state(lzx);
R0 = lzx->R0;
R1 = lzx->R1;
R2 = lzx->R2;
}
/* LZX DELTA format has chunk_size, not present in LZX format */
if (lzx->is_delta) {
ENSURE_BITS(16);
REMOVE_BITS(16);
}
/* read header if necessary */
if (!lzx->header_read) {
/* read 1 bit. if bit=0, intel filesize = 0.
* if bit=1, read intel filesize (32 bits) */
j = 0; READ_BITS(i, 1); if (i) { READ_BITS(i, 16); READ_BITS(j, 16); }
lzx->intel_filesize = (i << 16) | j;
lzx->header_read = 1;
}
/* calculate size of frame: all frames are 32k except the final frame
* which is 32kb or less. this can only be calculated when lzx->length
* has been filled in. */
frame_size = LZX_FRAME_SIZE;
if (lzx->length && (lzx->length - lzx->offset) < (off_t)frame_size) {
frame_size = lzx->length - lzx->offset;
}
/* decode until one more frame is available */
bytes_todo = lzx->frame_posn + frame_size - window_posn;
while (bytes_todo > 0) {
/* initialise new block, if one is needed */
if (lzx->block_remaining == 0) {
/* realign if previous block was an odd-sized UNCOMPRESSED block */
if ((lzx->block_type == LZX_BLOCKTYPE_UNCOMPRESSED) &&
(lzx->block_length & 1))
{
READ_IF_NEEDED;
i_ptr++;
}
/* read block type (3 bits) and block length (24 bits) */
READ_BITS(lzx->block_type, 3);
READ_BITS(i, 16); READ_BITS(j, 8);
lzx->block_remaining = lzx->block_length = (i << 8) | j;
/*D(("new block t%d len %u", lzx->block_type, lzx->block_length))*/
/* read individual block headers */
switch (lzx->block_type) {
case LZX_BLOCKTYPE_ALIGNED:
/* read lengths of and build aligned huffman decoding tree */
for (i = 0; i < 8; i++) { READ_BITS(j, 3); lzx->ALIGNED_len[i] = j; }
BUILD_TABLE(ALIGNED);
/* no break -- rest of aligned header is same as verbatim */
case LZX_BLOCKTYPE_VERBATIM:
/* read lengths of and build main huffman decoding tree */
READ_LENGTHS(MAINTREE, 0, 256);
READ_LENGTHS(MAINTREE, 256, LZX_NUM_CHARS + lzx->num_offsets);
BUILD_TABLE(MAINTREE);
/* if the literal 0xE8 is anywhere in the block... */
if (lzx->MAINTREE_len[0xE8] != 0) lzx->intel_started = 1;
/* read lengths of and build lengths huffman decoding tree */
READ_LENGTHS(LENGTH, 0, LZX_NUM_SECONDARY_LENGTHS);
BUILD_TABLE_MAYBE_EMPTY(LENGTH);
break;
case LZX_BLOCKTYPE_UNCOMPRESSED:
/* because we can't assume otherwise */
lzx->intel_started = 1;
/* read 1-16 (not 0-15) bits to align to bytes */
if (bits_left == 0) ENSURE_BITS(16);
bits_left = 0; bit_buffer = 0;
/* read 12 bytes of stored R0 / R1 / R2 values */
for (rundest = &buf[0], i = 0; i < 12; i++) {
READ_IF_NEEDED;
*rundest++ = *i_ptr++;
}
R0 = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
R1 = buf[4] | (buf[5] << 8) | (buf[6] << 16) | (buf[7] << 24);
R2 = buf[8] | (buf[9] << 8) | (buf[10] << 16) | (buf[11] << 24);
break;
default:
D(("bad block type"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
}
/* decode more of the block:
* run = min(what's available, what's needed) */
this_run = lzx->block_remaining;
if (this_run > bytes_todo) this_run = bytes_todo;
/* assume we decode exactly this_run bytes, for now */
bytes_todo -= this_run;
lzx->block_remaining -= this_run;
/* decode at least this_run bytes */
switch (lzx->block_type) {
case LZX_BLOCKTYPE_VERBATIM:
while (this_run > 0) {
READ_HUFFSYM(MAINTREE, main_element);
if (main_element < LZX_NUM_CHARS) {
/* literal: 0 to LZX_NUM_CHARS-1 */
window[window_posn++] = main_element;
this_run--;
}
else {
/* match: LZX_NUM_CHARS + ((slot<<3) | length_header (3 bits)) */
main_element -= LZX_NUM_CHARS;
/* get match length */
match_length = main_element & LZX_NUM_PRIMARY_LENGTHS;
if (match_length == LZX_NUM_PRIMARY_LENGTHS) {
if (lzx->LENGTH_empty) {
D(("LENGTH symbol needed but tree is empty"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
READ_HUFFSYM(LENGTH, length_footer);
match_length += length_footer;
}
match_length += LZX_MIN_MATCH;
/* get match offset */
switch ((match_offset = (main_element >> 3))) {
case 0: match_offset = R0; break;
case 1: match_offset = R1; R1=R0; R0 = match_offset; break;
case 2: match_offset = R2; R2=R0; R0 = match_offset; break;
case 3: match_offset = 1; R2=R1; R1=R0; R0 = match_offset; break;
default:
extra = (match_offset >= 36) ? 17 : extra_bits[match_offset];
READ_BITS(verbatim_bits, extra);
match_offset = position_base[match_offset] - 2 + verbatim_bits;
R2 = R1; R1 = R0; R0 = match_offset;
}
/* LZX DELTA uses max match length to signal even longer match */
if (match_length == LZX_MAX_MATCH && lzx->is_delta) {
int extra_len = 0;
ENSURE_BITS(3); /* 4 entry huffman tree */
if (PEEK_BITS(1) == 0) {
REMOVE_BITS(1); /* '0' -> 8 extra length bits */
READ_BITS(extra_len, 8);
}
else if (PEEK_BITS(2) == 2) {
REMOVE_BITS(2); /* '10' -> 10 extra length bits + 0x100 */
READ_BITS(extra_len, 10);
extra_len += 0x100;
}
else if (PEEK_BITS(3) == 6) {
REMOVE_BITS(3); /* '110' -> 12 extra length bits + 0x500 */
READ_BITS(extra_len, 12);
extra_len += 0x500;
}
else {
REMOVE_BITS(3); /* '111' -> 15 extra length bits */
READ_BITS(extra_len, 15);
}
match_length += extra_len;
}
if ((window_posn + match_length) > lzx->window_size) {
D(("match ran over window wrap"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* copy match */
rundest = &window[window_posn];
i = match_length;
/* does match offset wrap the window? */
if (match_offset > window_posn) {
if (match_offset > lzx->offset &&
(match_offset - window_posn) > lzx->ref_data_size)
{
D(("match offset beyond LZX stream"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* j = length from match offset to end of window */
j = match_offset - window_posn;
if (j > (int) lzx->window_size) {
D(("match offset beyond window boundaries"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
runsrc = &window[lzx->window_size - j];
if (j < i) {
/* if match goes over the window edge, do two copy runs */
i -= j; while (j-- > 0) *rundest++ = *runsrc++;
runsrc = window;
}
while (i-- > 0) *rundest++ = *runsrc++;
}
else {
runsrc = rundest - match_offset;
while (i-- > 0) *rundest++ = *runsrc++;
}
this_run -= match_length;
window_posn += match_length;
}
} /* while (this_run > 0) */
break;
case LZX_BLOCKTYPE_ALIGNED:
while (this_run > 0) {
READ_HUFFSYM(MAINTREE, main_element);
if (main_element < LZX_NUM_CHARS) {
/* literal: 0 to LZX_NUM_CHARS-1 */
window[window_posn++] = main_element;
this_run--;
}
else {
/* match: LZX_NUM_CHARS + ((slot<<3) | length_header (3 bits)) */
main_element -= LZX_NUM_CHARS;
/* get match length */
match_length = main_element & LZX_NUM_PRIMARY_LENGTHS;
if (match_length == LZX_NUM_PRIMARY_LENGTHS) {
if (lzx->LENGTH_empty) {
D(("LENGTH symbol needed but tree is empty"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
READ_HUFFSYM(LENGTH, length_footer);
match_length += length_footer;
}
match_length += LZX_MIN_MATCH;
/* get match offset */
switch ((match_offset = (main_element >> 3))) {
case 0: match_offset = R0; break;
case 1: match_offset = R1; R1 = R0; R0 = match_offset; break;
case 2: match_offset = R2; R2 = R0; R0 = match_offset; break;
default:
extra = (match_offset >= 36) ? 17 : extra_bits[match_offset];
match_offset = position_base[match_offset] - 2;
if (extra > 3) {
/* verbatim and aligned bits */
extra -= 3;
READ_BITS(verbatim_bits, extra);
match_offset += (verbatim_bits << 3);
READ_HUFFSYM(ALIGNED, aligned_bits);
match_offset += aligned_bits;
}
else if (extra == 3) {
/* aligned bits only */
READ_HUFFSYM(ALIGNED, aligned_bits);
match_offset += aligned_bits;
}
else if (extra > 0) { /* extra==1, extra==2 */
/* verbatim bits only */
READ_BITS(verbatim_bits, extra);
match_offset += verbatim_bits;
}
else /* extra == 0 */ {
/* ??? not defined in LZX specification! */
match_offset = 1;
}
/* update repeated offset LRU queue */
R2 = R1; R1 = R0; R0 = match_offset;
}
/* LZX DELTA uses max match length to signal even longer match */
if (match_length == LZX_MAX_MATCH && lzx->is_delta) {
int extra_len = 0;
ENSURE_BITS(3); /* 4 entry huffman tree */
if (PEEK_BITS(1) == 0) {
REMOVE_BITS(1); /* '0' -> 8 extra length bits */
READ_BITS(extra_len, 8);
}
else if (PEEK_BITS(2) == 2) {
REMOVE_BITS(2); /* '10' -> 10 extra length bits + 0x100 */
READ_BITS(extra_len, 10);
extra_len += 0x100;
}
else if (PEEK_BITS(3) == 6) {
REMOVE_BITS(3); /* '110' -> 12 extra length bits + 0x500 */
READ_BITS(extra_len, 12);
extra_len += 0x500;
}
else {
REMOVE_BITS(3); /* '111' -> 15 extra length bits */
READ_BITS(extra_len, 15);
}
match_length += extra_len;
}
if ((window_posn + match_length) > lzx->window_size) {
D(("match ran over window wrap"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* copy match */
rundest = &window[window_posn];
i = match_length;
/* does match offset wrap the window? */
if (match_offset > window_posn) {
if (match_offset > lzx->offset &&
(match_offset - window_posn) > lzx->ref_data_size)
{
D(("match offset beyond LZX stream"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* j = length from match offset to end of window */
j = match_offset - window_posn;
if (j > (int) lzx->window_size) {
D(("match offset beyond window boundaries"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
runsrc = &window[lzx->window_size - j];
if (j < i) {
/* if match goes over the window edge, do two copy runs */
i -= j; while (j-- > 0) *rundest++ = *runsrc++;
runsrc = window;
}
while (i-- > 0) *rundest++ = *runsrc++;
}
else {
runsrc = rundest - match_offset;
while (i-- > 0) *rundest++ = *runsrc++;
}
this_run -= match_length;
window_posn += match_length;
}
} /* while (this_run > 0) */
break;
case LZX_BLOCKTYPE_UNCOMPRESSED:
/* as this_run is limited not to wrap a frame, this also means it
* won't wrap the window (as the window is a multiple of 32k) */
if (window_posn + this_run > lzx->window_size) {
D(("match ran over window boundary"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
rundest = &window[window_posn];
window_posn += this_run;
while (this_run > 0) {
if ((i = i_end - i_ptr) == 0) {
READ_IF_NEEDED;
}
else {
if (i > this_run) i = this_run;
lzx->sys->copy(i_ptr, rundest, (size_t) i);
rundest += i;
i_ptr += i;
this_run -= i;
}
}
break;
default:
return lzx->error = MSPACK_ERR_DECRUNCH; /* might as well */
}
/* did the final match overrun our desired this_run length? */
if (this_run < 0) {
if ((unsigned int)(-this_run) > lzx->block_remaining) {
D(("overrun went past end of block by %d (%d remaining)",
-this_run, lzx->block_remaining ))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
lzx->block_remaining -= -this_run;
}
} /* while (bytes_todo > 0) */
/* streams don't extend over frame boundaries */
if ((window_posn - lzx->frame_posn) != frame_size) {
D(("decode beyond output frame limits! %d != %d",
window_posn - lzx->frame_posn, frame_size))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* re-align input bitstream */
if (bits_left > 0) ENSURE_BITS(16);
if (bits_left & 15) REMOVE_BITS(bits_left & 15);
/* check that we've used all of the previous frame first */
if (lzx->o_ptr != lzx->o_end) {
D(("%ld avail bytes, new %d frame",
(long)(lzx->o_end - lzx->o_ptr), frame_size))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* does this intel block _really_ need decoding? */
if (lzx->intel_started && lzx->intel_filesize &&
(lzx->frame <= 32768) && (frame_size > 10))
{
unsigned char *data = &lzx->e8_buf[0];
unsigned char *dataend = &lzx->e8_buf[frame_size - 10];
signed int curpos = lzx->intel_curpos;
signed int filesize = lzx->intel_filesize;
signed int abs_off, rel_off;
/* copy e8 block to the e8 buffer and tweak if needed */
lzx->o_ptr = data;
lzx->sys->copy(&lzx->window[lzx->frame_posn], data, frame_size);
while (data < dataend) {
if (*data++ != 0xE8) { curpos++; continue; }
abs_off = data[0] | (data[1]<<8) | (data[2]<<16) | (data[3]<<24);
if ((abs_off >= -curpos) && (abs_off < filesize)) {
rel_off = (abs_off >= 0) ? abs_off - curpos : abs_off + filesize;
data[0] = (unsigned char) rel_off;
data[1] = (unsigned char) (rel_off >> 8);
data[2] = (unsigned char) (rel_off >> 16);
data[3] = (unsigned char) (rel_off >> 24);
}
data += 4;
curpos += 5;
}
lzx->intel_curpos += frame_size;
}
else {
lzx->o_ptr = &lzx->window[lzx->frame_posn];
if (lzx->intel_filesize) lzx->intel_curpos += frame_size;
}
lzx->o_end = &lzx->o_ptr[frame_size];
/* write a frame */
i = (out_bytes < (off_t)frame_size) ? (unsigned int)out_bytes : frame_size;
if (lzx->sys->write(lzx->output, lzx->o_ptr, i) != i) {
return lzx->error = MSPACK_ERR_WRITE;
}
lzx->o_ptr += i;
lzx->offset += i;
out_bytes -= i;
/* advance frame start position */
lzx->frame_posn += frame_size;
lzx->frame++;
/* wrap window / frame position pointers */
if (window_posn == lzx->window_size) window_posn = 0;
if (lzx->frame_posn == lzx->window_size) lzx->frame_posn = 0;
} /* while (lzx->frame < end_frame) */
if (out_bytes) {
D(("bytes left to output"))
return lzx->error = MSPACK_ERR_DECRUNCH;
}
/* store local state */
STORE_BITS;
lzx->window_posn = window_posn;
lzx->R0 = R0;
lzx->R1 = R1;
lzx->R2 = R2;
return MSPACK_ERR_OK;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
clamav-devel
|
a83773682e856ad6529ba6db8d1792e6d515d7f1
| 54,831,592,204,818,270,000,000,000,000,000,000,000 | 503 |
fixing potential OOB window write when unpacking chm files
|
/*
 * writeState -- persist the in-memory rotation state table to stateFilename.
 *
 * The state is written to "<stateFilename>.tmp" first and then rename()d
 * over the real file, so readers never see a partially written state file.
 * The temp file inherits the mode/owner (and, with WITH_ACL, the ACL and
 * security context) of the existing state file; a stub state file with mode
 * 0644 is created first if none exists yet.
 *
 * File format: a "logrotate state -- version 2" header, then one line per
 * log file: the quoted filename (with '"', '\' and newline escaped)
 * followed by the last-rotation timestamp.  Entries that are unused and
 * whose last rotation is more than a year old are dropped.
 *
 * Returns 0 on success, non-zero on any error (error already reported via
 * message()).
 */
static int writeState(const char *stateFilename)
{
    struct logState *p;
    FILE *f;
    char *chptr;
    unsigned int i = 0;
    int error = 0;
    int bytes = 0;
    int fdcurr;
    int fdsave;
    struct stat sb;
    char *tmpFilename = NULL;
    struct tm now;
    time_t now_time, last_time;
    char *prevCtx;
    localtime_r(&nowSecs, &now);
    /* "<stateFilename>.tmp" plus NUL: strlen + 4 (".tmp") + 1 */
    tmpFilename = malloc(strlen(stateFilename) + 5 );
    if (tmpFilename == NULL) {
        message_OOM();
        return 1;
    }
    strcpy(tmpFilename, stateFilename);
    strcat(tmpFilename, ".tmp");
    /* Remove possible tmp state file from previous run */
    error = unlink(tmpFilename);
    if (error == -1 && errno != ENOENT) {
        message(MESS_ERROR, "error removing old temporary state file %s: %s\n",
                tmpFilename, strerror(errno));
        free(tmpFilename);
        return 1;
    }
    error = 0;
    /* open the current state file so its attributes can be cloned below */
    fdcurr = open(stateFilename, O_RDONLY);
    if (fdcurr == -1) {
        const char *state_header_v2 = "logrotate state -- version 2\n";
        const size_t state_header_v2_len = strlen(state_header_v2);
        size_t ret;
        /* no error if state file is just missing */
        if (errno != ENOENT) {
            message(MESS_ERROR, "error opening state file %s: %s\n",
                    stateFilename, strerror(errno));
            free(tmpFilename);
            return 1;
        }
        /* create a stub state file with mode 0644 */
        fdcurr = open(stateFilename, O_CREAT | O_EXCL | O_WRONLY,
                      S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH);
        if (fdcurr == -1) {
            message(MESS_ERROR, "error creating stub state file %s: %s\n",
                    stateFilename, strerror(errno));
            free(tmpFilename);
            return 1;
        }
        /* check write access */
        ret = full_write(fdcurr, state_header_v2, state_header_v2_len);
        if (ret != state_header_v2_len) {
            message(MESS_ERROR, "error writing to stub state file %s: %s\n",
                    stateFilename, strerror(errno));
            close(fdcurr);
            free(tmpFilename);
            return 1;
        }
    }
    /* get attributes, to assign them to the new state file */
    if (setSecCtx(fdcurr, stateFilename, &prevCtx) != 0) {
        /* error msg already printed */
        free(tmpFilename);
        close(fdcurr);
        return 1;
    }
#ifdef WITH_ACL
    if ((prev_acl = acl_get_fd(fdcurr)) == NULL) {
        if (is_acl_well_supported(errno)) {
            message(MESS_ERROR, "getting file ACL %s: %s\n",
                    stateFilename, strerror(errno));
            restoreSecCtx(&prevCtx);
            free(tmpFilename);
            close(fdcurr);
            return 1;
        }
    }
#endif
    if (fstat(fdcurr, &sb) == -1) {
        message(MESS_ERROR, "error stating %s: %s\n", stateFilename, strerror(errno));
        restoreSecCtx(&prevCtx);
        free(tmpFilename);
#ifdef WITH_ACL
        if (prev_acl) {
            acl_free(prev_acl);
            prev_acl = NULL;
        }
#endif
        return 1;
    }
    close(fdcurr);
    /* create the temp file with the attributes captured above */
    fdsave = createOutputFile(tmpFilename, O_RDWR | O_CREAT | O_TRUNC, &sb, prev_acl, 0);
#ifdef WITH_ACL
    if (prev_acl) {
        acl_free(prev_acl);
        prev_acl = NULL;
    }
#endif
    restoreSecCtx(&prevCtx);
    if (fdsave < 0) {
        free(tmpFilename);
        return 1;
    }
    f = fdopen(fdsave, "w");
    if (!f) {
        message(MESS_ERROR, "error creating temp state file %s: %s\n",
                tmpFilename, strerror(errno));
        free(tmpFilename);
        return 1;
    }
    bytes = fprintf(f, "logrotate state -- version 2\n");
    if (bytes < 0)
        error = bytes;
    /*
     * Time in seconds it takes earth to go around sun. The value is
     * astronomical measurement (solar year) rather than something derived from
     * a convention (calendar year).
     */
#define SECONDS_IN_YEAR 31556926
    /* walk every hash bucket and every state entry; stop on first error */
    for (i = 0; i < hashSize && error == 0; i++) {
        for (p = states[i]->head.lh_first; p != NULL && error == 0;
             p = p->list.le_next) {
            /* Skip states which are not used for more than a year. */
            now_time = mktime(&now);
            last_time = mktime(&p->lastRotated);
            if (!p->isUsed && difftime(now_time, last_time) > SECONDS_IN_YEAR) {
                message(MESS_DEBUG, "Removing %s from state file, "
                        "because it does not exist and has not been rotated for one year\n",
                        p->fn);
                continue;
            }
            /* quoted filename: escape '"', '\\' and newline */
            error = fputc('"', f) == EOF;
            for (chptr = p->fn; *chptr && error == 0; chptr++) {
                switch (*chptr) {
                case '"':
                case '\\':
                    error = fputc('\\', f) == EOF;
                    break;
                case '\n':
                    /* newline is written as the two characters '\' 'n' */
                    error = fputc('\\', f) == EOF;
                    if (error == 0) {
                        error = fputc('n', f) == EOF;
                    }
                    continue;
                default:
                    break;
                }
                if (error == 0 && fputc(*chptr, f) == EOF) {
                    error = 1;
                }
            }
            if (error == 0 && fputc('"', f) == EOF)
                error = 1;
            if (error == 0) {
                /* last-rotation timestamp, local time */
                bytes = fprintf(f, " %d-%d-%d-%d:%d:%d\n",
                                p->lastRotated.tm_year + 1900,
                                p->lastRotated.tm_mon + 1,
                                p->lastRotated.tm_mday,
                                p->lastRotated.tm_hour,
                                p->lastRotated.tm_min,
                                p->lastRotated.tm_sec);
                if (bytes < 0)
                    error = bytes;
            }
        }
    }
    /* flush and fsync before the rename so the data hits the disk first */
    if (error == 0)
        error = fflush(f);
    if (error == 0)
        error = fsync(fdsave);
    if (error == 0)
        error = fclose(f);
    else
        fclose(f);
    if (error == 0) {
        /* atomically replace the old state file */
        if (rename(tmpFilename, stateFilename)) {
            message(MESS_ERROR, "error renaming temp state file %s to %s: %s\n",
                    tmpFilename, stateFilename, strerror(errno));
            unlink(tmpFilename);
            error = 1;
        }
    }
    else {
        if (errno)
            message(MESS_ERROR, "error creating temp state file %s: %s\n",
                    tmpFilename, strerror(errno));
        else
            message(MESS_ERROR, "error creating temp state file %s%s\n",
                    tmpFilename, error == ENOMEM ?
                    ": Insufficient storage space is available." : "" );
        unlink(tmpFilename);
    }
    free(tmpFilename);
    return error;
}
| 1 |
[
"CWE-732"
] |
logrotate
|
f46d0bdfc9c53515c13880c501f4d2e1e7dd8b25
| 79,508,838,414,487,710,000,000,000,000,000,000,000 | 224 |
Lock state file to prevent parallel execution
Running multiple instances of logrotate on the same set of log-files
might have undesirable effects.
Add command line option --skip-state-lock to skip locking the state
file, for example if locking is unsupported or prohibited.
Fixes: https://github.com/logrotate/logrotate/issues/295
Closes: https://github.com/logrotate/logrotate/pull/297
|
/* Accessor: report the bandwidth value stored on this RTP track hinter. */
u32 gf_hinter_track_get_bandwidth(GF_RTPHinter *tkHinter)
{
	u32 bw;
	bw = tkHinter->bandwidth;
	return bw;
}
| 0 |
[
"CWE-476"
] |
gpac
|
ebfa346eff05049718f7b80041093b4c5581c24e
| 174,869,086,527,827,880,000,000,000,000,000,000,000 | 4 |
fixed #1706
|
/*
 * Drain the device's TX-status reporting registers.  XMITSTAT_0 is polled
 * until its bit 0 no longer signals a pending entry; each entry (a pair of
 * 32-bit register reads) is unpacked into a struct b43_txstatus and handed
 * to b43_handle_txstatus().
 */
static void handle_irq_transmit_status(struct b43_wldev *dev)
{
	u32 v0, v1;
	u16 tmp;
	struct b43_txstatus stat;
	while (1) {
		v0 = b43_read32(dev, B43_MMIO_XMITSTAT_0);
		if (!(v0 & 0x00000001))
			break;	/* no (more) status entries pending */
		v1 = b43_read32(dev, B43_MMIO_XMITSTAT_1);
		/* word 0 high halfword: frame cookie */
		stat.cookie = (v0 >> 16);
		/* word 1: sequence number (low 16) and PHY status (bits 23:16) */
		stat.seq = (v1 & 0x0000FFFF);
		stat.phy_stat = ((v1 & 0x00FF0000) >> 16);
		/* word 0 low halfword: packed counters and flag bits */
		tmp = (v0 & 0x0000FFFF);
		stat.frame_count = ((tmp & 0xF000) >> 12);
		stat.rts_count = ((tmp & 0x0F00) >> 8);
		stat.supp_reason = ((tmp & 0x001C) >> 2);
		stat.pm_indicated = !!(tmp & 0x0080);
		stat.intermediate = !!(tmp & 0x0040);
		stat.for_ampdu = !!(tmp & 0x0020);
		stat.acked = !!(tmp & 0x0002);
		b43_handle_txstatus(dev, &stat);
	}
}
| 0 |
[
"CWE-134"
] |
wireless
|
9538cbaab6e8b8046039b4b2eb6c9d614dc782bd
| 68,876,086,175,089,650,000,000,000,000,000,000,000 | 27 |
b43: stop format string leaking into error msgs
The module parameter "fwpostfix" is userspace controllable, unfiltered,
and is used to define the firmware filename. b43_do_request_fw() populates
ctx->errors[] on error, containing the firmware filename. b43err()
parses its arguments as a format string. For systems with b43 hardware,
this could lead to a uid-0 to ring-0 escalation.
CVE-2013-2852
Signed-off-by: Kees Cook <[email protected]>
Cc: [email protected]
Signed-off-by: John W. Linville <[email protected]>
|
/* Redraw-timer callback: all arguments are unused; it only records in the
 * debug log that the timer fired. */
server_client_redraw_timer(__unused int fd, __unused short events,
    __unused void* data)
{
	log_debug("redraw timer fired");
}
| 0 |
[] |
src
|
b32e1d34e10a0da806823f57f02a4ae6e93d756e
| 25,461,716,047,355,790,000,000,000,000,000,000,000 | 5 |
evbuffer_new and bufferevent_new can both fail (when malloc fails) and
return NULL. GitHub issue 1547.
|
/**
 * \brief Analyze and register a function at \p addr, optionally renaming it
 * and recursively analyzing its call targets.
 *
 * Runs function analysis at \p addr (bounded by the "analysis.depth" config
 * value).  If a function is found there, its name is normalized, variables
 * are recovered (when analysis.opt.vars is set) and variable types added.
 * With \p analyze_recursively, every code/call xref out of the function is
 * followed and analyzed, one level of nesting deep, before noreturn
 * propagation and function flagging run.
 *
 * \param core                 RzCore context.
 * \param name                 Optional new name for the function (may be NULL
 *                             or empty to keep the discovered name).
 * \param addr                 Address to analyze.
 * \param analyze_recursively  Also analyze functions referenced from here.
 * \return true on success, false if the function could not be found/renamed.
 */
RZ_API bool rz_core_analysis_function_add(RzCore *core, const char *name, ut64 addr, bool analyze_recursively) {
	int depth = rz_config_get_i(core->config, "analysis.depth");
	RzAnalysisFunction *fcn = NULL;
	//rz_core_analysis_undefine (core, core->offset);
	rz_core_analysis_fcn(core, addr, UT64_MAX, RZ_ANALYSIS_REF_TYPE_NULL, depth);
	fcn = rz_analysis_get_fcn_in(core->analysis, addr, 0);
	if (fcn) {
		/* ensure we use a proper name */
		rz_core_analysis_function_rename(core, addr, fcn->name);
		if (core->analysis->opt.vars) {
			rz_core_recover_vars(core, fcn, true);
		}
		rz_analysis_fcn_vars_add_types(core->analysis, fcn);
	} else {
		/* NOTE(review): only the verbose path bails out here; when not
		 * verbose, execution falls through with fcn == NULL -- confirm
		 * this asymmetry is intentional. */
		if (core->analysis->verbose) {
			eprintf("Warning: Unable to analyze function at 0x%08" PFMT64x "\n", addr);
			return false;
		}
	}
	if (analyze_recursively) {
		fcn = rz_analysis_get_fcn_in(core->analysis, addr, 0); /// XXX wrong in case of nopskip
		if (fcn) {
			RzAnalysisXRef *xref;
			RzListIter *iter;
			RzList *xrefs = rz_analysis_function_get_xrefs_from(fcn);
			rz_list_foreach (xrefs, iter, xref) {
				if (xref->to == UT64_MAX) {
					//eprintf ("Warning: ignore 0x%08"PFMT64x" call 0x%08"PFMT64x"\n", ref->at, ref->addr);
					continue;
				}
				if (xref->type != RZ_ANALYSIS_REF_TYPE_CODE && xref->type != RZ_ANALYSIS_REF_TYPE_CALL) {
					/* only follow code/call references */
					continue;
				}
				if (!rz_io_is_valid_offset(core->io, xref->to, !core->analysis->opt.noncode)) {
					continue;
				}
				rz_core_analysis_fcn(core, xref->to, fcn->addr, RZ_ANALYSIS_REF_TYPE_CALL, depth);
				/* use recursivity here */
				RzAnalysisFunction *f = rz_analysis_get_function_at(core->analysis, xref->to);
				if (f) {
					/* second level: follow the callee's own code/call xrefs */
					RzListIter *iter;
					RzAnalysisXRef *xref1;
					RzList *xrefs1 = rz_analysis_function_get_xrefs_from(f);
					rz_list_foreach (xrefs1, iter, xref1) {
						if (!rz_io_is_valid_offset(core->io, xref1->to, !core->analysis->opt.noncode)) {
							continue;
						}
						if (xref1->type != 'c' && xref1->type != 'C') {
							continue;
						}
						rz_core_analysis_fcn(core, xref1->to, f->addr, RZ_ANALYSIS_REF_TYPE_CALL, depth);
						// recursively follow fcn->refs again and again
					}
					rz_list_free(xrefs1);
				} else {
					f = rz_analysis_get_fcn_in(core->analysis, fcn->addr, 0);
					if (f) {
						/* cut function */
						rz_analysis_function_resize(f, addr - fcn->addr);
						rz_core_analysis_fcn(core, xref->to, fcn->addr,
							RZ_ANALYSIS_REF_TYPE_CALL, depth);
						f = rz_analysis_get_function_at(core->analysis, fcn->addr);
					}
					if (!f) {
						eprintf("af: Cannot find function at 0x%08" PFMT64x "\n", fcn->addr);
						rz_list_free(xrefs);
						return false;
					}
				}
			}
			rz_list_free(xrefs);
			if (core->analysis->opt.vars) {
				rz_core_recover_vars(core, fcn, true);
			}
		}
	}
	if (name) {
		if (*name && !rz_core_analysis_function_rename(core, addr, name)) {
			eprintf("af: Cannot find function at 0x%08" PFMT64x "\n", addr);
			return false;
		}
	}
	rz_core_analysis_propagate_noreturn(core, addr);
	rz_core_analysis_flag_every_function(core);
	return true;
}
| 0 |
[
"CWE-703"
] |
rizin
|
6ce71d8aa3dafe3cdb52d5d72ae8f4b95916f939
| 198,475,503,702,421,640,000,000,000,000,000,000,000 | 88 |
Initialize retctx,ctx before freeing the inner elements
In rz_core_analysis_type_match retctx structure was initialized on the
stack only after a "goto out_function", where a field of that structure
was freed. When the goto path is taken, the field is not properly
initialized and it can cause a crash of Rizin or have other effects.
Fixes: CVE-2021-4022
|
/* Generic netlink entry point for setting a TIPC crypto key: simply runs
 * __tipc_nl_node_set_key() with the RTNL lock held. */
int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	int ret;

	rtnl_lock();
	ret = __tipc_nl_node_set_key(skb, info);
	rtnl_unlock();

	return ret;
}
| 0 |
[] |
linux
|
0217ed2848e8538bcf9172d97ed2eeb4a26041bb
| 152,830,576,302,131,190,000,000,000,000,000,000,000 | 10 |
tipc: better validate user input in tipc_nl_retrieve_key()
Before calling tipc_aead_key_size(ptr), we need to ensure
we have enough data to dereference ptr->keylen.
We probably also want to make sure tipc_aead_key_size()
wont overflow with malicious ptr->keylen values.
Syzbot reported:
BUG: KMSAN: uninit-value in __tipc_nl_node_set_key net/tipc/node.c:2971 [inline]
BUG: KMSAN: uninit-value in tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023
CPU: 0 PID: 21060 Comm: syz-executor.5 Not tainted 5.11.0-rc7-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x21c/0x280 lib/dump_stack.c:120
kmsan_report+0xfb/0x1e0 mm/kmsan/kmsan_report.c:118
__msan_warning+0x5f/0xa0 mm/kmsan/kmsan_instr.c:197
__tipc_nl_node_set_key net/tipc/node.c:2971 [inline]
tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023
genl_family_rcv_msg_doit net/netlink/genetlink.c:739 [inline]
genl_family_rcv_msg net/netlink/genetlink.c:783 [inline]
genl_rcv_msg+0x1319/0x1610 net/netlink/genetlink.c:800
netlink_rcv_skb+0x6fa/0x810 net/netlink/af_netlink.c:2494
genl_rcv+0x63/0x80 net/netlink/genetlink.c:811
netlink_unicast_kernel net/netlink/af_netlink.c:1304 [inline]
netlink_unicast+0x11d6/0x14a0 net/netlink/af_netlink.c:1330
netlink_sendmsg+0x1740/0x1840 net/netlink/af_netlink.c:1919
sock_sendmsg_nosec net/socket.c:652 [inline]
sock_sendmsg net/socket.c:672 [inline]
____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345
___sys_sendmsg net/socket.c:2399 [inline]
__sys_sendmsg+0x714/0x830 net/socket.c:2432
__compat_sys_sendmsg net/compat.c:347 [inline]
__do_compat_sys_sendmsg net/compat.c:354 [inline]
__se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351
__ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351
do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline]
__do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141
do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209
entry_SYSENTER_compat_after_hwframe+0x4d/0x5c
RIP: 0023:0xf7f60549
Code: 03 74 c0 01 10 05 03 74 b8 01 10 06 03 74 b4 01 10 07 03 74 b0 01 10 08 03 74 d8 01 00 00 00 00 00 51 52 55 89 e5 0f 34 cd 80 <5d> 5a 59 c3 90 90 90 90 8d b4 26 00 00 00 00 8d b4 26 00 00 00 00
RSP: 002b:00000000f555a5fc EFLAGS: 00000296 ORIG_RAX: 0000000000000172
RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000020000200
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000
R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
Uninit was created at:
kmsan_save_stack_with_flags mm/kmsan/kmsan.c:121 [inline]
kmsan_internal_poison_shadow+0x5c/0xf0 mm/kmsan/kmsan.c:104
kmsan_slab_alloc+0x8d/0xe0 mm/kmsan/kmsan_hooks.c:76
slab_alloc_node mm/slub.c:2907 [inline]
__kmalloc_node_track_caller+0xa37/0x1430 mm/slub.c:4527
__kmalloc_reserve net/core/skbuff.c:142 [inline]
__alloc_skb+0x2f8/0xb30 net/core/skbuff.c:210
alloc_skb include/linux/skbuff.h:1099 [inline]
netlink_alloc_large_skb net/netlink/af_netlink.c:1176 [inline]
netlink_sendmsg+0xdbc/0x1840 net/netlink/af_netlink.c:1894
sock_sendmsg_nosec net/socket.c:652 [inline]
sock_sendmsg net/socket.c:672 [inline]
____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345
___sys_sendmsg net/socket.c:2399 [inline]
__sys_sendmsg+0x714/0x830 net/socket.c:2432
__compat_sys_sendmsg net/compat.c:347 [inline]
__do_compat_sys_sendmsg net/compat.c:354 [inline]
__se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351
__ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351
do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline]
__do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141
do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209
entry_SYSENTER_compat_after_hwframe+0x4d/0x5c
Fixes: e1f32190cf7d ("tipc: add support for AEAD key setting via netlink")
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Tuong Lien <[email protected]>
Cc: Jon Maloy <[email protected]>
Cc: Ying Xue <[email protected]>
Reported-by: syzbot <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Push a new quickfix/location list with title "qf_title" onto the stack in
 * "qi" and make it the current list.  Lists newer than the current one are
 * discarded first; when the stack is full (LISTCOUNT lists) the oldest is
 * freed and the others shifted down.  The new list gets a fresh unique id.
 */
    qf_new_list(qf_info_T *qi, char_u *qf_title)
{
    int		i;
    qf_list_T	*qfl;
    // If the current entry is not the last entry, delete entries beyond
    // the current entry.  This makes it possible to browse in a tree-like
    // way with ":grep".
    while (qi->qf_listcount > qi->qf_curlist + 1)
	qf_free(&qi->qf_lists[--qi->qf_listcount]);
    // When the stack is full, remove to oldest entry
    // Otherwise, add a new entry.
    if (qi->qf_listcount == LISTCOUNT)
    {
	qf_free(&qi->qf_lists[0]);
	for (i = 1; i < LISTCOUNT; ++i)
	    qi->qf_lists[i - 1] = qi->qf_lists[i];
	qi->qf_curlist = LISTCOUNT - 1;
    }
    else
	qi->qf_curlist = qi->qf_listcount++;
    // start the new list zeroed out, then record title, type and id
    qfl = qf_get_curlist(qi);
    CLEAR_POINTER(qfl);
    qf_store_title(qfl, qf_title);
    qfl->qfl_type = qi->qfl_type;
    qfl->qf_id = ++last_qf_id;
}
| 0 |
[
"CWE-416"
] |
vim
|
4f1b083be43f351bc107541e7b0c9655a5d2c0bb
| 93,732,360,710,220,830,000,000,000,000,000,000,000 | 28 |
patch 9.0.0322: crash when no errors and 'quickfixtextfunc' is set
Problem: Crash when no errors and 'quickfixtextfunc' is set.
Solution: Do not handle errors if there aren't any.
|
/* Map a subtitle sample-entry fourcc to GstCaps and a human-readable codec
 * name (set via _codec()).  Timed-text and DVD-subpicture variants set
 * stream->need_process because the raw sample payload needs further
 * extraction before it can be pushed.  Unknown fourccs produce generic
 * "text/x-gst-fourcc-XXXX" caps. */
qtdemux_sub_caps (GstQTDemux * qtdemux, QtDemuxStream * stream,
    guint32 fourcc, const guint8 * stsd_data, gchar ** codec_name)
{
  GstCaps *caps;
  GST_DEBUG_OBJECT (qtdemux, "resolve fourcc 0x%08x", GUINT32_TO_BE (fourcc));
  switch (fourcc) {
    case FOURCC_mp4s:
      _codec ("DVD subtitle");
      caps = gst_caps_new_empty_simple ("subpicture/x-dvd");
      stream->need_process = TRUE;
      break;
    case FOURCC_text:
      _codec ("Quicktime timed text");
      goto text;
    case FOURCC_tx3g:
      _codec ("3GPP timed text");
    text:
      /* both timed-text variants share the same UTF-8 raw-text caps */
      caps = gst_caps_new_simple ("text/x-raw", "format", G_TYPE_STRING,
          "utf8", NULL);
      /* actual text piece needs to be extracted */
      stream->need_process = TRUE;
      break;
    case FOURCC_stpp:
      _codec ("XML subtitles");
      caps = gst_caps_new_empty_simple ("application/ttml+xml");
      break;
    default:
    {
      char *s, fourstr[5];
      /* fall back to a synthetic mime type built from the fourcc itself */
      g_snprintf (fourstr, 5, "%" GST_FOURCC_FORMAT, GST_FOURCC_ARGS (fourcc));
      s = g_strdup_printf ("text/x-gst-fourcc-%s", g_strstrip (fourstr));
      caps = gst_caps_new_empty_simple (s);
      g_free (s);
      break;
    }
  }
  return caps;
}
| 0 |
[
"CWE-125"
] |
gst-plugins-good
|
d0949baf3dadea6021d54abef6802fed5a06af75
| 294,038,664,196,933,930,000,000,000,000,000,000,000 | 41 |
qtdemux: Fix out of bounds read in tag parsing code
We can't simply assume that the length of the tag value as given
inside the stream is correct but should also check against the amount of
data we have actually available.
https://bugzilla.gnome.org/show_bug.cgi?id=775451
|
/*
 * Shut down the HID interrupt and control channels of the input device
 * identified by the (src, dst) address pair.
 *
 * Returns 0 on success, -ENOENT if no such device is known.
 */
int input_device_close_channels(const bdaddr_t *src, const bdaddr_t *dst)
{
	struct input_device *dev;

	dev = find_device(src, dst);
	if (dev == NULL)
		return -ENOENT;

	if (dev->intr_io != NULL)
		g_io_channel_shutdown(dev->intr_io, TRUE, NULL);
	if (dev->ctrl_io != NULL)
		g_io_channel_shutdown(dev->ctrl_io, TRUE, NULL);

	return 0;
}
| 0 |
[] |
bluez
|
3cccdbab2324086588df4ccf5f892fb3ce1f1787
| 39,990,266,921,960,960,000,000,000,000,000,000,000 | 15 |
HID accepts bonded device connections only.
This change adds a configuration for platforms to choose a more secure
posture for the HID profile. While some older mice are known to not
support pairing or encryption, some platform may choose a more secure
posture by requiring the device to be bonded and require the
connection to be encrypted when bonding is required.
Reference:
https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00352.html
|
// Request an on-demand route configuration update for the current request.
// If the snapped route configuration uses VHDS, a virtual-host discovery
// update is requested keyed by the lower-cased Host header.  Otherwise, when
// scoped routing is active and the request maps to a known scope whose
// RouteConfiguration has not been initialized yet, an SRDS on-demand update
// is requested.  In every other case the callback is invoked immediately
// with `false` so the filter chain simply continues.
void ConnectionManagerImpl::RdsRouteConfigUpdateRequester::requestRouteConfigUpdate(
    Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) {
  absl::optional<Router::ConfigConstSharedPtr> route_config = parent_.routeConfig();
  // Updates are delivered back on this connection's dispatcher thread.
  Event::Dispatcher& thread_local_dispatcher =
      parent_.connection_manager_.read_callbacks_->connection().dispatcher();
  if (route_config.has_value() && route_config.value()->usesVhds()) {
    ASSERT(!parent_.request_headers_->Host()->value().empty());
    const auto& host_header = absl::AsciiStrToLower(parent_.request_headers_->getHostValue());
    requestVhdsUpdate(host_header, thread_local_dispatcher, std::move(route_config_updated_cb));
    return;
  } else if (parent_.snapped_scoped_routes_config_ != nullptr) {
    Router::ScopeKeyPtr scope_key =
        parent_.snapped_scoped_routes_config_->computeScopeKey(*parent_.request_headers_);
    // If scope_key is not null, the scope exists but RouteConfiguration is not initialized.
    if (scope_key != nullptr) {
      requestSrdsUpdate(std::move(scope_key), thread_local_dispatcher,
                        std::move(route_config_updated_cb));
      return;
    }
  }
  // Continue the filter chain if no on demand update is requested.
  (*route_config_updated_cb)(false);
}
| 0 |
[
"CWE-22"
] |
envoy
|
5333b928d8bcffa26ab19bf018369a835f697585
| 102,769,833,180,367,860,000,000,000,000,000,000,000 | 23 |
Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]>
|
/* Module teardown: unregister both DCB netlink message handlers first so no
 * new requests arrive, then flush remaining entries via dcb_flushapp()
 * (presumably the cached application priority table -- defined elsewhere). */
static void __exit dcbnl_exit(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
	dcb_flushapp();
}
| 0 |
[
"CWE-399"
] |
linux-2.6
|
29cd8ae0e1a39e239a3a7b67da1986add1199fc0
| 308,822,704,752,258,400,000,000,000,000,000,000,000 | 6 |
dcbnl: fix various netlink info leaks
The dcb netlink interface leaks stack memory in various places:
* perm_addr[] buffer is only filled at max with 12 of the 32 bytes but
copied completely,
* no in-kernel driver fills all fields of an IEEE 802.1Qaz subcommand,
so we're leaking up to 58 bytes for ieee_ets structs, up to 136 bytes
for ieee_pfc structs, etc.,
* the same is true for CEE -- no in-kernel driver fills the whole
struct,
Prevent all of the above stack info leaks by properly initializing the
buffers/structures involved.
Signed-off-by: Mathias Krause <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/* Dispatch a WMI event: linearly scan the static wmi_evt_handlers table and
 * invoke the first handler registered for this event id.
 * Returns true if a handler was found and called, false otherwise. */
static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
				 void *d, int len)
{
	uint n;

	for (n = 0; n < ARRAY_SIZE(wmi_evt_handlers); n++) {
		if (wmi_evt_handlers[n].eventid != id)
			continue;
		wmi_evt_handlers[n].handler(wil, id, d, len);
		return true;
	}

	return false;
}
| 0 |
[
"CWE-119"
] |
linux
|
b5a8ffcae4103a9d823ea3aa3a761f65779fbe2a
| 13,462,750,259,385,290,000,000,000,000,000,000,000 | 14 |
wil6210: missing length check in wmi_set_ie
Add a length check in wmi_set_ie to detect unsigned integer
overflow.
Signed-off-by: Lior David <[email protected]>
Signed-off-by: Maya Erez <[email protected]>
Signed-off-by: Kalle Valo <[email protected]>
|
inline void Dequantize(const uint8* input_data, const Dims<4>& input_dims,
int32 zero_point, double scale, float* output_data,
const Dims<4>& output_dims) {
tflite::DequantizationParams op_params;
op_params.zero_point = zero_point;
op_params.scale = scale;
Dequantize(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
| 0 |
[
"CWE-703",
"CWE-835"
] |
tensorflow
|
dfa22b348b70bb89d6d6ec0ff53973bacb4f4695
| 92,784,640,885,309,940,000,000,000,000,000,000,000 | 10 |
Prevent a division by 0 in average ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3
|
/* Add an FT_BOOLEAN item at the cursor's current offset, advance the cursor,
 * and optionally return the decoded boolean in *retval (after applying the
 * field's bitmask, if any).  length must be -1 or > 0; string encodings are
 * rejected.  Aborts via REPORT_DISSECTOR_BUG on misuse. */
ptvcursor_add_ret_boolean(ptvcursor_t* ptvc, int hfindex, gint length, const guint encoding, gboolean *retval)
{
	header_field_info *hfinfo;
	field_info	  *new_fi;
	gint		   item_length;
	int		   offset;
	guint64		   value, bitval;
	offset = ptvc->offset;
	PROTO_REGISTRAR_GET_NTH(hfindex, hfinfo);
	if (hfinfo->type != FT_BOOLEAN) {
		REPORT_DISSECTOR_BUG("field %s is not of type FT_BOOLEAN",
		    hfinfo->abbrev);
	}
	/* length validation for native number encoding caught by get_uint64_value() */
	/* length has to be -1 or > 0 regardless of encoding */
	if (length < -1 || length == 0)
		REPORT_DISSECTOR_BUG("Invalid length %d passed to ptvcursor_add_ret_boolean",
			length);
	if (encoding & ENC_STRING) {
		REPORT_DISSECTOR_BUG("wrong encoding");
	}
	/* resolve the actual field/item lengths and validate against the tvb */
	get_hfi_length(hfinfo, ptvc->tvb, offset, &length, &item_length, encoding);
	test_length(hfinfo, ptvc->tvb, offset, item_length, encoding);
	/* I believe it's ok if this is called with a NULL tree */
	value = get_uint64_value(ptvc->tree, ptvc->tvb, offset, length, encoding);
	if (retval) {
		bitval = value;
		if (hfinfo->bitmask) {
			/* Mask out irrelevant portions */
			bitval &= hfinfo->bitmask;
		}
		*retval = (bitval != 0);
	}
	/* advance the cursor past the item before (possibly) faking the tree */
	ptvc->offset += get_full_length(hfinfo, ptvc->tvb, offset, length,
	    item_length, encoding);
	CHECK_FOR_NULL_TREE(ptvc->tree);
	TRY_TO_FAKE_THIS_ITEM(ptvc->tree, hfinfo->id, hfinfo);
	new_fi = new_field_info(ptvc->tree, hfinfo, ptvc->tvb, offset, item_length);
	return proto_tree_new_item(new_fi, ptvc->tree, ptvc->tvb,
		offset, length, encoding);
}
| 0 |
[
"CWE-401"
] |
wireshark
|
a9fc769d7bb4b491efb61c699d57c9f35269d871
| 10,014,818,893,719,261,000,000,000,000,000,000,000 | 53 |
epan: Fix a memory leak.
Make sure _proto_tree_add_bits_ret_val allocates a bits array using the
packet scope, otherwise we leak memory. Fixes #17032.
|
/* Scan a general entity reference (the leading '&' has already been
 * consumed).  A '#' (BT_NUM) hands off to scanCharRef for character
 * references; otherwise a name is scanned up to the terminating ';' and
 * reported as XML_TOK_ENTITY_REF.  Returns XML_TOK_INVALID on a byte that
 * cannot appear in a reference, XML_TOK_PARTIAL when the buffer ends
 * mid-token.  NOTE: REQUIRE_CHAR / CHECK_*_CASES / HAS_CHAR are macros with
 * hidden control flow (early returns and ptr advancement) -- defined in the
 * surrounding xmltok machinery. */
PREFIX(scanRef)(const ENCODING *enc, const char *ptr, const char *end,
                const char **nextTokPtr) {
  REQUIRE_CHAR(enc, ptr, end);
  switch (BYTE_TYPE(enc, ptr)) {
    CHECK_NMSTRT_CASES(enc, ptr, end, nextTokPtr)
  case BT_NUM:
    /* character reference: "&#..." */
    return PREFIX(scanCharRef)(enc, ptr + MINBPC(enc), end, nextTokPtr);
  default:
    *nextTokPtr = ptr;
    return XML_TOK_INVALID;
  }
  while (HAS_CHAR(enc, ptr, end)) {
    switch (BYTE_TYPE(enc, ptr)) {
      CHECK_NAME_CASES(enc, ptr, end, nextTokPtr)
    case BT_SEMI:
      /* ';' terminates the reference */
      *nextTokPtr = ptr + MINBPC(enc);
      return XML_TOK_ENTITY_REF;
    default:
      *nextTokPtr = ptr;
      return XML_TOK_INVALID;
    }
  }
  return XML_TOK_PARTIAL;
}
| 0 |
[
"CWE-116"
] |
libexpat
|
3f0a0cb644438d4d8e3294cd0b1245d0edb0c6c6
| 11,381,288,077,992,290,000,000,000,000,000,000,000 | 24 |
lib: Add missing validation of encoding (CVE-2022-25235)
|
// Read-only accessor: returns a reference to the stored string (no copy).
const std::string& get() const { return string_; }
| 0 |
[] |
envoy
|
2c60632d41555ec8b3d9ef5246242be637a2db0f
| 53,687,360,009,698,100,000,000,000,000,000,000,000 | 1 |
http: header map security fixes for duplicate headers (#197)
Previously header matching did not match on all headers for
non-inline headers. This patch changes the default behavior to
always logically match on all headers. Multiple individual
headers will be logically concatenated with ',' similar to what
is done with inline headers. This makes the behavior effectively
consistent. This behavior can be temporary reverted by setting
the runtime value "envoy.reloadable_features.header_match_on_all_headers"
to "false".
Targeted fixes have been additionally performed on the following
extensions which make them consider all duplicate headers by default as
a comma concatenated list:
1) Any extension using CEL matching on headers.
2) The header to metadata filter.
3) The JWT filter.
4) The Lua filter.
Like primary header matching used in routing, RBAC, etc. this behavior
can be disabled by setting the runtime value
"envoy.reloadable_features.header_match_on_all_headers" to false.
Finally, the setCopy() header map API previously only set the first
header in the case of duplicate non-inline headers. setCopy() now
behaves similarly to the other set*() APIs and replaces all found
headers with a single value. This may have had security implications
in the extauth filter which uses this API. This behavior can be disabled
by setting the runtime value
"envoy.reloadable_features.http_set_copy_replace_all_headers" to false.
Fixes https://github.com/envoyproxy/envoy-setec/issues/188
Signed-off-by: Matt Klein <[email protected]>
|
int mingw_bind(int sockfd, struct sockaddr *sa, size_t sz)
{
SOCKET s = (SOCKET)_get_osfhandle(sockfd);
return bind(s, sa, sz);
}
| 0 |
[
"CWE-20"
] |
git
|
6d8684161ee9c03bed5cb69ae76dfdddb85a0003
| 2,915,118,030,023,096,000,000,000,000,000,000,000 | 5 |
mingw: fix quoting of arguments
We need to be careful to follow proper quoting rules. For example, if an
argument contains spaces, we have to quote them. Double-quotes need to
be escaped. Backslashes need to be escaped, but only if they are
followed by a double-quote character.
We need to be _extra_ careful to consider the case where an argument
ends in a backslash _and_ needs to be quoted: in this case, we append a
double-quote character, i.e. the backslash now has to be escaped!
The current code, however, fails to recognize that, and therefore can
turn an argument that ends in a single backslash into a quoted argument
that now ends in an escaped double-quote character. This allows
subsequent command-line parameters to be split and part of them being
mistaken for command-line options, e.g. through a maliciously-crafted
submodule URL during a recursive clone.
Technically, we would not need to quote _all_ arguments which end in a
backslash _unless_ the argument needs to be quoted anyway. For example,
`test\` would not need to be quoted, while `test \` would need to be.
To keep the code simple, however, and therefore easier to reason about
and ensure its correctness, we now _always_ quote an argument that ends
in a backslash.
This addresses CVE-2019-1350.
Signed-off-by: Johannes Schindelin <[email protected]>
|
nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_fallocate *fallocate, int flags)
{
__be32 status = nfserr_notsupp;
struct file *file;
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
&fallocate->falloc_stateid,
WR_STATE, &file, NULL);
if (status != nfs_ok) {
dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
return status;
}
status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file,
fallocate->falloc_offset,
fallocate->falloc_length,
flags);
fput(file);
return status;
}
| 0 |
[
"CWE-20",
"CWE-129"
] |
linux
|
b550a32e60a4941994b437a8d662432a486235a5
| 88,135,458,768,756,280,000,000,000,000,000,000,000 | 21 |
nfsd: fix undefined behavior in nfsd4_layout_verify
UBSAN: Undefined behaviour in fs/nfsd/nfs4proc.c:1262:34
shift exponent 128 is too large for 32-bit type 'int'
Depending on compiler+architecture, this may cause the check for
layout_type to succeed for overly large values (which seems to be the
case with amd64). The large value will be later used in de-referencing
nfsd4_layout_ops for function pointers.
Reported-by: Jani Tuovila <[email protected]>
Signed-off-by: Ari Kauppi <[email protected]>
[[email protected]: use LAYOUT_TYPE_MAX instead of 32]
Cc: [email protected]
Reviewed-by: Dan Carpenter <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: J. Bruce Fields <[email protected]>
|
static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
u8 i;
for (i = 0; i < hdev->tm_info.num_tc; i++) {
hdev->tm_info.tc_info[i].tc_id = i;
hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
hdev->tm_info.tc_info[i].pgid = 0;
hdev->tm_info.tc_info[i].bw_limit =
hdev->tm_info.pg_info[0].bw_limit;
}
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
hdev->tm_info.prio_tc[i] =
(i >= hdev->tm_info.num_tc) ? 0 : i;
/* DCB is enabled if we have more than 1 TC or pfc_en is
* non-zero.
*/
if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
else
hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
| 0 |
[
"CWE-125"
] |
linux
|
04f25edb48c441fc278ecc154c270f16966cbb90
| 148,407,545,077,561,100,000,000,000,000,000,000,000 | 24 |
net: hns3: add some error checking in hclge_tm module
When hdev->tx_sch_mode is HCLGE_FLAG_VNET_BASE_SCH_MODE, the
hclge_tm_schd_mode_vnet_base_cfg calls hclge_tm_pri_schd_mode_cfg
with vport->vport_id as pri_id, which is used as index for
hdev->tm_info.tc_info, it will cause out of bound access issue
if vport_id is equal to or larger than HNAE3_MAX_TC.
Also hardware only support maximum speed of HCLGE_ETHER_MAX_RATE.
So this patch adds two checks for above cases.
Fixes: 848440544b41 ("net: hns3: Add support of TX Scheduler & Shaper to HNS3 driver")
Signed-off-by: Yunsheng Lin <[email protected]>
Signed-off-by: Peng Li <[email protected]>
Signed-off-by: Huazhong Tan <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
TfLiteRegistration* Register_AVERAGE_POOL_2D() {
return Register_AVERAGE_POOL_GENERIC_OPT();
}
| 0 |
[
"CWE-703",
"CWE-835"
] |
tensorflow
|
dfa22b348b70bb89d6d6ec0ff53973bacb4f4695
| 327,062,646,973,658,560,000,000,000,000,000,000,000 | 3 |
Prevent a division by 0 in average ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3
|
long long Long() const {
return chk(NumberLong)._numberLong();
}
| 0 |
[
"CWE-613"
] |
mongo
|
e55d6e2292e5dbe2f97153251d8193d1cc89f5d7
| 6,723,068,563,770,021,000,000,000,000,000,000,000 | 3 |
SERVER-38984 Validate unique User ID on UserCache hit
|
struct mb2_cache *mb2_cache_create(int bucket_bits)
{
struct mb2_cache *cache;
int bucket_count = 1 << bucket_bits;
int i;
if (!try_module_get(THIS_MODULE))
return NULL;
cache = kzalloc(sizeof(struct mb2_cache), GFP_KERNEL);
if (!cache)
goto err_out;
cache->c_bucket_bits = bucket_bits;
INIT_LIST_HEAD(&cache->c_lru_list);
spin_lock_init(&cache->c_lru_list_lock);
cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
GFP_KERNEL);
if (!cache->c_hash) {
kfree(cache);
goto err_out;
}
for (i = 0; i < bucket_count; i++)
INIT_HLIST_BL_HEAD(&cache->c_hash[i]);
cache->c_shrink.count_objects = mb2_cache_count;
cache->c_shrink.scan_objects = mb2_cache_scan;
cache->c_shrink.seeks = DEFAULT_SEEKS;
register_shrinker(&cache->c_shrink);
return cache;
err_out:
module_put(THIS_MODULE);
return NULL;
}
| 0 |
[
"CWE-241",
"CWE-19"
] |
linux
|
f9a61eb4e2471c56a63cd804c7474128138c38ac
| 95,032,396,372,007,720,000,000,000,000,000,000,000 | 35 |
mbcache2: reimplement mbcache
Original mbcache was designed to have more features than what ext?
filesystems ended up using. It supported entry being in more hashes, it
had a home-grown rwlocking of each entry, and one cache could cache
entries from multiple filesystems. This genericity also resulted in more
complex locking, larger cache entries, and generally more code
complexity.
This is reimplementation of the mbcache functionality to exactly fit the
purpose ext? filesystems use it for. Cache entries are now considerably
smaller (7 instead of 13 longs), the code is considerably smaller as
well (414 vs 913 lines of code), and IMO also simpler. The new code is
also much more lightweight.
I have measured the speed using artificial xattr-bench benchmark, which
spawns P processes, each process sets xattr for F different files, and
the value of xattr is randomly chosen from a pool of V values. Averages
of runtimes for 5 runs for various combinations of parameters are below.
The first value in each cell is old mbache, the second value is the new
mbcache.
V=10
F\P 1 2 4 8 16 32 64
10 0.158,0.157 0.208,0.196 0.500,0.277 0.798,0.400 3.258,0.584 13.807,1.047 61.339,2.803
100 0.172,0.167 0.279,0.222 0.520,0.275 0.825,0.341 2.981,0.505 12.022,1.202 44.641,2.943
1000 0.185,0.174 0.297,0.239 0.445,0.283 0.767,0.340 2.329,0.480 6.342,1.198 16.440,3.888
V=100
F\P 1 2 4 8 16 32 64
10 0.162,0.153 0.200,0.186 0.362,0.257 0.671,0.496 1.433,0.943 3.801,1.345 7.938,2.501
100 0.153,0.160 0.221,0.199 0.404,0.264 0.945,0.379 1.556,0.485 3.761,1.156 7.901,2.484
1000 0.215,0.191 0.303,0.246 0.471,0.288 0.960,0.347 1.647,0.479 3.916,1.176 8.058,3.160
V=1000
F\P 1 2 4 8 16 32 64
10 0.151,0.129 0.210,0.163 0.326,0.245 0.685,0.521 1.284,0.859 3.087,2.251 6.451,4.801
100 0.154,0.153 0.211,0.191 0.276,0.282 0.687,0.506 1.202,0.877 3.259,1.954 8.738,2.887
1000 0.145,0.179 0.202,0.222 0.449,0.319 0.899,0.333 1.577,0.524 4.221,1.240 9.782,3.579
V=10000
F\P 1 2 4 8 16 32 64
10 0.161,0.154 0.198,0.190 0.296,0.256 0.662,0.480 1.192,0.818 2.989,2.200 6.362,4.746
100 0.176,0.174 0.236,0.203 0.326,0.255 0.696,0.511 1.183,0.855 4.205,3.444 19.510,17.760
1000 0.199,0.183 0.240,0.227 1.159,1.014 2.286,2.154 6.023,6.039 ---,10.933 ---,36.620
V=100000
F\P 1 2 4 8 16 32 64
10 0.171,0.162 0.204,0.198 0.285,0.230 0.692,0.500 1.225,0.881 2.990,2.243 6.379,4.771
100 0.151,0.171 0.220,0.210 0.295,0.255 0.720,0.518 1.226,0.844 3.423,2.831 19.234,17.544
1000 0.192,0.189 0.249,0.225 1.162,1.043 2.257,2.093 5.853,4.997 ---,10.399 ---,32.198
We see that the new code is faster in pretty much all the cases and
starting from 4 processes there are significant gains with the new code
resulting in upto 20-times shorter runtimes. Also for large numbers of
cached entries all values for the old code could not be measured as the
kernel started hitting softlockups and died before the test completed.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
|
static void igmp_gq_timer_expire(struct timer_list *t)
{
struct in_device *in_dev = from_timer(in_dev, t, mr_gq_timer);
in_dev->mr_gq_running = 0;
igmpv3_send_report(in_dev, NULL);
in_dev_put(in_dev);
}
| 0 |
[
"CWE-362"
] |
linux
|
23d2b94043ca8835bd1e67749020e839f396a1c2
| 23,739,814,874,427,203,000,000,000,000,000,000,000 | 8 |
igmp: Add ip_mc_list lock in ip_check_mc_rcu
I got below panic when doing fuzz test:
Kernel panic - not syncing: panic_on_warn set ...
CPU: 0 PID: 4056 Comm: syz-executor.3 Tainted: G B 5.14.0-rc1-00195-gcff5c4254439-dirty #2
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
Call Trace:
dump_stack_lvl+0x7a/0x9b
panic+0x2cd/0x5af
end_report.cold+0x5a/0x5a
kasan_report+0xec/0x110
ip_check_mc_rcu+0x556/0x5d0
__mkroute_output+0x895/0x1740
ip_route_output_key_hash_rcu+0x2d0/0x1050
ip_route_output_key_hash+0x182/0x2e0
ip_route_output_flow+0x28/0x130
udp_sendmsg+0x165d/0x2280
udpv6_sendmsg+0x121e/0x24f0
inet6_sendmsg+0xf7/0x140
sock_sendmsg+0xe9/0x180
____sys_sendmsg+0x2b8/0x7a0
___sys_sendmsg+0xf0/0x160
__sys_sendmmsg+0x17e/0x3c0
__x64_sys_sendmmsg+0x9e/0x100
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x462eb9
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8
48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48>
3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f3df5af1c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000133
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462eb9
RDX: 0000000000000312 RSI: 0000000020001700 RDI: 0000000000000007
RBP: 0000000000000004 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007f3df5af26bc
R13: 00000000004c372d R14: 0000000000700b10 R15: 00000000ffffffff
It is one use-after-free in ip_check_mc_rcu.
In ip_mc_del_src, the ip_sf_list of pmc has been freed under pmc->lock protection.
But access to ip_sf_list in ip_check_mc_rcu is not protected by the lock.
Signed-off-by: Liu Jian <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static inline unsigned tx_desc_get_used(uint32_t *desc)
{
return (desc[1] & DESC_1_USED) ? 1 : 0;
}
| 0 |
[
"CWE-835"
] |
qemu
|
e73adfbeec9d4e008630c814759052ed945c3fed
| 30,293,680,944,227,444,000,000,000,000,000,000,000 | 4 |
cadence_gem: switch to use qemu_receive_packet() for loopback
This patch switches to use qemu_receive_packet() which can detect
reentrancy and return early.
This is intended to address CVE-2021-3416.
Cc: Prasad J Pandit <[email protected]>
Cc: [email protected]
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Alexander Bulekov <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
|
int ssl3_send_certificate_request(SSL *s)
{
unsigned char *p,*d;
int i,j,nl,off,n;
STACK_OF(X509_NAME) *sk=NULL;
X509_NAME *name;
BUF_MEM *buf;
if (s->state == SSL3_ST_SW_CERT_REQ_A)
{
buf=s->init_buf;
d=p=(unsigned char *)&(buf->data[4]);
/* get the list of acceptable cert types */
p++;
n=ssl3_get_req_cert_type(s,p);
d[0]=n;
p+=n;
n++;
if (TLS1_get_version(s) >= TLS1_2_VERSION)
{
nl = tls12_get_req_sig_algs(s, p + 2);
s2n(nl, p);
p += nl + 2;
n += nl + 2;
}
off=n;
p+=2;
n+=2;
sk=SSL_get_client_CA_list(s);
nl=0;
if (sk != NULL)
{
for (i=0; i<sk_X509_NAME_num(sk); i++)
{
name=sk_X509_NAME_value(sk,i);
j=i2d_X509_NAME(name,NULL);
if (!BUF_MEM_grow_clean(buf,4+n+j+2))
{
SSLerr(SSL_F_SSL3_SEND_CERTIFICATE_REQUEST,ERR_R_BUF_LIB);
goto err;
}
p=(unsigned char *)&(buf->data[4+n]);
if (!(s->options & SSL_OP_NETSCAPE_CA_DN_BUG))
{
s2n(j,p);
i2d_X509_NAME(name,&p);
n+=2+j;
nl+=2+j;
}
else
{
d=p;
i2d_X509_NAME(name,&p);
j-=2; s2n(j,d); j+=2;
n+=j;
nl+=j;
}
}
}
/* else no CA names */
p=(unsigned char *)&(buf->data[4+off]);
s2n(nl,p);
d=(unsigned char *)buf->data;
*(d++)=SSL3_MT_CERTIFICATE_REQUEST;
l2n3(n,d);
/* we should now have things packed up, so lets send
* it off */
s->init_num=n+4;
s->init_off=0;
#ifdef NETSCAPE_HANG_BUG
if (!BUF_MEM_grow_clean(buf, s->init_num + 4))
{
SSLerr(SSL_F_SSL3_SEND_CERTIFICATE_REQUEST,ERR_R_BUF_LIB);
goto err;
}
p=(unsigned char *)s->init_buf->data + s->init_num;
/* do the header */
*(p++)=SSL3_MT_SERVER_DONE;
*(p++)=0;
*(p++)=0;
*(p++)=0;
s->init_num += 4;
#endif
s->state = SSL3_ST_SW_CERT_REQ_B;
}
/* SSL3_ST_SW_CERT_REQ_B */
return(ssl3_do_write(s,SSL3_RT_HANDSHAKE));
err:
return(-1);
}
| 0 |
[
"CWE-326",
"CWE-310"
] |
openssl
|
bc8923b1ec9c467755cd86f7848c50ee8812e441
| 170,051,769,753,434,070,000,000,000,000,000,000,000 | 101 |
Fix for CVE-2014-0224
Only accept change cipher spec when it is expected instead of at any
time. This prevents premature setting of session keys before the master
secret is determined which an attacker could use as a MITM attack.
Thanks to KIKUCHI Masashi (Lepidum Co. Ltd.) for reporting this issue
and providing the initial fix this patch is based on.
|
static int key_notify_policy_flush(const struct km_event *c)
{
struct sk_buff *skb_out;
struct sadb_msg *hdr;
skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
if (!skb_out)
return -ENOBUFS;
hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
hdr->sadb_msg_type = SADB_X_SPDFLUSH;
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
}
| 0 |
[
"CWE-20",
"CWE-269"
] |
linux
|
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
| 519,451,450,030,077,700,000,000,000,000,000,000 | 21 |
net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <[email protected]>
Suggested-by: Eric Dumazet <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static struct client *imap_client_alloc(pool_t pool)
{
struct imap_client *imap_client;
imap_client = p_new(pool, struct imap_client, 1);
return &imap_client->common;
}
| 0 |
[] |
core
|
62061e8cf68f506c0ccaaba21fd4174764ca875f
| 95,133,220,443,820,600,000,000,000,000,000,000,000 | 7 |
imap-login: Split off client_invalid_command()
|
Item_string(THD *thd, CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE):
Item_literal(thd)
{
collation.set(cs, dv);
max_length= 0;
set_name(thd, NULL, 0, system_charset_info);
decimals= NOT_FIXED_DEC;
}
| 0 |
[
"CWE-617"
] |
server
|
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
| 160,017,405,220,895,620,000,000,000,000,000,000,000 | 8 |
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order...
When doing condition pushdown from HAVING into WHERE,
Item_equal::create_pushable_equalities() calls
item->set_extraction_flag(IMMUTABLE_FL) for constant items.
Then, Item::cleanup_excluding_immutables_processor() checks for this flag
to see if it should call item->cleanup() or leave the item as-is.
The failure happens when a constant item has a non-constant one inside it,
like:
(tbl.col=0 AND impossible_cond)
item->walk(cleanup_excluding_immutables_processor) works in a bottom-up
way so it
1. will call Item_func_eq(tbl.col=0)->cleanup()
2. will not call Item_cond_and->cleanup (as the AND is constant)
This creates an item tree where a fixed Item has an un-fixed Item inside
it which eventually causes an assertion failure.
Fixed by introducing this rule: instead of just calling
item->set_extraction_flag(IMMUTABLE_FL);
we call Item::walk() to set the flag for all sub-items of the item.
|
static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct sock_iocb siocb, *x;
if (pos != 0)
return -ESPIPE;
x = alloc_sock_iocb(iocb, &siocb);
if (!x)
return -ENOMEM;
return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
}
| 0 |
[] |
linux-2.6
|
644595f89620ba8446cc555be336d24a34464950
| 150,021,628,161,995,200,000,000,000,000,000,000,000 | 14 |
compat: Handle COMPAT_USE_64BIT_TIME in net/socket.c
Use helper functions aware of COMPAT_USE_64BIT_TIME to write struct
timeval and struct timespec to userspace in net/socket.c.
Signed-off-by: H. Peter Anvin <[email protected]>
|
static void tg3_restore_pci_state(struct tg3 *tp)
{
u32 val;
/* Re-enable indirect register accesses. */
pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
tp->misc_host_ctrl);
/* Set MAX PCI retry to zero. */
val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
tg3_flag(tp, PCIX_MODE))
val |= PCISTATE_RETRY_SAME_DMA;
/* Allow reads and writes to the APE register and memory space. */
if (tg3_flag(tp, ENABLE_APE))
val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
PCISTATE_ALLOW_APE_SHMEM_WR |
PCISTATE_ALLOW_APE_PSPACE_WR;
pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
if (!tg3_flag(tp, PCI_EXPRESS)) {
pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
tp->pci_cacheline_sz);
pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
tp->pci_lat_timer);
}
/* Make sure PCI-X relaxed ordering bit is clear. */
if (tg3_flag(tp, PCIX_MODE)) {
u16 pcix_cmd;
pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
&pcix_cmd);
pcix_cmd &= ~PCI_X_CMD_ERO;
pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
pcix_cmd);
}
if (tg3_flag(tp, 5780_CLASS)) {
/* Chip reset on 5780 will reset MSI enable bit,
* so need to restore it.
*/
if (tg3_flag(tp, USING_MSI)) {
u16 ctrl;
pci_read_config_word(tp->pdev,
tp->msi_cap + PCI_MSI_FLAGS,
&ctrl);
pci_write_config_word(tp->pdev,
tp->msi_cap + PCI_MSI_FLAGS,
ctrl | PCI_MSI_FLAGS_ENABLE);
val = tr32(MSGINT_MODE);
tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
}
}
}
| 0 |
[
"CWE-476",
"CWE-119"
] |
linux
|
715230a44310a8cf66fbfb5a46f9a62a9b2de424
| 264,732,498,620,854,940,000,000,000,000,000,000,000 | 59 |
tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Oded Horovitz <[email protected]>
Reported-by: Brad Spengler <[email protected]>
Cc: [email protected]
Cc: Matt Carlson <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
GF_Err trex_Read(GF_Box *s, GF_BitStream *bs)
{
GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *)s;
ptr->trackID = gf_bs_read_u32(bs);
ptr->def_sample_desc_index = gf_bs_read_u32(bs);
ptr->def_sample_duration = gf_bs_read_u32(bs);
ptr->def_sample_size = gf_bs_read_u32(bs);
ptr->def_sample_flags = gf_bs_read_u32(bs);
return GF_OK;
| 0 |
[
"CWE-400",
"CWE-401"
] |
gpac
|
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
| 241,158,491,490,086,170,000,000,000,000,000,000,000 | 11 |
prevent dref memleak on invalid input (#1183)
|
static bool vrend_compile_shader(struct vrend_sub_context *sub_ctx,
struct vrend_shader *shader)
{
GLint param;
const char *shader_parts[SHADER_MAX_STRINGS];
for (int i = 0; i < shader->glsl_strings.num_strings; i++)
shader_parts[i] = shader->glsl_strings.strings[i].buf;
shader->id = glCreateShader(conv_shader_type(shader->sel->type));
glShaderSource(shader->id, shader->glsl_strings.num_strings, shader_parts, NULL);
glCompileShader(shader->id);
glGetShaderiv(shader->id, GL_COMPILE_STATUS, ¶m);
if (param == GL_FALSE) {
char infolog[65536];
int len;
glGetShaderInfoLog(shader->id, 65536, &len, infolog);
vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
vrend_printf("shader failed to compile\n%s\n", infolog);
vrend_shader_dump(shader);
return false;
}
shader->is_compiled = true;
return true;
}
| 0 |
[
"CWE-787"
] |
virglrenderer
|
95e581fd181b213c2ed7cdc63f2abc03eaaa77ec
| 332,597,532,393,035,450,000,000,000,000,000,000,000 | 25 |
vrend: Add test to resource OOB write and fix it
v2: Also check that no depth != 1 has been send when none is due
Closes: #250
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Chia-I Wu <[email protected]>
|
tparm_trace_call(const char *string, TPARM_DATA * data)
{
if (USE_TRACEF(TRACE_CALLS)) {
int i;
for (i = 0; i < data->num_actual; i++) {
if (data->p_is_s[i] != 0) {
save_text(", %s", _nc_visbuf(data->p_is_s[i]), 0);
} else if ((long) data->param[i] > MAX_OF_TYPE(NCURSES_INT2) ||
(long) data->param[i] < 0) {
_tracef("BUG: problem with tparm parameter #%d of %d",
i + 1, data->num_actual);
break;
} else {
save_number(", %d", (int) data->param[i], 0);
}
}
_tracef(T_CALLED("%s(%s%s)"), TPS(tname), _nc_visbuf(string), TPS(out_buff));
TPS(out_used) = 0;
_nc_unlock_global(tracef);
}
}
| 0 |
[] |
ncurses
|
790a85dbd4a81d5f5d8dd02a44d84f01512ef443
| 63,383,396,275,279,110,000,000,000,000,000,000,000 | 21 |
ncurses 6.2 - patch 20200531
+ correct configure version-check/warnng for g++ to allow for 10.x
+ re-enable "bel" in konsole-base (report by Nia Huang)
+ add linux-s entry (patch by Alexandre Montaron).
+ drop long-obsolete convert_configure.pl
+ add test/test_parm.c, for checking tparm changes.
+ improve parameter-checking for tparm, adding function _nc_tiparm() to
handle the most-used case, which accepts only numeric parameters
(report/testcase by "puppet-meteor").
+ use a more conservative estimate of the buffer-size in lib_tparm.c's
save_text() and save_number(), in case the sprintf() function
passes-through unexpected characters from a format specifier
(report/testcase by "puppet-meteor").
+ add a check for end-of-string in cvtchar to handle a malformed
string in infotocap (report/testcase by "puppet-meteor").
|
static GF_Err ctrn_box_read(GF_Box *s, GF_BitStream *bs)
{
u32 i, count, flags, first_idx=0;
Bool inherit_dur, inherit_size, inherit_flags, inherit_ctso;
GF_TrunEntry *ent;
GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s;
flags = ptr->flags;
ptr->ctrn_flags = flags;
ptr->flags = 0;
ptr->sample_count = gf_bs_read_u16(bs);
ISOM_DECREASE_SIZE(ptr, 2);
if (flags & GF_ISOM_TRUN_DATA_OFFSET) {
if (flags & GF_ISOM_CTRN_DATAOFFSET_16) {
ptr->data_offset = gf_bs_read_u16(bs);
ISOM_DECREASE_SIZE(ptr, 2);
} else {
ptr->data_offset = gf_bs_read_u32(bs);
ISOM_DECREASE_SIZE(ptr, 4);
}
ptr->flags |= GF_ISOM_TRUN_DATA_OFFSET;
}
if (flags & GF_ISOM_CTRN_CTSO_MULTIPLIER) {
ptr->ctso_multiplier = gf_bs_read_u16(bs);
ISOM_DECREASE_SIZE(ptr, 2);
}
/*no sample dur/sample_flag/size/ctso for first or following, create a pack sample */
if (! (flags & 0x00FFFF00)) {
GF_SAFEALLOC(ent, GF_TrunEntry);
if (!ent) return GF_OUT_OF_MEM;
ent->nb_pack = ptr->sample_count;
gf_list_add(ptr->entries, ent);
return GF_OK;
}
/*allocate all entries*/
for (i=0; i<ptr->sample_count; i++) {
GF_SAFEALLOC(ent, GF_TrunEntry);
if (!ent) return GF_OUT_OF_MEM;
gf_list_add(ptr->entries, ent);
}
//unpack flags
ptr->ctrn_first_dur = (flags>>22) & 0x3;
ptr->ctrn_first_size = (flags>>20) & 0x3;
ptr->ctrn_first_sample_flags = (flags>>18) & 0x3;
ptr->ctrn_first_ctts = (flags>>16) & 0x3;
ptr->ctrn_dur = (flags>>14) & 0x3;
ptr->ctrn_size = (flags>>12) & 0x3;
ptr->ctrn_sample_flags = (flags>>10) & 0x3;
ptr->ctrn_ctts = (flags>>8) & 0x3;
inherit_dur = flags & GF_ISOM_CTRN_INHERIT_DUR;
inherit_size = flags & GF_ISOM_CTRN_INHERIT_SIZE;
inherit_flags = flags & GF_ISOM_CTRN_INHERIT_FLAGS;
inherit_ctso = flags & GF_ISOM_CTRN_INHERIT_CTSO;
if (flags & GF_ISOM_CTRN_FIRST_SAMPLE) {
ent = gf_list_get(ptr->entries, 0);
first_idx = 1;
if (!inherit_dur && ptr->ctrn_first_dur) {
ent->Duration = gf_bs_read_int(bs, gf_isom_ctrn_field_size_bits(ptr->ctrn_first_dur) );
ISOM_DECREASE_SIZE(ptr, ctrn_field_size(ptr->ctrn_first_dur) );
}
if (!inherit_size && ptr->ctrn_first_size) {
ent->size = gf_bs_read_int(bs, gf_isom_ctrn_field_size_bits(ptr->ctrn_first_size) );
ISOM_DECREASE_SIZE(ptr, ctrn_field_size(ptr->ctrn_first_size) );
}
if (!inherit_flags && ptr->ctrn_first_sample_flags) {
ent->flags = ctrn_read_flags(bs, gf_isom_ctrn_field_size_bits(ptr->ctrn_first_sample_flags) );
ISOM_DECREASE_SIZE(ptr, ctrn_field_size(ptr->ctrn_first_sample_flags) );
}
if (!inherit_ctso && ptr->ctrn_first_ctts) {
ent->CTS_Offset = gf_bs_read_int(bs, gf_isom_ctrn_field_size_bits(ptr->ctrn_first_ctts) );
ISOM_DECREASE_SIZE(ptr, ctrn_field_size(ptr->ctrn_first_ctts) );
if (ptr->ctso_multiplier)
ent->CTS_Offset *= (s32) ptr->ctso_multiplier;
}
}
count = ptr->sample_count - first_idx;
if (!inherit_dur && ptr->ctrn_dur) {
u32 nbbits = gf_isom_ctrn_field_size_bits(ptr->ctrn_dur);
ISOM_DECREASE_SIZE(ptr, count * nbbits / 8);
for (i=first_idx; i<ptr->sample_count; i++) {
ent = gf_list_get(ptr->entries, i);
ent->Duration = gf_bs_read_int(bs, nbbits);
}
}
if (!inherit_size && ptr->ctrn_size) {
u32 nbbits = gf_isom_ctrn_field_size_bits(ptr->ctrn_size);
ISOM_DECREASE_SIZE(ptr, count * nbbits / 8);
for (i=first_idx; i<ptr->sample_count; i++) {
ent = gf_list_get(ptr->entries, i);
ent->size = gf_bs_read_int(bs, nbbits);
}
}
if (!inherit_flags && ptr->ctrn_sample_flags) {
u32 nbbits = gf_isom_ctrn_field_size_bits(ptr->ctrn_sample_flags);
ISOM_DECREASE_SIZE(ptr, count * nbbits / 8);
for (i=first_idx; i<ptr->sample_count; i++) {
ent = gf_list_get(ptr->entries, i);
ent->flags = ctrn_read_flags(bs, nbbits);
}
}
if (!inherit_ctso && ptr->ctrn_ctts) {
u32 nbbits = gf_isom_ctrn_field_size_bits(ptr->ctrn_ctts);
ISOM_DECREASE_SIZE(ptr, count * nbbits / 8);
for (i=first_idx; i<ptr->sample_count; i++) {
ent = gf_list_get(ptr->entries, i);
ent->CTS_Offset = gf_bs_read_int(bs, nbbits);
if (ptr->ctso_multiplier)
ent->CTS_Offset *= (s32) ptr->ctso_multiplier;
}
}
return GF_OK;
| 0 |
[
"CWE-787"
] |
gpac
|
388ecce75d05e11fc8496aa4857b91245007d26e
| 274,973,344,597,961,960,000,000,000,000,000,000,000 | 116 |
fixed #1587
|
int meth_get_head(struct transaction_t *txn, void *params)
{
struct meth_params *gparams = (struct meth_params *) params;
const char **hdr;
struct mime_type_t *mime = NULL;
int ret = 0, r = 0, precond, rights;
const char *data = NULL;
unsigned long datalen = 0, offset = 0;
struct buf msg_buf = BUF_INITIALIZER;
struct resp_body_t *resp_body = &txn->resp_body;
struct mailbox *mailbox = NULL;
struct dav_data *ddata;
struct index_record record;
const char *etag = NULL;
time_t lastmod = 0;
void *davdb = NULL, *obj = NULL;
char *freeme = NULL;
/* Parse the path */
r = dav_parse_req_target(txn, gparams);
if (r) return r;
if (txn->req_tgt.namespace->id == URL_NS_PRINCIPAL) {
/* Special "principal" */
if (txn->req_tgt.flags == TGT_SERVER_INFO) return get_server_info(txn);
/* No content for principals (yet) */
return HTTP_NO_CONTENT;
}
if (!txn->req_tgt.resource) {
/* Do any collection processing */
if (gparams->get) return gparams->get(txn, NULL, NULL, NULL, NULL);
/* We don't handle GET on a collection */
return HTTP_NO_CONTENT;
}
/* Check ACL for current user */
rights = httpd_myrights(httpd_authstate, txn->req_tgt.mbentry);
if ((rights & DACL_READ) != DACL_READ) {
/* DAV:need-privileges */
txn->error.precond = DAV_NEED_PRIVS;
txn->error.resource = txn->req_tgt.path;
txn->error.rights = DACL_READ;
return HTTP_NO_PRIVS;
}
if (gparams->mime_types) {
/* Check requested MIME type:
1st entry in gparams->mime_types array MUST be default MIME type */
if ((hdr = spool_getheader(txn->req_hdrs, "Accept")))
mime = get_accept_type(hdr, gparams->mime_types);
else mime = gparams->mime_types;
if (!mime) return HTTP_NOT_ACCEPTABLE;
}
if (txn->req_tgt.mbentry->server) {
/* Remote mailbox */
struct backend *be;
be = proxy_findserver(txn->req_tgt.mbentry->server,
&http_protocol, httpd_userid,
&backend_cached, NULL, NULL, httpd_in);
if (!be) return HTTP_UNAVAILABLE;
return http_pipe_req_resp(be, txn);
}
/* Local Mailbox */
/* Open mailbox for reading */
r = mailbox_open_irl(txn->req_tgt.mbentry->name, &mailbox);
if (r) {
syslog(LOG_ERR, "http_mailbox_open(%s) failed: %s",
txn->req_tgt.mbentry->name, error_message(r));
goto done;
}
/* Open the DAV DB corresponding to the mailbox */
davdb = gparams->davdb.open_db(mailbox);
/* Find message UID for the resource */
gparams->davdb.lookup_resource(davdb, txn->req_tgt.mbentry->name,
txn->req_tgt.resource, (void **) &ddata, 0);
if (!ddata->rowid) {
ret = HTTP_NOT_FOUND;
goto done;
}
/* Fetch resource validators */
r = gparams->get_validators(mailbox, (void *) ddata, httpd_userid,
&record, &etag, &lastmod);
if (r) {
txn->error.desc = error_message(r);
ret = HTTP_SERVER_ERROR;
goto done;
}
txn->flags.ranges = (ddata->imap_uid != 0);
/* Check any preconditions, including range request */
precond = gparams->check_precond(txn, params, mailbox,
(void *) ddata, etag, lastmod);
switch (precond) {
case HTTP_OK:
case HTTP_PARTIAL:
case HTTP_NOT_MODIFIED:
/* Fill in ETag, Last-Modified, Expires, and Cache-Control */
resp_body->etag = etag;
resp_body->lastmod = lastmod;
resp_body->maxage = 3600; /* 1 hr */
txn->flags.cc |= CC_MAXAGE | CC_REVALIDATE; /* don't use stale data */
if (httpd_userid) txn->flags.cc |= CC_PRIVATE;
if (precond != HTTP_NOT_MODIFIED && record.uid) break;
GCC_FALLTHROUGH
default:
/* We failed a precondition - don't perform the request */
ret = precond;
goto done;
}
/* Do any special processing */
if (gparams->get) {
ret = gparams->get(txn, mailbox, &record, ddata, &obj);
if (ret != HTTP_CONTINUE) goto done;
ret = 0;
}
if (mime && !resp_body->type) {
txn->flags.vary |= VARY_ACCEPT;
resp_body->type = mime->content_type;
}
if (!obj) {
/* Raw resource - length doesn't include RFC 5322 header */
offset = record.header_size;
datalen = record.size - offset;
if (txn->meth == METH_GET) {
/* Load message containing the resource */
r = mailbox_map_record(mailbox, &record, &msg_buf);
if (r) goto done;
data = buf_base(&msg_buf) + offset;
if (mime != gparams->mime_types) {
/* Not the storage format - create resource object */
struct buf inbuf;
buf_init_ro(&inbuf, data, datalen);
obj = gparams->mime_types[0].to_object(&inbuf);
buf_free(&inbuf);
}
}
}
if (obj) {
/* Convert object into requested MIME type */
struct buf *outbuf = mime->from_object(obj);
datalen = buf_len(outbuf);
if (txn->meth == METH_GET) data = freeme = buf_release(outbuf);
buf_destroy(outbuf);
if (gparams->mime_types[0].free) gparams->mime_types[0].free(obj);
}
write_body(precond, txn, data, datalen);
buf_free(&msg_buf);
free(freeme);
done:
if (davdb) gparams->davdb.close_db(davdb);
if (r) {
txn->error.desc = error_message(r);
ret = HTTP_SERVER_ERROR;
}
mailbox_close(&mailbox);
return ret;
}
| 0 |
[] |
cyrus-imapd
|
6703ff881b6056e0c045a7b795ce8ba1bbb87027
| 329,481,916,417,848,940,000,000,000,000,000,000,000 | 188 |
http_dav.c: add 'private' Cache-Control directive for cacheable responses that require authentication
|
/*
 * Print the instruction pointer of a trapping task to the kernel log.
 *
 * On 32-bit kernels only EIP is printed; on 64-bit the CS selector is
 * shown alongside RIP.  %pS resolves the address to a kernel symbol
 * name where one exists.
 *
 * NOTE(review): regs->ip is handed straight to show_opcodes(), which
 * dumps the instruction bytes at that address.  If userspace faults by
 * jumping to a kernel address, regs->ip is attacker-chosen and kernel
 * memory contents can end up in dmesg -- confirm show_opcodes() (or a
 * caller) validates the address against the faulting privilege level.
 */
void show_ip(struct pt_regs *regs, const char *loglvl)
{
#ifdef CONFIG_X86_32
	printk("%sEIP: %pS\n", loglvl, (void *)regs->ip);
#else
	printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
#endif
	show_opcodes((u8 *)regs->ip, loglvl);
}
| 1 |
[
"CWE-20"
] |
linux
|
342db04ae71273322f0011384a9ed414df8bdae4
| 172,255,949,513,976,020,000,000,000,000,000,000,000 | 9 |
x86/dumpstack: Don't dump kernel memory based on usermode RIP
show_opcodes() is used both for dumping kernel instructions and for dumping
user instructions. If userspace causes #PF by jumping to a kernel address,
show_opcodes() can be reached with regs->ip controlled by the user,
pointing to kernel code. Make sure that userspace can't trick us into
dumping kernel memory into dmesg.
Fixes: 7cccf0725cf7 ("x86/dumpstack: Add a show_ip() function")
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
|
/* Module deinit hook: release the memory pools backing H2 connections
 * and H2 streams.  The two pools are independent, so destruction order
 * is irrelevant. */
static void __h2_deinit(void)
{
	pool_destroy(pool_head_h2c);
	pool_destroy(pool_head_h2s);
}
| 0 |
[
"CWE-119"
] |
haproxy
|
3f0e1ec70173593f4c2b3681b26c04a4ed5fc588
| 186,921,081,738,890,050,000,000,000,000,000,000,000 | 5 |
BUG/CRITICAL: h2: fix incorrect frame length check
The incoming H2 frame length was checked against the max_frame_size
setting instead of being checked against the bufsize. The max_frame_size
only applies to outgoing traffic and not to incoming one, so if a large
enough frame size is advertised in the SETTINGS frame, a wrapped frame
will be defragmented into a temporary allocated buffer where the second
fragment may overflow the heap by up to 16 kB.
It is very unlikely that this can be exploited for code execution given
that buffers are very short lived and their address not realistically
predictable in production, but the likeliness of an immediate crash is
absolutely certain.
This fix must be backported to 1.8.
Many thanks to Jordan Zebor from F5 Networks for reporting this issue
in a responsible way.
|
/* Property write handler for SNMP::$exceptions_enabled.
 *
 * Coerces the incoming zval to a long -- working on a temporary copy
 * when it is not already IS_LONG so the caller's value is untouched --
 * and stores the result on the object.  Always returns SUCCESS. */
static int php_snmp_write_exceptions_enabled(php_snmp_object *snmp_object, zval *newval)
{
	zval tmp_long;
	int used_tmp = 0;

	if (Z_TYPE_P(newval) != IS_LONG) {
		ZVAL_COPY(&tmp_long, newval);
		convert_to_long(&tmp_long);
		newval = &tmp_long;
		used_tmp = 1;
	}

	snmp_object->exceptions_enabled = Z_LVAL_P(newval);

	if (used_tmp) {
		zval_ptr_dtor(newval);
	}

	return SUCCESS;
}
| 0 |
[
"CWE-20"
] |
php-src
|
6e25966544fb1d2f3d7596e060ce9c9269bbdcf8
| 219,016,019,185,354,200,000,000,000,000,000,000,000 | 17 |
Fixed bug #71704 php_snmp_error() Format String Vulnerability
|
// Copy `length` bytes starting at offset `start` out of `buffer` (which may
// span several raw slices) into newly allocated guest memory, then write the
// guest pointer and the size into the guest words at `ptr_ptr` / `size_ptr`.
// Returns false on a bounds failure, allocation failure, or failed VM write.
inline bool Wasm::copyToPointerSize(const Buffer::Instance& buffer, uint64_t start, uint64_t length,
                                    uint64_t ptr_ptr, uint64_t size_ptr) {
  uint64_t size = buffer.length();
  // Bounds check.  NOTE(review): start + length can wrap for huge values --
  // confirm callers constrain these arguments.
  if (size < start + length) {
    return false;
  }
  auto nslices = buffer.getRawSlices(nullptr, 0);
  auto slices = std::make_unique<Buffer::RawSlice[]>(nslices + 10 /* pad for evbuffer overrun */);
  auto actual_slices = buffer.getRawSlices(&slices[0], nslices);
  uint64_t pointer = 0;
  char* p = static_cast<char*>(allocMemory(length, &pointer));
  auto s = start;  // bytes still to skip before copying begins
  auto l = length; // bytes still to copy
  if (!p) {
    return false;
  }
  for (uint64_t i = 0; i < actual_slices; i++) {
    // Skip slices that lie entirely before the start offset.
    if (slices[i].len_ <= s) {
      s -= slices[i].len_;
      continue;
    }
    auto ll = l;
    // NOTE(review): the bytes available in this slice after the offset are
    // slices[i].len_ - s, not s + slices[i].len_; confirm this clamp cannot
    // let the memcpy below read past the end of the slice.
    if (ll > s + slices[i].len_)
      ll = s + slices[i].len_;
    memcpy(p, static_cast<char*>(slices[i].mem_) + s, ll);
    l -= ll;
    // l is unsigned, so this condition only triggers at exactly zero.
    if (l <= 0) {
      break;
    }
    s = 0; // the skip offset applies only to the first copied slice
    p += ll;
  }
  // Publish the allocation's guest pointer and the copied size.
  if (!wasm_vm_->setWord(ptr_ptr, Word(pointer))) {
    return false;
  }
  if (!wasm_vm_->setWord(size_ptr, Word(length))) {
    return false;
  }
  return true;
}
| 0 |
[
"CWE-476"
] |
envoy
|
8788a3cf255b647fd14e6b5e2585abaaedb28153
| 66,412,574,810,231,805,000,000,000,000,000,000,000 | 40 |
1.4 - Do not call into the VM unless the VM Context has been created. (#24)
* Ensure that the in VM Context is created before onDone is called.
Signed-off-by: John Plevyak <[email protected]>
* Update as per offline discussion.
Signed-off-by: John Plevyak <[email protected]>
* Set in_vm_context_created_ in onNetworkNewConnection.
Signed-off-by: John Plevyak <[email protected]>
* Add guards to other network calls.
Signed-off-by: John Plevyak <[email protected]>
* Fix common/wasm tests.
Signed-off-by: John Plevyak <[email protected]>
* Patch tests.
Signed-off-by: John Plevyak <[email protected]>
* Remove unecessary file from cherry-pick.
Signed-off-by: John Plevyak <[email protected]>
|
/* Handle a guest write to the Config4 register.
 *
 * The write is silently dropped unless the configuration registers have
 * been unlocked; otherwise the reserved bits (mask 0x0a) are preserved
 * from the current register contents. */
static void rtl8139_Config4_write(RTL8139State *s, uint32_t val)
{
    val &= 0xff;

    DPRINTF("Config4 write val=0x%02x\n", val);

    if (rtl8139_config_writable(s)) {
        /* mask unwritable bits */
        s->Config4 = SET_MASKED(val, 0x0a, s->Config4);
    }
}
| 0 |
[
"CWE-835"
] |
qemu
|
5311fb805a4403bba024e83886fa0e7572265de4
| 30,157,420,120,158,804,000,000,000,000,000,000,000 | 15 |
rtl8139: switch to use qemu_receive_packet() for loopback
This patch switches to use qemu_receive_packet() which can detect
reentrancy and return early.
This is intended to address CVE-2021-3416.
Cc: Prasad J Pandit <[email protected]>
Cc: [email protected]
Buglink: https://bugs.launchpad.net/qemu/+bug/1910826
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Alexander Bulekov <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
|
/*
 * Decoder entry point: parse the NAL units in avpkt and, when a complete
 * picture is available, return it through *got_frame / data.
 *
 * A zero-sized packet signals end of stream: the delayed (reordering)
 * pictures are drained, one per call, until none remain.
 */
static int h264_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    H264Context *h = avctx->priv_data;
    AVFrame *pict = data;
    int buf_index = 0;
    int ret;

    h->flags = avctx->flags;

    /* end of stream, output what is still in the buffers */
out:
    if (buf_size == 0) {
        Picture *out;
        int i, out_idx;

        h->cur_pic_ptr = NULL;

        // FIXME factorize this with the output code below
        /* Pick the delayed picture with the smallest POC; stop the scan
         * at the first keyframe or MMCO reset, which starts a new
         * ordering epoch. */
        out = h->delayed_pic[0];
        out_idx = 0;
        for (i = 1;
             h->delayed_pic[i] &&
             !h->delayed_pic[i]->f.key_frame &&
             !h->delayed_pic[i]->mmco_reset;
             i++)
            if (h->delayed_pic[i]->poc < out->poc) {
                out = h->delayed_pic[i];
                out_idx = i;
            }
        /* Remove the chosen picture from the delayed list. */
        for (i = out_idx; h->delayed_pic[i]; i++)
            h->delayed_pic[i] = h->delayed_pic[i + 1];

        if (out) {
            ret = output_frame(h, pict, &out->f);
            if (ret < 0)
                return ret;
            *got_frame = 1;
        }

        return buf_index;
    }

    buf_index = decode_nal_units(h, buf, buf_size, 0);
    if (buf_index < 0)
        return AVERROR_INVALIDDATA;

    /* An end-of-sequence NAL with no pending picture drains the delayed
     * pictures via the flush path above. */
    if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
        buf_size = 0;
        goto out;
    }

    if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
        if (avctx->skip_frame >= AVDISCARD_NONREF)
            return 0;
        av_log(avctx, AV_LOG_ERROR, "no frame!\n");
        return AVERROR_INVALIDDATA;
    }

    if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) ||
        (h->mb_y >= h->mb_height && h->mb_height)) {
        if (avctx->flags2 & CODEC_FLAG2_CHUNKS)
            decode_postinit(h, 1);

        field_end(h, 0);

        *got_frame = 0;
        /* Output the next picture only if it has recovered, or if the
         * caller explicitly asked for corrupt output. */
        if (h->next_output_pic && ((avctx->flags & CODEC_FLAG_OUTPUT_CORRUPT) ||
                                   h->next_output_pic->recovered)) {
            if (!h->next_output_pic->recovered)
                h->next_output_pic->f.flags |= AV_FRAME_FLAG_CORRUPT;

            ret = output_frame(h, pict, &h->next_output_pic->f);
            if (ret < 0)
                return ret;
            *got_frame = 1;
        }
    }

    assert(pict->buf[0] || !*got_frame);

    return get_consumed_bytes(buf_index, buf_size);
}
| 1 |
[
"CWE-787"
] |
FFmpeg
|
1f097d168d9cad473dd44010a337c1413a9cd198
| 112,262,741,681,516,590,000,000,000,000,000,000,000 | 86 |
h264: reset data partitioning at the beginning of each decode call
Prevents using GetBitContexts with data from previous calls.
Fixes access to freed memory.
Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
CC:[email protected]
|
/*
 * Choose this mm's mmap layout: the legacy bottom-up layout when the
 * personality bit (or unlimited stack growth) demands it, otherwise the
 * top-down layout with a randomized base.
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	const int use_legacy = mmap_is_legacy();

	/* mmap_base() is only evaluated on the top-down path. */
	mm->mmap_base = use_legacy ? TASK_UNMAPPED_BASE : mmap_base();
	mm->get_unmapped_area = use_legacy ? arch_get_unmapped_area
					   : arch_get_unmapped_area_topdown;
}
| 0 |
[] |
linux
|
d6c763afab142a85e4770b4bc2a5f40f256d5c5d
| 328,300,029,749,489,770,000,000,000,000,000,000,000 | 14 |
arm64/mm: Remove hack in mmap randomize layout
Since commit 8a0a9bd4db63 ('random: make get_random_int() more
random'), get_random_int() returns a random value for each call,
so comment and hack introduced in mmap_rnd() as part of commit
1d18c47c735e ('arm64: MMU fault handling and page table management')
are incorrects.
Commit 1d18c47c735e seems to use the same hack introduced by
commit a5adc91a4b44 ('powerpc: Ensure random space between stack
and mmaps'), latter copied in commit 5a0efea09f42 ('sparc64: Sharpen
address space randomization calculations.').
But both architectures were cleaned up as part of commit
fa8cbaaf5a68 ('powerpc+sparc64/mm: Remove hack in mmap randomize
layout') as hack is no more needed since commit 8a0a9bd4db63.
So the present patch removes the comment and the hack around
get_random_int() on AArch64's mmap_rnd().
Cc: David S. Miller <[email protected]>
Cc: Anton Blanchard <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Acked-by: Will Deacon <[email protected]>
Acked-by: Dan McGee <[email protected]>
Signed-off-by: Yann Droneaud <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
|
xmlSchemaClearAttrInfos(xmlSchemaValidCtxtPtr vctxt)
{
    /*
     * Reset the validation context's attribute-info slots for reuse.
     *
     * For each populated slot: free the name strings and/or the value
     * string when the slot's flags say it owns them, free any attached
     * computed value, and zero the slot.  The slot array itself is kept;
     * only the count is reset.
     */
    int idx;

    if (vctxt->nbAttrInfos == 0)
	return;
    for (idx = 0; idx < vctxt->nbAttrInfos; idx++) {
	xmlSchemaAttrInfoPtr info = vctxt->attrInfos[idx];

	if ((info->flags & XML_SCHEMA_NODE_INFO_FLAG_OWNED_NAMES) != 0) {
	    if (info->localName != NULL)
		xmlFree((xmlChar *) info->localName);
	    if (info->nsName != NULL)
		xmlFree((xmlChar *) info->nsName);
	}
	if (((info->flags & XML_SCHEMA_NODE_INFO_FLAG_OWNED_VALUES) != 0) &&
	    (info->value != NULL))
	    xmlFree((xmlChar *) info->value);
	if (info->val != NULL) {
	    xmlSchemaFreeValue(info->val);
	    info->val = NULL;
	}
	memset(info, 0, sizeof(xmlSchemaAttrInfo));
    }
    vctxt->nbAttrInfos = 0;
}
| 0 |
[
"CWE-134"
] |
libxml2
|
4472c3a5a5b516aaf59b89be602fbce52756c3e9
| 164,832,878,077,595,340,000,000,000,000,000,000,000 | 27 |
Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports.
|
/*
 * PCI removal callback: undo what dwc3_pci_probe() set up -- drop the
 * Baytrail GPIO lookup table (on that device only), stop the wakeup
 * worker, disable wakeup, and tear down the child dwc3 platform device.
 */
static void dwc3_pci_remove(struct pci_dev *pci)
{
	struct dwc3_pci *dwc = pci_get_drvdata(pci);

	if (dwc->pci->device == PCI_DEVICE_ID_INTEL_BYT)
		gpiod_remove_lookup_table(&platform_bytcr_gpios);

#ifdef CONFIG_PM
	cancel_work_sync(&dwc->wakeup_work);
#endif

	device_init_wakeup(&pci->dev, false);
	pm_runtime_get(&pci->dev);
	platform_device_unregister(dwc->dwc3);
}
| 0 |
[
"CWE-401"
] |
linux
|
9bbfceea12a8f145097a27d7c7267af25893c060
| 157,544,391,505,276,520,000,000,000,000,000,000,000 | 14 |
usb: dwc3: pci: prevent memory leak in dwc3_pci_probe
In dwc3_pci_probe a call to platform_device_alloc allocates a device
which is correctly put in case of error except one case: when the call to
platform_device_add_properties fails it directly returns instead of
going to error handling. This commit replaces return with the goto.
Fixes: 1a7b12f69a94 ("usb: dwc3: pci: Supply device properties via driver data")
Signed-off-by: Navid Emamdoost <[email protected]>
Signed-off-by: Felipe Balbi <[email protected]>
|
clientInterpretRequestHeaders(ClientHttpRequest * http)
{
    /* Examine the client's request headers and derive the request flags:
     * If-Modified-Since, cache-control, ranges, authentication, Via-based
     * forwarding-loop detection, and hierarchy routing eligibility. */
    HttpRequest *request = http->request;
    HttpHeader *req_hdr = &request->header;
    bool no_cache = false;

    request->imslen = -1;
    request->ims = req_hdr->getTime(Http::HdrType::IF_MODIFIED_SINCE);

    if (request->ims > 0)
        request->flags.ims = true;

    if (!request->flags.ignoreCc) {
        if (request->cache_control) {
            if (request->cache_control->hasNoCache())
                no_cache=true;

            // RFC 2616: treat Pragma:no-cache as if it was Cache-Control:no-cache when Cache-Control is missing
        } else if (req_hdr->has(Http::HdrType::PRAGMA))
            no_cache = req_hdr->hasListMember(Http::HdrType::PRAGMA,"no-cache",',');
    }

    /* Requests with unrecognized methods are never served from cache. */
    if (request->method == Http::METHOD_OTHER) {
        no_cache=true;
    }

    if (no_cache) {
#if USE_HTTP_VIOLATIONS

        /* Optionally downgrade a client reload into an IMS revalidation
         * instead of honoring the full no-cache semantics. */
        if (Config.onoff.reload_into_ims)
            request->flags.nocacheHack = true;
        else if (refresh_nocache_hack)
            request->flags.nocacheHack = true;
        else
#endif

            request->flags.noCache = true;
    }

    /* ignore range header in non-GETs or non-HEADs */
    if (request->method == Http::METHOD_GET || request->method == Http::METHOD_HEAD) {
        // XXX: initialize if we got here without HttpRequest::parseHeader()
        if (!request->range)
            request->range = req_hdr->getRange();

        if (request->range) {
            request->flags.isRanged = true;
            clientStreamNode *node = (clientStreamNode *)http->client_stream.tail->data;
            /* XXX: This is suboptimal. We should give the stream the range set,
             * and thereby let the top of the stream set the offset when the
             * size becomes known. As it is, we will end up requesting from 0
             * for every -X range specification.
             * RBC - this may be somewhat wrong. We should probably set the range
             * iter up at this point.
             */
            node->readBuffer.offset = request->range->lowestOffset(0);
            http->range_iter.pos = request->range->begin();
            http->range_iter.end = request->range->end();
            http->range_iter.valid = true;
        }
    }

    /* Only HEAD and GET requests permit a Range or Request-Range header.
     * If these headers appear on any other type of request, delete them now.
     */
    else {
        req_hdr->delById(Http::HdrType::RANGE);
        req_hdr->delById(Http::HdrType::REQUEST_RANGE);
        request->ignoreRange("neither HEAD nor GET");
    }

    if (req_hdr->has(Http::HdrType::AUTHORIZATION))
        request->flags.auth = true;

    clientCheckPinning(http);

    /* Credentials embedded in the request URL also count as auth. */
    if (!request->url.userInfo().isEmpty())
        request->flags.auth = true;

    if (req_hdr->has(Http::HdrType::VIA)) {
        String s = req_hdr->getList(Http::HdrType::VIA);
        /*
         * ThisCache cannot be a member of Via header, "1.1 ThisCache" can.
         * Note ThisCache2 has a space prepended to the hostname so we don't
         * accidentally match super-domains.
         */

        if (strListIsSubstr(&s, ThisCache2, ',')) {
            request->flags.loopDetected = true;
        }

#if USE_FORW_VIA_DB
        fvdbCountVia(s.termedBuf());

#endif

        s.clean();
    }

    // headers only relevant to reverse-proxy
    if (request->flags.accelerated) {
        // check for a cdn-info member with a cdn-id matching surrogate_id
        // XXX: HttpHeader::hasListMember() does not handle OWS around ";" yet
        if (req_hdr->hasListMember(Http::HdrType::CDN_LOOP, Config.Accel.surrogate_id, ','))
            request->flags.loopDetected = true;
    }

    if (request->flags.loopDetected) {
        debugObj(33, DBG_IMPORTANT, "WARNING: Forwarding loop detected for:\n",
                 request, (ObjPackMethod) & httpRequestPack);
    }

#if USE_FORW_VIA_DB

    if (req_hdr->has(Http::HdrType::X_FORWARDED_FOR)) {
        String s = req_hdr->getList(Http::HdrType::X_FORWARDED_FOR);
        fvdbCountForw(s.termedBuf());
        s.clean();
    }

#endif

    request->flags.cachable = http->request->maybeCacheable();

    if (clientHierarchical(http))
        request->flags.hierarchical = true;

    debugs(85, 5, "clientInterpretRequestHeaders: REQ_NOCACHE = " <<
           (request->flags.noCache ? "SET" : "NOT SET"));
    debugs(85, 5, "clientInterpretRequestHeaders: REQ_CACHABLE = " <<
           (request->flags.cachable ? "SET" : "NOT SET"));
    debugs(85, 5, "clientInterpretRequestHeaders: REQ_HIERARCHICAL = " <<
           (request->flags.hierarchical ? "SET" : "NOT SET"));
}
| 1 |
[
"CWE-116"
] |
squid
|
7024fb734a59409889e53df2257b3fc817809fb4
| 262,990,396,721,541,020,000,000,000,000,000,000,000 | 135 |
Handle more Range requests (#790)
Also removed some effectively unused code.
|
njs_generate_for_let_update(njs_vm_t *vm, njs_generator_t *generator,
    njs_parser_node_t *node)
{
    /*
     * Walk the statement chain of a for-loop head and, for every
     * let/const declaration whose variable is captured by a closure,
     * emit a LET_UPDATE instruction so the binding gets a fresh copy on
     * each iteration.  Stops at the first non-let/const statement.
     */
    njs_parser_node_t        *decl;
    njs_vmcode_variable_t    *var_code;
    njs_variable_reference_t *var_ref;

    for (; node != NULL && node->token_type == NJS_TOKEN_STATEMENT;
         node = node->left)
    {
        decl = node->right;

        if (decl->token_type != NJS_TOKEN_LET
            && decl->token_type != NJS_TOKEN_CONST)
        {
            return NJS_OK;
        }

        var_ref = &decl->left->u.reference;

        if (var_ref->variable->closure) {
            njs_generate_code(generator, njs_vmcode_variable_t, var_code,
                              NJS_VMCODE_LET_UPDATE, 0, decl);
            var_code->dst = decl->left->index;
        }
    }

    return NJS_OK;
}
| 0 |
[
"CWE-703",
"CWE-754"
] |
njs
|
404553896792b8f5f429dc8852d15784a59d8d3e
| 213,084,515,457,912,270,000,000,000,000,000,000,000 | 29 |
Fixed break instruction in a try-catch block.
Previously, JUMP offset for a break instruction inside a try-catch
block was not set to a correct offset during code generation
when a return instruction was present in inner try-catch block.
The fix is to update the JUMP offset appropriately.
This closes #553 issue on Github.
|
// Verifies that the access-log handler attached via the filter chain is
// invoked with a fully populated StreamInfo: a 200 response code, local and
// remote addresses, and a route entry.  With use_remote_address_ disabled,
// the downstream remote address must come from X-Forwarded-For while the
// direct remote address remains the physical connection's address.
TEST_F(HttpConnectionManagerImplTest, TestAccessLog) {
  static constexpr char local_address[] = "0.0.0.0";
  static constexpr char xff_address[] = "1.2.3.4";

  // stream_info.downstreamRemoteAddress will infer the address from request
  // headers instead of the physical connection
  use_remote_address_ = false;
  setup(false, "");

  std::shared_ptr<MockStreamDecoderFilter> filter(new NiceMock<MockStreamDecoderFilter>());
  std::shared_ptr<AccessLog::MockInstance> handler(new NiceMock<AccessLog::MockInstance>());

  // Install both the decoder filter and the access-log handler on every
  // new stream.
  EXPECT_CALL(filter_factory_, createFilterChain(_))
      .WillOnce(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void {
        callbacks.addStreamDecoderFilter(filter);
        callbacks.addAccessLogHandler(handler);
      }));

  // The log callback carries the assertions: address provenance (XFF vs.
  // physical connection) and response metadata.
  EXPECT_CALL(*handler, log(_, _, _, _))
      .WillOnce(Invoke([](const HeaderMap*, const HeaderMap*, const HeaderMap*,
                          const StreamInfo::StreamInfo& stream_info) {
        EXPECT_TRUE(stream_info.responseCode());
        EXPECT_EQ(stream_info.responseCode().value(), uint32_t(200));
        EXPECT_NE(nullptr, stream_info.downstreamLocalAddress());
        EXPECT_NE(nullptr, stream_info.downstreamRemoteAddress());
        EXPECT_NE(nullptr, stream_info.downstreamDirectRemoteAddress());
        EXPECT_NE(nullptr, stream_info.routeEntry());

        EXPECT_EQ(stream_info.downstreamRemoteAddress()->ip()->addressAsString(), xff_address);
        EXPECT_EQ(stream_info.downstreamDirectRemoteAddress()->ip()->addressAsString(),
                  local_address);
      }));

  NiceMock<MockResponseEncoder> encoder;
  // Drive one full request/response through the codec: decode headers,
  // immediately encode a 200, and consume the fake wire bytes.
  EXPECT_CALL(*codec_, dispatch(_))
      .WillRepeatedly(Invoke([&](Buffer::Instance& data) -> Http::Status {
        RequestDecoder* decoder = &conn_manager_->newStream(encoder);

        RequestHeaderMapPtr headers{
            new TestRequestHeaderMapImpl{{":method", "GET"},
                                         {":authority", "host"},
                                         {":path", "/"},
                                         {"x-forwarded-for", xff_address},
                                         {"x-request-id", "125a4afb-6f55-a4ba-ad80-413f09f48a28"}}};
        decoder->decodeHeaders(std::move(headers), true);

        ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}};
        filter->callbacks_->encodeHeaders(std::move(response_headers), true);

        data.drain(4);
        return Http::okStatus();
      }));

  Buffer::OwnedImpl fake_input("1234");
  conn_manager_->onData(fake_input, false);
}
| 0 |
[
"CWE-400"
] |
envoy
|
0e49a495826ea9e29134c1bd54fdeb31a034f40c
| 300,840,432,525,598,600,000,000,000,000,000,000,000 | 56 |
http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]>
|
/*
 * Store a copy of the peer hostname (used for the ServerName extension
 * and certificate verification).
 *
 * \param ssl       SSL context (assumed zero-initialized by ssl_init(),
 *                  so ssl->hostname is NULL before the first call)
 * \param hostname  NUL-terminated hostname; must not be NULL
 *
 * \return 0 on success, POLARSSL_ERR_SSL_BAD_INPUT_DATA if hostname is
 *         NULL, or POLARSSL_ERR_SSL_MALLOC_FAILED if allocation fails
 */
int ssl_set_hostname( ssl_context *ssl, const char *hostname )
{
    size_t hostname_len;
    unsigned char *copy;

    if( hostname == NULL )
        return( POLARSSL_ERR_SSL_BAD_INPUT_DATA );

    hostname_len = strlen( hostname );

    /* Allocate and fill the copy before touching the context, so a
     * malloc failure leaves ssl->hostname/hostname_len consistent
     * (the original code left hostname_len set with hostname == NULL). */
    copy = (unsigned char *) malloc( hostname_len + 1 );
    if( copy == NULL )
        return( POLARSSL_ERR_SSL_MALLOC_FAILED );

    /* Copy including the terminating NUL. */
    memcpy( copy, hostname, hostname_len + 1 );

    /* Free any hostname stored by a previous call so repeated calls on
     * the same context do not leak memory. */
    if( ssl->hostname != NULL )
        free( ssl->hostname );

    ssl->hostname = copy;
    ssl->hostname_len = hostname_len;

    return( 0 );
}
| 0 |
[
"CWE-20"
] |
polarssl
|
1922a4e6aade7b1d685af19d4d9339ddb5c02859
| 244,839,080,251,152,080,000,000,000,000,000,000,000 | 18 |
ssl_parse_certificate() now calls x509parse_crt_der() directly
|
/*
 * ioctl handler for pipes.
 *
 * FIONREAD: report (via *arg) the number of readable bytes currently
 * buffered, summed over the occupied ring slots while holding the pipe
 * lock so the head/tail snapshot is consistent.
 *
 * With CONFIG_WATCH_QUEUE, additionally supports resizing a watch
 * queue's ring and installing a notification filter.
 */
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct pipe_inode_info *pipe = filp->private_data;
	unsigned int count, head, tail, mask;

	switch (cmd) {
	case FIONREAD:
		__pipe_lock(pipe);
		count = 0;
		head = pipe->head;
		tail = pipe->tail;
		/* ring_size is used as a power-of-two index mask here --
		 * TODO confirm against the allocation site. */
		mask = pipe->ring_size - 1;

		/* Sum the lengths of all occupied buffers. */
		while (tail != head) {
			count += pipe->bufs[tail & mask].len;
			tail++;
		}
		__pipe_unlock(pipe);

		return put_user(count, (int __user *)arg);

#ifdef CONFIG_WATCH_QUEUE
	case IOC_WATCH_QUEUE_SET_SIZE: {
		int ret;
		__pipe_lock(pipe);
		ret = watch_queue_set_size(pipe, arg);
		__pipe_unlock(pipe);
		return ret;
	}

	case IOC_WATCH_QUEUE_SET_FILTER:
		return watch_queue_set_filter(
			pipe, (struct watch_notification_filter __user *)arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
}
| 0 |
[
"CWE-362"
] |
linux
|
189b0ddc245139af81198d1a3637cac74f96e13a
| 16,770,024,936,782,662,000,000,000,000,000,000,000 | 39 |
pipe: Fix missing lock in pipe_resize_ring()
pipe_resize_ring() needs to take the pipe->rd_wait.lock spinlock to
prevent post_one_notification() from trying to insert into the ring
whilst the ring is being replaced.
The occupancy check must be done after the lock is taken, and the lock
must be taken after the new ring is allocated.
The bug can lead to an oops looking something like:
BUG: KASAN: use-after-free in post_one_notification.isra.0+0x62e/0x840
Read of size 4 at addr ffff88801cc72a70 by task poc/27196
...
Call Trace:
post_one_notification.isra.0+0x62e/0x840
__post_watch_notification+0x3b7/0x650
key_create_or_update+0xb8b/0xd20
__do_sys_add_key+0x175/0x340
__x64_sys_add_key+0xbe/0x140
do_syscall_64+0x5c/0xc0
entry_SYSCALL_64_after_hwframe+0x44/0xae
Reported by Selim Enes Karaduman @Enesdex working with Trend Micro Zero
Day Initiative.
Fixes: c73be61cede5 ("pipe: Add general notification queue support")
Reported-by: [email protected] # ZDI-CAN-17291
Signed-off-by: David Howells <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
win_exchange(long Prenum)
{
    /* Exchange the current window with another window.
     *
     * With a count, swap with the Prenum'th window in the current
     * row/column; otherwise swap with the next window (or the previous
     * one when current is last).  Only leaf windows can be exchanged,
     * not frames containing multiple windows. */
    frame_T	*frp;
    frame_T	*frp2;
    win_T	*wp;
    win_T	*wp2;
    int		temp;

    if (ERROR_IF_ANY_POPUP_WINDOW)
	return;
    if (ONE_WINDOW)	    // just one window
    {
	beep_flush();
	return;
    }

#ifdef FEAT_GUI
    need_mouse_correct = TRUE;
#endif

    /*
     * find window to exchange with
     */
    if (Prenum)
    {
	frp = curwin->w_frame->fr_parent->fr_child;
	while (frp != NULL && --Prenum > 0)
	    frp = frp->fr_next;
    }
    else if (curwin->w_frame->fr_next != NULL)	// Swap with next
	frp = curwin->w_frame->fr_next;
    else    // Swap last window in row/col with previous
	frp = curwin->w_frame->fr_prev;

    // We can only exchange a window with another window, not with a frame
    // containing windows.
    if (frp == NULL || frp->fr_win == NULL || frp->fr_win == curwin)
	return;
    wp = frp->fr_win;

    /*
     * 1. remove curwin from the list. Remember after which window it was in wp2
     * 2. insert curwin before wp in the list
     * if wp != wp2
     *    3. remove wp from the list
     *    4. insert wp after wp2
     * 5. exchange the status line height and vsep width.
     */
    wp2 = curwin->w_prev;
    frp2 = curwin->w_frame->fr_prev;
    if (wp->w_prev != curwin)
    {
	win_remove(curwin, NULL);
	frame_remove(curwin->w_frame);
	win_append(wp->w_prev, curwin);
	frame_insert(frp, curwin->w_frame);
    }
    if (wp != wp2)
    {
	win_remove(wp, NULL);
	frame_remove(wp->w_frame);
	win_append(wp2, wp);
	if (frp2 == NULL)
	    frame_insert(wp->w_frame->fr_parent->fr_child, wp->w_frame);
	else
	    frame_append(frp2, wp->w_frame);
    }
    // Swap the decorations (status line height, vertical separator
    // width) so each window keeps the look of its new position.
    temp = curwin->w_status_height;
    curwin->w_status_height = wp->w_status_height;
    wp->w_status_height = temp;
    temp = curwin->w_vsep_width;
    curwin->w_vsep_width = wp->w_vsep_width;
    wp->w_vsep_width = temp;

    frame_fix_height(curwin);
    frame_fix_height(wp);
    frame_fix_width(curwin);
    frame_fix_width(wp);

    (void)win_comp_pos();		// recompute window positions

    // NOTE(review): entering the other window while Visual mode is
    // active can leave the Visual area referring to the previous
    // buffer (reported as ml_get errors) -- confirm the Visual state
    // is corrected when the buffer changes.
    win_enter(wp, TRUE);
    redraw_all_later(NOT_VALID);
}
| 1 |
[
"CWE-703",
"CWE-125"
] |
vim
|
05b27615481e72e3b338bb12990fb3e0c2ecc2a9
| 285,360,446,716,566,540,000,000,000,000,000,000,000 | 84 |
patch 8.2.4154: ml_get error when exchanging windows in Visual mode
Problem: ml_get error when exchanging windows in Visual mode.
Solution: Correct end of Visual area when entering another buffer.
|
/* Rebase a virtual address by the object's base-address shift; a NULL
 * object leaves the address unchanged. */
static ut64 binobj_a2b(RBinObject *o, ut64 addr) {
	if (!o) {
		return addr;
	}
	return addr + o->baddr_shift;
}
| 0 |
[
"CWE-125"
] |
radare2
|
d31c4d3cbdbe01ea3ded16a584de94149ecd31d9
| 197,745,870,594,662,900,000,000,000,000,000,000,000 | 3 |
Fix #8748 - Fix oobread on string search
|
/* Close handler for the temp stream wrapper.
 *
 * Closes the enclosed inner stream (preserving its OS handle when
 * close_handle is false), releases the cached metadata zval, frees the
 * wrapper state, and returns the inner stream's close result (0 when
 * there is no inner stream). */
static int php_stream_temp_close(php_stream *stream, int close_handle TSRMLS_DC)
{
	php_stream_temp_data *ts = (php_stream_temp_data*)stream->abstract;
	int ret = 0;

	assert(ts != NULL);

	if (ts->innerstream) {
		int free_flags = PHP_STREAM_FREE_CLOSE;

		if (!close_handle) {
			free_flags |= PHP_STREAM_FREE_PRESERVE_HANDLE;
		}
		ret = php_stream_free_enclosed(ts->innerstream, free_flags);
	}

	if (ts->meta) {
		zval_ptr_dtor(&ts->meta);
	}

	efree(ts);

	return ret;
}
| 1 |
[
"CWE-20"
] |
php-src
|
6297a117d77fa3a0df2e21ca926a92c231819cd5
| 315,460,441,278,345,400,000,000,000,000,000,000,000 | 21 |
Fixed bug #71323 - Output of stream_get_meta_data can be falsified by its input
|
callbacks_quit_activate (GtkMenuItem *menuitem,
                                               gpointer user_data)
{
	/* Quit menu handler.
	 *
	 * Prompts if any loaded layer has unsaved changes; on confirmation
	 * persists the background color and main-window geometry to
	 * GSettings, unloads all layers, and exits the GTK main loop.
	 * Returns TRUE to stop event propagation when the user cancels. */
	gboolean layers_dirty = FALSE;
	gint idx;

	/* Scan loaded layers for unsaved modifications. */
	for (idx = 0; idx<=mainProject->last_loaded; idx++) {
		if (mainProject->file[idx] == NULL) break;
		layers_dirty = layers_dirty || mainProject->file[idx]->layer_dirty;
	}

	if (layers_dirty &&
			!interface_get_alert_dialog_response(
				_("Do you want to close all open layers and quit the program?"),
				_("Quitting the program will cause any unsaved changes "
					"to be lost."),
				FALSE, NULL, GTK_STOCK_QUIT, GTK_STOCK_CANCEL)) {
		return TRUE; // stop propagation of the delete_event.
			// this would destroy the gui but not return from the gtk event loop.
	}

	/* Save background color */
	if (screen.settings && !screen.background_is_from_project) {
		guint clr;
		GdkColor *bg = &mainProject->background;

		/* Pack 16-bit-per-channel GdkColor into a 24-bit 0xRRGGBB. */
		clr = bg->red/257<<16 | bg->green/257<<8 | bg->blue/257;
		g_settings_set_uint (screen.settings, "background-color", clr);
	}

	/* Save main window size and postion */
	if (screen.settings) {
		GtkWindow *win = GTK_WINDOW(screen.win.topLevelWindow);
		gint32 xy[2];
		GVariant *var;
		gboolean is_max;

		is_max = FALSE != (GDK_WINDOW_STATE_MAXIMIZED & gdk_window_get_state (
					gtk_widget_get_window (GTK_WIDGET(win))));
		g_settings_set_boolean (screen.settings, "window-maximized", is_max);

		/* Size/position are only meaningful for a non-maximized window. */
		if (!is_max) {
			gtk_window_get_size (win, (gint *)xy, (gint *)(xy+1));
			var = g_variant_new_fixed_array (G_VARIANT_TYPE_INT32, xy, 2,
					sizeof (xy[0]));
			g_settings_set_value (screen.settings, "window-size", var);

			gtk_window_get_position (win, (gint *)xy, (gint *)(xy+1));
			var = g_variant_new_fixed_array (G_VARIANT_TYPE_INT32, xy, 2,
					sizeof (xy[0]));
			g_settings_set_value (screen.settings, "window-position", var);
		}
	}

	gerbv_unload_all_layers (mainProject);
	gtk_main_quit();

	return FALSE;
}
| 0 |
[
"CWE-200"
] |
gerbv
|
319a8af890e4d0a5c38e6d08f510da8eefc42537
| 97,713,386,613,313,080,000,000,000,000,000,000,000 | 59 |
Remove local alias to parameter array
Normalizing access to `gerbv_simplified_amacro_t::parameter` as a step to fix CVE-2021-40402
|
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
long i, ret;
unsigned ui;
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
ret = generic_ptrace_peekdata(child, addr, data);
break;
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
ret = -EIO;
if ((addr & 7) ||
addr > sizeof(struct user) - 7)
break;
switch (addr) {
case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
tmp = getreg(child, addr);
break;
case offsetof(struct user, u_debugreg[0]):
tmp = child->thread.debugreg0;
break;
case offsetof(struct user, u_debugreg[1]):
tmp = child->thread.debugreg1;
break;
case offsetof(struct user, u_debugreg[2]):
tmp = child->thread.debugreg2;
break;
case offsetof(struct user, u_debugreg[3]):
tmp = child->thread.debugreg3;
break;
case offsetof(struct user, u_debugreg[6]):
tmp = child->thread.debugreg6;
break;
case offsetof(struct user, u_debugreg[7]):
tmp = child->thread.debugreg7;
break;
default:
tmp = 0;
break;
}
ret = put_user(tmp,(unsigned long __user *) data);
break;
}
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
ret = generic_ptrace_pokedata(child, addr, data);
break;
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
{
int dsize = test_tsk_thread_flag(child, TIF_IA32) ? 3 : 7;
ret = -EIO;
if ((addr & 7) ||
addr > sizeof(struct user) - 7)
break;
switch (addr) {
case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
ret = putreg(child, addr, data);
break;
/* Disallows to set a breakpoint into the vsyscall */
case offsetof(struct user, u_debugreg[0]):
if (data >= TASK_SIZE_OF(child) - dsize) break;
child->thread.debugreg0 = data;
ret = 0;
break;
case offsetof(struct user, u_debugreg[1]):
if (data >= TASK_SIZE_OF(child) - dsize) break;
child->thread.debugreg1 = data;
ret = 0;
break;
case offsetof(struct user, u_debugreg[2]):
if (data >= TASK_SIZE_OF(child) - dsize) break;
child->thread.debugreg2 = data;
ret = 0;
break;
case offsetof(struct user, u_debugreg[3]):
if (data >= TASK_SIZE_OF(child) - dsize) break;
child->thread.debugreg3 = data;
ret = 0;
break;
case offsetof(struct user, u_debugreg[6]):
if (data >> 32)
break;
child->thread.debugreg6 = data;
ret = 0;
break;
case offsetof(struct user, u_debugreg[7]):
/* See arch/i386/kernel/ptrace.c for an explanation of
* this awkward check.*/
data &= ~DR_CONTROL_RESERVED;
for(i=0; i<4; i++)
if ((0x5554 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
break;
if (i == 4) {
child->thread.debugreg7 = data;
if (data)
set_tsk_thread_flag(child, TIF_DEBUG);
else
clear_tsk_thread_flag(child, TIF_DEBUG);
ret = 0;
}
break;
}
break;
}
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
case PTRACE_CONT: /* restart after signal. */
ret = -EIO;
if (!valid_signal(data))
break;
if (request == PTRACE_SYSCALL)
set_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
else
clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
child->exit_code = data;
/* make sure the single step bit is not set. */
clear_singlestep(child);
wake_up_process(child);
ret = 0;
break;
#ifdef CONFIG_IA32_EMULATION
/* This makes only sense with 32bit programs. Allow a
64bit debugger to fully examine them too. Better
don't use it against 64bit processes, use
PTRACE_ARCH_PRCTL instead. */
case PTRACE_SET_THREAD_AREA: {
struct user_desc __user *p;
int old;
p = (struct user_desc __user *)data;
get_user(old, &p->entry_number);
put_user(addr, &p->entry_number);
ret = do_set_thread_area(&child->thread, p);
put_user(old, &p->entry_number);
break;
case PTRACE_GET_THREAD_AREA:
p = (struct user_desc __user *)data;
get_user(old, &p->entry_number);
put_user(addr, &p->entry_number);
ret = do_get_thread_area(&child->thread, p);
put_user(old, &p->entry_number);
break;
}
#endif
/* normal 64bit interface to access TLS data.
Works just like arch_prctl, except that the arguments
are reversed. */
case PTRACE_ARCH_PRCTL:
ret = do_arch_prctl(child, data, addr);
break;
/*
* make the child exit. Best I can do is send it a sigkill.
* perhaps it should be put in the status that it wants to
* exit.
*/
case PTRACE_KILL:
ret = 0;
if (child->exit_state == EXIT_ZOMBIE) /* already dead */
break;
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
child->exit_code = SIGKILL;
/* make sure the single step bit is not set. */
clear_singlestep(child);
wake_up_process(child);
break;
case PTRACE_SINGLESTEP: /* set the trap flag. */
ret = -EIO;
if (!valid_signal(data))
break;
clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
set_singlestep(child);
child->exit_code = data;
/* give it a chance to run. */
wake_up_process(child);
ret = 0;
break;
case PTRACE_DETACH:
/* detach a process that was attached. */
ret = ptrace_detach(child, data);
break;
case PTRACE_GETREGS: { /* Get all gp regs from the child. */
if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
sizeof(struct user_regs_struct))) {
ret = -EIO;
break;
}
ret = 0;
for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
ret |= __put_user(getreg(child, ui),(unsigned long __user *) data);
data += sizeof(long);
}
break;
}
case PTRACE_SETREGS: { /* Set all gp regs in the child. */
unsigned long tmp;
if (!access_ok(VERIFY_READ, (unsigned __user *)data,
sizeof(struct user_regs_struct))) {
ret = -EIO;
break;
}
ret = 0;
for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
ret = __get_user(tmp, (unsigned long __user *) data);
if (ret)
break;
ret = putreg(child, ui, tmp);
if (ret)
break;
data += sizeof(long);
}
break;
}
case PTRACE_GETFPREGS: { /* Get the child extended FPU state. */
if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
sizeof(struct user_i387_struct))) {
ret = -EIO;
break;
}
ret = get_fpregs((struct user_i387_struct __user *)data, child);
break;
}
case PTRACE_SETFPREGS: { /* Set the child extended FPU state. */
if (!access_ok(VERIFY_READ, (unsigned __user *)data,
sizeof(struct user_i387_struct))) {
ret = -EIO;
break;
}
set_stopped_child_used_math(child);
ret = set_fpregs(child, (struct user_i387_struct __user *)data);
break;
}
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
| 0 |
[
"CWE-20"
] |
linux-2.6
|
29eb51101c02df517ca64ec472d7501127ad1da8
| 308,436,442,897,270,200,000,000,000,000,000,000,000 | 257 |
Handle bogus %cs selector in single-step instruction decoding
The code for LDT segment selectors was not robust in the face of a bogus
selector set in %cs via ptrace before the single-step was done.
Signed-off-by: Roland McGrath <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
String_Obj Parser::parse_url_function_string()
{
std::string prefix("");
if (lex< uri_prefix >()) {
prefix = std::string(lexed);
}
lex < optional_spaces >();
String_Obj url_string = parse_url_function_argument();
std::string suffix("");
if (lex< real_uri_suffix >()) {
suffix = std::string(lexed);
}
std::string uri("");
if (url_string) {
uri = url_string->to_string({ NESTED, 5 });
}
if (String_Schema* schema = Cast<String_Schema>(url_string)) {
String_Schema_Obj res = SASS_MEMORY_NEW(String_Schema, pstate);
res->append(SASS_MEMORY_NEW(String_Constant, pstate, prefix));
res->append(schema);
res->append(SASS_MEMORY_NEW(String_Constant, pstate, suffix));
return res;
} else {
std::string res = prefix + uri + suffix;
return SASS_MEMORY_NEW(String_Constant, pstate, res);
}
}
| 0 |
[
"CWE-674"
] |
libsass
|
f2db04883e5fff4e03777dcc1eb60d4373c45be1
| 131,850,000,263,222,000,000,000,000,000,000,000,000 | 31 |
Make `parse_css_variable_value` non-recursive
Fixes #2658 stack overflow
|
read_image(Gif_Reader *grr, Gif_Context *gfc, Gif_Image *gfi, int read_flags)
/* returns 0 on memory error */
{
uint8_t packed;
gfi->left = gifgetunsigned(grr);
gfi->top = gifgetunsigned(grr);
gfi->width = gifgetunsigned(grr);
gfi->height = gifgetunsigned(grr);
/* Mainline GIF processors (Firefox, etc.) process missing width (height)
as screen_width (screen_height). */
if (gfi->width == 0)
gfi->width = gfc->stream->screen_width;
if (gfi->height == 0)
gfi->height = gfc->stream->screen_height;
/* If still zero, error. */
if (gfi->width == 0 || gfi->height == 0) {
gif_read_error(gfc, 1, "image has zero width and/or height");
Gif_MakeImageEmpty(gfi);
read_flags = 0;
}
/* If position out of range, error. */
if ((unsigned) gfi->left + (unsigned) gfi->width > 0xFFFF
|| (unsigned) gfi->top + (unsigned) gfi->height > 0xFFFF) {
gif_read_error(gfc, 1, "image position and/or dimensions out of range");
Gif_MakeImageEmpty(gfi);
read_flags = 0;
}
GIF_DEBUG(("<%ux%u> ", gfi->width, gfi->height));
packed = gifgetbyte(grr);
if (packed & 0x80) { /* have a local color table */
int ncol = 1 << ((packed & 0x07) + 1);
gfi->local = read_color_table(ncol, grr);
if (!gfi->local) return 0;
gfi->local->refcount = 1;
}
gfi->interlace = (packed & 0x40) != 0;
/* Keep the compressed data if asked */
if (read_flags & GIF_READ_COMPRESSED) {
if (!read_compressed_image(gfi, grr, read_flags))
return 0;
if (read_flags & GIF_READ_UNCOMPRESSED) {
Gif_Reader new_grr;
make_data_reader(&new_grr, gfi->compressed, gfi->compressed_len);
if (!uncompress_image(gfc, gfi, &new_grr))
return 0;
}
} else if (read_flags & GIF_READ_UNCOMPRESSED) {
if (!uncompress_image(gfc, gfi, grr))
return 0;
} else {
/* skip over the image */
uint8_t buffer[GIF_MAX_BLOCK];
int i = gifgetbyte(grr);
while (i > 0) {
gifgetblock(buffer, i, grr);
i = gifgetbyte(grr);
}
}
return 1;
}
| 0 |
[
"CWE-416"
] |
gifsicle
|
81fd7823f6d9c85ab598bc850e40382068361185
| 230,919,041,842,247,750,000,000,000,000,000,000,000 | 67 |
Fix use-after-free problems reported in #114.
|
static int ldb_match_substring(struct ldb_context *ldb,
const struct ldb_message *msg,
const struct ldb_parse_tree *tree,
enum ldb_scope scope, bool *matched)
{
unsigned int i;
struct ldb_message_element *el;
el = ldb_msg_find_element(msg, tree->u.substring.attr);
if (el == NULL) {
*matched = false;
return LDB_SUCCESS;
}
for (i = 0; i < el->num_values; i++) {
int ret;
ret = ldb_wildcard_compare(ldb, tree, el->values[i], matched);
if (ret != LDB_SUCCESS) return ret;
if (*matched) return LDB_SUCCESS;
}
*matched = false;
return LDB_SUCCESS;
}
| 0 |
[
"CWE-189"
] |
samba
|
ec504dbf69636a554add1f3d5703dd6c3ad450b8
| 285,926,947,164,792,700,000,000,000,000,000,000,000 | 24 |
CVE-2015-3223: lib: ldb: Cope with canonicalise_fn returning string "", length 0.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11325
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Ralph Boehme <[email protected]>
|
GF_Err gf_isom_avc_set_inband_config(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex)
{
return gf_isom_avc_config_update_ex(the_file, trackNumber, DescriptionIndex, NULL, 3);
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
gpac
|
90dc7f853d31b0a4e9441cba97feccf36d8b69a4
| 265,798,775,835,343,030,000,000,000,000,000,000,000 | 4 |
fix some exploitable overflows (#994, #997)
|
void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1,
uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv,
int x_off, int y_off, int block_w, int block_h,
int width, int height, ptrdiff_t linesize,
vp8_mc_func mc_func[3][3])
{
uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
if (AV_RN32A(mv)) {
int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
int my = mv->y & 7, my_idx = subpel_idx[0][my];
x_off += mv->x >> 3;
y_off += mv->y >> 3;
// edge emulation
src1 += y_off * linesize + x_off;
src2 += y_off * linesize + x_off;
ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
src1 - my_idx * linesize - mx_idx,
EDGE_EMU_LINESIZE, linesize,
block_w + subpel_idx[1][mx],
block_h + subpel_idx[1][my],
x_off - mx_idx, y_off - my_idx, width, height);
src1 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);
s->vdsp.emulated_edge_mc(td->edge_emu_buffer,
src2 - my_idx * linesize - mx_idx,
EDGE_EMU_LINESIZE, linesize,
block_w + subpel_idx[1][mx],
block_h + subpel_idx[1][my],
x_off - mx_idx, y_off - my_idx, width, height);
src2 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
} else {
mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
}
} else {
ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
}
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
FFmpeg
|
6b5d3fb26fb4be48e4966e4b1d97c2165538d4ef
| 230,486,545,177,113,970,000,000,000,000,000,000,000 | 48 |
avcodec/webp: Always set pix_fmt
Fixes: out of array access
Fixes: 1434/clusterfuzz-testcase-minimized-6314998085189632
Fixes: 1435/clusterfuzz-testcase-minimized-6483783723253760
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg
Reviewed-by: "Ronald S. Bultje" <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
|
mrb_protect_error(mrb_state *mrb, mrb_protect_error_func *body, void *userdata, mrb_bool *error)
{
struct mrb_jmpbuf *prev_jmp = mrb->jmp;
struct mrb_jmpbuf c_jmp;
mrb_value result = mrb_nil_value();
int ai = mrb_gc_arena_save(mrb);
const struct mrb_context *c = mrb->c;
ptrdiff_t ci_index = c->ci - c->cibase;
if (error) { *error = FALSE; }
MRB_TRY(&c_jmp) {
mrb->jmp = &c_jmp;
result = body(mrb, userdata);
mrb->jmp = prev_jmp;
}
MRB_CATCH(&c_jmp) {
mrb->jmp = prev_jmp;
result = mrb_obj_value(mrb->exc);
mrb->exc = NULL;
if (error) { *error = TRUE; }
if (mrb->c == c) {
while (c->ci - c->cibase > ci_index) {
cipop(mrb);
}
}
else {
// It was probably switched by mrb_fiber_resume().
// Simply destroy all successive CINFO_DIRECTs once the fiber has been switched.
c = mrb->c;
while (c->ci > c->cibase && c->ci->cci == CINFO_DIRECT) {
cipop(mrb);
}
}
}
MRB_END_EXC(&c_jmp);
mrb_gc_arena_restore(mrb, ai);
mrb_gc_protect(mrb, result);
return result;
}
| 0 |
[
"CWE-122",
"CWE-787"
] |
mruby
|
47068ae07a5fa3aa9a1879cdfe98a9ce0f339299
| 135,701,025,568,067,530,000,000,000,000,000,000,000 | 41 |
vm.c: packed arguments length may be zero for `send` method.
|
reAnchorAny(Buffer *buf, char *re,
Anchor *(*anchorproc) (Buffer *, char *, char *, int, int))
{
Line *l;
char *p = NULL, *p1, *p2;
if (re == NULL || *re == '\0') {
return NULL;
}
if ((re = regexCompile(re, 1)) != NULL) {
return re;
}
for (l = MarkAllPages ? buf->firstLine : buf->topLine; l != NULL &&
(MarkAllPages || l->linenumber < buf->topLine->linenumber + LASTLINE);
l = l->next) {
if (p && l->bpos)
goto next_line;
p = l->lineBuf;
for (;;) {
if (regexMatch(p, &l->lineBuf[l->size] - p, p == l->lineBuf) == 1) {
matchedPosition(&p1, &p2);
p = reAnchorPos(buf, l, p1, p2, anchorproc);
}
else
break;
}
next_line:
if (MarkAllPages && l->next == NULL && buf->pagerSource &&
!(buf->bufferprop & BP_CLOSE))
getNextPage(buf, PagerMax);
}
return NULL;
}
| 0 |
[
"CWE-119"
] |
w3m
|
4e464819dd360ffd3d58fa2a89216fe413cfcc74
| 252,341,621,848,015,350,000,000,000,000,000,000,000 | 33 |
Prevent segfault due to buffer overflows in addMultirowsForm
Bug-Debian: https://github.com/tats/w3m/issues/21
Bug-Debian: https://github.com/tats/w3m/issues/26
|
void set_remote_locators(
const RemoteLocatorList& /*locators*/,
const NetworkFactory& /*network*/,
bool /*use_multicast*/)
{
}
| 0 |
[
"CWE-284"
] |
Fast-DDS
|
d2aeab37eb4fad4376b68ea4dfbbf285a2926384
| 274,552,405,749,878,020,000,000,000,000,000,000,000 | 6 |
check remote permissions (#1387)
* Refs 5346. Blackbox test
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. one-way string compare
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Do not add partition separator on last partition
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 5346. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Access control unit testing
It only covers Partition and Topic permissions
Signed-off-by: Iker Luengo <[email protected]>
* Refs #3680. Fix partition check on Permissions plugin.
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Uncrustify
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix tests on mac
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Fix windows tests
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Avoid memory leak on test
Signed-off-by: Iker Luengo <[email protected]>
* Refs 3680. Proxy data mocks should not return temporary objects
Signed-off-by: Iker Luengo <[email protected]>
* refs 3680. uncrustify
Signed-off-by: Iker Luengo <[email protected]>
Co-authored-by: Miguel Company <[email protected]>
|
bool asn1_read_ContextSimple(struct asn1_data *data, uint8_t num, DATA_BLOB *blob)
{
int len;
ZERO_STRUCTP(blob);
if (!asn1_start_tag(data, ASN1_CONTEXT_SIMPLE(num))) return false;
len = asn1_tag_remaining(data);
if (len < 0) {
data->has_error = true;
return false;
}
*blob = data_blob(NULL, len);
if ((len != 0) && (!blob->data)) {
data->has_error = true;
return false;
}
if (!asn1_read(data, blob->data, len)) return false;
return asn1_end_tag(data);
}
| 0 |
[
"CWE-399"
] |
samba
|
9d989c9dd7a5b92d0c5d65287935471b83b6e884
| 338,409,496,451,875,360,000,000,000,000,000,000,000 | 18 |
CVE-2015-7540: lib: util: Check *every* asn1 return call and early return.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=9187
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Volker Lendecke <[email protected]>
Autobuild-User(master): Jeremy Allison <[email protected]>
Autobuild-Date(master): Fri Sep 19 01:29:00 CEST 2014 on sn-devel-104
(cherry picked from commit b9d3fd4cc551df78a7b066ee8ce43bbaa3ff994a)
|
eafnosupport_fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
struct fib6_result *res, int flags)
{
return -EAFNOSUPPORT;
}
| 0 |
[] |
net
|
6c8991f41546c3c472503dff1ea9daaddf9331c2
| 17,726,646,360,925,700,000,000,000,000,000,000,000 | 5 |
net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
ipv6_stub uses the ip6_dst_lookup function to allow other modules to
perform IPv6 lookups. However, this function skips the XFRM layer
entirely.
All users of ipv6_stub->ip6_dst_lookup use ip_route_output_flow (via the
ip_route_output_key and ip_route_output helpers) for their IPv4 lookups,
which calls xfrm_lookup_route(). This patch fixes this inconsistent
behavior by switching the stub to ip6_dst_lookup_flow, which also calls
xfrm_lookup_route().
This requires some changes in all the callers, as these two functions
take different arguments and have different return types.
Fixes: 5f81bd2e5d80 ("ipv6: export a stub for IPv6 symbols used by vxlan")
Reported-by: Xiumei Mu <[email protected]>
Signed-off-by: Sabrina Dubroca <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
TEST_P(Http2CodecImplTest, SmallMetadataVecTest) {
allow_metadata_ = true;
initialize();
// Generates a valid stream_id by sending a request header.
TestHeaderMapImpl request_headers;
HttpTestUtility::addDefaultHeaders(request_headers);
EXPECT_CALL(request_decoder_, decodeHeaders_(_, true));
request_encoder_->encodeHeaders(request_headers, true);
MetadataMapVector metadata_map_vector;
const int size = 10;
for (int i = 0; i < size; i++) {
MetadataMap metadata_map = {
{"header_key1", "header_value1"},
{"header_key2", "header_value2"},
{"header_key3", "header_value3"},
{"header_key4", "header_value4"},
};
MetadataMapPtr metadata_map_ptr = std::make_unique<MetadataMap>(metadata_map);
metadata_map_vector.push_back(std::move(metadata_map_ptr));
}
EXPECT_CALL(request_decoder_, decodeMetadata_(_)).Times(size);
request_encoder_->encodeMetadata(metadata_map_vector);
EXPECT_CALL(response_decoder_, decodeMetadata_(_)).Times(size);
response_encoder_->encodeMetadata(metadata_map_vector);
}
| 0 |
[
"CWE-400",
"CWE-703"
] |
envoy
|
afc39bea36fd436e54262f150c009e8d72db5014
| 277,467,897,981,279,200,000,000,000,000,000,000,000 | 29 |
Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]>
|
static OFCondition echoSCP( T_ASC_Association * assoc, T_DIMSE_Message * msg, T_ASC_PresentationContextID presID)
{
OFString temp_str;
// assign the actual information of the C-Echo-RQ command to a local variable
T_DIMSE_C_EchoRQ *req = &msg->msg.CEchoRQ;
if (storescpLogger.isEnabledFor(OFLogger::DEBUG_LOG_LEVEL))
{
OFLOG_INFO(storescpLogger, "Received Echo Request");
OFLOG_DEBUG(storescpLogger, DIMSE_dumpMessage(temp_str, *req, DIMSE_INCOMING, NULL, presID));
} else {
OFLOG_INFO(storescpLogger, "Received Echo Request (MsgID " << req->MessageID << ")");
}
/* the echo succeeded !! */
OFCondition cond = DIMSE_sendEchoResponse(assoc, presID, req, STATUS_Success, NULL);
if (cond.bad())
{
OFLOG_ERROR(storescpLogger, "Echo SCP Failed: " << DimseCondition::dump(temp_str, cond));
}
return cond;
}
| 0 |
[
"CWE-264"
] |
dcmtk
|
beaf5a5c24101daeeafa48c375120b16197c9e95
| 316,700,482,153,018,000,000,000,000,000,000,000,000 | 21 |
Make sure to handle setuid() return code properly.
In some tools the return value of setuid() is not checked. In the worst
case this could lead to privilege escalation since the process does not
give up its root privileges and continue as root.
|
static int __meminit __add_section(int nid, struct zone *zone,
unsigned long phys_start_pfn)
{
int nr_pages = PAGES_PER_SECTION;
int ret;
if (pfn_valid(phys_start_pfn))
return -EEXIST;
ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
if (ret < 0)
return ret;
ret = __add_zone(zone, phys_start_pfn);
if (ret < 0)
return ret;
return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}
| 0 |
[] |
linux-2.6
|
08dff7b7d629807dbb1f398c68dd9cd58dd657a1
| 275,050,514,486,283,360,000,000,000,000,000,000,000 | 21 |
mm/hotplug: correctly add new zone to all other nodes' zone lists
When online_pages() is called to add new memory to an empty zone, it
rebuilds all zone lists by calling build_all_zonelists(). But there's a
bug which prevents the new zone to be added to other nodes' zone lists.
online_pages() {
build_all_zonelists()
.....
node_set_state(zone_to_nid(zone), N_HIGH_MEMORY)
}
Here the node of the zone is put into N_HIGH_MEMORY state after calling
build_all_zonelists(), but build_all_zonelists() only adds zones from
nodes in N_HIGH_MEMORY state to the fallback zone lists.
build_all_zonelists()
->__build_all_zonelists()
->build_zonelists()
->find_next_best_node()
->for_each_node_state(n, N_HIGH_MEMORY)
So memory in the new zone will never be used by other nodes, and it may
cause strange behavor when system is under memory pressure. So put node
into N_HIGH_MEMORY state before calling build_all_zonelists().
Signed-off-by: Jianguo Wu <[email protected]>
Signed-off-by: Jiang Liu <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Rusty Russell <[email protected]>
Cc: Yinghai Lu <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Keping Chen <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
d_lite_mday(VALUE self)
{
get_d1(self);
return INT2FIX(m_mday(dat));
}
| 0 |
[] |
date
|
3959accef8da5c128f8a8e2fd54e932a4fb253b0
| 270,614,006,363,594,770,000,000,000,000,000,000,000 | 5 |
Add length limit option for methods that parses date strings
`Date.parse` now raises an ArgumentError when a given date string is
longer than 128. You can configure the limit by giving `limit` keyword
arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`,
the limit is disabled.
Not only `Date.parse` but also the following methods are changed.
* Date._parse
* Date.parse
* DateTime.parse
* Date._iso8601
* Date.iso8601
* DateTime.iso8601
* Date._rfc3339
* Date.rfc3339
* DateTime.rfc3339
* Date._xmlschema
* Date.xmlschema
* DateTime.xmlschema
* Date._rfc2822
* Date.rfc2822
* DateTime.rfc2822
* Date._rfc822
* Date.rfc822
* DateTime.rfc822
* Date._jisx0301
* Date.jisx0301
* DateTime.jisx0301
|
ip_ruby_cmd(clientData, interp, argc, argv)
ClientData clientData;
Tcl_Interp *interp;
int argc;
char *argv[];
#endif
{
volatile VALUE receiver;
volatile ID method;
volatile VALUE args;
char *str;
int i;
int len;
struct cmd_body_arg *arg;
int thr_crit_bup;
VALUE old_gc;
int code;
if (interp == (Tcl_Interp*)NULL) {
rbtk_pending_exception = rb_exc_new2(rb_eRuntimeError,
"IP is deleted");
return TCL_ERROR;
}
if (argc < 3) {
#if 0
rb_raise(rb_eArgError, "too few arguments");
#else
Tcl_ResetResult(interp);
Tcl_AppendResult(interp, "too few arguments", (char *)NULL);
rbtk_pending_exception = rb_exc_new2(rb_eArgError,
Tcl_GetStringResult(interp));
return TCL_ERROR;
#endif
}
/* get arguments from Tcl objects */
thr_crit_bup = rb_thread_critical;
rb_thread_critical = Qtrue;
old_gc = rb_gc_disable();
/* get receiver */
#if TCL_MAJOR_VERSION >= 8
str = Tcl_GetStringFromObj(argv[1], &len);
#else /* TCL_MAJOR_VERSION < 8 */
str = argv[1];
#endif
DUMP2("receiver:%s",str);
/* receiver = rb_protect(ip_ruby_cmd_receiver_get, (VALUE)str, &code); */
receiver = ip_ruby_cmd_receiver_get(str);
if (NIL_P(receiver)) {
#if 0
rb_raise(rb_eArgError,
"unknown class/module/global-variable '%s'", str);
#else
Tcl_ResetResult(interp);
Tcl_AppendResult(interp, "unknown class/module/global-variable '",
str, "'", (char *)NULL);
rbtk_pending_exception = rb_exc_new2(rb_eArgError,
Tcl_GetStringResult(interp));
if (old_gc == Qfalse) rb_gc_enable();
return TCL_ERROR;
#endif
}
/* get metrhod */
#if TCL_MAJOR_VERSION >= 8
str = Tcl_GetStringFromObj(argv[2], &len);
#else /* TCL_MAJOR_VERSION < 8 */
str = argv[2];
#endif
method = rb_intern(str);
/* get args */
args = rb_ary_new2(argc - 2);
for(i = 3; i < argc; i++) {
VALUE s;
#if TCL_MAJOR_VERSION >= 8
str = Tcl_GetStringFromObj(argv[i], &len);
s = rb_tainted_str_new(str, len);
#else /* TCL_MAJOR_VERSION < 8 */
str = argv[i];
s = rb_tainted_str_new2(str);
#endif
DUMP2("arg:%s",str);
rb_ary_push(args, s);
}
if (old_gc == Qfalse) rb_gc_enable();
rb_thread_critical = thr_crit_bup;
/* allocate */
arg = ALLOC(struct cmd_body_arg);
/* arg = RbTk_ALLOC_N(struct cmd_body_arg, 1); */
arg->receiver = receiver;
arg->method = method;
arg->args = args;
/* evaluate the argument string by ruby */
code = tcl_protect(interp, ip_ruby_cmd_core, (VALUE)arg);
xfree(arg);
/* ckfree((char*)arg); */
return code;
}
| 0 |
[] |
tk
|
d098136e3f62a4879a7d7cd34bbd50f482ba3331
| 96,003,163,673,889,890,000,000,000,000,000,000,000 | 107 |
tcltklib.c: use StringValueCStr [ci skip]
* ext/tk/tcltklib.c (set_max_block_time, tcl_protect_core,
ip_init, ip_create_slave_core, get_obj_from_str,
ip_cancel_eval_core, lib_set_system_encoding,
alloc_invoke_arguments, lib_merge_tklist): use StringValueCStr
instead of StringValuePtr for values to be passed to Tcl
interperter.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@55842 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
|
static int jit_compile(pcre2_code *code, sljit_u32 mode)
{
pcre2_real_code *re = (pcre2_real_code *)code;
struct sljit_compiler *compiler;
backtrack_common rootbacktrack;
compiler_common common_data;
compiler_common *common = &common_data;
const sljit_u8 *tables = re->tables;
void *allocator_data = &re->memctl;
int private_data_size;
PCRE2_SPTR ccend;
executable_functions *functions;
void *executable_func;
sljit_uw executable_size;
sljit_uw total_length;
struct sljit_label *mainloop_label = NULL;
struct sljit_label *continue_match_label;
struct sljit_label *empty_match_found_label = NULL;
struct sljit_label *empty_match_backtrack_label = NULL;
struct sljit_label *reset_match_label;
struct sljit_label *quit_label;
struct sljit_jump *jump;
struct sljit_jump *minlength_check_failed = NULL;
struct sljit_jump *empty_match = NULL;
struct sljit_jump *end_anchor_failed = NULL;
jump_list *reqcu_not_found = NULL;
SLJIT_ASSERT(tables);
#if HAS_VIRTUAL_REGISTERS == 1
SLJIT_ASSERT(sljit_get_register_index(TMP3) < 0 && sljit_get_register_index(ARGUMENTS) < 0 && sljit_get_register_index(RETURN_ADDR) < 0);
#elif HAS_VIRTUAL_REGISTERS == 0
SLJIT_ASSERT(sljit_get_register_index(TMP3) >= 0 && sljit_get_register_index(ARGUMENTS) >= 0 && sljit_get_register_index(RETURN_ADDR) >= 0);
#else
#error "Invalid value for HAS_VIRTUAL_REGISTERS"
#endif
memset(&rootbacktrack, 0, sizeof(backtrack_common));
memset(common, 0, sizeof(compiler_common));
common->re = re;
common->name_table = (PCRE2_SPTR)((uint8_t *)re + sizeof(pcre2_real_code));
rootbacktrack.cc = common->name_table + re->name_count * re->name_entry_size;
#ifdef SUPPORT_UNICODE
common->invalid_utf = (mode & PCRE2_JIT_INVALID_UTF) != 0;
#endif /* SUPPORT_UNICODE */
mode &= ~PUBLIC_JIT_COMPILE_CONFIGURATION_OPTIONS;
common->start = rootbacktrack.cc;
common->read_only_data_head = NULL;
common->fcc = tables + fcc_offset;
common->lcc = (sljit_sw)(tables + lcc_offset);
common->mode = mode;
common->might_be_empty = (re->minlength == 0) || (re->flags & PCRE2_MATCH_EMPTY);
common->allow_empty_partial = (re->max_lookbehind > 0) || (re->flags & PCRE2_MATCH_EMPTY);
common->nltype = NLTYPE_FIXED;
switch(re->newline_convention)
{
case PCRE2_NEWLINE_CR: common->newline = CHAR_CR; break;
case PCRE2_NEWLINE_LF: common->newline = CHAR_NL; break;
case PCRE2_NEWLINE_CRLF: common->newline = (CHAR_CR << 8) | CHAR_NL; break;
case PCRE2_NEWLINE_ANY: common->newline = (CHAR_CR << 8) | CHAR_NL; common->nltype = NLTYPE_ANY; break;
case PCRE2_NEWLINE_ANYCRLF: common->newline = (CHAR_CR << 8) | CHAR_NL; common->nltype = NLTYPE_ANYCRLF; break;
case PCRE2_NEWLINE_NUL: common->newline = CHAR_NUL; break;
default: return PCRE2_ERROR_INTERNAL;
}
common->nlmax = READ_CHAR_MAX;
common->nlmin = 0;
if (re->bsr_convention == PCRE2_BSR_UNICODE)
common->bsr_nltype = NLTYPE_ANY;
else if (re->bsr_convention == PCRE2_BSR_ANYCRLF)
common->bsr_nltype = NLTYPE_ANYCRLF;
else
{
#ifdef BSR_ANYCRLF
common->bsr_nltype = NLTYPE_ANYCRLF;
#else
common->bsr_nltype = NLTYPE_ANY;
#endif
}
common->bsr_nlmax = READ_CHAR_MAX;
common->bsr_nlmin = 0;
common->endonly = (re->overall_options & PCRE2_DOLLAR_ENDONLY) != 0;
common->ctypes = (sljit_sw)(tables + ctypes_offset);
common->name_count = re->name_count;
common->name_entry_size = re->name_entry_size;
common->unset_backref = (re->overall_options & PCRE2_MATCH_UNSET_BACKREF) != 0;
common->alt_circumflex = (re->overall_options & PCRE2_ALT_CIRCUMFLEX) != 0;
#ifdef SUPPORT_UNICODE
/* PCRE_UTF[16|32] have the same value as PCRE_UTF8. */
common->utf = (re->overall_options & PCRE2_UTF) != 0;
common->ucp = (re->overall_options & PCRE2_UCP) != 0;
if (common->utf)
{
if (common->nltype == NLTYPE_ANY)
common->nlmax = 0x2029;
else if (common->nltype == NLTYPE_ANYCRLF)
common->nlmax = (CHAR_CR > CHAR_NL) ? CHAR_CR : CHAR_NL;
else
{
/* We only care about the first newline character. */
common->nlmax = common->newline & 0xff;
}
if (common->nltype == NLTYPE_FIXED)
common->nlmin = common->newline & 0xff;
else
common->nlmin = (CHAR_CR < CHAR_NL) ? CHAR_CR : CHAR_NL;
if (common->bsr_nltype == NLTYPE_ANY)
common->bsr_nlmax = 0x2029;
else
common->bsr_nlmax = (CHAR_CR > CHAR_NL) ? CHAR_CR : CHAR_NL;
common->bsr_nlmin = (CHAR_CR < CHAR_NL) ? CHAR_CR : CHAR_NL;
}
else
common->invalid_utf = FALSE;
#endif /* SUPPORT_UNICODE */
ccend = bracketend(common->start);
/* Calculate the local space size on the stack. */
common->ovector_start = LIMIT_MATCH + sizeof(sljit_sw);
common->optimized_cbracket = (sljit_u8 *)SLJIT_MALLOC(re->top_bracket + 1, allocator_data);
if (!common->optimized_cbracket)
return PCRE2_ERROR_NOMEMORY;
#if defined DEBUG_FORCE_UNOPTIMIZED_CBRAS && DEBUG_FORCE_UNOPTIMIZED_CBRAS == 1
memset(common->optimized_cbracket, 0, re->top_bracket + 1);
#else
memset(common->optimized_cbracket, 1, re->top_bracket + 1);
#endif
SLJIT_ASSERT(*common->start == OP_BRA && ccend[-(1 + LINK_SIZE)] == OP_KET);
#if defined DEBUG_FORCE_UNOPTIMIZED_CBRAS && DEBUG_FORCE_UNOPTIMIZED_CBRAS == 2
common->capture_last_ptr = common->ovector_start;
common->ovector_start += sizeof(sljit_sw);
#endif
if (!check_opcode_types(common, common->start, ccend))
{
SLJIT_FREE(common->optimized_cbracket, allocator_data);
return PCRE2_ERROR_NOMEMORY;
}
/* Checking flags and updating ovector_start. */
if (mode == PCRE2_JIT_COMPLETE && (re->flags & PCRE2_LASTSET) != 0 && (re->overall_options & PCRE2_NO_START_OPTIMIZE) == 0)
{
common->req_char_ptr = common->ovector_start;
common->ovector_start += sizeof(sljit_sw);
}
if (mode != PCRE2_JIT_COMPLETE)
{
common->start_used_ptr = common->ovector_start;
common->ovector_start += sizeof(sljit_sw);
if (mode == PCRE2_JIT_PARTIAL_SOFT)
{
common->hit_start = common->ovector_start;
common->ovector_start += sizeof(sljit_sw);
}
}
if ((re->overall_options & (PCRE2_FIRSTLINE | PCRE2_USE_OFFSET_LIMIT)) != 0)
{
common->match_end_ptr = common->ovector_start;
common->ovector_start += sizeof(sljit_sw);
}
#if defined DEBUG_FORCE_CONTROL_HEAD && DEBUG_FORCE_CONTROL_HEAD
common->control_head_ptr = 1;
#endif
if (common->control_head_ptr != 0)
{
common->control_head_ptr = common->ovector_start;
common->ovector_start += sizeof(sljit_sw);
}
if (common->has_set_som)
{
/* Saving the real start pointer is necessary. */
common->start_ptr = common->ovector_start;
common->ovector_start += sizeof(sljit_sw);
}
/* Aligning ovector to even number of sljit words. */
if ((common->ovector_start & sizeof(sljit_sw)) != 0)
common->ovector_start += sizeof(sljit_sw);
if (common->start_ptr == 0)
common->start_ptr = OVECTOR(0);
/* Capturing brackets cannot be optimized if callouts are allowed. */
if (common->capture_last_ptr != 0)
memset(common->optimized_cbracket, 0, re->top_bracket + 1);
SLJIT_ASSERT(!(common->req_char_ptr != 0 && common->start_used_ptr != 0));
common->cbra_ptr = OVECTOR_START + (re->top_bracket + 1) * 2 * sizeof(sljit_sw);
total_length = ccend - common->start;
common->private_data_ptrs = (sljit_s32*)SLJIT_MALLOC(total_length * (sizeof(sljit_s32) + (common->has_then ? 1 : 0)), allocator_data);
if (!common->private_data_ptrs)
{
SLJIT_FREE(common->optimized_cbracket, allocator_data);
return PCRE2_ERROR_NOMEMORY;
}
memset(common->private_data_ptrs, 0, total_length * sizeof(sljit_s32));
private_data_size = common->cbra_ptr + (re->top_bracket + 1) * sizeof(sljit_sw);
if ((re->overall_options & PCRE2_ANCHORED) == 0 && (re->overall_options & PCRE2_NO_START_OPTIMIZE) == 0 && !common->has_skip_in_assert_back)
detect_early_fail(common, common->start, &private_data_size, 0, 0, TRUE);
set_private_data_ptrs(common, &private_data_size, ccend);
SLJIT_ASSERT(common->early_fail_start_ptr <= common->early_fail_end_ptr);
if (private_data_size > SLJIT_MAX_LOCAL_SIZE)
{
SLJIT_FREE(common->private_data_ptrs, allocator_data);
SLJIT_FREE(common->optimized_cbracket, allocator_data);
return PCRE2_ERROR_NOMEMORY;
}
if (common->has_then)
{
common->then_offsets = (sljit_u8 *)(common->private_data_ptrs + total_length);
memset(common->then_offsets, 0, total_length);
set_then_offsets(common, common->start, NULL);
}
compiler = sljit_create_compiler(allocator_data, NULL);
if (!compiler)
{
SLJIT_FREE(common->optimized_cbracket, allocator_data);
SLJIT_FREE(common->private_data_ptrs, allocator_data);
return PCRE2_ERROR_NOMEMORY;
}
common->compiler = compiler;
/* Main pcre2_jit_exec entry. */
SLJIT_ASSERT((private_data_size & (sizeof(sljit_sw) - 1)) == 0);
sljit_emit_enter(compiler, 0, SLJIT_ARGS1(W, W), 5, 5, 0, 0, private_data_size);
/* Register init. */
reset_ovector(common, (re->top_bracket + 1) * 2);
if (common->req_char_ptr != 0)
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->req_char_ptr, SLJIT_R0, 0);
OP1(SLJIT_MOV, ARGUMENTS, 0, SLJIT_S0, 0);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_S0, 0);
OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, str));
OP1(SLJIT_MOV, STR_END, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, end));
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, stack));
OP1(SLJIT_MOV_U32, TMP1, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, limit_match));
OP1(SLJIT_MOV, STACK_TOP, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, end));
OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(struct sljit_stack, start));
OP2(SLJIT_ADD, TMP1, 0, TMP1, 0, SLJIT_IMM, 1);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LIMIT_MATCH, TMP1, 0);
if (common->early_fail_start_ptr < common->early_fail_end_ptr)
reset_early_fail(common);
if (mode == PCRE2_JIT_PARTIAL_SOFT)
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->hit_start, SLJIT_IMM, -1);
if (common->mark_ptr != 0)
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->mark_ptr, SLJIT_IMM, 0);
if (common->control_head_ptr != 0)
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->control_head_ptr, SLJIT_IMM, 0);
/* Main part of the matching */
if ((re->overall_options & PCRE2_ANCHORED) == 0)
{
mainloop_label = mainloop_entry(common);
continue_match_label = LABEL();
/* Forward search if possible. */
if ((re->overall_options & PCRE2_NO_START_OPTIMIZE) == 0)
{
if (mode == PCRE2_JIT_COMPLETE && fast_forward_first_n_chars(common))
;
else if ((re->flags & PCRE2_FIRSTSET) != 0)
fast_forward_first_char(common);
else if ((re->flags & PCRE2_STARTLINE) != 0)
fast_forward_newline(common);
else if ((re->flags & PCRE2_FIRSTMAPSET) != 0)
fast_forward_start_bits(common);
}
}
else
continue_match_label = LABEL();
if (mode == PCRE2_JIT_COMPLETE && re->minlength > 0 && (re->overall_options & PCRE2_NO_START_OPTIMIZE) == 0)
{
OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_IMM, PCRE2_ERROR_NOMATCH);
OP2(SLJIT_ADD, TMP2, 0, STR_PTR, 0, SLJIT_IMM, IN_UCHARS(re->minlength));
minlength_check_failed = CMP(SLJIT_GREATER, TMP2, 0, STR_END, 0);
}
if (common->req_char_ptr != 0)
reqcu_not_found = search_requested_char(common, (PCRE2_UCHAR)(re->last_codeunit), (re->flags & PCRE2_LASTCASELESS) != 0, (re->flags & PCRE2_FIRSTSET) != 0);
/* Store the current STR_PTR in OVECTOR(0). */
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), OVECTOR(0), STR_PTR, 0);
/* Copy the limit of allowed recursions. */
OP1(SLJIT_MOV, COUNT_MATCH, 0, SLJIT_MEM1(SLJIT_SP), LIMIT_MATCH);
if (common->capture_last_ptr != 0)
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr, SLJIT_IMM, 0);
if (common->fast_forward_bc_ptr != NULL)
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), PRIVATE_DATA(common->fast_forward_bc_ptr + 1) >> 3, STR_PTR, 0);
if (common->start_ptr != OVECTOR(0))
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->start_ptr, STR_PTR, 0);
/* Copy the beginning of the string. */
if (mode == PCRE2_JIT_PARTIAL_SOFT)
{
jump = CMP(SLJIT_NOT_EQUAL, SLJIT_MEM1(SLJIT_SP), common->hit_start, SLJIT_IMM, -1);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, STR_PTR, 0);
JUMPHERE(jump);
}
else if (mode == PCRE2_JIT_PARTIAL_HARD)
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, STR_PTR, 0);
compile_matchingpath(common, common->start, ccend, &rootbacktrack);
if (SLJIT_UNLIKELY(sljit_get_compiler_error(compiler)))
{
sljit_free_compiler(compiler);
SLJIT_FREE(common->optimized_cbracket, allocator_data);
SLJIT_FREE(common->private_data_ptrs, allocator_data);
PRIV(jit_free_rodata)(common->read_only_data_head, allocator_data);
return PCRE2_ERROR_NOMEMORY;
}
if ((re->overall_options & PCRE2_ENDANCHORED) != 0)
end_anchor_failed = CMP(SLJIT_NOT_EQUAL, STR_PTR, 0, STR_END, 0);
if (common->might_be_empty)
{
empty_match = CMP(SLJIT_EQUAL, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0));
empty_match_found_label = LABEL();
}
common->accept_label = LABEL();
if (common->accept != NULL)
set_jumps(common->accept, common->accept_label);
/* This means we have a match. Update the ovector. */
copy_ovector(common, re->top_bracket + 1);
common->quit_label = common->abort_label = LABEL();
if (common->quit != NULL)
set_jumps(common->quit, common->quit_label);
if (common->abort != NULL)
set_jumps(common->abort, common->abort_label);
if (minlength_check_failed != NULL)
SET_LABEL(minlength_check_failed, common->abort_label);
sljit_emit_op0(compiler, SLJIT_SKIP_FRAMES_BEFORE_RETURN);
sljit_emit_return(compiler, SLJIT_MOV, SLJIT_RETURN_REG, 0);
if (common->failed_match != NULL)
{
SLJIT_ASSERT(common->mode == PCRE2_JIT_COMPLETE);
set_jumps(common->failed_match, LABEL());
OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_IMM, PCRE2_ERROR_NOMATCH);
JUMPTO(SLJIT_JUMP, common->abort_label);
}
if ((re->overall_options & PCRE2_ENDANCHORED) != 0)
JUMPHERE(end_anchor_failed);
if (mode != PCRE2_JIT_COMPLETE)
{
common->partialmatchlabel = LABEL();
set_jumps(common->partialmatch, common->partialmatchlabel);
return_with_partial_match(common, common->quit_label);
}
if (common->might_be_empty)
empty_match_backtrack_label = LABEL();
compile_backtrackingpath(common, rootbacktrack.top);
if (SLJIT_UNLIKELY(sljit_get_compiler_error(compiler)))
{
sljit_free_compiler(compiler);
SLJIT_FREE(common->optimized_cbracket, allocator_data);
SLJIT_FREE(common->private_data_ptrs, allocator_data);
PRIV(jit_free_rodata)(common->read_only_data_head, allocator_data);
return PCRE2_ERROR_NOMEMORY;
}
SLJIT_ASSERT(rootbacktrack.prev == NULL);
reset_match_label = LABEL();
if (mode == PCRE2_JIT_PARTIAL_SOFT)
{
/* Update hit_start only in the first time. */
jump = CMP(SLJIT_NOT_EQUAL, SLJIT_MEM1(SLJIT_SP), common->hit_start, SLJIT_IMM, 0);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->start_ptr);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->start_used_ptr, SLJIT_IMM, -1);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), common->hit_start, TMP1, 0);
JUMPHERE(jump);
}
/* Check we have remaining characters. */
if ((re->overall_options & PCRE2_ANCHORED) == 0 && common->match_end_ptr != 0)
{
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->match_end_ptr);
}
OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP),
(common->fast_forward_bc_ptr != NULL) ? (PRIVATE_DATA(common->fast_forward_bc_ptr + 1) >> 3) : common->start_ptr);
if ((re->overall_options & PCRE2_ANCHORED) == 0)
{
if (common->ff_newline_shortcut != NULL)
{
/* There cannot be more newlines if PCRE2_FIRSTLINE is set. */
if ((re->overall_options & PCRE2_FIRSTLINE) == 0)
{
if (common->match_end_ptr != 0)
{
OP1(SLJIT_MOV, TMP3, 0, STR_END, 0);
OP1(SLJIT_MOV, STR_END, 0, TMP1, 0);
CMPTO(SLJIT_LESS, STR_PTR, 0, TMP1, 0, common->ff_newline_shortcut);
OP1(SLJIT_MOV, STR_END, 0, TMP3, 0);
}
else
CMPTO(SLJIT_LESS, STR_PTR, 0, STR_END, 0, common->ff_newline_shortcut);
}
}
else
CMPTO(SLJIT_LESS, STR_PTR, 0, (common->match_end_ptr == 0) ? STR_END : TMP1, 0, mainloop_label);
}
/* No more remaining characters. */
if (reqcu_not_found != NULL)
set_jumps(reqcu_not_found, LABEL());
if (mode == PCRE2_JIT_PARTIAL_SOFT)
CMPTO(SLJIT_NOT_EQUAL, SLJIT_MEM1(SLJIT_SP), common->hit_start, SLJIT_IMM, -1, common->partialmatchlabel);
OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_IMM, PCRE2_ERROR_NOMATCH);
JUMPTO(SLJIT_JUMP, common->quit_label);
flush_stubs(common);
if (common->might_be_empty)
{
JUMPHERE(empty_match);
OP1(SLJIT_MOV, TMP1, 0, ARGUMENTS, 0);
OP1(SLJIT_MOV_U32, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, options));
OP2U(SLJIT_AND | SLJIT_SET_Z, TMP2, 0, SLJIT_IMM, PCRE2_NOTEMPTY);
JUMPTO(SLJIT_NOT_ZERO, empty_match_backtrack_label);
OP2U(SLJIT_AND | SLJIT_SET_Z, TMP2, 0, SLJIT_IMM, PCRE2_NOTEMPTY_ATSTART);
JUMPTO(SLJIT_ZERO, empty_match_found_label);
OP1(SLJIT_MOV, TMP2, 0, SLJIT_MEM1(TMP1), SLJIT_OFFSETOF(jit_arguments, str));
CMPTO(SLJIT_NOT_EQUAL, TMP2, 0, STR_PTR, 0, empty_match_found_label);
JUMPTO(SLJIT_JUMP, empty_match_backtrack_label);
}
common->fast_forward_bc_ptr = NULL;
common->early_fail_start_ptr = 0;
common->early_fail_end_ptr = 0;
common->currententry = common->entries;
common->local_quit_available = TRUE;
quit_label = common->quit_label;
if (common->currententry != NULL)
{
/* A free bit for each private data. */
common->recurse_bitset_size = ((private_data_size / (int)sizeof(sljit_sw)) + 7) >> 3;
SLJIT_ASSERT(common->recurse_bitset_size > 0);
common->recurse_bitset = (sljit_u8*)SLJIT_MALLOC(common->recurse_bitset_size, allocator_data);;
if (common->recurse_bitset != NULL)
{
do
{
/* Might add new entries. */
compile_recurse(common);
if (SLJIT_UNLIKELY(sljit_get_compiler_error(compiler)))
break;
flush_stubs(common);
common->currententry = common->currententry->next;
}
while (common->currententry != NULL);
SLJIT_FREE(common->recurse_bitset, allocator_data);
}
if (common->currententry != NULL)
{
/* The common->recurse_bitset has been freed. */
SLJIT_ASSERT(sljit_get_compiler_error(compiler) || common->recurse_bitset == NULL);
sljit_free_compiler(compiler);
SLJIT_FREE(common->optimized_cbracket, allocator_data);
SLJIT_FREE(common->private_data_ptrs, allocator_data);
PRIV(jit_free_rodata)(common->read_only_data_head, allocator_data);
return PCRE2_ERROR_NOMEMORY;
}
}
common->local_quit_available = FALSE;
common->quit_label = quit_label;
/* Allocating stack, returns with PCRE_ERROR_JIT_STACKLIMIT if fails. */
/* This is a (really) rare case. */
set_jumps(common->stackalloc, LABEL());
/* RETURN_ADDR is not a saved register. */
sljit_emit_fast_enter(compiler, SLJIT_MEM1(SLJIT_SP), LOCALS0);
SLJIT_ASSERT(TMP1 == SLJIT_R0 && STR_PTR == SLJIT_R1);
OP1(SLJIT_MOV, SLJIT_MEM1(SLJIT_SP), LOCALS1, STR_PTR, 0);
OP1(SLJIT_MOV, SLJIT_R0, 0, ARGUMENTS, 0);
OP2(SLJIT_SUB, SLJIT_R1, 0, STACK_LIMIT, 0, SLJIT_IMM, STACK_GROWTH_RATE);
OP1(SLJIT_MOV, SLJIT_R0, 0, SLJIT_MEM1(SLJIT_R0), SLJIT_OFFSETOF(jit_arguments, stack));
OP1(SLJIT_MOV, STACK_LIMIT, 0, TMP2, 0);
sljit_emit_icall(compiler, SLJIT_CALL, SLJIT_ARGS2(W, W, W), SLJIT_IMM, SLJIT_FUNC_ADDR(sljit_stack_resize));
jump = CMP(SLJIT_EQUAL, SLJIT_RETURN_REG, 0, SLJIT_IMM, 0);
OP1(SLJIT_MOV, TMP2, 0, STACK_LIMIT, 0);
OP1(SLJIT_MOV, STACK_LIMIT, 0, SLJIT_RETURN_REG, 0);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), LOCALS0);
OP1(SLJIT_MOV, STR_PTR, 0, SLJIT_MEM1(SLJIT_SP), LOCALS1);
OP_SRC(SLJIT_FAST_RETURN, TMP1, 0);
/* Allocation failed. */
JUMPHERE(jump);
/* We break the return address cache here, but this is a really rare case. */
OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_IMM, PCRE2_ERROR_JIT_STACKLIMIT);
JUMPTO(SLJIT_JUMP, common->quit_label);
/* Call limit reached. */
set_jumps(common->calllimit, LABEL());
OP1(SLJIT_MOV, SLJIT_RETURN_REG, 0, SLJIT_IMM, PCRE2_ERROR_MATCHLIMIT);
JUMPTO(SLJIT_JUMP, common->quit_label);
if (common->revertframes != NULL)
{
set_jumps(common->revertframes, LABEL());
do_revertframes(common);
}
if (common->wordboundary != NULL)
{
set_jumps(common->wordboundary, LABEL());
check_wordboundary(common);
}
if (common->anynewline != NULL)
{
set_jumps(common->anynewline, LABEL());
check_anynewline(common);
}
if (common->hspace != NULL)
{
set_jumps(common->hspace, LABEL());
check_hspace(common);
}
if (common->vspace != NULL)
{
set_jumps(common->vspace, LABEL());
check_vspace(common);
}
if (common->casefulcmp != NULL)
{
set_jumps(common->casefulcmp, LABEL());
do_casefulcmp(common);
}
if (common->caselesscmp != NULL)
{
set_jumps(common->caselesscmp, LABEL());
do_caselesscmp(common);
}
if (common->reset_match != NULL)
{
set_jumps(common->reset_match, LABEL());
do_reset_match(common, (re->top_bracket + 1) * 2);
CMPTO(SLJIT_GREATER, STR_PTR, 0, TMP1, 0, continue_match_label);
OP1(SLJIT_MOV, STR_PTR, 0, TMP1, 0);
JUMPTO(SLJIT_JUMP, reset_match_label);
}
#ifdef SUPPORT_UNICODE
#if PCRE2_CODE_UNIT_WIDTH == 8
if (common->utfreadchar != NULL)
{
set_jumps(common->utfreadchar, LABEL());
do_utfreadchar(common);
}
if (common->utfreadtype8 != NULL)
{
set_jumps(common->utfreadtype8, LABEL());
do_utfreadtype8(common);
}
if (common->utfpeakcharback != NULL)
{
set_jumps(common->utfpeakcharback, LABEL());
do_utfpeakcharback(common);
}
#endif /* PCRE2_CODE_UNIT_WIDTH == 8 */
#if PCRE2_CODE_UNIT_WIDTH == 8 || PCRE2_CODE_UNIT_WIDTH == 16
if (common->utfreadchar_invalid != NULL)
{
set_jumps(common->utfreadchar_invalid, LABEL());
do_utfreadchar_invalid(common);
}
if (common->utfreadnewline_invalid != NULL)
{
set_jumps(common->utfreadnewline_invalid, LABEL());
do_utfreadnewline_invalid(common);
}
if (common->utfmoveback_invalid)
{
set_jumps(common->utfmoveback_invalid, LABEL());
do_utfmoveback_invalid(common);
}
if (common->utfpeakcharback_invalid)
{
set_jumps(common->utfpeakcharback_invalid, LABEL());
do_utfpeakcharback_invalid(common);
}
#endif /* PCRE2_CODE_UNIT_WIDTH == 8 || PCRE2_CODE_UNIT_WIDTH == 16 */
if (common->getucd != NULL)
{
set_jumps(common->getucd, LABEL());
do_getucd(common);
}
if (common->getucdtype != NULL)
{
set_jumps(common->getucdtype, LABEL());
do_getucdtype(common);
}
#endif /* SUPPORT_UNICODE */
SLJIT_FREE(common->optimized_cbracket, allocator_data);
SLJIT_FREE(common->private_data_ptrs, allocator_data);
executable_func = sljit_generate_code(compiler);
executable_size = sljit_get_generated_code_size(compiler);
sljit_free_compiler(compiler);
if (executable_func == NULL)
{
PRIV(jit_free_rodata)(common->read_only_data_head, allocator_data);
return PCRE2_ERROR_NOMEMORY;
}
/* Reuse the function descriptor if possible. */
if (re->executable_jit != NULL)
functions = (executable_functions *)re->executable_jit;
else
{
functions = SLJIT_MALLOC(sizeof(executable_functions), allocator_data);
if (functions == NULL)
{
/* This case is highly unlikely since we just recently
freed a lot of memory. Not impossible though. */
sljit_free_code(executable_func, NULL);
PRIV(jit_free_rodata)(common->read_only_data_head, allocator_data);
return PCRE2_ERROR_NOMEMORY;
}
memset(functions, 0, sizeof(executable_functions));
functions->top_bracket = re->top_bracket + 1;
functions->limit_match = re->limit_match;
re->executable_jit = functions;
}
/* Turn mode into an index. */
if (mode == PCRE2_JIT_COMPLETE)
mode = 0;
else
mode = (mode == PCRE2_JIT_PARTIAL_SOFT) ? 1 : 2;
SLJIT_ASSERT(mode < JIT_NUMBER_OF_COMPILE_MODES);
functions->executable_funcs[mode] = executable_func;
functions->read_only_data_heads[mode] = common->read_only_data_head;
functions->executable_sizes[mode] = executable_size;
return 0;
}
| 0 |
[
"CWE-703",
"CWE-125"
] |
pcre2
|
03654e751e7f0700693526b67dfcadda6b42c9d0
| 64,616,293,610,244,000,000,000,000,000,000,000,000 | 669 |
Fixed an issue affecting recursions in JIT
|
/*
 * Return the bit length of x: the smallest i such that (1 << i) > x,
 * i.e. floor(log2(x)) + 1 for x > 0.  Returns 0 for x <= 0, and also
 * returns 0 when the answer would need a shift past the sign bit
 * (matching the original overflow guard).
 *
 * Fixes vs. the original: an explicit return type (implicit int is
 * invalid in C99+/C++), and the shift amount is bounded so that
 * (1 << i) is never evaluated with i at the sign-bit position, which
 * was undefined behavior for large x.
 */
int
my_log2(int x)
{
	/* Largest shift that stays below the sign bit of int. */
	const int max_shift = (int)(sizeof(int) * 8 - 1);
	int i = 0;

	if (x <= 0)
		return 0;
	while (i < max_shift && (1 << i) <= x)
		i++;
	if (i >= max_shift)
		return 0; /* 2^i would overflow int; original returned 0 here too */
	return i;
}
| 0 |
[
"CWE-119"
] |
mupdf
|
520cc26d18c9ee245b56e9e91f9d4fcae02be5f0
| 90,161,249,389,817,300,000,000,000,000,000,000,000 | 15 |
Bug 689699: Avoid buffer overrun.
When cleaning a pdf file, various lists (of pdf_xref_len length) are
defined early on.
If we trigger a repair during the clean, this can cause pdf_xref_len
to increase causing an overrun.
Fix this by watching for changes in the length, and checking accesses
to the list for validity.
This also appears to fix bugs 698700-698703.
|
/* Initialize an ecc_key structure for use.
 *
 * key   - key to initialize; must not be NULL
 * heap  - heap hint stored in key->heap (unless WOLFSSL_HEAP_TEST overrides it)
 * devId - device id, stored only when PLUTON_CRYPTO_ECC or WOLF_CRYPTO_CB is
 *         built in; otherwise ignored
 *
 * Returns 0 on success, BAD_FUNC_ARG if key is NULL, MEMORY_E if the
 * multi-precision integer init fails.  Under async crypto builds the return
 * value of wolfAsync_DevCtxInit is propagated.
 */
int wc_ecc_init_ex(ecc_key* key, void* heap, int devId)
{
    int ret = 0;

    if (key == NULL) {
        return BAD_FUNC_ARG;
    }

#ifdef ECC_DUMP_OID
    wc_ecc_dump_oids();
#endif

    /* Start from a fully zeroed key so all pointers/flags have known values. */
    XMEMSET(key, 0, sizeof(ecc_key));
    key->state = ECC_STATE_NONE;

#if defined(PLUTON_CRYPTO_ECC) || defined(WOLF_CRYPTO_CB)
    key->devId = devId;
#else
    (void)devId;
#endif

#ifdef WOLFSSL_ATECC508A
    /* Hardware-backed key: mark the ATECC slot as unassigned. */
    key->slot = ATECC_INVALID_SLOT;
#else
#ifdef ALT_ECC_SIZE
    /* Alternate (reduced-size) math: point x/y/z alias the xyz[] storage
       inside the key rather than owning separate allocations. */
    key->pubkey.x = (mp_int*)&key->pubkey.xyz[0];
    key->pubkey.y = (mp_int*)&key->pubkey.xyz[1];
    key->pubkey.z = (mp_int*)&key->pubkey.xyz[2];
    alt_fp_init(key->pubkey.x);
    alt_fp_init(key->pubkey.y);
    alt_fp_init(key->pubkey.z);
    ret = mp_init(&key->k);
    if (ret != MP_OKAY) {
        return MEMORY_E;
    }
#else
    /* Initialize private scalar k and the public point coordinates together. */
    ret = mp_init_multi(&key->k, key->pubkey.x, key->pubkey.y, key->pubkey.z,
                        NULL, NULL);
    if (ret != MP_OKAY) {
        return MEMORY_E;
    }
#endif /* ALT_ECC_SIZE */
#endif /* WOLFSSL_ATECC508A */

#ifdef WOLFSSL_HEAP_TEST
    key->heap = (void*)WOLFSSL_HEAP_TEST;
#else
    key->heap = heap;
#endif

#if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_ECC)
    /* handle as async */
    ret = wolfAsync_DevCtxInit(&key->asyncDev, WOLFSSL_ASYNC_MARKER_ECC,
                               key->heap, devId);
#endif

#if defined(WOLFSSL_DSP)
    /* No DSP handle assigned yet. */
    key->handle = -1;
#endif

    return ret;
}
| 0 |
[
"CWE-326",
"CWE-203"
] |
wolfssl
|
1de07da61f0c8e9926dcbd68119f73230dae283f
| 193,392,415,837,031,500,000,000,000,000,000,000,000 | 61 |
Constant time EC map to affine for private operations
For fast math, use a constant time modular inverse when mapping to
affine when operation involves a private key - key gen, calc shared
secret, sign.
|
alloc_group_fields(JOIN *join, ORDER *group)
{
  /*
    Create a Cached_item for every GROUP BY element and push it onto
    join->group_fields.  Returns TRUE on allocation/push failure,
    FALSE on success.  Also marks the join for grouped execution.
  */
  for (ORDER *ord= group; ord; ord= ord->next)
  {
    Cached_item *cache= new_Cached_item(join->thd, *ord->item, TRUE);
    if (!cache || join->group_fields.push_front(cache))
      return TRUE;                              /* purecov: inspected */
  }
  join->sort_and_group= 1;			/* Mark for do_select */
  return FALSE;
}
| 0 |
[] |
server
|
ff77a09bda884fe6bf3917eb29b9d3a2f53f919b
| 112,493,114,348,527,540,000,000,000,000,000,000,000 | 14 |
MDEV-22464 Server crash on UPDATE with nested subquery
Uninitialized ref_pointer_array[] because setup_fields() got empty
fields list. mysql_multi_update() for some reason does that by
substituting the fields list with empty total_list for the
mysql_select() call (looks like wrong merge since total_list is not
used anywhere else and is always empty). The fix would be to return
back the original fields list. But this fails update_use_source.test
case:
--error ER_BAD_FIELD_ERROR
update v1 set t1c1=2 order by 1;
Actually not failing the above seems to be ok.
The other fix would be to keep resolve_in_select_list false (and that
keeps outer context from being resolved in
Item_ref::fix_fields()). This fix is more consistent with how SELECT
behaves:
--error ER_SUBQUERY_NO_1_ROW
select a from t1 where a= (select 2 from t1 having (a = 3));
So this patch implements this fix.
|
\param referer Referer used, as a C-string.
\return Value of \c filename_local.
\note Use the \c libcurl library, or the external binaries \c wget or \c curl to perform the download.
**/
inline char *load_network(const char *const url, char *const filename_local,
const unsigned int timeout, const bool try_fallback,
const char *const referer) {
if (!url)
throw CImgArgumentException("cimg::load_network(): Specified URL is (null).");
if (!filename_local)
throw CImgArgumentException("cimg::load_network(): Specified destination string is (null).");
const char *const __ext = cimg::split_filename(url), *const _ext = (*__ext && __ext>url)?__ext - 1:__ext;
CImg<char> ext = CImg<char>::string(_ext);
std::FILE *file = 0;
*filename_local = 0;
if (ext._width>16 || !cimg::strncasecmp(ext,"cgi",3)) *ext = 0;
else cimg::strwindows_reserved(ext);
do {
cimg_snprintf(filename_local,256,"%s%c%s%s",
cimg::temporary_path(),cimg_file_separator,cimg::filenamerand(),ext._data);
if ((file=cimg::std_fopen(filename_local,"rb"))!=0) cimg::fclose(file);
} while (file);
#ifdef cimg_use_curl
const unsigned int omode = cimg::exception_mode();
cimg::exception_mode(0);
try {
CURL *curl = 0;
CURLcode res;
curl = curl_easy_init();
if (curl) {
file = cimg::fopen(filename_local,"wb");
curl_easy_setopt(curl,CURLOPT_URL,url);
curl_easy_setopt(curl,CURLOPT_WRITEFUNCTION,0);
curl_easy_setopt(curl,CURLOPT_WRITEDATA,file);
curl_easy_setopt(curl,CURLOPT_SSL_VERIFYPEER,0L);
curl_easy_setopt(curl,CURLOPT_SSL_VERIFYHOST,0L);
curl_easy_setopt(curl,CURLOPT_FOLLOWLOCATION,1L);
if (timeout) curl_easy_setopt(curl,CURLOPT_TIMEOUT,(long)timeout);
if (std::strchr(url,'?')) curl_easy_setopt(curl,CURLOPT_HTTPGET,1L);
if (referer) curl_easy_setopt(curl,CURLOPT_REFERER,referer);
res = curl_easy_perform(curl);
curl_easy_cleanup(curl);
cimg::fseek(file,0,SEEK_END); // Check if file size is 0
const cimg_ulong siz = cimg::ftell(file);
cimg::fclose(file);
if (siz>0 && res==CURLE_OK) {
cimg::exception_mode(omode);
return filename_local;
} else std::remove(filename_local);
}
} catch (...) { }
cimg::exception_mode(omode);
if (!try_fallback) throw CImgIOException("cimg::load_network(): Failed to load file '%s' with libcurl.",url);
#endif
CImg<char> command((unsigned int)std::strlen(url) + 64);
cimg::unused(try_fallback);
// Try with 'curl' first.
if (timeout) {
if (referer)
cimg_snprintf(command,command._width,"%s -e %s -m %u -f --silent --compressed -o \"%s\" \"%s\"",
cimg::curl_path(),referer,timeout,filename_local,
CImg<char>::string(url)._system_strescape().data());
else
cimg_snprintf(command,command._width,"%s -m %u -f --silent --compressed -o \"%s\" \"%s\"",
cimg::curl_path(),timeout,filename_local,
CImg<char>::string(url)._system_strescape().data());
} else {
if (referer)
cimg_snprintf(command,command._width,"%s -e %s -f --silent --compressed -o \"%s\" \"%s\"",
cimg::curl_path(),referer,filename_local,
CImg<char>::string(url)._system_strescape().data());
else
cimg_snprintf(command,command._width,"%s -f --silent --compressed -o \"%s\" \"%s\"",
cimg::curl_path(),filename_local,
CImg<char>::string(url)._system_strescape().data());
}
cimg::system(command);
if (!(file=cimg::std_fopen(filename_local,"rb"))) {
// Try with 'wget' otherwise.
if (timeout) {
if (referer)
cimg_snprintf(command,command._width,"%s --referer=%s -T %u -q -r -l 0 --no-cache -O \"%s\" \"%s\"",
cimg::wget_path(),referer,timeout,filename_local,
CImg<char>::string(url)._system_strescape().data());
else
cimg_snprintf(command,command._width,"%s -T %u -q -r -l 0 --no-cache -O \"%s\" \"%s\"",
cimg::wget_path(),timeout,filename_local,
CImg<char>::string(url)._system_strescape().data());
} else {
if (referer)
cimg_snprintf(command,command._width,"%s --referer=%s -q -r -l 0 --no-cache -O \"%s\" \"%s\"",
cimg::wget_path(),referer,filename_local,
CImg<char>::string(url)._system_strescape().data());
else
cimg_snprintf(command,command._width,"%s -q -r -l 0 --no-cache -O \"%s\" \"%s\"",
cimg::wget_path(),filename_local,
CImg<char>::string(url)._system_strescape().data());
}
cimg::system(command);
if (!(file=cimg::std_fopen(filename_local,"rb")))
throw CImgIOException("cimg::load_network(): Failed to load file '%s' with external commands "
"'wget' or 'curl'.",url);
cimg::fclose(file);
// Try gunzip it.
cimg_snprintf(command,command._width,"%s.gz",filename_local);
std::rename(filename_local,command);
cimg_snprintf(command,command._width,"%s --quiet \"%s.gz\"",
gunzip_path(),filename_local);
cimg::system(command);
file = cimg::std_fopen(filename_local,"rb");
if (!file) {
cimg_snprintf(command,command._width,"%s.gz",filename_local);
std::rename(command,filename_local);
file = cimg::std_fopen(filename_local,"rb");
}
}
cimg::fseek(file,0,SEEK_END); // Check if file size is 0
if (std::ftell(file)<=0)
throw CImgIOException("cimg::load_network(): Failed to load URL '%s' with external commands "
| 0 |
[
"CWE-119",
"CWE-787"
] |
CImg
|
ac8003393569aba51048c9d67e1491559877b1d1
| 195,756,413,359,692,100,000,000,000,000,000,000,000 | 127 |
.
|
// Verify that ref_klass is allowed to access the field described by fd,
// which was resolved against resolved_klass and selected from sel_klass.
// On an access violation this throws java.lang.IllegalAccessError with a
// detailed message (including module information and, for private fields,
// any nest-host resolution problem).  Errors raised inside
// verify_member_access itself (e.g. LinkageErrors from nest-host
// resolution) propagate unchanged via CHECK.
void LinkResolver::check_field_accessability(Klass* ref_klass,
                                             Klass* resolved_klass,
                                             Klass* sel_klass,
                                             const fieldDescriptor& fd,
                                             TRAPS) {
  bool can_access = Reflection::verify_member_access(ref_klass,
                                                     resolved_klass,
                                                     sel_klass,
                                                     fd.access_flags(),
                                                     true, false, CHECK);
  // Any existing exceptions that may have been thrown, for example LinkageErrors
  // from nest-host resolution, have been allowed to propagate.
  if (!can_access) {
    // Report whether accessor and field owner share a module so the message
    // can name the module/loader of each side appropriately.
    bool same_module = (sel_klass->module() == ref_klass->module());
    ResourceMark rm(THREAD);
    stringStream ss;
    ss.print("class %s tried to access %s%sfield %s.%s (%s%s%s)",
             ref_klass->external_name(),
             fd.is_protected() ? "protected " : "",
             fd.is_private()   ? "private "   : "",
             sel_klass->external_name(),
             fd.name()->as_C_string(),
             (same_module) ? ref_klass->joint_in_module_of_loader(sel_klass) : ref_klass->class_in_module_of_loader(),
             (same_module) ? "" : "; ",
             (same_module) ? "" : sel_klass->class_in_module_of_loader()
             );

    // For private access see if there was a problem with nest host
    // resolution, and if so report that as part of the message.
    if (fd.is_private()) {
      print_nest_host_error_on(&ss, ref_klass, sel_klass);
    }
    Exceptions::fthrow(THREAD_AND_LOCATION,
                       vmSymbols::java_lang_IllegalAccessError(),
                       "%s",
                       ss.as_string()
                       );
    return;
  }
}
| 0 |
[] |
jdk17u
|
860464e46105b98ccf21e98abe2dc6e80155887c
| 229,853,857,791,400,850,000,000,000,000,000,000,000 | 39 |
8281866: Enhance MethodHandle invocations
Reviewed-by: mbaesken
Backport-of: d974d9da365f787f67971d88c79371c8b0769f75
|
midi_synth_start_note(int dev, int channel, int note, int velocity)
{
	int midi_dev = synth_devs[dev]->midi_dev;
	int status;

	/* Reject out-of-range note/channel; clamp velocity into 0..127. */
	if (note < 0 || note > 127 || channel < 0 || channel > 15)
		return 0;
	if (velocity < 0)
		velocity = 0;
	else if (velocity > 127)
		velocity = 127;

	leave_sysex(dev);

	status = prev_out_status[midi_dev];
	if ((status & 0x0f) == channel && (status & 0xf0) == 0x90)
	  {
		  /*
		   * Running status: the previous message was a note-on for the
		   * same channel, so the status byte may be omitted.
		   */
		  if (!prefix_cmd(midi_dev, note))
			  return 0;
		  midi_outc(midi_dev, note);
		  midi_outc(midi_dev, velocity);
	  } else
	  {
		  status = 0x90 | (channel & 0x0f);	/*
						         * Note on
						         */
		  if (!prefix_cmd(midi_dev, status))
			  return 0;
		  midi_outc(midi_dev, status);
		  midi_outc(midi_dev, note);
		  midi_outc(midi_dev, velocity);
	  }
	return 0;
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
b769f49463711205d57286e64cf535ed4daf59e9
| 182,998,413,210,691,300,000,000,000,000,000,000,000 | 39 |
sound/oss: remove offset from load_patch callbacks
Was: [PATCH] sound/oss/midi_synth: prevent underflow, use of
uninitialized value, and signedness issue
The offset passed to midi_synth_load_patch() can be essentially
arbitrary. If it's greater than the header length, this will result in
a copy_from_user(dst, src, negative_val). While this will just return
-EFAULT on x86, on other architectures this may cause memory corruption.
Additionally, the length field of the sysex_info structure may not be
initialized prior to its use. Finally, a signed comparison may result
in an unintentionally large loop.
On suggestion by Takashi Iwai, version two removes the offset argument
from the load_patch callbacks entirely, which also resolves similar
issues in opl3. Compile tested only.
v3 adjusts comments and hopefully gets copy offsets right.
Signed-off-by: Dan Rosenberg <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]>
|
/*
 * Insert the page frame @pfn into @vma at user address @addr.
 * The vma must be marked VM_MIXEDMAP (BUG otherwise), and @addr must lie
 * inside the vma, else -EFAULT is returned.  Depending on pte-special
 * support the frame is inserted either as a refcounted struct page
 * (insert_page) or as a raw pfn mapping (insert_pfn).
 */
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));

	/* Address must be within the vma's range. */
	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	/*
	 * If we don't have pte special, then we have to use the pfn_valid()
	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
	 * refcount the page if pfn_valid is true (hence insert_page rather
	 * than insert_pfn).
	 */
	if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
		struct page *page;

		page = pfn_to_page(pfn);
		return insert_page(vma, addr, page, vma->vm_page_prot);
	}
	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
| 0 |
[
"CWE-20"
] |
linux-2.6
|
89f5b7da2a6bad2e84670422ab8192382a5aeb9f
| 259,831,448,603,174,400,000,000,000,000,000,000,000 | 22 |
Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP
KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit
557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed
the ZERO_PAGE from the VM mappings, any users of get_user_pages() will
generally now populate the VM with real empty pages needlessly.
We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but
since fault handling no longer uses ZERO_PAGE for new anonymous pages,
we now need to handle that special case in follow_page() instead.
In particular, the removal of ZERO_PAGE effectively removed the core
file writing optimization where we would skip writing pages that had not
been populated at all, and increased memory pressure a lot by allocating
all those useless newly zeroed pages.
This reinstates the optimization by making the unmapped PTE case the
same as for a non-existent page table, which already did this correctly.
While at it, this also fixes the XIP case for follow_page(), where the
caller could not differentiate between the case of a page that simply
could not be used (because it had no "struct page" associated with it)
and a page that just wasn't mapped.
We do that by simply returning an error pointer for pages that could not
be turned into a "struct page *". The error is arbitrarily picked to be
EFAULT, since that was what get_user_pages() already used for the
equivalent IO-mapped page case.
[ Also removed an impossible test for pte_offset_map_lock() failing:
that's not how that function works ]
Acked-by: Oleg Nesterov <[email protected]>
Acked-by: Nick Piggin <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Roland McGrath <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
int platform_get_irq_byname(struct platform_device *dev, const char *name)
{
	int irq = __platform_get_irq_byname(dev, name);

	/* Probe deferral is not a real failure, so only log other errors. */
	if (irq >= 0 || irq == -EPROBE_DEFER)
		return irq;

	dev_err(&dev->dev, "IRQ %s not found\n", name);
	return irq;
}
| 0 |
[
"CWE-787"
] |
linux
|
aa838896d87af561a33ecefea1caa4c15a68bc47
| 124,857,042,852,083,720,000,000,000,000,000,000,000 | 10 |
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions
Convert the various sprintf fmaily calls in sysfs device show functions
to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety.
Done with:
$ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 .
And cocci script:
$ cat sysfs_emit_dev.cocci
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
return
- strcpy(buf, chr);
+ sysfs_emit(buf, chr);
...>
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- sprintf(buf,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- snprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
len =
- scnprintf(buf, PAGE_SIZE,
+ sysfs_emit(buf,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
identifier len;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
<...
- len += scnprintf(buf + len, PAGE_SIZE - len,
+ len += sysfs_emit_at(buf, len,
...);
...>
return len;
}
@@
identifier d_show;
identifier dev, attr, buf;
expression chr;
@@
ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf)
{
...
- strcpy(buf, chr);
- return strlen(buf);
+ return sysfs_emit(buf, chr);
}
Signed-off-by: Joe Perches <[email protected]>
Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
static void swoole_serialize_arr(seriaString *buffer, zend_array *zvalue)
{
zval *data;
zend_string *key;
zend_ulong index;
swPoolstr *swStr = NULL;
zend_uchar is_pack = zvalue->u.flags & HASH_FLAG_PACKED;
ZEND_HASH_FOREACH_KEY_VAL(zvalue, index, key, data)
{
SBucketType type = {0};
type.data_type = Z_TYPE_P(data);
//start point
size_t p = buffer->offset;
if (is_pack && zvalue->nNextFreeElement == zvalue->nNumOfElements)
{
type.key_type = KEY_TYPE_INDEX;
type.key_len = 0;
SERIA_SET_ENTRY_TYPE(buffer, type);
}
else
{
//seria key
if (key)
{
type.key_type = KEY_TYPE_STRING;
if ((swStr = swoole_mini_filter_find(key)))
{
type.key_len = 3; //means use same string
SERIA_SET_ENTRY_TYPE(buffer, type);
if (swStr->offset & 4)
{
SERIA_SET_ENTRY_SIZE4(buffer, swStr->offset);
}
else
{
SERIA_SET_ENTRY_SHORT(buffer, swStr->offset);
}
}
else
{
if (key->len <= 0xff)
{
type.key_len = 1;
SERIA_SET_ENTRY_TYPE(buffer, type);
swoole_mini_filter_add(key, buffer->offset, 1);
SERIA_SET_ENTRY_TYPE(buffer, key->len);
swoole_string_cpy(buffer, key->val, key->len);
}
else if (key->len <= 0xffff)
{//if more than this don't need optimize
type.key_len = 2;
SERIA_SET_ENTRY_TYPE(buffer, type);
swoole_mini_filter_add(key, buffer->offset, 2);
SERIA_SET_ENTRY_SHORT(buffer, key->len);
swoole_string_cpy(buffer, key->val, key->len);
}
else
{
type.key_len = 0;
SERIA_SET_ENTRY_TYPE(buffer, type);
swoole_mini_filter_add(key, buffer->offset, 3);
swoole_string_cpy(buffer, key + XtOffsetOf(zend_string, len), sizeof (size_t) + key->len);
}
}
}
else
{
type.key_type = KEY_TYPE_INDEX;
if (index <= 0xff)
{
type.key_len = 1;
SERIA_SET_ENTRY_TYPE(buffer, type);
SERIA_SET_ENTRY_TYPE(buffer, index);
}
else if (index <= 0xffff)
{
type.key_len = 2;
SERIA_SET_ENTRY_TYPE(buffer, type);
SERIA_SET_ENTRY_SHORT(buffer, index);
}
else
{
type.key_len = 3;
SERIA_SET_ENTRY_TYPE(buffer, type);
SERIA_SET_ENTRY_ULONG(buffer, index);
}
}
}
//seria data
try_again:
switch (Z_TYPE_P(data))
{
case IS_STRING:
{
if ((swStr = swoole_mini_filter_find(Z_STR_P(data))))
{
((SBucketType*) (buffer->buffer + p))->data_len = 3; //means use same string
if (swStr->offset & 4)
{
SERIA_SET_ENTRY_SIZE4(buffer, swStr->offset);
}
else
{
SERIA_SET_ENTRY_SHORT(buffer, swStr->offset);
}
}
else
{
if (Z_STRLEN_P(data) <= 0xff)
{
((SBucketType*) (buffer->buffer + p))->data_len = 1;
swoole_mini_filter_add(Z_STR_P(data), buffer->offset, 1);
SERIA_SET_ENTRY_TYPE(buffer, Z_STRLEN_P(data));
swoole_string_cpy(buffer, Z_STRVAL_P(data), Z_STRLEN_P(data));
}
else if (Z_STRLEN_P(data) <= 0xffff)
{
((SBucketType*) (buffer->buffer + p))->data_len = 2;
swoole_mini_filter_add(Z_STR_P(data), buffer->offset, 2);
SERIA_SET_ENTRY_SHORT(buffer, Z_STRLEN_P(data));
swoole_string_cpy(buffer, Z_STRVAL_P(data), Z_STRLEN_P(data));
}
else
{//if more than this don't need optimize
((SBucketType*) (buffer->buffer + p))->data_len = 0;
swoole_mini_filter_add(Z_STR_P(data), buffer->offset, 3);
swoole_string_cpy(buffer, (char*) Z_STR_P(data) + XtOffsetOf(zend_string, len), sizeof (size_t) + Z_STRLEN_P(data));
}
}
break;
}
case IS_LONG:
{
SBucketType* long_type = (SBucketType*) (buffer->buffer + p);
swoole_serialize_long(buffer, data, long_type);
break;
}
case IS_DOUBLE:
swoole_set_zend_value(buffer, &(data->value));
break;
case IS_REFERENCE:
data = Z_REFVAL_P(data);
((SBucketType*) (buffer->buffer + p))->data_type = Z_TYPE_P(data);
goto try_again;
break;
case IS_ARRAY:
{
zend_array *ht = Z_ARRVAL_P(data);
if (GC_IS_RECURSIVE(ht))
{
((SBucketType*) (buffer->buffer + p))->data_type = IS_NULL; //reset type null
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "the array has cycle ref");
}
else
{
seria_array_type(ht, buffer, p, buffer->offset);
if (ZEND_HASH_APPLY_PROTECTION(ht))
{
GC_PROTECT_RECURSION(ht);
swoole_serialize_arr(buffer, ht);
GC_UNPROTECT_RECURSION(ht);
}
else
{
swoole_serialize_arr(buffer, ht);
}
}
break;
}
//object propterty table is this type
case IS_INDIRECT:
data = Z_INDIRECT_P(data);
zend_uchar type = Z_TYPE_P(data);
((SBucketType*) (buffer->buffer + p))->data_type = (type == IS_UNDEF ? IS_NULL : type);
goto try_again;
break;
case IS_OBJECT:
{
/*
* layout
* type | key | namelen | name | bucket len |buckets
*/
((SBucketType*) (buffer->buffer + p))->data_type = IS_UNDEF;
if (ZEND_HASH_APPLY_PROTECTION(Z_OBJPROP_P(data)))
{
GC_PROTECT_RECURSION(Z_OBJPROP_P(data));
swoole_serialize_object(buffer, data, p);
GC_UNPROTECT_RECURSION(Z_OBJPROP_P(data));
}
else
{
swoole_serialize_object(buffer, data, p);
}
break;
}
default://
break;
}
}
ZEND_HASH_FOREACH_END();
}
| 0 |
[
"CWE-200",
"CWE-502"
] |
swoole-src
|
4cdbce5d9bf2fe596bb6acd7d6611f9e8c253a76
| 181,239,441,202,436,770,000,000,000,000,000,000,000 | 210 |
add buffer end check
|
getToken(FileInfo *file, CharsString *result, const char *description) {
/* Find the next string of contiguous non-whitespace characters. If this
* is the last token on the line, return 2 instead of 1. */
while (!atEndOfLine(file) && atTokenDelimiter(file)) file->linepos++;
result->length = 0;
while (!atEndOfLine(file) && !atTokenDelimiter(file)) {
int maxlen = MAXSTRING;
if (result->length >= maxlen) {
compileError(file, "more than %d characters (bytes)", maxlen);
return 0;
} else
result->chars[result->length++] = file->line[file->linepos++];
}
if (!result->length) {
/* Not enough tokens */
if (description) compileError(file, "%s not specified.", description);
return 0;
}
result->chars[result->length] = 0;
while (!atEndOfLine(file) && atTokenDelimiter(file)) file->linepos++;
return 1;
}
| 0 |
[
"CWE-787"
] |
liblouis
|
2e4772befb2b1c37cb4b9d6572945115ee28630a
| 33,140,742,340,182,010,000,000,000,000,000,000,000 | 22 |
Prevent an invalid memory writes in compileRule
Thanks to Han Zheng for reporting it
Fixes #1214
|
registerName(Buffer *buf, char *url, int line, int pos)
{
Anchor *a;
buf->name = putAnchor(buf->name, url, NULL, &a, NULL, NULL, '\0', line,
pos);
return a;
}
| 0 |
[
"CWE-119"
] |
w3m
|
4e464819dd360ffd3d58fa2a89216fe413cfcc74
| 226,710,096,985,358,230,000,000,000,000,000,000,000 | 7 |
Prevent segfault due to buffer overflows in addMultirowsForm
Bug-Debian: https://github.com/tats/w3m/issues/21
Bug-Debian: https://github.com/tats/w3m/issues/26
|
bool Alter_table_prelocking_strategy::
handle_table(THD *thd, Query_tables_list *prelocking_ctx,
TABLE_LIST *table_list, bool *need_prelocking)
{
return FALSE;
}
| 0 |
[] |
server
|
0168d1eda30dad4b517659422e347175eb89e923
| 67,367,610,433,123,450,000,000,000,000,000,000,000 | 6 |
MDEV-25766 Unused CTE lead to a crash in find_field_in_tables/find_order_in_list
Do not assume that subquery Item always present.
|
void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
{
/* Default values; may be overwritten as we are parsing the
* packets. */
crypt_stat->extent_size = ECRYPTFS_DEFAULT_EXTENT_SIZE;
set_extent_mask_and_shift(crypt_stat);
crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES;
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
crypt_stat->num_header_bytes_at_front = 0;
else {
if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
crypt_stat->num_header_bytes_at_front =
ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
else
crypt_stat->num_header_bytes_at_front = PAGE_CACHE_SIZE;
}
}
| 0 |
[
"CWE-189"
] |
linux-2.6
|
8faece5f906725c10e7a1f6caf84452abadbdc7b
| 98,452,490,623,481,420,000,000,000,000,000,000,000 | 17 |
eCryptfs: Allocate a variable number of pages for file headers
When allocating the memory used to store the eCryptfs header contents, a
single, zeroed page was being allocated with get_zeroed_page().
However, the size of an eCryptfs header is either PAGE_CACHE_SIZE or
ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE (8192), whichever is larger, and is
stored in the file's private_data->crypt_stat->num_header_bytes_at_front
field.
ecryptfs_write_metadata_to_contents() was using
num_header_bytes_at_front to decide how many bytes should be written to
the lower filesystem for the file header. Unfortunately, at least 8K
was being written from the page, despite the chance of the single,
zeroed page being smaller than 8K. This resulted in random areas of
kernel memory being written between the 0x1000 and 0x1FFF bytes offsets
in the eCryptfs file headers if PAGE_SIZE was 4K.
This patch allocates a variable number of pages, calculated with
num_header_bytes_at_front, and passes the number of allocated pages
along to ecryptfs_write_metadata_to_contents().
Thanks to Florian Streibelt for reporting the data leak and working with
me to find the problem. 2.6.28 is the only kernel release with this
vulnerability. Corresponds to CVE-2009-0787
Signed-off-by: Tyler Hicks <[email protected]>
Acked-by: Dustin Kirkland <[email protected]>
Reviewed-by: Eric Sandeen <[email protected]>
Reviewed-by: Eugene Teo <[email protected]>
Cc: Greg KH <[email protected]>
Cc: dann frazier <[email protected]>
Cc: Serge E. Hallyn <[email protected]>
Cc: Florian Streibelt <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]>
|
void SplashOutputDev::clearSoftMask(GfxState *state) {
splash->setSoftMask(nullptr);
}
| 0 |
[
"CWE-369"
] |
poppler
|
b224e2f5739fe61de9fa69955d016725b2a4b78d
| 40,439,110,486,535,377,000,000,000,000,000,000,000 | 3 |
SplashOutputDev::tilingPatternFill: Fix crash on broken file
Issue #802
|
static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
ext4_lblk_t end)
{
struct super_block *sb = inode->i_sb;
int depth = ext_depth(inode);
struct ext4_ext_path *path = NULL;
ext4_fsblk_t partial_cluster = 0;
handle_t *handle;
int i = 0, err = 0;
ext_debug("truncate since %u to %u\n", start, end);
/* probably first extent we're gonna free will be last in block */
handle = ext4_journal_start(inode, depth + 1);
if (IS_ERR(handle))
return PTR_ERR(handle);
again:
ext4_ext_invalidate_cache(inode);
trace_ext4_ext_remove_space(inode, start, depth);
/*
* Check if we are removing extents inside the extent tree. If that
* is the case, we are going to punch a hole inside the extent tree
* so we have to check whether we need to split the extent covering
* the last block to remove so we can easily remove the part of it
* in ext4_ext_rm_leaf().
*/
if (end < EXT_MAX_BLOCKS - 1) {
struct ext4_extent *ex;
ext4_lblk_t ee_block;
/* find extent for this block */
path = ext4_ext_find_extent(inode, end, NULL);
if (IS_ERR(path)) {
ext4_journal_stop(handle);
return PTR_ERR(path);
}
depth = ext_depth(inode);
/* Leaf not may not exist only if inode has no blocks at all */
ex = path[depth].p_ext;
if (!ex) {
if (depth) {
EXT4_ERROR_INODE(inode,
"path[%d].p_hdr == NULL",
depth);
err = -EIO;
}
goto out;
}
ee_block = le32_to_cpu(ex->ee_block);
/*
* See if the last block is inside the extent, if so split
* the extent at 'end' block so we can easily remove the
* tail of the first part of the split extent in
* ext4_ext_rm_leaf().
*/
if (end >= ee_block &&
end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
int split_flag = 0;
if (ext4_ext_is_uninitialized(ex))
split_flag = EXT4_EXT_MARK_UNINIT1 |
EXT4_EXT_MARK_UNINIT2;
/*
* Split the extent in two so that 'end' is the last
* block in the first new extent
*/
err = ext4_split_extent_at(handle, inode, path,
end + 1, split_flag,
EXT4_GET_BLOCKS_PRE_IO |
EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
if (err < 0)
goto out;
}
}
/*
* We start scanning from right side, freeing all the blocks
* after i_size and walking into the tree depth-wise.
*/
depth = ext_depth(inode);
if (path) {
int k = i = depth;
while (--k > 0)
path[k].p_block =
le16_to_cpu(path[k].p_hdr->eh_entries)+1;
} else {
path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
GFP_NOFS);
if (path == NULL) {
ext4_journal_stop(handle);
return -ENOMEM;
}
path[0].p_depth = depth;
path[0].p_hdr = ext_inode_hdr(inode);
i = 0;
if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
err = -EIO;
goto out;
}
}
err = 0;
while (i >= 0 && err == 0) {
if (i == depth) {
/* this is leaf block */
err = ext4_ext_rm_leaf(handle, inode, path,
&partial_cluster, start,
end);
/* root level has p_bh == NULL, brelse() eats this */
brelse(path[i].p_bh);
path[i].p_bh = NULL;
i--;
continue;
}
/* this is index block */
if (!path[i].p_hdr) {
ext_debug("initialize header\n");
path[i].p_hdr = ext_block_hdr(path[i].p_bh);
}
if (!path[i].p_idx) {
/* this level hasn't been touched yet */
path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
ext_debug("init index ptr: hdr 0x%p, num %d\n",
path[i].p_hdr,
le16_to_cpu(path[i].p_hdr->eh_entries));
} else {
/* we were already here, see at next index */
path[i].p_idx--;
}
ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
i, EXT_FIRST_INDEX(path[i].p_hdr),
path[i].p_idx);
if (ext4_ext_more_to_rm(path + i)) {
struct buffer_head *bh;
/* go to the next level */
ext_debug("move to level %d (block %llu)\n",
i + 1, ext4_idx_pblock(path[i].p_idx));
memset(path + i + 1, 0, sizeof(*path));
bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
if (!bh) {
/* should we reset i_size? */
err = -EIO;
break;
}
if (WARN_ON(i + 1 > depth)) {
err = -EIO;
break;
}
if (ext4_ext_check_block(inode, ext_block_hdr(bh),
depth - i - 1, bh)) {
err = -EIO;
break;
}
path[i + 1].p_bh = bh;
/* save actual number of indexes since this
* number is changed at the next iteration */
path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
i++;
} else {
/* we finished processing this index, go up */
if (path[i].p_hdr->eh_entries == 0 && i > 0) {
/* index is empty, remove it;
* handle must be already prepared by the
* truncatei_leaf() */
err = ext4_ext_rm_idx(handle, inode, path + i);
}
/* root level has p_bh == NULL, brelse() eats this */
brelse(path[i].p_bh);
path[i].p_bh = NULL;
i--;
ext_debug("return to level %d\n", i);
}
}
trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
path->p_hdr->eh_entries);
/* If we still have something in the partial cluster and we have removed
* even the first extent, then we should free the blocks in the partial
* cluster as well. */
if (partial_cluster && path->p_hdr->eh_entries == 0) {
int flags = EXT4_FREE_BLOCKS_FORGET;
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
flags |= EXT4_FREE_BLOCKS_METADATA;
ext4_free_blocks(handle, inode, NULL,
EXT4_C2B(EXT4_SB(sb), partial_cluster),
EXT4_SB(sb)->s_cluster_ratio, flags);
partial_cluster = 0;
}
/* TODO: flexible tree reduction should be here */
if (path->p_hdr->eh_entries == 0) {
/*
* truncate to zero freed all the tree,
* so we need to correct eh_depth
*/
err = ext4_ext_get_access(handle, inode, path);
if (err == 0) {
ext_inode_hdr(inode)->eh_depth = 0;
ext_inode_hdr(inode)->eh_max =
cpu_to_le16(ext4_ext_space_root(inode, 0));
err = ext4_ext_dirty(handle, inode, path);
}
}
out:
ext4_ext_drop_refs(path);
kfree(path);
if (err == -EAGAIN) {
path = NULL;
goto again;
}
ext4_journal_stop(handle);
return err;
}
| 0 |
[
"CWE-362"
] |
linux-2.6
|
dee1f973ca341c266229faa5a1a5bb268bed3531
| 310,262,822,421,803,400,000,000,000,000,000,000,000 | 229 |
ext4: race-condition protection for ext4_convert_unwritten_extents_endio
We assumed that at the time we call ext4_convert_unwritten_extents_endio()
extent in question is fully inside [map.m_lblk, map->m_len] because
it was already split during submission. But this may not be true due to
a race between writeback vs fallocate.
If extent in question is larger than requested we will split it again.
Special precautions should being done if zeroout required because
[map.m_lblk, map->m_len] already contains valid data.
Signed-off-by: Dmitry Monakhov <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
Cc: [email protected]
|
static uint kill_threads_for_user(THD *thd, LEX_USER *user,
killed_state kill_signal, ha_rows *rows)
{
THD *tmp;
List<THD> threads_to_kill;
DBUG_ENTER("kill_threads_for_user");
*rows= 0;
if (thd->is_fatal_error) // If we run out of memory
DBUG_RETURN(ER_OUT_OF_RESOURCES);
DBUG_PRINT("enter", ("user: %s signal: %u", user->user.str,
(uint) kill_signal));
mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
I_List_iterator<THD> it(threads);
while ((tmp=it++))
{
if (!tmp->security_ctx->user)
continue;
/*
Check that hostname (if given) and user name matches.
host.str[0] == '%' means that host name was not given. See sql_yacc.yy
*/
if (((user->host.str[0] == '%' && !user->host.str[1]) ||
!strcmp(tmp->security_ctx->host_or_ip, user->host.str)) &&
!strcmp(tmp->security_ctx->user, user->user.str))
{
if (!(thd->security_ctx->master_access & SUPER_ACL) &&
!thd->security_ctx->user_matches(tmp->security_ctx))
{
mysql_mutex_unlock(&LOCK_thread_count);
DBUG_RETURN(ER_KILL_DENIED_ERROR);
}
if (!threads_to_kill.push_back(tmp, thd->mem_root))
mysql_mutex_lock(&tmp->LOCK_thd_data); // Lock from delete
}
}
mysql_mutex_unlock(&LOCK_thread_count);
if (!threads_to_kill.is_empty())
{
List_iterator_fast<THD> it2(threads_to_kill);
THD *next_ptr;
THD *ptr= it2++;
do
{
ptr->awake(kill_signal);
/*
Careful here: The list nodes are allocated on the memroots of the
THDs to be awakened.
But those THDs may be terminated and deleted as soon as we release
LOCK_thd_data, which will make the list nodes invalid.
Since the operation "it++" dereferences the "next" pointer of the
previous list node, we need to do this while holding LOCK_thd_data.
*/
next_ptr= it2++;
mysql_mutex_unlock(&ptr->LOCK_thd_data);
(*rows)++;
} while ((ptr= next_ptr));
}
DBUG_RETURN(0);
}
| 0 |
[] |
server
|
ba4927e520190bbad763bb5260ae154f29a61231
| 130,828,446,044,702,830,000,000,000,000,000,000,000 | 64 |
MDEV-19398: Assertion `item1->type() == Item::FIELD_ITEM ...
Window Functions code tries to minimize the number of times it
needs to sort the select's resultset by finding "compatible"
OVER (PARTITION BY ... ORDER BY ...) clauses.
This employs compare_order_elements(). That function assumed that
the order expressions are Item_field-derived objects (that refer
to a temp.table). But this is not always the case: one can
construct queries order expressions are arbitrary item expressions.
Add handling for such expressions: sort them according to the window
specification they appeared in.
This means we cannot detect that two compatible PARTITION BY clauses
that use expressions can share the sorting step.
But at least we won't crash.
|
static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
struct nlm_lockowner *lockowner;
list_for_each_entry(lockowner, &host->h_lockowners, list) {
if (lockowner->pid == pid)
return -EBUSY;
}
return 0;
}
| 0 |
[
"CWE-400",
"CWE-399",
"CWE-703"
] |
linux
|
0b760113a3a155269a3fba93a409c640031dd68f
| 307,762,711,689,517,730,000,000,000,000,000,000,000 | 9 |
NLM: Don't hang forever on NLM unlock requests
If the NLM daemon is killed on the NFS server, we can currently end up
hanging forever on an 'unlock' request, instead of aborting. Basically,
if the rpcbind request fails, or the server keeps returning garbage, we
really want to quit instead of retrying.
Tested-by: Vasily Averin <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
Cc: [email protected]
|
vte_sequence_handler_erase_in_display (VteTerminal *terminal, GValueArray *params)
{
GValue *value;
long param;
guint i;
/* The default parameter is 0. */
param = 0;
/* Pull out a parameter. */
for (i = 0; (params != NULL) && (i < params->n_values); i++) {
value = g_value_array_get_nth(params, i);
if (!G_VALUE_HOLDS_LONG(value)) {
continue;
}
param = g_value_get_long(value);
}
/* Clear the right area. */
switch (param) {
case 0:
/* Clear below the current line. */
vte_sequence_handler_cd (terminal, NULL);
break;
case 1:
/* Clear above the current line. */
_vte_terminal_clear_above_current (terminal);
/* Clear everything to the left of the cursor, too. */
/* FIXME: vttest. */
vte_sequence_handler_cb (terminal, NULL);
break;
case 2:
/* Clear the entire screen. */
_vte_terminal_clear_screen (terminal);
break;
default:
break;
}
/* We've modified the display. Make a note of it. */
terminal->pvt->text_deleted_flag = TRUE;
}
| 0 |
[] |
vte
|
58bc3a942f198a1a8788553ca72c19d7c1702b74
| 181,254,729,514,457,100,000,000,000,000,000,000,000 | 38 |
fix bug #548272
svn path=/trunk/; revision=2365
|
cleanup_dead_jobs ()
{
register int i;
int os;
if (js.j_jobslots == 0 || jobs_list_frozen)
return;
QUEUE_SIGCHLD(os);
/* XXX could use js.j_firstj and js.j_lastj here */
for (i = 0; i < js.j_jobslots; i++)
{
#if defined (DEBUG)
if (i < js.j_firstj && jobs[i])
itrace("cleanup_dead_jobs: job %d non-null before js.j_firstj (%d)", i, js.j_firstj);
if (i > js.j_lastj && jobs[i])
itrace("cleanup_dead_jobs: job %d non-null after js.j_lastj (%d)", i, js.j_lastj);
#endif
if (jobs[i] && DEADJOB (i) && IS_NOTIFIED (i))
delete_job (i, 0);
}
#if defined (PROCESS_SUBSTITUTION)
if (last_procsub_child && last_procsub_child->running == PS_DONE)
{
bgp_add (last_procsub_child->pid, process_exit_status (last_procsub_child->status)); /* XXX */
discard_pipeline (last_procsub_child);
last_procsub_child = (PROCESS *)NULL;
}
#endif
#if defined (COPROCESS_SUPPORT)
coproc_reap ();
#endif
UNQUEUE_SIGCHLD(os);
}
| 0 |
[] |
bash
|
955543877583837c85470f7fb8a97b7aa8d45e6c
| 47,476,331,109,334,190,000,000,000,000,000,000,000 | 39 |
bash-4.4-rc2 release
|
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
unsigned char fl_flags = request->fl_flags;
int status;
/* Is this a delegated open? */
status = nfs4_set_lock_state(state, request);
if (status != 0)
goto out;
request->fl_flags |= FL_ACCESS;
status = do_vfs_lock(request->fl_file, request);
if (status < 0)
goto out;
down_read(&nfsi->rwsem);
if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
/* Yes: cache locks! */
/* ...but avoid races with delegation recall... */
request->fl_flags = fl_flags & ~FL_SLEEP;
status = do_vfs_lock(request->fl_file, request);
goto out_unlock;
}
status = _nfs4_do_setlk(state, cmd, request, 0);
if (status != 0)
goto out_unlock;
/* Note: we always want to sleep here! */
request->fl_flags = fl_flags | FL_SLEEP;
if (do_vfs_lock(request->fl_file, request) < 0)
printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
out_unlock:
up_read(&nfsi->rwsem);
out:
request->fl_flags = fl_flags;
return status;
}
| 0 |
[
"CWE-703"
] |
linux
|
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
| 27,450,878,828,983,204,000,000,000,000,000,000,000 | 35 |
NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]>
|
static int get_line(JournalImporter *imp, char **line, size_t *size) {
ssize_t n;
char *c = NULL;
assert(imp);
assert(imp->state == IMPORTER_STATE_LINE);
assert(imp->offset <= imp->filled);
assert(imp->filled <= imp->size);
assert(!imp->buf || imp->size > 0);
assert(imp->fd >= 0);
for (;;) {
if (imp->buf) {
size_t start = MAX(imp->scanned, imp->offset);
c = memchr(imp->buf + start, '\n',
imp->filled - start);
if (c != NULL)
break;
}
imp->scanned = imp->filled;
if (imp->scanned >= DATA_SIZE_MAX)
return log_error_errno(SYNTHETIC_ERRNO(ENOBUFS),
"Entry is bigger than %u bytes.",
DATA_SIZE_MAX);
if (imp->passive_fd)
/* we have to wait for some data to come to us */
return -EAGAIN;
/* We know that imp->filled is at most DATA_SIZE_MAX, so if
we reallocate it, we'll increase the size at least a bit. */
assert_cc(DATA_SIZE_MAX < ENTRY_SIZE_MAX);
if (imp->size - imp->filled < LINE_CHUNK &&
!realloc_buffer(imp, MIN(imp->filled + LINE_CHUNK, ENTRY_SIZE_MAX)))
return log_oom();
assert(imp->buf);
assert(imp->size - imp->filled >= LINE_CHUNK ||
imp->size == ENTRY_SIZE_MAX);
n = read(imp->fd,
imp->buf + imp->filled,
imp->size - imp->filled);
if (n < 0) {
if (errno != EAGAIN)
log_error_errno(errno, "read(%d, ..., %zu): %m",
imp->fd,
imp->size - imp->filled);
return -errno;
} else if (n == 0)
return 0;
imp->filled += n;
}
*line = imp->buf + imp->offset;
*size = c + 1 - imp->buf - imp->offset;
imp->offset += *size;
return 1;
}
| 0 |
[
"CWE-770"
] |
systemd
|
ef4d6abe7c7fab6cbff975b32e76b09feee56074
| 289,716,361,449,774,160,000,000,000,000,000,000,000 | 63 |
journal-remote: set a limit on the number of fields in a message
Existing use of E2BIG is replaced with ENOBUFS (entry too long), and E2BIG is
reused for the new error condition (too many fields).
This matches the change done for systemd-journald, hence forming the second
part of the fix for CVE-2018-16865
(https://bugzilla.redhat.com/show_bug.cgi?id=1653861).
|
/* Must be called with bfqq != NULL */
static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
{
if (bfq_bfqq_busy(bfqq))
bfqq->bfqd->wr_busy_queues--;
bfqq->wr_coeff = 1;
bfqq->wr_cur_max_time = 0;
bfqq->last_wr_start_finish = jiffies;
/*
* Trigger a weight change on the next invocation of
* __bfq_entity_update_weight_prio.
*/
bfqq->entity.prio_changed = 1;
| 0 |
[
"CWE-416"
] |
linux
|
2f95fa5c955d0a9987ffdc3a095e2f4e62c5f2a9
| 48,109,608,924,822,960,000,000,000,000,000,000,000 | 13 |
block, bfq: fix use-after-free in bfq_idle_slice_timer_body
In bfq_idle_slice_timer func, bfqq = bfqd->in_service_queue is
not in bfqd-lock critical section. The bfqq, which is not
equal to NULL in bfq_idle_slice_timer, may be freed after passing
to bfq_idle_slice_timer_body. So we will access the freed memory.
In addition, considering the bfqq may be in race, we should
firstly check whether bfqq is in service before doing something
on it in bfq_idle_slice_timer_body func. If the bfqq in race is
not in service, it means the bfqq has been expired through
__bfq_bfqq_expire func, and wait_request flags has been cleared in
__bfq_bfqd_reset_in_service func. So we do not need to re-clear the
wait_request of bfqq which is not in service.
KASAN log is given as follows:
[13058.354613] ==================================================================
[13058.354640] BUG: KASAN: use-after-free in bfq_idle_slice_timer+0xac/0x290
[13058.354644] Read of size 8 at addr ffffa02cf3e63f78 by task fork13/19767
[13058.354646]
[13058.354655] CPU: 96 PID: 19767 Comm: fork13
[13058.354661] Call trace:
[13058.354667] dump_backtrace+0x0/0x310
[13058.354672] show_stack+0x28/0x38
[13058.354681] dump_stack+0xd8/0x108
[13058.354687] print_address_description+0x68/0x2d0
[13058.354690] kasan_report+0x124/0x2e0
[13058.354697] __asan_load8+0x88/0xb0
[13058.354702] bfq_idle_slice_timer+0xac/0x290
[13058.354707] __hrtimer_run_queues+0x298/0x8b8
[13058.354710] hrtimer_interrupt+0x1b8/0x678
[13058.354716] arch_timer_handler_phys+0x4c/0x78
[13058.354722] handle_percpu_devid_irq+0xf0/0x558
[13058.354731] generic_handle_irq+0x50/0x70
[13058.354735] __handle_domain_irq+0x94/0x110
[13058.354739] gic_handle_irq+0x8c/0x1b0
[13058.354742] el1_irq+0xb8/0x140
[13058.354748] do_wp_page+0x260/0xe28
[13058.354752] __handle_mm_fault+0x8ec/0x9b0
[13058.354756] handle_mm_fault+0x280/0x460
[13058.354762] do_page_fault+0x3ec/0x890
[13058.354765] do_mem_abort+0xc0/0x1b0
[13058.354768] el0_da+0x24/0x28
[13058.354770]
[13058.354773] Allocated by task 19731:
[13058.354780] kasan_kmalloc+0xe0/0x190
[13058.354784] kasan_slab_alloc+0x14/0x20
[13058.354788] kmem_cache_alloc_node+0x130/0x440
[13058.354793] bfq_get_queue+0x138/0x858
[13058.354797] bfq_get_bfqq_handle_split+0xd4/0x328
[13058.354801] bfq_init_rq+0x1f4/0x1180
[13058.354806] bfq_insert_requests+0x264/0x1c98
[13058.354811] blk_mq_sched_insert_requests+0x1c4/0x488
[13058.354818] blk_mq_flush_plug_list+0x2d4/0x6e0
[13058.354826] blk_flush_plug_list+0x230/0x548
[13058.354830] blk_finish_plug+0x60/0x80
[13058.354838] read_pages+0xec/0x2c0
[13058.354842] __do_page_cache_readahead+0x374/0x438
[13058.354846] ondemand_readahead+0x24c/0x6b0
[13058.354851] page_cache_sync_readahead+0x17c/0x2f8
[13058.354858] generic_file_buffered_read+0x588/0xc58
[13058.354862] generic_file_read_iter+0x1b4/0x278
[13058.354965] ext4_file_read_iter+0xa8/0x1d8 [ext4]
[13058.354972] __vfs_read+0x238/0x320
[13058.354976] vfs_read+0xbc/0x1c0
[13058.354980] ksys_read+0xdc/0x1b8
[13058.354984] __arm64_sys_read+0x50/0x60
[13058.354990] el0_svc_common+0xb4/0x1d8
[13058.354994] el0_svc_handler+0x50/0xa8
[13058.354998] el0_svc+0x8/0xc
[13058.354999]
[13058.355001] Freed by task 19731:
[13058.355007] __kasan_slab_free+0x120/0x228
[13058.355010] kasan_slab_free+0x10/0x18
[13058.355014] kmem_cache_free+0x288/0x3f0
[13058.355018] bfq_put_queue+0x134/0x208
[13058.355022] bfq_exit_icq_bfqq+0x164/0x348
[13058.355026] bfq_exit_icq+0x28/0x40
[13058.355030] ioc_exit_icq+0xa0/0x150
[13058.355035] put_io_context_active+0x250/0x438
[13058.355038] exit_io_context+0xd0/0x138
[13058.355045] do_exit+0x734/0xc58
[13058.355050] do_group_exit+0x78/0x220
[13058.355054] __wake_up_parent+0x0/0x50
[13058.355058] el0_svc_common+0xb4/0x1d8
[13058.355062] el0_svc_handler+0x50/0xa8
[13058.355066] el0_svc+0x8/0xc
[13058.355067]
[13058.355071] The buggy address belongs to the object at ffffa02cf3e63e70#012 which belongs to the cache bfq_queue of size 464
[13058.355075] The buggy address is located 264 bytes inside of#012 464-byte region [ffffa02cf3e63e70, ffffa02cf3e64040)
[13058.355077] The buggy address belongs to the page:
[13058.355083] page:ffff7e80b3cf9800 count:1 mapcount:0 mapping:ffff802db5c90780 index:0xffffa02cf3e606f0 compound_mapcount: 0
[13058.366175] flags: 0x2ffffe0000008100(slab|head)
[13058.370781] raw: 2ffffe0000008100 ffff7e80b53b1408 ffffa02d730c1c90 ffff802db5c90780
[13058.370787] raw: ffffa02cf3e606f0 0000000000370023 00000001ffffffff 0000000000000000
[13058.370789] page dumped because: kasan: bad access detected
[13058.370791]
[13058.370792] Memory state around the buggy address:
[13058.370797] ffffa02cf3e63e00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fb fb
[13058.370801] ffffa02cf3e63e80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[13058.370805] >ffffa02cf3e63f00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[13058.370808] ^
[13058.370811] ffffa02cf3e63f80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[13058.370815] ffffa02cf3e64000: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
[13058.370817] ==================================================================
[13058.370820] Disabling lock debugging due to kernel taint
Here, we directly pass the bfqd to bfq_idle_slice_timer_body func.
--
V2->V3: rewrite the comment as suggested by Paolo Valente
V1->V2: add one comment, and add Fixes and Reported-by tag.
Fixes: aee69d78d ("block, bfq: introduce the BFQ-v0 I/O scheduler as an extra scheduler")
Acked-by: Paolo Valente <[email protected]>
Reported-by: Wang Wang <[email protected]>
Signed-off-by: Zhiqiang Liu <[email protected]>
Signed-off-by: Feilong Lin <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
ModuleExport void UnregisterCUTImage(void)
{
(void) UnregisterMagickInfo("CUT");
}
| 0 |
[
"CWE-787"
] |
ImageMagick
|
cc4ac341f29fa368da6ef01c207deaf8c61f6a2e
| 14,394,715,038,966,709,000,000,000,000,000,000,000 | 4 |
https://github.com/ImageMagick/ImageMagick/issues/1162
|
void Inspect::operator()(Arguments_Ptr a)
{
append_string("(");
if (!a->empty()) {
(*a)[0]->perform(this);
for (size_t i = 1, L = a->length(); i < L; ++i) {
append_string(", "); // verified
// Sass Bug? append_comma_separator();
(*a)[i]->perform(this);
}
}
append_string(")");
}
| 0 |
[
"CWE-476"
] |
libsass
|
38f4c3699d06b64128bebc7cf1e8b3125be74dc4
| 313,144,907,110,220,200,000,000,000,000,000,000,000 | 13 |
Fix possible bug with handling empty reference combinators
Fixes #2665
|
const EVP_MD *tls12_get_hash(unsigned char hash_alg)
{
const tls12_hash_info *inf;
#ifndef OPENSSL_FIPS
if (hash_alg == TLSEXT_hash_md5 && FIPS_mode())
return NULL;
#endif
inf = tls12_get_hash_info(hash_alg);
if (!inf || !inf->mfunc)
return NULL;
return inf->mfunc();
}
| 0 |
[] |
openssl
|
80bd7b41b30af6ee96f519e629463583318de3b0
| 217,230,975,512,929,430,000,000,000,000,000,000,000 | 12 |
Fix SRP ciphersuite DoS vulnerability.
If a client attempted to use an SRP ciphersuite and it had not been
set up correctly it would crash with a null pointer read. A malicious
server could exploit this in a DoS attack.
Thanks to Joonas Kuorilehto and Riku Hietamäki from Codenomicon
for reporting this issue.
CVE-2014-2970
Reviewed-by: Tim Hudson <[email protected]>
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.