CVE ID (string, 13–43 chars, nullable) | CVE Page (string, 45–48 chars, nullable) | CWE ID (90 classes) | codeLink (string, 46–139 chars) | commit_id (string, 6–81 chars) | commit_message (string, 3–13.3k chars, nullable) | func_after (string, 14–241k chars) | func_before (string, 14–241k chars) | lang (3 classes) | project (309 classes) | vul (int8: 0 or 1)
---|---|---|---|---|---|---|---|---|---|---
CVE-2016-7411
|
https://www.cvedetails.com/cve/CVE-2016-7411/
|
CWE-119
|
https://github.com/php/php-src/commit/6a7cc8ff85827fa9ac715b3a83c2d9147f33cd43?w=1
|
6a7cc8ff85827fa9ac715b3a83c2d9147f33cd43
|
Fix bug #73052 - Memory Corruption in During Deserialized-object Destruction
|
PHPAPI int php_var_unserialize(UNSERIALIZE_PARAMETER)
{
const unsigned char *cursor, *limit, *marker, *start;
zval **rval_ref;
limit = max;
cursor = *p;
if (YYCURSOR >= YYLIMIT) {
return 0;
}
if (var_hash && cursor[0] != 'R') {
var_push(var_hash, rval);
}
start = cursor;
#line 496 "ext/standard/var_unserializer.c"
{
YYCTYPE yych;
static const unsigned char yybm[] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
};
if ((YYLIMIT - YYCURSOR) < 7) YYFILL(7);
yych = *YYCURSOR;
switch (yych) {
case 'C':
case 'O': goto yy13;
case 'N': goto yy5;
case 'R': goto yy2;
case 'S': goto yy10;
case 'a': goto yy11;
case 'b': goto yy6;
case 'd': goto yy8;
case 'i': goto yy7;
case 'o': goto yy12;
case 'r': goto yy4;
case 's': goto yy9;
case '}': goto yy14;
default: goto yy16;
}
yy2:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy95;
yy3:
#line 861 "ext/standard/var_unserializer.re"
{ return 0; }
#line 558 "ext/standard/var_unserializer.c"
yy4:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy89;
goto yy3;
yy5:
yych = *++YYCURSOR;
if (yych == ';') goto yy87;
goto yy3;
yy6:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy83;
goto yy3;
yy7:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy77;
goto yy3;
yy8:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy53;
goto yy3;
yy9:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy46;
goto yy3;
yy10:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy39;
goto yy3;
yy11:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy32;
goto yy3;
yy12:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy25;
goto yy3;
yy13:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy17;
goto yy3;
yy14:
++YYCURSOR;
#line 855 "ext/standard/var_unserializer.re"
{
/* this is the case where we have less data than planned */
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Unexpected end of serialized data");
return 0; /* not sure if it should be 0 or 1 here? */
}
#line 607 "ext/standard/var_unserializer.c"
yy16:
yych = *++YYCURSOR;
goto yy3;
yy17:
yych = *++YYCURSOR;
if (yybm[0+yych] & 128) {
goto yy20;
}
if (yych == '+') goto yy19;
yy18:
YYCURSOR = YYMARKER;
goto yy3;
yy19:
yych = *++YYCURSOR;
if (yybm[0+yych] & 128) {
goto yy20;
}
goto yy18;
yy20:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yybm[0+yych] & 128) {
goto yy20;
}
if (yych <= '/') goto yy18;
if (yych >= ';') goto yy18;
yych = *++YYCURSOR;
if (yych != '"') goto yy18;
++YYCURSOR;
#line 708 "ext/standard/var_unserializer.re"
{
size_t len, len2, len3, maxlen;
long elements;
char *class_name;
zend_class_entry *ce;
zend_class_entry **pce;
int incomplete_class = 0;
int custom_object = 0;
zval *user_func;
zval *retval_ptr;
zval **args[1];
zval *arg_func_name;
if (!var_hash) return 0;
if (*start == 'C') {
custom_object = 1;
}
INIT_PZVAL(*rval);
len2 = len = parse_uiv(start + 2);
maxlen = max - YYCURSOR;
if (maxlen < len || len == 0) {
*p = start + 2;
return 0;
}
class_name = (char*)YYCURSOR;
YYCURSOR += len;
if (*(YYCURSOR) != '"') {
*p = YYCURSOR;
return 0;
}
if (*(YYCURSOR+1) != ':') {
*p = YYCURSOR+1;
return 0;
}
len3 = strspn(class_name, "0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377\\");
if (len3 != len)
{
*p = YYCURSOR + len3 - len;
return 0;
}
class_name = estrndup(class_name, len);
do {
/* Try to find class directly */
BG(serialize_lock)++;
if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) {
BG(serialize_lock)--;
if (EG(exception)) {
efree(class_name);
return 0;
}
ce = *pce;
break;
}
BG(serialize_lock)--;
if (EG(exception)) {
efree(class_name);
return 0;
}
/* Check for unserialize callback */
if ((PG(unserialize_callback_func) == NULL) || (PG(unserialize_callback_func)[0] == '\0')) {
incomplete_class = 1;
ce = PHP_IC_ENTRY;
break;
}
/* Call unserialize callback */
MAKE_STD_ZVAL(user_func);
ZVAL_STRING(user_func, PG(unserialize_callback_func), 1);
args[0] = &arg_func_name;
MAKE_STD_ZVAL(arg_func_name);
ZVAL_STRING(arg_func_name, class_name, 1);
BG(serialize_lock)++;
if (call_user_function_ex(CG(function_table), NULL, user_func, &retval_ptr, 1, args, 0, NULL TSRMLS_CC) != SUCCESS) {
BG(serialize_lock)--;
if (EG(exception)) {
efree(class_name);
zval_ptr_dtor(&user_func);
zval_ptr_dtor(&arg_func_name);
return 0;
}
php_error_docref(NULL TSRMLS_CC, E_WARNING, "defined (%s) but not found", user_func->value.str.val);
incomplete_class = 1;
ce = PHP_IC_ENTRY;
zval_ptr_dtor(&user_func);
zval_ptr_dtor(&arg_func_name);
break;
}
BG(serialize_lock)--;
if (retval_ptr) {
zval_ptr_dtor(&retval_ptr);
}
if (EG(exception)) {
efree(class_name);
zval_ptr_dtor(&user_func);
zval_ptr_dtor(&arg_func_name);
return 0;
}
/* The callback function may have defined the class */
if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) {
ce = *pce;
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Function %s() hasn't defined the class it was called for", user_func->value.str.val);
incomplete_class = 1;
ce = PHP_IC_ENTRY;
}
zval_ptr_dtor(&user_func);
zval_ptr_dtor(&arg_func_name);
break;
} while (1);
*p = YYCURSOR;
if (custom_object) {
int ret;
ret = object_custom(UNSERIALIZE_PASSTHRU, ce);
if (ret && incomplete_class) {
php_store_class_name(*rval, class_name, len2);
}
efree(class_name);
return ret;
}
elements = object_common1(UNSERIALIZE_PASSTHRU, ce);
if (incomplete_class) {
php_store_class_name(*rval, class_name, len2);
}
efree(class_name);
return object_common2(UNSERIALIZE_PASSTHRU, elements);
}
#line 785 "ext/standard/var_unserializer.c"
yy25:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy26;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy27;
goto yy18;
}
yy26:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy27:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy27;
if (yych >= ';') goto yy18;
yych = *++YYCURSOR;
if (yych != '"') goto yy18;
++YYCURSOR;
#line 699 "ext/standard/var_unserializer.re"
{
if (!var_hash) return 0;
INIT_PZVAL(*rval);
return object_common2(UNSERIALIZE_PASSTHRU,
object_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR));
}
#line 819 "ext/standard/var_unserializer.c"
yy32:
yych = *++YYCURSOR;
if (yych == '+') goto yy33;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy34;
goto yy18;
yy33:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy34:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy34;
if (yych >= ';') goto yy18;
yych = *++YYCURSOR;
if (yych != '{') goto yy18;
++YYCURSOR;
#line 678 "ext/standard/var_unserializer.re"
{
long elements = parse_iv(start + 2);
/* use iv() not uiv() in order to check data range */
*p = YYCURSOR;
if (!var_hash) return 0;
if (elements < 0) {
return 0;
}
INIT_PZVAL(*rval);
array_init_size(*rval, elements);
if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_ARRVAL_PP(rval), elements, 0)) {
return 0;
}
return finish_nested_data(UNSERIALIZE_PASSTHRU);
}
#line 861 "ext/standard/var_unserializer.c"
yy39:
yych = *++YYCURSOR;
if (yych == '+') goto yy40;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy41;
goto yy18;
yy40:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy41:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy41;
if (yych >= ';') goto yy18;
yych = *++YYCURSOR;
if (yych != '"') goto yy18;
++YYCURSOR;
#line 643 "ext/standard/var_unserializer.re"
{
size_t len, maxlen;
char *str;
len = parse_uiv(start + 2);
maxlen = max - YYCURSOR;
if (maxlen < len) {
*p = start + 2;
return 0;
}
if ((str = unserialize_str(&YYCURSOR, &len, maxlen)) == NULL) {
return 0;
}
if (*(YYCURSOR) != '"') {
efree(str);
*p = YYCURSOR;
return 0;
}
if (*(YYCURSOR + 1) != ';') {
efree(str);
*p = YYCURSOR + 1;
return 0;
}
YYCURSOR += 2;
*p = YYCURSOR;
INIT_PZVAL(*rval);
ZVAL_STRINGL(*rval, str, len, 0);
return 1;
}
#line 917 "ext/standard/var_unserializer.c"
yy46:
yych = *++YYCURSOR;
if (yych == '+') goto yy47;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy48;
goto yy18;
yy47:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy48:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy48;
if (yych >= ';') goto yy18;
yych = *++YYCURSOR;
if (yych != '"') goto yy18;
++YYCURSOR;
#line 610 "ext/standard/var_unserializer.re"
{
size_t len, maxlen;
char *str;
len = parse_uiv(start + 2);
maxlen = max - YYCURSOR;
if (maxlen < len) {
*p = start + 2;
return 0;
}
str = (char*)YYCURSOR;
YYCURSOR += len;
if (*(YYCURSOR) != '"') {
*p = YYCURSOR;
return 0;
}
if (*(YYCURSOR + 1) != ';') {
*p = YYCURSOR + 1;
return 0;
}
YYCURSOR += 2;
*p = YYCURSOR;
INIT_PZVAL(*rval);
ZVAL_STRINGL(*rval, str, len, 1);
return 1;
}
#line 971 "ext/standard/var_unserializer.c"
yy53:
yych = *++YYCURSOR;
if (yych <= '/') {
if (yych <= ',') {
if (yych == '+') goto yy57;
goto yy18;
} else {
if (yych <= '-') goto yy55;
if (yych <= '.') goto yy60;
goto yy18;
}
} else {
if (yych <= 'I') {
if (yych <= '9') goto yy58;
if (yych <= 'H') goto yy18;
goto yy56;
} else {
if (yych != 'N') goto yy18;
}
}
yych = *++YYCURSOR;
if (yych == 'A') goto yy76;
goto yy18;
yy55:
yych = *++YYCURSOR;
if (yych <= '/') {
if (yych == '.') goto yy60;
goto yy18;
} else {
if (yych <= '9') goto yy58;
if (yych != 'I') goto yy18;
}
yy56:
yych = *++YYCURSOR;
if (yych == 'N') goto yy72;
goto yy18;
yy57:
yych = *++YYCURSOR;
if (yych == '.') goto yy60;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy58:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4);
yych = *YYCURSOR;
if (yych <= ':') {
if (yych <= '.') {
if (yych <= '-') goto yy18;
goto yy70;
} else {
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy58;
goto yy18;
}
} else {
if (yych <= 'E') {
if (yych <= ';') goto yy63;
if (yych <= 'D') goto yy18;
goto yy65;
} else {
if (yych == 'e') goto yy65;
goto yy18;
}
}
yy60:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy61:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4);
yych = *YYCURSOR;
if (yych <= ';') {
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy61;
if (yych <= ':') goto yy18;
} else {
if (yych <= 'E') {
if (yych <= 'D') goto yy18;
goto yy65;
} else {
if (yych == 'e') goto yy65;
goto yy18;
}
}
yy63:
++YYCURSOR;
#line 600 "ext/standard/var_unserializer.re"
{
#if SIZEOF_LONG == 4
use_double:
#endif
*p = YYCURSOR;
INIT_PZVAL(*rval);
ZVAL_DOUBLE(*rval, zend_strtod((const char *)start + 2, NULL));
return 1;
}
#line 1069 "ext/standard/var_unserializer.c"
yy65:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy66;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy67;
goto yy18;
}
yy66:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych == '+') goto yy69;
goto yy18;
} else {
if (yych <= '-') goto yy69;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
}
yy67:
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy67;
if (yych == ';') goto yy63;
goto yy18;
yy69:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy67;
goto yy18;
yy70:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4);
yych = *YYCURSOR;
if (yych <= ';') {
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy70;
if (yych <= ':') goto yy18;
goto yy63;
} else {
if (yych <= 'E') {
if (yych <= 'D') goto yy18;
goto yy65;
} else {
if (yych == 'e') goto yy65;
goto yy18;
}
}
yy72:
yych = *++YYCURSOR;
if (yych != 'F') goto yy18;
yy73:
yych = *++YYCURSOR;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 585 "ext/standard/var_unserializer.re"
{
*p = YYCURSOR;
INIT_PZVAL(*rval);
if (!strncmp(start + 2, "NAN", 3)) {
ZVAL_DOUBLE(*rval, php_get_nan());
} else if (!strncmp(start + 2, "INF", 3)) {
ZVAL_DOUBLE(*rval, php_get_inf());
} else if (!strncmp(start + 2, "-INF", 4)) {
ZVAL_DOUBLE(*rval, -php_get_inf());
}
return 1;
}
#line 1143 "ext/standard/var_unserializer.c"
yy76:
yych = *++YYCURSOR;
if (yych == 'N') goto yy73;
goto yy18;
yy77:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy78;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy79;
goto yy18;
}
yy78:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy79:
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy79;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 558 "ext/standard/var_unserializer.re"
{
#if SIZEOF_LONG == 4
int digits = YYCURSOR - start - 3;
if (start[2] == '-' || start[2] == '+') {
digits--;
}
/* Use double for large long values that were serialized on a 64-bit system */
if (digits >= MAX_LENGTH_OF_LONG - 1) {
if (digits == MAX_LENGTH_OF_LONG - 1) {
int cmp = strncmp(YYCURSOR - MAX_LENGTH_OF_LONG, long_min_digits, MAX_LENGTH_OF_LONG - 1);
if (!(cmp < 0 || (cmp == 0 && start[2] == '-'))) {
goto use_double;
}
} else {
goto use_double;
}
}
#endif
*p = YYCURSOR;
INIT_PZVAL(*rval);
ZVAL_LONG(*rval, parse_iv(start + 2));
return 1;
}
#line 1197 "ext/standard/var_unserializer.c"
yy83:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= '2') goto yy18;
yych = *++YYCURSOR;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 551 "ext/standard/var_unserializer.re"
{
*p = YYCURSOR;
INIT_PZVAL(*rval);
ZVAL_BOOL(*rval, parse_iv(start + 2));
return 1;
}
#line 1212 "ext/standard/var_unserializer.c"
yy87:
++YYCURSOR;
#line 544 "ext/standard/var_unserializer.re"
{
*p = YYCURSOR;
INIT_PZVAL(*rval);
ZVAL_NULL(*rval);
return 1;
}
#line 1222 "ext/standard/var_unserializer.c"
yy89:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy90;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy91;
goto yy18;
}
yy90:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy91:
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy91;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 521 "ext/standard/var_unserializer.re"
{
long id;
*p = YYCURSOR;
if (!var_hash) return 0;
id = parse_iv(start + 2) - 1;
if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) {
return 0;
}
if (*rval == *rval_ref) return 0;
if (*rval != NULL) {
var_push_dtor_no_addref(var_hash, rval);
}
*rval = *rval_ref;
Z_ADDREF_PP(rval);
Z_UNSET_ISREF_PP(rval);
return 1;
}
#line 1268 "ext/standard/var_unserializer.c"
yy95:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy96;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy97;
goto yy18;
}
yy96:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy97:
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy97;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 500 "ext/standard/var_unserializer.re"
{
long id;
*p = YYCURSOR;
if (!var_hash) return 0;
id = parse_iv(start + 2) - 1;
if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) {
return 0;
}
if (*rval != NULL) {
var_push_dtor_no_addref(var_hash, rval);
}
*rval = *rval_ref;
Z_ADDREF_PP(rval);
Z_SET_ISREF_PP(rval);
return 1;
}
#line 1312 "ext/standard/var_unserializer.c"
}
#line 863 "ext/standard/var_unserializer.re"
return 0;
}
|
PHPAPI int php_var_unserialize(UNSERIALIZE_PARAMETER)
{
const unsigned char *cursor, *limit, *marker, *start;
zval **rval_ref;
limit = max;
cursor = *p;
if (YYCURSOR >= YYLIMIT) {
return 0;
}
if (var_hash && cursor[0] != 'R') {
var_push(var_hash, rval);
}
start = cursor;
#line 495 "ext/standard/var_unserializer.c"
{
YYCTYPE yych;
static const unsigned char yybm[] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
};
if ((YYLIMIT - YYCURSOR) < 7) YYFILL(7);
yych = *YYCURSOR;
switch (yych) {
case 'C':
case 'O': goto yy13;
case 'N': goto yy5;
case 'R': goto yy2;
case 'S': goto yy10;
case 'a': goto yy11;
case 'b': goto yy6;
case 'd': goto yy8;
case 'i': goto yy7;
case 'o': goto yy12;
case 'r': goto yy4;
case 's': goto yy9;
case '}': goto yy14;
default: goto yy16;
}
yy2:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy95;
yy3:
#line 860 "ext/standard/var_unserializer.re"
{ return 0; }
#line 557 "ext/standard/var_unserializer.c"
yy4:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy89;
goto yy3;
yy5:
yych = *++YYCURSOR;
if (yych == ';') goto yy87;
goto yy3;
yy6:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy83;
goto yy3;
yy7:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy77;
goto yy3;
yy8:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy53;
goto yy3;
yy9:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy46;
goto yy3;
yy10:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy39;
goto yy3;
yy11:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy32;
goto yy3;
yy12:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy25;
goto yy3;
yy13:
yych = *(YYMARKER = ++YYCURSOR);
if (yych == ':') goto yy17;
goto yy3;
yy14:
++YYCURSOR;
#line 854 "ext/standard/var_unserializer.re"
{
/* this is the case where we have less data than planned */
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Unexpected end of serialized data");
return 0; /* not sure if it should be 0 or 1 here? */
}
#line 606 "ext/standard/var_unserializer.c"
yy16:
yych = *++YYCURSOR;
goto yy3;
yy17:
yych = *++YYCURSOR;
if (yybm[0+yych] & 128) {
goto yy20;
}
if (yych == '+') goto yy19;
yy18:
YYCURSOR = YYMARKER;
goto yy3;
yy19:
yych = *++YYCURSOR;
if (yybm[0+yych] & 128) {
goto yy20;
}
goto yy18;
yy20:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yybm[0+yych] & 128) {
goto yy20;
}
if (yych <= '/') goto yy18;
if (yych >= ';') goto yy18;
yych = *++YYCURSOR;
if (yych != '"') goto yy18;
++YYCURSOR;
#line 707 "ext/standard/var_unserializer.re"
{
size_t len, len2, len3, maxlen;
long elements;
char *class_name;
zend_class_entry *ce;
zend_class_entry **pce;
int incomplete_class = 0;
int custom_object = 0;
zval *user_func;
zval *retval_ptr;
zval **args[1];
zval *arg_func_name;
if (!var_hash) return 0;
if (*start == 'C') {
custom_object = 1;
}
INIT_PZVAL(*rval);
len2 = len = parse_uiv(start + 2);
maxlen = max - YYCURSOR;
if (maxlen < len || len == 0) {
*p = start + 2;
return 0;
}
class_name = (char*)YYCURSOR;
YYCURSOR += len;
if (*(YYCURSOR) != '"') {
*p = YYCURSOR;
return 0;
}
if (*(YYCURSOR+1) != ':') {
*p = YYCURSOR+1;
return 0;
}
len3 = strspn(class_name, "0123456789_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377\\");
if (len3 != len)
{
*p = YYCURSOR + len3 - len;
return 0;
}
class_name = estrndup(class_name, len);
do {
/* Try to find class directly */
BG(serialize_lock)++;
if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) {
BG(serialize_lock)--;
if (EG(exception)) {
efree(class_name);
return 0;
}
ce = *pce;
break;
}
BG(serialize_lock)--;
if (EG(exception)) {
efree(class_name);
return 0;
}
/* Check for unserialize callback */
if ((PG(unserialize_callback_func) == NULL) || (PG(unserialize_callback_func)[0] == '\0')) {
incomplete_class = 1;
ce = PHP_IC_ENTRY;
break;
}
/* Call unserialize callback */
MAKE_STD_ZVAL(user_func);
ZVAL_STRING(user_func, PG(unserialize_callback_func), 1);
args[0] = &arg_func_name;
MAKE_STD_ZVAL(arg_func_name);
ZVAL_STRING(arg_func_name, class_name, 1);
BG(serialize_lock)++;
if (call_user_function_ex(CG(function_table), NULL, user_func, &retval_ptr, 1, args, 0, NULL TSRMLS_CC) != SUCCESS) {
BG(serialize_lock)--;
if (EG(exception)) {
efree(class_name);
zval_ptr_dtor(&user_func);
zval_ptr_dtor(&arg_func_name);
return 0;
}
php_error_docref(NULL TSRMLS_CC, E_WARNING, "defined (%s) but not found", user_func->value.str.val);
incomplete_class = 1;
ce = PHP_IC_ENTRY;
zval_ptr_dtor(&user_func);
zval_ptr_dtor(&arg_func_name);
break;
}
BG(serialize_lock)--;
if (retval_ptr) {
zval_ptr_dtor(&retval_ptr);
}
if (EG(exception)) {
efree(class_name);
zval_ptr_dtor(&user_func);
zval_ptr_dtor(&arg_func_name);
return 0;
}
/* The callback function may have defined the class */
if (zend_lookup_class(class_name, len2, &pce TSRMLS_CC) == SUCCESS) {
ce = *pce;
} else {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Function %s() hasn't defined the class it was called for", user_func->value.str.val);
incomplete_class = 1;
ce = PHP_IC_ENTRY;
}
zval_ptr_dtor(&user_func);
zval_ptr_dtor(&arg_func_name);
break;
} while (1);
*p = YYCURSOR;
if (custom_object) {
int ret;
ret = object_custom(UNSERIALIZE_PASSTHRU, ce);
if (ret && incomplete_class) {
php_store_class_name(*rval, class_name, len2);
}
efree(class_name);
return ret;
}
elements = object_common1(UNSERIALIZE_PASSTHRU, ce);
if (incomplete_class) {
php_store_class_name(*rval, class_name, len2);
}
efree(class_name);
return object_common2(UNSERIALIZE_PASSTHRU, elements);
}
#line 784 "ext/standard/var_unserializer.c"
yy25:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy26;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy27;
goto yy18;
}
yy26:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy27:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy27;
if (yych >= ';') goto yy18;
yych = *++YYCURSOR;
if (yych != '"') goto yy18;
++YYCURSOR;
#line 698 "ext/standard/var_unserializer.re"
{
if (!var_hash) return 0;
INIT_PZVAL(*rval);
return object_common2(UNSERIALIZE_PASSTHRU,
object_common1(UNSERIALIZE_PASSTHRU, ZEND_STANDARD_CLASS_DEF_PTR));
}
#line 818 "ext/standard/var_unserializer.c"
yy32:
yych = *++YYCURSOR;
if (yych == '+') goto yy33;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy34;
goto yy18;
yy33:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy34:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy34;
if (yych >= ';') goto yy18;
yych = *++YYCURSOR;
if (yych != '{') goto yy18;
++YYCURSOR;
#line 677 "ext/standard/var_unserializer.re"
{
long elements = parse_iv(start + 2);
/* use iv() not uiv() in order to check data range */
*p = YYCURSOR;
if (!var_hash) return 0;
if (elements < 0) {
return 0;
}
INIT_PZVAL(*rval);
array_init_size(*rval, elements);
if (!process_nested_data(UNSERIALIZE_PASSTHRU, Z_ARRVAL_PP(rval), elements, 0)) {
return 0;
}
return finish_nested_data(UNSERIALIZE_PASSTHRU);
}
#line 860 "ext/standard/var_unserializer.c"
yy39:
yych = *++YYCURSOR;
if (yych == '+') goto yy40;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy41;
goto yy18;
yy40:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy41:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy41;
if (yych >= ';') goto yy18;
yych = *++YYCURSOR;
if (yych != '"') goto yy18;
++YYCURSOR;
#line 642 "ext/standard/var_unserializer.re"
{
size_t len, maxlen;
char *str;
len = parse_uiv(start + 2);
maxlen = max - YYCURSOR;
if (maxlen < len) {
*p = start + 2;
return 0;
}
if ((str = unserialize_str(&YYCURSOR, &len, maxlen)) == NULL) {
return 0;
}
if (*(YYCURSOR) != '"') {
efree(str);
*p = YYCURSOR;
return 0;
}
if (*(YYCURSOR + 1) != ';') {
efree(str);
*p = YYCURSOR + 1;
return 0;
}
YYCURSOR += 2;
*p = YYCURSOR;
INIT_PZVAL(*rval);
ZVAL_STRINGL(*rval, str, len, 0);
return 1;
}
#line 916 "ext/standard/var_unserializer.c"
yy46:
yych = *++YYCURSOR;
if (yych == '+') goto yy47;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy48;
goto yy18;
yy47:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy48:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 2) YYFILL(2);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy48;
if (yych >= ';') goto yy18;
yych = *++YYCURSOR;
if (yych != '"') goto yy18;
++YYCURSOR;
#line 609 "ext/standard/var_unserializer.re"
{
size_t len, maxlen;
char *str;
len = parse_uiv(start + 2);
maxlen = max - YYCURSOR;
if (maxlen < len) {
*p = start + 2;
return 0;
}
str = (char*)YYCURSOR;
YYCURSOR += len;
if (*(YYCURSOR) != '"') {
*p = YYCURSOR;
return 0;
}
if (*(YYCURSOR + 1) != ';') {
*p = YYCURSOR + 1;
return 0;
}
YYCURSOR += 2;
*p = YYCURSOR;
INIT_PZVAL(*rval);
ZVAL_STRINGL(*rval, str, len, 1);
return 1;
}
#line 970 "ext/standard/var_unserializer.c"
yy53:
yych = *++YYCURSOR;
if (yych <= '/') {
if (yych <= ',') {
if (yych == '+') goto yy57;
goto yy18;
} else {
if (yych <= '-') goto yy55;
if (yych <= '.') goto yy60;
goto yy18;
}
} else {
if (yych <= 'I') {
if (yych <= '9') goto yy58;
if (yych <= 'H') goto yy18;
goto yy56;
} else {
if (yych != 'N') goto yy18;
}
}
yych = *++YYCURSOR;
if (yych == 'A') goto yy76;
goto yy18;
yy55:
yych = *++YYCURSOR;
if (yych <= '/') {
if (yych == '.') goto yy60;
goto yy18;
} else {
if (yych <= '9') goto yy58;
if (yych != 'I') goto yy18;
}
yy56:
yych = *++YYCURSOR;
if (yych == 'N') goto yy72;
goto yy18;
yy57:
yych = *++YYCURSOR;
if (yych == '.') goto yy60;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy58:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4);
yych = *YYCURSOR;
if (yych <= ':') {
if (yych <= '.') {
if (yych <= '-') goto yy18;
goto yy70;
} else {
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy58;
goto yy18;
}
} else {
if (yych <= 'E') {
if (yych <= ';') goto yy63;
if (yych <= 'D') goto yy18;
goto yy65;
} else {
if (yych == 'e') goto yy65;
goto yy18;
}
}
yy60:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy61:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4);
yych = *YYCURSOR;
if (yych <= ';') {
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy61;
if (yych <= ':') goto yy18;
} else {
if (yych <= 'E') {
if (yych <= 'D') goto yy18;
goto yy65;
} else {
if (yych == 'e') goto yy65;
goto yy18;
}
}
yy63:
++YYCURSOR;
#line 599 "ext/standard/var_unserializer.re"
{
#if SIZEOF_LONG == 4
use_double:
#endif
*p = YYCURSOR;
INIT_PZVAL(*rval);
ZVAL_DOUBLE(*rval, zend_strtod((const char *)start + 2, NULL));
return 1;
}
#line 1068 "ext/standard/var_unserializer.c"
yy65:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy66;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy67;
goto yy18;
}
yy66:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych == '+') goto yy69;
goto yy18;
} else {
if (yych <= '-') goto yy69;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
}
yy67:
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy67;
if (yych == ';') goto yy63;
goto yy18;
yy69:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy67;
goto yy18;
yy70:
++YYCURSOR;
if ((YYLIMIT - YYCURSOR) < 4) YYFILL(4);
yych = *YYCURSOR;
if (yych <= ';') {
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy70;
if (yych <= ':') goto yy18;
goto yy63;
} else {
if (yych <= 'E') {
if (yych <= 'D') goto yy18;
goto yy65;
} else {
if (yych == 'e') goto yy65;
goto yy18;
}
}
yy72:
yych = *++YYCURSOR;
if (yych != 'F') goto yy18;
yy73:
yych = *++YYCURSOR;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 584 "ext/standard/var_unserializer.re"
{
*p = YYCURSOR;
INIT_PZVAL(*rval);
if (!strncmp(start + 2, "NAN", 3)) {
ZVAL_DOUBLE(*rval, php_get_nan());
} else if (!strncmp(start + 2, "INF", 3)) {
ZVAL_DOUBLE(*rval, php_get_inf());
} else if (!strncmp(start + 2, "-INF", 4)) {
ZVAL_DOUBLE(*rval, -php_get_inf());
}
return 1;
}
#line 1142 "ext/standard/var_unserializer.c"
yy76:
yych = *++YYCURSOR;
if (yych == 'N') goto yy73;
goto yy18;
yy77:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy78;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy79;
goto yy18;
}
yy78:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy79:
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy79;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 557 "ext/standard/var_unserializer.re"
{
#if SIZEOF_LONG == 4
int digits = YYCURSOR - start - 3;
if (start[2] == '-' || start[2] == '+') {
digits--;
}
/* Use double for large long values that were serialized on a 64-bit system */
if (digits >= MAX_LENGTH_OF_LONG - 1) {
if (digits == MAX_LENGTH_OF_LONG - 1) {
int cmp = strncmp(YYCURSOR - MAX_LENGTH_OF_LONG, long_min_digits, MAX_LENGTH_OF_LONG - 1);
if (!(cmp < 0 || (cmp == 0 && start[2] == '-'))) {
goto use_double;
}
} else {
goto use_double;
}
}
#endif
*p = YYCURSOR;
INIT_PZVAL(*rval);
ZVAL_LONG(*rval, parse_iv(start + 2));
return 1;
}
#line 1196 "ext/standard/var_unserializer.c"
yy83:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= '2') goto yy18;
yych = *++YYCURSOR;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 550 "ext/standard/var_unserializer.re"
{
*p = YYCURSOR;
INIT_PZVAL(*rval);
ZVAL_BOOL(*rval, parse_iv(start + 2));
return 1;
}
#line 1211 "ext/standard/var_unserializer.c"
yy87:
++YYCURSOR;
#line 543 "ext/standard/var_unserializer.re"
{
*p = YYCURSOR;
INIT_PZVAL(*rval);
ZVAL_NULL(*rval);
return 1;
}
#line 1221 "ext/standard/var_unserializer.c"
yy89:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy90;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy91;
goto yy18;
}
yy90:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy91:
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy91;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 520 "ext/standard/var_unserializer.re"
{
long id;
*p = YYCURSOR;
if (!var_hash) return 0;
id = parse_iv(start + 2) - 1;
if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) {
return 0;
}
if (*rval == *rval_ref) return 0;
if (*rval != NULL) {
var_push_dtor_no_addref(var_hash, rval);
}
*rval = *rval_ref;
Z_ADDREF_PP(rval);
Z_UNSET_ISREF_PP(rval);
return 1;
}
#line 1267 "ext/standard/var_unserializer.c"
yy95:
yych = *++YYCURSOR;
if (yych <= ',') {
if (yych != '+') goto yy18;
} else {
if (yych <= '-') goto yy96;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy97;
goto yy18;
}
yy96:
yych = *++YYCURSOR;
if (yych <= '/') goto yy18;
if (yych >= ':') goto yy18;
yy97:
++YYCURSOR;
if (YYLIMIT <= YYCURSOR) YYFILL(1);
yych = *YYCURSOR;
if (yych <= '/') goto yy18;
if (yych <= '9') goto yy97;
if (yych != ';') goto yy18;
++YYCURSOR;
#line 499 "ext/standard/var_unserializer.re"
{
long id;
*p = YYCURSOR;
if (!var_hash) return 0;
id = parse_iv(start + 2) - 1;
if (id == -1 || var_access(var_hash, id, &rval_ref) != SUCCESS) {
return 0;
}
if (*rval != NULL) {
var_push_dtor_no_addref(var_hash, rval);
}
*rval = *rval_ref;
Z_ADDREF_PP(rval);
Z_SET_ISREF_PP(rval);
return 1;
}
#line 1311 "ext/standard/var_unserializer.c"
}
#line 862 "ext/standard/var_unserializer.re"
return 0;
}
|
C
|
php-src
| 1 |
CVE-2015-3183
|
https://www.cvedetails.com/cve/CVE-2015-3183/
|
CWE-20
|
https://github.com/apache/httpd/commit/e427c41257957b57036d5a549b260b6185d1dd73
|
e427c41257957b57036d5a549b260b6185d1dd73
|
Limit accepted chunk-size to 2^63-1 and be strict about chunk-ext
authorized characters.
Submitted by: Yann Ylavic
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1684513 13f79535-47bb-0310-9956-ffa450edef68
|
static apr_status_t send_all_header_fields(header_struct *h,
const request_rec *r)
{
const apr_array_header_t *elts;
const apr_table_entry_t *t_elt;
const apr_table_entry_t *t_end;
struct iovec *vec;
struct iovec *vec_next;
elts = apr_table_elts(r->headers_out);
if (elts->nelts == 0) {
return APR_SUCCESS;
}
t_elt = (const apr_table_entry_t *)(elts->elts);
t_end = t_elt + elts->nelts;
vec = (struct iovec *)apr_palloc(h->pool, 4 * elts->nelts *
sizeof(struct iovec));
vec_next = vec;
/* For each field, generate
* name ": " value CRLF
*/
do {
vec_next->iov_base = (void*)(t_elt->key);
vec_next->iov_len = strlen(t_elt->key);
vec_next++;
vec_next->iov_base = ": ";
vec_next->iov_len = sizeof(": ") - 1;
vec_next++;
vec_next->iov_base = (void*)(t_elt->val);
vec_next->iov_len = strlen(t_elt->val);
vec_next++;
vec_next->iov_base = CRLF;
vec_next->iov_len = sizeof(CRLF) - 1;
vec_next++;
t_elt++;
} while (t_elt < t_end);
if (APLOGrtrace4(r)) {
t_elt = (const apr_table_entry_t *)(elts->elts);
do {
ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, " %s: %s",
ap_escape_logitem(r->pool, t_elt->key),
ap_escape_logitem(r->pool, t_elt->val));
t_elt++;
} while (t_elt < t_end);
}
#if APR_CHARSET_EBCDIC
{
apr_size_t len;
char *tmp = apr_pstrcatv(r->pool, vec, vec_next - vec, &len);
ap_xlate_proto_to_ascii(tmp, len);
return apr_brigade_write(h->bb, NULL, NULL, tmp, len);
}
#else
return apr_brigade_writev(h->bb, NULL, NULL, vec, vec_next - vec);
#endif
}
|
static apr_status_t send_all_header_fields(header_struct *h,
const request_rec *r)
{
const apr_array_header_t *elts;
const apr_table_entry_t *t_elt;
const apr_table_entry_t *t_end;
struct iovec *vec;
struct iovec *vec_next;
elts = apr_table_elts(r->headers_out);
if (elts->nelts == 0) {
return APR_SUCCESS;
}
t_elt = (const apr_table_entry_t *)(elts->elts);
t_end = t_elt + elts->nelts;
vec = (struct iovec *)apr_palloc(h->pool, 4 * elts->nelts *
sizeof(struct iovec));
vec_next = vec;
/* For each field, generate
* name ": " value CRLF
*/
do {
vec_next->iov_base = (void*)(t_elt->key);
vec_next->iov_len = strlen(t_elt->key);
vec_next++;
vec_next->iov_base = ": ";
vec_next->iov_len = sizeof(": ") - 1;
vec_next++;
vec_next->iov_base = (void*)(t_elt->val);
vec_next->iov_len = strlen(t_elt->val);
vec_next++;
vec_next->iov_base = CRLF;
vec_next->iov_len = sizeof(CRLF) - 1;
vec_next++;
t_elt++;
} while (t_elt < t_end);
if (APLOGrtrace4(r)) {
t_elt = (const apr_table_entry_t *)(elts->elts);
do {
ap_log_rerror(APLOG_MARK, APLOG_TRACE4, 0, r, " %s: %s",
ap_escape_logitem(r->pool, t_elt->key),
ap_escape_logitem(r->pool, t_elt->val));
t_elt++;
} while (t_elt < t_end);
}
#if APR_CHARSET_EBCDIC
{
apr_size_t len;
char *tmp = apr_pstrcatv(r->pool, vec, vec_next - vec, &len);
ap_xlate_proto_to_ascii(tmp, len);
return apr_brigade_write(h->bb, NULL, NULL, tmp, len);
}
#else
return apr_brigade_writev(h->bb, NULL, NULL, vec, vec_next - vec);
#endif
}
|
C
|
httpd
| 0 |
CVE-2019-17351
|
https://www.cvedetails.com/cve/CVE-2019-17351/
|
CWE-400
|
https://github.com/torvalds/linux/commit/6ef36ab967c71690ebe7e5ef997a8be4da3bc844
|
6ef36ab967c71690ebe7e5ef997a8be4da3bc844
|
xen: let alloc_xenballooned_pages() fail if not enough memory free
commit a1078e821b605813b63bf6bca414a85f804d5c66 upstream.
Instead of trying to allocate pages with GFP_USER in
add_ballooned_pages() check the available free memory via
si_mem_available(). GFP_USER is far less limiting memory exhaustion
than the test via si_mem_available().
This will avoid dom0 running out of memory due to excessive foreign
page mappings especially on ARM and on x86 in PVH mode, as those don't
have a pre-ballooned area which can be used for foreign mappings.
As the normal ballooning suffers from the same problem don't balloon
down more than si_mem_available() pages in one iteration. At the same
time limit the default maximum number of retries.
This is part of XSA-300.
Signed-off-by: Juergen Gross <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
static bool balloon_is_inflated(void)
{
return balloon_stats.balloon_low || balloon_stats.balloon_high;
}
|
static bool balloon_is_inflated(void)
{
return balloon_stats.balloon_low || balloon_stats.balloon_high;
}
|
C
|
linux
| 0 |
CVE-2016-6327
|
https://www.cvedetails.com/cve/CVE-2016-6327/
|
CWE-476
|
https://github.com/torvalds/linux/commit/51093254bf879bc9ce96590400a87897c7498463
|
51093254bf879bc9ce96590400a87897c7498463
|
IB/srpt: Simplify srpt_handle_tsk_mgmt()
Let the target core check task existence instead of the SRP target
driver. Additionally, let the target core check the validity of the
task management request instead of the ib_srpt driver.
This patch fixes the following kernel crash:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000001
IP: [<ffffffffa0565f37>] srpt_handle_new_iu+0x6d7/0x790 [ib_srpt]
Oops: 0002 [#1] SMP
Call Trace:
[<ffffffffa05660ce>] srpt_process_completion+0xde/0x570 [ib_srpt]
[<ffffffffa056669f>] srpt_compl_thread+0x13f/0x160 [ib_srpt]
[<ffffffff8109726f>] kthread+0xcf/0xe0
[<ffffffff81613cfc>] ret_from_fork+0x7c/0xb0
Signed-off-by: Bart Van Assche <[email protected]>
Fixes: 3e4f574857ee ("ib_srpt: Convert TMR path to target_submit_tmr")
Tested-by: Alex Estrin <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: Nicholas Bellinger <[email protected]>
Cc: Sagi Grimberg <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Doug Ledford <[email protected]>
|
static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
unsigned long val;
int ret;
ret = kstrtoul(page, 0, &val);
if (ret < 0) {
pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_RDMA_SIZE) {
pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
MAX_SRPT_RDMA_SIZE);
return -EINVAL;
}
if (val < DEFAULT_MAX_RDMA_SIZE) {
pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
val, DEFAULT_MAX_RDMA_SIZE);
return -EINVAL;
}
sport->port_attrib.srp_max_rdma_size = val;
return count;
}
|
static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
unsigned long val;
int ret;
ret = kstrtoul(page, 0, &val);
if (ret < 0) {
pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_RDMA_SIZE) {
pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
MAX_SRPT_RDMA_SIZE);
return -EINVAL;
}
if (val < DEFAULT_MAX_RDMA_SIZE) {
pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
val, DEFAULT_MAX_RDMA_SIZE);
return -EINVAL;
}
sport->port_attrib.srp_max_rdma_size = val;
return count;
}
|
C
|
linux
| 0 |
CVE-2016-2464
|
https://www.cvedetails.com/cve/CVE-2016-2464/
|
CWE-20
|
https://android.googlesource.com/platform/external/libvpx/+/cc274e2abe8b2a6698a5c47d8aa4bb45f1f9538d
|
cc274e2abe8b2a6698a5c47d8aa4bb45f1f9538d
|
external/libvpx/libwebm: Update snapshot
Update libwebm snapshot. This update contains security fixes from upstream.
Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b
BUG=23167726
Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207
(cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a)
|
ContentEncoding::ContentCompression::ContentCompression()
: algo(0), settings(NULL), settings_len(0) {}
|
ContentEncoding::ContentCompression::ContentCompression()
: algo(0), settings(NULL), settings_len(0) {}
|
C
|
Android
| 0 |
CVE-2015-1296
|
https://www.cvedetails.com/cve/CVE-2015-1296/
|
CWE-254
|
https://github.com/chromium/chromium/commit/5fc08cfb098acce49344d2e89cc27c915903f81c
|
5fc08cfb098acce49344d2e89cc27c915903f81c
|
Clean up Android DownloadManager code as most download now go through Chrome Network stack
The only exception is OMA DRM download.
And it only applies to context menu download interception.
Clean up the remaining unused code now.
BUG=647755
Review-Url: https://codereview.chromium.org/2371773003
Cr-Commit-Position: refs/heads/master@{#421332}
|
void DownloadController::OnDownloadUpdated(DownloadItem* item) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
if (item->IsDangerous() && (item->GetState() != DownloadItem::CANCELLED))
OnDangerousDownload(item);
JNIEnv* env = base::android::AttachCurrentThread();
ScopedJavaLocalRef<jstring> jguid =
ConvertUTF8ToJavaString(env, item->GetGuid());
ScopedJavaLocalRef<jstring> jurl =
ConvertUTF8ToJavaString(env, item->GetURL().spec());
ScopedJavaLocalRef<jstring> jmime_type =
ConvertUTF8ToJavaString(env, item->GetMimeType());
ScopedJavaLocalRef<jstring> jpath =
ConvertUTF8ToJavaString(env, item->GetTargetFilePath().value());
ScopedJavaLocalRef<jstring> jfilename = ConvertUTF8ToJavaString(
env, item->GetTargetFilePath().BaseName().value());
ScopedJavaLocalRef<jstring> joriginal_url =
ConvertUTF8ToJavaString(env, item->GetOriginalUrl().spec());
ScopedJavaLocalRef<jstring> jreferrer_url =
ConvertUTF8ToJavaString(env, item->GetReferrerUrl().spec());
ui::PageTransition base_transition =
ui::PageTransitionStripQualifier(item->GetTransitionType());
bool user_initiated =
item->GetTransitionType() & ui::PAGE_TRANSITION_FROM_ADDRESS_BAR ||
base_transition == ui::PAGE_TRANSITION_TYPED ||
base_transition == ui::PAGE_TRANSITION_AUTO_BOOKMARK ||
base_transition == ui::PAGE_TRANSITION_GENERATED ||
base_transition == ui::PAGE_TRANSITION_RELOAD ||
base_transition == ui::PAGE_TRANSITION_KEYWORD;
bool hasUserGesture = item->HasUserGesture() || user_initiated;
switch (item->GetState()) {
case DownloadItem::IN_PROGRESS: {
base::TimeDelta time_delta;
item->TimeRemaining(&time_delta);
Java_DownloadController_onDownloadUpdated(
env, GetJavaObject()->Controller(env), jurl, jmime_type, jfilename,
jpath, item->GetReceivedBytes(), jguid, item->PercentComplete(),
time_delta.InMilliseconds(), hasUserGesture, item->IsPaused(),
item->GetBrowserContext()->IsOffTheRecord());
break;
}
case DownloadItem::COMPLETE:
item->RemoveObserver(this);
Java_DownloadController_onDownloadCompleted(
env, GetJavaObject()->Controller(env), jurl, jmime_type, jfilename,
jpath, item->GetReceivedBytes(), jguid, joriginal_url, jreferrer_url,
hasUserGesture);
DownloadController::RecordDownloadCancelReason(
DownloadController::CANCEL_REASON_NOT_CANCELED);
break;
case DownloadItem::CANCELLED:
Java_DownloadController_onDownloadCancelled(
env, GetJavaObject()->Controller(env), jguid);
break;
case DownloadItem::INTERRUPTED:
Java_DownloadController_onDownloadInterrupted(
env, GetJavaObject()->Controller(env), jurl, jmime_type, jfilename,
jpath, item->GetReceivedBytes(), jguid, item->CanResume(),
IsInterruptedDownloadAutoResumable(item),
item->GetBrowserContext()->IsOffTheRecord());
item->RemoveObserver(this);
break;
case DownloadItem::MAX_DOWNLOAD_STATE:
NOTREACHED();
}
}
|
void DownloadController::OnDownloadUpdated(DownloadItem* item) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
if (item->IsDangerous() && (item->GetState() != DownloadItem::CANCELLED))
OnDangerousDownload(item);
JNIEnv* env = base::android::AttachCurrentThread();
ScopedJavaLocalRef<jstring> jguid =
ConvertUTF8ToJavaString(env, item->GetGuid());
ScopedJavaLocalRef<jstring> jurl =
ConvertUTF8ToJavaString(env, item->GetURL().spec());
ScopedJavaLocalRef<jstring> jmime_type =
ConvertUTF8ToJavaString(env, item->GetMimeType());
ScopedJavaLocalRef<jstring> jpath =
ConvertUTF8ToJavaString(env, item->GetTargetFilePath().value());
ScopedJavaLocalRef<jstring> jfilename = ConvertUTF8ToJavaString(
env, item->GetTargetFilePath().BaseName().value());
ScopedJavaLocalRef<jstring> joriginal_url =
ConvertUTF8ToJavaString(env, item->GetOriginalUrl().spec());
ScopedJavaLocalRef<jstring> jreferrer_url =
ConvertUTF8ToJavaString(env, item->GetReferrerUrl().spec());
ui::PageTransition base_transition =
ui::PageTransitionStripQualifier(item->GetTransitionType());
bool user_initiated =
item->GetTransitionType() & ui::PAGE_TRANSITION_FROM_ADDRESS_BAR ||
base_transition == ui::PAGE_TRANSITION_TYPED ||
base_transition == ui::PAGE_TRANSITION_AUTO_BOOKMARK ||
base_transition == ui::PAGE_TRANSITION_GENERATED ||
base_transition == ui::PAGE_TRANSITION_RELOAD ||
base_transition == ui::PAGE_TRANSITION_KEYWORD;
bool hasUserGesture = item->HasUserGesture() || user_initiated;
switch (item->GetState()) {
case DownloadItem::IN_PROGRESS: {
base::TimeDelta time_delta;
item->TimeRemaining(&time_delta);
Java_DownloadController_onDownloadUpdated(
env, GetJavaObject()->Controller(env), jurl, jmime_type, jfilename,
jpath, item->GetReceivedBytes(), jguid, item->PercentComplete(),
time_delta.InMilliseconds(), hasUserGesture, item->IsPaused(),
item->GetBrowserContext()->IsOffTheRecord());
break;
}
case DownloadItem::COMPLETE:
item->RemoveObserver(this);
Java_DownloadController_onDownloadCompleted(
env, GetJavaObject()->Controller(env), jurl, jmime_type, jfilename,
jpath, item->GetReceivedBytes(), jguid, joriginal_url, jreferrer_url,
hasUserGesture);
DownloadController::RecordDownloadCancelReason(
DownloadController::CANCEL_REASON_NOT_CANCELED);
break;
case DownloadItem::CANCELLED:
Java_DownloadController_onDownloadCancelled(
env, GetJavaObject()->Controller(env), jguid);
break;
case DownloadItem::INTERRUPTED:
Java_DownloadController_onDownloadInterrupted(
env, GetJavaObject()->Controller(env), jurl, jmime_type, jfilename,
jpath, item->GetReceivedBytes(), jguid, item->CanResume(),
IsInterruptedDownloadAutoResumable(item),
item->GetBrowserContext()->IsOffTheRecord());
item->RemoveObserver(this);
break;
case DownloadItem::MAX_DOWNLOAD_STATE:
NOTREACHED();
}
}
|
C
|
Chrome
| 0 |
CVE-2017-8069
|
https://www.cvedetails.com/cve/CVE-2017-8069/
|
CWE-119
|
https://github.com/torvalds/linux/commit/7926aff5c57b577ab0f43364ff0c59d968f6a414
|
7926aff5c57b577ab0f43364ff0c59d968f6a414
|
rtl8150: Use heap buffers for all register access
Allocating USB buffers on the stack is not portable, and no longer
works on x86_64 (with VMAP_STACK enabled as per default).
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int rtl8150_resume(struct usb_interface *intf)
{
rtl8150_t *dev = usb_get_intfdata(intf);
netif_device_attach(dev->netdev);
if (netif_running(dev->netdev)) {
dev->rx_urb->status = 0;
dev->rx_urb->actual_length = 0;
read_bulk_callback(dev->rx_urb);
dev->intr_urb->status = 0;
dev->intr_urb->actual_length = 0;
intr_callback(dev->intr_urb);
}
return 0;
}
|
static int rtl8150_resume(struct usb_interface *intf)
{
rtl8150_t *dev = usb_get_intfdata(intf);
netif_device_attach(dev->netdev);
if (netif_running(dev->netdev)) {
dev->rx_urb->status = 0;
dev->rx_urb->actual_length = 0;
read_bulk_callback(dev->rx_urb);
dev->intr_urb->status = 0;
dev->intr_urb->actual_length = 0;
intr_callback(dev->intr_urb);
}
return 0;
}
|
C
|
linux
| 0 |
CVE-2013-0839
|
https://www.cvedetails.com/cve/CVE-2013-0839/
|
CWE-399
|
https://github.com/chromium/chromium/commit/dd3b6fe574edad231c01c78e4647a74c38dc4178
|
dd3b6fe574edad231c01c78e4647a74c38dc4178
|
Remove parent* arg from GDataEntry ctor.
* Remove static FromDocumentEntry from GDataEntry, GDataFile, GDataDirectory. Replace with InitFromDocumentEntry.
* Move common code from GDataFile::InitFromDocumentEntry and GDataDirectory::InitFromDocumentEntry to GDataEntry::InitFromDocumentEntry.
* Add GDataDirectoryService::FromDocumentEntry and use this everywhere.
* Make ctors of GDataFile, GDataDirectory private, so these must be created by GDataDirectoryService's CreateGDataFile and
CreateGDataDirectory. Make GDataEntry ctor protected.
BUG=141494
TEST=unit tests.
Review URL: https://chromiumcodereview.appspot.com/10854083
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@151008 0039d316-1c4b-4281-b951-d872f2087c98
|
void GDataFileSystem::Copy(const FilePath& src_file_path,
const FilePath& dest_file_path,
const FileOperationCallback& callback) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI) ||
BrowserThread::CurrentlyOn(BrowserThread::IO));
DCHECK(!callback.is_null());
RunTaskOnUIThread(base::Bind(&GDataFileSystem::CopyOnUIThread,
ui_weak_ptr_,
src_file_path,
dest_file_path,
CreateRelayCallback(callback)));
}
|
void GDataFileSystem::Copy(const FilePath& src_file_path,
const FilePath& dest_file_path,
const FileOperationCallback& callback) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI) ||
BrowserThread::CurrentlyOn(BrowserThread::IO));
DCHECK(!callback.is_null());
RunTaskOnUIThread(base::Bind(&GDataFileSystem::CopyOnUIThread,
ui_weak_ptr_,
src_file_path,
dest_file_path,
CreateRelayCallback(callback)));
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/d4cd2b2c0953ad7e9fa988c234eb9361be80fe81
|
d4cd2b2c0953ad7e9fa988c234eb9361be80fe81
|
DevTools: 'Overrides' UI overlay obstructs page and element inspector
BUG=302862
[email protected]
Review URL: https://codereview.chromium.org/40233006
git-svn-id: svn://svn.chromium.org/blink/trunk@160559 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void InspectorPageAgent::getResourceTree(ErrorString*, RefPtr<TypeBuilder::Page::FrameResourceTree>& object)
{
object = buildObjectForFrameTree(m_page->mainFrame());
}
|
void InspectorPageAgent::getResourceTree(ErrorString*, RefPtr<TypeBuilder::Page::FrameResourceTree>& object)
{
object = buildObjectForFrameTree(m_page->mainFrame());
}
|
C
|
Chrome
| 0 |
CVE-2013-6621
|
https://www.cvedetails.com/cve/CVE-2013-6621/
|
CWE-399
|
https://github.com/chromium/chromium/commit/4039d2fcaab746b6c20017ba9bb51c3a2403a76c
|
4039d2fcaab746b6c20017ba9bb51c3a2403a76c
|
Add logging to figure out which IPC we're failing to deserialize in RenderFrame.
BUG=369553
[email protected]
Review URL: https://codereview.chromium.org/263833020
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@268565 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderFrameImpl::didChangeLoadProgress(double load_progress) {
render_view_->FrameDidChangeLoadProgress(frame_, load_progress);
}
|
void RenderFrameImpl::didChangeLoadProgress(double load_progress) {
render_view_->FrameDidChangeLoadProgress(frame_, load_progress);
}
|
C
|
Chrome
| 0 |
CVE-2016-3746
|
https://www.cvedetails.com/cve/CVE-2016-3746/
| null |
https://android.googlesource.com/platform/hardware/qcom/media/+/5b82f4f90c3d531313714df4b936f92fb0ff15cf
|
5b82f4f90c3d531313714df4b936f92fb0ff15cf
|
DO NOT MERGE mm-video-v4l2: vdec: Avoid processing ETBs/FTBs in invalid states
(per the spec) ETB/FTB should not be handled in states other than
Executing, Paused and Idle. This avoids accessing invalid buffers.
Also add a lock to protect the private-buffers from being deleted
while accessing from another thread.
Bug: 27890802
Security Vulnerability - Heap Use-After-Free and Possible LPE in
MediaServer (libOmxVdec problem #6)
CRs-Fixed: 1008882
Change-Id: Iaac2e383cd53cf9cf8042c9ed93ddc76dba3907e
|
OMX_ERRORTYPE omx_vdec::describeColorFormat(OMX_PTR pParam) {
#ifndef FLEXYUV_SUPPORTED
(void) pParam;
return OMX_ErrorUndefined;
#else
if (pParam == NULL) {
DEBUG_PRINT_ERROR("describeColorFormat: invalid params");
return OMX_ErrorBadParameter;
}
DescribeColorFormatParams *params = (DescribeColorFormatParams*)pParam;
MediaImage *img = &(params->sMediaImage);
switch(params->eColorFormat) {
case QOMX_COLOR_FORMATYUV420PackedSemiPlanar32m:
{
img->mType = MediaImage::MEDIA_IMAGE_TYPE_YUV;
img->mNumPlanes = 3;
img->mWidth = params->nFrameWidth;
img->mHeight = params->nFrameHeight;
size_t planeWidth = VENUS_Y_STRIDE(COLOR_FMT_NV12, params->nFrameWidth);
size_t planeHeight = VENUS_Y_SCANLINES(COLOR_FMT_NV12, params->nFrameHeight);
img->mBitDepth = 8;
img->mPlane[MediaImage::Y].mOffset = 0;
img->mPlane[MediaImage::Y].mColInc = 1;
img->mPlane[MediaImage::Y].mRowInc = planeWidth; //same as stride
img->mPlane[MediaImage::Y].mHorizSubsampling = 1;
img->mPlane[MediaImage::Y].mVertSubsampling = 1;
img->mPlane[MediaImage::U].mOffset = planeWidth * planeHeight;
img->mPlane[MediaImage::U].mColInc = 2; //interleaved UV
img->mPlane[MediaImage::U].mRowInc =
VENUS_UV_STRIDE(COLOR_FMT_NV12, params->nFrameWidth);
img->mPlane[MediaImage::U].mHorizSubsampling = 2;
img->mPlane[MediaImage::U].mVertSubsampling = 2;
img->mPlane[MediaImage::V].mOffset = planeWidth * planeHeight + 1;
img->mPlane[MediaImage::V].mColInc = 2; //interleaved UV
img->mPlane[MediaImage::V].mRowInc =
VENUS_UV_STRIDE(COLOR_FMT_NV12, params->nFrameWidth);
img->mPlane[MediaImage::V].mHorizSubsampling = 2;
img->mPlane[MediaImage::V].mVertSubsampling = 2;
break;
}
case OMX_COLOR_FormatYUV420Planar:
case OMX_COLOR_FormatYUV420SemiPlanar:
return OMX_ErrorUnsupportedSetting;
default:
DEBUG_PRINT_LOW("color-format %x is not flexible", params->eColorFormat);
img->mType = MediaImage::MEDIA_IMAGE_TYPE_UNKNOWN;
return OMX_ErrorNone;
};
DEBUG_PRINT_LOW("NOTE: Describe color format : %x", params->eColorFormat);
DEBUG_PRINT_LOW(" FrameWidth x FrameHeight : %d x %d", params->nFrameWidth, params->nFrameHeight);
DEBUG_PRINT_LOW(" YWidth x YHeight : %d x %d", img->mWidth, img->mHeight);
for (size_t i = 0; i < img->mNumPlanes; ++i) {
DEBUG_PRINT_LOW(" Plane[%d] : offset=%d / xStep=%d / yStep = %d",
i, img->mPlane[i].mOffset, img->mPlane[i].mColInc, img->mPlane[i].mRowInc);
}
return OMX_ErrorNone;
#endif //FLEXYUV_SUPPORTED
}
|
OMX_ERRORTYPE omx_vdec::describeColorFormat(OMX_PTR pParam) {
#ifndef FLEXYUV_SUPPORTED
(void) pParam;
return OMX_ErrorUndefined;
#else
if (pParam == NULL) {
DEBUG_PRINT_ERROR("describeColorFormat: invalid params");
return OMX_ErrorBadParameter;
}
DescribeColorFormatParams *params = (DescribeColorFormatParams*)pParam;
MediaImage *img = &(params->sMediaImage);
switch(params->eColorFormat) {
case QOMX_COLOR_FORMATYUV420PackedSemiPlanar32m:
{
img->mType = MediaImage::MEDIA_IMAGE_TYPE_YUV;
img->mNumPlanes = 3;
img->mWidth = params->nFrameWidth;
img->mHeight = params->nFrameHeight;
size_t planeWidth = VENUS_Y_STRIDE(COLOR_FMT_NV12, params->nFrameWidth);
size_t planeHeight = VENUS_Y_SCANLINES(COLOR_FMT_NV12, params->nFrameHeight);
img->mBitDepth = 8;
img->mPlane[MediaImage::Y].mOffset = 0;
img->mPlane[MediaImage::Y].mColInc = 1;
img->mPlane[MediaImage::Y].mRowInc = planeWidth; //same as stride
img->mPlane[MediaImage::Y].mHorizSubsampling = 1;
img->mPlane[MediaImage::Y].mVertSubsampling = 1;
img->mPlane[MediaImage::U].mOffset = planeWidth * planeHeight;
img->mPlane[MediaImage::U].mColInc = 2; //interleaved UV
img->mPlane[MediaImage::U].mRowInc =
VENUS_UV_STRIDE(COLOR_FMT_NV12, params->nFrameWidth);
img->mPlane[MediaImage::U].mHorizSubsampling = 2;
img->mPlane[MediaImage::U].mVertSubsampling = 2;
img->mPlane[MediaImage::V].mOffset = planeWidth * planeHeight + 1;
img->mPlane[MediaImage::V].mColInc = 2; //interleaved UV
img->mPlane[MediaImage::V].mRowInc =
VENUS_UV_STRIDE(COLOR_FMT_NV12, params->nFrameWidth);
img->mPlane[MediaImage::V].mHorizSubsampling = 2;
img->mPlane[MediaImage::V].mVertSubsampling = 2;
break;
}
case OMX_COLOR_FormatYUV420Planar:
case OMX_COLOR_FormatYUV420SemiPlanar:
return OMX_ErrorUnsupportedSetting;
default:
DEBUG_PRINT_LOW("color-format %x is not flexible", params->eColorFormat);
img->mType = MediaImage::MEDIA_IMAGE_TYPE_UNKNOWN;
return OMX_ErrorNone;
};
DEBUG_PRINT_LOW("NOTE: Describe color format : %x", params->eColorFormat);
DEBUG_PRINT_LOW(" FrameWidth x FrameHeight : %d x %d", params->nFrameWidth, params->nFrameHeight);
DEBUG_PRINT_LOW(" YWidth x YHeight : %d x %d", img->mWidth, img->mHeight);
for (size_t i = 0; i < img->mNumPlanes; ++i) {
DEBUG_PRINT_LOW(" Plane[%d] : offset=%d / xStep=%d / yStep = %d",
i, img->mPlane[i].mOffset, img->mPlane[i].mColInc, img->mPlane[i].mRowInc);
}
return OMX_ErrorNone;
#endif //FLEXYUV_SUPPORTED
}
|
C
|
Android
| 0 |
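Editor's note: the describeColorFormat() record above encodes the Venus NV12 layout — a Y plane followed by interleaved UV, with V offset one byte past U and both chroma column increments set to 2. The small self-contained sketch below walks that arithmetic in isolation; the stride and scanline values are illustrative assumptions, since the real numbers come from the target-specific VENUS_* macros.

#include <stdio.h>
#include <stddef.h>

int main(void) {
    /* Illustrative assumptions; real values come from VENUS_Y_STRIDE /
     * VENUS_Y_SCANLINES and are target-specific. */
    size_t plane_width  = 1280;   /* assumed Y stride     */
    size_t plane_height = 736;    /* assumed Y scanlines  */

    size_t y_offset = 0;
    size_t u_offset = plane_width * plane_height;  /* UV plane follows Y    */
    size_t v_offset = u_offset + 1;                /* V interleaved after U */

    /* Chroma is subsampled 2x2 and interleaved, hence colInc = 2 for U/V. */
    printf("Y@%zu U@%zu V@%zu, UV colInc=2, UV subsampling=2x2\n",
           y_offset, u_offset, v_offset);
    return 0;
}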
CVE-2013-0920
|
https://www.cvedetails.com/cve/CVE-2013-0920/
|
CWE-399
|
https://github.com/chromium/chromium/commit/12baa2097220e33c12b60aa5e6da6701637761bf
|
12baa2097220e33c12b60aa5e6da6701637761bf
|
Fix heap-use-after-free in BookmarksIOFunction::ShowSelectFileDialog.
BUG=177410
Review URL: https://chromiumcodereview.appspot.com/12326086
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@184586 0039d316-1c4b-4281-b951-d872f2087c98
|
void BookmarkEventRouter::BookmarkNodeMoved(BookmarkModel* model,
const BookmarkNode* old_parent,
int old_index,
const BookmarkNode* new_parent,
int new_index) {
scoped_ptr<ListValue> args(new ListValue());
const BookmarkNode* node = new_parent->GetChild(new_index);
args->Append(new StringValue(base::Int64ToString(node->id())));
DictionaryValue* object_args = new DictionaryValue();
object_args->SetString(keys::kParentIdKey,
base::Int64ToString(new_parent->id()));
object_args->SetInteger(keys::kIndexKey, new_index);
object_args->SetString(keys::kOldParentIdKey,
base::Int64ToString(old_parent->id()));
object_args->SetInteger(keys::kOldIndexKey, old_index);
args->Append(object_args);
DispatchEvent(model->profile(), keys::kOnBookmarkMoved, args.Pass());
}
|
void BookmarkEventRouter::BookmarkNodeMoved(BookmarkModel* model,
const BookmarkNode* old_parent,
int old_index,
const BookmarkNode* new_parent,
int new_index) {
scoped_ptr<ListValue> args(new ListValue());
const BookmarkNode* node = new_parent->GetChild(new_index);
args->Append(new StringValue(base::Int64ToString(node->id())));
DictionaryValue* object_args = new DictionaryValue();
object_args->SetString(keys::kParentIdKey,
base::Int64ToString(new_parent->id()));
object_args->SetInteger(keys::kIndexKey, new_index);
object_args->SetString(keys::kOldParentIdKey,
base::Int64ToString(old_parent->id()));
object_args->SetInteger(keys::kOldIndexKey, old_index);
args->Append(object_args);
DispatchEvent(model->profile(), keys::kOnBookmarkMoved, args.Pass());
}
|
C
|
Chrome
| 0 |
CVE-2017-16939
|
https://www.cvedetails.com/cve/CVE-2017-16939/
|
CWE-416
|
https://github.com/torvalds/linux/commit/1137b5e2529a8f5ca8ee709288ecba3e68044df2
|
1137b5e2529a8f5ca8ee709288ecba3e68044df2
|
ipsec: Fix aborted xfrm policy dump crash
An independent security researcher, Mohamed Ghannam, has reported
this vulnerability to Beyond Security's SecuriTeam Secure Disclosure
program.
The xfrm_dump_policy_done function expects xfrm_dump_policy to
have been called at least once or it will crash. This can be
triggered if a dump fails because the target socket's receive
buffer is full.
This patch fixes it by using the cb->start mechanism to ensure that
the initialisation is always done regardless of the buffer situation.
Fixes: 12a169e7d8f4 ("ipsec: Put dumpers on the dump list")
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
|
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
struct xfrm_userspi_info *p;
struct sk_buff *resp_skb;
xfrm_address_t *daddr;
int family;
int err;
u32 mark;
struct xfrm_mark m;
p = nlmsg_data(nlh);
err = verify_spi_info(p->info.id.proto, p->min, p->max);
if (err)
goto out_noput;
family = p->info.family;
daddr = &p->info.id.daddr;
x = NULL;
mark = xfrm_mark_get(attrs, &m);
if (p->info.seq) {
x = xfrm_find_acq_byseq(net, mark, p->info.seq);
if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
xfrm_state_put(x);
x = NULL;
}
}
if (!x)
x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
p->info.id.proto, daddr,
&p->info.saddr, 1,
family);
err = -ENOENT;
if (x == NULL)
goto out_noput;
err = xfrm_alloc_spi(x, p->min, p->max);
if (err)
goto out;
resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
if (IS_ERR(resp_skb)) {
err = PTR_ERR(resp_skb);
goto out;
}
err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
out:
xfrm_state_put(x);
out_noput:
return err;
}
|
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
struct xfrm_userspi_info *p;
struct sk_buff *resp_skb;
xfrm_address_t *daddr;
int family;
int err;
u32 mark;
struct xfrm_mark m;
p = nlmsg_data(nlh);
err = verify_spi_info(p->info.id.proto, p->min, p->max);
if (err)
goto out_noput;
family = p->info.family;
daddr = &p->info.id.daddr;
x = NULL;
mark = xfrm_mark_get(attrs, &m);
if (p->info.seq) {
x = xfrm_find_acq_byseq(net, mark, p->info.seq);
if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
xfrm_state_put(x);
x = NULL;
}
}
if (!x)
x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
p->info.id.proto, daddr,
&p->info.saddr, 1,
family);
err = -ENOENT;
if (x == NULL)
goto out_noput;
err = xfrm_alloc_spi(x, p->min, p->max);
if (err)
goto out;
resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
if (IS_ERR(resp_skb)) {
err = PTR_ERR(resp_skb);
goto out;
}
err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
out:
xfrm_state_put(x);
out_noput:
return err;
}
|
C
|
linux
| 0 |
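Editor's note: the commit message above attributes the fix to the netlink cb->start mechanism, which guarantees the per-dump initialisation runs exactly once before any dump pass, even when the requester's receive buffer is already full. The hedged sketch below shows that registration pattern in kernel style; the handler names are illustrative placeholders, not the actual xfrm_user code.

#include <linux/netlink.h>

/* Hedged sketch (not the actual xfrm_user hunk): register a netlink dump
 * with a ->start callback, so per-dump state is set up exactly once even
 * when the first ->dump pass cannot run. */
static int my_dump_start(struct netlink_callback *cb)
{
    /* initialise the walk state hung off cb->args[] here */
    return 0;
}

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
    /* emit entries; my_dump_start() is guaranteed to have run */
    return skb->len;
}

static int my_dump_done(struct netlink_callback *cb)
{
    /* tear down walk state without guessing whether my_dump() ever ran */
    return 0;
}

static int my_get_all(struct sock *nlsk, struct sk_buff *skb,
                      struct nlmsghdr *nlh)
{
    struct netlink_dump_control c = {
        .start = my_dump_start,
        .dump  = my_dump,
        .done  = my_dump_done,
    };

    return netlink_dump_start(nlsk, skb, nlh, &c);
}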
CVE-2011-4131
|
https://www.cvedetails.com/cve/CVE-2011-4131/
|
CWE-189
|
https://github.com/torvalds/linux/commit/bf118a342f10dafe44b14451a1392c3254629a1f
|
bf118a342f10dafe44b14451a1392c3254629a1f
|
NFSv4: include bitmap in nfsv4 get acl data
The NFSv4 bitmap size is unbounded: a server can return an arbitrarily
sized bitmap in an FATTR4_WORD0_ACL request. Replace the use of
nfs4_fattr_bitmap_maxsz as a guess at the maximum bitmask returned by a server
with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data
xdr length in the (cached) acl page data.
This is a general solution to commit e5012d1f "NFSv4.1: update
nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead
when getting ACLs.
Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr
was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved.
Cc: [email protected]
Signed-off-by: Andy Adamson <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
{
__be32 *p;
struct nfs_client *clp;
p = reserve_space(xdr, 4);
switch(arg->open_flags & O_EXCL) {
case 0:
*p = cpu_to_be32(NFS4_CREATE_UNCHECKED);
encode_attrs(xdr, arg->u.attrs, arg->server);
break;
default:
clp = arg->server->nfs_client;
if (clp->cl_mvops->minor_version > 0) {
if (nfs4_has_persistent_session(clp)) {
*p = cpu_to_be32(NFS4_CREATE_GUARDED);
encode_attrs(xdr, arg->u.attrs, arg->server);
} else {
struct iattr dummy;
*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1);
encode_nfs4_verifier(xdr, &arg->u.verifier);
dummy.ia_valid = 0;
encode_attrs(xdr, &dummy, arg->server);
}
} else {
*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
encode_nfs4_verifier(xdr, &arg->u.verifier);
}
}
}
|
static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
{
__be32 *p;
struct nfs_client *clp;
p = reserve_space(xdr, 4);
switch(arg->open_flags & O_EXCL) {
case 0:
*p = cpu_to_be32(NFS4_CREATE_UNCHECKED);
encode_attrs(xdr, arg->u.attrs, arg->server);
break;
default:
clp = arg->server->nfs_client;
if (clp->cl_mvops->minor_version > 0) {
if (nfs4_has_persistent_session(clp)) {
*p = cpu_to_be32(NFS4_CREATE_GUARDED);
encode_attrs(xdr, arg->u.attrs, arg->server);
} else {
struct iattr dummy;
*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1);
encode_nfs4_verifier(xdr, &arg->u.verifier);
dummy.ia_valid = 0;
encode_attrs(xdr, &dummy, arg->server);
}
} else {
*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
encode_nfs4_verifier(xdr, &arg->u.verifier);
}
}
}
|
C
|
linux
| 0 |
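Editor's note: the commit message above describes two changes — caching the returned bitmap together with the ACL data instead of guessing its maximum size, and letting a getxattr call with a NULL buffer report the required length rather than fail with -EINVAL. The generic, hedged sketch below illustrates the NULL-buffer size-probe convention only; it is not the NFS client code, just the contract the fix restores.

#include <errno.h>
#include <string.h>
#include <stddef.h>

/* Generic sketch of the getxattr-style contract: a NULL buffer means
 * "tell me how many bytes I need"; copying and range checks happen only
 * when a real buffer is supplied. cached_acl/cached_len stand in for a
 * per-inode ACL cache. */
static long copy_cached_acl(const char *cached_acl, size_t cached_len,
                            char *buf, size_t buflen)
{
    if (buf == NULL)
        return (long)cached_len;     /* size probe, never an error */
    if (buflen < cached_len)
        return -ERANGE;              /* caller's buffer is too small */
    memcpy(buf, cached_acl, cached_len);
    return (long)cached_len;
}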
CVE-2014-0076
|
https://www.cvedetails.com/cve/CVE-2014-0076/
|
CWE-310
|
https://git.openssl.org/gitweb/?p=openssl.git;a=commit;h=2198be3483259de374f91e57d247d0fc667aef29
|
2198be3483259de374f91e57d247d0fc667aef29
| null |
const BIGNUM *BN_value_one(void)
{
static const BN_ULONG data_one=1L;
static const BIGNUM const_one={(BN_ULONG *)&data_one,1,1,0,BN_FLG_STATIC_DATA};
return(&const_one);
}
|
const BIGNUM *BN_value_one(void)
{
static const BN_ULONG data_one=1L;
static const BIGNUM const_one={(BN_ULONG *)&data_one,1,1,0,BN_FLG_STATIC_DATA};
return(&const_one);
}
|
C
|
openssl
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/5041f984669fe3a989a84c348eb838c8f7233f6b
|
5041f984669fe3a989a84c348eb838c8f7233f6b
|
AutoFill: Release the cached frame when we receive the frameDestroyed() message
from WebKit.
BUG=48857
TEST=none
Review URL: http://codereview.chromium.org/3173005
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@55789 0039d316-1c4b-4281-b951-d872f2087c98
|
void RenderView::Close() {
WebView* doomed = webview();
RenderWidget::Close();
Singleton<ViewMap>::get()->erase(doomed);
}
|
void RenderView::Close() {
WebView* doomed = webview();
RenderWidget::Close();
Singleton<ViewMap>::get()->erase(doomed);
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/eb7971fdb0c3b76bacfb77c1ecc76459ef481f17
|
eb7971fdb0c3b76bacfb77c1ecc76459ef481f17
|
Implement delegation to Metro file pickers.
[email protected],[email protected]
BUG=None
TEST=None
Review URL: https://chromiumcodereview.appspot.com/10310103
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@136624 0039d316-1c4b-4281-b951-d872f2087c98
|
UINT_PTR CALLBACK SaveAsDialogHook(HWND dialog, UINT message,
WPARAM wparam, LPARAM lparam) {
static const UINT kPrivateMessage = 0x2F3F;
switch (message) {
case WM_INITDIALOG: {
PostMessage(dialog, kPrivateMessage, 0, 0);
return TRUE;
}
case kPrivateMessage: {
HWND real_dialog = GetParent(dialog);
RECT dialog_rect;
GetWindowRect(real_dialog, &dialog_rect);
POINT point = { dialog_rect.left, dialog_rect.top };
HMONITOR monitor1 = MonitorFromPoint(point, MONITOR_DEFAULTTONULL);
point.x = dialog_rect.right;
point.y = dialog_rect.bottom;
HMONITOR monitor2 = MonitorFromPoint(point, MONITOR_DEFAULTTONULL);
if (monitor1 && monitor2)
return 0;
HWND parent_window = GetParent(real_dialog);
if (!parent_window)
return 0;
WINDOWINFO parent_info;
parent_info.cbSize = sizeof(WINDOWINFO);
GetWindowInfo(parent_window, &parent_info);
SetWindowPos(real_dialog, NULL,
parent_info.rcClient.left,
parent_info.rcClient.top,
0, 0, // Size.
SWP_NOACTIVATE | SWP_NOOWNERZORDER | SWP_NOSIZE |
SWP_NOZORDER);
return 0;
}
}
return 0;
}
|
UINT_PTR CALLBACK SaveAsDialogHook(HWND dialog, UINT message,
WPARAM wparam, LPARAM lparam) {
static const UINT kPrivateMessage = 0x2F3F;
switch (message) {
case WM_INITDIALOG: {
PostMessage(dialog, kPrivateMessage, 0, 0);
return TRUE;
}
case kPrivateMessage: {
HWND real_dialog = GetParent(dialog);
RECT dialog_rect;
GetWindowRect(real_dialog, &dialog_rect);
POINT point = { dialog_rect.left, dialog_rect.top };
HMONITOR monitor1 = MonitorFromPoint(point, MONITOR_DEFAULTTONULL);
point.x = dialog_rect.right;
point.y = dialog_rect.bottom;
HMONITOR monitor2 = MonitorFromPoint(point, MONITOR_DEFAULTTONULL);
if (monitor1 && monitor2)
return 0;
HWND parent_window = GetParent(real_dialog);
if (!parent_window)
return 0;
WINDOWINFO parent_info;
parent_info.cbSize = sizeof(WINDOWINFO);
GetWindowInfo(parent_window, &parent_info);
SetWindowPos(real_dialog, NULL,
parent_info.rcClient.left,
parent_info.rcClient.top,
0, 0, // Size.
SWP_NOACTIVATE | SWP_NOOWNERZORDER | SWP_NOSIZE |
SWP_NOZORDER);
return 0;
}
}
return 0;
}
|
C
|
Chrome
| 0 |
CVE-2012-5375
|
https://www.cvedetails.com/cve/CVE-2012-5375/
|
CWE-310
|
https://github.com/torvalds/linux/commit/9c52057c698fb96f8f07e7a4bcf4801a092bda89
|
9c52057c698fb96f8f07e7a4bcf4801a092bda89
|
Btrfs: fix hash overflow handling
The handling for directory crc hash overflows was fairly obscure:
split_leaf returns EOVERFLOW when we try to extend the item, and that is
supposed to bubble up to userland. For a while it did so, but along the
way we added better handling of errors and forced the FS readonly if we
hit IO errors during the directory insertion.
Along the way, we started testing only for EEXIST and the EOVERFLOW case
was dropped. The end result is that we may force the FS readonly if we
catch a directory hash bucket overflow.
This fixes a few problem spots. First I add tests for EOVERFLOW in the
places where we can safely just return the error up the chain.
btrfs_rename is harder though, because it tries to insert the new
directory item only after it has already unlinked anything the rename
was going to overwrite. Rather than adding very complex logic, I added
a helper to test for the hash overflow case early while it is still safe
to bail out.
Snapshot and subvolume creation had a similar problem, so they are using
the new helper now too.
Signed-off-by: Chris Mason <[email protected]>
Reported-by: Pascal Junod <[email protected]>
|
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
int err;
if (btrfs_root_readonly(root))
return -EROFS;
err = inode_change_ok(inode, attr);
if (err)
return err;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
err = btrfs_setsize(inode, attr->ia_size);
if (err)
return err;
}
if (attr->ia_valid) {
setattr_copy(inode, attr);
inode_inc_iversion(inode);
err = btrfs_dirty_inode(inode);
if (!err && attr->ia_valid & ATTR_MODE)
err = btrfs_acl_chmod(inode);
}
return err;
}
|
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
int err;
if (btrfs_root_readonly(root))
return -EROFS;
err = inode_change_ok(inode, attr);
if (err)
return err;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
err = btrfs_setsize(inode, attr->ia_size);
if (err)
return err;
}
if (attr->ia_valid) {
setattr_copy(inode, attr);
inode_inc_iversion(inode);
err = btrfs_dirty_inode(inode);
if (!err && attr->ia_valid & ATTR_MODE)
err = btrfs_acl_chmod(inode);
}
return err;
}
|
C
|
linux
| 0 |
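Editor's note: the commit message above adds a helper that probes for a directory name-hash collision before rename or snapshot creation has done anything destructive, so EOVERFLOW can still be returned safely. The hedged sketch below shows the early-check idea only; btrfs_probe_dir_item() is a hypothetical stand-in for the real lookup, not an actual btrfs function.

/* Hedged sketch: probe for an existing dir item with the same hashed name
 * before doing any destructive work, so -EOVERFLOW (hash bucket full) and
 * -EEXIST can be reported while bailing out is still safe.
 * btrfs_probe_dir_item() is a hypothetical placeholder. */
static int check_dir_name_collision(struct btrfs_root *root, u64 dir_ino,
                                    const char *name, int name_len)
{
    int ret = btrfs_probe_dir_item(root, dir_ino, name, name_len);

    if (ret == -EOVERFLOW || ret == -EEXIST)
        return ret;     /* caller returns this up the chain untouched */
    return 0;           /* no collision: safe to proceed with the rename */
}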
CVE-2014-2669
|
https://www.cvedetails.com/cve/CVE-2014-2669/
|
CWE-189
|
https://github.com/postgres/postgres/commit/31400a673325147e1205326008e32135a78b4d8a
|
31400a673325147e1205326008e32135a78b4d8a
|
Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064
|
box_area(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
PG_RETURN_FLOAT8(box_ar(box));
}
|
box_area(PG_FUNCTION_ARGS)
{
BOX *box = PG_GETARG_BOX_P(0);
PG_RETURN_FLOAT8(box_ar(box));
}
|
C
|
postgres
| 0 |
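Editor's note: the commit message above describes predicting integer overflow in allocation-size arithmetic instead of letting the multiplication wrap and produce an undersized buffer. The minimal self-contained C sketch below shows that check with plain malloc rather than PostgreSQL's palloc/ereport machinery.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Reject the count before computing count * elem_size, so the size can
 * never wrap to a small value and cause an undersized allocation. */
static void *alloc_array_checked(size_t count, size_t elem_size)
{
    if (elem_size != 0 && count > SIZE_MAX / elem_size)
        return NULL;                 /* would overflow: refuse */
    return malloc(count * elem_size);
}

int main(void) {
    /* A huge element count is refused instead of silently wrapping. */
    void *p = alloc_array_checked(SIZE_MAX / 2, 16);
    printf("huge request: %s\n", p ? "allocated (unexpected)" : "rejected");
    free(p);
    return 0;
}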
CVE-2014-3171
|
https://www.cvedetails.com/cve/CVE-2014-3171/
| null |
https://github.com/chromium/chromium/commit/d10a8dac48d3a9467e81c62cb45208344f4542db
|
d10a8dac48d3a9467e81c62cb45208344f4542db
|
Replace further questionable HashMap::add usages in bindings
BUG=390928
[email protected]
Review URL: https://codereview.chromium.org/411273002
git-svn-id: svn://svn.chromium.org/blink/trunk@178823 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void writeImageData(v8::Handle<v8::Value> value)
{
ImageData* imageData = V8ImageData::toNative(value.As<v8::Object>());
if (!imageData)
return;
Uint8ClampedArray* pixelArray = imageData->data();
m_writer.writeImageData(imageData->width(), imageData->height(), pixelArray->data(), pixelArray->length());
}
|
void writeImageData(v8::Handle<v8::Value> value)
{
ImageData* imageData = V8ImageData::toNative(value.As<v8::Object>());
if (!imageData)
return;
Uint8ClampedArray* pixelArray = imageData->data();
m_writer.writeImageData(imageData->width(), imageData->height(), pixelArray->data(), pixelArray->length());
}
|
C
|
Chrome
| 0 |
CVE-2014-8106
|
https://www.cvedetails.com/cve/CVE-2014-8106/
|
CWE-119
|
https://git.qemu.org/?p=qemu.git;a=commit;h=bf25983345ca44aec3dd92c57142be45452bd38a
|
bf25983345ca44aec3dd92c57142be45452bd38a
| null |
static void cirrus_vga_mem_write(void *opaque,
hwaddr addr,
uint64_t mem_value,
uint32_t size)
{
CirrusVGAState *s = opaque;
unsigned bank_index;
unsigned bank_offset;
unsigned mode;
if ((s->vga.sr[0x07] & 0x01) == 0) {
vga_mem_writeb(&s->vga, addr, mem_value);
return;
}
if (addr < 0x10000) {
if (s->cirrus_srcptr != s->cirrus_srcptr_end) {
/* bitblt */
*s->cirrus_srcptr++ = (uint8_t) mem_value;
if (s->cirrus_srcptr >= s->cirrus_srcptr_end) {
cirrus_bitblt_cputovideo_next(s);
}
} else {
/* video memory */
bank_index = addr >> 15;
bank_offset = addr & 0x7fff;
if (bank_offset < s->cirrus_bank_limit[bank_index]) {
bank_offset += s->cirrus_bank_base[bank_index];
if ((s->vga.gr[0x0B] & 0x14) == 0x14) {
bank_offset <<= 4;
} else if (s->vga.gr[0x0B] & 0x02) {
bank_offset <<= 3;
}
bank_offset &= s->cirrus_addr_mask;
mode = s->vga.gr[0x05] & 0x7;
if (mode < 4 || mode > 5 || ((s->vga.gr[0x0B] & 0x4) == 0)) {
*(s->vga.vram_ptr + bank_offset) = mem_value;
memory_region_set_dirty(&s->vga.vram, bank_offset,
sizeof(mem_value));
} else {
if ((s->vga.gr[0x0B] & 0x14) != 0x14) {
cirrus_mem_writeb_mode4and5_8bpp(s, mode,
bank_offset,
mem_value);
} else {
cirrus_mem_writeb_mode4and5_16bpp(s, mode,
bank_offset,
mem_value);
}
}
}
}
} else if (addr >= 0x18000 && addr < 0x18100) {
/* memory-mapped I/O */
if ((s->vga.sr[0x17] & 0x44) == 0x04) {
cirrus_mmio_blt_write(s, addr & 0xff, mem_value);
}
} else {
#ifdef DEBUG_CIRRUS
printf("cirrus: mem_writeb " TARGET_FMT_plx " value 0x%02" PRIu64 "\n", addr,
mem_value);
#endif
}
}
|
static void cirrus_vga_mem_write(void *opaque,
hwaddr addr,
uint64_t mem_value,
uint32_t size)
{
CirrusVGAState *s = opaque;
unsigned bank_index;
unsigned bank_offset;
unsigned mode;
if ((s->vga.sr[0x07] & 0x01) == 0) {
vga_mem_writeb(&s->vga, addr, mem_value);
return;
}
if (addr < 0x10000) {
if (s->cirrus_srcptr != s->cirrus_srcptr_end) {
/* bitblt */
*s->cirrus_srcptr++ = (uint8_t) mem_value;
if (s->cirrus_srcptr >= s->cirrus_srcptr_end) {
cirrus_bitblt_cputovideo_next(s);
}
} else {
/* video memory */
bank_index = addr >> 15;
bank_offset = addr & 0x7fff;
if (bank_offset < s->cirrus_bank_limit[bank_index]) {
bank_offset += s->cirrus_bank_base[bank_index];
if ((s->vga.gr[0x0B] & 0x14) == 0x14) {
bank_offset <<= 4;
} else if (s->vga.gr[0x0B] & 0x02) {
bank_offset <<= 3;
}
bank_offset &= s->cirrus_addr_mask;
mode = s->vga.gr[0x05] & 0x7;
if (mode < 4 || mode > 5 || ((s->vga.gr[0x0B] & 0x4) == 0)) {
*(s->vga.vram_ptr + bank_offset) = mem_value;
memory_region_set_dirty(&s->vga.vram, bank_offset,
sizeof(mem_value));
} else {
if ((s->vga.gr[0x0B] & 0x14) != 0x14) {
cirrus_mem_writeb_mode4and5_8bpp(s, mode,
bank_offset,
mem_value);
} else {
cirrus_mem_writeb_mode4and5_16bpp(s, mode,
bank_offset,
mem_value);
}
}
}
}
} else if (addr >= 0x18000 && addr < 0x18100) {
/* memory-mapped I/O */
if ((s->vga.sr[0x17] & 0x44) == 0x04) {
cirrus_mmio_blt_write(s, addr & 0xff, mem_value);
}
} else {
#ifdef DEBUG_CIRRUS
printf("cirrus: mem_writeb " TARGET_FMT_plx " value 0x%02" PRIu64 "\n", addr,
mem_value);
#endif
}
}
|
C
|
qemu
| 0 |
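Editor's note: the cirrus_vga_mem_write() record above splits a guest address into a 32 KB bank index and offset before rebasing and optionally shifting it. The short worked example below traces that arithmetic for one address; the bank base and register settings are illustrative assumptions.

#include <stdio.h>
#include <stdint.h>

/* Worked example of the bank arithmetic in cirrus_vga_mem_write():
 * addr 0x9234 -> bank_index 1, bank_offset 0x1234; the offset is then
 * rebased on an (assumed) bank base, and would additionally be shifted
 * left by 4 or 3 when gr[0x0B] selects a coarser banking granularity. */
int main(void) {
    uint32_t addr = 0x9234;
    unsigned bank_index  = addr >> 15;      /* = 1      */
    unsigned bank_offset = addr & 0x7fff;   /* = 0x1234 */
    unsigned bank_base   = 0x8000;          /* assumed, set by bank regs */

    bank_offset += bank_base;
    /* if ((gr0B & 0x14) == 0x14) bank_offset <<= 4;
       else if (gr0B & 0x02)      bank_offset <<= 3; */

    printf("bank %u, final offset 0x%x\n", bank_index, bank_offset);
    return 0;
}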
CVE-2018-6127
|
https://www.cvedetails.com/cve/CVE-2018-6127/
| null |
https://github.com/chromium/chromium/commit/28044cb7ef4488e7278c2b80f0e3a2c3707d03b6
|
28044cb7ef4488e7278c2b80f0e3a2c3707d03b6
|
[IndexedDB] Fixing early destruction of connection during forceclose
Patch is as small as possible for merging.
Bug: 842990
Change-Id: I9968ffee1bf3279e61e1ec13e4d541f713caf12f
Reviewed-on: https://chromium-review.googlesource.com/1062935
Commit-Queue: Daniel Murphy <[email protected]>
Commit-Queue: Victor Costan <[email protected]>
Reviewed-by: Victor Costan <[email protected]>
Cr-Commit-Position: refs/heads/master@{#559383}
|
void IndexedDBDatabase::SetIndexesReady(IndexedDBTransaction* transaction,
int64_t,
const std::vector<int64_t>& index_ids) {
DCHECK(transaction);
DCHECK_EQ(transaction->mode(), blink::kWebIDBTransactionModeVersionChange);
transaction->ScheduleTask(
blink::kWebIDBTaskTypePreemptive,
base::BindOnce(&IndexedDBDatabase::SetIndexesReadyOperation, this,
index_ids.size()));
}
|
void IndexedDBDatabase::SetIndexesReady(IndexedDBTransaction* transaction,
int64_t,
const std::vector<int64_t>& index_ids) {
DCHECK(transaction);
DCHECK_EQ(transaction->mode(), blink::kWebIDBTransactionModeVersionChange);
transaction->ScheduleTask(
blink::kWebIDBTaskTypePreemptive,
base::BindOnce(&IndexedDBDatabase::SetIndexesReadyOperation, this,
index_ids.size()));
}
|
C
|
Chrome
| 0 |
CVE-2016-5156
|
https://www.cvedetails.com/cve/CVE-2016-5156/
|
CWE-416
|
https://github.com/chromium/chromium/commit/ba011d9f8322c62633a069a59c2c5525e3ff46cc
|
ba011d9f8322c62633a069a59c2c5525e3ff46cc
|
Ignore filtered event if an event matcher cannot be added.
BUG=625404
Review-Url: https://codereview.chromium.org/2236133002
Cr-Commit-Position: refs/heads/master@{#411472}
|
EventFilteringInfo ParseFromObject(v8::Local<v8::Object> object,
v8::Isolate* isolate) {
EventFilteringInfo info;
v8::Local<v8::String> url(v8::String::NewFromUtf8(isolate, "url"));
if (object->Has(url)) {
v8::Local<v8::Value> url_value(object->Get(url));
info.SetURL(GURL(*v8::String::Utf8Value(url_value)));
}
v8::Local<v8::String> instance_id(
v8::String::NewFromUtf8(isolate, "instanceId"));
if (object->Has(instance_id)) {
v8::Local<v8::Value> instance_id_value(object->Get(instance_id));
info.SetInstanceID(instance_id_value->IntegerValue());
}
v8::Local<v8::String> service_type(
v8::String::NewFromUtf8(isolate, "serviceType"));
if (object->Has(service_type)) {
v8::Local<v8::Value> service_type_value(object->Get(service_type));
info.SetServiceType(*v8::String::Utf8Value(service_type_value));
}
v8::Local<v8::String> window_types(
v8::String::NewFromUtf8(isolate, "windowType"));
if (object->Has(window_types)) {
v8::Local<v8::Value> window_types_value(object->Get(window_types));
info.SetWindowType(*v8::String::Utf8Value(window_types_value));
}
v8::Local<v8::String> window_exposed(
v8::String::NewFromUtf8(isolate, "windowExposedByDefault"));
if (object->Has(window_exposed)) {
v8::Local<v8::Value> window_exposed_value(object->Get(window_exposed));
info.SetWindowExposedByDefault(
window_exposed_value.As<v8::Boolean>()->Value());
}
return info;
}
|
EventFilteringInfo ParseFromObject(v8::Local<v8::Object> object,
v8::Isolate* isolate) {
EventFilteringInfo info;
v8::Local<v8::String> url(v8::String::NewFromUtf8(isolate, "url"));
if (object->Has(url)) {
v8::Local<v8::Value> url_value(object->Get(url));
info.SetURL(GURL(*v8::String::Utf8Value(url_value)));
}
v8::Local<v8::String> instance_id(
v8::String::NewFromUtf8(isolate, "instanceId"));
if (object->Has(instance_id)) {
v8::Local<v8::Value> instance_id_value(object->Get(instance_id));
info.SetInstanceID(instance_id_value->IntegerValue());
}
v8::Local<v8::String> service_type(
v8::String::NewFromUtf8(isolate, "serviceType"));
if (object->Has(service_type)) {
v8::Local<v8::Value> service_type_value(object->Get(service_type));
info.SetServiceType(*v8::String::Utf8Value(service_type_value));
}
v8::Local<v8::String> window_types(
v8::String::NewFromUtf8(isolate, "windowType"));
if (object->Has(window_types)) {
v8::Local<v8::Value> window_types_value(object->Get(window_types));
info.SetWindowType(*v8::String::Utf8Value(window_types_value));
}
v8::Local<v8::String> window_exposed(
v8::String::NewFromUtf8(isolate, "windowExposedByDefault"));
if (object->Has(window_exposed)) {
v8::Local<v8::Value> window_exposed_value(object->Get(window_exposed));
info.SetWindowExposedByDefault(
window_exposed_value.As<v8::Boolean>()->Value());
}
return info;
}
|
C
|
Chrome
| 0 |
CVE-2014-3122
|
https://www.cvedetails.com/cve/CVE-2014-3122/
|
CWE-264
|
https://github.com/torvalds/linux/commit/57e68e9cd65b4b8eb4045a1e0d0746458502554c
|
57e68e9cd65b4b8eb4045a1e0d0746458502554c
|
mm: try_to_unmap_cluster() should lock_page() before mlocking
A BUG_ON(!PageLocked) was triggered in mlock_vma_page() by Sasha Levin
fuzzing with trinity. The call site try_to_unmap_cluster() does not lock
the pages other than its check_page parameter (which is already locked).
The BUG_ON in mlock_vma_page() is not documented and its purpose is
somewhat unclear, but apparently it serializes against page migration,
which could otherwise fail to transfer the PG_mlocked flag. This would
not be fatal, as the page would be eventually encountered again, but
NR_MLOCK accounting would become distorted nevertheless. This patch adds
a comment to the BUG_ON in mlock_vma_page() and munlock_vma_page() to that
effect.
The call site try_to_unmap_cluster() is fixed so that for page !=
check_page, trylock_page() is attempted (to avoid possible deadlocks as we
already have check_page locked) and mlock_vma_page() is performed only
upon success. If the page lock cannot be obtained, the page is left
without PG_mlocked, which is again not a problem in the whole unevictable
memory design.
Signed-off-by: Vlastimil Babka <[email protected]>
Signed-off-by: Bob Liu <[email protected]>
Reported-by: Sasha Levin <[email protected]>
Cc: Wanpeng Li <[email protected]>
Cc: Michel Lespinasse <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
void page_remove_rmap(struct page *page)
{
bool anon = PageAnon(page);
bool locked;
unsigned long flags;
/*
* The anon case has no mem_cgroup page_stat to update; but may
* uncharge_page() below, where the lock ordering can deadlock if
* we hold the lock against page_stat move: so avoid it on anon.
*/
if (!anon)
mem_cgroup_begin_update_page_stat(page, &locked, &flags);
/* page still mapped by someone else? */
if (!atomic_add_negative(-1, &page->_mapcount))
goto out;
/*
* Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
* and not charged by memcg for now.
*/
if (unlikely(PageHuge(page)))
goto out;
if (anon) {
mem_cgroup_uncharge_page(page);
if (PageTransHuge(page))
__dec_zone_page_state(page,
NR_ANON_TRANSPARENT_HUGEPAGES);
__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-hpage_nr_pages(page));
} else {
__dec_zone_page_state(page, NR_FILE_MAPPED);
mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
mem_cgroup_end_update_page_stat(page, &locked, &flags);
}
if (unlikely(PageMlocked(page)))
clear_page_mlock(page);
/*
* It would be tidy to reset the PageAnon mapping here,
* but that might overwrite a racing page_add_anon_rmap
* which increments mapcount after us but sets mapping
* before us: so leave the reset to free_hot_cold_page,
* and remember that it's only reliable while mapped.
* Leaving it set also helps swapoff to reinstate ptes
* faster for those pages still in swapcache.
*/
return;
out:
if (!anon)
mem_cgroup_end_update_page_stat(page, &locked, &flags);
}
|
void page_remove_rmap(struct page *page)
{
bool anon = PageAnon(page);
bool locked;
unsigned long flags;
/*
* The anon case has no mem_cgroup page_stat to update; but may
* uncharge_page() below, where the lock ordering can deadlock if
* we hold the lock against page_stat move: so avoid it on anon.
*/
if (!anon)
mem_cgroup_begin_update_page_stat(page, &locked, &flags);
/* page still mapped by someone else? */
if (!atomic_add_negative(-1, &page->_mapcount))
goto out;
/*
* Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
* and not charged by memcg for now.
*/
if (unlikely(PageHuge(page)))
goto out;
if (anon) {
mem_cgroup_uncharge_page(page);
if (PageTransHuge(page))
__dec_zone_page_state(page,
NR_ANON_TRANSPARENT_HUGEPAGES);
__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-hpage_nr_pages(page));
} else {
__dec_zone_page_state(page, NR_FILE_MAPPED);
mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
mem_cgroup_end_update_page_stat(page, &locked, &flags);
}
if (unlikely(PageMlocked(page)))
clear_page_mlock(page);
/*
* It would be tidy to reset the PageAnon mapping here,
* but that might overwrite a racing page_add_anon_rmap
* which increments mapcount after us but sets mapping
* before us: so leave the reset to free_hot_cold_page,
* and remember that it's only reliable while mapped.
* Leaving it set also helps swapoff to reinstate ptes
* faster for those pages still in swapcache.
*/
return;
out:
if (!anon)
mem_cgroup_end_update_page_stat(page, &locked, &flags);
}
|
C
|
linux
| 0 |
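Editor's note: the commit message above states the rule the fix enforces — mlock_vma_page() may only be called on a locked page, so for any page other than the already-locked check_page the code must use trylock_page() and simply skip the page if the lock cannot be taken. The hedged sketch below shows that pattern using kernel-internal helpers; it is not the exact kernel hunk.

/* Hedged sketch of the locking rule described above (mlock_vma_page and
 * trylock_page are kernel-internal helpers): only mlock a page we hold
 * the page lock on; on trylock failure just skip it, since the page will
 * be encountered again and correctness is unaffected. */
static void maybe_mlock_page(struct page *page, struct page *check_page)
{
    if (page == check_page) {
        mlock_vma_page(page);          /* caller already holds the lock */
    } else if (trylock_page(page)) {
        mlock_vma_page(page);
        unlock_page(page);
    }
    /* trylock failed: leave the page without PG_mlocked */
}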
CVE-2018-18955
|
https://www.cvedetails.com/cve/CVE-2018-18955/
|
CWE-20
|
https://github.com/torvalds/linux/commit/d2f007dbe7e4c9583eea6eb04d60001e85c6f1bd
|
d2f007dbe7e4c9583eea6eb04d60001e85c6f1bd
|
userns: also map extents in the reverse map to kernel IDs
The current logic first clones the extent array and sorts both copies, then
maps the lower IDs of the forward mapping into the lower namespace, but
doesn't map the lower IDs of the reverse mapping.
This means that code in a nested user namespace with >5 extents will see
incorrect IDs. It also breaks some access checks, like
inode_owner_or_capable() and privileged_wrt_inode_uidgid(), so a process
can incorrectly appear to be capable relative to an inode.
To fix it, we have to make sure that the "lower_first" members of extents
in both arrays are translated; and we have to make sure that the reverse
map is sorted *after* the translation (since otherwise the translation can
break the sorting).
This is CVE-2018-18955.
Fixes: 6397fac4915a ("userns: bump idmap limits to 340")
Cc: [email protected]
Signed-off-by: Jann Horn <[email protected]>
Tested-by: Eric W. Biederman <[email protected]>
Reviewed-by: Eric W. Biederman <[email protected]>
Signed-off-by: Eric W. Biederman <[email protected]>
|
static struct ucounts *inc_user_namespaces(struct user_namespace *ns, kuid_t uid)
{
return inc_ucount(ns, uid, UCOUNT_USER_NAMESPACES);
}
|
static struct ucounts *inc_user_namespaces(struct user_namespace *ns, kuid_t uid)
{
return inc_ucount(ns, uid, UCOUNT_USER_NAMESPACES);
}
|
C
|
linux
| 0 |
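Editor's note: the commit message above imposes two ordering constraints — lower_first must be translated into parent IDs in both the forward and the reverse extent arrays, and the reverse array must be sorted only after that translation. The self-contained sketch below models those constraints with simplified stand-in types, not the kernel's uid_gid_map.

#include <stdint.h>
#include <stdlib.h>

/* Simplified stand-in for a uid/gid mapping extent. */
struct extent { uint32_t first; uint32_t lower_first; uint32_t count; };

/* Map an ID range through the parent namespace; (uint32_t)-1 on failure. */
static uint32_t lookup_in_parent(const struct extent *p, unsigned pn,
                                 uint32_t id, uint32_t count)
{
    for (unsigned i = 0; i < pn; i++)
        if (id >= p[i].first && id + count <= p[i].first + p[i].count)
            return p[i].lower_first + (id - p[i].first);
    return (uint32_t)-1;
}

static int cmp_lower_first(const void *a, const void *b)
{
    const struct extent *ea = a, *eb = b;
    return (ea->lower_first > eb->lower_first) -
           (ea->lower_first < eb->lower_first);
}

/* Translate lower_first in BOTH copies, then sort the reverse copy,
 * because sorting before translation can be invalidated by it. */
static int translate_both_maps(struct extent *fwd, struct extent *rev,
                               unsigned nr,
                               const struct extent *parent, unsigned pn)
{
    for (unsigned i = 0; i < nr; i++) {
        uint32_t lower = lookup_in_parent(parent, pn,
                                          fwd[i].lower_first, fwd[i].count);
        if (lower == (uint32_t)-1)
            return -1;
        fwd[i].lower_first = lower;   /* forward map */
        rev[i].lower_first = lower;   /* reverse map must be mapped too */
    }
    qsort(rev, nr, sizeof(*rev), cmp_lower_first);  /* sort AFTER mapping */
    return 0;
}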
CVE-2018-20679
|
https://www.cvedetails.com/cve/CVE-2018-20679/
|
CWE-125
|
https://git.busybox.net/busybox/commit/?id=6d3b4bb24da9a07c263f3c1acf8df85382ff562c
|
6d3b4bb24da9a07c263f3c1acf8df85382ff562c
| null |
static void clear_leases(const uint8_t *chaddr, uint32_t yiaddr)
{
unsigned i;
for (i = 0; i < server_config.max_leases; i++) {
if ((chaddr && memcmp(g_leases[i].lease_mac, chaddr, 6) == 0)
|| (yiaddr && g_leases[i].lease_nip == yiaddr)
) {
memset(&g_leases[i], 0, sizeof(g_leases[i]));
}
}
}
|
static void clear_leases(const uint8_t *chaddr, uint32_t yiaddr)
{
unsigned i;
for (i = 0; i < server_config.max_leases; i++) {
if ((chaddr && memcmp(g_leases[i].lease_mac, chaddr, 6) == 0)
|| (yiaddr && g_leases[i].lease_nip == yiaddr)
) {
memset(&g_leases[i], 0, sizeof(g_leases[i]));
}
}
}
|
C
|
busybox
| 0 |
CVE-2017-7375
|
https://www.cvedetails.com/cve/CVE-2017-7375/
|
CWE-611
|
https://android.googlesource.com/platform/external/libxml2/+/308396a55280f69ad4112d4f9892f4cbeff042aa
|
308396a55280f69ad4112d4f9892f4cbeff042aa
|
DO NOT MERGE: Add validation for external entities
https://bugzilla.gnome.org/show_bug.cgi?id=780691
Bug: 36556310
Change-Id: I9450743e167c3c73af5e4071f3fc85e81d061648
(cherry picked from commit bef9af3d89d241bcb518c20cba6da2a2fd9ba049)
|
xmlParseEntityRef(xmlParserCtxtPtr ctxt) {
const xmlChar *name;
xmlEntityPtr ent = NULL;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return(NULL);
if (RAW != '&')
return(NULL);
NEXT;
name = xmlParseName(ctxt);
if (name == NULL) {
xmlFatalErrMsg(ctxt, XML_ERR_NAME_REQUIRED,
"xmlParseEntityRef: no name\n");
return(NULL);
}
if (RAW != ';') {
xmlFatalErr(ctxt, XML_ERR_ENTITYREF_SEMICOL_MISSING, NULL);
return(NULL);
}
NEXT;
/*
* Predefined entities override any extra definition
*/
if ((ctxt->options & XML_PARSE_OLDSAX) == 0) {
ent = xmlGetPredefinedEntity(name);
if (ent != NULL)
return(ent);
}
/*
* Increase the number of entity references parsed
*/
ctxt->nbentities++;
/*
* Ask first SAX for entity resolution, otherwise try the
* entities which may have stored in the parser context.
*/
if (ctxt->sax != NULL) {
if (ctxt->sax->getEntity != NULL)
ent = ctxt->sax->getEntity(ctxt->userData, name);
if ((ctxt->wellFormed == 1 ) && (ent == NULL) &&
(ctxt->options & XML_PARSE_OLDSAX))
ent = xmlGetPredefinedEntity(name);
if ((ctxt->wellFormed == 1 ) && (ent == NULL) &&
(ctxt->userData==ctxt)) {
ent = xmlSAX2GetEntity(ctxt, name);
}
}
if (ctxt->instate == XML_PARSER_EOF)
return(NULL);
/*
* [ WFC: Entity Declared ]
* In a document without any DTD, a document with only an
* internal DTD subset which contains no parameter entity
* references, or a document with "standalone='yes'", the
* Name given in the entity reference must match that in an
* entity declaration, except that well-formed documents
* need not declare any of the following entities: amp, lt,
* gt, apos, quot.
* The declaration of a parameter entity must precede any
* reference to it.
* Similarly, the declaration of a general entity must
* precede any reference to it which appears in a default
* value in an attribute-list declaration. Note that if
* entities are declared in the external subset or in
* external parameter entities, a non-validating processor
* is not obligated to read and process their declarations;
* for such documents, the rule that an entity must be
* declared is a well-formedness constraint only if
* standalone='yes'.
*/
if (ent == NULL) {
if ((ctxt->standalone == 1) ||
((ctxt->hasExternalSubset == 0) &&
(ctxt->hasPErefs == 0))) {
xmlFatalErrMsgStr(ctxt, XML_ERR_UNDECLARED_ENTITY,
"Entity '%s' not defined\n", name);
} else {
xmlErrMsgStr(ctxt, XML_WAR_UNDECLARED_ENTITY,
"Entity '%s' not defined\n", name);
if ((ctxt->inSubset == 0) &&
(ctxt->sax != NULL) &&
(ctxt->sax->reference != NULL)) {
ctxt->sax->reference(ctxt->userData, name);
}
}
xmlParserEntityCheck(ctxt, 0, ent, 0);
ctxt->valid = 0;
}
/*
* [ WFC: Parsed Entity ]
* An entity reference must not contain the name of an
* unparsed entity
*/
else if (ent->etype == XML_EXTERNAL_GENERAL_UNPARSED_ENTITY) {
xmlFatalErrMsgStr(ctxt, XML_ERR_UNPARSED_ENTITY,
"Entity reference to unparsed entity %s\n", name);
}
/*
* [ WFC: No External Entity References ]
* Attribute values cannot contain direct or indirect
* entity references to external entities.
*/
else if ((ctxt->instate == XML_PARSER_ATTRIBUTE_VALUE) &&
(ent->etype == XML_EXTERNAL_GENERAL_PARSED_ENTITY)) {
xmlFatalErrMsgStr(ctxt, XML_ERR_ENTITY_IS_EXTERNAL,
"Attribute references external entity '%s'\n", name);
}
/*
* [ WFC: No < in Attribute Values ]
* The replacement text of any entity referred to directly or
* indirectly in an attribute value (other than "<") must
* not contain a <.
*/
else if ((ctxt->instate == XML_PARSER_ATTRIBUTE_VALUE) &&
(ent != NULL) &&
(ent->etype != XML_INTERNAL_PREDEFINED_ENTITY)) {
if (((ent->checked & 1) || (ent->checked == 0)) &&
(ent->content != NULL) && (xmlStrchr(ent->content, '<'))) {
xmlFatalErrMsgStr(ctxt, XML_ERR_LT_IN_ATTRIBUTE,
"'<' in entity '%s' is not allowed in attributes values\n", name);
}
}
/*
* Internal check, no parameter entities here ...
*/
else {
switch (ent->etype) {
case XML_INTERNAL_PARAMETER_ENTITY:
case XML_EXTERNAL_PARAMETER_ENTITY:
xmlFatalErrMsgStr(ctxt, XML_ERR_ENTITY_IS_PARAMETER,
"Attempt to reference the parameter entity '%s'\n",
name);
break;
default:
break;
}
}
/*
* [ WFC: No Recursion ]
* A parsed entity must not contain a recursive reference
* to itself, either directly or indirectly.
* Done somewhere else
*/
return(ent);
}
|
xmlParseEntityRef(xmlParserCtxtPtr ctxt) {
const xmlChar *name;
xmlEntityPtr ent = NULL;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return(NULL);
if (RAW != '&')
return(NULL);
NEXT;
name = xmlParseName(ctxt);
if (name == NULL) {
xmlFatalErrMsg(ctxt, XML_ERR_NAME_REQUIRED,
"xmlParseEntityRef: no name\n");
return(NULL);
}
if (RAW != ';') {
xmlFatalErr(ctxt, XML_ERR_ENTITYREF_SEMICOL_MISSING, NULL);
return(NULL);
}
NEXT;
/*
* Predefined entities override any extra definition
*/
if ((ctxt->options & XML_PARSE_OLDSAX) == 0) {
ent = xmlGetPredefinedEntity(name);
if (ent != NULL)
return(ent);
}
/*
* Increase the number of entity references parsed
*/
ctxt->nbentities++;
/*
* Ask first SAX for entity resolution, otherwise try the
* entities which may have stored in the parser context.
*/
if (ctxt->sax != NULL) {
if (ctxt->sax->getEntity != NULL)
ent = ctxt->sax->getEntity(ctxt->userData, name);
if ((ctxt->wellFormed == 1 ) && (ent == NULL) &&
(ctxt->options & XML_PARSE_OLDSAX))
ent = xmlGetPredefinedEntity(name);
if ((ctxt->wellFormed == 1 ) && (ent == NULL) &&
(ctxt->userData==ctxt)) {
ent = xmlSAX2GetEntity(ctxt, name);
}
}
if (ctxt->instate == XML_PARSER_EOF)
return(NULL);
/*
* [ WFC: Entity Declared ]
* In a document without any DTD, a document with only an
* internal DTD subset which contains no parameter entity
* references, or a document with "standalone='yes'", the
* Name given in the entity reference must match that in an
* entity declaration, except that well-formed documents
* need not declare any of the following entities: amp, lt,
* gt, apos, quot.
* The declaration of a parameter entity must precede any
* reference to it.
* Similarly, the declaration of a general entity must
* precede any reference to it which appears in a default
* value in an attribute-list declaration. Note that if
* entities are declared in the external subset or in
* external parameter entities, a non-validating processor
* is not obligated to read and process their declarations;
* for such documents, the rule that an entity must be
* declared is a well-formedness constraint only if
* standalone='yes'.
*/
if (ent == NULL) {
if ((ctxt->standalone == 1) ||
((ctxt->hasExternalSubset == 0) &&
(ctxt->hasPErefs == 0))) {
xmlFatalErrMsgStr(ctxt, XML_ERR_UNDECLARED_ENTITY,
"Entity '%s' not defined\n", name);
} else {
xmlErrMsgStr(ctxt, XML_WAR_UNDECLARED_ENTITY,
"Entity '%s' not defined\n", name);
if ((ctxt->inSubset == 0) &&
(ctxt->sax != NULL) &&
(ctxt->sax->reference != NULL)) {
ctxt->sax->reference(ctxt->userData, name);
}
}
xmlParserEntityCheck(ctxt, 0, ent, 0);
ctxt->valid = 0;
}
/*
* [ WFC: Parsed Entity ]
* An entity reference must not contain the name of an
* unparsed entity
*/
else if (ent->etype == XML_EXTERNAL_GENERAL_UNPARSED_ENTITY) {
xmlFatalErrMsgStr(ctxt, XML_ERR_UNPARSED_ENTITY,
"Entity reference to unparsed entity %s\n", name);
}
/*
* [ WFC: No External Entity References ]
* Attribute values cannot contain direct or indirect
* entity references to external entities.
*/
else if ((ctxt->instate == XML_PARSER_ATTRIBUTE_VALUE) &&
(ent->etype == XML_EXTERNAL_GENERAL_PARSED_ENTITY)) {
xmlFatalErrMsgStr(ctxt, XML_ERR_ENTITY_IS_EXTERNAL,
"Attribute references external entity '%s'\n", name);
}
/*
* [ WFC: No < in Attribute Values ]
* The replacement text of any entity referred to directly or
* indirectly in an attribute value (other than "<") must
* not contain a <.
*/
else if ((ctxt->instate == XML_PARSER_ATTRIBUTE_VALUE) &&
(ent != NULL) &&
(ent->etype != XML_INTERNAL_PREDEFINED_ENTITY)) {
if (((ent->checked & 1) || (ent->checked == 0)) &&
(ent->content != NULL) && (xmlStrchr(ent->content, '<'))) {
xmlFatalErrMsgStr(ctxt, XML_ERR_LT_IN_ATTRIBUTE,
"'<' in entity '%s' is not allowed in attributes values\n", name);
}
}
/*
* Internal check, no parameter entities here ...
*/
else {
switch (ent->etype) {
case XML_INTERNAL_PARAMETER_ENTITY:
case XML_EXTERNAL_PARAMETER_ENTITY:
xmlFatalErrMsgStr(ctxt, XML_ERR_ENTITY_IS_PARAMETER,
"Attempt to reference the parameter entity '%s'\n",
name);
break;
default:
break;
}
}
/*
* [ WFC: No Recursion ]
* A parsed entity must not contain a recursive reference
* to itself, either directly or indirectly.
* Done somewhere else
*/
return(ent);
}
|
C
|
Android
| 0 |
CVE-2016-2496
|
https://www.cvedetails.com/cve/CVE-2016-2496/
|
CWE-264
|
https://android.googlesource.com/platform/frameworks/native/+/03a53d1c7765eeb3af0bc34c3dff02ada1953fbf
|
03a53d1c7765eeb3af0bc34c3dff02ada1953fbf
|
Add new MotionEvent flag for partially obscured windows.
Due to more complex window layouts resulting in lots of overlapping
windows, the policy around FLAG_WINDOW_IS_OBSCURED has changed to
only be set when the point at which the window was touched is
obscured. Unfortunately, this doesn't prevent tapjacking attacks that
overlay the dialog's text, making a potentially dangerous operation
seem innocuous. To avoid this on particularly sensitive dialogs,
introduce a new flag that really does tell you when your window is
being even partially overlapped.
We aren't exposing this as API since we plan on making the original
flag more robust. This is really a workaround for system dialogs
since we generally know their layout and screen position, and that
they're unlikely to be overlapped by other applications.
Bug: 26677796
Change-Id: I9e336afe90f262ba22015876769a9c510048fd47
|
void InputDispatcher::notifyMotion(const NotifyMotionArgs* args) {
#if DEBUG_INBOUND_EVENT_DETAILS
ALOGD("notifyMotion - eventTime=%lld, deviceId=%d, source=0x%x, policyFlags=0x%x, "
"action=0x%x, actionButton=0x%x, flags=0x%x, metaState=0x%x, buttonState=0x%x,"
"edgeFlags=0x%x, xPrecision=%f, yPrecision=%f, downTime=%lld",
args->eventTime, args->deviceId, args->source, args->policyFlags,
args->action, args->actionButton, args->flags, args->metaState, args->buttonState,
args->edgeFlags, args->xPrecision, args->yPrecision, args->downTime);
for (uint32_t i = 0; i < args->pointerCount; i++) {
ALOGD(" Pointer %d: id=%d, toolType=%d, "
"x=%f, y=%f, pressure=%f, size=%f, "
"touchMajor=%f, touchMinor=%f, toolMajor=%f, toolMinor=%f, "
"orientation=%f",
i, args->pointerProperties[i].id,
args->pointerProperties[i].toolType,
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_X),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_Y),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_PRESSURE),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_SIZE),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_TOUCH_MAJOR),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_TOUCH_MINOR),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_TOOL_MAJOR),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_TOOL_MINOR),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_ORIENTATION));
}
#endif
if (!validateMotionEvent(args->action, args->actionButton,
args->pointerCount, args->pointerProperties)) {
return;
}
uint32_t policyFlags = args->policyFlags;
policyFlags |= POLICY_FLAG_TRUSTED;
mPolicy->interceptMotionBeforeQueueing(args->eventTime, /*byref*/ policyFlags);
bool needWake;
{ // acquire lock
mLock.lock();
if (shouldSendMotionToInputFilterLocked(args)) {
mLock.unlock();
MotionEvent event;
event.initialize(args->deviceId, args->source, args->action, args->actionButton,
args->flags, args->edgeFlags, args->metaState, args->buttonState,
0, 0, args->xPrecision, args->yPrecision,
args->downTime, args->eventTime,
args->pointerCount, args->pointerProperties, args->pointerCoords);
policyFlags |= POLICY_FLAG_FILTERED;
if (!mPolicy->filterInputEvent(&event, policyFlags)) {
return; // event was consumed by the filter
}
mLock.lock();
}
MotionEntry* newEntry = new MotionEntry(args->eventTime,
args->deviceId, args->source, policyFlags,
args->action, args->actionButton, args->flags,
args->metaState, args->buttonState,
args->edgeFlags, args->xPrecision, args->yPrecision, args->downTime,
args->displayId,
args->pointerCount, args->pointerProperties, args->pointerCoords, 0, 0);
needWake = enqueueInboundEventLocked(newEntry);
mLock.unlock();
} // release lock
if (needWake) {
mLooper->wake();
}
}
|
void InputDispatcher::notifyMotion(const NotifyMotionArgs* args) {
#if DEBUG_INBOUND_EVENT_DETAILS
ALOGD("notifyMotion - eventTime=%lld, deviceId=%d, source=0x%x, policyFlags=0x%x, "
"action=0x%x, actionButton=0x%x, flags=0x%x, metaState=0x%x, buttonState=0x%x,"
"edgeFlags=0x%x, xPrecision=%f, yPrecision=%f, downTime=%lld",
args->eventTime, args->deviceId, args->source, args->policyFlags,
args->action, args->actionButton, args->flags, args->metaState, args->buttonState,
args->edgeFlags, args->xPrecision, args->yPrecision, args->downTime);
for (uint32_t i = 0; i < args->pointerCount; i++) {
ALOGD(" Pointer %d: id=%d, toolType=%d, "
"x=%f, y=%f, pressure=%f, size=%f, "
"touchMajor=%f, touchMinor=%f, toolMajor=%f, toolMinor=%f, "
"orientation=%f",
i, args->pointerProperties[i].id,
args->pointerProperties[i].toolType,
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_X),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_Y),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_PRESSURE),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_SIZE),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_TOUCH_MAJOR),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_TOUCH_MINOR),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_TOOL_MAJOR),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_TOOL_MINOR),
args->pointerCoords[i].getAxisValue(AMOTION_EVENT_AXIS_ORIENTATION));
}
#endif
if (!validateMotionEvent(args->action, args->actionButton,
args->pointerCount, args->pointerProperties)) {
return;
}
uint32_t policyFlags = args->policyFlags;
policyFlags |= POLICY_FLAG_TRUSTED;
mPolicy->interceptMotionBeforeQueueing(args->eventTime, /*byref*/ policyFlags);
bool needWake;
{ // acquire lock
mLock.lock();
if (shouldSendMotionToInputFilterLocked(args)) {
mLock.unlock();
MotionEvent event;
event.initialize(args->deviceId, args->source, args->action, args->actionButton,
args->flags, args->edgeFlags, args->metaState, args->buttonState,
0, 0, args->xPrecision, args->yPrecision,
args->downTime, args->eventTime,
args->pointerCount, args->pointerProperties, args->pointerCoords);
policyFlags |= POLICY_FLAG_FILTERED;
if (!mPolicy->filterInputEvent(&event, policyFlags)) {
return; // event was consumed by the filter
}
mLock.lock();
}
MotionEntry* newEntry = new MotionEntry(args->eventTime,
args->deviceId, args->source, policyFlags,
args->action, args->actionButton, args->flags,
args->metaState, args->buttonState,
args->edgeFlags, args->xPrecision, args->yPrecision, args->downTime,
args->displayId,
args->pointerCount, args->pointerProperties, args->pointerCoords, 0, 0);
needWake = enqueueInboundEventLocked(newEntry);
mLock.unlock();
} // release lock
if (needWake) {
mLooper->wake();
}
}
|
C
|
Android
| 0 |
CVE-2013-7339
|
https://www.cvedetails.com/cve/CVE-2013-7339/
|
CWE-399
|
https://github.com/torvalds/linux/commit/c2349758acf1874e4c2b93fe41d072336f1a31d0
|
c2349758acf1874e4c2b93fe41d072336f1a31d0
|
rds: prevent dereference of a NULL device
Binding might result in a NULL device, which is dereferenced
causing this BUG:
[ 1317.260548] BUG: unable to handle kernel NULL pointer dereference at 000000000000097
4
[ 1317.261847] IP: [<ffffffff84225f52>] rds_ib_laddr_check+0x82/0x110
[ 1317.263315] PGD 418bcb067 PUD 3ceb21067 PMD 0
[ 1317.263502] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC
[ 1317.264179] Dumping ftrace buffer:
[ 1317.264774] (ftrace buffer empty)
[ 1317.265220] Modules linked in:
[ 1317.265824] CPU: 4 PID: 836 Comm: trinity-child46 Tainted: G W 3.13.0-rc4-
next-20131218-sasha-00013-g2cebb9b-dirty #4159
[ 1317.267415] task: ffff8803ddf33000 ti: ffff8803cd31a000 task.ti: ffff8803cd31a000
[ 1317.268399] RIP: 0010:[<ffffffff84225f52>] [<ffffffff84225f52>] rds_ib_laddr_check+
0x82/0x110
[ 1317.269670] RSP: 0000:ffff8803cd31bdf8 EFLAGS: 00010246
[ 1317.270230] RAX: 0000000000000000 RBX: ffff88020b0dd388 RCX: 0000000000000000
[ 1317.270230] RDX: ffffffff8439822e RSI: 00000000000c000a RDI: 0000000000000286
[ 1317.270230] RBP: ffff8803cd31be38 R08: 0000000000000000 R09: 0000000000000000
[ 1317.270230] R10: 0000000000000000 R11: 0000000000000001 R12: 0000000000000000
[ 1317.270230] R13: 0000000054086700 R14: 0000000000a25de0 R15: 0000000000000031
[ 1317.270230] FS: 00007ff40251d700(0000) GS:ffff88022e200000(0000) knlGS:000000000000
0000
[ 1317.270230] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[ 1317.270230] CR2: 0000000000000974 CR3: 00000003cd478000 CR4: 00000000000006e0
[ 1317.270230] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 1317.270230] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000090602
[ 1317.270230] Stack:
[ 1317.270230] 0000000054086700 5408670000a25de0 5408670000000002 0000000000000000
[ 1317.270230] ffffffff84223542 00000000ea54c767 0000000000000000 ffffffff86d26160
[ 1317.270230] ffff8803cd31be68 ffffffff84223556 ffff8803cd31beb8 ffff8800c6765280
[ 1317.270230] Call Trace:
[ 1317.270230] [<ffffffff84223542>] ? rds_trans_get_preferred+0x42/0xa0
[ 1317.270230] [<ffffffff84223556>] rds_trans_get_preferred+0x56/0xa0
[ 1317.270230] [<ffffffff8421c9c3>] rds_bind+0x73/0xf0
[ 1317.270230] [<ffffffff83e4ce62>] SYSC_bind+0x92/0xf0
[ 1317.270230] [<ffffffff812493f8>] ? context_tracking_user_exit+0xb8/0x1d0
[ 1317.270230] [<ffffffff8119313d>] ? trace_hardirqs_on+0xd/0x10
[ 1317.270230] [<ffffffff8107a852>] ? syscall_trace_enter+0x32/0x290
[ 1317.270230] [<ffffffff83e4cece>] SyS_bind+0xe/0x10
[ 1317.270230] [<ffffffff843a6ad0>] tracesys+0xdd/0xe2
[ 1317.270230] Code: 00 8b 45 cc 48 8d 75 d0 48 c7 45 d8 00 00 00 00 66 c7 45 d0 02 00
89 45 d4 48 89 df e8 78 49 76 ff 41 89 c4 85 c0 75 0c 48 8b 03 <80> b8 74 09 00 00 01 7
4 06 41 bc 9d ff ff ff f6 05 2a b6 c2 02
[ 1317.270230] RIP [<ffffffff84225f52>] rds_ib_laddr_check+0x82/0x110
[ 1317.270230] RSP <ffff8803cd31bdf8>
[ 1317.270230] CR2: 0000000000000974
Signed-off-by: Sasha Levin <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void rds_ib_unregister_client(void)
{
ib_unregister_client(&rds_ib_client);
/* wait for rds_ib_dev_free() to complete */
flush_workqueue(rds_wq);
}
|
static void rds_ib_unregister_client(void)
{
ib_unregister_client(&rds_ib_client);
/* wait for rds_ib_dev_free() to complete */
flush_workqueue(rds_wq);
}
|
C
|
linux
| 0 |
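Editor's note: the commit message above traces the crash to dereferencing a device pointer that the bind-time lookup can legitimately leave NULL. The hedged sketch below shows the corresponding guard; the struct and field names are illustrative placeholders, not the actual rds_ib_laddr_check() hunk.

#include <errno.h>

/* Illustrative stand-in for the RDMA device the lookup may fail to find. */
struct my_ib_device { int supports_fastreg; };

static int laddr_check_sketch(const struct my_ib_device *dev)
{
    if (!dev)
        return -EADDRNOTAVAIL;   /* no device: refuse the bind, don't crash */

    /* only now is it safe to dereference the device */
    return dev->supports_fastreg ? 0 : -EADDRNOTAVAIL;
}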
CVE-2016-7398
|
https://www.cvedetails.com/cve/CVE-2016-7398/
|
CWE-704
|
https://github.com/m6w6/ext-http/commit/17137d4ab1ce81a2cee0fae842340a344ef3da83
|
17137d4ab1ce81a2cee0fae842340a344ef3da83
|
fix bug #73055
|
PHP_METHOD(HttpParams, offsetGet)
{
char *name_str;
int name_len;
zval **zparam, *zparams;
if (SUCCESS != zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &name_str, &name_len)) {
return;
}
zparams = php_http_ztyp(IS_ARRAY, zend_read_property(php_http_params_class_entry, getThis(), ZEND_STRL("params"), 0 TSRMLS_CC));
if (SUCCESS == zend_symtable_find(Z_ARRVAL_P(zparams), name_str, name_len + 1, (void *) &zparam)) {
RETVAL_ZVAL(*zparam, 1, 0);
}
zval_ptr_dtor(&zparams);
}
|
PHP_METHOD(HttpParams, offsetGet)
{
char *name_str;
int name_len;
zval **zparam, *zparams;
if (SUCCESS != zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &name_str, &name_len)) {
return;
}
zparams = php_http_ztyp(IS_ARRAY, zend_read_property(php_http_params_class_entry, getThis(), ZEND_STRL("params"), 0 TSRMLS_CC));
if (SUCCESS == zend_symtable_find(Z_ARRVAL_P(zparams), name_str, name_len + 1, (void *) &zparam)) {
RETVAL_ZVAL(*zparam, 1, 0);
}
zval_ptr_dtor(&zparams);
}
|
C
|
ext-http
| 0 |
CVE-2017-7374
|
https://www.cvedetails.com/cve/CVE-2017-7374/
|
CWE-416
|
https://github.com/torvalds/linux/commit/1b53cf9815bb4744958d41f3795d5d5a1d365e2d
|
1b53cf9815bb4744958d41f3795d5d5a1d365e2d
|
fscrypt: remove broken support for detecting keyring key revocation
Filesystem encryption ostensibly supported revoking a keyring key that
had been used to "unlock" encrypted files, causing those files to become
"locked" again. This was, however, buggy for several reasons, the most
severe of which was that when key revocation happened to be detected for
an inode, its fscrypt_info was immediately freed, even while other
threads could be using it for encryption or decryption concurrently.
This could be exploited to crash the kernel or worse.
This patch fixes the use-after-free by removing the code which detects
the keyring key having been revoked, invalidated, or expired. Instead,
an encrypted inode that is "unlocked" now simply remains unlocked until
it is evicted from memory. Note that this is no worse than the case for
block device-level encryption, e.g. dm-crypt, and it still remains
possible for a privileged user to evict unused pages, inodes, and
dentries by running 'sync; echo 3 > /proc/sys/vm/drop_caches', or by
simply unmounting the filesystem. In fact, one of those actions was
already needed anyway for key revocation to work even somewhat sanely.
This change is not expected to break any applications.
In the future I'd like to implement a real API for fscrypt key
revocation that interacts sanely with ongoing filesystem operations ---
waiting for existing operations to complete and blocking new operations,
and invalidating and sanitizing key material and plaintext from the VFS
caches. But this is a hard problem, and for now this bug must be fixed.
This bug affected almost all versions of ext4, f2fs, and ubifs
encryption, and it was potentially reachable in any kernel configured
with encryption support (CONFIG_EXT4_ENCRYPTION=y,
CONFIG_EXT4_FS_ENCRYPTION=y, CONFIG_F2FS_FS_ENCRYPTION=y, or
CONFIG_UBIFS_FS_ENCRYPTION=y). Note that older kernels did not use the
shared fs/crypto/ code, but due to the potential security implications
of this bug, it may still be worthwhile to backport this fix to them.
Fixes: b7236e21d55f ("ext4 crypto: reorganize how we store keys in the inode")
Cc: [email protected] # v4.2+
Signed-off-by: Eric Biggers <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
Acked-by: Michael Halcrow <[email protected]>
|
static int digest_encode(const char *src, int len, char *dst)
{
int i = 0, bits = 0, ac = 0;
char *cp = dst;
while (i < len) {
ac += (((unsigned char) src[i]) << bits);
bits += 8;
do {
*cp++ = lookup_table[ac & 0x3f];
ac >>= 6;
bits -= 6;
} while (bits >= 6);
i++;
}
if (bits)
*cp++ = lookup_table[ac & 0x3f];
return cp - dst;
}
|
static int digest_encode(const char *src, int len, char *dst)
{
int i = 0, bits = 0, ac = 0;
char *cp = dst;
while (i < len) {
ac += (((unsigned char) src[i]) << bits);
bits += 8;
do {
*cp++ = lookup_table[ac & 0x3f];
ac >>= 6;
bits -= 6;
} while (bits >= 6);
i++;
}
if (bits)
*cp++ = lookup_table[ac & 0x3f];
return cp - dst;
}
|
C
|
linux
| 0 |
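Editor's note: the digest_encode() helper quoted in this record packs its input into 6-bit groups and emits one lookup_table character per group, so three input bytes become four output characters, plus one extra character for any leftover bits. The self-contained driver below reproduces the same packing for illustration only; the table contents are an assumption standing in for the kernel's filename-safe character set.

#include <stdio.h>

/* Assumed 64-character table; the kernel defines its own. */
static const char lookup_table[65] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";

/* Same 6-bit packing as digest_encode() in the record above. */
static int digest_encode(const char *src, int len, char *dst)
{
    int i = 0, bits = 0, ac = 0;
    char *cp = dst;

    while (i < len) {
        ac += (((unsigned char) src[i]) << bits);
        bits += 8;
        do {
            *cp++ = lookup_table[ac & 0x3f];
            ac >>= 6;
            bits -= 6;
        } while (bits >= 6);
        i++;
    }
    if (bits)
        *cp++ = lookup_table[ac & 0x3f];
    return cp - dst;
}

int main(void) {
    char out[64];
    int n = digest_encode("abc", 3, out);   /* 3 bytes -> 4 characters */
    out[n] = '\0';
    printf("%d chars: %s\n", n, out);
    return 0;
}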
CVE-2015-1352
|
https://www.cvedetails.com/cve/CVE-2015-1352/
| null |
https://git.php.net/?p=php-src.git;a=commit;h=124fb22a13fafa3648e4e15b4f207c7096d8155e
|
124fb22a13fafa3648e4e15b4f207c7096d8155e
| null |
static size_t curl_write_header(char *data, size_t size, size_t nmemb, void *ctx)
{
php_curl *ch = (php_curl *) ctx;
php_curl_write *t = ch->handlers->write_header;
size_t length = size * nmemb;
switch (t->method) {
case PHP_CURL_STDOUT:
/* Handle special case write when we're returning the entire transfer
*/
if (ch->handlers->write->method == PHP_CURL_RETURN && length > 0) {
smart_str_appendl(&ch->handlers->write->buf, data, (int) length);
} else {
PHPWRITE(data, length);
}
break;
case PHP_CURL_FILE:
return fwrite(data, size, nmemb, t->fp);
case PHP_CURL_USER: {
zval argv[2];
zval retval;
int error;
zend_fcall_info fci;
ZVAL_RES(&argv[0], ch->res);
Z_ADDREF(argv[0]);
ZVAL_STRINGL(&argv[1], data, length);
fci.size = sizeof(fci);
fci.function_table = EG(function_table);
ZVAL_COPY_VALUE(&fci.function_name, &t->func_name);
fci.symbol_table = NULL;
fci.object = NULL;
fci.retval = &retval;
fci.param_count = 2;
fci.params = argv;
fci.no_separation = 0;
ch->in_callback = 1;
error = zend_call_function(&fci, &t->fci_cache);
ch->in_callback = 0;
if (error == FAILURE) {
php_error_docref(NULL, E_WARNING, "Could not call the CURLOPT_HEADERFUNCTION");
length = -1;
} else if (!Z_ISUNDEF(retval)) {
if (Z_TYPE(retval) != IS_LONG) {
convert_to_long_ex(&retval);
}
length = Z_LVAL(retval);
}
zval_ptr_dtor(&argv[0]);
zval_ptr_dtor(&argv[1]);
break;
}
case PHP_CURL_IGNORE:
return length;
default:
return -1;
}
return length;
}
|
static size_t curl_write_header(char *data, size_t size, size_t nmemb, void *ctx)
{
php_curl *ch = (php_curl *) ctx;
php_curl_write *t = ch->handlers->write_header;
size_t length = size * nmemb;
switch (t->method) {
case PHP_CURL_STDOUT:
/* Handle special case write when we're returning the entire transfer
*/
if (ch->handlers->write->method == PHP_CURL_RETURN && length > 0) {
smart_str_appendl(&ch->handlers->write->buf, data, (int) length);
} else {
PHPWRITE(data, length);
}
break;
case PHP_CURL_FILE:
return fwrite(data, size, nmemb, t->fp);
case PHP_CURL_USER: {
zval argv[2];
zval retval;
int error;
zend_fcall_info fci;
ZVAL_RES(&argv[0], ch->res);
Z_ADDREF(argv[0]);
ZVAL_STRINGL(&argv[1], data, length);
fci.size = sizeof(fci);
fci.function_table = EG(function_table);
ZVAL_COPY_VALUE(&fci.function_name, &t->func_name);
fci.symbol_table = NULL;
fci.object = NULL;
fci.retval = &retval;
fci.param_count = 2;
fci.params = argv;
fci.no_separation = 0;
ch->in_callback = 1;
error = zend_call_function(&fci, &t->fci_cache);
ch->in_callback = 0;
if (error == FAILURE) {
php_error_docref(NULL, E_WARNING, "Could not call the CURLOPT_HEADERFUNCTION");
length = -1;
} else if (!Z_ISUNDEF(retval)) {
if (Z_TYPE(retval) != IS_LONG) {
convert_to_long_ex(&retval);
}
length = Z_LVAL(retval);
}
zval_ptr_dtor(&argv[0]);
zval_ptr_dtor(&argv[1]);
break;
}
case PHP_CURL_IGNORE:
return length;
default:
return -1;
}
return length;
}
|
C
|
php
| 0 |
CVE-2010-1149
|
https://www.cvedetails.com/cve/CVE-2010-1149/
|
CWE-200
|
https://cgit.freedesktop.org/udisks/commit/?id=0fcc7cb3b66f23fac53ae08647aa0007a2bd56c4
|
0fcc7cb3b66f23fac53ae08647aa0007a2bd56c4
| null |
linux_md_expand_completed_cb (DBusGMethodInvocation *context,
Device *device,
gboolean job_was_cancelled,
int status,
const char *stderr,
const char *stdout,
gpointer user_data)
{
if (WEXITSTATUS (status) == 0 && !job_was_cancelled)
{
/* the kernel side of md currently doesn't emit a 'changed' event so
* generate one since state may have changed (e.g. rebuild started etc.)
*/
device_generate_kernel_change_event (device);
dbus_g_method_return (context);
}
else
{
if (job_was_cancelled)
{
throw_error (context, ERROR_CANCELLED, "Job was cancelled");
}
else
{
throw_error (context,
ERROR_FAILED,
"Error expanding array: helper script exited with exit code %d: %s",
WEXITSTATUS (status),
stderr);
}
}
}
|
linux_md_expand_completed_cb (DBusGMethodInvocation *context,
Device *device,
gboolean job_was_cancelled,
int status,
const char *stderr,
const char *stdout,
gpointer user_data)
{
if (WEXITSTATUS (status) == 0 && !job_was_cancelled)
{
/* the kernel side of md currently doesn't emit a 'changed' event so
* generate one since state may have changed (e.g. rebuild started etc.)
*/
device_generate_kernel_change_event (device);
dbus_g_method_return (context);
}
else
{
if (job_was_cancelled)
{
throw_error (context, ERROR_CANCELLED, "Job was cancelled");
}
else
{
throw_error (context,
ERROR_FAILED,
"Error expanding array: helper script exited with exit code %d: %s",
WEXITSTATUS (status),
stderr);
}
}
}
|
C
|
udisks
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/1161a49d663dd395bd639549c2dfe7324f847938
|
1161a49d663dd395bd639549c2dfe7324f847938
|
Don't populate URL data in WebDropData when dragging files.
This is considered a potential security issue as well, since it leaks
filesystem paths.
BUG=332579
Review URL: https://codereview.chromium.org/135633002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@244538 0039d316-1c4b-4281-b951-d872f2087c98
|
gfx::NativeView OmniboxViewViews::GetRelativeWindowForPopup() const {
return GetWidget()->GetTopLevelWidget()->GetNativeView();
}
|
gfx::NativeView OmniboxViewViews::GetRelativeWindowForPopup() const {
return GetWidget()->GetTopLevelWidget()->GetNativeView();
}
|
C
|
Chrome
| 0 |
CVE-2014-0143
|
https://www.cvedetails.com/cve/CVE-2014-0143/
|
CWE-190
|
https://git.qemu.org/?p=qemu.git;a=commit;h=8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
|
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
| null |
void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
notifier_list_add(&bs->close_notifiers, notify);
}
|
void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify)
{
notifier_list_add(&bs->close_notifiers, notify);
}
|
C
|
qemu
| 0 |
CVE-2017-5013
|
https://www.cvedetails.com/cve/CVE-2017-5013/
| null |
https://github.com/chromium/chromium/commit/8f3a9a68b2dcdd2c54cf49a41ad34729ab576702
|
8f3a9a68b2dcdd2c54cf49a41ad34729ab576702
|
Don't focus the location bar for NTP navigations in non-selected tabs.
BUG=677716
TEST=See bug for repro steps.
Review-Url: https://codereview.chromium.org/2624373002
Cr-Commit-Position: refs/heads/master@{#443338}
|
void Browser::ActivateContents(WebContents* contents) {
tab_strip_model_->ActivateTabAt(
tab_strip_model_->GetIndexOfWebContents(contents), false);
window_->Activate();
}
|
void Browser::ActivateContents(WebContents* contents) {
tab_strip_model_->ActivateTabAt(
tab_strip_model_->GetIndexOfWebContents(contents), false);
window_->Activate();
}
|
C
|
Chrome
| 0 |
CVE-2018-13006
|
https://www.cvedetails.com/cve/CVE-2018-13006/
|
CWE-125
|
https://github.com/gpac/gpac/commit/bceb03fd2be95097a7b409ea59914f332fb6bc86
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
|
fixed 2 possible heap overflows (inc. #1088)
|
GF_Box *mvhd_New()
{
ISOM_DECL_BOX_ALLOC(GF_MovieHeaderBox, GF_ISOM_BOX_TYPE_MVHD);
tmp->preferredRate = (1<<16);
tmp->preferredVolume = (1<<8);
tmp->matrixA = (1<<16);
tmp->matrixD = (1<<16);
tmp->matrixW = (1<<30);
tmp->nextTrackID = 1;
return (GF_Box *)tmp;
}
|
GF_Box *mvhd_New()
{
ISOM_DECL_BOX_ALLOC(GF_MovieHeaderBox, GF_ISOM_BOX_TYPE_MVHD);
tmp->preferredRate = (1<<16);
tmp->preferredVolume = (1<<8);
tmp->matrixA = (1<<16);
tmp->matrixD = (1<<16);
tmp->matrixW = (1<<30);
tmp->nextTrackID = 1;
return (GF_Box *)tmp;
}
|
C
|
gpac
| 0 |
CVE-2018-16427
|
https://www.cvedetails.com/cve/CVE-2018-16427/
|
CWE-125
|
https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
8fe377e93b4b56060e5bbfb6f3142ceaeca744fa
|
fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes.
|
coolkey_v0_get_attribute_len(const u8 *attr, size_t buf_len, size_t *len)
{
coolkey_v0_attribute_header_t *attribute_head = (coolkey_v0_attribute_header_t *)attr;
/* don't reference beyond our buffer */
if (buf_len < sizeof(coolkey_v0_attribute_header_t)) {
return SC_ERROR_CORRUPTED_DATA;
}
*len = bebytes2ushort(attribute_head->attribute_data_len);
return SC_SUCCESS;
}
|
coolkey_v0_get_attribute_len(const u8 *attr, size_t buf_len, size_t *len)
{
coolkey_v0_attribute_header_t *attribute_head = (coolkey_v0_attribute_header_t *)attr;
/* don't reference beyond our buffer */
if (buf_len < sizeof(coolkey_v0_attribute_header_t)) {
return SC_ERROR_CORRUPTED_DATA;
}
*len = bebytes2ushort(attribute_head->attribute_data_len);
return SC_SUCCESS;
}
|
C
|
OpenSC
| 0 |
CVE-2016-5688
|
https://www.cvedetails.com/cve/CVE-2016-5688/
|
CWE-119
|
https://github.com/ImageMagick/ImageMagick/commit/aecd0ada163a4d6c769cec178955d5f3e9316f2f
|
aecd0ada163a4d6c769cec178955d5f3e9316f2f
|
Set pixel cache to undefined if any resource limit is exceeded
|
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*left_view,
*right_view;
const Image
*left_image,
*right_image;
RectangleInfo
left_geometry,
right_geometry;
register const Quantum
*p;
register ssize_t
i,
y;
size_t
gap;
ssize_t
x;
if (images->previous == (Image *) NULL)
return(0);
right_image=images;
SetGeometry(smush_image,&right_geometry);
GravityAdjustGeometry(right_image->columns,right_image->rows,
right_image->gravity,&right_geometry);
left_image=images->previous;
SetGeometry(smush_image,&left_geometry);
GravityAdjustGeometry(left_image->columns,left_image->rows,
left_image->gravity,&left_geometry);
gap=right_image->columns;
left_view=AcquireVirtualCacheView(left_image,exception);
right_view=AcquireVirtualCacheView(right_image,exception);
for (y=0; y < (ssize_t) smush_image->rows; y++)
{
for (x=(ssize_t) left_image->columns-1; x > 0; x--)
{
p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(left_image,p) != TransparentAlpha) ||
((left_image->columns-x-1) >= gap))
break;
}
i=(ssize_t) left_image->columns-x-1;
for (x=0; x < (ssize_t) right_image->columns; x++)
{
p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(right_image,p) != TransparentAlpha) ||
((x+i) >= (ssize_t) gap))
break;
}
if ((x+i) < (ssize_t) gap)
gap=(size_t) (x+i);
}
right_view=DestroyCacheView(right_view);
left_view=DestroyCacheView(left_view);
if (y < (ssize_t) smush_image->rows)
return(offset);
return((ssize_t) gap-offset);
}
|
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*left_view,
*right_view;
const Image
*left_image,
*right_image;
RectangleInfo
left_geometry,
right_geometry;
register const Quantum
*p;
register ssize_t
i,
y;
size_t
gap;
ssize_t
x;
if (images->previous == (Image *) NULL)
return(0);
right_image=images;
SetGeometry(smush_image,&right_geometry);
GravityAdjustGeometry(right_image->columns,right_image->rows,
right_image->gravity,&right_geometry);
left_image=images->previous;
SetGeometry(smush_image,&left_geometry);
GravityAdjustGeometry(left_image->columns,left_image->rows,
left_image->gravity,&left_geometry);
gap=right_image->columns;
left_view=AcquireVirtualCacheView(left_image,exception);
right_view=AcquireVirtualCacheView(right_image,exception);
for (y=0; y < (ssize_t) smush_image->rows; y++)
{
for (x=(ssize_t) left_image->columns-1; x > 0; x--)
{
p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(left_image,p) != TransparentAlpha) ||
((left_image->columns-x-1) >= gap))
break;
}
i=(ssize_t) left_image->columns-x-1;
for (x=0; x < (ssize_t) right_image->columns; x++)
{
p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(right_image,p) != TransparentAlpha) ||
((x+i) >= (ssize_t) gap))
break;
}
if ((x+i) < (ssize_t) gap)
gap=(size_t) (x+i);
}
right_view=DestroyCacheView(right_view);
left_view=DestroyCacheView(left_view);
if (y < (ssize_t) smush_image->rows)
return(offset);
return((ssize_t) gap-offset);
}
|
C
|
ImageMagick
| 0 |
CVE-2012-4462
|
https://www.cvedetails.com/cve/CVE-2012-4462/
|
CWE-20
|
https://htcondor-git.cs.wisc.edu/?p=condor.git;a=commitdiff;h=8f9b304c4f6c0a98dafa61b2c0e4beb3b70e4c84
|
8f9b304c4f6c0a98dafa61b2c0e4beb3b70e4c84
| null |
SchedulerObject::_continue(std::string key, std::string &/*reason*/, std::string &text)
{
PROC_ID id = getProcByString(key.c_str());
if (id.cluster <= 0 || id.proc < 0) {
dprintf(D_FULLDEBUG, "Remove: Failed to parse id: %s\n", key.c_str());
text = "Invalid Id";
return false;
}
scheduler.enqueueActOnJobMyself(id,JA_CONTINUE_JOBS,true);
return true;
}
|
SchedulerObject::_continue(std::string key, std::string &/*reason*/, std::string &text)
{
PROC_ID id = getProcByString(key.c_str());
if (id.cluster < 0 || id.proc < 0) {
dprintf(D_FULLDEBUG, "Remove: Failed to parse id: %s\n", key.c_str());
text = "Invalid Id";
return false;
}
scheduler.enqueueActOnJobMyself(id,JA_CONTINUE_JOBS,true);
return true;
}
|
CPP
|
htcondor
| 1 |
CVE-2013-2909
|
https://www.cvedetails.com/cve/CVE-2013-2909/
|
CWE-399
|
https://github.com/chromium/chromium/commit/248a92c21c20c14b5983680c50e1d8b73fc79a2f
|
248a92c21c20c14b5983680c50e1d8b73fc79a2f
|
Update containingIsolate to go back all the way to the top isolate from the current root, rather than stopping at the first isolate it finds. This works because the current root is always updated with each isolate run.
BUG=279277
Review URL: https://chromiumcodereview.appspot.com/23972003
git-svn-id: svn://svn.chromium.org/blink/trunk@157268 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
RenderFlowThread* flowThread() const { return m_flowThread; }
|
RenderFlowThread* flowThread() const { return m_flowThread; }
|
C
|
Chrome
| 0 |
CVE-2014-1713
|
https://www.cvedetails.com/cve/CVE-2014-1713/
|
CWE-399
|
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
|
f85a87ec670ad0fce9d98d90c9a705b72a288154
|
document.location bindings fix
BUG=352374
[email protected]
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
static void activityLoggingGetterPerWorldBindingsLongAttributeAttributeGetterCallbackForMainWorld(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMGetter");
V8PerContextData* contextData = V8PerContextData::from(info.GetIsolate()->GetCurrentContext());
if (contextData && contextData->activityLogger())
contextData->activityLogger()->log("TestObjectPython.activityLoggingGetterPerWorldBindingsLongAttribute", 0, 0, "Getter");
TestObjectPythonV8Internal::activityLoggingGetterPerWorldBindingsLongAttributeAttributeGetterForMainWorld(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
|
static void activityLoggingGetterPerWorldBindingsLongAttributeAttributeGetterCallbackForMainWorld(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMGetter");
V8PerContextData* contextData = V8PerContextData::from(info.GetIsolate()->GetCurrentContext());
if (contextData && contextData->activityLogger())
contextData->activityLogger()->log("TestObjectPython.activityLoggingGetterPerWorldBindingsLongAttribute", 0, 0, "Getter");
TestObjectPythonV8Internal::activityLoggingGetterPerWorldBindingsLongAttributeAttributeGetterForMainWorld(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
|
C
|
Chrome
| 0 |
CVE-2015-6773
|
https://www.cvedetails.com/cve/CVE-2015-6773/
|
CWE-119
|
https://github.com/chromium/chromium/commit/33827275411b33371e7bb750cce20f11de85002d
|
33827275411b33371e7bb750cce20f11de85002d
|
Move SelectionTemplate::is_handle_visible_ to FrameSelection
This patch moves |is_handle_visible_| to |FrameSelection| from |SelectionTemplate|
since handle visibility is used only for setting |FrameSelection|, hence it is
a redundant member variable of |SelectionTemplate|.
Bug: 742093
Change-Id: I3add4da3844fb40be34dcb4d4b46b5fa6fed1d7e
Reviewed-on: https://chromium-review.googlesource.com/595389
Commit-Queue: Yoshifumi Inoue <[email protected]>
Reviewed-by: Xiaocheng Hu <[email protected]>
Reviewed-by: Kent Tamura <[email protected]>
Cr-Commit-Position: refs/heads/master@{#491660}
|
void SelectionController::SetCaretAtHitTestResult(
const HitTestResult& hit_test_result) {
Node* inner_node = hit_test_result.InnerNode();
const VisiblePositionInFlatTree& visible_hit_pos =
VisiblePositionOfHitTestResult(hit_test_result);
const VisiblePositionInFlatTree& visible_pos =
visible_hit_pos.IsNull()
? CreateVisiblePosition(
PositionInFlatTree::FirstPositionInOrBeforeNode(inner_node))
: visible_hit_pos;
if (visible_pos.IsNull()) {
UpdateSelectionForMouseDownDispatchingSelectStart(
inner_node, SelectionInFlatTree(), TextGranularity::kCharacter,
HandleVisibility::kVisible);
return;
}
UpdateSelectionForMouseDownDispatchingSelectStart(
inner_node,
ExpandSelectionToRespectUserSelectAll(
inner_node, SelectionInFlatTree::Builder()
.Collapse(visible_pos.ToPositionWithAffinity())
.Build()),
TextGranularity::kCharacter, HandleVisibility::kVisible);
}
|
void SelectionController::SetCaretAtHitTestResult(
const HitTestResult& hit_test_result) {
Node* inner_node = hit_test_result.InnerNode();
const VisiblePositionInFlatTree& visible_hit_pos =
VisiblePositionOfHitTestResult(hit_test_result);
const VisiblePositionInFlatTree& visible_pos =
visible_hit_pos.IsNull()
? CreateVisiblePosition(
PositionInFlatTree::FirstPositionInOrBeforeNode(inner_node))
: visible_hit_pos;
if (visible_pos.IsNull()) {
UpdateSelectionForMouseDownDispatchingSelectStart(
inner_node, SelectionInFlatTree(), TextGranularity::kCharacter,
HandleVisibility::kVisible);
return;
}
UpdateSelectionForMouseDownDispatchingSelectStart(
inner_node,
ExpandSelectionToRespectUserSelectAll(
inner_node, SelectionInFlatTree::Builder()
.Collapse(visible_pos.ToPositionWithAffinity())
.Build()),
TextGranularity::kCharacter, HandleVisibility::kVisible);
}
|
C
|
Chrome
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/c96312d46205ea82764aba6255ecbb8dd5f57d11
|
c96312d46205ea82764aba6255ecbb8dd5f57d11
|
More testing for Graphics3D
BUG= none
TEST= ppapi_ppb_graphics3d
Review URL: http://codereview.chromium.org/7888028
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@101555 0039d316-1c4b-4281-b951-d872f2087c98
|
void SetupTests() {
RegisterTest("TestGraphics3DInterface", TestGraphics3DInterface);
RegisterTest("TestOpenGLES2Interface", TestOpenGLES2Interface);
RegisterTest("TestCreate", TestCreate);
RegisterTest("TestIsGraphics3D", TestIsGraphics3D);
RegisterTest("Test_glInitializePPAPI", Test_glInitializePPAPI);
RegisterTest("TestBasicSetup", TestBasicSetup);
RegisterTest("TestSwapBuffers", TestSwapBuffers);
RegisterTest("TestResizeBuffersWithoutDepthBuffer",
TestResizeBuffersWithoutDepthBuffer);
RegisterTest("TestResizeBuffersWithDepthBuffer",
TestResizeBuffersWithDepthBuffer);
RegisterTest("Test_glTerminatePPAPI", Test_glTerminatePPAPI);
}
|
void SetupTests() {
RegisterTest("TestGraphics3DInterface", TestGraphics3DInterface);
RegisterTest("TestOpenGLES2Interface", TestOpenGLES2Interface);
RegisterTest("TestCreate", TestCreate);
RegisterTest("TestIsGraphics3D", TestIsGraphics3D);
RegisterTest("Test_glInitializePPAPI", Test_glInitializePPAPI);
RegisterTest("TestSwapBuffers", TestSwapBuffers);
RegisterTest("TestResizeBuffersWithoutDepthBuffer",
TestResizeBuffersWithoutDepthBuffer);
RegisterTest("TestResizeBuffersWithDepthBuffer",
TestResizeBuffersWithDepthBuffer);
RegisterTest("Test_glTerminatePPAPI", Test_glTerminatePPAPI);
}
|
C
|
Chrome
| 1 |
CVE-2017-5093
|
https://www.cvedetails.com/cve/CVE-2017-5093/
|
CWE-20
|
https://github.com/chromium/chromium/commit/0720b02e4f303ea6b114d4ae9453e3a7ff55f8dc
|
0720b02e4f303ea6b114d4ae9453e3a7ff55f8dc
|
If JavaScript shows a dialog, cause the page to lose fullscreen.
BUG=670135, 550017, 726761, 728276
Review-Url: https://codereview.chromium.org/2906133004
Cr-Commit-Position: refs/heads/master@{#478884}
|
void WebContentsImpl::CollapseSelection() {
RenderFrameHost* focused_frame = GetFocusedFrame();
if (!focused_frame)
return;
focused_frame->GetFrameInputHandler()->CollapseSelection();
}
|
void WebContentsImpl::CollapseSelection() {
RenderFrameHost* focused_frame = GetFocusedFrame();
if (!focused_frame)
return;
focused_frame->GetFrameInputHandler()->CollapseSelection();
}
|
C
|
Chrome
| 0 |
CVE-2018-16790
|
https://www.cvedetails.com/cve/CVE-2018-16790/
|
CWE-125
|
https://github.com/mongodb/mongo-c-driver/commit/0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84
|
0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84
|
Fix for CVE-2018-16790 -- Verify bounds before binary length read.
As reported here: https://jira.mongodb.org/browse/CDRIVER-2819,
a heap overread occurs due to a failure to correctly verify data
bounds.
In the original check, len - o returns the data left including the
sizeof(l) we just read. Instead, the comparison should check
against the data left NOT including the binary int32, i.e. just
subtype (byte*) instead of int32 subtype (byte*).
Added in test for corrupted BSON example.
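A hedged sketch of the corrected bounds check described above; the variable names (len, o, l) follow the commit message, and binary_fits() is a made-up helper rather than the actual libbson code (little-endian layout assumed):
```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* A BSON binary element body is: int32 length, one subtype byte, then
 * 'length' payload bytes. 'o' is the offset of the element in 'buf'. */
static bool binary_fits(const uint8_t *buf, size_t len, size_t o)
{
    uint32_t l;

    if (len < o + sizeof (l) + 1)      /* need the int32 and the subtype */
        return false;
    memcpy(&l, buf + o, sizeof (l));   /* little-endian assumed */

    /* The buggy form compared l against (len - o), which still counts
     * the int32 just consumed; the fix compares against what remains
     * after the int32 length and the subtype byte. */
    return l <= len - o - sizeof (l) - 1;
}
```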
|
bson_iter_overwrite_oid (bson_iter_t *iter, const bson_oid_t *value)
{
BSON_ASSERT (iter);
if (ITER_TYPE (iter) == BSON_TYPE_OID) {
memcpy (
(void *) (iter->raw + iter->d1), value->bytes, sizeof (value->bytes));
}
}
|
bson_iter_overwrite_oid (bson_iter_t *iter, const bson_oid_t *value)
{
BSON_ASSERT (iter);
if (ITER_TYPE (iter) == BSON_TYPE_OID) {
memcpy (
(void *) (iter->raw + iter->d1), value->bytes, sizeof (value->bytes));
}
}
|
C
|
mongo-c-driver
| 0 |
CVE-2018-18339
|
https://www.cvedetails.com/cve/CVE-2018-18339/
|
CWE-119
|
https://github.com/chromium/chromium/commit/e34e01b1b0987e418bc22e3ef1cf2e4ecaead264
|
e34e01b1b0987e418bc22e3ef1cf2e4ecaead264
|
[scheduler] Remove implicit fallthrough in switch
Bail out early when a condition in the switch is fulfilled.
This does not change behaviour, because RemoveTaskObserver is a no-op when
the task observer is not present in the list.
[email protected]
Bug: 177475
Change-Id: Ibc7772c79f8a8c8a1d63a997dabe1efda5d3a7bd
Reviewed-on: https://chromium-review.googlesource.com/891187
Reviewed-by: Nico Weber <[email protected]>
Commit-Queue: Alexander Timin <[email protected]>
Cr-Commit-Position: refs/heads/master@{#532649}
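For illustration, a minimal sketch (not the Chromium code) of the "bail out early" shape the message describes, returning from the matching case instead of falling through:
```c
/* Hypothetical handler: each case does its own work and returns, so
 * there is no implicit fallthrough into the next case. */
static int handle(int kind)
{
    switch (kind) {
    case 0:
        /* kind-0 work would go here */
        return 0;
    case 1:
        /* kind-1 work would go here */
        return 1;
    default:
        return -1;
    }
}
```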
|
RendererSchedulerImpl::NewRenderWidgetSchedulingState() {
return render_widget_scheduler_signals_.NewRenderWidgetSchedulingState();
}
|
RendererSchedulerImpl::NewRenderWidgetSchedulingState() {
return render_widget_scheduler_signals_.NewRenderWidgetSchedulingState();
}
|
C
|
Chrome
| 0 |
CVE-2015-3842
|
https://www.cvedetails.com/cve/CVE-2015-3842/
|
CWE-119
|
https://android.googlesource.com/platform/frameworks/av/+/aeea52da00d210587fb3ed895de3d5f2e0264c88
|
aeea52da00d210587fb3ed895de3d5f2e0264c88
|
audio effects: fix heap overflow
Check consistency of effect command reply sizes before
copying to reply address.
Also add null pointer check on reply size.
Also remove unused parameter warning.
Bug: 21953516.
Change-Id: I4cf00c12eaed696af28f3b7613f7e36f47a160c4
(cherry picked from commit 0f714a464d2425afe00d6450535e763131b40844)
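A minimal, hedged sketch of the reply-size validation described above; the function name and signature are invented for illustration and are not the AOSP implementation:
```c
#include <stdint.h>
#include <string.h>

/* Validate the caller-supplied reply buffer before copying into it:
 * reject a NULL reply or reply-size pointer, and reject a reply buffer
 * smaller than the data to be returned. */
static int copy_reply(void *reply, uint32_t *reply_size,
                      const void *src, uint32_t src_size)
{
    if (reply == NULL || reply_size == NULL)
        return -1;
    if (*reply_size < src_size)
        return -1;
    memcpy(reply, src, src_size);
    *reply_size = src_size;
    return 0;
}
```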
|
int Effect_Create(preproc_effect_t *effect,
preproc_session_t *session,
effect_handle_t *interface)
{
effect->session = session;
*interface = (effect_handle_t)&effect->itfe;
return Effect_SetState(effect, PREPROC_EFFECT_STATE_CREATED);
}
|
int Effect_Create(preproc_effect_t *effect,
preproc_session_t *session,
effect_handle_t *interface)
{
effect->session = session;
*interface = (effect_handle_t)&effect->itfe;
return Effect_SetState(effect, PREPROC_EFFECT_STATE_CREATED);
}
|
C
|
Android
| 0 |
CVE-2012-1601
|
https://www.cvedetails.com/cve/CVE-2012-1601/
|
CWE-399
|
https://github.com/torvalds/linux/commit/9c895160d25a76c21b65bad141b08e8d4f99afef
|
9c895160d25a76c21b65bad141b08e8d4f99afef
|
KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
(cherry picked from commit 3e515705a1f46beb1c942bb8043c16f8ac7b1e9e)
If some vcpus are created before KVM_CREATE_IRQCHIP, then
irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
to potential NULL pointer dereferences.
Fix by:
- ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
- ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP
This is somewhat long-winded because vcpu->arch.apic is created without
kvm->lock held.
Based on earlier patch by Michael Ellerman.
Signed-off-by: Michael Ellerman <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
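A hedged sketch of the two consistency rules listed above; the structs and helpers are illustrative, not the KVM implementation:
```c
#include <stdbool.h>

struct vm   { int nr_vcpus; bool irqchip_in_kernel; };
struct vcpu { bool has_apic; };

/* Rule 1: an in-kernel irqchip may only be created before any vcpu. */
static bool can_create_irqchip(const struct vm *vm)
{
    return vm->nr_vcpus == 0;
}

/* Rule 2: once the irqchip exists, every new vcpu must have an apic. */
static bool can_add_vcpu(const struct vm *vm, const struct vcpu *v)
{
    return !vm->irqchip_in_kernel || v->has_apic;
}
```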
|
static inline int kvm_tsc_changes_freq(void)
{
int cpu = get_cpu();
int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
cpufreq_quick_get(cpu) != 0;
put_cpu();
return ret;
}
|
static inline int kvm_tsc_changes_freq(void)
{
int cpu = get_cpu();
int ret = !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
cpufreq_quick_get(cpu) != 0;
put_cpu();
return ret;
}
|
C
|
linux
| 0 |
CVE-2016-6711
|
https://www.cvedetails.com/cve/CVE-2016-6711/
|
CWE-20
|
https://android.googlesource.com/platform/external/libvpx/+/063be1485e0099bc81ace3a08b0ec9186dcad693
|
063be1485e0099bc81ace3a08b0ec9186dcad693
|
DO NOT MERGE | libvpx: Cherry-pick 0f42d1f from upstream
Description from upstream:
vp8: fix decoder crash with invalid leading keyframes
Decoding the same invalid keyframe twice would result in a crash, as the
second time through the decoder would be assumed to have been
initialized since there was no resolution change. In this case the
resolution was itself invalid (0x6), but vp8_peek_si() was only failing
in the case of 0x0.
invalid-vp80-00-comprehensive-018.ivf.2kf_0x6.ivf tests this case by
duplicating the first keyframe and additionally adds a valid one to
ensure decoding can resume without error.
Bug: 30593765
Change-Id: I0de85f5a5eb5c0a5605230faf20c042b69aea507
(cherry picked from commit fc0466b695dce03e10390101844caa374848d903)
(cherry picked from commit 1114575245cb9d2f108749f916c76549524f5136)
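For illustration only (not libvpx code), a sketch of the stricter rule implied above: the decoder counts as initialized only after a frame with a valid size has been accepted, so an invalid leading keyframe seen twice still goes through validation:
```c
#include <stdbool.h>

struct dec_state {
    bool initialized;
    unsigned int w, h;
};

/* Reject any zero dimension (e.g. the 0x6 case) before touching the
 * decoder state; only mark the state initialized after validation. */
static bool maybe_init(struct dec_state *s, unsigned int w, unsigned int h)
{
    if (w == 0 || h == 0)
        return false;
    if (!s->initialized || w != s->w || h != s->h) {
        s->w = w;
        s->h = h;
        s->initialized = true;
    }
    return true;
}
```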
|
static void yuvconfig2image(vpx_image_t *img,
const YV12_BUFFER_CONFIG *yv12,
void *user_priv)
{
/** vpx_img_wrap() doesn't allow specifying independent strides for
* the Y, U, and V planes, nor other alignment adjustments that
* might be representable by a YV12_BUFFER_CONFIG, so we just
* initialize all the fields.*/
img->fmt = VPX_IMG_FMT_I420;
img->w = yv12->y_stride;
img->h = (yv12->y_height + 2 * VP8BORDERINPIXELS + 15) & ~15;
img->d_w = yv12->y_width;
img->d_h = yv12->y_height;
img->x_chroma_shift = 1;
img->y_chroma_shift = 1;
img->planes[VPX_PLANE_Y] = yv12->y_buffer;
img->planes[VPX_PLANE_U] = yv12->u_buffer;
img->planes[VPX_PLANE_V] = yv12->v_buffer;
img->planes[VPX_PLANE_ALPHA] = NULL;
img->stride[VPX_PLANE_Y] = yv12->y_stride;
img->stride[VPX_PLANE_U] = yv12->uv_stride;
img->stride[VPX_PLANE_V] = yv12->uv_stride;
img->stride[VPX_PLANE_ALPHA] = yv12->y_stride;
img->bit_depth = 8;
img->bps = 12;
img->user_priv = user_priv;
img->img_data = yv12->buffer_alloc;
img->img_data_owner = 0;
img->self_allocd = 0;
}
|
static void yuvconfig2image(vpx_image_t *img,
const YV12_BUFFER_CONFIG *yv12,
void *user_priv)
{
/** vpx_img_wrap() doesn't allow specifying independent strides for
* the Y, U, and V planes, nor other alignment adjustments that
* might be representable by a YV12_BUFFER_CONFIG, so we just
* initialize all the fields.*/
img->fmt = VPX_IMG_FMT_I420;
img->w = yv12->y_stride;
img->h = (yv12->y_height + 2 * VP8BORDERINPIXELS + 15) & ~15;
img->d_w = yv12->y_width;
img->d_h = yv12->y_height;
img->x_chroma_shift = 1;
img->y_chroma_shift = 1;
img->planes[VPX_PLANE_Y] = yv12->y_buffer;
img->planes[VPX_PLANE_U] = yv12->u_buffer;
img->planes[VPX_PLANE_V] = yv12->v_buffer;
img->planes[VPX_PLANE_ALPHA] = NULL;
img->stride[VPX_PLANE_Y] = yv12->y_stride;
img->stride[VPX_PLANE_U] = yv12->uv_stride;
img->stride[VPX_PLANE_V] = yv12->uv_stride;
img->stride[VPX_PLANE_ALPHA] = yv12->y_stride;
img->bit_depth = 8;
img->bps = 12;
img->user_priv = user_priv;
img->img_data = yv12->buffer_alloc;
img->img_data_owner = 0;
img->self_allocd = 0;
}
|
C
|
Android
| 0 |
CVE-2016-10197
|
https://www.cvedetails.com/cve/CVE-2016-10197/
|
CWE-125
|
https://github.com/libevent/libevent/commit/ec65c42052d95d2c23d1d837136d1cf1d9ecef9e
|
ec65c42052d95d2c23d1d837136d1cf1d9ecef9e
|
evdns: fix searching empty hostnames
From #332:
Here follows a bug report by **Guido Vranken** via the _Tor bug bounty program_. Please credit Guido accordingly.
## Bug report
The DNS code of Libevent contains this rather obvious OOB read:
```c
static char *
search_make_new(const struct search_state *const state, int n, const char *const base_name) {
const size_t base_len = strlen(base_name);
const char need_to_append_dot = base_name[base_len - 1] == '.' ? 0 : 1;
```
If the length of ```base_name``` is 0, then line 3125 reads 1 byte before the buffer. This will trigger a crash on ASAN-protected builds.
To reproduce:
Build libevent with ASAN:
```
$ CFLAGS='-fomit-frame-pointer -fsanitize=address' ./configure && make -j4
```
Put the attached ```resolv.conf``` and ```poc.c``` in the source directory and then do:
```
$ gcc -fsanitize=address -fomit-frame-pointer poc.c .libs/libevent.a
$ ./a.out
=================================================================
==22201== ERROR: AddressSanitizer: heap-buffer-overflow on address 0x60060000efdf at pc 0x4429da bp 0x7ffe1ed47300 sp 0x7ffe1ed472f8
READ of size 1 at 0x60060000efdf thread T0
```
P.S. we can add a check earlier, but since this is very uncommon, I didn't add it.
Fixes: #332
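A hedged illustration of the missing guard the report asks for; needs_trailing_dot() is a made-up helper, not the actual search_make_new() code:
```c
#include <stddef.h>
#include <string.h>

/* With an empty base_name, base_name[base_len - 1] would read one byte
 * before the buffer, so handle the zero-length case first. */
static int needs_trailing_dot(const char *base_name)
{
    const size_t base_len = strlen(base_name);

    if (base_len == 0)
        return 0;
    return base_name[base_len - 1] == '.' ? 0 : 1;
}
```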
|
evdns_getaddrinfo_gotresolve(int result, char type, int count,
int ttl, void *addresses, void *arg)
{
int i;
struct getaddrinfo_subrequest *req = arg;
struct getaddrinfo_subrequest *other_req;
struct evdns_getaddrinfo_request *data;
struct evutil_addrinfo *res;
struct sockaddr_in sin;
struct sockaddr_in6 sin6;
struct sockaddr *sa;
int socklen, addrlen;
void *addrp;
int err;
int user_canceled;
EVUTIL_ASSERT(req->type == DNS_IPv4_A || req->type == DNS_IPv6_AAAA);
if (req->type == DNS_IPv4_A) {
data = EVUTIL_UPCAST(req, struct evdns_getaddrinfo_request, ipv4_request);
other_req = &data->ipv6_request;
} else {
data = EVUTIL_UPCAST(req, struct evdns_getaddrinfo_request, ipv6_request);
other_req = &data->ipv4_request;
}
/** Called from evdns_base_free() with @fail_requests == 1 */
if (result != DNS_ERR_SHUTDOWN) {
EVDNS_LOCK(data->evdns_base);
if (evdns_result_is_answer(result)) {
if (req->type == DNS_IPv4_A)
++data->evdns_base->getaddrinfo_ipv4_answered;
else
++data->evdns_base->getaddrinfo_ipv6_answered;
}
user_canceled = data->user_canceled;
if (other_req->r == NULL)
data->request_done = 1;
EVDNS_UNLOCK(data->evdns_base);
} else {
data->evdns_base = NULL;
user_canceled = data->user_canceled;
}
req->r = NULL;
if (result == DNS_ERR_CANCEL && ! user_canceled) {
/* Internal cancel request from timeout or internal error.
* we already answered the user. */
if (other_req->r == NULL)
free_getaddrinfo_request(data);
return;
}
if (data->user_cb == NULL) {
/* We already answered. XXXX This shouldn't be needed; see
* comments in evdns_getaddrinfo_timeout_cb */
free_getaddrinfo_request(data);
return;
}
if (result == DNS_ERR_NONE) {
if (count == 0)
err = EVUTIL_EAI_NODATA;
else
err = 0;
} else {
err = evdns_err_to_getaddrinfo_err(result);
}
if (err) {
/* Looks like we got an error. */
if (other_req->r) {
/* The other request is still working; maybe it will
* succeed. */
/* XXXX handle failure from set_timeout */
if (result != DNS_ERR_SHUTDOWN) {
evdns_getaddrinfo_set_timeout(data->evdns_base, data);
}
data->pending_error = err;
return;
}
if (user_canceled) {
data->user_cb(EVUTIL_EAI_CANCEL, NULL, data->user_data);
} else if (data->pending_result) {
/* If we have an answer waiting, and we weren't
* canceled, ignore this error. */
add_cname_to_reply(data, data->pending_result);
data->user_cb(0, data->pending_result, data->user_data);
data->pending_result = NULL;
} else {
if (data->pending_error)
err = getaddrinfo_merge_err(err,
data->pending_error);
data->user_cb(err, NULL, data->user_data);
}
free_getaddrinfo_request(data);
return;
} else if (user_canceled) {
if (other_req->r) {
/* The other request is still working; let it hit this
* callback with EVUTIL_EAI_CANCEL callback and report
* the failure. */
return;
}
data->user_cb(EVUTIL_EAI_CANCEL, NULL, data->user_data);
free_getaddrinfo_request(data);
return;
}
/* Looks like we got some answers. We should turn them into addrinfos
* and then either queue those or return them all. */
EVUTIL_ASSERT(type == DNS_IPv4_A || type == DNS_IPv6_AAAA);
if (type == DNS_IPv4_A) {
memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_port = htons(data->port);
sa = (struct sockaddr *)&sin;
socklen = sizeof(sin);
addrlen = 4;
addrp = &sin.sin_addr.s_addr;
} else {
memset(&sin6, 0, sizeof(sin6));
sin6.sin6_family = AF_INET6;
sin6.sin6_port = htons(data->port);
sa = (struct sockaddr *)&sin6;
socklen = sizeof(sin6);
addrlen = 16;
addrp = &sin6.sin6_addr.s6_addr;
}
res = NULL;
for (i=0; i < count; ++i) {
struct evutil_addrinfo *ai;
memcpy(addrp, ((char*)addresses)+i*addrlen, addrlen);
ai = evutil_new_addrinfo_(sa, socklen, &data->hints);
if (!ai) {
if (other_req->r) {
evdns_cancel_request(NULL, other_req->r);
}
data->user_cb(EVUTIL_EAI_MEMORY, NULL, data->user_data);
if (res)
evutil_freeaddrinfo(res);
if (other_req->r == NULL)
free_getaddrinfo_request(data);
return;
}
res = evutil_addrinfo_append_(res, ai);
}
if (other_req->r) {
/* The other request is still in progress; wait for it */
/* XXXX handle failure from set_timeout */
evdns_getaddrinfo_set_timeout(data->evdns_base, data);
data->pending_result = res;
return;
} else {
/* The other request is done or never started; append its
* results (if any) and return them. */
if (data->pending_result) {
if (req->type == DNS_IPv4_A)
res = evutil_addrinfo_append_(res,
data->pending_result);
else
res = evutil_addrinfo_append_(
data->pending_result, res);
data->pending_result = NULL;
}
/* Call the user callback. */
add_cname_to_reply(data, res);
data->user_cb(0, res, data->user_data);
/* Free data. */
free_getaddrinfo_request(data);
}
}
|
evdns_getaddrinfo_gotresolve(int result, char type, int count,
int ttl, void *addresses, void *arg)
{
int i;
struct getaddrinfo_subrequest *req = arg;
struct getaddrinfo_subrequest *other_req;
struct evdns_getaddrinfo_request *data;
struct evutil_addrinfo *res;
struct sockaddr_in sin;
struct sockaddr_in6 sin6;
struct sockaddr *sa;
int socklen, addrlen;
void *addrp;
int err;
int user_canceled;
EVUTIL_ASSERT(req->type == DNS_IPv4_A || req->type == DNS_IPv6_AAAA);
if (req->type == DNS_IPv4_A) {
data = EVUTIL_UPCAST(req, struct evdns_getaddrinfo_request, ipv4_request);
other_req = &data->ipv6_request;
} else {
data = EVUTIL_UPCAST(req, struct evdns_getaddrinfo_request, ipv6_request);
other_req = &data->ipv4_request;
}
/** Called from evdns_base_free() with @fail_requests == 1 */
if (result != DNS_ERR_SHUTDOWN) {
EVDNS_LOCK(data->evdns_base);
if (evdns_result_is_answer(result)) {
if (req->type == DNS_IPv4_A)
++data->evdns_base->getaddrinfo_ipv4_answered;
else
++data->evdns_base->getaddrinfo_ipv6_answered;
}
user_canceled = data->user_canceled;
if (other_req->r == NULL)
data->request_done = 1;
EVDNS_UNLOCK(data->evdns_base);
} else {
data->evdns_base = NULL;
user_canceled = data->user_canceled;
}
req->r = NULL;
if (result == DNS_ERR_CANCEL && ! user_canceled) {
/* Internal cancel request from timeout or internal error.
* we already answered the user. */
if (other_req->r == NULL)
free_getaddrinfo_request(data);
return;
}
if (data->user_cb == NULL) {
/* We already answered. XXXX This shouldn't be needed; see
* comments in evdns_getaddrinfo_timeout_cb */
free_getaddrinfo_request(data);
return;
}
if (result == DNS_ERR_NONE) {
if (count == 0)
err = EVUTIL_EAI_NODATA;
else
err = 0;
} else {
err = evdns_err_to_getaddrinfo_err(result);
}
if (err) {
/* Looks like we got an error. */
if (other_req->r) {
/* The other request is still working; maybe it will
* succeed. */
/* XXXX handle failure from set_timeout */
if (result != DNS_ERR_SHUTDOWN) {
evdns_getaddrinfo_set_timeout(data->evdns_base, data);
}
data->pending_error = err;
return;
}
if (user_canceled) {
data->user_cb(EVUTIL_EAI_CANCEL, NULL, data->user_data);
} else if (data->pending_result) {
/* If we have an answer waiting, and we weren't
* canceled, ignore this error. */
add_cname_to_reply(data, data->pending_result);
data->user_cb(0, data->pending_result, data->user_data);
data->pending_result = NULL;
} else {
if (data->pending_error)
err = getaddrinfo_merge_err(err,
data->pending_error);
data->user_cb(err, NULL, data->user_data);
}
free_getaddrinfo_request(data);
return;
} else if (user_canceled) {
if (other_req->r) {
/* The other request is still working; let it hit this
* callback with EVUTIL_EAI_CANCEL callback and report
* the failure. */
return;
}
data->user_cb(EVUTIL_EAI_CANCEL, NULL, data->user_data);
free_getaddrinfo_request(data);
return;
}
/* Looks like we got some answers. We should turn them into addrinfos
* and then either queue those or return them all. */
EVUTIL_ASSERT(type == DNS_IPv4_A || type == DNS_IPv6_AAAA);
if (type == DNS_IPv4_A) {
memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_port = htons(data->port);
sa = (struct sockaddr *)&sin;
socklen = sizeof(sin);
addrlen = 4;
addrp = &sin.sin_addr.s_addr;
} else {
memset(&sin6, 0, sizeof(sin6));
sin6.sin6_family = AF_INET6;
sin6.sin6_port = htons(data->port);
sa = (struct sockaddr *)&sin6;
socklen = sizeof(sin6);
addrlen = 16;
addrp = &sin6.sin6_addr.s6_addr;
}
res = NULL;
for (i=0; i < count; ++i) {
struct evutil_addrinfo *ai;
memcpy(addrp, ((char*)addresses)+i*addrlen, addrlen);
ai = evutil_new_addrinfo_(sa, socklen, &data->hints);
if (!ai) {
if (other_req->r) {
evdns_cancel_request(NULL, other_req->r);
}
data->user_cb(EVUTIL_EAI_MEMORY, NULL, data->user_data);
if (res)
evutil_freeaddrinfo(res);
if (other_req->r == NULL)
free_getaddrinfo_request(data);
return;
}
res = evutil_addrinfo_append_(res, ai);
}
if (other_req->r) {
/* The other request is still in progress; wait for it */
/* XXXX handle failure from set_timeout */
evdns_getaddrinfo_set_timeout(data->evdns_base, data);
data->pending_result = res;
return;
} else {
/* The other request is done or never started; append its
* results (if any) and return them. */
if (data->pending_result) {
if (req->type == DNS_IPv4_A)
res = evutil_addrinfo_append_(res,
data->pending_result);
else
res = evutil_addrinfo_append_(
data->pending_result, res);
data->pending_result = NULL;
}
/* Call the user callback. */
add_cname_to_reply(data, res);
data->user_cb(0, res, data->user_data);
/* Free data. */
free_getaddrinfo_request(data);
}
}
|
C
|
libevent
| 0 |
CVE-2016-7411
|
https://www.cvedetails.com/cve/CVE-2016-7411/
|
CWE-119
|
https://github.com/php/php-src/commit/6a7cc8ff85827fa9ac715b3a83c2d9147f33cd43?w=1
|
6a7cc8ff85827fa9ac715b3a83c2d9147f33cd43?w=1
|
Fix bug #73052 - Memory Corruption in During Deserialized-object Destruction
|
ZEND_API void zend_objects_proxy_free_storage(zend_proxy_object *object TSRMLS_DC)
{
zval_ptr_dtor(&object->object);
zval_ptr_dtor(&object->property);
efree(object);
}
|
ZEND_API void zend_objects_proxy_free_storage(zend_proxy_object *object TSRMLS_DC)
{
zval_ptr_dtor(&object->object);
zval_ptr_dtor(&object->property);
efree(object);
}
|
C
|
php-src
| 0 |
CVE-2013-4531
|
https://www.cvedetails.com/cve/CVE-2013-4531/
|
CWE-119
|
https://git.qemu.org/?p=qemu.git;a=commit;h=d2ef4b61fe6d33d2a5dcf100a9b9440de341ad62
|
d2ef4b61fe6d33d2a5dcf100a9b9440de341ad62
| null |
static int get_uint64_equal(QEMUFile *f, void *pv, size_t size)
{
uint64_t *v = pv;
uint64_t v2;
qemu_get_be64s(f, &v2);
if (*v == v2) {
return 0;
}
return -EINVAL;
}
|
static int get_uint64_equal(QEMUFile *f, void *pv, size_t size)
{
uint64_t *v = pv;
uint64_t v2;
qemu_get_be64s(f, &v2);
if (*v == v2) {
return 0;
}
return -EINVAL;
}
|
C
|
qemu
| 0 |
CVE-2018-12249
|
https://www.cvedetails.com/cve/CVE-2018-12249/
|
CWE-476
|
https://github.com/mruby/mruby/commit/faa4eaf6803bd11669bc324b4c34e7162286bfa3
|
faa4eaf6803bd11669bc324b4c34e7162286bfa3
|
`mrb_class_real()` did not work for `BasicObject`; fix #4037
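A hedged sketch of the kind of loop-termination issue hinted at above (walking wrapper classes must tolerate a chain that ends, as it does at BasicObject); the types and names are illustrative, not mruby's:
```c
#include <stddef.h>

struct cls { int is_real; struct cls *super; };

/* Walk toward the first "real" class; return NULL if the chain ends
 * without finding one instead of dereferencing a NULL superclass. */
static struct cls *real_class(struct cls *c)
{
    while (c != NULL && !c->is_real)
        c = c->super;
    return c;
}
```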
|
check_cv_name_str(mrb_state *mrb, mrb_value str)
{
const char *s = RSTRING_PTR(str);
mrb_int len = RSTRING_LEN(str);
if (len < 3 || !(s[0] == '@' && s[1] == '@')) {
mrb_name_error(mrb, mrb_intern_str(mrb, str), "'%S' is not allowed as a class variable name", str);
}
}
|
check_cv_name_str(mrb_state *mrb, mrb_value str)
{
const char *s = RSTRING_PTR(str);
mrb_int len = RSTRING_LEN(str);
if (len < 3 || !(s[0] == '@' && s[1] == '@')) {
mrb_name_error(mrb, mrb_intern_str(mrb, str), "'%S' is not allowed as a class variable name", str);
}
}
|
C
|
mruby
| 0 |
CVE-2017-9798
|
https://www.cvedetails.com/cve/CVE-2017-9798/
|
CWE-416
|
https://github.com/apache/httpd/commit/29afdd2550b3d30a8defece2b95ae81edcf66ac9
|
29afdd2550b3d30a8defece2b95ae81edcf66ac9
|
core: Disallow Methods' registration at run time (.htaccess); they may be
used only if registered at init time (httpd.conf).
Calling ap_method_register() in child processes is not the right scope,
since the registration won't be shared across all requests.
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1807655 13f79535-47bb-0310-9956-ffa450edef68
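A generic, hedged sketch of the scoping rule above (accept registrations only while the server is initializing); this is not the httpd API, just an illustration of why per-request registration is the wrong scope:
```c
#include <stdbool.h>

static bool server_initializing = true;   /* cleared before children fork */

/* Refuse registrations once request processing has begun, so the
 * method table seen by every child process is identical. */
static bool register_method(const char *name)
{
    (void)name;
    if (!server_initializing)
        return false;
    /* ... add 'name' to the shared method table here ... */
    return true;
}
```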
|
static const char *ifsection(cmd_parms *cmd, void *mconfig, const char *arg)
{
const char *errmsg;
const char *endp = ap_strrchr_c(arg, '>');
int old_overrides = cmd->override;
char *old_path = cmd->path;
core_dir_config *conf;
const command_rec *thiscmd = cmd->cmd;
ap_conf_vector_t *new_if_conf = ap_create_per_dir_config(cmd->pool);
const char *err = ap_check_cmd_context(cmd, NOT_IN_LIMIT);
const char *condition;
const char *expr_err;
if (err != NULL) {
return err;
}
if (endp == NULL) {
return unclosed_directive(cmd);
}
arg = apr_pstrndup(cmd->temp_pool, arg, endp - arg);
/*
* Set a dummy value so that other directives notice that they are inside
* a config section.
*/
cmd->path = "*If";
/* Only if not an .htaccess file */
if (!old_path) {
cmd->override = OR_ALL|ACCESS_CONF;
}
/* initialize our config and fetch it */
conf = ap_set_config_vectors(cmd->server, new_if_conf, cmd->path,
&core_module, cmd->pool);
if (cmd->cmd->cmd_data == COND_IF)
conf->condition_ifelse = AP_CONDITION_IF;
else if (cmd->cmd->cmd_data == COND_ELSEIF)
conf->condition_ifelse = AP_CONDITION_ELSEIF;
else if (cmd->cmd->cmd_data == COND_ELSE)
conf->condition_ifelse = AP_CONDITION_ELSE;
else
ap_assert(0);
if (conf->condition_ifelse == AP_CONDITION_ELSE) {
if (arg[0])
return "<Else> does not take an argument";
}
else {
if (!arg[0])
return missing_container_arg(cmd);
condition = ap_getword_conf(cmd->pool, &arg);
conf->condition = ap_expr_parse_cmd(cmd, condition, 0, &expr_err, NULL);
if (expr_err)
return apr_psprintf(cmd->pool, "Cannot parse condition clause: %s",
expr_err);
}
errmsg = ap_walk_config(cmd->directive->first_child, cmd, new_if_conf);
if (errmsg != NULL)
return errmsg;
conf->d = cmd->path;
conf->d_is_fnmatch = 0;
conf->r = NULL;
errmsg = ap_add_if_conf(cmd->pool, (core_dir_config *)mconfig, new_if_conf);
if (errmsg != NULL)
return errmsg;
if (*arg != '\0') {
return apr_pstrcat(cmd->pool, "Multiple ", thiscmd->name,
"> arguments not supported.", NULL);
}
cmd->path = old_path;
cmd->override = old_overrides;
return NULL;
}
|
static const char *ifsection(cmd_parms *cmd, void *mconfig, const char *arg)
{
const char *errmsg;
const char *endp = ap_strrchr_c(arg, '>');
int old_overrides = cmd->override;
char *old_path = cmd->path;
core_dir_config *conf;
const command_rec *thiscmd = cmd->cmd;
ap_conf_vector_t *new_if_conf = ap_create_per_dir_config(cmd->pool);
const char *err = ap_check_cmd_context(cmd, NOT_IN_LIMIT);
const char *condition;
const char *expr_err;
if (err != NULL) {
return err;
}
if (endp == NULL) {
return unclosed_directive(cmd);
}
arg = apr_pstrndup(cmd->temp_pool, arg, endp - arg);
/*
* Set a dummy value so that other directives notice that they are inside
* a config section.
*/
cmd->path = "*If";
/* Only if not an .htaccess file */
if (!old_path) {
cmd->override = OR_ALL|ACCESS_CONF;
}
/* initialize our config and fetch it */
conf = ap_set_config_vectors(cmd->server, new_if_conf, cmd->path,
&core_module, cmd->pool);
if (cmd->cmd->cmd_data == COND_IF)
conf->condition_ifelse = AP_CONDITION_IF;
else if (cmd->cmd->cmd_data == COND_ELSEIF)
conf->condition_ifelse = AP_CONDITION_ELSEIF;
else if (cmd->cmd->cmd_data == COND_ELSE)
conf->condition_ifelse = AP_CONDITION_ELSE;
else
ap_assert(0);
if (conf->condition_ifelse == AP_CONDITION_ELSE) {
if (arg[0])
return "<Else> does not take an argument";
}
else {
if (!arg[0])
return missing_container_arg(cmd);
condition = ap_getword_conf(cmd->pool, &arg);
conf->condition = ap_expr_parse_cmd(cmd, condition, 0, &expr_err, NULL);
if (expr_err)
return apr_psprintf(cmd->pool, "Cannot parse condition clause: %s",
expr_err);
}
errmsg = ap_walk_config(cmd->directive->first_child, cmd, new_if_conf);
if (errmsg != NULL)
return errmsg;
conf->d = cmd->path;
conf->d_is_fnmatch = 0;
conf->r = NULL;
errmsg = ap_add_if_conf(cmd->pool, (core_dir_config *)mconfig, new_if_conf);
if (errmsg != NULL)
return errmsg;
if (*arg != '\0') {
return apr_pstrcat(cmd->pool, "Multiple ", thiscmd->name,
"> arguments not supported.", NULL);
}
cmd->path = old_path;
cmd->override = old_overrides;
return NULL;
}
|
C
|
httpd
| 0 |
null | null | null |
https://github.com/chromium/chromium/commit/d30a8bd191f17b61938fc87890bffc80049b0774
|
d30a8bd191f17b61938fc87890bffc80049b0774
|
[Extensions] Rework inline installation observation
Instead of observing through the WebstoreAPI, observe directly in the TabHelper.
This is a great deal less code, more direct, and also fixes a lifetime issue
with the TabHelper being deleted before the inline installation completes.
BUG=613949
Review-Url: https://codereview.chromium.org/2103663002
Cr-Commit-Position: refs/heads/master@{#403188}
|
WebstoreAPI::ObservedInstallInfo::ObservedInstallInfo(
int routing_id,
const std::string& extension_id,
IPC::Sender* ipc_sender)
: routing_id(routing_id),
extension_id(extension_id),
ipc_sender(ipc_sender) {}
|
WebstoreAPI::ObservedInstallInfo::ObservedInstallInfo(
int routing_id,
const std::string& extension_id,
IPC::Sender* ipc_sender)
: routing_id(routing_id),
extension_id(extension_id),
ipc_sender(ipc_sender) {}
|
C
|
Chrome
| 0 |
CVE-2016-9806
|
https://www.cvedetails.com/cve/CVE-2016-9806/
|
CWE-415
|
https://github.com/torvalds/linux/commit/92964c79b357efd980812c4de5c1fd2ec8bb5520
|
92964c79b357efd980812c4de5c1fd2ec8bb5520
|
netlink: Fix dump skb leak/double free
When we free cb->skb after a dump, we do it after releasing the
lock. This means that a new dump could have started in the meantime,
and we would end up freeing its skb instead of ours.
This patch saves the skb and module before we unlock so we free
the right memory.
Fixes: 16b304f3404f ("netlink: Eliminate kmalloc in netlink dump operation.")
Reported-by: Baozeng Ding <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Acked-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
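A hedged userspace sketch of the pattern the patch applies (snapshot what you are going to free while still holding the lock); the types are illustrative, not kernel code:
```c
#include <pthread.h>
#include <stdlib.h>

struct dump_state {
    pthread_mutex_t lock;
    void *skb;                 /* buffer owned by the current dump */
};

static void finish_dump(struct dump_state *st)
{
    void *skb;

    pthread_mutex_lock(&st->lock);
    skb = st->skb;             /* snapshot under the lock ...          */
    st->skb = NULL;
    pthread_mutex_unlock(&st->lock);

    free(skb);                 /* ... so a newer dump's buffer is safe */
}
```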
|
static int netlink_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct module *module = NULL;
struct mutex *cb_mutex;
struct netlink_sock *nlk;
int (*bind)(struct net *net, int group);
void (*unbind)(struct net *net, int group);
int err = 0;
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
return -ESOCKTNOSUPPORT;
if (protocol < 0 || protocol >= MAX_LINKS)
return -EPROTONOSUPPORT;
netlink_lock_table();
#ifdef CONFIG_MODULES
if (!nl_table[protocol].registered) {
netlink_unlock_table();
request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
netlink_lock_table();
}
#endif
if (nl_table[protocol].registered &&
try_module_get(nl_table[protocol].module))
module = nl_table[protocol].module;
else
err = -EPROTONOSUPPORT;
cb_mutex = nl_table[protocol].cb_mutex;
bind = nl_table[protocol].bind;
unbind = nl_table[protocol].unbind;
netlink_unlock_table();
if (err < 0)
goto out;
err = __netlink_create(net, sock, cb_mutex, protocol, kern);
if (err < 0)
goto out_module;
local_bh_disable();
sock_prot_inuse_add(net, &netlink_proto, 1);
local_bh_enable();
nlk = nlk_sk(sock->sk);
nlk->module = module;
nlk->netlink_bind = bind;
nlk->netlink_unbind = unbind;
out:
return err;
out_module:
module_put(module);
goto out;
}
|
static int netlink_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct module *module = NULL;
struct mutex *cb_mutex;
struct netlink_sock *nlk;
int (*bind)(struct net *net, int group);
void (*unbind)(struct net *net, int group);
int err = 0;
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
return -ESOCKTNOSUPPORT;
if (protocol < 0 || protocol >= MAX_LINKS)
return -EPROTONOSUPPORT;
netlink_lock_table();
#ifdef CONFIG_MODULES
if (!nl_table[protocol].registered) {
netlink_unlock_table();
request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
netlink_lock_table();
}
#endif
if (nl_table[protocol].registered &&
try_module_get(nl_table[protocol].module))
module = nl_table[protocol].module;
else
err = -EPROTONOSUPPORT;
cb_mutex = nl_table[protocol].cb_mutex;
bind = nl_table[protocol].bind;
unbind = nl_table[protocol].unbind;
netlink_unlock_table();
if (err < 0)
goto out;
err = __netlink_create(net, sock, cb_mutex, protocol, kern);
if (err < 0)
goto out_module;
local_bh_disable();
sock_prot_inuse_add(net, &netlink_proto, 1);
local_bh_enable();
nlk = nlk_sk(sock->sk);
nlk->module = module;
nlk->netlink_bind = bind;
nlk->netlink_unbind = unbind;
out:
return err;
out_module:
module_put(module);
goto out;
}
|
C
|
linux
| 0 |
CVE-2016-5199
|
https://www.cvedetails.com/cve/CVE-2016-5199/
|
CWE-119
|
https://github.com/chromium/chromium/commit/c995d4fe5e96f4d6d4a88b7867279b08e72d2579
|
c995d4fe5e96f4d6d4a88b7867279b08e72d2579
|
Move IsDataSaverEnabledByUser to be a static method and use it
This method now officially becomes the source of truth that
everything in the code base eventually calls into to determine whether
or not DataSaver is enabled.
Bug: 934399
Change-Id: Iae837b710ace8cc3101188f79d02cbc2d4f0fd93
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1537242
Reviewed-by: Joshua Pawlicki <[email protected]>
Reviewed-by: Tarun Bansal <[email protected]>
Commit-Queue: Robert Ogden <[email protected]>
Cr-Commit-Position: refs/heads/master@{#643948}
|
void DataReductionProxySettings::InitPrefMembers() {
|
void DataReductionProxySettings::InitPrefMembers() {
DCHECK(thread_checker_.CalledOnValidThread());
spdy_proxy_auth_enabled_.Init(
prefs::kDataSaverEnabled, GetOriginalProfilePrefs(),
base::Bind(&DataReductionProxySettings::OnProxyEnabledPrefChange,
base::Unretained(this)));
}
|
C
|
Chrome
| 1 |
null | null | null |
https://github.com/chromium/chromium/commit/8353baf8d1504dbdd4ad7584ff2466de657521cd
|
8353baf8d1504dbdd4ad7584ff2466de657521cd
|
Remove WebFrame::canHaveSecureChild
To simplify the public API, ServiceWorkerNetworkProvider can do the
parent walk itself.
Follow-up to https://crrev.com/ad1850962644e19.
BUG=607543
Review-Url: https://codereview.chromium.org/2082493002
Cr-Commit-Position: refs/heads/master@{#400896}
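A hedged sketch of a "parent walk" in the sense described above (every ancestor frame must be a secure context); the types are illustrative, not the Blink API:
```c
#include <stddef.h>

struct frame {
    struct frame *parent;
    int is_secure_context;
};

/* Climb from the given frame to the root; the answer is yes only if
 * every frame on the way up is a secure context. */
static int can_have_secure_child(const struct frame *f)
{
    for (; f != NULL; f = f->parent) {
        if (!f->is_secure_context)
            return 0;
    }
    return 1;
}
```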
|
WebInsecureRequestPolicy WebFrame::getInsecureRequestPolicy() const
{
return toImplBase()->frame()->securityContext()->getInsecureRequestPolicy();
}
|
WebInsecureRequestPolicy WebFrame::getInsecureRequestPolicy() const
{
return toImplBase()->frame()->securityContext()->getInsecureRequestPolicy();
}
|
C
|
Chrome
| 0 |
CVE-2016-5185
|
https://www.cvedetails.com/cve/CVE-2016-5185/
|
CWE-416
|
https://github.com/chromium/chromium/commit/f2d26633cbd50735ac2af30436888b71ac0abad3
|
f2d26633cbd50735ac2af30436888b71ac0abad3
|
[Autofill] Remove AutofillPopupViewViews and associated feature.
Bug: 906135,831603
Change-Id: I3c982f8b3ffb4928c7c878e74e10113999106499
Reviewed-on: https://chromium-review.googlesource.com/c/1387124
Reviewed-by: Robert Kaplow <[email protected]>
Reviewed-by: Vasilii Sukhanov <[email protected]>
Reviewed-by: Fabio Tirelo <[email protected]>
Reviewed-by: Tommy Martino <[email protected]>
Commit-Queue: Mathieu Perreault <[email protected]>
Cr-Commit-Position: refs/heads/master@{#621360}
|
AutofillPopupFooterView* AutofillPopupFooterView::Create(
AutofillPopupViewNativeViews* popup_view,
int line_number,
int frontend_id) {
AutofillPopupFooterView* result =
new AutofillPopupFooterView(popup_view, line_number, frontend_id);
result->Init();
return result;
}
|
AutofillPopupFooterView* AutofillPopupFooterView::Create(
AutofillPopupViewNativeViews* popup_view,
int line_number,
int frontend_id) {
AutofillPopupFooterView* result =
new AutofillPopupFooterView(popup_view, line_number, frontend_id);
result->Init();
return result;
}
|
C
|
Chrome
| 0 |
CVE-2016-1675
|
https://www.cvedetails.com/cve/CVE-2016-1675/
|
CWE-284
|
https://github.com/chromium/chromium/commit/b276d0570cc816bfe25b431f2ee9bc265a6ad478
|
b276d0570cc816bfe25b431f2ee9bc265a6ad478
|
Fix one implicit 64-bit -> 32-bit implicit conversion in a PPAPI test.
../../ppapi/tests/test_url_loader.cc:877:11: warning: implicit conversion loses integer precision: 'int64_t' (aka 'long long') to 'int32_t' (aka 'int') [-Wshorten-64-to-32]
total_bytes_to_be_received);
^~~~~~~~~~~~~~~~~~~~~~~~~~
BUG=879657
Change-Id: I152f456368131fe7a2891ff0c97bf83f26ef0906
Reviewed-on: https://chromium-review.googlesource.com/c/1220173
Commit-Queue: Raymes Khoury <[email protected]>
Reviewed-by: Raymes Khoury <[email protected]>
Cr-Commit-Position: refs/heads/master@{#600182}
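For illustration, one hedged way to silence such a narrowing warning when the value must be reduced to 32 bits is an explicit checked conversion; this helper is invented, not the test's actual fix:
```c
#include <stdint.h>

/* Clamp a 64-bit value into the 32-bit range instead of relying on an
 * implicit (and warning-producing) truncation. */
static int32_t to_int32_clamped(int64_t v)
{
    if (v > INT32_MAX)
        return INT32_MAX;
    if (v < INT32_MIN)
        return INT32_MIN;
    return (int32_t)v;
}
```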
|
std::string TestURLLoader::TestPrefetchBufferThreshold() {
int32_t rv = OpenWithPrefetchBufferThreshold(-1, 1);
if (rv != PP_ERROR_FAILED) {
return ReportError("The prefetch limits contained a negative value but "
"the URLLoader did not fail.", rv);
}
rv = OpenWithPrefetchBufferThreshold(0, 1);
if (rv != PP_OK) {
return ReportError("The prefetch buffer limits were legal values but "
"the URLLoader failed.", rv);
}
rv = OpenWithPrefetchBufferThreshold(1000, 1);
if (rv != PP_ERROR_FAILED) {
return ReportError("The lower buffer value was higher than the upper but "
"the URLLoader did not fail.", rv);
}
PASS();
}
|
std::string TestURLLoader::TestPrefetchBufferThreshold() {
int32_t rv = OpenWithPrefetchBufferThreshold(-1, 1);
if (rv != PP_ERROR_FAILED) {
return ReportError("The prefetch limits contained a negative value but "
"the URLLoader did not fail.", rv);
}
rv = OpenWithPrefetchBufferThreshold(0, 1);
if (rv != PP_OK) {
return ReportError("The prefetch buffer limits were legal values but "
"the URLLoader failed.", rv);
}
rv = OpenWithPrefetchBufferThreshold(1000, 1);
if (rv != PP_ERROR_FAILED) {
return ReportError("The lower buffer value was higher than the upper but "
"the URLLoader did not fail.", rv);
}
PASS();
}
|
C
|
Chrome
| 0 |
CVE-2016-5185
|
https://www.cvedetails.com/cve/CVE-2016-5185/
|
CWE-416
|
https://github.com/chromium/chromium/commit/f2d26633cbd50735ac2af30436888b71ac0abad3
|
f2d26633cbd50735ac2af30436888b71ac0abad3
|
[Autofill] Remove AutofillPopupViewViews and associated feature.
Bug: 906135,831603
Change-Id: I3c982f8b3ffb4928c7c878e74e10113999106499
Reviewed-on: https://chromium-review.googlesource.com/c/1387124
Reviewed-by: Robert Kaplow <[email protected]>
Reviewed-by: Vasilii Sukhanov <[email protected]>
Reviewed-by: Fabio Tirelo <[email protected]>
Reviewed-by: Tommy Martino <[email protected]>
Commit-Queue: Mathieu Perreault <[email protected]>
Cr-Commit-Position: refs/heads/master@{#621360}
|
bool SkipConditionalFeatureEntry(const FeatureEntry& entry) {
version_info::Channel channel = chrome::GetChannel();
#if defined(OS_CHROMEOS)
if (!strcmp("mash", entry.internal_name) &&
channel == version_info::Channel::STABLE) {
return true;
}
if (!strcmp(ui_devtools::switches::kEnableUiDevTools, entry.internal_name) &&
channel == version_info::Channel::STABLE) {
return true;
}
if (!strcmp("enable-experimental-crostini-ui", entry.internal_name) &&
!base::FeatureList::IsEnabled(features::kCrostini)) {
return true;
}
#endif // defined(OS_CHROMEOS)
if ((!strcmp("data-reduction-proxy-lo-fi", entry.internal_name) ||
!strcmp("enable-data-reduction-proxy-lite-page", entry.internal_name)) &&
channel != version_info::Channel::BETA &&
channel != version_info::Channel::DEV &&
channel != version_info::Channel::CANARY &&
channel != version_info::Channel::UNKNOWN) {
return true;
}
#if defined(OS_WIN)
if (!strcmp("enable-hdr", entry.internal_name) &&
base::win::GetVersion() < base::win::Version::VERSION_WIN10) {
return true;
}
#endif // OS_WIN
return false;
}
|
bool SkipConditionalFeatureEntry(const FeatureEntry& entry) {
version_info::Channel channel = chrome::GetChannel();
#if defined(OS_CHROMEOS)
if (!strcmp("mash", entry.internal_name) &&
channel == version_info::Channel::STABLE) {
return true;
}
if (!strcmp(ui_devtools::switches::kEnableUiDevTools, entry.internal_name) &&
channel == version_info::Channel::STABLE) {
return true;
}
if (!strcmp("enable-experimental-crostini-ui", entry.internal_name) &&
!base::FeatureList::IsEnabled(features::kCrostini)) {
return true;
}
#endif // defined(OS_CHROMEOS)
if ((!strcmp("data-reduction-proxy-lo-fi", entry.internal_name) ||
!strcmp("enable-data-reduction-proxy-lite-page", entry.internal_name)) &&
channel != version_info::Channel::BETA &&
channel != version_info::Channel::DEV &&
channel != version_info::Channel::CANARY &&
channel != version_info::Channel::UNKNOWN) {
return true;
}
#if defined(OS_WIN)
if (!strcmp("enable-hdr", entry.internal_name) &&
base::win::GetVersion() < base::win::Version::VERSION_WIN10) {
return true;
}
#endif // OS_WIN
return false;
}
|
C
|
Chrome
| 0 |
CVE-2018-13006
|
https://www.cvedetails.com/cve/CVE-2018-13006/
|
CWE-125
|
https://github.com/gpac/gpac/commit/bceb03fd2be95097a7b409ea59914f332fb6bc86
|
bceb03fd2be95097a7b409ea59914f332fb6bc86
|
fixed 2 possible heap overflows (inc. #1088)
|
GF_Box *ssix_New()
{
ISOM_DECL_BOX_ALLOC(GF_SubsegmentIndexBox, GF_ISOM_BOX_TYPE_SSIX);
return (GF_Box *)tmp;
}
|
GF_Box *ssix_New()
{
ISOM_DECL_BOX_ALLOC(GF_SubsegmentIndexBox, GF_ISOM_BOX_TYPE_SSIX);
return (GF_Box *)tmp;
}
|
C
|
gpac
| 0 |
CVE-2015-1213
|
https://www.cvedetails.com/cve/CVE-2015-1213/
|
CWE-119
|
https://github.com/chromium/chromium/commit/faaa2fd0a05f1622d9a8806da118d4f3b602e707
|
faaa2fd0a05f1622d9a8806da118d4f3b602e707
|
[Blink>Media] Allow autoplay muted on Android by default
There was a mistake causing autoplay muted to be shipped on Android,
but it will be disabled if the Chromium embedder doesn't specify a
content setting for the "AllowAutoplay" preference. This CL makes the
AllowAutoplay preference true by default so that it is allowed by
embedders (including AndroidWebView) unless they explicitly
disable it.
Intent to ship:
https://groups.google.com/a/chromium.org/d/msg/blink-dev/Q1cnzNI2GpI/AL_eyUNABgAJ
BUG=689018
Review-Url: https://codereview.chromium.org/2677173002
Cr-Commit-Position: refs/heads/master@{#448423}
|
bool HTMLMediaElement::shouldAutoplay() {
if (document().isSandboxed(SandboxAutomaticFeatures))
return false;
return m_autoplaying && m_paused && autoplay();
}
|
bool HTMLMediaElement::shouldAutoplay() {
if (document().isSandboxed(SandboxAutomaticFeatures))
return false;
return m_autoplaying && m_paused && autoplay();
}
|
C
|
Chrome
| 0 |
CVE-2018-6042
|
https://www.cvedetails.com/cve/CVE-2018-6042/
|
CWE-20
|
https://github.com/chromium/chromium/commit/b3f0207c14fccc11aaa9d4975ebe46554ad289cb
|
b3f0207c14fccc11aaa9d4975ebe46554ad289cb
|
Add a few more confusable map entries
1. Map Malaylam U+0D1F to 's'.
2. Map 'small-cap-like' Cyrillic letters to "look-alike" Latin lowercase
letters.
The characters in new confusable map entries are replaced by their Latin
"look-alike" characters before the skeleton is calculated to compare with
top domain names.
Bug: 784761,773930
Test: components_unittests --gtest_filter=*IDNToUni*
Change-Id: Ib26664e21ac5eb290e4a2993b01cbf0edaade0ee
Reviewed-on: https://chromium-review.googlesource.com/805214
Reviewed-by: Peter Kasting <[email protected]>
Commit-Queue: Jungshik Shin <[email protected]>
Cr-Commit-Position: refs/heads/master@{#521648}
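The mapping step described in this commit message can be illustrated with a tiny, self-contained sketch: before a skeleton is computed, a few look-alike code points are replaced with Latin letters. The table below is deliberately minimal and partly invented (only the U+0D1F -> 's' entry is taken from the message above); the real component uses ICU and a much larger confusable map.
#include <stddef.h>
#include <stdint.h>
/* Invented, deliberately tiny confusable table for illustration. */
struct confusable_entry { uint32_t code_point; char latin; };
static const struct confusable_entry kExampleMap[] = {
	{ 0x0D1F, 's' },  /* Malayalam code point mapped to 's', per the message above */
	{ 0x0430, 'a' },  /* Cyrillic look-alike of Latin 'a' */
	{ 0x043E, 'o' },  /* Cyrillic look-alike of Latin 'o' */
};
/* Replace a look-alike code point with its Latin counterpart before a
 * skeleton is computed; return 0 to leave the character unchanged. */
static char example_map_confusable(uint32_t code_point)
{
	for (size_t i = 0; i < sizeof(kExampleMap) / sizeof(kExampleMap[0]); i++)
		if (kExampleMap[i].code_point == code_point)
			return kExampleMap[i].latin;
	return 0;
}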
|
bool IDNSpoofChecker::IsMadeOfLatinAlikeCyrillic(
const icu::UnicodeString& label) {
icu::UnicodeSet cyrillic_in_label;
icu::StringCharacterIterator it(label);
for (it.setToStart(); it.hasNext();) {
const UChar32 c = it.next32PostInc();
if (cyrillic_letters_.contains(c))
cyrillic_in_label.add(c);
}
return !cyrillic_in_label.isEmpty() &&
cyrillic_letters_latin_alike_.containsAll(cyrillic_in_label);
}
|
bool IDNSpoofChecker::IsMadeOfLatinAlikeCyrillic(
const icu::UnicodeString& label) {
icu::UnicodeSet cyrillic_in_label;
icu::StringCharacterIterator it(label);
for (it.setToStart(); it.hasNext();) {
const UChar32 c = it.next32PostInc();
if (cyrillic_letters_.contains(c))
cyrillic_in_label.add(c);
}
return !cyrillic_in_label.isEmpty() &&
cyrillic_letters_latin_alike_.containsAll(cyrillic_in_label);
}
|
C
|
Chrome
| 0 |
CVE-2015-1335
|
https://www.cvedetails.com/cve/CVE-2015-1335/
|
CWE-59
|
https://github.com/lxc/lxc/commit/592fd47a6245508b79fe6ac819fe6d3b2c1289be
|
592fd47a6245508b79fe6ac819fe6d3b2c1289be
|
CVE-2015-1335: Protect container mounts against symlinks
When a container starts up, lxc sets up the container's inital fstree
by doing a bunch of mounting, guided by the container configuration
file. The container config is owned by the admin or user on the host,
so we do not try to guard against bad entries. However, since the
mount target is in the container, it's possible that the container admin
could divert the mount with symbolic links. This could bypass proper
container startup (i.e. confinement of a root-owned container by the
restrictive apparmor policy, by diverting the required write to
/proc/self/attr/current), or bypass the (path-based) apparmor policy
by diverting, say, /proc to /mnt in the container.
To prevent this,
1. do not allow mounts to paths containing symbolic links
2. do not allow bind mounts from relative paths containing symbolic
links.
Details:
Define safe_mount which ensures that the container has not inserted any
symbolic links into any mount targets for mounts to be done during
container setup.
The host's mount path may contain symbolic links. As it is under the
control of the administrator, that's ok. So safe_mount begins the check
for symbolic links after the rootfs->mount, by opening that directory.
It opens each directory along the path using openat() relative to the
parent directory using O_NOFOLLOW. When the target is reached, it
mounts onto /proc/self/fd/<targetfd>.
Use safe_mount() in mount_entry(), when mounting container proc,
and when needed. In particular, safe_mount() need not be used in
any case where:
1. the mount is done in the container's namespace
2. the mount is for the container's rootfs
3. the mount is relative to a tmpfs or proc/sysfs which we have
just safe_mount()ed ourselves
Since we were using proc/net as a temporary placeholder for /proc/sys/net
during container startup, and proc/net is a symbolic link, use proc/tty
instead.
Update the lxc.container.conf manpage with details about the new
restrictions.
Finally, add a testcase to test some symbolic link possibilities.
Reported-by: Roman Fiedler
Signed-off-by: Serge Hallyn <[email protected]>
Acked-by: Stéphane Graber <[email protected]>
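A minimal sketch of the symlink-safe target resolution described above, assuming a Linux host: each component below the rootfs mount is opened with openat() and O_NOFOLLOW, and the mount is then performed through /proc/self/fd/<fd>. Function names are illustrative; this is not the lxc safe_mount() implementation.
#define _GNU_SOURCE
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mount.h>
/* Open the mount target below "root" one component at a time.
 * O_NOFOLLOW makes openat() fail if any component is a symlink, so a
 * container cannot divert the mount.  Illustrative sketch only. */
static int open_target_nofollow(const char *root, const char *relpath)
{
	char tmp[PATH_MAX];
	char *saveptr = NULL, *part;
	int fd, next;
	/* The host-controlled root may legitimately contain symlinks, so
	 * the check starts below it, as described above. */
	fd = open(root, O_RDONLY | O_DIRECTORY | O_CLOEXEC);
	if (fd < 0)
		return -1;
	snprintf(tmp, sizeof(tmp), "%s", relpath);
	for (part = strtok_r(tmp, "/", &saveptr); part;
	     part = strtok_r(NULL, "/", &saveptr)) {
		next = openat(fd, part, O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
		close(fd);
		if (next < 0)
			return -1;
		fd = next;
	}
	return fd;
}
/* Mount through /proc/self/fd/<fd> so the path that was validated is
 * exactly the path that gets mounted on. */
static int mount_without_symlinks(const char *src, const char *root,
				  const char *relpath, const char *fstype,
				  unsigned long flags, const void *data)
{
	char proc_path[64];
	int ret, fd = open_target_nofollow(root, relpath);
	if (fd < 0)
		return -1;
	snprintf(proc_path, sizeof(proc_path), "/proc/self/fd/%d", fd);
	ret = mount(src, proc_path, fstype, flags, data);
	close(fd);
	return ret;
}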
|
static int mount_file_entries(const struct lxc_rootfs *rootfs, FILE *file,
const char *lxc_name)
{
struct mntent mntent;
char buf[4096];
int ret = -1;
while (getmntent_r(file, &mntent, buf, sizeof(buf))) {
if (!rootfs->path) {
if (mount_entry_on_systemfs(&mntent))
goto out;
continue;
}
/* We have a separate root, mounts are relative to it */
if (mntent.mnt_dir[0] != '/') {
if (mount_entry_on_relative_rootfs(&mntent,
rootfs->mount))
goto out;
continue;
}
if (mount_entry_on_absolute_rootfs(&mntent, rootfs, lxc_name))
goto out;
}
ret = 0;
INFO("mount points have been setup");
out:
return ret;
}
|
static int mount_file_entries(const struct lxc_rootfs *rootfs, FILE *file,
const char *lxc_name)
{
struct mntent mntent;
char buf[4096];
int ret = -1;
while (getmntent_r(file, &mntent, buf, sizeof(buf))) {
if (!rootfs->path) {
if (mount_entry_on_systemfs(&mntent))
goto out;
continue;
}
/* We have a separate root, mounts are relative to it */
if (mntent.mnt_dir[0] != '/') {
if (mount_entry_on_relative_rootfs(&mntent,
rootfs->mount))
goto out;
continue;
}
if (mount_entry_on_absolute_rootfs(&mntent, rootfs, lxc_name))
goto out;
}
ret = 0;
INFO("mount points have been setup");
out:
return ret;
}
|
C
|
lxc
| 0 |
CVE-2012-3412
|
https://www.cvedetails.com/cve/CVE-2012-3412/
|
CWE-189
|
https://github.com/torvalds/linux/commit/68cb695ccecf949d48949e72f8ce591fdaaa325c
|
68cb695ccecf949d48949e72f8ce591fdaaa325c
|
sfc: Fix maximum number of TSO segments and minimum TX queue size
[ Upstream commit 7e6d06f0de3f74ca929441add094518ae332257c ]
Currently an skb requiring TSO may not fit within a minimum-size TX
queue. The TX queue selected for the skb may stall and trigger the TX
watchdog repeatedly (since the problem skb will be retried after the
TX reset). This issue is designated as CVE-2012-3412.
Set the maximum number of TSO segments for our devices to 100. This
should make no difference to behaviour unless the actual MSS is less
than about 700. Increase the minimum TX queue size accordingly to
allow for 2 worst-case skbs, so that there will definitely be space
to add an skb after we wake a queue.
To avoid invalidating existing configurations, change
efx_ethtool_set_ringparam() to fix up values that are too small rather
than returning -EINVAL.
Signed-off-by: Ben Hutchings <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
Signed-off-by: Ben Hutchings <[email protected]>
|
static void efx_stop_channel(struct efx_channel *channel)
{
if (!channel->enabled)
return;
netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
"stop chan %d\n", channel->channel);
channel->enabled = false;
napi_disable(&channel->napi_str);
}
|
static void efx_stop_channel(struct efx_channel *channel)
{
if (!channel->enabled)
return;
netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
"stop chan %d\n", channel->channel);
channel->enabled = false;
napi_disable(&channel->napi_str);
}
|
C
|
linux
| 0 |
CVE-2013-0886
|
https://www.cvedetails.com/cve/CVE-2013-0886/
| null |
https://github.com/chromium/chromium/commit/18d67244984a574ba2dd8779faabc0e3e34f4b76
|
18d67244984a574ba2dd8779faabc0e3e34f4b76
|
Implement TextureImageTransportSurface using texture mailbox
This has a couple of advantages:
- allow tearing down and recreating the UI parent context without
losing the renderer contexts
- do not require a context to be able to generate textures when
creating the GLSurfaceHandle
- clearer ownership semantics that potentially allows for more
robust and easier lost context handling/thumbnailing/etc., since a texture is at
any given time owned by either: UI parent, mailbox, or
TextureImageTransportSurface
- simplify frontbuffer protection logic;
the frontbuffer textures are now owned by RWHV where they are refcounted
The TextureImageTransportSurface informs RenderWidgetHostView of the
mailbox names for the front- and backbuffer textures by
associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message.
During SwapBuffers() or PostSubBuffer() cycles, it then uses
produceTextureCHROMIUM() and consumeTextureCHROMIUM()
to transfer ownership between renderer and browser compositor.
RWHV sends back the surface_handle of the buffer being returned with the Swap ACK
(or 0 if no buffer is being returned in which case TextureImageTransportSurface will
allocate a new texture - note that this could be used to
simply keep textures for thumbnailing).
BUG=154815,139616
[email protected]
Review URL: https://chromiumcodereview.appspot.com/11194042
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98
|
gfx::NativeView RenderWidgetHostViewGtk::GetNativeView() const {
return view_.get();
}
|
gfx::NativeView RenderWidgetHostViewGtk::GetNativeView() const {
return view_.get();
}
|
C
|
Chrome
| 0 |
CVE-2016-3835
|
https://www.cvedetails.com/cve/CVE-2016-3835/
|
CWE-200
|
https://android.googlesource.com/platform/hardware/qcom/media/+/7558d03e6498e970b761aa44fff6b2c659202d95
|
7558d03e6498e970b761aa44fff6b2c659202d95
|
DO NOT MERGE mm-video-v4l2: venc: add checks before accessing heap pointers
Heap pointers do not point to user virtual addresses in case
of secure session.
Set them to NULL and add checks to avoid accessing them
Bug: 28815329
Bug: 28920116
Change-Id: I94fd5808e753b58654d65e175d3857ef46ffba26
|
OMX_ERRORTYPE omx_video::get_extension_index(OMX_IN OMX_HANDLETYPE hComp,
OMX_IN OMX_STRING paramName,
OMX_OUT OMX_INDEXTYPE* indexType)
{
(void)hComp;
if (m_state == OMX_StateInvalid) {
DEBUG_PRINT_ERROR("ERROR: Get Extension Index in Invalid State");
return OMX_ErrorInvalidState;
}
#ifdef MAX_RES_1080P
if (!strncmp(paramName, "OMX.QCOM.index.param.SliceDeliveryMode",
sizeof("OMX.QCOM.index.param.SliceDeliveryMode") - 1)) {
*indexType = (OMX_INDEXTYPE)OMX_QcomIndexEnableSliceDeliveryMode;
return OMX_ErrorNone;
}
#endif
#ifdef _ANDROID_ICS_
if (!strncmp(paramName, "OMX.google.android.index.storeMetaDataInBuffers",
sizeof("OMX.google.android.index.storeMetaDataInBuffers") - 1)) {
*indexType = (OMX_INDEXTYPE)OMX_QcomIndexParamVideoMetaBufferMode;
return OMX_ErrorNone;
}
#endif
if (!strncmp(paramName, "OMX.google.android.index.prependSPSPPSToIDRFrames",
sizeof("OMX.google.android.index.prependSPSPPSToIDRFrames") - 1)) {
*indexType = (OMX_INDEXTYPE)OMX_QcomIndexParamSequenceHeaderWithIDR;
return OMX_ErrorNone;
}
return OMX_ErrorNotImplemented;
}
|
OMX_ERRORTYPE omx_video::get_extension_index(OMX_IN OMX_HANDLETYPE hComp,
OMX_IN OMX_STRING paramName,
OMX_OUT OMX_INDEXTYPE* indexType)
{
(void)hComp;
if (m_state == OMX_StateInvalid) {
DEBUG_PRINT_ERROR("ERROR: Get Extension Index in Invalid State");
return OMX_ErrorInvalidState;
}
#ifdef MAX_RES_1080P
if (!strncmp(paramName, "OMX.QCOM.index.param.SliceDeliveryMode",
sizeof("OMX.QCOM.index.param.SliceDeliveryMode") - 1)) {
*indexType = (OMX_INDEXTYPE)OMX_QcomIndexEnableSliceDeliveryMode;
return OMX_ErrorNone;
}
#endif
#ifdef _ANDROID_ICS_
if (!strncmp(paramName, "OMX.google.android.index.storeMetaDataInBuffers",
sizeof("OMX.google.android.index.storeMetaDataInBuffers") - 1)) {
*indexType = (OMX_INDEXTYPE)OMX_QcomIndexParamVideoMetaBufferMode;
return OMX_ErrorNone;
}
#endif
if (!strncmp(paramName, "OMX.google.android.index.prependSPSPPSToIDRFrames",
sizeof("OMX.google.android.index.prependSPSPPSToIDRFrames") - 1)) {
*indexType = (OMX_INDEXTYPE)OMX_QcomIndexParamSequenceHeaderWithIDR;
return OMX_ErrorNone;
}
return OMX_ErrorNotImplemented;
}
|
C
|
Android
| 0 |
CVE-2016-9084
|
https://www.cvedetails.com/cve/CVE-2016-9084/
|
CWE-190
|
https://github.com/torvalds/linux/commit/05692d7005a364add85c6e25a6c4447ce08f913a
|
05692d7005a364add85c6e25a6c4447ce08f913a
|
vfio/pci: Fix integer overflows, bitmask check
The VFIO_DEVICE_SET_IRQS ioctl did not sufficiently sanitize
user-supplied integers, potentially allowing memory corruption. This
patch adds appropriate integer overflow checks, checks the range bounds
for VFIO_IRQ_SET_DATA_NONE, and also verifies that only a single element
in the VFIO_IRQ_SET_DATA_TYPE_MASK bitmask is set.
VFIO_IRQ_SET_ACTION_TYPE_MASK is already correctly checked later in
vfio_pci_set_irqs_ioctl().
Furthermore, a kzalloc is changed to a kcalloc because the use of a
kzalloc with an integer multiplication allowed an integer overflow
condition to be reached without this patch. kcalloc checks for overflow
and should prevent a similar occurrence.
Signed-off-by: Vlad Tsyrklevich <[email protected]>
Signed-off-by: Alex Williamson <[email protected]>
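The kzalloc-to-kcalloc change mentioned above amounts to checking the multiplication before allocating. A userspace-style sketch of that property, with invented names (not the vfio code):
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
/* Overflow-checked zeroed array allocation: the property kcalloc()
 * provides and a bare kzalloc(n * size, ...) does not. */
static void *example_calloc_checked(size_t n, size_t size)
{
	void *p;
	/* Refuse allocations whose byte count would wrap around. */
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	p = malloc(n * size);
	if (p)
		memset(p, 0, n * size);
	return p;
}
With this check, a huge user-supplied count fails cleanly instead of silently producing an undersized buffer.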
|
static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
u8 pin;
pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin)
return 1;
} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
u8 pos;
u16 flags;
pos = vdev->pdev->msi_cap;
if (pos) {
pci_read_config_word(vdev->pdev,
pos + PCI_MSI_FLAGS, &flags);
return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
}
} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
u8 pos;
u16 flags;
pos = vdev->pdev->msix_cap;
if (pos) {
pci_read_config_word(vdev->pdev,
pos + PCI_MSIX_FLAGS, &flags);
return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
}
} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
if (pci_is_pcie(vdev->pdev))
return 1;
} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
return 1;
}
return 0;
}
|
static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
u8 pin;
pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin)
return 1;
} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
u8 pos;
u16 flags;
pos = vdev->pdev->msi_cap;
if (pos) {
pci_read_config_word(vdev->pdev,
pos + PCI_MSI_FLAGS, &flags);
return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
}
} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
u8 pos;
u16 flags;
pos = vdev->pdev->msix_cap;
if (pos) {
pci_read_config_word(vdev->pdev,
pos + PCI_MSIX_FLAGS, &flags);
return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
}
} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
if (pci_is_pcie(vdev->pdev))
return 1;
} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
return 1;
}
return 0;
}
|
C
|
linux
| 0 |
CVE-2019-5826
| null | null |
https://github.com/chromium/chromium/commit/eaf2e8bce3855d362e53034bd83f0e3aff8714e4
|
eaf2e8bce3855d362e53034bd83f0e3aff8714e4
|
[IndexedDB] Fixed force close during pending connection open
During a force close of the database, the connections to that database
are iterated and force closed. The iteration method was not safe to
modification, and if there was a pending connection waiting to open,
that request would execute once all the other connections were
destroyed and create a new connection.
This change changes the iteration method to account for new connections
that are added during the iteration.
[email protected]
Bug: 941746
Change-Id: If1b3137237dc2920ad369d6ac99c963ed9c57d0c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1522330
Commit-Queue: Daniel Murphy <[email protected]>
Reviewed-by: Chase Phillips <[email protected]>
Cr-Commit-Position: refs/heads/master@{#640604}
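One generic way to make such an iteration safe against entries added mid-loop is to keep draining the head of the collection until it stays empty, rather than walking a saved iterator. The C sketch below shows that pattern with invented types; it is not the Chromium implementation.
#include <stdio.h>
#include <stdlib.h>
struct conn {
	int id;
	struct conn *next;
};
/* Drain the head of the list until it stays empty.  Entries appended
 * while earlier connections are being closed are still picked up,
 * unlike a walk over a saved iterator into the original collection. */
static void force_close_all(struct conn **head)
{
	while (*head) {
		struct conn *c = *head;
		*head = c->next;
		printf("force-closing connection %d\n", c->id);
		free(c);
	}
}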
|
IndexedDBDatabaseOperationAbortTest() {
commit_success_ = leveldb::Status::NotFound("Bummer.");
}
|
IndexedDBDatabaseOperationAbortTest() {
commit_success_ = leveldb::Status::NotFound("Bummer.");
}
|
C
|
Chrome
| 0 |
CVE-2011-4112
|
https://www.cvedetails.com/cve/CVE-2011-4112/
|
CWE-264
|
https://github.com/torvalds/linux/commit/550fd08c2cebad61c548def135f67aba284c6162
|
550fd08c2cebad61c548def135f67aba284c6162
|
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, We are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs). There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearing the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int get_wep_tx_idx(struct airo_info *ai)
{
WepKeyRid wkr;
int rc;
__le16 lastindex;
rc = readWepKeyRid(ai, &wkr, 1, 1);
if (rc != SUCCESS)
return -1;
do {
lastindex = wkr.kindex;
if (wkr.kindex == cpu_to_le16(0xffff))
return wkr.mac[0];
rc = readWepKeyRid(ai, &wkr, 0, 1);
if (rc != SUCCESS)
return -1;
} while (lastindex != wkr.kindex);
return -1;
}
|
static int get_wep_tx_idx(struct airo_info *ai)
{
WepKeyRid wkr;
int rc;
__le16 lastindex;
rc = readWepKeyRid(ai, &wkr, 1, 1);
if (rc != SUCCESS)
return -1;
do {
lastindex = wkr.kindex;
if (wkr.kindex == cpu_to_le16(0xffff))
return wkr.mac[0];
rc = readWepKeyRid(ai, &wkr, 0, 1);
if (rc != SUCCESS)
return -1;
} while (lastindex != wkr.kindex);
return -1;
}
|
C
|
linux
| 0 |
CVE-2017-11450
|
https://www.cvedetails.com/cve/CVE-2017-11450/
|
CWE-20
|
https://github.com/ImageMagick/ImageMagick/commit/948356eec65aea91995d4b7cc487d197d2c5f602
|
948356eec65aea91995d4b7cc487d197d2c5f602
|
...
|
static void WriteProfile(j_compress_ptr jpeg_info,Image *image)
{
const char
*name;
const StringInfo
*profile;
MagickBooleanType
iptc;
register ssize_t
i;
size_t
length,
tag_length;
StringInfo
*custom_profile;
/*
Save image profile as a APP marker.
*/
iptc=MagickFalse;
custom_profile=AcquireStringInfo(65535L);
ResetImageProfileIterator(image);
for (name=GetNextImageProfile(image); name != (const char *) NULL; )
{
register unsigned char
*p;
profile=GetImageProfile(image,name);
p=GetStringInfoDatum(custom_profile);
if (LocaleCompare(name,"EXIF") == 0)
{
length=GetStringInfoLength(profile);
if (length > 65533L)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
CoderWarning,"ExifProfileSizeExceedsLimit","`%s'",
image->filename);
length=65533L;
}
jpeg_write_marker(jpeg_info,XML_MARKER,GetStringInfoDatum(profile),
(unsigned int) length);
}
if (LocaleCompare(name,"ICC") == 0)
{
register unsigned char
*p;
tag_length=strlen(ICC_PROFILE);
p=GetStringInfoDatum(custom_profile);
(void) CopyMagickMemory(p,ICC_PROFILE,tag_length);
p[tag_length]='\0';
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65519L)
{
length=MagickMin(GetStringInfoLength(profile)-i,65519L);
p[12]=(unsigned char) ((i/65519L)+1);
p[13]=(unsigned char) (GetStringInfoLength(profile)/65519L+1);
(void) CopyMagickMemory(p+tag_length+3,GetStringInfoDatum(profile)+i,
length);
jpeg_write_marker(jpeg_info,ICC_MARKER,GetStringInfoDatum(
custom_profile),(unsigned int) (length+tag_length+3));
}
}
if (((LocaleCompare(name,"IPTC") == 0) ||
(LocaleCompare(name,"8BIM") == 0)) && (iptc == MagickFalse))
{
size_t
roundup;
iptc=MagickTrue;
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65500L)
{
length=MagickMin(GetStringInfoLength(profile)-i,65500L);
roundup=(size_t) (length & 0x01);
if (LocaleNCompare((char *) GetStringInfoDatum(profile),"8BIM",4) == 0)
{
(void) memcpy(p,"Photoshop 3.0 ",14);
tag_length=14;
}
else
{
(void) CopyMagickMemory(p,"Photoshop 3.0 8BIM\04\04\0\0\0\0",24);
tag_length=26;
p[24]=(unsigned char) (length >> 8);
p[25]=(unsigned char) (length & 0xff);
}
p[13]=0x00;
(void) memcpy(p+tag_length,GetStringInfoDatum(profile)+i,length);
if (roundup != 0)
p[length+tag_length]='\0';
jpeg_write_marker(jpeg_info,IPTC_MARKER,GetStringInfoDatum(
custom_profile),(unsigned int) (length+tag_length+roundup));
}
}
if (LocaleCompare(name,"XMP") == 0)
{
StringInfo
*xmp_profile;
/*
Add namespace to XMP profile.
*/
xmp_profile=StringToStringInfo("http://ns.adobe.com/xap/1.0/ ");
if (xmp_profile != (StringInfo *) NULL)
{
if (profile != (StringInfo *) NULL)
ConcatenateStringInfo(xmp_profile,profile);
GetStringInfoDatum(xmp_profile)[28]='\0';
for (i=0; i < (ssize_t) GetStringInfoLength(xmp_profile); i+=65533L)
{
length=MagickMin(GetStringInfoLength(xmp_profile)-i,65533L);
jpeg_write_marker(jpeg_info,XML_MARKER,
GetStringInfoDatum(xmp_profile)+i,(unsigned int) length);
}
xmp_profile=DestroyStringInfo(xmp_profile);
}
}
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"%s profile: %.20g bytes",name,(double) GetStringInfoLength(profile));
name=GetNextImageProfile(image);
}
custom_profile=DestroyStringInfo(custom_profile);
}
|
static void WriteProfile(j_compress_ptr jpeg_info,Image *image)
{
const char
*name;
const StringInfo
*profile;
MagickBooleanType
iptc;
register ssize_t
i;
size_t
length,
tag_length;
StringInfo
*custom_profile;
/*
Save image profile as a APP marker.
*/
iptc=MagickFalse;
custom_profile=AcquireStringInfo(65535L);
ResetImageProfileIterator(image);
for (name=GetNextImageProfile(image); name != (const char *) NULL; )
{
register unsigned char
*p;
profile=GetImageProfile(image,name);
p=GetStringInfoDatum(custom_profile);
if (LocaleCompare(name,"EXIF") == 0)
{
length=GetStringInfoLength(profile);
if (length > 65533L)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
CoderWarning,"ExifProfileSizeExceedsLimit","`%s'",
image->filename);
length=65533L;
}
jpeg_write_marker(jpeg_info,XML_MARKER,GetStringInfoDatum(profile),
(unsigned int) length);
}
if (LocaleCompare(name,"ICC") == 0)
{
register unsigned char
*p;
tag_length=strlen(ICC_PROFILE);
p=GetStringInfoDatum(custom_profile);
(void) CopyMagickMemory(p,ICC_PROFILE,tag_length);
p[tag_length]='\0';
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65519L)
{
length=MagickMin(GetStringInfoLength(profile)-i,65519L);
p[12]=(unsigned char) ((i/65519L)+1);
p[13]=(unsigned char) (GetStringInfoLength(profile)/65519L+1);
(void) CopyMagickMemory(p+tag_length+3,GetStringInfoDatum(profile)+i,
length);
jpeg_write_marker(jpeg_info,ICC_MARKER,GetStringInfoDatum(
custom_profile),(unsigned int) (length+tag_length+3));
}
}
if (((LocaleCompare(name,"IPTC") == 0) ||
(LocaleCompare(name,"8BIM") == 0)) && (iptc == MagickFalse))
{
size_t
roundup;
iptc=MagickTrue;
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65500L)
{
length=MagickMin(GetStringInfoLength(profile)-i,65500L);
roundup=(size_t) (length & 0x01);
if (LocaleNCompare((char *) GetStringInfoDatum(profile),"8BIM",4) == 0)
{
(void) memcpy(p,"Photoshop 3.0 ",14);
tag_length=14;
}
else
{
(void) CopyMagickMemory(p,"Photoshop 3.0 8BIM\04\04\0\0\0\0",24);
tag_length=26;
p[24]=(unsigned char) (length >> 8);
p[25]=(unsigned char) (length & 0xff);
}
p[13]=0x00;
(void) memcpy(p+tag_length,GetStringInfoDatum(profile)+i,length);
if (roundup != 0)
p[length+tag_length]='\0';
jpeg_write_marker(jpeg_info,IPTC_MARKER,GetStringInfoDatum(
custom_profile),(unsigned int) (length+tag_length+roundup));
}
}
if (LocaleCompare(name,"XMP") == 0)
{
StringInfo
*xmp_profile;
/*
Add namespace to XMP profile.
*/
xmp_profile=StringToStringInfo("http://ns.adobe.com/xap/1.0/ ");
if (xmp_profile != (StringInfo *) NULL)
{
if (profile != (StringInfo *) NULL)
ConcatenateStringInfo(xmp_profile,profile);
GetStringInfoDatum(xmp_profile)[28]='\0';
for (i=0; i < (ssize_t) GetStringInfoLength(xmp_profile); i+=65533L)
{
length=MagickMin(GetStringInfoLength(xmp_profile)-i,65533L);
jpeg_write_marker(jpeg_info,XML_MARKER,
GetStringInfoDatum(xmp_profile)+i,(unsigned int) length);
}
xmp_profile=DestroyStringInfo(xmp_profile);
}
}
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"%s profile: %.20g bytes",name,(double) GetStringInfoLength(profile));
name=GetNextImageProfile(image);
}
custom_profile=DestroyStringInfo(custom_profile);
}
|
C
|
ImageMagick
| 0 |
CVE-2018-15857
|
https://www.cvedetails.com/cve/CVE-2018-15857/
|
CWE-416
|
https://github.com/xkbcommon/libxkbcommon/commit/c1e5ac16e77a21f87bdf3bc4dea61b037a17dddb
|
c1e5ac16e77a21f87bdf3bc4dea61b037a17dddb
|
xkbcomp: fix pointer value for FreeStmt
Signed-off-by: Peter Hutterer <[email protected]>
|
LedNameCreate(unsigned ndx, ExprDef *name, bool virtual)
{
LedNameDef *def = malloc(sizeof(*def));
if (!def)
return NULL;
def->common.type = STMT_LED_NAME;
def->common.next = NULL;
def->merge = MERGE_DEFAULT;
def->ndx = ndx;
def->name = name;
def->virtual = virtual;
return def;
}
|
LedNameCreate(unsigned ndx, ExprDef *name, bool virtual)
{
LedNameDef *def = malloc(sizeof(*def));
if (!def)
return NULL;
def->common.type = STMT_LED_NAME;
def->common.next = NULL;
def->merge = MERGE_DEFAULT;
def->ndx = ndx;
def->name = name;
def->virtual = virtual;
return def;
}
|
C
|
libxkbcommon
| 0 |
CVE-2014-9659
|
https://www.cvedetails.com/cve/CVE-2014-9659/
|
CWE-119
|
https://git.savannah.gnu.org/cgit/freetype/freetype2.git/commit/?id=2cdc4562f873237f1c77d43540537c7a721d3fd8
|
2cdc4562f873237f1c77d43540537c7a721d3fd8
| null |
cf2_hint_isTop( const CF2_Hint hint )
{
return (FT_Bool)( ( hint->flags &
( CF2_PairTop | CF2_GhostTop ) ) != 0 );
}
|
cf2_hint_isTop( const CF2_Hint hint )
{
return (FT_Bool)( ( hint->flags &
( CF2_PairTop | CF2_GhostTop ) ) != 0 );
}
|
C
|
savannah
| 0 |
CVE-2013-7448
|
https://www.cvedetails.com/cve/CVE-2013-7448/
|
CWE-22
|
https://github.com/yarolig/didiwiki/commit/5e5c796617e1712905dc5462b94bd5e6c08d15ea
|
5e5c796617e1712905dc5462b94bd5e6c08d15ea
|
page_name_is_good function
|
changes_compar(const struct dirent **d1, const struct dirent **d2)
{
struct stat st1, st2;
stat((*d1)->d_name, &st1);
stat((*d2)->d_name, &st2);
if (st1.st_mtime > st2.st_mtime)
return 1;
else
return -1;
}
|
changes_compar(const struct dirent **d1, const struct dirent **d2)
{
struct stat st1, st2;
stat((*d1)->d_name, &st1);
stat((*d2)->d_name, &st2);
if (st1.st_mtime > st2.st_mtime)
return 1;
else
return -1;
}
|
C
|
didiwiki
| 0 |
CVE-2014-3610
|
https://www.cvedetails.com/cve/CVE-2014-3610/
|
CWE-264
|
https://github.com/torvalds/linux/commit/854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
|
854e8bb1aa06c578c2c9145fa6bfe3680ef63b23
|
KVM: x86: Check non-canonical addresses upon WRMSR
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture since they would
anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
non-canonical address is written on Intel but not on AMD (which ignores the top
32-bits).
Accordingly, this patch injects a #GP on the MSRs which behave identically on
Intel and AMD. To eliminate the differences between the architecutres, the
value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to
canonical value before writing instead of injecting a #GP.
Some references from Intel and AMD manuals:
According to Intel SDM description of WRMSR instruction #GP is expected on
WRMSR "If the source register contains a non-canonical address and ECX
specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
According to the AMD instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
This patch fixes CVE-2014-3610.
Cc: [email protected]
Signed-off-by: Nadav Amit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
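The non-canonical test underlying the checks described above is simple: under 48-bit virtual addressing, bits 63..47 must all be copies of bit 47, i.e. sign-extending the low 48 bits must reproduce the value. A standalone sketch of that predicate, independent of KVM and ignoring wider (LA57-style) address widths:
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
/* A value is canonical under 48-bit virtual addressing when
 * sign-extending its low 48 bits reproduces it, i.e. bits 63..47 are
 * all copies of bit 47. */
static bool is_canonical_48(uint64_t addr)
{
	return (uint64_t)((int64_t)(addr << 16) >> 16) == addr;
}
int main(void)
{
	printf("%d\n", is_canonical_48(0x00007fffffffffffULL)); /* 1 */
	printf("%d\n", is_canonical_48(0x0000800000000000ULL)); /* 0 */
	printf("%d\n", is_canonical_48(0xffff800000000000ULL)); /* 1 */
	return 0;
}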
|
static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
vmcb->control.clean &= ~(1 << bit);
}
|
static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
vmcb->control.clean &= ~(1 << bit);
}
|
C
|
linux
| 0 |
CVE-2014-0143
|
https://www.cvedetails.com/cve/CVE-2014-0143/
|
CWE-190
|
https://git.qemu.org/?p=qemu.git;a=commit;h=8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
|
8f4754ede56e3f9ea3fd7207f4a7c4453e59285b
| null |
static void tracked_request_begin(BdrvTrackedRequest *req,
BlockDriverState *bs,
int64_t offset,
unsigned int bytes, bool is_write)
{
*req = (BdrvTrackedRequest){
.bs = bs,
.offset = offset,
.bytes = bytes,
.is_write = is_write,
.co = qemu_coroutine_self(),
.serialising = false,
.overlap_offset = offset,
.overlap_bytes = bytes,
};
qemu_co_queue_init(&req->wait_queue);
QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
|
static void tracked_request_begin(BdrvTrackedRequest *req,
BlockDriverState *bs,
int64_t offset,
unsigned int bytes, bool is_write)
{
*req = (BdrvTrackedRequest){
.bs = bs,
.offset = offset,
.bytes = bytes,
.is_write = is_write,
.co = qemu_coroutine_self(),
.serialising = false,
.overlap_offset = offset,
.overlap_bytes = bytes,
};
qemu_co_queue_init(&req->wait_queue);
QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
|
C
|
qemu
| 0 |
CVE-2016-5337
|
https://www.cvedetails.com/cve/CVE-2016-5337/
|
CWE-200
|
https://git.qemu.org/?p=qemu.git;a=commit;h=844864fbae66935951529408831c2f22367a57b6
|
844864fbae66935951529408831c2f22367a57b6
| null |
static bool megasas_use_msix(MegasasState *s)
{
return s->flags & MEGASAS_MASK_USE_MSIX;
}
|
static bool megasas_use_msix(MegasasState *s)
{
return s->flags & MEGASAS_MASK_USE_MSIX;
}
|
C
|
qemu
| 0 |
CVE-2011-2790
|
https://www.cvedetails.com/cve/CVE-2011-2790/
|
CWE-399
|
https://github.com/chromium/chromium/commit/adb3498ca0b69561d8c6b60bab641de4b0e37dbf
|
adb3498ca0b69561d8c6b60bab641de4b0e37dbf
|
Reviewed by Kevin Ollivier.
[wx] Fix strokeArc and fillRoundedRect drawing, and add clipPath support.
https://bugs.webkit.org/show_bug.cgi?id=60847
git-svn-id: svn://svn.chromium.org/blink/trunk@86502 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void GraphicsContext::translate(float tx, float ty)
{
#if USE(WXGC)
if (m_data->context) {
wxGraphicsContext* gc = m_data->context->GetGraphicsContext();
gc->Translate(tx, ty);
}
#endif
}
|
void GraphicsContext::translate(float tx, float ty)
{
#if USE(WXGC)
if (m_data->context) {
wxGraphicsContext* gc = m_data->context->GetGraphicsContext();
gc->Translate(tx, ty);
}
#endif
}
|
C
|
Chrome
| 0 |
CVE-2015-6768
|
https://www.cvedetails.com/cve/CVE-2015-6768/
|
CWE-264
|
https://github.com/chromium/chromium/commit/4c8b008f055f79e622344627fed7f820375a4f01
|
4c8b008f055f79e622344627fed7f820375a4f01
|
Change Document::detach() to RELEASE_ASSERT all subframes are gone.
BUG=556724,577105
Review URL: https://codereview.chromium.org/1667573002
Cr-Commit-Position: refs/heads/master@{#373642}
|
Document::Document(const DocumentInit& initializer, DocumentClassFlags documentClasses)
: ContainerNode(0, CreateDocument)
, TreeScope(*this)
, m_hasNodesWithPlaceholderStyle(false)
, m_evaluateMediaQueriesOnStyleRecalc(false)
, m_pendingSheetLayout(NoLayoutWithPendingSheets)
, m_frame(initializer.frame())
, m_domWindow(m_frame ? m_frame->localDOMWindow() : 0)
, m_importsController(initializer.importsController())
, m_activeParserCount(0)
, m_contextFeatures(ContextFeatures::defaultSwitch())
, m_wellFormed(false)
, m_printing(false)
, m_wasPrinting(false)
, m_paginatedForScreen(false)
, m_compatibilityMode(NoQuirksMode)
, m_compatibilityModeLocked(false)
, m_executeScriptsWaitingForResourcesTask(CancellableTaskFactory::create(this, &Document::executeScriptsWaitingForResources))
, m_hasAutofocused(false)
, m_clearFocusedElementTimer(this, &Document::clearFocusedElementTimerFired)
, m_domTreeVersion(++s_globalTreeVersion)
, m_styleVersion(0)
, m_listenerTypes(0)
, m_mutationObserverTypes(0)
, m_visitedLinkState(VisitedLinkState::create(*this))
, m_visuallyOrdered(false)
, m_readyState(Complete)
, m_parsingState(FinishedParsing)
, m_gotoAnchorNeededAfterStylesheetsLoad(false)
, m_containsValidityStyleRules(false)
, m_containsPlugins(false)
, m_updateFocusAppearanceSelectionBahavior(SelectionBehaviorOnFocus::Reset)
, m_ignoreDestructiveWriteCount(0)
, m_markers(adoptPtrWillBeNoop(new DocumentMarkerController))
, m_updateFocusAppearanceTimer(this, &Document::updateFocusAppearanceTimerFired)
, m_cssTarget(nullptr)
, m_loadEventProgress(LoadEventNotRun)
, m_startTime(currentTime())
, m_scriptRunner(ScriptRunner::create(this))
, m_xmlVersion("1.0")
, m_xmlStandalone(StandaloneUnspecified)
, m_hasXMLDeclaration(0)
, m_designMode(false)
, m_isRunningExecCommand(false)
, m_hasAnnotatedRegions(false)
, m_annotatedRegionsDirty(false)
, m_useSecureKeyboardEntryWhenActive(false)
, m_documentClasses(documentClasses)
, m_isViewSource(false)
, m_sawElementsInKnownNamespaces(false)
, m_isSrcdocDocument(false)
, m_isMobileDocument(false)
, m_layoutView(0)
#if !ENABLE(OILPAN)
, m_weakFactory(this)
#endif
, m_contextDocument(initializer.contextDocument())
, m_hasFullscreenSupplement(false)
, m_loadEventDelayCount(0)
, m_loadEventDelayTimer(this, &Document::loadEventDelayTimerFired)
, m_pluginLoadingTimer(this, &Document::pluginLoadingTimerFired)
, m_documentTiming(*this)
, m_writeRecursionIsTooDeep(false)
, m_writeRecursionDepth(0)
, m_taskRunner(MainThreadTaskRunner::create(this))
, m_registrationContext(initializer.registrationContext(this))
, m_elementDataCacheClearTimer(this, &Document::elementDataCacheClearTimerFired)
, m_timeline(AnimationTimeline::create(this))
, m_templateDocumentHost(nullptr)
, m_didAssociateFormControlsTimer(this, &Document::didAssociateFormControlsTimerFired)
, m_timers(timerTaskRunner()->adoptClone())
, m_hasViewportUnits(false)
, m_parserSyncPolicy(AllowAsynchronousParsing)
, m_nodeCount(0)
{
if (m_frame) {
ASSERT(m_frame->page());
provideContextFeaturesToDocumentFrom(*this, *m_frame->page());
m_fetcher = m_frame->loader().documentLoader()->fetcher();
FrameFetchContext::provideDocumentToContext(m_fetcher->context(), this);
} else if (m_importsController) {
m_fetcher = FrameFetchContext::createContextAndFetcher(nullptr);
FrameFetchContext::provideDocumentToContext(m_fetcher->context(), this);
} else {
m_fetcher = ResourceFetcher::create(nullptr);
}
if (initializer.shouldSetURL())
setURL(initializer.url());
initSecurityContext(initializer);
initDNSPrefetch();
#if !ENABLE(OILPAN)
for (unsigned i = 0; i < WTF_ARRAY_LENGTH(m_nodeListCounts); ++i)
m_nodeListCounts[i] = 0;
#endif
InstanceCounters::incrementCounter(InstanceCounters::DocumentCounter);
m_lifecycle.advanceTo(DocumentLifecycle::Inactive);
m_styleEngine = StyleEngine::create(*this);
ASSERT(!parentDocument() || !parentDocument()->activeDOMObjectsAreSuspended());
liveDocumentSet().add(this);
}
|
Document::Document(const DocumentInit& initializer, DocumentClassFlags documentClasses)
: ContainerNode(0, CreateDocument)
, TreeScope(*this)
, m_hasNodesWithPlaceholderStyle(false)
, m_evaluateMediaQueriesOnStyleRecalc(false)
, m_pendingSheetLayout(NoLayoutWithPendingSheets)
, m_frame(initializer.frame())
, m_domWindow(m_frame ? m_frame->localDOMWindow() : 0)
, m_importsController(initializer.importsController())
, m_activeParserCount(0)
, m_contextFeatures(ContextFeatures::defaultSwitch())
, m_wellFormed(false)
, m_printing(false)
, m_wasPrinting(false)
, m_paginatedForScreen(false)
, m_compatibilityMode(NoQuirksMode)
, m_compatibilityModeLocked(false)
, m_executeScriptsWaitingForResourcesTask(CancellableTaskFactory::create(this, &Document::executeScriptsWaitingForResources))
, m_hasAutofocused(false)
, m_clearFocusedElementTimer(this, &Document::clearFocusedElementTimerFired)
, m_domTreeVersion(++s_globalTreeVersion)
, m_styleVersion(0)
, m_listenerTypes(0)
, m_mutationObserverTypes(0)
, m_visitedLinkState(VisitedLinkState::create(*this))
, m_visuallyOrdered(false)
, m_readyState(Complete)
, m_parsingState(FinishedParsing)
, m_gotoAnchorNeededAfterStylesheetsLoad(false)
, m_containsValidityStyleRules(false)
, m_containsPlugins(false)
, m_updateFocusAppearanceSelectionBahavior(SelectionBehaviorOnFocus::Reset)
, m_ignoreDestructiveWriteCount(0)
, m_markers(adoptPtrWillBeNoop(new DocumentMarkerController))
, m_updateFocusAppearanceTimer(this, &Document::updateFocusAppearanceTimerFired)
, m_cssTarget(nullptr)
, m_loadEventProgress(LoadEventNotRun)
, m_startTime(currentTime())
, m_scriptRunner(ScriptRunner::create(this))
, m_xmlVersion("1.0")
, m_xmlStandalone(StandaloneUnspecified)
, m_hasXMLDeclaration(0)
, m_designMode(false)
, m_isRunningExecCommand(false)
, m_hasAnnotatedRegions(false)
, m_annotatedRegionsDirty(false)
, m_useSecureKeyboardEntryWhenActive(false)
, m_documentClasses(documentClasses)
, m_isViewSource(false)
, m_sawElementsInKnownNamespaces(false)
, m_isSrcdocDocument(false)
, m_isMobileDocument(false)
, m_layoutView(0)
#if !ENABLE(OILPAN)
, m_weakFactory(this)
#endif
, m_contextDocument(initializer.contextDocument())
, m_hasFullscreenSupplement(false)
, m_loadEventDelayCount(0)
, m_loadEventDelayTimer(this, &Document::loadEventDelayTimerFired)
, m_pluginLoadingTimer(this, &Document::pluginLoadingTimerFired)
, m_documentTiming(*this)
, m_writeRecursionIsTooDeep(false)
, m_writeRecursionDepth(0)
, m_taskRunner(MainThreadTaskRunner::create(this))
, m_registrationContext(initializer.registrationContext(this))
, m_elementDataCacheClearTimer(this, &Document::elementDataCacheClearTimerFired)
, m_timeline(AnimationTimeline::create(this))
, m_templateDocumentHost(nullptr)
, m_didAssociateFormControlsTimer(this, &Document::didAssociateFormControlsTimerFired)
, m_timers(timerTaskRunner()->adoptClone())
, m_hasViewportUnits(false)
, m_parserSyncPolicy(AllowAsynchronousParsing)
, m_nodeCount(0)
{
if (m_frame) {
ASSERT(m_frame->page());
provideContextFeaturesToDocumentFrom(*this, *m_frame->page());
m_fetcher = m_frame->loader().documentLoader()->fetcher();
FrameFetchContext::provideDocumentToContext(m_fetcher->context(), this);
} else if (m_importsController) {
m_fetcher = FrameFetchContext::createContextAndFetcher(nullptr);
FrameFetchContext::provideDocumentToContext(m_fetcher->context(), this);
} else {
m_fetcher = ResourceFetcher::create(nullptr);
}
if (initializer.shouldSetURL())
setURL(initializer.url());
initSecurityContext(initializer);
initDNSPrefetch();
#if !ENABLE(OILPAN)
for (unsigned i = 0; i < WTF_ARRAY_LENGTH(m_nodeListCounts); ++i)
m_nodeListCounts[i] = 0;
#endif
InstanceCounters::incrementCounter(InstanceCounters::DocumentCounter);
m_lifecycle.advanceTo(DocumentLifecycle::Inactive);
m_styleEngine = StyleEngine::create(*this);
ASSERT(!parentDocument() || !parentDocument()->activeDOMObjectsAreSuspended());
liveDocumentSet().add(this);
}
|
C
|
Chrome
| 0 |
CVE-2013-0904
|
https://www.cvedetails.com/cve/CVE-2013-0904/
|
CWE-119
|
https://github.com/chromium/chromium/commit/b2b21468c1f7f08b30a7c1755316f6026c50eb2a
|
b2b21468c1f7f08b30a7c1755316f6026c50eb2a
|
Separate repaint and layout requirements of StyleDifference (Step 1)
Previously, StyleDifference was an enum in which, approximately, bigger
values imply smaller values (e.g. StyleDifferenceLayout implies
StyleDifferenceRepaint). This causes unnecessary repaints in some cases
on layout change.
Convert StyleDifference to a structure containing relatively independent
flags.
This change doesn't directly improve the result, but can make further
repaint optimizations possible.
Step 1 doesn't change any functionality. RenderStyle still generate the
legacy StyleDifference enum when comparing styles and convert the result
to the new StyleDifference. Implicit requirements are not handled during
the conversion.
Converted call sites to use the new StyleDifference according to the
following conversion rules:
- diff == StyleDifferenceEqual (&& !context) => diff.hasNoChange()
- diff == StyleDifferenceRepaint => diff.needsRepaintObjectOnly()
- diff == StyleDifferenceRepaintLayer => diff.needsRepaintLayer()
- diff == StyleDifferenceRepaint || diff == StyleDifferenceRepaintLayer => diff.needsRepaintLayer()
- diff >= StyleDifferenceRepaint => diff.needsRepaint() || diff.needsLayout()
- diff >= StyleDifferenceRepaintLayer => diff.needsRepaintLayer() || diff.needsLayout()
- diff > StyleDifferenceRepaintLayer => diff.needsLayout()
- diff == StyleDifferencePositionedMovementLayoutOnly => diff.needsPositionedMovementLayoutOnly()
- diff == StyleDifferenceLayout => diff.needsFullLayout()
BUG=358460
TEST=All existing layout tests.
[email protected], [email protected], [email protected]
Committed: https://src.chromium.org/viewvc/blink?view=rev&revision=171983
Review URL: https://codereview.chromium.org/236203020
git-svn-id: svn://svn.chromium.org/blink/trunk@172331 bbb929c8-8fbe-4397-9dbb-9b2b20218538
|
void adjust(LayoutSize& offset) const
{
LayoutUnit currLogicalLeftOffset = (m_isHorizontal ? m_colRect.x() : m_colRect.y()) - m_logicalLeft;
offset += m_isHorizontal ? LayoutSize(currLogicalLeftOffset, m_currLogicalTopOffset) : LayoutSize(m_currLogicalTopOffset, currLogicalLeftOffset);
if (m_colInfo->progressionAxis() == ColumnInfo::BlockAxis) {
if (m_isHorizontal)
offset.expand(0, m_colRect.y() - m_block.borderTop() - m_block.paddingTop());
else
offset.expand(m_colRect.x() - m_block.borderLeft() - m_block.paddingLeft(), 0);
}
}
|
void adjust(LayoutSize& offset) const
{
LayoutUnit currLogicalLeftOffset = (m_isHorizontal ? m_colRect.x() : m_colRect.y()) - m_logicalLeft;
offset += m_isHorizontal ? LayoutSize(currLogicalLeftOffset, m_currLogicalTopOffset) : LayoutSize(m_currLogicalTopOffset, currLogicalLeftOffset);
if (m_colInfo->progressionAxis() == ColumnInfo::BlockAxis) {
if (m_isHorizontal)
offset.expand(0, m_colRect.y() - m_block.borderTop() - m_block.paddingTop());
else
offset.expand(m_colRect.x() - m_block.borderLeft() - m_block.paddingLeft(), 0);
}
}
|
C
|
Chrome
| 0 |
CVE-2012-5112
|
https://www.cvedetails.com/cve/CVE-2012-5112/
|
CWE-399
|
https://github.com/chromium/chromium/commit/7bc64304a46b76928da4149693bb4e60907e54c8
|
7bc64304a46b76928da4149693bb4e60907e54c8
|
Disable tcmalloc profile files.
BUG=154983
[email protected]
NOTRY=true
Review URL: https://chromiumcodereview.appspot.com/11087041
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@161048 0039d316-1c4b-4281-b951-d872f2087c98
|
ChromeRenderProcessObserver::content_setting_rules() const {
return &content_setting_rules_;
}
|
ChromeRenderProcessObserver::content_setting_rules() const {
return &content_setting_rules_;
}
|
C
|
Chrome
| 0 |
CVE-2014-7904
|
https://www.cvedetails.com/cve/CVE-2014-7904/
|
CWE-119
|
https://github.com/chromium/chromium/commit/9965adea952e84c925de418e971b204dfda7d6e0
|
9965adea952e84c925de418e971b204dfda7d6e0
|
Replace fixed string uses of AddHeaderFromString
Uses of AddHeaderFromString() with a static string may as well be
replaced with SetHeader(). Do so.
BUG=None
Review-Url: https://codereview.chromium.org/2236933005
Cr-Commit-Position: refs/heads/master@{#418161}
|
MockNetworkTransaction::MockNetworkTransaction(RequestPriority priority,
MockNetworkLayer* factory)
: request_(nullptr),
data_cursor_(0),
content_length_(0),
priority_(priority),
read_handler_(nullptr),
websocket_handshake_stream_create_helper_(nullptr),
transaction_factory_(factory->AsWeakPtr()),
received_bytes_(0),
sent_bytes_(0),
socket_log_id_(NetLog::Source::kInvalidId),
done_reading_called_(false),
weak_factory_(this) {}
|
MockNetworkTransaction::MockNetworkTransaction(RequestPriority priority,
MockNetworkLayer* factory)
: request_(nullptr),
data_cursor_(0),
content_length_(0),
priority_(priority),
read_handler_(nullptr),
websocket_handshake_stream_create_helper_(nullptr),
transaction_factory_(factory->AsWeakPtr()),
received_bytes_(0),
sent_bytes_(0),
socket_log_id_(NetLog::Source::kInvalidId),
done_reading_called_(false),
weak_factory_(this) {}
|
C
|
Chrome
| 0 |
CVE-2015-6773
|
https://www.cvedetails.com/cve/CVE-2015-6773/
|
CWE-119
|
https://github.com/chromium/chromium/commit/33827275411b33371e7bb750cce20f11de85002d
|
33827275411b33371e7bb750cce20f11de85002d
|
Move SelectionTemplate::is_handle_visible_ to FrameSelection
This patch moves |is_handle_visible_| to |FrameSelection| from |SelectionTemplate|
since handle visibility is used only for setting |FrameSelection|, hence it is
a redundant member variable of |SelectionTemplate|.
Bug: 742093
Change-Id: I3add4da3844fb40be34dcb4d4b46b5fa6fed1d7e
Reviewed-on: https://chromium-review.googlesource.com/595389
Commit-Queue: Yoshifumi Inoue <[email protected]>
Reviewed-by: Xiaocheng Hu <[email protected]>
Reviewed-by: Kent Tamura <[email protected]>
Cr-Commit-Position: refs/heads/master@{#491660}
|
bool InputMethodController::SetEditableSelectionOffsets(
const PlainTextRange& selection_offsets) {
return SetEditableSelectionOffsets(selection_offsets,
TypingContinuation::kEnd);
}
|
bool InputMethodController::SetEditableSelectionOffsets(
const PlainTextRange& selection_offsets) {
return SetEditableSelectionOffsets(selection_offsets,
TypingContinuation::kEnd);
}
|
C
|
Chrome
| 0 |
CVE-2017-16939
|
https://www.cvedetails.com/cve/CVE-2017-16939/
|
CWE-416
|
https://github.com/torvalds/linux/commit/1137b5e2529a8f5ca8ee709288ecba3e68044df2
|
1137b5e2529a8f5ca8ee709288ecba3e68044df2
|
ipsec: Fix aborted xfrm policy dump crash
An independent security researcher, Mohamed Ghannam, has reported
this vulnerability to Beyond Security's SecuriTeam Secure Disclosure
program.
The xfrm_dump_policy_done function expects xfrm_dump_policy to
have been called at least once or it will crash. This can be
triggered if a dump fails because the target socket's receive
buffer is full.
This patch fixes it by using the cb->start mechanism to ensure that
the initialisation is always done regardless of the buffer situation.
Fixes: 12a169e7d8f4 ("ipsec: Put dumpers on the dump list")
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
|
static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
struct net *net = xp_net(xp);
struct sk_buff *skb;
skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_polexpire(skb, xp, dir, c) < 0)
BUG();
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
}
|
static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
struct net *net = xp_net(xp);
struct sk_buff *skb;
skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
if (build_polexpire(skb, xp, dir, c) < 0)
BUG();
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
}
|
C
|
linux
| 0 |
CVE-2011-4621
|
https://www.cvedetails.com/cve/CVE-2011-4621/
| null |
https://github.com/torvalds/linux/commit/f26f9aff6aaf67e9a430d16c266f91b13a5bff64
|
f26f9aff6aaf67e9a430d16c266f91b13a5bff64
|
Sched: fix skip_clock_update optimization
idle_balance() drops/retakes rq->lock, leaving the previous task
vulnerable to set_tsk_need_resched(). Clear it after we return
from balancing instead, and in setup_thread_stack() as well, so
no successfully descheduled or never scheduled task has it set.
Need resched confused the skip_clock_update logic, which assumes
that the next call to update_rq_clock() will come nearly immediately
after being set. Make the optimization robust against the waking
a sleeper before it sucessfully deschedules case by checking that
the current task has not been dequeued before setting the flag,
since it is that useless clock update we're trying to save, and
clear unconditionally in schedule() proper instead of conditionally
in put_prev_task().
Signed-off-by: Mike Galbraith <[email protected]>
Reported-by: Bjoern B. Brandenburg <[email protected]>
Tested-by: Yong Zhang <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Cc: [email protected]
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
inline int task_curr(const struct task_struct *p)
{
return cpu_curr(task_cpu(p)) == p;
}
|
inline int task_curr(const struct task_struct *p)
{
return cpu_curr(task_cpu(p)) == p;
}
|
C
|
linux
| 0 |
CVE-2016-10129
|
https://www.cvedetails.com/cve/CVE-2016-10129/
|
CWE-476
|
https://github.com/libgit2/libgit2/commit/2fdef641fd0dd2828bd948234ae86de75221a11a
|
2fdef641fd0dd2828bd948234ae86de75221a11a
|
smart_pkt: treat empty packet lines as error
The Git protocol does not specify what should happen in the case
of an empty packet line (that is a packet line "0004"). We
currently indicate success, but do not return a packet in the
case where we hit an empty line. The smart protocol was not
prepared to handle such packets in all cases, though, resulting
in a `NULL` pointer dereference.
Fix the issue by returning an error instead. As this kind of
packet is not even specified by upstream, this is the right
thing to do.
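For context, a pkt-line starts with a four-hex-digit length that counts the four length bytes themselves, so "0000" is a flush packet and "0004" would be a line with no payload at all. Below is a small sketch of a length parser that rejects the empty case, as the fix above does; the names and the exact set of special values are simplified and this is not the libgit2 parser.
#include <stddef.h>
enum { PKT_ERR = -1, PKT_FLUSH = 0, PKT_DATA = 1 };
static int hexval(char c)
{
	if (c >= '0' && c <= '9') return c - '0';
	if (c >= 'a' && c <= 'f') return c - 'a' + 10;
	if (c >= 'A' && c <= 'F') return c - 'A' + 10;
	return -1;
}
/* Parse the four-hex-digit pkt-line length.  "0000" is a flush packet;
 * "0004" would be an empty line and, as in the fix above, is treated
 * as an error; other lengths below 4 are malformed in this sketch. */
static int parse_pkt_len(const char *p, size_t *out_payload)
{
	unsigned int len = 0;
	int i;
	for (i = 0; i < 4; i++) {
		int v = hexval(p[i]);
		if (v < 0)
			return PKT_ERR;
		len = (len << 4) | (unsigned int)v;
	}
	if (len == 0) {
		*out_payload = 0;
		return PKT_FLUSH;
	}
	if (len <= 4)
		return PKT_ERR;
	*out_payload = len - 4;  /* payload bytes that follow the header */
	return PKT_DATA;
}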
|
int git_smart__negotiate_fetch(git_transport *transport, git_repository *repo, const git_remote_head * const *wants, size_t count)
{
transport_smart *t = (transport_smart *)transport;
gitno_buffer *buf = &t->buffer;
git_buf data = GIT_BUF_INIT;
git_revwalk *walk = NULL;
int error = -1, pkt_type;
unsigned int i;
git_oid oid;
if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
return error;
if ((error = fetch_setup_walk(&walk, repo)) < 0)
goto on_error;
/*
* Our support for ACK extensions is simply to parse them. On
* the first ACK we will accept that as enough common
* objects. We give up if we haven't found an answer in the
* first 256 we send.
*/
i = 0;
while (i < 256) {
error = git_revwalk_next(&oid, walk);
if (error < 0) {
if (GIT_ITEROVER == error)
break;
goto on_error;
}
git_pkt_buffer_have(&oid, &data);
i++;
if (i % 20 == 0) {
if (t->cancelled.val) {
giterr_set(GITERR_NET, "The fetch was cancelled by the user");
error = GIT_EUSER;
goto on_error;
}
git_pkt_buffer_flush(&data);
if (git_buf_oom(&data)) {
error = -1;
goto on_error;
}
if ((error = git_smart__negotiation_step(&t->parent, data.ptr, data.size)) < 0)
goto on_error;
git_buf_clear(&data);
if (t->caps.multi_ack || t->caps.multi_ack_detailed) {
if ((error = store_common(t)) < 0)
goto on_error;
} else {
pkt_type = recv_pkt(NULL, buf);
if (pkt_type == GIT_PKT_ACK) {
break;
} else if (pkt_type == GIT_PKT_NAK) {
continue;
} else if (pkt_type < 0) {
/* recv_pkt returned an error */
error = pkt_type;
goto on_error;
} else {
giterr_set(GITERR_NET, "Unexpected pkt type");
error = -1;
goto on_error;
}
}
}
if (t->common.length > 0)
break;
if (i % 20 == 0 && t->rpc) {
git_pkt_ack *pkt;
unsigned int j;
if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
goto on_error;
git_vector_foreach(&t->common, j, pkt) {
if ((error = git_pkt_buffer_have(&pkt->oid, &data)) < 0)
goto on_error;
}
if (git_buf_oom(&data)) {
error = -1;
goto on_error;
}
}
}
/* Tell the other end that we're done negotiating */
if (t->rpc && t->common.length > 0) {
git_pkt_ack *pkt;
unsigned int j;
if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
goto on_error;
git_vector_foreach(&t->common, j, pkt) {
if ((error = git_pkt_buffer_have(&pkt->oid, &data)) < 0)
goto on_error;
}
if (git_buf_oom(&data)) {
error = -1;
goto on_error;
}
}
if ((error = git_pkt_buffer_done(&data)) < 0)
goto on_error;
if (t->cancelled.val) {
giterr_set(GITERR_NET, "The fetch was cancelled by the user");
error = GIT_EUSER;
goto on_error;
}
if ((error = git_smart__negotiation_step(&t->parent, data.ptr, data.size)) < 0)
goto on_error;
git_buf_free(&data);
git_revwalk_free(walk);
/* Now let's eat up whatever the server gives us */
if (!t->caps.multi_ack && !t->caps.multi_ack_detailed) {
pkt_type = recv_pkt(NULL, buf);
if (pkt_type < 0) {
return pkt_type;
} else if (pkt_type != GIT_PKT_ACK && pkt_type != GIT_PKT_NAK) {
giterr_set(GITERR_NET, "Unexpected pkt type");
return -1;
}
} else {
error = wait_while_ack(buf);
}
return error;
on_error:
git_revwalk_free(walk);
git_buf_free(&data);
return error;
}
|
int git_smart__negotiate_fetch(git_transport *transport, git_repository *repo, const git_remote_head * const *wants, size_t count)
{
transport_smart *t = (transport_smart *)transport;
gitno_buffer *buf = &t->buffer;
git_buf data = GIT_BUF_INIT;
git_revwalk *walk = NULL;
int error = -1, pkt_type;
unsigned int i;
git_oid oid;
if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
return error;
if ((error = fetch_setup_walk(&walk, repo)) < 0)
goto on_error;
/*
* Our support for ACK extensions is simply to parse them. On
* the first ACK we will accept that as enough common
* objects. We give up if we haven't found an answer in the
* first 256 we send.
*/
i = 0;
while (i < 256) {
error = git_revwalk_next(&oid, walk);
if (error < 0) {
if (GIT_ITEROVER == error)
break;
goto on_error;
}
git_pkt_buffer_have(&oid, &data);
i++;
if (i % 20 == 0) {
if (t->cancelled.val) {
giterr_set(GITERR_NET, "The fetch was cancelled by the user");
error = GIT_EUSER;
goto on_error;
}
git_pkt_buffer_flush(&data);
if (git_buf_oom(&data)) {
error = -1;
goto on_error;
}
if ((error = git_smart__negotiation_step(&t->parent, data.ptr, data.size)) < 0)
goto on_error;
git_buf_clear(&data);
if (t->caps.multi_ack || t->caps.multi_ack_detailed) {
if ((error = store_common(t)) < 0)
goto on_error;
} else {
pkt_type = recv_pkt(NULL, buf);
if (pkt_type == GIT_PKT_ACK) {
break;
} else if (pkt_type == GIT_PKT_NAK) {
continue;
} else if (pkt_type < 0) {
/* recv_pkt returned an error */
error = pkt_type;
goto on_error;
} else {
giterr_set(GITERR_NET, "Unexpected pkt type");
error = -1;
goto on_error;
}
}
}
if (t->common.length > 0)
break;
if (i % 20 == 0 && t->rpc) {
git_pkt_ack *pkt;
unsigned int j;
if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
goto on_error;
git_vector_foreach(&t->common, j, pkt) {
if ((error = git_pkt_buffer_have(&pkt->oid, &data)) < 0)
goto on_error;
}
if (git_buf_oom(&data)) {
error = -1;
goto on_error;
}
}
}
/* Tell the other end that we're done negotiating */
if (t->rpc && t->common.length > 0) {
git_pkt_ack *pkt;
unsigned int j;
if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
goto on_error;
git_vector_foreach(&t->common, j, pkt) {
if ((error = git_pkt_buffer_have(&pkt->oid, &data)) < 0)
goto on_error;
}
if (git_buf_oom(&data)) {
error = -1;
goto on_error;
}
}
if ((error = git_pkt_buffer_done(&data)) < 0)
goto on_error;
if (t->cancelled.val) {
giterr_set(GITERR_NET, "The fetch was cancelled by the user");
error = GIT_EUSER;
goto on_error;
}
if ((error = git_smart__negotiation_step(&t->parent, data.ptr, data.size)) < 0)
goto on_error;
git_buf_free(&data);
git_revwalk_free(walk);
/* Now let's eat up whatever the server gives us */
if (!t->caps.multi_ack && !t->caps.multi_ack_detailed) {
pkt_type = recv_pkt(NULL, buf);
if (pkt_type < 0) {
return pkt_type;
} else if (pkt_type != GIT_PKT_ACK && pkt_type != GIT_PKT_NAK) {
giterr_set(GITERR_NET, "Unexpected pkt type");
return -1;
}
} else {
error = wait_while_ack(buf);
}
return error;
on_error:
git_revwalk_free(walk);
git_buf_free(&data);
return error;
}
|
C
|
libgit2
| 0 |
CVE-2016-10165
|
https://www.cvedetails.com/cve/CVE-2016-10165/
|
CWE-125
|
https://github.com/mm2/Little-CMS/commit/5ca71a7bc18b6897ab21d815d15e218e204581e2
|
5ca71a7bc18b6897ab21d815d15e218e204581e2
|
Added an extra check to MLU bounds
Thanks to Ibrahim el-sayed for spotting the bug
|
void *Type_U16Fixed16_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag)
{
cmsFloat64Number* array_double;
cmsUInt32Number v;
cmsUInt32Number i, n;
*nItems = 0;
n = SizeOfTag / sizeof(cmsUInt32Number);
array_double = (cmsFloat64Number*) _cmsCalloc(self ->ContextID, n, sizeof(cmsFloat64Number));
if (array_double == NULL) return NULL;
for (i=0; i < n; i++) {
if (!_cmsReadUInt32Number(io, &v)) {
_cmsFree(self ->ContextID, (void*) array_double);
return NULL;
}
array_double[i] = (cmsFloat64Number) (v / 65536.0);
}
*nItems = n;
return (void*) array_double;
}
|
void *Type_U16Fixed16_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag)
{
cmsFloat64Number* array_double;
cmsUInt32Number v;
cmsUInt32Number i, n;
*nItems = 0;
n = SizeOfTag / sizeof(cmsUInt32Number);
array_double = (cmsFloat64Number*) _cmsCalloc(self ->ContextID, n, sizeof(cmsFloat64Number));
if (array_double == NULL) return NULL;
for (i=0; i < n; i++) {
if (!_cmsReadUInt32Number(io, &v)) {
_cmsFree(self ->ContextID, (void*) array_double);
return NULL;
}
array_double[i] = (cmsFloat64Number) (v / 65536.0);
}
*nItems = n;
return (void*) array_double;
}
|
C
|
Little-CMS
| 0 |
CVE-2018-12326
|
https://www.cvedetails.com/cve/CVE-2018-12326/
|
CWE-119
|
https://github.com/antirez/redis/commit/9fdcc15962f9ff4baebe6fdd947816f43f730d50
|
9fdcc15962f9ff4baebe6fdd947816f43f730d50
|
Security: fix redis-cli buffer overflow.
Thanks to Fakhri Zulkifli for reporting it.
The fix switched to dynamic allocation, copying the final prompt in the
static buffer only at the end.
|
static long getLongInfoField(char *info, char *field) {
char *value = getInfoField(info,field);
long l;
if (!value) return LONG_MIN;
l = strtol(value,NULL,10);
zfree(value);
return l;
}
|
static long getLongInfoField(char *info, char *field) {
char *value = getInfoField(info,field);
long l;
if (!value) return LONG_MIN;
l = strtol(value,NULL,10);
zfree(value);
return l;
}
|
C
|
redis
| 0 |
CVE-2017-5019
|
https://www.cvedetails.com/cve/CVE-2017-5019/
|
CWE-416
|
https://github.com/chromium/chromium/commit/f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
|
f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
|
Convert FrameHostMsg_DidAddMessageToConsole to Mojo.
Note: Since this required changing the test
RenderViewImplTest.DispatchBeforeUnloadCanDetachFrame, I manually
re-introduced https://crbug.com/666714 locally (the bug the test was
added for), and reran the test to confirm that it still covers the bug.
Bug: 786836
Change-Id: I110668fa6f0f261fd2ac36bb91a8d8b31c99f4f1
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1526270
Commit-Queue: Lowell Manners <[email protected]>
Reviewed-by: Daniel Cheng <[email protected]>
Reviewed-by: Camille Lamy <[email protected]>
Cr-Commit-Position: refs/heads/master@{#653137}
|
void RenderFrameImpl::DidCompleteResponse(
int request_id,
const network::URLLoaderCompletionStatus& status) {
for (auto& observer : observers_)
observer.DidCompleteResponse(request_id, status);
}
|
void RenderFrameImpl::DidCompleteResponse(
int request_id,
const network::URLLoaderCompletionStatus& status) {
for (auto& observer : observers_)
observer.DidCompleteResponse(request_id, status);
}
|
C
|
Chrome
| 0 |
CVE-2016-0850
|
https://www.cvedetails.com/cve/CVE-2016-0850/
|
CWE-264
|
https://android.googlesource.com/platform/external/bluetooth/bluedroid/+/c677ee92595335233eb0e7b59809a1a94e7a678a
|
c677ee92595335233eb0e7b59809a1a94e7a678a
|
DO NOT MERGE Remove Porsche car-kit pairing workaround
Bug: 26551752
Change-Id: I14c5e3fcda0849874c8a94e48aeb7d09585617e1
|
BOOLEAN BTM_GetSecurityFlagsByTransport (BD_ADDR bd_addr, UINT8 * p_sec_flags,
tBT_TRANSPORT transport)
{
tBTM_SEC_DEV_REC *p_dev_rec;
if ((p_dev_rec = btm_find_dev (bd_addr)) != NULL)
{
if (transport == BT_TRANSPORT_BR_EDR)
*p_sec_flags = (UINT8) p_dev_rec->sec_flags;
else
*p_sec_flags = (UINT8) (p_dev_rec->sec_flags >> 8);
return(TRUE);
}
BTM_TRACE_ERROR ("BTM_GetSecurityFlags false");
return(FALSE);
}
|
BOOLEAN BTM_GetSecurityFlagsByTransport (BD_ADDR bd_addr, UINT8 * p_sec_flags,
tBT_TRANSPORT transport)
{
tBTM_SEC_DEV_REC *p_dev_rec;
if ((p_dev_rec = btm_find_dev (bd_addr)) != NULL)
{
if (transport == BT_TRANSPORT_BR_EDR)
*p_sec_flags = (UINT8) p_dev_rec->sec_flags;
else
*p_sec_flags = (UINT8) (p_dev_rec->sec_flags >> 8);
return(TRUE);
}
BTM_TRACE_ERROR ("BTM_GetSecurityFlags false");
return(FALSE);
}
|
C
|
Android
| 0 |
CVE-2019-5755
|
https://www.cvedetails.com/cve/CVE-2019-5755/
|
CWE-189
|
https://github.com/chromium/chromium/commit/971548cdca2d4c0a6fedd3db0c94372c2a27eac3
|
971548cdca2d4c0a6fedd3db0c94372c2a27eac3
|
Make MediaStreamDispatcherHost per-request instead of per-frame.
Instead of having RenderFrameHost own a single MSDH to handle all
requests from a frame, MSDH objects will be owned by a strong binding.
A consequence of this is that an additional requester ID is added to
requests to MediaStreamManager, so that an MSDH is able to cancel only
requests generated by it.
In practice, MSDH will continue to be per frame in most cases since
each frame normally makes a single request for an MSDH object.
This fixes a lifetime issue caused by the IO thread executing tasks
after the RenderFrameHost dies.
Drive-by: Fix some minor lint issues.
Bug: 912520
Change-Id: I52742ffc98b9fc57ce8e6f5093a61aed86d3e516
Reviewed-on: https://chromium-review.googlesource.com/c/1369799
Reviewed-by: Emircan Uysaler <[email protected]>
Reviewed-by: Ken Buchanan <[email protected]>
Reviewed-by: Olga Sharonova <[email protected]>
Commit-Queue: Guido Urdaneta <[email protected]>
Cr-Commit-Position: refs/heads/master@{#616347}
|
void RenderFrameHostImpl::SwapOut(
RenderFrameProxyHost* proxy,
bool is_loading) {
TRACE_EVENT_ASYNC_BEGIN1("navigation", "RenderFrameHostImpl::SwapOut", this,
"frame_tree_node",
frame_tree_node_->frame_tree_node_id());
if (unload_state_ != UnloadState::NotRun) {
NOTREACHED() << "RFH should be in default state when calling SwapOut.";
return;
}
if (swapout_event_monitor_timeout_) {
swapout_event_monitor_timeout_->Start(base::TimeDelta::FromMilliseconds(
RenderViewHostImpl::kUnloadTimeoutMS));
}
CHECK(proxy);
is_waiting_for_swapout_ack_ = true;
unload_state_ = UnloadState::InProgress;
if (IsRenderFrameLive()) {
FrameReplicationState replication_state =
proxy->frame_tree_node()->current_replication_state();
Send(new FrameMsg_SwapOut(routing_id_, proxy->GetRoutingID(), is_loading,
replication_state));
proxy->set_render_frame_proxy_created(true);
StartPendingDeletionOnSubtree();
}
PendingDeletionCheckCompletedOnSubtree();
if (web_ui())
web_ui()->RenderFrameHostSwappingOut();
}
|
void RenderFrameHostImpl::SwapOut(
RenderFrameProxyHost* proxy,
bool is_loading) {
TRACE_EVENT_ASYNC_BEGIN1("navigation", "RenderFrameHostImpl::SwapOut", this,
"frame_tree_node",
frame_tree_node_->frame_tree_node_id());
if (unload_state_ != UnloadState::NotRun) {
NOTREACHED() << "RFH should be in default state when calling SwapOut.";
return;
}
if (swapout_event_monitor_timeout_) {
swapout_event_monitor_timeout_->Start(base::TimeDelta::FromMilliseconds(
RenderViewHostImpl::kUnloadTimeoutMS));
}
CHECK(proxy);
is_waiting_for_swapout_ack_ = true;
unload_state_ = UnloadState::InProgress;
if (IsRenderFrameLive()) {
FrameReplicationState replication_state =
proxy->frame_tree_node()->current_replication_state();
Send(new FrameMsg_SwapOut(routing_id_, proxy->GetRoutingID(), is_loading,
replication_state));
proxy->set_render_frame_proxy_created(true);
StartPendingDeletionOnSubtree();
}
PendingDeletionCheckCompletedOnSubtree();
if (web_ui())
web_ui()->RenderFrameHostSwappingOut();
}
|
C
|
Chrome
| 0 |
CVE-2016-1691
|
https://www.cvedetails.com/cve/CVE-2016-1691/
|
CWE-119
|
https://github.com/chromium/chromium/commit/e3aa8a56706c4abe208934d5c294f7b594b8b693
|
e3aa8a56706c4abe208934d5c294f7b594b8b693
|
Enforce the WebUsbAllowDevicesForUrls policy
This change modifies UsbChooserContext to use the UsbAllowDevicesForUrls
class to consider devices allowed by the WebUsbAllowDevicesForUrls
policy. The WebUsbAllowDevicesForUrls policy overrides the other WebUSB
policies. Unit tests are also added to ensure that the policy is being
enforced correctly.
The design document for this feature is found at:
https://docs.google.com/document/d/1MPvsrWiVD_jAC8ELyk8njFpy6j1thfVU5aWT3TCWE8w
Bug: 854329
Change-Id: I5f82e662ca9dc544da5918eae766b5535a31296b
Reviewed-on: https://chromium-review.googlesource.com/c/1259289
Commit-Queue: Ovidio Henriquez <[email protected]>
Reviewed-by: Reilly Grant <[email protected]>
Reviewed-by: Julian Pastarmov <[email protected]>
Cr-Commit-Position: refs/heads/master@{#597926}
|
explicit MakeRequestFail(const std::string& host) : host_(host) {
base::RunLoop run_loop;
base::PostTaskWithTraitsAndReply(FROM_HERE, {BrowserThread::IO},
base::BindOnce(MakeRequestFailOnIO, host_),
run_loop.QuitClosure());
run_loop.Run();
}
|
explicit MakeRequestFail(const std::string& host) : host_(host) {
base::RunLoop run_loop;
base::PostTaskWithTraitsAndReply(FROM_HERE, {BrowserThread::IO},
base::BindOnce(MakeRequestFailOnIO, host_),
run_loop.QuitClosure());
run_loop.Run();
}
|
C
|
Chrome
| 0 |
CVE-2018-12904
|
https://www.cvedetails.com/cve/CVE-2018-12904/
| null |
https://github.com/torvalds/linux/commit/727ba748e110b4de50d142edca9d6a9b7e6111d8
|
727ba748e110b4de50d142edca9d6a9b7e6111d8
|
kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside a L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: [email protected]
Signed-off-by: Felix Wilhelm <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
static int handle_vmwrite(struct kvm_vcpu *vcpu)
{
unsigned long field;
gva_t gva;
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
/* The value to write might be 32 or 64 bits, depending on L1's long
* mode, and eventually we need to write that into a field of several
* possible lengths. The code below first zero-extends the value to 64
* bit (field_value), and then copies only the appropriate number of
* bits into the vmcs12 field.
*/
u64 field_value = 0;
struct x86_exception e;
if (!nested_vmx_check_permission(vcpu))
return 1;
if (!nested_vmx_check_vmcs12(vcpu))
return kvm_skip_emulated_instruction(vcpu);
if (vmx_instruction_info & (1u << 10))
field_value = kvm_register_readl(vcpu,
(((vmx_instruction_info) >> 3) & 0xf));
else {
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, false, &gva))
return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
&field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
}
field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
/*
* If the vCPU supports "VMWRITE to any supported field in the
* VMCS," then the "read-only" fields are actually read/write.
*/
if (vmcs_field_readonly(field) &&
!nested_cpu_has_vmwrite_any_field(vcpu)) {
nested_vmx_failValid(vcpu,
VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
return kvm_skip_emulated_instruction(vcpu);
}
if (vmcs12_write_any(vcpu, field, field_value) < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
return kvm_skip_emulated_instruction(vcpu);
}
switch (field) {
#define SHADOW_FIELD_RW(x) case x:
#include "vmx_shadow_fields.h"
/*
* The fields that can be updated by L1 without a vmexit are
* always updated in the vmcs02, the others go down the slow
* path of prepare_vmcs02.
*/
break;
default:
vmx->nested.dirty_vmcs12 = true;
break;
}
nested_vmx_succeed(vcpu);
return kvm_skip_emulated_instruction(vcpu);
}
|
static int handle_vmwrite(struct kvm_vcpu *vcpu)
{
unsigned long field;
gva_t gva;
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
/* The value to write might be 32 or 64 bits, depending on L1's long
* mode, and eventually we need to write that into a field of several
* possible lengths. The code below first zero-extends the value to 64
* bit (field_value), and then copies only the appropriate number of
* bits into the vmcs12 field.
*/
u64 field_value = 0;
struct x86_exception e;
if (!nested_vmx_check_permission(vcpu))
return 1;
if (!nested_vmx_check_vmcs12(vcpu))
return kvm_skip_emulated_instruction(vcpu);
if (vmx_instruction_info & (1u << 10))
field_value = kvm_register_readl(vcpu,
(((vmx_instruction_info) >> 3) & 0xf));
else {
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, false, &gva))
return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
&field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
}
field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
/*
* If the vCPU supports "VMWRITE to any supported field in the
* VMCS," then the "read-only" fields are actually read/write.
*/
if (vmcs_field_readonly(field) &&
!nested_cpu_has_vmwrite_any_field(vcpu)) {
nested_vmx_failValid(vcpu,
VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
return kvm_skip_emulated_instruction(vcpu);
}
if (vmcs12_write_any(vcpu, field, field_value) < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
return kvm_skip_emulated_instruction(vcpu);
}
switch (field) {
#define SHADOW_FIELD_RW(x) case x:
#include "vmx_shadow_fields.h"
/*
* The fields that can be updated by L1 without a vmexit are
* always updated in the vmcs02, the others go down the slow
* path of prepare_vmcs02.
*/
break;
default:
vmx->nested.dirty_vmcs12 = true;
break;
}
nested_vmx_succeed(vcpu);
return kvm_skip_emulated_instruction(vcpu);
}
|
C
|
linux
| 0 |
CVE-2016-2464
|
https://www.cvedetails.com/cve/CVE-2016-2464/
|
CWE-20
|
https://android.googlesource.com/platform/external/libvpx/+/65c49d5b382de4085ee5668732bcb0f6ecaf7148
|
65c49d5b382de4085ee5668732bcb0f6ecaf7148
|
Fix ParseElementHeader to support 0 payload elements
Cherry-pick'ing Change 5c83bbec9a5f6f00a349674ddad85b753d2ea219
from upstream. This fixes regression in some edge cases for mkv
playback.
BUG=26499283
Change-Id: I88de03219a3d941b6b2f251d384e29c36bdd4d9b
|
long long Segment::GetDuration() const {
assert(m_pInfo);
return m_pInfo->GetDuration();
}
|
long long Segment::GetDuration() const {
assert(m_pInfo);
return m_pInfo->GetDuration();
}
|
C
|
Android
| 0 |
CVE-2011-3638
|
https://www.cvedetails.com/cve/CVE-2011-3638/
| null |
https://github.com/torvalds/linux/commit/667eff35a1f56fa74ce98a0c7c29a40adc1ba4e3
|
667eff35a1f56fa74ce98a0c7c29a40adc1ba4e3
|
ext4: reimplement convert and split_unwritten
Reimplement ext4_ext_convert_to_initialized() and
ext4_split_unwritten_extents() using ext4_split_extent()
Signed-off-by: Yongqiang Yang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
Tested-by: Allison Henderson <[email protected]>
|
static int ext4_ext_try_to_merge(struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *ex) {
struct ext4_extent_header *eh;
unsigned int depth;
int merge_done = 0;
int ret = 0;
depth = ext_depth(inode);
BUG_ON(path[depth].p_hdr == NULL);
eh = path[depth].p_hdr;
if (ex > EXT_FIRST_EXTENT(eh))
merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
if (!merge_done)
ret = ext4_ext_try_to_merge_right(inode, path, ex);
return ret;
}
|
static int ext4_ext_try_to_merge(struct inode *inode,
struct ext4_ext_path *path,
struct ext4_extent *ex) {
struct ext4_extent_header *eh;
unsigned int depth;
int merge_done = 0;
int ret = 0;
depth = ext_depth(inode);
BUG_ON(path[depth].p_hdr == NULL);
eh = path[depth].p_hdr;
if (ex > EXT_FIRST_EXTENT(eh))
merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
if (!merge_done)
ret = ext4_ext_try_to_merge_right(inode, path, ex);
return ret;
}
|
C
|
linux
| 0 |
CVE-2018-6177
|
https://www.cvedetails.com/cve/CVE-2018-6177/
|
CWE-200
|
https://github.com/chromium/chromium/commit/4504a474c069d07104237d0c03bfce7b29a42de6
|
4504a474c069d07104237d0c03bfce7b29a42de6
|
defeat cors attacks on audio/video tags
Neutralize error messages and fire no progress events
until media metadata has been loaded for media loaded
from cross-origin locations.
Bug: 828265, 826187
Change-Id: Iaf15ef38676403687d6a913cbdc84f2d70a6f5c6
Reviewed-on: https://chromium-review.googlesource.com/1015794
Reviewed-by: Mounir Lamouri <[email protected]>
Reviewed-by: Dale Curtis <[email protected]>
Commit-Queue: Fredrik Hubinette <[email protected]>
Cr-Commit-Position: refs/heads/master@{#557312}
|
void HTMLMediaElement::AudioSourceProviderImpl::ProvideInput(
AudioBus* bus,
size_t frames_to_process) {
DCHECK(bus);
MutexTryLocker try_locker(provide_input_lock);
if (!try_locker.Locked() || !web_audio_source_provider_ || !client_.Get()) {
bus->Zero();
return;
}
size_t n = bus->NumberOfChannels();
WebVector<float*> web_audio_data(n);
for (size_t i = 0; i < n; ++i)
web_audio_data[i] = bus->Channel(i)->MutableData();
web_audio_source_provider_->ProvideInput(web_audio_data, frames_to_process);
}
|
void HTMLMediaElement::AudioSourceProviderImpl::ProvideInput(
AudioBus* bus,
size_t frames_to_process) {
DCHECK(bus);
MutexTryLocker try_locker(provide_input_lock);
if (!try_locker.Locked() || !web_audio_source_provider_ || !client_.Get()) {
bus->Zero();
return;
}
size_t n = bus->NumberOfChannels();
WebVector<float*> web_audio_data(n);
for (size_t i = 0; i < n; ++i)
web_audio_data[i] = bus->Channel(i)->MutableData();
web_audio_source_provider_->ProvideInput(web_audio_data, frames_to_process);
}
|
C
|
Chrome
| 0 |
CVE-2015-2831
|
https://www.cvedetails.com/cve/CVE-2015-2831/
|
CWE-119
|
https://github.com/kmatheussen/das_watchdog/commit/bd20bb02e75e2c0483832b52f2577253febfb690
|
bd20bb02e75e2c0483832b52f2577253febfb690
|
Fix memory overflow if the name of an environment is larger than 500 characters. Bug found by Adam Sampson.
|
static int *get_userlist(struct proclistlist *pll, int *num_users){
int *ret=calloc(sizeof(int),pll->length);
int lokke;
*num_users=0;
for(lokke=0;lokke<pll->length;lokke++){
glibtop_proc_uid uid;
glibtop_get_proc_uid(&uid,pll->proclist[lokke].pid);
if( ! is_a_member(uid.uid,ret,*num_users)){ // ???
ret[*num_users]=uid.uid;
(*num_users)++;
}
}
return ret;
}
|
static int *get_userlist(struct proclistlist *pll, int *num_users){
int *ret=calloc(sizeof(int),pll->length);
int lokke;
*num_users=0;
for(lokke=0;lokke<pll->length;lokke++){
glibtop_proc_uid uid;
glibtop_get_proc_uid(&uid,pll->proclist[lokke].pid);
if( ! is_a_member(uid.uid,ret,*num_users)){ // ???
ret[*num_users]=uid.uid;
(*num_users)++;
}
}
return ret;
}
|
C
|
das_watchdog
| 0 |
CVE-2013-2128
|
https://www.cvedetails.com/cve/CVE-2013-2128/
|
CWE-119
|
https://github.com/torvalds/linux/commit/baff42ab1494528907bf4d5870359e31711746ae
|
baff42ab1494528907bf4d5870359e31711746ae
|
net: Fix oops from tcp_collapse() when using splice()
tcp_read_sock() can have a eat skbs without immediately advancing copied_seq.
This can cause a panic in tcp_collapse() if it is called as a result
of the recv_actor dropping the socket lock.
A userspace program that splices data from a socket to either another
socket or to a file can trigger this bug.
Signed-off-by: Steven J. Magnani <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size)
{
struct sock *sk = sock->sk;
struct iovec *iov;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int iovlen, flags;
int mss_now, size_goal;
int sg, err, copied;
long timeo;
lock_sock(sk);
TCP_CHECK_TIMER(sk);
flags = msg->msg_flags;
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
/* Wait for a connection to finish. */
if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
goto out_err;
/* This should be in poll */
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
mss_now = tcp_send_mss(sk, &size_goal, flags);
/* Ok commence sending. */
iovlen = msg->msg_iovlen;
iov = msg->msg_iov;
copied = 0;
err = -EPIPE;
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto out_err;
sg = sk->sk_route_caps & NETIF_F_SG;
while (--iovlen >= 0) {
int seglen = iov->iov_len;
unsigned char __user *from = iov->iov_base;
iov++;
while (seglen > 0) {
int copy = 0;
int max = size_goal;
skb = tcp_write_queue_tail(sk);
if (tcp_send_head(sk)) {
if (skb->ip_summed == CHECKSUM_NONE)
max = mss_now;
copy = max - skb->len;
}
if (copy <= 0) {
new_segment:
/* Allocate new segment. If the interface is SG,
* allocate skb fitting to single page.
*/
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
skb = sk_stream_alloc_skb(sk,
select_size(sk, sg),
sk->sk_allocation);
if (!skb)
goto wait_for_memory;
/*
* Check whether we can use HW checksum.
*/
if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
skb->ip_summed = CHECKSUM_PARTIAL;
skb_entail(sk, skb);
copy = size_goal;
max = size_goal;
}
/* Try to append data to the end of skb. */
if (copy > seglen)
copy = seglen;
/* Where to copy to? */
if (skb_tailroom(skb) > 0) {
/* We have some space in skb head. Superb! */
if (copy > skb_tailroom(skb))
copy = skb_tailroom(skb);
if ((err = skb_add_data(skb, from, copy)) != 0)
goto do_fault;
} else {
int merge = 0;
int i = skb_shinfo(skb)->nr_frags;
struct page *page = TCP_PAGE(sk);
int off = TCP_OFF(sk);
if (skb_can_coalesce(skb, i, page, off) &&
off != PAGE_SIZE) {
/* We can extend the last page
* fragment. */
merge = 1;
} else if (i == MAX_SKB_FRAGS || !sg) {
/* Need to add new fragment and cannot
* do this because interface is non-SG,
* or because all the page slots are
* busy. */
tcp_mark_push(tp, skb);
goto new_segment;
} else if (page) {
if (off == PAGE_SIZE) {
put_page(page);
TCP_PAGE(sk) = page = NULL;
off = 0;
}
} else
off = 0;
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
if (!sk_wmem_schedule(sk, copy))
goto wait_for_memory;
if (!page) {
/* Allocate new cache page. */
if (!(page = sk_stream_alloc_page(sk)))
goto wait_for_memory;
}
/* Time to copy data. We are close to
* the end! */
err = skb_copy_to_page(sk, from, skb, page,
off, copy);
if (err) {
/* If this page was new, give it to the
* socket so it does not get leaked.
*/
if (!TCP_PAGE(sk)) {
TCP_PAGE(sk) = page;
TCP_OFF(sk) = 0;
}
goto do_error;
}
/* Update the skb. */
if (merge) {
skb_shinfo(skb)->frags[i - 1].size +=
copy;
} else {
skb_fill_page_desc(skb, i, page, off, copy);
if (TCP_PAGE(sk)) {
get_page(page);
} else if (off + copy < PAGE_SIZE) {
get_page(page);
TCP_PAGE(sk) = page;
}
}
TCP_OFF(sk) = off + copy;
}
if (!copied)
TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
skb_shinfo(skb)->gso_segs = 0;
from += copy;
copied += copy;
if ((seglen -= copy) == 0 && iovlen == 0)
goto out;
if (skb->len < max || (flags & MSG_OOB))
continue;
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
} else if (skb == tcp_send_head(sk))
tcp_push_one(sk, mss_now);
continue;
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
if (copied)
tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
mss_now = tcp_send_mss(sk, &size_goal, flags);
}
}
out:
if (copied)
tcp_push(sk, flags, mss_now, tp->nonagle);
TCP_CHECK_TIMER(sk);
release_sock(sk);
return copied;
do_fault:
if (!skb->len) {
tcp_unlink_write_queue(skb, sk);
/* It is the one place in all of TCP, except connection
* reset, where we can be unlinking the send_head.
*/
tcp_check_send_head(sk, skb);
sk_wmem_free_skb(sk, skb);
}
do_error:
if (copied)
goto out;
out_err:
err = sk_stream_error(sk, flags, err);
TCP_CHECK_TIMER(sk);
release_sock(sk);
return err;
}
|
int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size)
{
struct sock *sk = sock->sk;
struct iovec *iov;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int iovlen, flags;
int mss_now, size_goal;
int sg, err, copied;
long timeo;
lock_sock(sk);
TCP_CHECK_TIMER(sk);
flags = msg->msg_flags;
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
/* Wait for a connection to finish. */
if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
goto out_err;
/* This should be in poll */
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
mss_now = tcp_send_mss(sk, &size_goal, flags);
/* Ok commence sending. */
iovlen = msg->msg_iovlen;
iov = msg->msg_iov;
copied = 0;
err = -EPIPE;
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto out_err;
sg = sk->sk_route_caps & NETIF_F_SG;
while (--iovlen >= 0) {
int seglen = iov->iov_len;
unsigned char __user *from = iov->iov_base;
iov++;
while (seglen > 0) {
int copy = 0;
int max = size_goal;
skb = tcp_write_queue_tail(sk);
if (tcp_send_head(sk)) {
if (skb->ip_summed == CHECKSUM_NONE)
max = mss_now;
copy = max - skb->len;
}
if (copy <= 0) {
new_segment:
/* Allocate new segment. If the interface is SG,
* allocate skb fitting to single page.
*/
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
skb = sk_stream_alloc_skb(sk,
select_size(sk, sg),
sk->sk_allocation);
if (!skb)
goto wait_for_memory;
/*
* Check whether we can use HW checksum.
*/
if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
skb->ip_summed = CHECKSUM_PARTIAL;
skb_entail(sk, skb);
copy = size_goal;
max = size_goal;
}
/* Try to append data to the end of skb. */
if (copy > seglen)
copy = seglen;
/* Where to copy to? */
if (skb_tailroom(skb) > 0) {
/* We have some space in skb head. Superb! */
if (copy > skb_tailroom(skb))
copy = skb_tailroom(skb);
if ((err = skb_add_data(skb, from, copy)) != 0)
goto do_fault;
} else {
int merge = 0;
int i = skb_shinfo(skb)->nr_frags;
struct page *page = TCP_PAGE(sk);
int off = TCP_OFF(sk);
if (skb_can_coalesce(skb, i, page, off) &&
off != PAGE_SIZE) {
/* We can extend the last page
* fragment. */
merge = 1;
} else if (i == MAX_SKB_FRAGS || !sg) {
/* Need to add new fragment and cannot
* do this because interface is non-SG,
* or because all the page slots are
* busy. */
tcp_mark_push(tp, skb);
goto new_segment;
} else if (page) {
if (off == PAGE_SIZE) {
put_page(page);
TCP_PAGE(sk) = page = NULL;
off = 0;
}
} else
off = 0;
if (copy > PAGE_SIZE - off)
copy = PAGE_SIZE - off;
if (!sk_wmem_schedule(sk, copy))
goto wait_for_memory;
if (!page) {
/* Allocate new cache page. */
if (!(page = sk_stream_alloc_page(sk)))
goto wait_for_memory;
}
/* Time to copy data. We are close to
* the end! */
err = skb_copy_to_page(sk, from, skb, page,
off, copy);
if (err) {
/* If this page was new, give it to the
* socket so it does not get leaked.
*/
if (!TCP_PAGE(sk)) {
TCP_PAGE(sk) = page;
TCP_OFF(sk) = 0;
}
goto do_error;
}
/* Update the skb. */
if (merge) {
skb_shinfo(skb)->frags[i - 1].size +=
copy;
} else {
skb_fill_page_desc(skb, i, page, off, copy);
if (TCP_PAGE(sk)) {
get_page(page);
} else if (off + copy < PAGE_SIZE) {
get_page(page);
TCP_PAGE(sk) = page;
}
}
TCP_OFF(sk) = off + copy;
}
if (!copied)
TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
skb_shinfo(skb)->gso_segs = 0;
from += copy;
copied += copy;
if ((seglen -= copy) == 0 && iovlen == 0)
goto out;
if (skb->len < max || (flags & MSG_OOB))
continue;
if (forced_push(tp)) {
tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
} else if (skb == tcp_send_head(sk))
tcp_push_one(sk, mss_now);
continue;
wait_for_sndbuf:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
if (copied)
tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error;
mss_now = tcp_send_mss(sk, &size_goal, flags);
}
}
out:
if (copied)
tcp_push(sk, flags, mss_now, tp->nonagle);
TCP_CHECK_TIMER(sk);
release_sock(sk);
return copied;
do_fault:
if (!skb->len) {
tcp_unlink_write_queue(skb, sk);
/* It is the one place in all of TCP, except connection
* reset, where we can be unlinking the send_head.
*/
tcp_check_send_head(sk, skb);
sk_wmem_free_skb(sk, skb);
}
do_error:
if (copied)
goto out;
out_err:
err = sk_stream_error(sk, flags, err);
TCP_CHECK_TIMER(sk);
release_sock(sk);
return err;
}
|
C
|
linux
| 0 |