column      type             range / values
func        stringlengths    0 to 484k
target      int64            0 to 1
cwe         sequencelengths  0 to 4
project     stringclasses    799 values
commit_id   stringlengths    40 to 40
hash        float64          1,215,700,430,453,689,100,000,000 to 340,281,914,521,452,260,000,000,000,000
size        int64            1 to 24k
message     stringlengths    0 to 13.3k
void xt_compat_unlock(int af) { mutex_unlock(&xt[af].compat_mutex); }
0
[ "CWE-787" ]
linux
9fa492cdc160cd27ce1046cb36f47d3b2b1efa21
8,014,819,838,078,479,000,000,000,000,000,000,000
4
[NETFILTER]: x_tables: simplify compat API Split the xt_compat_match/xt_compat_target into smaller type-safe functions performing just one operation. Handle all alignment and size-related conversions centrally in these function instead of requiring each module to implement a full-blown conversion function. Replace ->compat callback by ->compat_from_user and ->compat_to_user callbacks, responsible for converting just a single private structure. Signed-off-by: Patrick McHardy <[email protected]> Signed-off-by: David S. Miller <[email protected]>
_archive_read_next_header2(struct archive *_a, struct archive_entry *entry) { struct archive_read *a = (struct archive_read *)_a; int r1 = ARCHIVE_OK, r2; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_HEADER | ARCHIVE_STATE_DATA, "archive_read_next_header"); archive_entry_clear(entry); archive_clear_error(&a->archive); /* * If client didn't consume entire data, skip any remainder * (This is especially important for GNU incremental directories.) */ if (a->archive.state == ARCHIVE_STATE_DATA) { r1 = archive_read_data_skip(&a->archive); if (r1 == ARCHIVE_EOF) archive_set_error(&a->archive, EIO, "Premature end-of-file."); if (r1 == ARCHIVE_EOF || r1 == ARCHIVE_FATAL) { a->archive.state = ARCHIVE_STATE_FATAL; return (ARCHIVE_FATAL); } } /* Record start-of-header offset in uncompressed stream. */ a->header_position = a->filter->position; ++_a->file_count; r2 = (a->format->read_header)(a, entry); /* * EOF and FATAL are persistent at this layer. By * modifying the state, we guarantee that future calls to * read a header or read data will fail. */ switch (r2) { case ARCHIVE_EOF: a->archive.state = ARCHIVE_STATE_EOF; --_a->file_count;/* Revert a file counter. */ break; case ARCHIVE_OK: a->archive.state = ARCHIVE_STATE_DATA; break; case ARCHIVE_WARN: a->archive.state = ARCHIVE_STATE_DATA; break; case ARCHIVE_RETRY: break; case ARCHIVE_FATAL: a->archive.state = ARCHIVE_STATE_FATAL; break; } a->read_data_output_offset = 0; a->read_data_remaining = 0; a->read_data_is_posix_read = 0; a->read_data_requested = 0; a->data_start_node = a->client.cursor; /* EOF always wins; otherwise return the worst error. */ return (r2 < r1 || r2 == ARCHIVE_EOF) ? r2 : r1; }
0
[ "CWE-125" ]
libarchive
e6c9668f3202215ddb71617b41c19b6f05acf008
24,308,637,703,512,155,000,000,000,000,000,000,000
64
Add a check to archive_read_filter_consume to reject any attempts to move the file pointer by a negative amount. Note: Either this or commit 3865cf2 provides a fix for Issue 394.
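The commit message above describes rejecting any attempt to move the file pointer by a negative amount. A minimal sketch of that kind of guard, using hypothetical names rather than libarchive's real internals, could look like this:

#include <stdint.h>

/* Hypothetical guard: refuse a negative skip request instead of
 * rewinding the stream position (illustrative, not libarchive code). */
static int64_t
consume_checked(int64_t *position, int64_t request)
{
    if (request < 0)
        return -1;            /* reject attempts to move backwards */
    *position += request;
    return request;
}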
int main( int argc, char **argv) /* pbs_track */ { int ArgIndex; int NumErrs = 0; char *Args[MAXARGS]; int aindex = 0; int rc; int pid; char tmpJobID[PBS_MAXCLTJOBID]; /* from the command line */ char JobID[PBS_MAXCLTJOBID]; /* modified job ID for MOM/server consumption */ char ServerName[MAXSERVERNAME]; int DoBackground = 0; tmpJobID[0] = '\0'; /* USAGE: pbs_track [-j <JOBID>] -- a.out arg1 arg2 ... argN */ #define GETOPT_ARGS "bj:" while ((ArgIndex = getopt(argc, argv, GETOPT_ARGS)) != EOF) { switch (ArgIndex) { case 'b': /* background process */ DoBackground = 1; break; case 'j': strncpy(tmpJobID, optarg, sizeof(tmpJobID)); if (tmpJobID[PBS_MAXCLTJOBID-1] != '\0') { /* truncation occurred! */ fprintf(stderr, "pbs_track: given job ID too large (> %d)\n", PBS_MAXCLTJOBID); exit(-1); } break; default: NumErrs++; break; } } if ((NumErrs > 0) || (optind >= argc) || (tmpJobID[0] == '\0')) { static char Usage[] = "USAGE: pbs_track [-j <JOBID>] [-b] -- a.out arg1 arg2 ... argN\n"; fprintf(stderr, "%s", Usage); exit(2); } if (getenv(NO_SERVER_SUFFIX) != NULL) { snprintf(JobID, sizeof(JobID), "%s", tmpJobID); } else { if (get_server(tmpJobID, JobID, sizeof(JobID), ServerName, sizeof(ServerName))) { fprintf(stderr, "pbs_track: illegally formed job identifier: '%s'\n", JobID); exit(1); } } /* gather a.out and other arguments */ aindex = 0; for (;optind < argc;optind++) { Args[aindex++] = strdup(argv[optind]); printf("Got arg: %s\n", Args[aindex-1]); } Args[aindex] = NULL; /* decide if we should fork or not */ pid = 1; if (DoBackground == 1) { printf("FORKING!\n"); pid = fork(); } if ((DoBackground == 0) || (pid == 0)) { /* either parent or child, depending on the setting */ /* call tm_adopt() to start tracking this process */ rc = tm_adopt(JobID, TM_ADOPT_JOBID, getpid()); switch (rc) { case TM_SUCCESS: /* success! */ break; case TM_ENOTFOUND: fprintf(stderr, "pbs_track: MOM could not find job %s\n", JobID); break; case TM_ESYSTEM: case TM_ENOTCONNECTED: fprintf(stderr, "pbs_track: error occurred while trying to communication with pbs_mom: %s (%d)\n", pbse_to_txt(rc), rc); break; default: /* Unexpected error occurred */ fprintf(stderr, "pbs_track: unexpected error %s (%d) occurred\n", pbse_to_txt(rc), rc); break; } /* END switch(rc) */ if (rc != TM_SUCCESS) { exit(-1); } /* do the exec */ if (execvp(Args[0], Args) == -1) { fprintf(stderr,"execvp failed with error %d, message:\n%s\n", errno, strerror(errno)); } } /* END if ((DoBackground == 0) || (pid == 0)) */ else if (pid > 0) { /* parent*/ fclose(stdin); fclose(stdout); fclose(stderr); } else if (pid < 0) { fprintf(stderr, "pbs_track: could not fork (%d:%s)\n", errno, strerror(errno)); } exit(0); } /* END main() */
1
[ "CWE-264" ]
torque
f2f4c950f3d461a249111c8826da3beaafccace9
263,352,489,502,947,600,000,000,000,000,000,000,000
187
TRQ-2885 - limit tm_adopt() to only adopt a session id that is owned by the calling user.
static BOOL rdp_print_input_capability_set(wStream* s, UINT16 length) { UINT16 inputFlags; UINT16 pad2OctetsA; UINT32 keyboardLayout; UINT32 keyboardType; UINT32 keyboardSubType; UINT32 keyboardFunctionKey; WLog_INFO(TAG, "InputCapabilitySet (length %" PRIu16 ")", length); if (length < 88) return FALSE; Stream_Read_UINT16(s, inputFlags); /* inputFlags (2 bytes) */ Stream_Read_UINT16(s, pad2OctetsA); /* pad2OctetsA (2 bytes) */ Stream_Read_UINT32(s, keyboardLayout); /* keyboardLayout (4 bytes) */ Stream_Read_UINT32(s, keyboardType); /* keyboardType (4 bytes) */ Stream_Read_UINT32(s, keyboardSubType); /* keyboardSubType (4 bytes) */ Stream_Read_UINT32(s, keyboardFunctionKey); /* keyboardFunctionKeys (4 bytes) */ Stream_Seek(s, 64); /* imeFileName (64 bytes) */ WLog_INFO(TAG, "\tinputFlags: 0x%04" PRIX16 "", inputFlags); WLog_INFO(TAG, "\tpad2OctetsA: 0x%04" PRIX16 "", pad2OctetsA); WLog_INFO(TAG, "\tkeyboardLayout: 0x%08" PRIX32 "", keyboardLayout); WLog_INFO(TAG, "\tkeyboardType: 0x%08" PRIX32 "", keyboardType); WLog_INFO(TAG, "\tkeyboardSubType: 0x%08" PRIX32 "", keyboardSubType); WLog_INFO(TAG, "\tkeyboardFunctionKey: 0x%08" PRIX32 "", keyboardFunctionKey); return TRUE; }
0
[ "CWE-119", "CWE-125" ]
FreeRDP
3627aaf7d289315b614a584afb388f04abfb5bbf
92,834,056,900,707,880,000,000,000,000,000,000,000
28
Fixed #6011: Bounds check in rdp_read_font_capability_set
int dsdb_find_dn_by_guid(struct ldb_context *ldb, TALLOC_CTX *mem_ctx, const struct GUID *guid, uint32_t dsdb_flags, struct ldb_dn **dn) { int ret; struct ldb_result *res; const char *attrs[] = { NULL }; char *guid_str = GUID_string(mem_ctx, guid); if (!guid_str) { return ldb_operr(ldb); } ret = dsdb_search(ldb, mem_ctx, &res, NULL, LDB_SCOPE_SUBTREE, attrs, DSDB_SEARCH_SEARCH_ALL_PARTITIONS | DSDB_SEARCH_SHOW_EXTENDED_DN | DSDB_SEARCH_ONE_ONLY | dsdb_flags, "objectGUID=%s", guid_str); talloc_free(guid_str); if (ret != LDB_SUCCESS) { return ret; } *dn = talloc_steal(mem_ctx, res->msgs[0]->dn); talloc_free(res); return LDB_SUCCESS; }
0
[ "CWE-200" ]
samba
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
44,790,912,514,858,230,000,000,000,000,000,000,000
30
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message This aims to minimise usage of the error-prone pattern of searching for a just-added message element in order to make modifications to it (and potentially finding the wrong element). BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009 Signed-off-by: Joseph Sutton <[email protected]>
xmlFileOpenW (const char *filename) { const char *path = NULL; FILE *fd; if (!strcmp(filename, "-")) { fd = stdout; return((void *) fd); } if (!xmlStrncasecmp(BAD_CAST filename, BAD_CAST "file://localhost/", 17)) #if defined (_WIN32) || defined (__DJGPP__) && !defined(__CYGWIN__) path = &filename[17]; #else path = &filename[16]; #endif else if (!xmlStrncasecmp(BAD_CAST filename, BAD_CAST "file:///", 8)) { #if defined (_WIN32) || defined (__DJGPP__) && !defined(__CYGWIN__) path = &filename[8]; #else path = &filename[7]; #endif } else path = filename; if (path == NULL) return(NULL); #if defined(_WIN32) || defined (__DJGPP__) && !defined (__CYGWIN__) fd = xmlWrapOpen(path, 1); #else fd = fopen(path, "wb"); #endif /* WIN32 */ if (fd == NULL) xmlIOErr(0, path); return((void *) fd); }
0
[ "CWE-134" ]
libxml2
4472c3a5a5b516aaf59b89be602fbce52756c3e9
324,199,731,747,646,060,000,000,000,000,000,000,000
36
Fix some format string warnings with possible format string vulnerability For https://bugzilla.gnome.org/show_bug.cgi?id=761029 Decorate every method in libxml2 with the appropriate LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups following the reports.
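The commit message refers to decorating libxml2 functions with a LIBXML_ATTR_FORMAT(fmt,args) macro so the compiler can check printf-style arguments. A sketch of how such a macro is commonly defined and applied; the report_error declaration is made up purely for illustration:

#if defined(__GNUC__)
# define LIBXML_ATTR_FORMAT(fmt, args) \
    __attribute__((__format__(__printf__, fmt, args)))
#else
# define LIBXML_ATTR_FORMAT(fmt, args)
#endif

/* The compiler now warns if callers pass arguments that do not match msg. */
void report_error(void *ctx, const char *msg, ...) LIBXML_ATTR_FORMAT(2, 3);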
nfa_regatom(void) { int c; int charclass; int equiclass; int collclass; int got_coll_char; char_u *p; char_u *endp; char_u *old_regparse = regparse; int extra = 0; int emit_range; int negated; int result; int startc = -1; int save_prev_at_start = prev_at_start; c = getchr(); switch (c) { case NUL: EMSG_RET_FAIL(_(e_nul_found)); case Magic('^'): EMIT(NFA_BOL); break; case Magic('$'): EMIT(NFA_EOL); #if defined(FEAT_SYN_HL) || defined(PROTO) had_eol = TRUE; #endif break; case Magic('<'): EMIT(NFA_BOW); break; case Magic('>'): EMIT(NFA_EOW); break; case Magic('_'): c = no_Magic(getchr()); if (c == NUL) EMSG_RET_FAIL(_(e_nul_found)); if (c == '^') // "\_^" is start-of-line { EMIT(NFA_BOL); break; } if (c == '$') // "\_$" is end-of-line { EMIT(NFA_EOL); #if defined(FEAT_SYN_HL) || defined(PROTO) had_eol = TRUE; #endif break; } extra = NFA_ADD_NL; // "\_[" is collection plus newline if (c == '[') goto collection; // "\_x" is character class plus newline // FALLTHROUGH /* * Character classes. */ case Magic('.'): case Magic('i'): case Magic('I'): case Magic('k'): case Magic('K'): case Magic('f'): case Magic('F'): case Magic('p'): case Magic('P'): case Magic('s'): case Magic('S'): case Magic('d'): case Magic('D'): case Magic('x'): case Magic('X'): case Magic('o'): case Magic('O'): case Magic('w'): case Magic('W'): case Magic('h'): case Magic('H'): case Magic('a'): case Magic('A'): case Magic('l'): case Magic('L'): case Magic('u'): case Magic('U'): p = vim_strchr(classchars, no_Magic(c)); if (p == NULL) { if (extra == NFA_ADD_NL) { semsg(_(e_ill_char_class), c); rc_did_emsg = TRUE; return FAIL; } siemsg("INTERNAL: Unknown character class char: %d", c); return FAIL; } // When '.' is followed by a composing char ignore the dot, so that // the composing char is matched here. if (enc_utf8 && c == Magic('.') && utf_iscomposing(peekchr())) { old_regparse = regparse; c = getchr(); goto nfa_do_multibyte; } EMIT(nfa_classcodes[p - classchars]); if (extra == NFA_ADD_NL) { EMIT(NFA_NEWL); EMIT(NFA_OR); regflags |= RF_HASNL; } break; case Magic('n'): if (reg_string) // In a string "\n" matches a newline character. EMIT(NL); else { // In buffer text "\n" matches the end of a line. EMIT(NFA_NEWL); regflags |= RF_HASNL; } break; case Magic('('): if (nfa_reg(REG_PAREN) == FAIL) return FAIL; // cascaded error break; case Magic('|'): case Magic('&'): case Magic(')'): semsg(_(e_misplaced), no_Magic(c)); return FAIL; case Magic('='): case Magic('?'): case Magic('+'): case Magic('@'): case Magic('*'): case Magic('{'): // these should follow an atom, not form an atom semsg(_(e_misplaced), no_Magic(c)); return FAIL; case Magic('~'): { char_u *lp; // Previous substitute pattern. // Generated as "\%(pattern\)". 
if (reg_prev_sub == NULL) { emsg(_(e_no_previous_substitute_regular_expression)); return FAIL; } for (lp = reg_prev_sub; *lp != NUL; MB_CPTR_ADV(lp)) { EMIT(PTR2CHAR(lp)); if (lp != reg_prev_sub) EMIT(NFA_CONCAT); } EMIT(NFA_NOPEN); break; } case Magic('1'): case Magic('2'): case Magic('3'): case Magic('4'): case Magic('5'): case Magic('6'): case Magic('7'): case Magic('8'): case Magic('9'): { int refnum = no_Magic(c) - '1'; if (!seen_endbrace(refnum + 1)) return FAIL; EMIT(NFA_BACKREF1 + refnum); rex.nfa_has_backref = TRUE; } break; case Magic('z'): c = no_Magic(getchr()); switch (c) { case 's': EMIT(NFA_ZSTART); if (re_mult_next("\\zs") == FAIL) return FAIL; break; case 'e': EMIT(NFA_ZEND); rex.nfa_has_zend = TRUE; if (re_mult_next("\\ze") == FAIL) return FAIL; break; #ifdef FEAT_SYN_HL case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': // \z1...\z9 if ((reg_do_extmatch & REX_USE) == 0) EMSG_RET_FAIL(_(e_z1_not_allowed)); EMIT(NFA_ZREF1 + (no_Magic(c) - '1')); // No need to set rex.nfa_has_backref, the sub-matches don't // change when \z1 .. \z9 matches or not. re_has_z = REX_USE; break; case '(': // \z( if ((reg_do_extmatch & REX_SET) == 0) EMSG_RET_FAIL(_(e_z_not_allowed)); if (nfa_reg(REG_ZPAREN) == FAIL) return FAIL; // cascaded error re_has_z = REX_SET; break; #endif default: semsg(_("E867: (NFA) Unknown operator '\\z%c'"), no_Magic(c)); return FAIL; } break; case Magic('%'): c = no_Magic(getchr()); switch (c) { // () without a back reference case '(': if (nfa_reg(REG_NPAREN) == FAIL) return FAIL; EMIT(NFA_NOPEN); break; case 'd': // %d123 decimal case 'o': // %o123 octal case 'x': // %xab hex 2 case 'u': // %uabcd hex 4 case 'U': // %U1234abcd hex 8 { long nr; switch (c) { case 'd': nr = getdecchrs(); break; case 'o': nr = getoctchrs(); break; case 'x': nr = gethexchrs(2); break; case 'u': nr = gethexchrs(4); break; case 'U': nr = gethexchrs(8); break; default: nr = -1; break; } if (nr < 0 || nr > INT_MAX) EMSG2_RET_FAIL( _("E678: Invalid character after %s%%[dxouU]"), reg_magic == MAGIC_ALL); // A NUL is stored in the text as NL // TODO: what if a composing character follows? EMIT(nr == 0 ? 0x0a : nr); } break; // Catch \%^ and \%$ regardless of where they appear in the // pattern -- regardless of whether or not it makes sense. case '^': EMIT(NFA_BOF); break; case '$': EMIT(NFA_EOF); break; case '#': EMIT(NFA_CURSOR); break; case 'V': EMIT(NFA_VISUAL); break; case 'C': EMIT(NFA_ANY_COMPOSING); break; case '[': { int n; // \%[abc] for (n = 0; (c = peekchr()) != ']'; ++n) { if (c == NUL) EMSG2_RET_FAIL(_(e_missing_sb), reg_magic == MAGIC_ALL); // recursive call! if (nfa_regatom() == FAIL) return FAIL; } getchr(); // get the ] if (n == 0) EMSG2_RET_FAIL(_(e_empty_sb), reg_magic == MAGIC_ALL); EMIT(NFA_OPT_CHARS); EMIT(n); // Emit as "\%(\%[abc]\)" to be able to handle // "\%[abc]*" which would cause the empty string to be // matched an unlimited number of times. NFA_NOPEN is // added only once at a position, while NFA_SPLIT is // added multiple times. This is more efficient than // not allowing NFA_SPLIT multiple times, it is used // a lot. EMIT(NFA_NOPEN); break; } default: { long_u n = 0; int cmp = c; int cur = FALSE; if (c == '<' || c == '>') c = getchr(); if (no_Magic(c) == '.') { cur = TRUE; c = getchr(); } while (VIM_ISDIGIT(c)) { long_u tmp; if (cur) semsg(_(e_regexp_number_after_dot_pos_search), no_Magic(c)); tmp = n * 10 + (c - '0'); if (tmp < n) { // overflow. 
emsg(_(e_value_too_large)); return FAIL; } n = tmp; c = getchr(); } if (c == 'l' || c == 'c' || c == 'v') { long_u limit = INT_MAX; if (c == 'l') { if (cur) n = curwin->w_cursor.lnum; // \%{n}l \%{n}<l \%{n}>l EMIT(cmp == '<' ? NFA_LNUM_LT : cmp == '>' ? NFA_LNUM_GT : NFA_LNUM); if (save_prev_at_start) at_start = TRUE; } else if (c == 'c') { if (cur) { n = curwin->w_cursor.col; n++; } // \%{n}c \%{n}<c \%{n}>c EMIT(cmp == '<' ? NFA_COL_LT : cmp == '>' ? NFA_COL_GT : NFA_COL); } else { if (cur) { colnr_T vcol = 0; getvvcol(curwin, &curwin->w_cursor, NULL, NULL, &vcol); n = ++vcol; } // \%{n}v \%{n}<v \%{n}>v EMIT(cmp == '<' ? NFA_VCOL_LT : cmp == '>' ? NFA_VCOL_GT : NFA_VCOL); limit = INT_MAX / MB_MAXBYTES; } if (n >= limit) { emsg(_(e_value_too_large)); return FAIL; } EMIT((int)n); break; } else if (c == '\'' && n == 0) { // \%'m \%<'m \%>'m EMIT(cmp == '<' ? NFA_MARK_LT : cmp == '>' ? NFA_MARK_GT : NFA_MARK); EMIT(getchr()); break; } } semsg(_("E867: (NFA) Unknown operator '\\%%%c'"), no_Magic(c)); return FAIL; } break; case Magic('['): collection: /* * [abc] uses NFA_START_COLL - NFA_END_COLL * [^abc] uses NFA_START_NEG_COLL - NFA_END_NEG_COLL * Each character is produced as a regular state, using * NFA_CONCAT to bind them together. * Besides normal characters there can be: * - character classes NFA_CLASS_* * - ranges, two characters followed by NFA_RANGE. */ p = regparse; endp = skip_anyof(p); if (*endp == ']') { /* * Try to reverse engineer character classes. For example, * recognize that [0-9] stands for \d and [A-Za-z_] for \h, * and perform the necessary substitutions in the NFA. */ result = nfa_recognize_char_class(regparse, endp, extra == NFA_ADD_NL); if (result != FAIL) { if (result >= NFA_FIRST_NL && result <= NFA_LAST_NL) { EMIT(result - NFA_ADD_NL); EMIT(NFA_NEWL); EMIT(NFA_OR); } else EMIT(result); regparse = endp; MB_PTR_ADV(regparse); return OK; } /* * Failed to recognize a character class. Use the simple * version that turns [abc] into 'a' OR 'b' OR 'c' */ startc = -1; negated = FALSE; if (*regparse == '^') // negated range { negated = TRUE; MB_PTR_ADV(regparse); EMIT(NFA_START_NEG_COLL); } else EMIT(NFA_START_COLL); if (*regparse == '-') { startc = '-'; EMIT(startc); EMIT(NFA_CONCAT); MB_PTR_ADV(regparse); } // Emit the OR branches for each character in the [] emit_range = FALSE; while (regparse < endp) { int oldstartc = startc; startc = -1; got_coll_char = FALSE; if (*regparse == '[') { // Check for [: :], [= =], [. .] 
equiclass = collclass = 0; charclass = get_char_class(&regparse); if (charclass == CLASS_NONE) { equiclass = get_equi_class(&regparse); if (equiclass == 0) collclass = get_coll_element(&regparse); } // Character class like [:alpha:] if (charclass != CLASS_NONE) { switch (charclass) { case CLASS_ALNUM: EMIT(NFA_CLASS_ALNUM); break; case CLASS_ALPHA: EMIT(NFA_CLASS_ALPHA); break; case CLASS_BLANK: EMIT(NFA_CLASS_BLANK); break; case CLASS_CNTRL: EMIT(NFA_CLASS_CNTRL); break; case CLASS_DIGIT: EMIT(NFA_CLASS_DIGIT); break; case CLASS_GRAPH: EMIT(NFA_CLASS_GRAPH); break; case CLASS_LOWER: wants_nfa = TRUE; EMIT(NFA_CLASS_LOWER); break; case CLASS_PRINT: EMIT(NFA_CLASS_PRINT); break; case CLASS_PUNCT: EMIT(NFA_CLASS_PUNCT); break; case CLASS_SPACE: EMIT(NFA_CLASS_SPACE); break; case CLASS_UPPER: wants_nfa = TRUE; EMIT(NFA_CLASS_UPPER); break; case CLASS_XDIGIT: EMIT(NFA_CLASS_XDIGIT); break; case CLASS_TAB: EMIT(NFA_CLASS_TAB); break; case CLASS_RETURN: EMIT(NFA_CLASS_RETURN); break; case CLASS_BACKSPACE: EMIT(NFA_CLASS_BACKSPACE); break; case CLASS_ESCAPE: EMIT(NFA_CLASS_ESCAPE); break; case CLASS_IDENT: EMIT(NFA_CLASS_IDENT); break; case CLASS_KEYWORD: EMIT(NFA_CLASS_KEYWORD); break; case CLASS_FNAME: EMIT(NFA_CLASS_FNAME); break; } EMIT(NFA_CONCAT); continue; } // Try equivalence class [=a=] and the like if (equiclass != 0) { result = nfa_emit_equi_class(equiclass); if (result == FAIL) { // should never happen EMSG_RET_FAIL(_("E868: Error building NFA with equivalence class!")); } continue; } // Try collating class like [. .] if (collclass != 0) { startc = collclass; // allow [.a.]-x as a range // Will emit the proper atom at the end of the // while loop. } } // Try a range like 'a-x' or '\t-z'. Also allows '-' as a // start character. if (*regparse == '-' && oldstartc != -1) { emit_range = TRUE; startc = oldstartc; MB_PTR_ADV(regparse); continue; // reading the end of the range } // Now handle simple and escaped characters. // Only "\]", "\^", "\]" and "\\" are special in Vi. Vim // accepts "\t", "\e", etc., but only when the 'l' flag in // 'cpoptions' is not included. // Posix doesn't recognize backslash at all. if (*regparse == '\\' && !reg_cpo_bsl && regparse + 1 <= endp && (vim_strchr(REGEXP_INRANGE, regparse[1]) != NULL || (!reg_cpo_lit && vim_strchr(REGEXP_ABBR, regparse[1]) != NULL) ) ) { MB_PTR_ADV(regparse); if (*regparse == 'n') startc = (reg_string || emit_range || regparse[1] == '-') ? NL : NFA_NEWL; else if (*regparse == 'd' || *regparse == 'o' || *regparse == 'x' || *regparse == 'u' || *regparse == 'U' ) { // TODO(RE) This needs more testing startc = coll_get_char(); got_coll_char = TRUE; MB_PTR_BACK(old_regparse, regparse); } else { // \r,\t,\e,\b startc = backslash_trans(*regparse); } } // Normal printable char if (startc == -1) startc = PTR2CHAR(regparse); // Previous char was '-', so this char is end of range. if (emit_range) { int endc = startc; startc = oldstartc; if (startc > endc) EMSG_RET_FAIL(_(e_reverse_range)); if (endc > startc + 2) { // Emit a range instead of the sequence of // individual characters. if (startc == 0) // \x00 is translated to \x0a, start at \x01. EMIT(1); else --post_ptr; // remove NFA_CONCAT EMIT(endc); EMIT(NFA_RANGE); EMIT(NFA_CONCAT); } else if (has_mbyte && ((*mb_char2len)(startc) > 1 || (*mb_char2len)(endc) > 1)) { // Emit the characters in the range. // "startc" was already emitted, so skip it. 
// for (c = startc + 1; c <= endc; c++) { EMIT(c); EMIT(NFA_CONCAT); } } else { #ifdef EBCDIC int alpha_only = FALSE; // for alphabetical range skip the gaps // 'i'-'j', 'r'-'s', 'I'-'J' and 'R'-'S'. if (isalpha(startc) && isalpha(endc)) alpha_only = TRUE; #endif // Emit the range. "startc" was already emitted, so // skip it. for (c = startc + 1; c <= endc; c++) #ifdef EBCDIC if (!alpha_only || isalpha(startc)) #endif { EMIT(c); EMIT(NFA_CONCAT); } } emit_range = FALSE; startc = -1; } else { // This char (startc) is not part of a range. Just // emit it. // Normally, simply emit startc. But if we get char // code=0 from a collating char, then replace it with // 0x0a. // This is needed to completely mimic the behaviour of // the backtracking engine. if (startc == NFA_NEWL) { // Line break can't be matched as part of the // collection, add an OR below. But not for negated // range. if (!negated) extra = NFA_ADD_NL; } else { if (got_coll_char == TRUE && startc == 0) EMIT(0x0a); else EMIT(startc); EMIT(NFA_CONCAT); } } MB_PTR_ADV(regparse); } // while (p < endp) MB_PTR_BACK(old_regparse, regparse); if (*regparse == '-') // if last, '-' is just a char { EMIT('-'); EMIT(NFA_CONCAT); } // skip the trailing ] regparse = endp; MB_PTR_ADV(regparse); // Mark end of the collection. if (negated == TRUE) EMIT(NFA_END_NEG_COLL); else EMIT(NFA_END_COLL); // \_[] also matches \n but it's not negated if (extra == NFA_ADD_NL) { EMIT(reg_string ? NL : NFA_NEWL); EMIT(NFA_OR); } return OK; } // if exists closing ] if (reg_strict) EMSG_RET_FAIL(_(e_missingbracket)); // FALLTHROUGH default: { int plen; nfa_do_multibyte: // plen is length of current char with composing chars if (enc_utf8 && ((*mb_char2len)(c) != (plen = utfc_ptr2len(old_regparse)) || utf_iscomposing(c))) { int i = 0; // A base character plus composing characters, or just one // or more composing characters. // This requires creating a separate atom as if enclosing // the characters in (), where NFA_COMPOSING is the ( and // NFA_END_COMPOSING is the ). Note that right now we are // building the postfix form, not the NFA itself; // a composing char could be: a, b, c, NFA_COMPOSING // where 'b' and 'c' are chars with codes > 256. for (;;) { EMIT(c); if (i > 0) EMIT(NFA_CONCAT); if ((i += utf_char2len(c)) >= plen) break; c = utf_ptr2char(old_regparse + i); } EMIT(NFA_COMPOSING); regparse = old_regparse + plen; } else { c = no_Magic(c); EMIT(c); } return OK; } } return OK; }
0
[ "CWE-122" ]
vim
65b605665997fad54ef39a93199e305af2fe4d7f
185,667,819,130,638,200,000,000,000,000,000,000,000
819
patch 8.2.3409: reading beyond end of line with invalid utf-8 character Problem: Reading beyond end of line with invalid utf-8 character. Solution: Check for NUL when advancing.
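The patch description says the fix is to check for NUL when advancing over a possibly invalid UTF-8 character. An illustrative version of that idea, not Vim's actual helper:

/* Advance over one character of claimed length char_len, but never
 * step past a terminating NUL (illustrative sketch only). */
static const char *
advance_char(const char *p, int char_len)
{
    int i;

    for (i = 0; i < char_len; i++)
        if (p[i] == '\0')
            return p + i;     /* stop at end of line */
    return p + char_len;
}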
\param dz Checked image depth. \param dc Checked image spectrum. **/ bool is_sameXYZC(const unsigned int dx, const unsigned int dy, const unsigned int dz, const unsigned int dc) const { bool res = true;
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
48,084,417,557,981,450,000,000,000,000,000,000,000
6
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
static MonoMethod* methodbuilder_to_mono_method (MonoClass *klass, MonoReflectionMethodBuilder* mb) { ReflectionMethodBuilder rmb; MonoMethodSignature *sig; mono_loader_lock (); sig = method_builder_to_signature (klass->image, mb); mono_loader_unlock (); reflection_methodbuilder_from_method_builder (&rmb, mb); mb->mhandle = reflection_methodbuilder_to_mono_method (klass, &rmb, sig); mono_save_custom_attrs (klass->image, mb->mhandle, mb->cattrs); /* If we are in a generic class, we might be called multiple times from inflate_method */ if (!((MonoDynamicImage*)(MonoDynamicImage*)klass->image)->save && !klass->generic_container) { /* ilgen is no longer needed */ mb->ilgen = NULL; } return mb->mhandle;
0
[ "CWE-20" ]
mono
4905ef1130feb26c3150b28b97e4a96752e0d399
142,638,642,538,075,650,000,000,000,000,000,000,000
21
Handle invalid instantiation of generic methods. * verify.c: Add new function to internal verifier API to check method instantiations. * reflection.c (mono_reflection_bind_generic_method_parameters): Check the instantiation before returning it. Fixes #655847
int HttpDownstreamConnection::write_first() { int rv; process_blocked_request_buf(); if (conn_.tls.ssl) { rv = write_tls(); } else { rv = write_clear(); } if (rv != 0) { return SHRPX_ERR_RETRY; } if (conn_.tls.ssl) { on_write_ = &HttpDownstreamConnection::write_tls; } else { on_write_ = &HttpDownstreamConnection::write_clear; } first_write_done_ = true; downstream_->set_request_header_sent(true); auto buf = downstream_->get_blocked_request_buf(); buf->reset(); return 0; }
1
[]
nghttp2
319d5ab1c6d916b6b8a0d85b2ae3f01b3ad04f2c
132,537,534,933,043,510,000,000,000,000,000,000,000
29
nghttpx: Fix request stall Fix request stall if backend connection is reused and buffer is full.
static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct net *tgt_net = net; struct ifinfomsg *ifm; char ifname[IFNAMSIZ]; struct nlattr *tb[IFLA_MAX+1]; struct net_device *dev = NULL; struct sk_buff *nskb; int netnsid = -1; int err; u32 ext_filter_mask = 0; err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, extack); if (err < 0) return err; if (tb[IFLA_IF_NETNSID]) { netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]); tgt_net = get_target_net(skb, netnsid); if (IS_ERR(tgt_net)) return PTR_ERR(tgt_net); } if (tb[IFLA_IFNAME]) nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); if (tb[IFLA_EXT_MASK]) ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); err = -EINVAL; ifm = nlmsg_data(nlh); if (ifm->ifi_index > 0) dev = __dev_get_by_index(tgt_net, ifm->ifi_index); else if (tb[IFLA_IFNAME]) dev = __dev_get_by_name(tgt_net, ifname); else goto out; err = -ENODEV; if (dev == NULL) goto out; err = -ENOBUFS; nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL); if (nskb == NULL) goto out; err = rtnl_fill_ifinfo(nskb, dev, net, RTM_NEWLINK, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 0, ext_filter_mask, 0, NULL, netnsid); if (err < 0) { /* -EMSGSIZE implies BUG in if_nlmsg_size */ WARN_ON(err == -EMSGSIZE); kfree_skb(nskb); } else err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid); out: if (netnsid >= 0) put_net(tgt_net); return err; }
1
[ "CWE-476" ]
linux
f428fe4a04cc339166c8bbd489789760de3a0cee
30,732,359,302,526,444,000,000,000,000,000,000,000
65
rtnetlink: give a user socket to get_target_net() This function is used from two places: rtnl_dump_ifinfo and rtnl_getlink. In rtnl_getlink(), we give a request skb into get_target_net(), but in rtnl_dump_ifinfo, we give a response skb into get_target_net(). The problem here is that NETLINK_CB() isn't initialized for the response skb. In both cases we can get a user socket and give it instead of skb into get_target_net(). This bug was found by syzkaller with this call-trace: kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] SMP KASAN Modules linked in: CPU: 1 PID: 3149 Comm: syzkaller140561 Not tainted 4.15.0-rc4-mm1+ #47 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 RIP: 0010:__netlink_ns_capable+0x8b/0x120 net/netlink/af_netlink.c:868 RSP: 0018:ffff8801c880f348 EFLAGS: 00010206 RAX: dffffc0000000000 RBX: 0000000000000000 RCX: ffffffff8443f900 RDX: 000000000000007b RSI: ffffffff86510f40 RDI: 00000000000003d8 RBP: ffff8801c880f360 R08: 0000000000000000 R09: 1ffff10039101e4f R10: 0000000000000000 R11: 0000000000000001 R12: ffffffff86510f40 R13: 000000000000000c R14: 0000000000000004 R15: 0000000000000011 FS: 0000000001a1a880(0000) GS:ffff8801db300000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000020151000 CR3: 00000001c9511005 CR4: 00000000001606e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: netlink_ns_capable+0x26/0x30 net/netlink/af_netlink.c:886 get_target_net+0x9d/0x120 net/core/rtnetlink.c:1765 rtnl_dump_ifinfo+0x2e5/0xee0 net/core/rtnetlink.c:1806 netlink_dump+0x48c/0xce0 net/netlink/af_netlink.c:2222 __netlink_dump_start+0x4f0/0x6d0 net/netlink/af_netlink.c:2319 netlink_dump_start include/linux/netlink.h:214 [inline] rtnetlink_rcv_msg+0x7f0/0xb10 net/core/rtnetlink.c:4485 netlink_rcv_skb+0x21e/0x460 net/netlink/af_netlink.c:2441 rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:4540 netlink_unicast_kernel net/netlink/af_netlink.c:1308 [inline] netlink_unicast+0x4be/0x6a0 net/netlink/af_netlink.c:1334 netlink_sendmsg+0xa4a/0xe60 net/netlink/af_netlink.c:1897 Cc: Jiri Benc <[email protected]> Fixes: 79e1ad148c84 ("rtnetlink: use netnsid to query interface") Signed-off-by: Andrei Vagin <[email protected]> Signed-off-by: David S. Miller <[email protected]>
httpGetBlocking(http_t *http) /* I - HTTP connection */ { return (http ? http->blocking : 0); }
0
[ "CWE-120" ]
cups
f24e6cf6a39300ad0c3726a41a4aab51ad54c109
290,780,879,972,422,800,000,000,000,000,000,000,000
4
Fix multiple security/disclosure issues: - CVE-2019-8696 and CVE-2019-8675: Fixed SNMP buffer overflows (rdar://51685251) - Fixed IPP buffer overflow (rdar://50035411) - Fixed memory disclosure issue in the scheduler (rdar://51373853) - Fixed DoS issues in the scheduler (rdar://51373929)
static BROTLI_INLINE BROTLI_BOOL BrotliSafeReadBits( BrotliBitReader* const br, uint32_t n_bits, uint32_t* val) { BROTLI_DCHECK(n_bits <= 24); while (BrotliGetAvailableBits(br) < n_bits) { if (!BrotliPullByte(br)) { return BROTLI_FALSE; } } BrotliTakeBits(br, n_bits, val); return BROTLI_TRUE; }
0
[ "CWE-120" ]
brotli
223d80cfbec8fd346e32906c732c8ede21f0cea6
107,959,705,521,532,300,000,000,000,000,000,000,000
11
Update (#826) * IMPORTANT: decoder: fix potential overflow when input chunk is >2GiB * simplify max Huffman table size calculation * eliminate symbol duplicates (static arrays in .h files) * minor combing in research/ code
free_urlpos (struct urlpos *l) { while (l) { struct urlpos *next = l->next; if (l->url) url_free (l->url); xfree_null (l->local_name); xfree (l); l = next; } }
0
[ "CWE-20" ]
wget
3e25a9817f47fbb8660cc6a3b2f3eea239526c6c
260,416,569,192,253,150,000,000,000,000,000,000,000
12
Introduce --trust-server-names. Close CVE-2010-2252.
static bool get_signature_algorithm( X509* certificate, std::string& signature_algorithm, SecurityException& exception) { bool returnedValue = false; BUF_MEM* ptr = nullptr; OPENSSL_CONST X509_ALGOR* sigalg = nullptr; OPENSSL_CONST ASN1_BIT_STRING* sig = nullptr; BIO* out = BIO_new(BIO_s_mem()); if (out != nullptr) { X509_get0_signature(&sig, &sigalg, certificate); if (sigalg != nullptr) { if (i2a_ASN1_OBJECT(out, sigalg->algorithm) > 0) { BIO_get_mem_ptr(out, &ptr); if (ptr != nullptr) { if (strncmp(ptr->data, "ecdsa-with-SHA256", ptr->length) == 0) { signature_algorithm = ECDSA_SHA256; returnedValue = true; } else if (strncmp(ptr->data, "sha256WithRSAEncryption", ptr->length) == 0) { signature_algorithm = RSA_SHA256; returnedValue = true; } else if (strncmp(ptr->data, "sha1WithRSAEncryption", ptr->length) == 0) { signature_algorithm = RSA_SHA256; returnedValue = true; } } else { exception = _SecurityException_("OpenSSL library cannot retrieve mem ptr"); } } } else { exception = _SecurityException_("OpenSSL library cannot write cert"); } BIO_free(out); } else { exception = _SecurityException_("OpenSSL library cannot allocate mem"); } return returnedValue; }
0
[ "CWE-284" ]
Fast-DDS
d2aeab37eb4fad4376b68ea4dfbbf285a2926384
44,879,511,822,319,805,000,000,000,000,000,000,000
60
check remote permissions (#1387) * Refs 5346. Blackbox test Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. one-way string compare Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. Do not add partition separator on last partition Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <[email protected]> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Access control unit testing It only covers Partition and Topic permissions Signed-off-by: Iker Luengo <[email protected]> * Refs #3680. Fix partition check on Permissions plugin. Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Uncrustify Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Fix tests on mac Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Fix windows tests Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Avoid memory leak on test Signed-off-by: Iker Luengo <[email protected]> * Refs 3680. Proxy data mocks should not return temporary objects Signed-off-by: Iker Luengo <[email protected]> * refs 3680. uncrustify Signed-off-by: Iker Luengo <[email protected]> Co-authored-by: Miguel Company <[email protected]>
const am_cond_t *am_cond_substitue(request_rec *r, const am_cond_t *ce, const apr_array_header_t *backrefs) { am_cond_t *c; const char *instr = ce->str; apr_size_t inlen = strlen(instr); const char *outstr = ""; size_t last; size_t i; c = (am_cond_t *)apr_pmemdup(r->pool, ce, sizeof(*ce)); last = 0; for (i = strcspn(instr, "%"); i < inlen; i += strcspn(instr + i, "%")) { const char *fstr; const char *ns; const char *name; const char *value; apr_size_t flen; apr_size_t pad; apr_size_t nslen; /* * Make sure we got a % */ assert(instr[i] == '%'); /* * Copy the format string in fstr. It can be a single * digit (e.g.: %1) , or a curly-brace enclosed text * (e.g.: %{12}) */ fstr = instr + i + 1; if (*fstr == '{') { /* Curly-brace enclosed text */ pad = 3; /* 3 for %{} */ fstr++; flen = strcspn(fstr, "}"); /* If there is no closing }, we do not substitute */ if (fstr[flen] == '\0') { pad = 2; /* 2 for %{ */ i += flen + pad; break; } } else if (*fstr == '\0') { /* String ending by a % */ break; } else { /* Single digit */ pad = 1; /* 1 for % */ flen = 1; } /* * Try to extract a namespace (ns) and a name, e.g: %{ENV:foo} */ fstr = apr_pstrndup(r->pool, fstr, flen); if ((nslen = strcspn(fstr, ":")) != flen) { ns = apr_pstrndup(r->pool, fstr, nslen); name = fstr + nslen + 1; /* +1 for : */ } else { nslen = 0; ns = ""; name = fstr; } value = NULL; if ((*ns == '\0') && (strspn(fstr, "0123456789") == flen) && (backrefs != NULL)) { /* * If fstr has only digits, this is a regexp backreference */ int d = (int)apr_atoi64(fstr); if ((d >= 0) && (d < backrefs->nelts)) value = ((const char **)(backrefs->elts))[d]; } else if ((*ns == '\0') && (strcmp(fstr, "%") == 0)) { /* * %-escape */ value = fstr; } else if (strcmp(ns, "ENV") == 0) { /* * ENV namespace. Get value from apache environment. * This is akin to how Apache itself does it during expression evaluation. */ value = apr_table_get(r->subprocess_env, name); if (value == NULL) { value = apr_table_get(r->notes, name); } if (value == NULL) { value = getenv(name); } ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "Resolving \"%s\" from ENV to \"%s\"", name, value == NULL ? "(nothing)" : value); } /* * If we did not find a value, substitue the * format string with an empty string. */ if (value == NULL) value = ""; /* * Concatenate the value with leading text, and * keep track * of the last location we copied in source string */ outstr = apr_pstrcat(r->pool, outstr, apr_pstrndup(r->pool, instr + last, i - last), value, NULL); last = i + flen + pad; /* * Move index to the end of the format string */ i += flen + pad; } /* * Copy text remaining after the last format string. */ outstr = apr_pstrcat(r->pool, outstr, apr_pstrndup(r->pool, instr + last, i - last), NULL); c->str = outstr; ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "Directive %s, \"%s\" substituted into \"%s\"", ce->directive, instr, outstr); /* * If this was a regexp, recompile it. */ if (ce->flags & AM_COND_FLAG_REG) { int regex_flags = AP_REG_EXTENDED|AP_REG_NOSUB; if (ce->flags & AM_COND_FLAG_NC) regex_flags |= AP_REG_ICASE; c->regex = ap_pregcomp(r->pool, outstr, regex_flags); if (c->regex == NULL) { AM_LOG_RERROR(APLOG_MARK, APLOG_WARNING, 0, r, "Invalid regular expression \"%s\"", outstr); return ce; } } return (const am_cond_t *)c; }
0
[ "CWE-601" ]
mod_auth_mellon
42a11261b9dad2e48d70bdff7c53dd57a12db6f5
220,844,362,035,607,520,000,000,000,000,000,000,000
153
Prevent redirect to URLs that begin with '///' Visiting a logout URL like this: https://rp.example.co.jp/mellon/logout?ReturnTo=///fishing-site.example.com/logout.html would have redirected the user to fishing-site.example.com With the patch, this URL would be rejected. Fixes: CVE-2021-3639
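The fix described above rejects ReturnTo values such as "///fishing-site.example.com/...", which browsers treat as scheme-relative URLs pointing off-site. A hedged sketch of such a check, with a hypothetical helper name rather than the module's actual code:

/* Accept only same-site absolute paths: "/foo" is fine, but "//host/..."
 * and "///..." are scheme-relative and would redirect elsewhere. */
static int
is_safe_local_redirect(const char *url)
{
    if (url == NULL || url[0] != '/')
        return 0;
    if (url[1] == '/')
        return 0;
    return 1;
}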
BSONObj operand() { return fromjson("{'':undefined}"); }
0
[ "CWE-835" ]
mongo
0a076417d1d7fba3632b73349a1fd29a83e68816
32,209,830,503,960,820,000,000,000,000,000,000,000
3
SERVER-38070 fix infinite loop in agg expression
onig_region_resize_clear(OnigRegion* region, int n) { int r; r = onig_region_resize(region, n); if (r != 0) return r; onig_region_clear(region); return 0; }
0
[ "CWE-125" ]
oniguruma
690313a061f7a4fa614ec5cc8368b4f2284e059b
42,833,890,890,585,870,000,000,000,000,000,000,000
9
fix #57 : DATA_ENSURE() check must be before data access
static int orinoco_ioctl_setiwencode(struct net_device *dev, struct iw_request_info *info, struct iw_point *erq, char *keybuf) { struct orinoco_private *priv = ndev_priv(dev); int index = (erq->flags & IW_ENCODE_INDEX) - 1; int setindex = priv->tx_key; enum orinoco_alg encode_alg = priv->encode_alg; int restricted = priv->wep_restrict; int err = -EINPROGRESS; /* Call commit handler */ unsigned long flags; if (!priv->has_wep) return -EOPNOTSUPP; if (erq->pointer) { /* We actually have a key to set - check its length */ if (erq->length > LARGE_KEY_SIZE) return -E2BIG; if ((erq->length > SMALL_KEY_SIZE) && !priv->has_big_wep) return -E2BIG; } if (orinoco_lock(priv, &flags) != 0) return -EBUSY; /* Clear any TKIP key we have */ if ((priv->has_wpa) && (priv->encode_alg == ORINOCO_ALG_TKIP)) (void) orinoco_clear_tkip_key(priv, setindex); if (erq->length > 0) { if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) index = priv->tx_key; /* Switch on WEP if off */ if (encode_alg != ORINOCO_ALG_WEP) { setindex = index; encode_alg = ORINOCO_ALG_WEP; } } else { /* Important note : if the user do "iwconfig eth0 enc off", * we will arrive there with an index of -1. This is valid * but need to be taken care off... Jean II */ if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) { if ((index != -1) || (erq->flags == 0)) { err = -EINVAL; goto out; } } else { /* Set the index : Check that the key is valid */ if (priv->keys[index].key_len == 0) { err = -EINVAL; goto out; } setindex = index; } } if (erq->flags & IW_ENCODE_DISABLED) encode_alg = ORINOCO_ALG_NONE; if (erq->flags & IW_ENCODE_OPEN) restricted = 0; if (erq->flags & IW_ENCODE_RESTRICTED) restricted = 1; if (erq->pointer && erq->length > 0) { err = orinoco_set_key(priv, index, ORINOCO_ALG_WEP, keybuf, erq->length, NULL, 0); } priv->tx_key = setindex; /* Try fast key change if connected and only keys are changed */ if ((priv->encode_alg == encode_alg) && (priv->wep_restrict == restricted) && netif_carrier_ok(dev)) { err = __orinoco_hw_setup_wepkeys(priv); /* No need to commit if successful */ goto out; } priv->encode_alg = encode_alg; priv->wep_restrict = restricted; out: orinoco_unlock(priv, &flags); return err; }
0
[]
linux
0a54917c3fc295cb61f3fb52373c173fd3b69f48
72,451,904,944,579,980,000,000,000,000,000,000,000
90
orinoco: fix TKIP countermeasure behaviour Enable the port when disabling countermeasures, and disable it on enabling countermeasures. This bug causes the response of the system to certain attacks to be ineffective. It also prevents wpa_supplicant from getting scan results, as wpa_supplicant disables countermeasures on startup - preventing the hardware from scanning. wpa_supplicant works with ap_mode=2 despite this bug because the commit handler re-enables the port. The log tends to look like: State: DISCONNECTED -> SCANNING Starting AP scan for wildcard SSID Scan requested (ret=0) - scan timeout 5 seconds EAPOL: disable timer tick EAPOL: Supplicant port status: Unauthorized Scan timeout - try to get results Failed to get scan results Failed to get scan results - try scanning again Setting scan request: 1 sec 0 usec Starting AP scan for wildcard SSID Scan requested (ret=-1) - scan timeout 5 seconds Failed to initiate AP scan. Reported by: Giacomo Comes <[email protected]> Signed-off by: David Kilroy <[email protected]> Cc: [email protected] Signed-off-by: John W. Linville <[email protected]>
static int arcmsr_bus_reset(struct scsi_cmnd *cmd) { struct AdapterControlBlock *acb; uint32_t intmask_org, outbound_doorbell; int retry_count = 0; int rtn = FAILED; acb = (struct AdapterControlBlock *) cmd->device->host->hostdata; printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts); acb->num_resets++; switch(acb->adapter_type){ case ACB_ADAPTER_TYPE_A:{ if (acb->acb_flags & ACB_F_BUS_RESET){ long timeout; printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n"); timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ); if (timeout) { return SUCCESS; } } acb->acb_flags |= ACB_F_BUS_RESET; if (!arcmsr_iop_reset(acb)) { struct MessageUnit_A __iomem *reg; reg = acb->pmuA; arcmsr_hardware_reset(acb); acb->acb_flags &= ~ACB_F_IOP_INITED; sleep_again: ssleep(ARCMSR_SLEEPTIME); if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count); if (retry_count > ARCMSR_RETRYCOUNT) { acb->fw_flag = FW_DEADLOCK; printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no); return FAILED; } retry_count++; goto sleep_again; } acb->acb_flags |= ACB_F_IOP_INITED; /* disable all outbound interrupt */ intmask_org = arcmsr_disable_outbound_ints(acb); arcmsr_get_firmware_spec(acb); arcmsr_start_adapter_bgrb(acb); /* clear Qbuffer if door bell ringed */ outbound_doorbell = readl(&reg->outbound_doorbell); writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */ writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell); /* enable outbound Post Queue,outbound doorbell Interrupt */ arcmsr_enable_outbound_ints(acb, intmask_org); atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); acb->acb_flags &= ~ACB_F_BUS_RESET; rtn = SUCCESS; printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n"); } else { acb->acb_flags &= ~ACB_F_BUS_RESET; atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ)); rtn = SUCCESS; } break; } case ACB_ADAPTER_TYPE_B:{ acb->acb_flags |= ACB_F_BUS_RESET; if (!arcmsr_iop_reset(acb)) { acb->acb_flags &= ~ACB_F_BUS_RESET; rtn = FAILED; } else { acb->acb_flags &= ~ACB_F_BUS_RESET; atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); rtn = SUCCESS; } break; } case ACB_ADAPTER_TYPE_C:{ if (acb->acb_flags & ACB_F_BUS_RESET) { long timeout; printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n"); timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ); if (timeout) { return SUCCESS; } } acb->acb_flags |= ACB_F_BUS_RESET; if (!arcmsr_iop_reset(acb)) { struct MessageUnit_C __iomem *reg; reg = acb->pmuC; arcmsr_hardware_reset(acb); acb->acb_flags &= ~ACB_F_IOP_INITED; sleep: ssleep(ARCMSR_SLEEPTIME); if ((readl(&reg->host_diagnostic) & 0x04) != 0) { printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count); if (retry_count > ARCMSR_RETRYCOUNT) { acb->fw_flag = FW_DEADLOCK; printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", 
acb->host->host_no); return FAILED; } retry_count++; goto sleep; } acb->acb_flags |= ACB_F_IOP_INITED; /* disable all outbound interrupt */ intmask_org = arcmsr_disable_outbound_ints(acb); arcmsr_get_firmware_spec(acb); arcmsr_start_adapter_bgrb(acb); /* clear Qbuffer if door bell ringed */ arcmsr_clear_doorbell_queue_buffer(acb); /* enable outbound Post Queue,outbound doorbell Interrupt */ arcmsr_enable_outbound_ints(acb, intmask_org); atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); acb->acb_flags &= ~ACB_F_BUS_RESET; rtn = SUCCESS; printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n"); } else { acb->acb_flags &= ~ACB_F_BUS_RESET; atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ)); rtn = SUCCESS; } break; } case ACB_ADAPTER_TYPE_D: { if (acb->acb_flags & ACB_F_BUS_RESET) { long timeout; pr_notice("arcmsr: there is an bus reset" " eh proceeding.......\n"); timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220 * HZ); if (timeout) return SUCCESS; } acb->acb_flags |= ACB_F_BUS_RESET; if (!arcmsr_iop_reset(acb)) { struct MessageUnit_D *reg; reg = acb->pmuD; arcmsr_hardware_reset(acb); acb->acb_flags &= ~ACB_F_IOP_INITED; nap: ssleep(ARCMSR_SLEEPTIME); if ((readl(reg->sample_at_reset) & 0x80) != 0) { pr_err("arcmsr%d: waiting for " "hw bus reset return, retry=%d\n", acb->host->host_no, retry_count); if (retry_count > ARCMSR_RETRYCOUNT) { acb->fw_flag = FW_DEADLOCK; pr_err("arcmsr%d: waiting for hw bus" " reset return, " "RETRY TERMINATED!!\n", acb->host->host_no); return FAILED; } retry_count++; goto nap; } acb->acb_flags |= ACB_F_IOP_INITED; /* disable all outbound interrupt */ intmask_org = arcmsr_disable_outbound_ints(acb); arcmsr_get_firmware_spec(acb); arcmsr_start_adapter_bgrb(acb); arcmsr_clear_doorbell_queue_buffer(acb); arcmsr_enable_outbound_ints(acb, intmask_org); atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); acb->acb_flags &= ~ACB_F_BUS_RESET; rtn = SUCCESS; pr_err("arcmsr: scsi bus reset " "eh returns with success\n"); } else { acb->acb_flags &= ~ACB_F_BUS_RESET; atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); rtn = SUCCESS; } break; } } return rtn; }
0
[ "CWE-119", "CWE-787" ]
linux
7bc2b55a5c030685b399bb65b6baa9ccc3d1f167
114,963,677,968,249,830,000,000,000,000,000,000,000
197
scsi: arcmsr: Buffer overflow in arcmsr_iop_message_xfer() We need to put an upper bound on "user_len" so the memcpy() doesn't overflow. Cc: <[email protected]> Reported-by: Marco Grassi <[email protected]> Signed-off-by: Dan Carpenter <[email protected]> Reviewed-by: Tomas Henzl <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
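The commit message states the fix is to put an upper bound on "user_len" so the memcpy() cannot overflow. A generic sketch of that clamp, with hypothetical names rather than the driver's actual code:

#include <string.h>

/* Copy a caller-supplied payload only if it fits the destination buffer. */
static int
copy_user_payload(char *dst, size_t dst_size, const char *src, size_t user_len)
{
    if (user_len > dst_size)
        return -1;            /* reject oversized requests */
    memcpy(dst, src, user_len);
    return 0;
}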
X509_REQ *X509_to_X509_REQ(X509 *x, EVP_PKEY *pkey, const EVP_MD *md) { X509_REQ *ret; X509_REQ_INFO *ri; int i; EVP_PKEY *pktmp; ret = X509_REQ_new(); if (ret == NULL) { X509err(X509_F_X509_TO_X509_REQ, ERR_R_MALLOC_FAILURE); goto err; } ri = ret->req_info; ri->version->length = 1; ri->version->data = (unsigned char *)OPENSSL_malloc(1); if (ri->version->data == NULL) goto err; ri->version->data[0] = 0; /* version == 0 */ if (!X509_REQ_set_subject_name(ret, X509_get_subject_name(x))) goto err; pktmp = X509_get_pubkey(x); i = X509_REQ_set_pubkey(ret, pktmp); EVP_PKEY_free(pktmp); if (!i) goto err; if (pkey != NULL) { if (!X509_REQ_sign(ret, pkey, md)) goto err; } return (ret); err: X509_REQ_free(ret); return (NULL); }
1
[]
openssl
28a00bcd8e318da18031b2ac8778c64147cd54f9
219,055,093,684,261,560,000,000,000,000,000,000,000
39
Check public key is not NULL. CVE-2015-0288 PR#3708 Reviewed-by: Matt Caswell <[email protected]>
void ConnectionManagerImpl::ActiveStream::addStreamDecoderFilterWorker( StreamDecoderFilterSharedPtr filter, bool dual_filter) { ActiveStreamDecoderFilterPtr wrapper(new ActiveStreamDecoderFilter(*this, filter, dual_filter)); filter->setDecoderFilterCallbacks(*wrapper); wrapper->moveIntoListBack(std::move(wrapper), decoder_filters_); }
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
312,868,169,504,586,450,000,000,000,000,000,000,000
6
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
static int ZEND_FASTCALL ZEND_FETCH_IS_SPEC_CV_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { return zend_fetch_var_address_helper_SPEC_CV(BP_VAR_IS, ZEND_OPCODE_HANDLER_ARGS_PASSTHRU); }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
321,506,710,166,664,600,000,000,000,000,000,000,000
4
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
MODRET set_useftpusers(cmd_rec *cmd) { int bool = -1; config_rec *c = NULL; CHECK_ARGS(cmd, 1); CHECK_CONF(cmd, CONF_ROOT|CONF_VIRTUAL|CONF_GLOBAL|CONF_ANON); bool = get_boolean(cmd, 1); if (bool == -1) CONF_ERROR(cmd, "expected Boolean parameter"); c = add_config_param(cmd->argv[0], 1, NULL); c->argv[0] = pcalloc(c->pool, sizeof(unsigned char)); *((unsigned char *) c->argv[0]) = bool; c->flags |= CF_MERGEDOWN; return PR_HANDLED(cmd); }
0
[ "CWE-59", "CWE-61" ]
proftpd
ecff21e0d0e84f35c299ef91d7fda088e516d4ed
302,067,601,727,691,070,000,000,000,000,000,000,000
18
Backporting recursive handling of DefaultRoot path, when AllowChrootSymlinks is off, to 1.3.5 branch.
uint32_t writeI32(const int32_t i32) { T_VIRTUAL_CALL(); return writeI32_virt(i32); }
0
[ "CWE-20" ]
thrift
cfaadcc4adcfde2a8232c62ec89870b73ef40df1
338,240,567,823,013,660,000,000,000,000,000,000,000
4
THRIFT-3231 CPP: Limit recursion depth to 64 Client: cpp Patch: Ben Craig <[email protected]>
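The fix limits protocol recursion depth to 64. A minimal sketch of such a depth guard; the struct and names are illustrative (Thrift's actual implementation is C++):

#define MAX_RECURSION_DEPTH 64

struct protocol_reader {
    int depth;
};

/* Call on entering a nested structure; returns -1 if nesting is too deep. */
static int
enter_nested(struct protocol_reader *r)
{
    if (++r->depth > MAX_RECURSION_DEPTH)
        return -1;
    return 0;
}

/* Call on leaving the nested structure. */
static void
leave_nested(struct protocol_reader *r)
{
    r->depth--;
}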
void MSG_WriteDeltaEntity( msg_t *msg, struct entityState_s *from, struct entityState_s *to, qboolean force ) { int i, lc; int numFields; netField_t *field; int trunc; float fullFloat; int *fromF, *toF; numFields = ARRAY_LEN( entityStateFields ); // all fields should be 32 bits to avoid any compiler packing issues // the "number" field is not part of the field list // if this assert fails, someone added a field to the entityState_t // struct without updating the message fields assert( numFields + 1 == sizeof( *from )/4 ); // a NULL to is a delta remove message if ( to == NULL ) { if ( from == NULL ) { return; } MSG_WriteBits( msg, from->number, GENTITYNUM_BITS ); MSG_WriteBits( msg, 1, 1 ); return; } if ( to->number < 0 || to->number >= MAX_GENTITIES ) { Com_Error (ERR_FATAL, "MSG_WriteDeltaEntity: Bad entity number: %i", to->number ); } lc = 0; // build the change vector as bytes so it is endien independent for ( i = 0, field = entityStateFields ; i < numFields ; i++, field++ ) { fromF = (int *)( (byte *)from + field->offset ); toF = (int *)( (byte *)to + field->offset ); if ( *fromF != *toF ) { lc = i+1; } } if ( lc == 0 ) { // nothing at all changed if ( !force ) { return; // nothing at all } // write two bits for no change MSG_WriteBits( msg, to->number, GENTITYNUM_BITS ); MSG_WriteBits( msg, 0, 1 ); // not removed MSG_WriteBits( msg, 0, 1 ); // no delta return; } MSG_WriteBits( msg, to->number, GENTITYNUM_BITS ); MSG_WriteBits( msg, 0, 1 ); // not removed MSG_WriteBits( msg, 1, 1 ); // we have a delta MSG_WriteByte( msg, lc ); // # of changes oldsize += numFields; for ( i = 0, field = entityStateFields ; i < lc ; i++, field++ ) { fromF = (int *)( (byte *)from + field->offset ); toF = (int *)( (byte *)to + field->offset ); if ( *fromF == *toF ) { MSG_WriteBits( msg, 0, 1 ); // no change continue; } MSG_WriteBits( msg, 1, 1 ); // changed if ( field->bits == 0 ) { // float fullFloat = *(float *)toF; trunc = (int)fullFloat; if (fullFloat == 0.0f) { MSG_WriteBits( msg, 0, 1 ); oldsize += FLOAT_INT_BITS; } else { MSG_WriteBits( msg, 1, 1 ); if ( trunc == fullFloat && trunc + FLOAT_INT_BIAS >= 0 && trunc + FLOAT_INT_BIAS < ( 1 << FLOAT_INT_BITS ) ) { // send as small integer MSG_WriteBits( msg, 0, 1 ); MSG_WriteBits( msg, trunc + FLOAT_INT_BIAS, FLOAT_INT_BITS ); } else { // send as full floating point value MSG_WriteBits( msg, 1, 1 ); MSG_WriteBits( msg, *toF, 32 ); } } } else { if (*toF == 0) { MSG_WriteBits( msg, 0, 1 ); } else { MSG_WriteBits( msg, 1, 1 ); // integer MSG_WriteBits( msg, *toF, field->bits ); } } } }
0
[ "CWE-119" ]
ioq3
d2b1d124d4055c2fcbe5126863487c52fd58cca1
216,960,766,804,027,840,000,000,000,000,000,000,000
104
Fix/improve buffer overflow in MSG_ReadBits/MSG_WriteBits Prevent reading past end of message in MSG_ReadBits. If read past end of msg->data buffer (16348 bytes) the engine could SEGFAULT. Make MSG_WriteBits use an exact buffer overflow check instead of possibly failing with a few bytes left.
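The commit message describes switching MSG_WriteBits to an exact buffer overflow check rather than one that could fail with a few bytes left. A simplified sketch of an exact check over a hypothetical structure, not ioquake3's msg_t:

struct bit_message {
    int bit;              /* bits already written */
    int maxsize_bits;     /* capacity of the buffer in bits */
};

/* Exact check: writing `bits` more bits must not exceed capacity. */
static int
can_write_bits(const struct bit_message *m, int bits)
{
    return m->bit + bits <= m->maxsize_bits;
}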
void kernel_bad_stack(struct pt_regs *regs) { printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", regs->gpr[1], regs->nip); die("Bad kernel stack pointer", regs, SIGABRT); }
0
[]
linux
5d176f751ee3c6eededd984ad409bff201f436a7
272,280,689,840,778,900,000,000,000,000,000,000,000
6
powerpc: tm: Enable transactional memory (TM) lazily for userspace Currently the MSR TM bit is always set if the hardware is TM capable. This adds extra overhead as it means the TM SPRS (TFHAR, TEXASR and TFAIR) must be swapped for each process regardless of if they use TM. For processes that don't use TM the TM MSR bit can be turned off allowing the kernel to avoid the expensive swap of the TM registers. A TM unavailable exception will occur if a thread does use TM and the kernel will enable MSR_TM and leave it so for some time afterwards. Signed-off-by: Cyril Bur <[email protected]> Signed-off-by: Michael Ellerman <[email protected]>
ConnStateData::startPinnedConnectionMonitoring() { if (pinning.readHandler != NULL) return; // already monitoring typedef CommCbMemFunT<ConnStateData, CommIoCbParams> Dialer; pinning.readHandler = JobCallback(33, 3, Dialer, this, ConnStateData::clientPinnedConnectionRead); Comm::Read(pinning.serverConnection, pinning.readHandler); }
0
[ "CWE-444" ]
squid
fd68382860633aca92065e6c343cfd1b12b126e7
249,077,728,716,329,100,000,000,000,000,000,000,000
10
Improve Transfer-Encoding handling (#702) Reject messages containing Transfer-Encoding header with coding other than chunked or identity. Squid does not support other codings. For simplicity and security sake, also reject messages where Transfer-Encoding contains unnecessary complex values that are technically equivalent to "chunked" or "identity" (e.g., ",,chunked" or "identity, chunked"). RFC 7230 formally deprecated and removed identity coding, but it is still used by some agents.
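Per the message above, only a lone "chunked" or "identity" Transfer-Encoding value is accepted, and anything more complex (e.g. ",,chunked") is rejected. A hedged sketch of that policy check, with a hypothetical helper name:

#include <string.h>

/* Accept only the two codings Squid supports, as a single plain token. */
static int
transfer_encoding_acceptable(const char *value)
{
    return strcmp(value, "chunked") == 0 || strcmp(value, "identity") == 0;
}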
static int do_decode_custom(const TEST_CUSTOM_DATA *custom_data, const EXPECTED *expected, size_t expected_size, const TEST_PACKAGE *package) { unsigned char *encoding = NULL; /* * We force the defaults to be explicitly encoded to make sure we test * for defaults that shouldn't be present (i.e. we check for failure) */ size_t encoding_length = make_custom_der(custom_data, &encoding, 1); int ret; if (encoding_length == 0) return -1; ret = do_decode(encoding, encoding_length, expected, expected_size, package); OPENSSL_free(encoding); return ret; }
0
[ "CWE-476" ]
openssl
22b88fc9c0e22545401c0b34d24843883ea73fec
10,619,689,933,855,432,000,000,000,000,000,000,000
21
Add a test for encoding/decoding using an invalid ASN.1 Template. If you have a CHOICE type then it must use explicit tagging - otherwise the template is invalid. We add tests for this. Reviewed-by: Tomas Mraz <[email protected]>
static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu) { struct usb_device *udev = sdev->udev; struct usb_host_endpoint *ep; struct usb_endpoint_descriptor *epd = NULL; int epnum = pdu->base.ep; int dir = pdu->base.direction; if (epnum < 0 || epnum > 15) goto err_ret; if (dir == USBIP_DIR_IN) ep = udev->ep_in[epnum & 0x7f]; else ep = udev->ep_out[epnum & 0x7f]; if (!ep) goto err_ret; epd = &ep->desc; /* validate transfer_buffer_length */ if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) { dev_err(&sdev->udev->dev, "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n", pdu->u.cmd_submit.transfer_buffer_length); return -1; } if (usb_endpoint_xfer_control(epd)) { if (dir == USBIP_DIR_OUT) return usb_sndctrlpipe(udev, epnum); else return usb_rcvctrlpipe(udev, epnum); } if (usb_endpoint_xfer_bulk(epd)) { if (dir == USBIP_DIR_OUT) return usb_sndbulkpipe(udev, epnum); else return usb_rcvbulkpipe(udev, epnum); } if (usb_endpoint_xfer_int(epd)) { if (dir == USBIP_DIR_OUT) return usb_sndintpipe(udev, epnum); else return usb_rcvintpipe(udev, epnum); } if (usb_endpoint_xfer_isoc(epd)) { /* validate packet size and number of packets */ unsigned int maxp, packets, bytes; maxp = usb_endpoint_maxp(epd); maxp *= usb_endpoint_maxp_mult(epd); bytes = pdu->u.cmd_submit.transfer_buffer_length; packets = DIV_ROUND_UP(bytes, maxp); if (pdu->u.cmd_submit.number_of_packets < 0 || pdu->u.cmd_submit.number_of_packets > packets) { dev_err(&sdev->udev->dev, "CMD_SUBMIT: isoc invalid num packets %d\n", pdu->u.cmd_submit.number_of_packets); return -1; } if (dir == USBIP_DIR_OUT) return usb_sndisocpipe(udev, epnum); else return usb_rcvisocpipe(udev, epnum); } err_ret: /* NOT REACHED */ dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum); return -1; }
0
[ "CWE-119" ]
linux
c6688ef9f29762e65bce325ef4acd6c675806366
288,864,211,069,491,100,000,000,000,000,000,000,000
76
usbip: fix stub_rx: harden CMD_SUBMIT path to handle malicious input Harden CMD_SUBMIT path to handle malicious input that could trigger large memory allocations. Add checks to validate transfer_buffer_length and number_of_packets to protect against bad input requesting for unbounded memory allocations. Validate early in get_pipe() and return failure. Reported-by: Secunia Research <[email protected]> Cc: stable <[email protected]> Signed-off-by: Shuah Khan <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
uint32_t Scanner::lex_cls_chr() { tok = cur; const loc_t &loc = cur_loc(); #line 798 "../src/parse/lex.re" if (globopts->input_encoding == Enc::ASCII) { #line 3869 "src/parse/lex.cc" { unsigned char yych; unsigned int yyaccept = 0; if ((lim - cur) < 10) { if (!fill(10)) { error("unexpected end of input"); exit(1); } } yych = (unsigned char)*cur; if (yych <= '\f') { if (yych <= 0x00) goto yy587; if (yych == '\n') goto yy591; goto yy589; } else { if (yych <= '\r') goto yy593; if (yych == '\\') goto yy594; goto yy589; } yy587: ++cur; #line 779 "../src/parse/lex.re" { fail_if_eof(); return 0; } #line 3888 "src/parse/lex.cc" yy589: ++cur; yy590: #line 781 "../src/parse/lex.re" { return decode(tok); } #line 3894 "src/parse/lex.cc" yy591: ++cur; #line 773 "../src/parse/lex.re" { msg.error(loc, "newline in character class"); exit(1); } #line 3899 "src/parse/lex.cc" yy593: yych = (unsigned char)*++cur; if (yych == '\n') goto yy591; goto yy590; yy594: yych = (unsigned char)*++cur; if (yych <= '\\') { if (yych <= '/') { if (yych <= '\f') { if (yych <= 0x00) goto yy595; if (yych == '\n') goto yy591; goto yy596; } else { if (yych <= '\r') goto yy598; if (yych == '-') goto yy599; goto yy596; } } else { if (yych <= 'U') { if (yych <= '3') goto yy601; if (yych <= '7') goto yy603; if (yych <= 'T') goto yy596; goto yy604; } else { if (yych == 'X') goto yy606; if (yych <= '[') goto yy596; goto yy607; } } } else { if (yych <= 'n') { if (yych <= 'b') { if (yych <= ']') goto yy609; if (yych <= '`') goto yy596; if (yych <= 'a') goto yy611; goto yy613; } else { if (yych == 'f') goto yy615; if (yych <= 'm') goto yy596; goto yy617; } } else { if (yych <= 't') { if (yych == 'r') goto yy619; if (yych <= 's') goto yy596; goto yy621; } else { if (yych <= 'v') { if (yych <= 'u') goto yy606; goto yy623; } else { if (yych == 'x') goto yy625; goto yy596; } } } } yy595: #line 776 "../src/parse/lex.re" { msg.error(loc, "syntax error in escape sequence"); exit(1); } #line 3960 "src/parse/lex.cc" yy596: ++cur; yy597: #line 794 "../src/parse/lex.re" { msg.warn.useless_escape(loc, tok, cur); return decode(tok + 1); } #line 3969 "src/parse/lex.cc" yy598: yych = (unsigned char)*++cur; if (yych == '\n') goto yy591; goto yy597; yy599: ++cur; #line 792 "../src/parse/lex.re" { return static_cast<uint8_t>('-'); } #line 3978 "src/parse/lex.cc" yy601: yyaccept = 0; yych = (unsigned char)*(mar = ++cur); if (yych <= '/') goto yy602; if (yych <= '7') goto yy626; yy602: #line 775 "../src/parse/lex.re" { msg.error(loc, "syntax error in octal escape sequence"); exit(1); } #line 3987 "src/parse/lex.cc" yy603: ++cur; goto yy602; yy604: yyaccept = 1; yych = (unsigned char)*(mar = ++cur); if (yych <= '@') { if (yych <= '/') goto yy605; if (yych <= '9') goto yy628; } else { if (yych <= 'F') goto yy628; if (yych <= '`') goto yy605; if (yych <= 'f') goto yy628; } yy605: #line 774 "../src/parse/lex.re" { msg.error(loc, "syntax error in hexadecimal escape sequence"); exit(1); } #line 4005 "src/parse/lex.cc" yy606: yyaccept = 1; yych = (unsigned char)*(mar = ++cur); if (yych <= '@') { if (yych <= '/') goto yy605; if (yych <= '9') goto yy629; goto yy605; } else { if (yych <= 'F') goto yy629; if (yych <= '`') goto yy605; if (yych <= 'f') goto yy629; goto yy605; } yy607: ++cur; #line 791 "../src/parse/lex.re" { return static_cast<uint8_t>('\\'); } #line 4023 "src/parse/lex.cc" yy609: ++cur; #line 793 "../src/parse/lex.re" { return static_cast<uint8_t>(']'); } #line 4028 "src/parse/lex.cc" yy611: ++cur; #line 784 "../src/parse/lex.re" { return 
static_cast<uint8_t>('\a'); } #line 4033 "src/parse/lex.cc" yy613: ++cur; #line 785 "../src/parse/lex.re" { return static_cast<uint8_t>('\b'); } #line 4038 "src/parse/lex.cc" yy615: ++cur; #line 786 "../src/parse/lex.re" { return static_cast<uint8_t>('\f'); } #line 4043 "src/parse/lex.cc" yy617: ++cur; #line 787 "../src/parse/lex.re" { return static_cast<uint8_t>('\n'); } #line 4048 "src/parse/lex.cc" yy619: ++cur; #line 788 "../src/parse/lex.re" { return static_cast<uint8_t>('\r'); } #line 4053 "src/parse/lex.cc" yy621: ++cur; #line 789 "../src/parse/lex.re" { return static_cast<uint8_t>('\t'); } #line 4058 "src/parse/lex.cc" yy623: ++cur; #line 790 "../src/parse/lex.re" { return static_cast<uint8_t>('\v'); } #line 4063 "src/parse/lex.cc" yy625: yyaccept = 1; yych = (unsigned char)*(mar = ++cur); if (yych <= '@') { if (yych <= '/') goto yy605; if (yych <= '9') goto yy630; goto yy605; } else { if (yych <= 'F') goto yy630; if (yych <= '`') goto yy605; if (yych <= 'f') goto yy630; goto yy605; } yy626: yych = (unsigned char)*++cur; if (yych <= '/') goto yy627; if (yych <= '7') goto yy631; yy627: cur = mar; if (yyaccept == 0) { goto yy602; } else { goto yy605; } yy628: yych = (unsigned char)*++cur; if (yych <= '@') { if (yych <= '/') goto yy627; if (yych <= '9') goto yy633; goto yy627; } else { if (yych <= 'F') goto yy633; if (yych <= '`') goto yy627; if (yych <= 'f') goto yy633; goto yy627; } yy629: yych = (unsigned char)*++cur; if (yych <= '@') { if (yych <= '/') goto yy627; if (yych <= '9') goto yy634; goto yy627; } else { if (yych <= 'F') goto yy634; if (yych <= '`') goto yy627; if (yych <= 'f') goto yy634; goto yy627; } yy630: yych = (unsigned char)*++cur; if (yych <= '@') { if (yych <= '/') goto yy627; if (yych <= '9') goto yy635; goto yy627; } else { if (yych <= 'F') goto yy635; if (yych <= '`') goto yy627; if (yych <= 'f') goto yy635; goto yy627; } yy631: ++cur; #line 783 "../src/parse/lex.re" { return unesc_oct(tok, cur); } #line 4128 "src/parse/lex.cc" yy633: yych = (unsigned char)*++cur; if (yych <= '@') { if (yych <= '/') goto yy627; if (yych <= '9') goto yy637; goto yy627; } else { if (yych <= 'F') goto yy637; if (yych <= '`') goto yy627; if (yych <= 'f') goto yy637; goto yy627; } yy634: yych = (unsigned char)*++cur; if (yych <= '@') { if (yych <= '/') goto yy627; if (yych <= '9') goto yy630; goto yy627; } else { if (yych <= 'F') goto yy630; if (yych <= '`') goto yy627; if (yych <= 'f') goto yy630; goto yy627; } yy635: ++cur; #line 782 "../src/parse/lex.re" { return unesc_hex(tok, cur); } #line 4157 "src/parse/lex.cc" yy637: yych = (unsigned char)*++cur; if (yych <= '@') { if (yych <= '/') goto yy627; if (yych >= ':') goto yy627; } else { if (yych <= 'F') goto yy638; if (yych <= '`') goto yy627; if (yych >= 'g') goto yy627; } yy638: yych = (unsigned char)*++cur; if (yych <= '@') { if (yych <= '/') goto yy627; if (yych <= '9') goto yy629; goto yy627; } else { if (yych <= 'F') goto yy629; if (yych <= '`') goto yy627; if (yych <= 'f') goto yy629; goto yy627; } } #line 800 "../src/parse/lex.re" } else { #line 4186 "src/parse/lex.cc" { unsigned char yych; unsigned int yyaccept = 0; if ((lim - cur) < 10) { if (!fill(10)) { error("unexpected end of input"); exit(1); } } yych = (unsigned char)*cur; if (yych <= 0x7F) { if (yych <= '\f') { if (yych <= 0x00) goto yy641; if (yych == '\n') goto yy645; goto yy643; } else { if (yych <= '\r') goto yy647; if (yych == '\\') goto yy648; goto yy643; } } else { if (yych <= 0xEF) { if (yych <= 0xC1) goto yy650; if (yych <= 0xDF) goto yy652; if (yych <= 
0xE0) goto yy653; goto yy654; } else { if (yych <= 0xF0) goto yy655; if (yych <= 0xF3) goto yy656; if (yych <= 0xF4) goto yy657; goto yy650; } } yy641: ++cur; #line 779 "../src/parse/lex.re" { fail_if_eof(); return 0; } #line 4219 "src/parse/lex.cc" yy643: ++cur; yy644: #line 781 "../src/parse/lex.re" { return decode(tok); } #line 4225 "src/parse/lex.cc" yy645: ++cur; #line 773 "../src/parse/lex.re" { msg.error(loc, "newline in character class"); exit(1); } #line 4230 "src/parse/lex.cc" yy647: yych = (unsigned char)*++cur; if (yych == '\n') goto yy645; goto yy644; yy648: yyaccept = 0; yych = (unsigned char)*(mar = ++cur); if (yych <= 'b') { if (yych <= '7') { if (yych <= '\r') { if (yych <= '\t') { if (yych >= 0x01) goto yy658; } else { if (yych <= '\n') goto yy645; if (yych <= '\f') goto yy658; goto yy660; } } else { if (yych <= '-') { if (yych <= ',') goto yy658; goto yy661; } else { if (yych <= '/') goto yy658; if (yych <= '3') goto yy663; goto yy665; } } } else { if (yych <= '[') { if (yych <= 'U') { if (yych <= 'T') goto yy658; goto yy666; } else { if (yych == 'X') goto yy668; goto yy658; } } else { if (yych <= ']') { if (yych <= '\\') goto yy669; goto yy671; } else { if (yych <= '`') goto yy658; if (yych <= 'a') goto yy673; goto yy675; } } } } else { if (yych <= 'v') { if (yych <= 'q') { if (yych <= 'f') { if (yych <= 'e') goto yy658; goto yy677; } else { if (yych == 'n') goto yy679; goto yy658; } } else { if (yych <= 's') { if (yych <= 'r') goto yy681; goto yy658; } else { if (yych <= 't') goto yy683; if (yych <= 'u') goto yy668; goto yy685; } } } else { if (yych <= 0xDF) { if (yych <= 'x') { if (yych <= 'w') goto yy658; goto yy687; } else { if (yych <= 0x7F) goto yy658; if (yych >= 0xC2) goto yy688; } } else { if (yych <= 0xF0) { if (yych <= 0xE0) goto yy690; if (yych <= 0xEF) goto yy691; goto yy692; } else { if (yych <= 0xF3) goto yy693; if (yych <= 0xF4) goto yy694; } } } } yy649: #line 776 "../src/parse/lex.re" { msg.error(loc, "syntax error in escape sequence"); exit(1); } #line 4322 "src/parse/lex.cc" yy650: ++cur; yy651: #line 777 "../src/parse/lex.re" { msg.error(loc, "syntax error"); exit(1); } #line 4328 "src/parse/lex.cc" yy652: yych = (unsigned char)*++cur; if (yych <= 0x7F) goto yy651; if (yych <= 0xBF) goto yy643; goto yy651; yy653: yyaccept = 1; yych = (unsigned char)*(mar = ++cur); if (yych <= 0x9F) goto yy651; if (yych <= 0xBF) goto yy695; goto yy651; yy654: yyaccept = 1; yych = (unsigned char)*(mar = ++cur); if (yych <= 0x7F) goto yy651; if (yych <= 0xBF) goto yy695; goto yy651; yy655: yyaccept = 1; yych = (unsigned char)*(mar = ++cur); if (yych <= 0x8F) goto yy651; if (yych <= 0xBF) goto yy696; goto yy651; yy656: yyaccept = 1; yych = (unsigned char)*(mar = ++cur); if (yych <= 0x7F) goto yy651; if (yych <= 0xBF) goto yy696; goto yy651; yy657: yyaccept = 1; yych = (unsigned char)*(mar = ++cur); if (yych <= 0x7F) goto yy651; if (yych <= 0x8F) goto yy696; goto yy651; yy658: ++cur; yy659: #line 794 "../src/parse/lex.re" { msg.warn.useless_escape(loc, tok, cur); return decode(tok + 1); } #line 4372 "src/parse/lex.cc" yy660: yych = (unsigned char)*++cur; if (yych == '\n') goto yy645; goto yy659; yy661: ++cur; #line 792 "../src/parse/lex.re" { return static_cast<uint8_t>('-'); } #line 4381 "src/parse/lex.cc" yy663: yyaccept = 2; yych = (unsigned char)*(mar = ++cur); if (yych <= '/') goto yy664; if (yych <= '7') goto yy697; yy664: #line 775 "../src/parse/lex.re" { msg.error(loc, "syntax error in octal escape sequence"); exit(1); } #line 4390 "src/parse/lex.cc" yy665: 
++cur; goto yy664; yy666: yyaccept = 3; yych = (unsigned char)*(mar = ++cur); if (yych <= '@') { if (yych <= '/') goto yy667; if (yych <= '9') goto yy698; } else { if (yych <= 'F') goto yy698; if (yych <= '`') goto yy667; if (yych <= 'f') goto yy698; } yy667: #line 774 "../src/parse/lex.re" { msg.error(loc, "syntax error in hexadecimal escape sequence"); exit(1); } #line 4408 "src/parse/lex.cc" yy668: yyaccept = 3; yych = (unsigned char)*(mar = ++cur); if (yych <= '@') { if (yych <= '/') goto yy667; if (yych <= '9') goto yy699; goto yy667; } else { if (yych <= 'F') goto yy699; if (yych <= '`') goto yy667; if (yych <= 'f') goto yy699; goto yy667; } yy669: ++cur; #line 791 "../src/parse/lex.re" { return static_cast<uint8_t>('\\'); } #line 4426 "src/parse/lex.cc" yy671: ++cur; #line 793 "../src/parse/lex.re" { return static_cast<uint8_t>(']'); } #line 4431 "src/parse/lex.cc" yy673: ++cur; #line 784 "../src/parse/lex.re" { return static_cast<uint8_t>('\a'); } #line 4436 "src/parse/lex.cc" yy675: ++cur; #line 785 "../src/parse/lex.re" { return static_cast<uint8_t>('\b'); } #line 4441 "src/parse/lex.cc" yy677: ++cur; #line 786 "../src/parse/lex.re" { return static_cast<uint8_t>('\f'); } #line 4446 "src/parse/lex.cc" yy679: ++cur; #line 787 "../src/parse/lex.re" { return static_cast<uint8_t>('\n'); } #line 4451 "src/parse/lex.cc" yy681: ++cur; #line 788 "../src/parse/lex.re" { return static_cast<uint8_t>('\r'); } #line 4456 "src/parse/lex.cc" yy683: ++cur; #line 789 "../src/parse/lex.re" { return static_cast<uint8_t>('\t'); } #line 4461 "src/parse/lex.cc" yy685: ++cur; #line 790 "../src/parse/lex.re" { return static_cast<uint8_t>('\v'); } #line 4466 "src/parse/lex.cc" yy687: yyaccept = 3; yych = (unsigned char)*(mar = ++cur); if (yych <= '@') { if (yych <= '/') goto yy667; if (yych <= '9') goto yy700; goto yy667; } else { if (yych <= 'F') goto yy700; if (yych <= '`') goto yy667; if (yych <= 'f') goto yy700; goto yy667; } yy688: yych = (unsigned char)*++cur; if (yych <= 0x7F) goto yy689; if (yych <= 0xBF) goto yy658; yy689: cur = mar; if (yyaccept <= 1) { if (yyaccept == 0) { goto yy649; } else { goto yy651; } } else { if (yyaccept == 2) { goto yy664; } else { goto yy667; } } yy690: yych = (unsigned char)*++cur; if (yych <= 0x9F) goto yy689; if (yych <= 0xBF) goto yy688; goto yy689; yy691: yych = (unsigned char)*++cur; if (yych <= 0x7F) goto yy689; if (yych <= 0xBF) goto yy688; goto yy689; yy692: yych = (unsigned char)*++cur; if (yych <= 0x8F) goto yy689; if (yych <= 0xBF) goto yy691; goto yy689; yy693: yych = (unsigned char)*++cur; if (yych <= 0x7F) goto yy689; if (yych <= 0xBF) goto yy691; goto yy689; yy694: yych = (unsigned char)*++cur; if (yych <= 0x7F) goto yy689; if (yych <= 0x8F) goto yy691; goto yy689; yy695: yych = (unsigned char)*++cur; if (yych <= 0x7F) goto yy689; if (yych <= 0xBF) goto yy643; goto yy689; yy696: yych = (unsigned char)*++cur; if (yych <= 0x7F) goto yy689; if (yych <= 0xBF) goto yy695; goto yy689; yy697: yych = (unsigned char)*++cur; if (yych <= '/') goto yy689; if (yych <= '7') goto yy701; goto yy689; yy698: yych = (unsigned char)*++cur; if (yych <= '@') { if (yych <= '/') goto yy689; if (yych <= '9') goto yy703; goto yy689; } else { if (yych <= 'F') goto yy703; if (yych <= '`') goto yy689; if (yych <= 'f') goto yy703; goto yy689; } yy699: yych = (unsigned char)*++cur; if (yych <= '@') { if (yych <= '/') goto yy689; if (yych <= '9') goto yy704; goto yy689; } else { if (yych <= 'F') goto yy704; if (yych <= '`') goto yy689; if (yych <= 'f') goto yy704; goto yy689; } 
yy700: yych = (unsigned char)*++cur; if (yych <= '@') { if (yych <= '/') goto yy689; if (yych <= '9') goto yy705; goto yy689; } else { if (yych <= 'F') goto yy705; if (yych <= '`') goto yy689; if (yych <= 'f') goto yy705; goto yy689; } yy701: ++cur; #line 783 "../src/parse/lex.re" { return unesc_oct(tok, cur); } #line 4579 "src/parse/lex.cc" yy703: yych = (unsigned char)*++cur; if (yych <= '@') { if (yych <= '/') goto yy689; if (yych <= '9') goto yy707; goto yy689; } else { if (yych <= 'F') goto yy707; if (yych <= '`') goto yy689; if (yych <= 'f') goto yy707; goto yy689; } yy704: yych = (unsigned char)*++cur; if (yych <= '@') { if (yych <= '/') goto yy689; if (yych <= '9') goto yy700; goto yy689; } else { if (yych <= 'F') goto yy700; if (yych <= '`') goto yy689; if (yych <= 'f') goto yy700; goto yy689; } yy705: ++cur; #line 782 "../src/parse/lex.re" { return unesc_hex(tok, cur); } #line 4608 "src/parse/lex.cc" yy707: yych = (unsigned char)*++cur; if (yych <= '@') { if (yych <= '/') goto yy689; if (yych >= ':') goto yy689; } else { if (yych <= 'F') goto yy708; if (yych <= '`') goto yy689; if (yych >= 'g') goto yy689; } yy708: yych = (unsigned char)*++cur; if (yych <= '@') { if (yych <= '/') goto yy689; if (yych <= '9') goto yy699; goto yy689; } else { if (yych <= 'F') goto yy699; if (yych <= '`') goto yy689; if (yych <= 'f') goto yy699; goto yy689; } } #line 803 "../src/parse/lex.re" } }
1
[ "CWE-787" ]
re2c
039c18949190c5de5397eba504d2c75dad2ea9ca
51,763,806,644,965,790,000,000,000,000,000,000,000
775
Emit an error when repetition lower bound exceeds upper bound. Historically this was allowed and re2c swapped the bounds. However, it most likely indicates an error in user code and there is only a single occurrence in the tests (and the test is an artificial one), so although the change is backwards incompatible there is a low chance of breaking real-world code. This fixes the second test case in bug #394 "Stack overflow due to recursion in src/dfa/dead_rules.cc" (the actual fix is to limit DFA size but the test also has counted repetition with swapped bounds).
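A rough sketch of the validation described above, rejecting a counted repetition whose lower bound exceeds its upper bound instead of silently swapping the bounds; the function name and error wording are assumptions, not re2c's actual code.

#include <stdio.h>

/* Returns 1 if the {min,max} repetition bounds are acceptable, 0 after
 * reporting an error when min > max (swapped bounds almost always mean a
 * mistake in the user's regexp). */
static int check_rep_bounds(unsigned int min, unsigned int max)
{
    if (min > max) {
        fprintf(stderr,
                "error: repetition lower bound %u exceeds upper bound %u\n",
                min, max);
        return 0;
    }
    return 1;
}

int main(void)
{
    return check_rep_bounds(3, 1) ? 0 : 1;   /* exits non-zero: 3 > 1 */
}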
void sctp_endpoint_add_asoc(struct sctp_endpoint *ep, struct sctp_association *asoc) { struct sock *sk = ep->base.sk; /* If this is a temporary association, don't bother * since we'll be removing it shortly and don't * want anyone to find it anyway. */ if (asoc->temp) return; /* Now just add it to our list of asocs */ list_add_tail(&asoc->asocs, &ep->asocs); /* Increment the backlog value for a TCP-style listening socket. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) sk->sk_ack_backlog++; }
0
[]
linux-2.6
5e739d1752aca4e8f3e794d431503bfca3162df4
42,281,242,887,864,108,000,000,000,000,000,000,000
19
sctp: fix potential panics in the SCTP-AUTH API. All of the SCTP-AUTH socket options could cause a panic if the extension is disabled and the API is invoked. Additionally, there were some assumptions that certain pointers would always be valid which may not always be the case. This patch hardens the API and addresses all of the crash scenarios. Signed-off-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static Token *expand_mmac_params_range(MMacro *mac, Token *tline, Token ***last) { Token *t = tline, **tt, *tm, *head; char *pos; int fst, lst, j, i; pos = strchr(tline->text, ':'); nasm_assert(pos); lst = atoi(pos + 1); fst = atoi(tline->text + 1); /* * only macros params are accounted so * if someone passes %0 -- we reject such * value(s) */ if (lst == 0 || fst == 0) goto err; /* the values should be sane */ if ((fst > (int)mac->nparam || fst < (-(int)mac->nparam)) || (lst > (int)mac->nparam || lst < (-(int)mac->nparam))) goto err; fst = fst < 0 ? fst + (int)mac->nparam + 1: fst; lst = lst < 0 ? lst + (int)mac->nparam + 1: lst; /* counted from zero */ fst--, lst--; /* * It will be at least one token. Note we * need to scan params until separator, otherwise * only first token will be passed. */ tm = mac->params[(fst + mac->rotate) % mac->nparam]; head = new_Token(NULL, tm->type, tm->text, 0); tt = &head->next, tm = tm->next; while (tok_isnt_(tm, ",")) { t = new_Token(NULL, tm->type, tm->text, 0); *tt = t, tt = &t->next, tm = tm->next; } if (fst < lst) { for (i = fst + 1; i <= lst; i++) { t = new_Token(NULL, TOK_OTHER, ",", 0); *tt = t, tt = &t->next; j = (i + mac->rotate) % mac->nparam; tm = mac->params[j]; while (tok_isnt_(tm, ",")) { t = new_Token(NULL, tm->type, tm->text, 0); *tt = t, tt = &t->next, tm = tm->next; } } } else { for (i = fst - 1; i >= lst; i--) { t = new_Token(NULL, TOK_OTHER, ",", 0); *tt = t, tt = &t->next; j = (i + mac->rotate) % mac->nparam; tm = mac->params[j]; while (tok_isnt_(tm, ",")) { t = new_Token(NULL, tm->type, tm->text, 0); *tt = t, tt = &t->next, tm = tm->next; } } } *last = tt; return head; err: nasm_error(ERR_NONFATAL, "`%%{%s}': macro parameters out of range", &tline->text[1]); return tline; }
0
[ "CWE-125" ]
nasm
3144e84add8b152cc7a71e44617ce6f21daa4ba3
295,464,766,006,681,940,000,000,000,000,000,000,000
76
preproc: Don't access offsetting byte on unterminated strings https://bugzilla.nasm.us/show_bug.cgi?id=3392446 Signed-off-by: Cyrill Gorcunov <[email protected]>
void kill(const UChar32* text, int textLen, bool forward) { if (textLen == 0) { return; } Utf32String killedText(text, textLen); if (lastAction == actionKill && size > 0) { int slot = indexToSlot[0]; int currentLen = theRing[slot].length(); int resultLen = currentLen + textLen; Utf32String temp(resultLen + 1); if (forward) { memcpy(temp.get(), theRing[slot].get(), currentLen * sizeof(UChar32)); memcpy(&temp[currentLen], killedText.get(), textLen * sizeof(UChar32)); } else { memcpy(temp.get(), killedText.get(), textLen * sizeof(UChar32)); memcpy(&temp[textLen], theRing[slot].get(), currentLen * sizeof(UChar32)); } temp[resultLen] = 0; temp.initFromBuffer(); theRing[slot] = temp; } else { if (size < capacity) { if (size > 0) { memmove(&indexToSlot[1], &indexToSlot[0], size); } indexToSlot[0] = size; size++; theRing.push_back(killedText); } else { int slot = indexToSlot[capacity - 1]; theRing[slot] = killedText; memmove(&indexToSlot[1], &indexToSlot[0], capacity - 1); indexToSlot[0] = slot; } index = 0; } }
0
[ "CWE-200" ]
mongo
035cf2afc04988b22cb67f4ebfd77e9b344cb6e0
111,656,607,722,751,460,000,000,000,000,000,000,000
37
SERVER-25335 avoid group and other permissions when creating .dbshell history file
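A minimal sketch of the hardening described above: create the shell history file with owner-only permissions so the group and other bits are never set. The path, function name, and use of POSIX open() are illustrative assumptions, not MongoDB's actual implementation.

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* Open (creating if necessary) a history file that only the owner can
 * read or write: mode 0600 keeps group and other permission bits clear. */
int open_history_file(const char *path)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_APPEND, S_IRUSR | S_IWUSR);
    if (fd < 0)
        perror("open history file");
    return fd;
}

int main(void)
{
    int fd = open_history_file(".dbshell_history_example");
    if (fd >= 0)
        close(fd);
    return fd < 0;
}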
static bool ComputeLoadStoreField(Handle<Map> type, Handle<String> name, LookupResult* lookup, bool is_store) { // If we directly find a field, the access can be inlined. type->LookupDescriptor(NULL, *name, lookup); if (lookup->IsField()) return true; // For a load, we are out of luck if there is no such field. if (!is_store) return false; // 2nd chance: A store into a non-existent field can still be inlined if we // have a matching transition and some room left in the object. type->LookupTransition(NULL, *name, lookup); return lookup->IsTransitionToField(*type) && (type->unused_property_fields() > 0); }
0
[]
node
fd80a31e0697d6317ce8c2d289575399f4e06d21
276,499,696,399,347,300,000,000,000,000,000,000,000
17
deps: backport 5f836c from v8 upstream Original commit message: Fix Hydrogen bounds check elimination When combining bounds checks, they must all be moved before the first load/store that they are guarding. BUG=chromium:344186 LOG=y [email protected] Review URL: https://codereview.chromium.org/172093002 git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@19475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 fix #8070
long ContentEncoding::ParseContentEncodingEntry(long long start, long long size, IMkvReader* pReader) { assert(pReader); long long pos = start; const long long stop = start + size; // Count ContentCompression and ContentEncryption elements. int compression_count = 0; int encryption_count = 0; while (pos < stop) { long long id, size; const long status = ParseElementHeader(pReader, pos, stop, id, size); if (status < 0) // error return status; if (id == libwebm::kMkvContentCompression) ++compression_count; if (id == libwebm::kMkvContentEncryption) ++encryption_count; pos += size; // consume payload if (pos > stop) return E_FILE_FORMAT_INVALID; } if (compression_count <= 0 && encryption_count <= 0) return -1; if (compression_count > 0) { compression_entries_ = new (std::nothrow) ContentCompression*[compression_count]; if (!compression_entries_) return -1; compression_entries_end_ = compression_entries_; } if (encryption_count > 0) { encryption_entries_ = new (std::nothrow) ContentEncryption*[encryption_count]; if (!encryption_entries_) { delete[] compression_entries_; return -1; } encryption_entries_end_ = encryption_entries_; } pos = start; while (pos < stop) { long long id, size; long status = ParseElementHeader(pReader, pos, stop, id, size); if (status < 0) // error return status; if (id == libwebm::kMkvContentEncodingOrder) { encoding_order_ = UnserializeUInt(pReader, pos, size); } else if (id == libwebm::kMkvContentEncodingScope) { encoding_scope_ = UnserializeUInt(pReader, pos, size); if (encoding_scope_ < 1) return -1; } else if (id == libwebm::kMkvContentEncodingType) { encoding_type_ = UnserializeUInt(pReader, pos, size); } else if (id == libwebm::kMkvContentCompression) { ContentCompression* const compression = new (std::nothrow) ContentCompression(); if (!compression) return -1; status = ParseCompressionEntry(pos, size, pReader, compression); if (status) { delete compression; return status; } *compression_entries_end_++ = compression; } else if (id == libwebm::kMkvContentEncryption) { ContentEncryption* const encryption = new (std::nothrow) ContentEncryption(); if (!encryption) return -1; status = ParseEncryptionEntry(pos, size, pReader, encryption); if (status) { delete encryption; return status; } *encryption_entries_end_++ = encryption; } pos += size; // consume payload if (pos > stop) return E_FILE_FORMAT_INVALID; } if (pos != stop) return E_FILE_FORMAT_INVALID; return 0; }
1
[ "CWE-415" ]
libvpx
6a7c84a2449dcc70de2525df209afea908622399
119,330,497,030,844,880,000,000,000,000,000,000,000
99
update libwebm to libwebm-1.0.0.27-361-g81de00c 81de00c Check there is only one settings per ContentCompression 5623013 Fixes a double free in ContentEncoding 93b2ba0 mkvparser: quiet static analysis warnings Change-Id: Ieaa562ef2f10075381bd856388e6b29f97ca2746
static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval) { s64 sum = timr->it_overrun_last + (s64)baseval; return sum > (s64)INT_MAX ? INT_MAX : (int)sum; }
0
[ "CWE-190" ]
linux
78c9c4dfbf8c04883941445a195276bb4bb92c76
264,659,069,734,820,300,000,000,000,000,000,000,000
6
posix-timers: Sanitize overrun handling The posix timer overrun handling is broken because the forwarding functions can return a huge number of overruns which does not fit in an int. As a consequence timer_getoverrun(2) and siginfo::si_overrun can turn into random number generators. The k_clock::timer_forward() callbacks return a 64 bit value now. Make k_itimer::ti_overrun[_last] 64bit as well, so the kernel internal accounting is correct. Remove the temporary (int) casts. Add a helper function which clamps the overrun value returned to user space via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value between 0 and INT_MAX. INT_MAX is an indicator for user space that the overrun value has been clamped. Reported-by: Team OWL337 <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Acked-by: John Stultz <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Michael Kerrisk <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
static int vnc_width(VncDisplay *vd) { return MIN(VNC_MAX_WIDTH, ROUND_UP(surface_width(vd->ds), VNC_DIRTY_PIXELS_PER_BIT)); }
0
[]
qemu
4c65fed8bdf96780735dbdb92a8bd0d6b6526cc3
90,883,246,124,172,400,000,000,000,000,000,000,000
5
ui: vnc: avoid floating point exception While sending 'SetPixelFormat' messages to a VNC server, the client could set the 'red-max', 'green-max' and 'blue-max' values to be zero. This leads to a floating point exception in write_png_palette while doing frame buffer updates. Reported-by: Lian Yihan <[email protected]> Signed-off-by: Prasad J Pandit <[email protected]> Reviewed-by: Gerd Hoffmann <[email protected]> Signed-off-by: Peter Maydell <[email protected]>
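To make the failure mode above concrete, a hedged sketch of validating client-supplied pixel-format maxima before they are used as divisors; the struct and field names are assumptions, not QEMU's actual VNC types.

#include <stdbool.h>
#include <stdint.h>

/* Pixel-format maxima as a VNC client might send them in SetPixelFormat. */
struct pixfmt_sketch {
    uint16_t red_max, green_max, blue_max;
};

/* Reject maxima of zero: they are later used as divisors when quantizing
 * colours, so accepting a zero leads to a division-by-zero crash. */
static bool pixfmt_is_sane(const struct pixfmt_sketch *pf)
{
    return pf->red_max != 0 && pf->green_max != 0 && pf->blue_max != 0;
}

int main(void)
{
    struct pixfmt_sketch bad = { 0, 255, 255 };
    return pixfmt_is_sane(&bad) ? 0 : 1;   /* exits non-zero: red_max is 0 */
}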
static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { struct exit_ctl_data *p = kvm_get_exit_data(vcpu); struct kvm *kvm = vcpu->kvm; struct call_data call_data; int i; struct kvm_vcpu *vcpui; call_data.ptc_g_data = p->u.ptc_g_data; kvm_for_each_vcpu(i, vcpui, kvm) { if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED || vcpu == vcpui) continue; if (waitqueue_active(&vcpui->wq)) wake_up_interruptible(&vcpui->wq); if (vcpui->cpu != -1) { call_data.vcpu = vcpui; smp_call_function_single(vcpui->cpu, vcpu_global_purge, &call_data, 1); } else printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); } return 1; }
0
[ "CWE-399" ]
kvm
5b40572ed5f0344b9dbee486a17c589ce1abe1a3
262,238,030,572,961,860,000,000,000,000,000,000,000
28
KVM: Ensure all vcpus are consistent with in-kernel irqchip settings If some vcpus are created before KVM_CREATE_IRQCHIP, then irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading to potential NULL pointer dereferences. Fix by: - ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called - ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP This is somewhat long winded because vcpu->arch.apic is created without kvm->lock held. Based on earlier patch by Michael Ellerman. Signed-off-by: Michael Ellerman <[email protected]> Signed-off-by: Avi Kivity <[email protected]>
static apr_byte_t oidc_provider_static_config(request_rec *r, oidc_cfg *c, oidc_provider_t **provider) { json_t *j_provider = NULL; const char *s_json = NULL; /* see if we should configure a static provider based on external (cached) metadata */ if ((c->metadata_dir != NULL) || (c->provider.metadata_url == NULL)) { *provider = &c->provider; return TRUE; } c->cache->get(r, OIDC_CACHE_SECTION_PROVIDER, oidc_util_escape_string(r, c->provider.metadata_url), &s_json); if (s_json == NULL) { if (oidc_metadata_provider_retrieve(r, c, NULL, c->provider.metadata_url, &j_provider, &s_json) == FALSE) { oidc_error(r, "could not retrieve metadata from url: %s", c->provider.metadata_url); return FALSE; } c->cache->set(r, OIDC_CACHE_SECTION_PROVIDER, oidc_util_escape_string(r, c->provider.metadata_url), s_json, apr_time_now() + (c->provider_metadata_refresh_interval <= 0 ? apr_time_from_sec( OIDC_CACHE_PROVIDER_METADATA_EXPIRY_DEFAULT) : c->provider_metadata_refresh_interval)); } else { /* correct parsing and validation was already done when it was put in the cache */ j_provider = json_loads(s_json, 0, 0); } *provider = apr_pcalloc(r->pool, sizeof(oidc_provider_t)); memcpy(*provider, &c->provider, sizeof(oidc_provider_t)); if (oidc_metadata_provider_parse(r, c, j_provider, *provider) == FALSE) { oidc_error(r, "could not parse metadata from url: %s", c->provider.metadata_url); if (j_provider) json_decref(j_provider); return FALSE; } json_decref(j_provider); return TRUE; }
0
[ "CWE-20" ]
mod_auth_openidc
612e309bfffd6f9b8ad7cdccda3019fc0865f3b4
98,585,485,559,344,240,000,000,000,000,000,000,000
53
don't echo query params on invalid requests to redirect URI; closes #212 thanks @LukasReschke; I'm sure there's some OWASP guideline that warns against this
njs_module_read(njs_vm_t *vm, int fd, njs_str_t *text) { ssize_t n; struct stat sb; text->start = NULL; if (fstat(fd, &sb) == -1) { goto fail; } if (!S_ISREG(sb.st_mode)) { goto fail; } text->length = sb.st_size; text->start = njs_mp_alloc(vm->mem_pool, text->length); if (text->start == NULL) { goto fail; } n = read(fd, text->start, sb.st_size); if (n < 0 || n != sb.st_size) { goto fail; } return NJS_OK; fail: if (text->start != NULL) { njs_mp_free(vm->mem_pool, text->start); } return NJS_ERROR; }
0
[ "CWE-787" ]
njs
ab1702c7af9959366a5ddc4a75b4357d4e9ebdc1
195,911,266,113,051,770,000,000,000,000,000,000,000
38
Fixed typo while calculating module path length. The issue was introduced in 77c398f26d7e (not released yet).
static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { if (dbgregs->flags) return -EINVAL; if (!kvm_dr6_valid(dbgregs->dr6)) return -EINVAL; if (!kvm_dr7_valid(dbgregs->dr7)) return -EINVAL; memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); kvm_update_dr0123(vcpu); vcpu->arch.dr6 = dbgregs->dr6; vcpu->arch.dr7 = dbgregs->dr7; kvm_update_dr7(vcpu); return 0; }
0
[ "CWE-476" ]
linux
55749769fe608fa3f4a075e42e89d237c8e37637
28,473,497,513,421,527,000,000,000,000,000,000,000
19
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty When dirty ring logging is enabled, any dirty logging without an active vCPU context will cause a kernel oops. But we've already declared that the shared_info page doesn't get dirty tracking anyway, since it would be kind of insane to mark it dirty every time we deliver an event channel interrupt. Userspace is supposed to just assume it's always dirty any time a vCPU can run or event channels are routed. So stop using the generic kvm_write_wall_clock() and just write directly through the gfn_to_pfn_cache that we already have set up. We can make kvm_write_wall_clock() static in x86.c again now, but let's not remove the 'sec_hi_ofs' argument even though it's not used yet. At some point we *will* want to use that for KVM guests too. Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region") Reported-by: butt3rflyh4ck <[email protected]> Signed-off-by: David Woodhouse <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static INLINE OPJ_BOOL opj_tcd_init_tile(opj_tcd_t *p_tcd, OPJ_UINT32 p_tile_no, OPJ_BOOL isEncoder, OPJ_FLOAT32 fraction, OPJ_SIZE_T sizeof_block) { OPJ_UINT32 (*l_gain_ptr)(OPJ_UINT32) = 00; OPJ_UINT32 compno, resno, bandno, precno, cblkno; opj_tcp_t * l_tcp = 00; opj_cp_t * l_cp = 00; opj_tcd_tile_t * l_tile = 00; opj_tccp_t *l_tccp = 00; opj_tcd_tilecomp_t *l_tilec = 00; opj_image_comp_t * l_image_comp = 00; opj_tcd_resolution_t *l_res = 00; opj_tcd_band_t *l_band = 00; opj_stepsize_t * l_step_size = 00; opj_tcd_precinct_t *l_current_precinct = 00; opj_image_t *l_image = 00; OPJ_UINT32 p,q; OPJ_UINT32 l_level_no; OPJ_UINT32 l_pdx, l_pdy; OPJ_UINT32 l_gain; OPJ_INT32 l_x0b, l_y0b; /* extent of precincts , top left, bottom right**/ OPJ_INT32 l_tl_prc_x_start, l_tl_prc_y_start, l_br_prc_x_end, l_br_prc_y_end; /* number of precinct for a resolution */ OPJ_UINT32 l_nb_precincts; /* room needed to store l_nb_precinct precinct for a resolution */ OPJ_UINT32 l_nb_precinct_size; /* number of code blocks for a precinct*/ OPJ_UINT32 l_nb_code_blocks; /* room needed to store l_nb_code_blocks code blocks for a precinct*/ OPJ_UINT32 l_nb_code_blocks_size; /* size of data for a tile */ OPJ_UINT32 l_data_size; l_cp = p_tcd->cp; l_tcp = &(l_cp->tcps[p_tile_no]); l_tile = p_tcd->tcd_image->tiles; l_tccp = l_tcp->tccps; l_tilec = l_tile->comps; l_image = p_tcd->image; l_image_comp = p_tcd->image->comps; p = p_tile_no % l_cp->tw; /* tile coordinates */ q = p_tile_no / l_cp->tw; /*fprintf(stderr, "Tile coordinate = %d,%d\n", p, q);*/ /* 4 borders of the tile rescale on the image if necessary */ l_tile->x0 = opj_int_max((OPJ_INT32)(l_cp->tx0 + p * l_cp->tdx), (OPJ_INT32)l_image->x0); l_tile->y0 = opj_int_max((OPJ_INT32)(l_cp->ty0 + q * l_cp->tdy), (OPJ_INT32)l_image->y0); l_tile->x1 = opj_int_min((OPJ_INT32)(l_cp->tx0 + (p + 1) * l_cp->tdx), (OPJ_INT32)l_image->x1); l_tile->y1 = opj_int_min((OPJ_INT32)(l_cp->ty0 + (q + 1) * l_cp->tdy), (OPJ_INT32)l_image->y1); /* testcase 1888.pdf.asan.35.988 */ if (l_tccp->numresolutions == 0) { fprintf(stderr, "tiles require at least one resolution\n"); return OPJ_FALSE; } /*fprintf(stderr, "Tile border = %d,%d,%d,%d\n", l_tile->x0, l_tile->y0,l_tile->x1,l_tile->y1);*/ /*tile->numcomps = image->numcomps; */ for (compno = 0; compno < l_tile->numcomps; ++compno) { /*fprintf(stderr, "compno = %d/%d\n", compno, l_tile->numcomps);*/ l_image_comp->resno_decoded = 0; /* border of each l_tile component (global) */ l_tilec->x0 = opj_int_ceildiv(l_tile->x0, (OPJ_INT32)l_image_comp->dx); l_tilec->y0 = opj_int_ceildiv(l_tile->y0, (OPJ_INT32)l_image_comp->dy); l_tilec->x1 = opj_int_ceildiv(l_tile->x1, (OPJ_INT32)l_image_comp->dx); l_tilec->y1 = opj_int_ceildiv(l_tile->y1, (OPJ_INT32)l_image_comp->dy); /*fprintf(stderr, "\tTile compo border = %d,%d,%d,%d\n", l_tilec->x0, l_tilec->y0,l_tilec->x1,l_tilec->y1);*/ /* compute l_data_size with overflow check */ l_data_size = (OPJ_UINT32)(l_tilec->x1 - l_tilec->x0); if ((((OPJ_UINT32)-1) / l_data_size) < (OPJ_UINT32)(l_tilec->y1 - l_tilec->y0)) { /* TODO event */ return OPJ_FALSE; } l_data_size = l_data_size * (OPJ_UINT32)(l_tilec->y1 - l_tilec->y0); if ((((OPJ_UINT32)-1) / (OPJ_UINT32)sizeof(OPJ_UINT32)) < l_data_size) { /* TODO event */ return OPJ_FALSE; } l_data_size = l_data_size * (OPJ_UINT32)sizeof(OPJ_UINT32); l_tilec->numresolutions = l_tccp->numresolutions; if (l_tccp->numresolutions < l_cp->m_specific_param.m_dec.m_reduce) { l_tilec->minimum_num_resolutions = 1; } else { l_tilec->minimum_num_resolutions = l_tccp->numresolutions - 
l_cp->m_specific_param.m_dec.m_reduce; } l_tilec->data_size_needed = l_data_size; if (p_tcd->m_is_decoder && !opj_alloc_tile_component_data(l_tilec)) { return OPJ_FALSE; } l_data_size = l_tilec->numresolutions * (OPJ_UINT32)sizeof(opj_tcd_resolution_t); if (l_tilec->resolutions == 00) { l_tilec->resolutions = (opj_tcd_resolution_t *) opj_malloc(l_data_size); if (! l_tilec->resolutions ) { return OPJ_FALSE; } /*fprintf(stderr, "\tAllocate resolutions of tilec (opj_tcd_resolution_t): %d\n",l_data_size);*/ l_tilec->resolutions_size = l_data_size; memset(l_tilec->resolutions,0,l_data_size); } else if (l_data_size > l_tilec->resolutions_size) { opj_tcd_resolution_t* new_resolutions = (opj_tcd_resolution_t *) opj_realloc(l_tilec->resolutions, l_data_size); if (! new_resolutions) { /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to tile resolutions\n"); */ fprintf(stderr, "Not enough memory to tile resolutions\n"); opj_free(l_tilec->resolutions); l_tilec->resolutions = NULL; l_tilec->resolutions_size = 0; return OPJ_FALSE; } l_tilec->resolutions = new_resolutions; /*fprintf(stderr, "\tReallocate data of tilec (int): from %d to %d x OPJ_UINT32\n", l_tilec->resolutions_size, l_data_size);*/ memset(((OPJ_BYTE*) l_tilec->resolutions)+l_tilec->resolutions_size,0,l_data_size - l_tilec->resolutions_size); l_tilec->resolutions_size = l_data_size; } l_level_no = l_tilec->numresolutions - 1; l_res = l_tilec->resolutions; l_step_size = l_tccp->stepsizes; if (l_tccp->qmfbid == 0) { l_gain_ptr = &opj_dwt_getgain_real; } else { l_gain_ptr = &opj_dwt_getgain; } /*fprintf(stderr, "\tlevel_no=%d\n",l_level_no);*/ for (resno = 0; resno < l_tilec->numresolutions; ++resno) { /*fprintf(stderr, "\t\tresno = %d/%d\n", resno, l_tilec->numresolutions);*/ OPJ_INT32 tlcbgxstart, tlcbgystart /*, brcbgxend, brcbgyend*/; OPJ_UINT32 cbgwidthexpn, cbgheightexpn; OPJ_UINT32 cblkwidthexpn, cblkheightexpn; /* border for each resolution level (global) */ l_res->x0 = opj_int_ceildivpow2(l_tilec->x0, (OPJ_INT32)l_level_no); l_res->y0 = opj_int_ceildivpow2(l_tilec->y0, (OPJ_INT32)l_level_no); l_res->x1 = opj_int_ceildivpow2(l_tilec->x1, (OPJ_INT32)l_level_no); l_res->y1 = opj_int_ceildivpow2(l_tilec->y1, (OPJ_INT32)l_level_no); /*fprintf(stderr, "\t\t\tres_x0= %d, res_y0 =%d, res_x1=%d, res_y1=%d\n", l_res->x0, l_res->y0, l_res->x1, l_res->y1);*/ /* p. 35, table A-23, ISO/IEC FDIS154444-1 : 2000 (18 august 2000) */ l_pdx = l_tccp->prcw[resno]; l_pdy = l_tccp->prch[resno]; /*fprintf(stderr, "\t\t\tpdx=%d, pdy=%d\n", l_pdx, l_pdy);*/ /* p. 64, B.6, ISO/IEC FDIS15444-1 : 2000 (18 august 2000) */ l_tl_prc_x_start = opj_int_floordivpow2(l_res->x0, (OPJ_INT32)l_pdx) << l_pdx; l_tl_prc_y_start = opj_int_floordivpow2(l_res->y0, (OPJ_INT32)l_pdy) << l_pdy; l_br_prc_x_end = opj_int_ceildivpow2(l_res->x1, (OPJ_INT32)l_pdx) << l_pdx; l_br_prc_y_end = opj_int_ceildivpow2(l_res->y1, (OPJ_INT32)l_pdy) << l_pdy; /*fprintf(stderr, "\t\t\tprc_x_start=%d, prc_y_start=%d, br_prc_x_end=%d, br_prc_y_end=%d \n", l_tl_prc_x_start, l_tl_prc_y_start, l_br_prc_x_end ,l_br_prc_y_end );*/ l_res->pw = (l_res->x0 == l_res->x1) ? 0 : (OPJ_UINT32)((l_br_prc_x_end - l_tl_prc_x_start) >> l_pdx); l_res->ph = (l_res->y0 == l_res->y1) ? 
0 : (OPJ_UINT32)((l_br_prc_y_end - l_tl_prc_y_start) >> l_pdy); /*fprintf(stderr, "\t\t\tres_pw=%d, res_ph=%d\n", l_res->pw, l_res->ph );*/ l_nb_precincts = l_res->pw * l_res->ph; l_nb_precinct_size = l_nb_precincts * (OPJ_UINT32)sizeof(opj_tcd_precinct_t); if (resno == 0) { tlcbgxstart = l_tl_prc_x_start; tlcbgystart = l_tl_prc_y_start; /*brcbgxend = l_br_prc_x_end;*/ /* brcbgyend = l_br_prc_y_end;*/ cbgwidthexpn = l_pdx; cbgheightexpn = l_pdy; l_res->numbands = 1; } else { tlcbgxstart = opj_int_ceildivpow2(l_tl_prc_x_start, 1); tlcbgystart = opj_int_ceildivpow2(l_tl_prc_y_start, 1); /*brcbgxend = opj_int_ceildivpow2(l_br_prc_x_end, 1);*/ /*brcbgyend = opj_int_ceildivpow2(l_br_prc_y_end, 1);*/ cbgwidthexpn = l_pdx - 1; cbgheightexpn = l_pdy - 1; l_res->numbands = 3; } cblkwidthexpn = opj_uint_min(l_tccp->cblkw, cbgwidthexpn); cblkheightexpn = opj_uint_min(l_tccp->cblkh, cbgheightexpn); l_band = l_res->bands; for (bandno = 0; bandno < l_res->numbands; ++bandno) { OPJ_INT32 numbps; /*fprintf(stderr, "\t\t\tband_no=%d/%d\n", bandno, l_res->numbands );*/ if (resno == 0) { l_band->bandno = 0 ; l_band->x0 = opj_int_ceildivpow2(l_tilec->x0, (OPJ_INT32)l_level_no); l_band->y0 = opj_int_ceildivpow2(l_tilec->y0, (OPJ_INT32)l_level_no); l_band->x1 = opj_int_ceildivpow2(l_tilec->x1, (OPJ_INT32)l_level_no); l_band->y1 = opj_int_ceildivpow2(l_tilec->y1, (OPJ_INT32)l_level_no); } else { l_band->bandno = bandno + 1; /* x0b = 1 if bandno = 1 or 3 */ l_x0b = l_band->bandno&1; /* y0b = 1 if bandno = 2 or 3 */ l_y0b = (OPJ_INT32)((l_band->bandno)>>1); /* l_band border (global) */ l_band->x0 = opj_int_ceildivpow2(l_tilec->x0 - (1 << l_level_no) * l_x0b, (OPJ_INT32)(l_level_no + 1)); l_band->y0 = opj_int_ceildivpow2(l_tilec->y0 - (1 << l_level_no) * l_y0b, (OPJ_INT32)(l_level_no + 1)); l_band->x1 = opj_int_ceildivpow2(l_tilec->x1 - (1 << l_level_no) * l_x0b, (OPJ_INT32)(l_level_no + 1)); l_band->y1 = opj_int_ceildivpow2(l_tilec->y1 - (1 << l_level_no) * l_y0b, (OPJ_INT32)(l_level_no + 1)); } /** avoid an if with storing function pointer */ l_gain = (*l_gain_ptr) (l_band->bandno); numbps = (OPJ_INT32)(l_image_comp->prec + l_gain); l_band->stepsize = (OPJ_FLOAT32)(((1.0 + l_step_size->mant / 2048.0) * pow(2.0, (OPJ_INT32) (numbps - l_step_size->expn)))) * fraction; l_band->numbps = l_step_size->expn + (OPJ_INT32)l_tccp->numgbits - 1; /* WHY -1 ? */ if (! l_band->precincts) { l_band->precincts = (opj_tcd_precinct_t *) opj_malloc( /*3 * */ l_nb_precinct_size); if (! l_band->precincts) { return OPJ_FALSE; } /*fprintf(stderr, "\t\t\t\tAllocate precincts of a band (opj_tcd_precinct_t): %d\n",l_nb_precinct_size); */ memset(l_band->precincts,0,l_nb_precinct_size); l_band->precincts_data_size = l_nb_precinct_size; } else if (l_band->precincts_data_size < l_nb_precinct_size) { opj_tcd_precinct_t * new_precincts = (opj_tcd_precinct_t *) opj_realloc(l_band->precincts,/*3 * */ l_nb_precinct_size); if (! 
new_precincts) { /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to handle band precints\n"); */ fprintf(stderr, "Not enough memory to handle band precints\n"); opj_free(l_band->precincts); l_band->precincts = NULL; l_band->precincts_data_size = 0; return OPJ_FALSE; } l_band->precincts = new_precincts; /*fprintf(stderr, "\t\t\t\tReallocate precincts of a band (opj_tcd_precinct_t): from %d to %d\n",l_band->precincts_data_size, l_nb_precinct_size);*/ memset(((OPJ_BYTE *) l_band->precincts) + l_band->precincts_data_size,0,l_nb_precinct_size - l_band->precincts_data_size); l_band->precincts_data_size = l_nb_precinct_size; } l_current_precinct = l_band->precincts; for (precno = 0; precno < l_nb_precincts; ++precno) { OPJ_INT32 tlcblkxstart, tlcblkystart, brcblkxend, brcblkyend; OPJ_INT32 cbgxstart = tlcbgxstart + (OPJ_INT32)(precno % l_res->pw) * (1 << cbgwidthexpn); OPJ_INT32 cbgystart = tlcbgystart + (OPJ_INT32)(precno / l_res->pw) * (1 << cbgheightexpn); OPJ_INT32 cbgxend = cbgxstart + (1 << cbgwidthexpn); OPJ_INT32 cbgyend = cbgystart + (1 << cbgheightexpn); /*fprintf(stderr, "\t precno=%d; bandno=%d, resno=%d; compno=%d\n", precno, bandno , resno, compno);*/ /*fprintf(stderr, "\t tlcbgxstart(=%d) + (precno(=%d) percent res->pw(=%d)) * (1 << cbgwidthexpn(=%d)) \n",tlcbgxstart,precno,l_res->pw,cbgwidthexpn);*/ /* precinct size (global) */ /*fprintf(stderr, "\t cbgxstart=%d, l_band->x0 = %d \n",cbgxstart, l_band->x0);*/ l_current_precinct->x0 = opj_int_max(cbgxstart, l_band->x0); l_current_precinct->y0 = opj_int_max(cbgystart, l_band->y0); l_current_precinct->x1 = opj_int_min(cbgxend, l_band->x1); l_current_precinct->y1 = opj_int_min(cbgyend, l_band->y1); /*fprintf(stderr, "\t prc_x0=%d; prc_y0=%d, prc_x1=%d; prc_y1=%d\n",l_current_precinct->x0, l_current_precinct->y0 ,l_current_precinct->x1, l_current_precinct->y1);*/ tlcblkxstart = opj_int_floordivpow2(l_current_precinct->x0, (OPJ_INT32)cblkwidthexpn) << cblkwidthexpn; /*fprintf(stderr, "\t tlcblkxstart =%d\n",tlcblkxstart );*/ tlcblkystart = opj_int_floordivpow2(l_current_precinct->y0, (OPJ_INT32)cblkheightexpn) << cblkheightexpn; /*fprintf(stderr, "\t tlcblkystart =%d\n",tlcblkystart );*/ brcblkxend = opj_int_ceildivpow2(l_current_precinct->x1, (OPJ_INT32)cblkwidthexpn) << cblkwidthexpn; /*fprintf(stderr, "\t brcblkxend =%d\n",brcblkxend );*/ brcblkyend = opj_int_ceildivpow2(l_current_precinct->y1, (OPJ_INT32)cblkheightexpn) << cblkheightexpn; /*fprintf(stderr, "\t brcblkyend =%d\n",brcblkyend );*/ l_current_precinct->cw = (OPJ_UINT32)((brcblkxend - tlcblkxstart) >> cblkwidthexpn); l_current_precinct->ch = (OPJ_UINT32)((brcblkyend - tlcblkystart) >> cblkheightexpn); l_nb_code_blocks = l_current_precinct->cw * l_current_precinct->ch; /*fprintf(stderr, "\t\t\t\t precinct_cw = %d x recinct_ch = %d\n",l_current_precinct->cw, l_current_precinct->ch); */ l_nb_code_blocks_size = l_nb_code_blocks * (OPJ_UINT32)sizeof_block; if (! l_current_precinct->cblks.blocks) { l_current_precinct->cblks.blocks = opj_malloc(l_nb_code_blocks_size); if (! l_current_precinct->cblks.blocks ) { return OPJ_FALSE; } /*fprintf(stderr, "\t\t\t\tAllocate cblks of a precinct (opj_tcd_cblk_dec_t): %d\n",l_nb_code_blocks_size);*/ memset(l_current_precinct->cblks.blocks,0,l_nb_code_blocks_size); l_current_precinct->block_size = l_nb_code_blocks_size; } else if (l_nb_code_blocks_size > l_current_precinct->block_size) { void *new_blocks = opj_realloc(l_current_precinct->cblks.blocks, l_nb_code_blocks_size); if (! 
new_blocks) { opj_free(l_current_precinct->cblks.blocks); l_current_precinct->cblks.blocks = NULL; l_current_precinct->block_size = 0; /* opj_event_msg(p_manager, EVT_ERROR, "Not enough memory for current precinct codeblock element\n"); */ fprintf(stderr, "Not enough memory for current precinct codeblock element\n"); return OPJ_FALSE; } l_current_precinct->cblks.blocks = new_blocks; /*fprintf(stderr, "\t\t\t\tReallocate cblks of a precinct (opj_tcd_cblk_dec_t): from %d to %d\n",l_current_precinct->block_size, l_nb_code_blocks_size); */ memset(((OPJ_BYTE *) l_current_precinct->cblks.blocks) + l_current_precinct->block_size ,0 ,l_nb_code_blocks_size - l_current_precinct->block_size); l_current_precinct->block_size = l_nb_code_blocks_size; } if (! l_current_precinct->incltree) { l_current_precinct->incltree = opj_tgt_create(l_current_precinct->cw, l_current_precinct->ch); } else{ l_current_precinct->incltree = opj_tgt_init(l_current_precinct->incltree, l_current_precinct->cw, l_current_precinct->ch); } if (! l_current_precinct->incltree) { fprintf(stderr, "WARNING: No incltree created.\n"); /*return OPJ_FALSE;*/ } if (! l_current_precinct->imsbtree) { l_current_precinct->imsbtree = opj_tgt_create( l_current_precinct->cw, l_current_precinct->ch); } else { l_current_precinct->imsbtree = opj_tgt_init( l_current_precinct->imsbtree, l_current_precinct->cw, l_current_precinct->ch); } if (! l_current_precinct->imsbtree) { fprintf(stderr, "WARNING: No imsbtree created.\n"); /*return OPJ_FALSE;*/ } for (cblkno = 0; cblkno < l_nb_code_blocks; ++cblkno) { OPJ_INT32 cblkxstart = tlcblkxstart + (OPJ_INT32)(cblkno % l_current_precinct->cw) * (1 << cblkwidthexpn); OPJ_INT32 cblkystart = tlcblkystart + (OPJ_INT32)(cblkno / l_current_precinct->cw) * (1 << cblkheightexpn); OPJ_INT32 cblkxend = cblkxstart + (1 << cblkwidthexpn); OPJ_INT32 cblkyend = cblkystart + (1 << cblkheightexpn); if (isEncoder) { opj_tcd_cblk_enc_t* l_code_block = l_current_precinct->cblks.enc + cblkno; if (! opj_tcd_code_block_enc_allocate(l_code_block)) { return OPJ_FALSE; } /* code-block size (global) */ l_code_block->x0 = opj_int_max(cblkxstart, l_current_precinct->x0); l_code_block->y0 = opj_int_max(cblkystart, l_current_precinct->y0); l_code_block->x1 = opj_int_min(cblkxend, l_current_precinct->x1); l_code_block->y1 = opj_int_min(cblkyend, l_current_precinct->y1); if (! opj_tcd_code_block_enc_allocate_data(l_code_block)) { return OPJ_FALSE; } } else { opj_tcd_cblk_dec_t* l_code_block = l_current_precinct->cblks.dec + cblkno; if (! opj_tcd_code_block_dec_allocate(l_code_block)) { return OPJ_FALSE; } /* code-block size (global) */ l_code_block->x0 = opj_int_max(cblkxstart, l_current_precinct->x0); l_code_block->y0 = opj_int_max(cblkystart, l_current_precinct->y0); l_code_block->x1 = opj_int_min(cblkxend, l_current_precinct->x1); l_code_block->y1 = opj_int_min(cblkyend, l_current_precinct->y1); } } ++l_current_precinct; } /* precno */ ++l_band; ++l_step_size; } /* bandno */ ++l_res; --l_level_no; } /* resno */ ++l_tccp; ++l_tilec; ++l_image_comp; } /* compno */ return OPJ_TRUE; }
1
[ "CWE-190" ]
openjpeg
5d00b719f4b93b1445e6fb4c766b9a9883c57949
23,767,818,004,344,117,000,000,000,000,000,000,000
385
[trunk] fixed a buffer overflow in opj_tcd_init_decode_tile Update issue 431
void gf_filter_lock_all(GF_Filter *filter, Bool do_lock) { if (!filter) return; if (do_lock) gf_mx_p(filter->session->filters_mx); else gf_mx_v(filter->session->filters_mx); }
0
[ "CWE-787" ]
gpac
da37ec8582266983d0ec4b7550ec907401ec441e
39,501,285,242,454,990,000,000,000,000,000,000,000
8
fixed crashes for very long path - cf #1908
void v9fs_put_acl(struct posix_acl *dacl, struct posix_acl *acl) { posix_acl_release(dacl); posix_acl_release(acl); }
0
[ "CWE-862", "CWE-285" ]
linux
073931017b49d9458aa351605b43a7e34598caef
82,908,894,906,426,890,000,000,000,000,000,000,000
6
posix_acl: Clear SGID bit when setting file permissions When file permissions are modified via chmod(2) and the user is not in the owning group or capable of CAP_FSETID, the setgid bit is cleared in inode_change_ok(). Setting a POSIX ACL via setxattr(2) sets the file permissions as well as the new ACL, but doesn't clear the setgid bit in a similar way; this allows bypassing the check in chmod(2). Fix that. References: CVE-2016-7097 Reviewed-by: Christoph Hellwig <[email protected]> Reviewed-by: Jeff Layton <[email protected]> Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Andreas Gruenbacher <[email protected]>
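A simplified user-space model of the rule described above, not the kernel helper itself; the boolean parameters stand in for the owning-group membership and CAP_FSETID checks that the kernel performs.

#include <stdbool.h>
#include <sys/stat.h>

/* When the caller is not in the file's owning group and cannot override
 * that check, a permission update arriving via an ACL must drop the
 * setgid bit, mirroring what chmod(2) already does. */
static mode_t apply_new_mode(mode_t new_mode, bool in_owning_group,
                             bool has_fsetid_capability)
{
    if (!in_owning_group && !has_fsetid_capability)
        new_mode &= ~(mode_t)S_ISGID;
    return new_mode;
}

int main(void)
{
    mode_t m = apply_new_mode(S_ISGID | 0770, false, false);
    return (m & S_ISGID) ? 1 : 0;   /* setgid bit must be gone */
}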
void set_date_utc(int64_t date_utc) { date_utc_ = date_utc; }
0
[ "CWE-20" ]
libvpx
f00890eecdf8365ea125ac16769a83aa6b68792d
206,547,069,852,951,260,000,000,000,000,000,000,000
1
update libwebm to libwebm-1.0.0.27-352-g6ab9fcf https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d
xmlDictCleanup(void) { if (!xmlDictInitialized) return; xmlFreeRMutex(xmlDictMutex); xmlDictInitialized = 0; }
0
[ "CWE-399" ]
libxml2
8973d58b7498fa5100a876815476b81fd1a2412a
308,615,009,761,411,450,000,000,000,000,000,000,000
8
Add hash randomization to hash and dict structures Following http://www.ocert.org/advisories/ocert-2011-003.html it seems that having hash randomization might be a good idea when using XML with untrusted data * configure.in: lookup for rand, srand and time * dict.c: add randomization to dictionaries hash tables * hash.c: add randomization to normal hash tables
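A minimal sketch of the hash-randomization idea mentioned above: mix a per-process random seed into the string hash so attacker-chosen keys cannot be precomputed to collide. The seed source shown (srand/rand with time and pid) is an assumption kept deliberately simple; a real implementation would prefer a stronger entropy source.

#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

static uint32_t hash_seed;

/* Pick the per-process seed once at start-up. */
void hash_init_seed(void)
{
    srand((unsigned)time(NULL) ^ (unsigned)getpid());
    hash_seed = (uint32_t)rand();
}

/* Seeded string hash: without knowing hash_seed an attacker cannot
 * construct a set of keys that all land in the same bucket. */
uint32_t hash_string(const char *s)
{
    uint32_t h = hash_seed;
    while (*s)
        h = h * 31u + (unsigned char)*s++;
    return h;
}

int main(void)
{
    hash_init_seed();
    return hash_string("example") == 0;   /* just exercise the functions */
}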
static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg, int msqflg) { struct ipc_security_struct *isec; struct msg_security_struct *msec; struct common_audit_data ad; struct selinux_audit_data sad = {0,}; u32 sid = current_sid(); int rc; isec = msq->q_perm.security; msec = msg->security; /* * First time through, need to assign label to the message */ if (msec->sid == SECINITSID_UNLABELED) { /* * Compute new sid based on current process and * message queue this message will be stored in */ rc = security_transition_sid(sid, isec->sid, SECCLASS_MSG, NULL, &msec->sid); if (rc) return rc; } COMMON_AUDIT_DATA_INIT(&ad, IPC); ad.selinux_audit_data = &sad; ad.u.ipc_id = msq->q_perm.key; /* Can this process write to the queue? */ rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ, MSGQ__WRITE, &ad); if (!rc) /* Can this process send the message */ rc = avc_has_perm(sid, msec->sid, SECCLASS_MSG, MSG__SEND, &ad); if (!rc) /* Can the message be put in the queue? */ rc = avc_has_perm(msec->sid, isec->sid, SECCLASS_MSGQ, MSGQ__ENQUEUE, &ad); return rc; }
0
[ "CWE-264" ]
linux
259e5e6c75a910f3b5e656151dc602f53f9d7548
297,590,957,862,810,470,000,000,000,000,000,000,000
44
Add PR_{GET,SET}_NO_NEW_PRIVS to prevent execve from granting privs With this change, calling prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) disables privilege granting operations at execve-time. For example, a process will not be able to execute a setuid binary to change their uid or gid if this bit is set. The same is true for file capabilities. Additionally, LSM_UNSAFE_NO_NEW_PRIVS is defined to ensure that LSMs respect the requested behavior. To determine if the NO_NEW_PRIVS bit is set, a task may call prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0); It returns 1 if set and 0 if it is not set. If any of the arguments are non-zero, it will return -1 and set errno to -EINVAL. (PR_SET_NO_NEW_PRIVS behaves similarly.) This functionality is desired for the proposed seccomp filter patch series. By using PR_SET_NO_NEW_PRIVS, it allows a task to modify the system call behavior for itself and its child tasks without being able to impact the behavior of a more privileged task. Another potential use is making certain privileged operations unprivileged. For example, chroot may be considered "safe" if it cannot affect privileged tasks. Note, this patch causes execve to fail when PR_SET_NO_NEW_PRIVS is set and AppArmor is in use. It is fixed in a subsequent patch. Signed-off-by: Andy Lutomirski <[email protected]> Signed-off-by: Will Drewry <[email protected]> Acked-by: Eric Paris <[email protected]> Acked-by: Kees Cook <[email protected]> v18: updated change desc v17: using new define values as per 3.4 Signed-off-by: James Morris <[email protected]>
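Because the commit message above documents a user-space API, a short usage example may help; the PR_SET_NO_NEW_PRIVS and PR_GET_NO_NEW_PRIVS constants come from the kernel headers, with fallback definitions here in case older headers lack them.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_NO_NEW_PRIVS     /* fallback for headers older than the feature */
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
#endif

int main(void)
{
    /* After this call, execve() can no longer grant privileges to this
     * task or its children (setuid/setgid bits and file capabilities
     * are not honoured). */
    if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
        perror("prctl(PR_SET_NO_NEW_PRIVS)");
        return 1;
    }
    /* The GET form requires all extra arguments to be zero. */
    printf("no_new_privs = %d\n", prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
    return 0;
}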
static int em_rdpmc(struct x86_emulate_ctxt *ctxt) { u64 pmc; if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc)) return emulate_gp(ctxt, 0); *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc; *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32; return X86EMUL_CONTINUE; }
0
[]
kvm
d1442d85cc30ea75f7d399474ca738e0bc96f715
77,600,721,650,360,390,000,000,000,000,000,000,000
10
KVM: x86: Handle errors when RIP is set during far jumps Far jmp/call/ret may fault while loading a new RIP. Currently KVM does not handle this case, and may result in failed vm-entry once the assignment is done. The tricky part of doing so is that loading the new CS affects the VMCS/VMCB state, so if we fail during loading the new RIP, we are left in unconsistent state. Therefore, this patch saves on 64-bit the old CS descriptor and restores it if loading RIP failed. This fixes CVE-2014-3647. Cc: [email protected] Signed-off-by: Nadav Amit <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
Operator *Gfx::findOp(char *name) { int a, b, m, cmp; a = -1; b = numOps; // invariant: opTab[a] < name < opTab[b] while (b - a > 1) { m = (a + b) / 2; cmp = strcmp(opTab[m].name, name); if (cmp < 0) a = m; else if (cmp > 0) b = m; else a = b = m; } if (cmp != 0) return NULL; return &opTab[a]; }
0
[]
poppler
abf167af8b15e5f3b510275ce619e6fdb42edd40
117,920,765,457,993,500,000,000,000,000,000,000,000
20
Implement tiling/patterns in SplashOutputDev Fixes bug 13518
TEST_F(ExprMatchTest, ConstantPositiveNumberExpressionMatchesCorrectly) { createMatcher(fromjson("{$expr: 1}")); ASSERT_TRUE(matches(BSON("x" << 2))); }
0
[]
mongo
ee97c0699fd55b498310996ee002328e533681a3
179,579,822,805,466,940,000,000,000,000,000,000,000
5
SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr.
static CURLcode smtp_state_command_resp(struct connectdata *conn, int smtpcode, smtpstate instate) { CURLcode result = CURLE_OK; struct Curl_easy *data = conn->data; struct SMTP *smtp = data->req.protop; char *line = data->state.buffer; size_t len = strlen(line); (void)instate; /* no use for this yet */ if((smtp->rcpt && smtpcode/100 != 2 && smtpcode != 553 && smtpcode != 1) || (!smtp->rcpt && smtpcode/100 != 2 && smtpcode != 1)) { failf(data, "Command failed: %d", smtpcode); result = CURLE_RECV_ERROR; } else { /* Temporarily add the LF character back and send as body to the client */ if(!data->set.opt_no_body) { line[len] = '\n'; result = Curl_client_write(conn, CLIENTWRITE_BODY, line, len + 1); line[len] = '\0'; } if(smtpcode != 1) { if(smtp->rcpt) { smtp->rcpt = smtp->rcpt->next; if(smtp->rcpt) { /* Send the next command */ result = smtp_perform_command(conn); } else /* End of DO phase */ state(conn, SMTP_STOP); } else /* End of DO phase */ state(conn, SMTP_STOP); } } return result; }
0
[ "CWE-200", "CWE-119", "CWE-787" ]
curl
ba1dbd78e5f1ed67c1b8d37ac89d90e5e330b628
149,080,765,014,951,670,000,000,000,000,000,000,000
44
smtp: use the upload buffer size for scratch buffer malloc ... not the read buffer size, as that can be set smaller and thus cause a buffer overflow! CVE-2018-0500 Reported-by: Peter Wu Bug: https://curl.haxx.se/docs/adv_2018-70a2.html
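A generic sketch of the bug class fixed above: a scratch buffer must be sized from the upload buffer whose contents it will hold, not from a separately configurable read buffer that may be smaller. The struct and field names below are invented for illustration and are not curl's.

/* Illustrative only: size the escape/scratch area from the buffer it
 * mirrors (the upload buffer), never from an unrelated read buffer that
 * the user may have configured smaller. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct xfer {
    size_t readbuf_size;    /* user-configurable, may be small */
    size_t uploadbuf_size;  /* size of the data being escaped/copied */
    char   uploadbuf[64];
};

static char *make_scratch(const struct xfer *x)
{
    /* Worst case every byte doubles during escaping, plus a terminator.
     * The vulnerable pattern sized this from readbuf_size instead. */
    char *scratch = malloc(x->uploadbuf_size * 2 + 1);
    if (scratch)
        memcpy(scratch, x->uploadbuf, x->uploadbuf_size);
    return scratch;
}

int main(void)
{
    struct xfer x = { .readbuf_size = 16, .uploadbuf_size = 64 };
    memset(x.uploadbuf, 'A', sizeof(x.uploadbuf));
    char *s = make_scratch(&x);
    printf("scratch %s\n", s ? "allocated" : "failed");
    free(s);
    return 0;
}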
SpoolssWritePrinter_q(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, dcerpc_info *di, guint8 *drep) { e_ctx_hnd policy_hnd; char *pol_name; guint32 size; proto_item *item; proto_tree *subtree; /* Parse packet */ offset = dissect_nt_policy_hnd( tvb, offset, pinfo, tree, di, drep, hf_hnd, &policy_hnd, NULL, FALSE, FALSE); dcerpc_fetch_polhnd_data(&policy_hnd, &pol_name, NULL, NULL, NULL, pinfo->num); if (pol_name) col_append_fstr(pinfo->cinfo, COL_INFO, ", %s", pol_name); offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep, hf_buffer_size, &size); col_append_fstr(pinfo->cinfo, COL_INFO, ", %d bytes", size); subtree = proto_tree_add_subtree(tree, tvb, offset, 0, ett_writeprinter_buffer, &item, "Buffer"); offset = dissect_ndr_uint8s(tvb, offset, pinfo, subtree, di, drep, hf_buffer_data, size, NULL); offset = dissect_ndr_uint32(tvb, offset, pinfo, subtree, di, drep, hf_buffer_size, NULL); proto_item_set_len(item, size + 4); return offset; }
0
[ "CWE-399" ]
wireshark
b4d16b4495b732888e12baf5b8a7e9bf2665e22b
315,779,238,286,529,130,000,000,000,000,000,000,000
39
SPOOLSS: Try to avoid an infinite loop. Use tvb_reported_length_remaining in dissect_spoolss_uint16uni. Make sure our offset always increments in dissect_spoolss_keybuffer. Change-Id: I7017c9685bb2fa27161d80a03b8fca4ef630e793 Reviewed-on: https://code.wireshark.org/review/14687 Reviewed-by: Gerald Combs <[email protected]> Petri-Dish: Gerald Combs <[email protected]> Tested-by: Petri Dish Buildbot <[email protected]> Reviewed-by: Michael Mann <[email protected]>
static int cx24116_set_symbolrate(struct cx24116_state *state, u32 rate) { dprintk("%s(%d)\n", __func__, rate); /* check if symbol rate is within limits */ if ((rate > state->frontend.ops.info.symbol_rate_max) || (rate < state->frontend.ops.info.symbol_rate_min)) { dprintk("%s() unsupported symbol_rate = %d\n", __func__, rate); return -EOPNOTSUPP; } state->dnxt.symbol_rate = rate; dprintk("%s() symbol_rate = %d\n", __func__, rate); return 0; }
0
[ "CWE-476", "CWE-119", "CWE-125" ]
linux
1fa2337a315a2448c5434f41e00d56b01a22283c
49,663,462,440,161,250,000,000,000,000,000,000,000
16
[media] cx24116: fix a buffer overflow when checking userspace params The maximum size for a DiSEqC command is 6, according to the userspace API. However, the code allows to write up much more values: drivers/media/dvb-frontends/cx24116.c:983 cx24116_send_diseqc_msg() error: buffer overflow 'd->msg' 6 <= 23 Cc: [email protected] Signed-off-by: Mauro Carvalho Chehab <[email protected]>
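The overflow described above comes from copying a user-supplied DiSEqC message into a 6-byte buffer without checking its length. A minimal standalone sketch of the missing bounds check; the struct here is a simplified stand-in, not the dvb-frontends driver structures.

/* Simplified stand-in for the check added by the fix: reject user-supplied
 * DiSEqC commands longer than the 6-byte protocol maximum before copying
 * them into a fixed-size buffer. */
#include <stdio.h>
#include <string.h>

#define DISEQC_MSG_MAX 6                 /* per the DVB userspace API */

struct diseqc_cmd { unsigned char msg[DISEQC_MSG_MAX]; unsigned char msg_len; };

static int send_diseqc(struct diseqc_cmd *dst,
                       const unsigned char *src, unsigned char src_len)
{
    if (src_len > DISEQC_MSG_MAX)        /* the missing bounds check */
        return -1;
    memcpy(dst->msg, src, src_len);      /* safe: at most 6 bytes */
    dst->msg_len = src_len;
    return 0;
}

int main(void)
{
    struct diseqc_cmd cmd;
    unsigned char evil[23] = { 0 };
    printf("oversized message rejected: %d\n",
           send_diseqc(&cmd, evil, sizeof(evil)));
    return 0;
}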
static void JS_ExecuteGlobalContextFunction( v8::FunctionCallbackInfo<v8::Value> const& args) { TRI_V8_TRY_CATCH_BEGIN(isolate); v8::HandleScope scope(isolate); if (args.Length() != 1) { TRI_V8_THROW_EXCEPTION_USAGE( "executeGlobalContextFunction(<function-type>)"); } // extract the action name v8::String::Utf8Value utf8def(isolate, args[0]); if (*utf8def == nullptr) { TRI_V8_THROW_TYPE_ERROR("<definition> must be a UTF-8 function definition"); } std::string const def = std::string(*utf8def, utf8def.length()); TRI_GET_GLOBALS(); // and pass it to the V8 contexts if (!v8g->_server.getFeature<V8DealerFeature>().addGlobalContextMethod(def)) { TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid action definition"); } TRI_V8_RETURN_UNDEFINED(); TRI_V8_TRY_CATCH_END }
0
[ "CWE-918" ]
arangodb
d7b35a6884c6b2802d34d79fb2a79fb2c9ec2175
180,933,501,372,251,200,000,000,000,000,000,000,000
29
[APM-78] Disable installation from remote URL (#15292) (#15343) * [APM-78] Disable installation from remote URL (#15292) * Update CHANGELOG * Fix clang-format Co-authored-by: Vadim <[email protected]>
void fs_logger_print_log(pid_t pid) { EUID_ASSERT(); ProcessHandle sandbox = pin_sandbox_process(pid); // chroot in the sandbox process_rootfs_chroot(sandbox); unpin_process(sandbox); drop_privs(0); // print RUN_FSLOGGER_FILE FILE *fp = fopen(RUN_FSLOGGER_FILE, "re"); if (!fp) { fprintf(stderr, "Error: Cannot open filesystem log\n"); exit(1); } char buf[MAXBUF]; while (fgets(buf, MAXBUF, fp)) printf("%s", buf); fclose(fp); exit(0); }
0
[ "CWE-269", "CWE-94" ]
firejail
27cde3d7d1e4e16d4190932347c7151dc2a84c50
117,196,020,068,775,350,000,000,000,000,000,000,000
25
fixing CVE-2022-31214
static int aes_wrap_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key, const unsigned char *iv, int enc) { EVP_AES_WRAP_CTX *wctx = EVP_C_DATA(EVP_AES_WRAP_CTX,ctx); if (!iv && !key) return 1; if (key) { if (EVP_CIPHER_CTX_encrypting(ctx)) AES_set_encrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8, &wctx->ks.ks); else AES_set_decrypt_key(key, EVP_CIPHER_CTX_key_length(ctx) * 8, &wctx->ks.ks); if (!iv) wctx->iv = NULL; } if (iv) { memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), iv, EVP_CIPHER_CTX_iv_length(ctx)); wctx->iv = EVP_CIPHER_CTX_iv_noconst(ctx); } return 1; }
0
[ "CWE-125" ]
openssl
2198b3a55de681e1f3c23edb0586afe13f438051
63,796,624,231,887,255,000,000,000,000,000,000,000
22
crypto/evp: harden AEAD ciphers. Originally a crash in 32-bit build was reported CHACHA20-POLY1305 cipher. The crash is triggered by truncated packet and is result of excessive hashing to the edge of accessible memory. Since hash operation is read-only it is not considered to be exploitable beyond a DoS condition. Other ciphers were hardened. Thanks to Robert Święcki for report. CVE-2017-3731 Reviewed-by: Rich Salz <[email protected]>
X509_LOOKUP* X509_STORE_add_lookup(X509_STORE*, X509_LOOKUP_METHOD*) { // TODO: return 0; }
0
[ "CWE-254" ]
mysql-server
e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69
186,786,641,874,932,740,000,000,000,000,000,000,000
5
Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED.
static void gem_realize(DeviceState *dev, Error **errp) { CadenceGEMState *s = CADENCE_GEM(dev); int i; address_space_init(&s->dma_as, s->dma_mr ? s->dma_mr : get_system_memory(), "dma"); if (s->num_priority_queues == 0 || s->num_priority_queues > MAX_PRIORITY_QUEUES) { error_setg(errp, "Invalid num-priority-queues value: %" PRIx8, s->num_priority_queues); return; } else if (s->num_type1_screeners > MAX_TYPE1_SCREENERS) { error_setg(errp, "Invalid num-type1-screeners value: %" PRIx8, s->num_type1_screeners); return; } else if (s->num_type2_screeners > MAX_TYPE2_SCREENERS) { error_setg(errp, "Invalid num-type2-screeners value: %" PRIx8, s->num_type2_screeners); return; } for (i = 0; i < s->num_priority_queues; ++i) { sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]); } qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_gem_info, &s->conf, object_get_typename(OBJECT(dev)), dev->id, s); if (s->jumbo_max_len > MAX_FRAME_SIZE) { error_setg(errp, "jumbo-max-len is greater than %d", MAX_FRAME_SIZE); return; } }
0
[ "CWE-835" ]
qemu
e73adfbeec9d4e008630c814759052ed945c3fed
166,810,663,930,785,770,000,000,000,000,000,000,000
38
cadence_gem: switch to use qemu_receive_packet() for loopback This patch switches to use qemu_receive_packet() which can detect reentrancy and return early. This is intended to address CVE-2021-3416. Cc: Prasad J Pandit <[email protected]> Cc: [email protected] Reviewed-by: Philippe Mathieu-Daudé <[email protected]> Signed-off-by: Alexander Bulekov <[email protected]> Signed-off-by: Jason Wang <[email protected]>
_kc_invoke_callback_idle(gpointer user_data) { KillChildAsyncData *data = user_data; if (data->sync.success) { char buf_exit[KC_EXIT_TO_STRING_BUF_SIZE]; nm_log_dbg(data->log_domain, "%s: invoke callback: terminated %s", data->log_name, _kc_exit_to_string(buf_exit, data->sync.child_status)); } else nm_log_dbg(data->log_domain, "%s: invoke callback: killing child failed", data->log_name); data->callback(data->pid, data->sync.success, data->sync.child_status, data->user_data); g_free(data); return G_SOURCE_REMOVE; }
0
[ "CWE-20" ]
NetworkManager
420784e342da4883f6debdfe10cde68507b10d27
76,611,212,030,430,550,000,000,000,000,000,000,000
19
core: fix crash in nm_wildcard_match_check() It's not entirely clear how to treat %NULL. Clearly "match.interface-name=eth0" should not match with an interface %NULL. But what about "match.interface-name=!eth0"? It's now implemented that negative matches still succeed against %NULL. What about "match.interface-name=*"? That probably should also match with %NULL. So we treat %NULL really like "". Against commit 11cd443448bc ('iwd: Don't call IWD methods when device unmanaged'), we got this backtrace: #0 0x00007f1c164069f1 in __strnlen_avx2 () at ../sysdeps/x86_64/multiarch/strlen-avx2.S:62 #1 0x00007f1c1637ac9e in __fnmatch (pattern=<optimized out>, string=<optimized out>, string@entry=0x0, flags=flags@entry=0) at fnmatch.c:379 p = 0x0 res = <optimized out> orig_pattern = <optimized out> n = <optimized out> wpattern = 0x7fff8d860730 L"pci-0000:03:00.0" ps = {__count = 0, __value = {__wch = 0, __wchb = "\000\000\000"}} wpattern_malloc = 0x0 wstring_malloc = 0x0 wstring = <optimized out> alloca_used = 80 __PRETTY_FUNCTION__ = "__fnmatch" #2 0x0000564484a978bf in nm_wildcard_match_check (str=0x0, patterns=<optimized out>, num_patterns=<optimized out>) at src/core/nm-core-utils.c:1959 is_inverted = 0 is_mandatory = 0 match = <optimized out> p = 0x564486c43fa0 "pci-0000:03:00.0" has_optional = 0 has_any_optional = 0 i = <optimized out> #3 0x0000564484bf4797 in check_connection_compatible (self=<optimized out>, connection=<optimized out>, error=0x0) at src/core/devices/nm-device.c:7499 patterns = <optimized out> device_driver = 0x564486c76bd0 "veth" num_patterns = 1 priv = 0x564486cbe0b0 __func__ = "check_connection_compatible" device_iface = <optimized out> local = 0x564486c99a60 conn_iface = 0x0 klass = <optimized out> s_match = 0x564486c63df0 [NMSettingMatch] #4 0x0000564484c38491 in check_connection_compatible (device=0x564486cbe590 [NMDeviceVeth], connection=0x564486c6b160, error=0x0) at src/core/devices/nm-device-ethernet.c:348 self = 0x564486cbe590 [NMDeviceVeth] s_wired = <optimized out> Fixes: 3ced486f4162 ('libnm/match: extend syntax for match patterns with '|', '&', '!' and '\\'') https://bugzilla.redhat.com/show_bug.cgi?id=1942741
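The crash in the backtrace above came from handing a NULL string to fnmatch(). A standalone sketch of the convention the commit describes (treat NULL like the empty string before matching); this is not NetworkManager's nm_wildcard_match_check() itself, just the NULL-handling idea.

/* Sketch of the NULL handling described in the commit message: a NULL
 * subject string behaves like "", so fnmatch() is never called on NULL. */
#include <fnmatch.h>
#include <stdio.h>

static int wildcard_matches(const char *pattern, const char *str)
{
    if (!str)
        str = "";                        /* NULL is treated like "" */
    return fnmatch(pattern, str, 0) == 0;
}

int main(void)
{
    printf("\"eth0\" vs NULL -> %d\n", wildcard_matches("eth0", NULL));
    printf("\"*\"    vs NULL -> %d\n", wildcard_matches("*", NULL));
    return 0;
}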
static void show_submodule_header(FILE *f, const char *path, const char *line_prefix, struct object_id *one, struct object_id *two, unsigned dirty_submodule, const char *meta, const char *reset, struct commit **left, struct commit **right, struct commit_list **merge_bases) { const char *message = NULL; struct strbuf sb = STRBUF_INIT; int fast_forward = 0, fast_backward = 0; if (dirty_submodule & DIRTY_SUBMODULE_UNTRACKED) fprintf(f, "%sSubmodule %s contains untracked content\n", line_prefix, path); if (dirty_submodule & DIRTY_SUBMODULE_MODIFIED) fprintf(f, "%sSubmodule %s contains modified content\n", line_prefix, path); if (is_null_oid(one)) message = "(new submodule)"; else if (is_null_oid(two)) message = "(submodule deleted)"; if (add_submodule_odb(path)) { if (!message) message = "(not initialized)"; goto output_header; } /* * Attempt to lookup the commit references, and determine if this is * a fast forward or fast backwards update. */ *left = lookup_commit_reference(one); *right = lookup_commit_reference(two); /* * Warn about missing commits in the submodule project, but only if * they aren't null. */ if ((!is_null_oid(one) && !*left) || (!is_null_oid(two) && !*right)) message = "(commits not present)"; *merge_bases = get_merge_bases(*left, *right); if (*merge_bases) { if ((*merge_bases)->item == *left) fast_forward = 1; else if ((*merge_bases)->item == *right) fast_backward = 1; } if (!oidcmp(one, two)) { strbuf_release(&sb); return; } output_header: strbuf_addf(&sb, "%s%sSubmodule %s ", line_prefix, meta, path); strbuf_add_unique_abbrev(&sb, one->hash, DEFAULT_ABBREV); strbuf_addstr(&sb, (fast_backward || fast_forward) ? ".." : "..."); strbuf_add_unique_abbrev(&sb, two->hash, DEFAULT_ABBREV); if (message) strbuf_addf(&sb, " %s%s\n", message, reset); else strbuf_addf(&sb, "%s:%s\n", fast_backward ? " (rewind)" : "", reset); fwrite(sb.buf, sb.len, 1, f); strbuf_release(&sb); }
0
[]
git
a8dee3ca610f5a1d403634492136c887f83b59d2
122,037,394,616,154,400,000,000,000,000,000,000,000
71
Disallow dubiously-nested submodule git directories Currently it is technically possible to let a submodule's git directory point right into the git dir of a sibling submodule. Example: the git directories of two submodules with the names `hippo` and `hippo/hooks` would be `.git/modules/hippo/` and `.git/modules/hippo/hooks/`, respectively, but the latter is already intended to house the former's hooks. In most cases, this is just confusing, but there is also a (quite contrived) attack vector where Git can be fooled into mistaking remote content for file contents it wrote itself during a recursive clone. Let's plug this bug. To do so, we introduce the new function `validate_submodule_git_dir()` which simply verifies that no git dir exists for any leading directories of the submodule name (if there are any). Note: this patch specifically continues to allow sibling modules names of the form `core/lib`, `core/doc`, etc, as long as `core` is not a submodule name. This fixes CVE-2019-1387. Reported-by: Nicolas Joly <[email protected]> Signed-off-by: Johannes Schindelin <[email protected]>
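The validation described above boils down to: for a submodule name such as "hippo/hooks", refuse it if any leading directory ("hippo") already owns a git dir under .git/modules. A rough standalone sketch of that prefix walk; it is not git's validate_submodule_git_dir(), and the path buffer size and layout are assumptions.

/* Rough sketch of the leading-directory check described above: walk the
 * submodule name component by component and refuse it if a git dir
 * already exists for any prefix (e.g. "hippo" when adding "hippo/hooks"). */
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

static int dir_exists(const char *path)
{
    struct stat st;
    return stat(path, &st) == 0 && S_ISDIR(st.st_mode);
}

static int validate_submodule_git_dir(const char *name)
{
    char path[4096];
    for (const char *p = strchr(name, '/'); p; p = strchr(p + 1, '/')) {
        snprintf(path, sizeof(path), ".git/modules/%.*s",
                 (int)(p - name), name);
        if (dir_exists(path)) {
            fprintf(stderr, "refusing: '%s' already has a git dir\n", path);
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    return validate_submodule_git_dir("hippo/hooks") ? 1 : 0;
}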
int smb_vfs_call_sys_acl_set_fd(struct vfs_handle_struct *handle, struct files_struct *fsp, SMB_ACL_T theacl) { VFS_FIND(sys_acl_set_fd); return handle->fns->sys_acl_set_fd_fn(handle, fsp, theacl); }
0
[ "CWE-264" ]
samba
4278ef25f64d5fdbf432ff1534e275416ec9561e
184,156,605,493,704,240,000,000,000,000,000,000,000
6
CVE-2015-5252: s3: smbd: Fix symlink verification (file access outside the share). Ensure matching component ends in '/' or '\0'. BUG: https://bugzilla.samba.org/show_bug.cgi?id=11395 Signed-off-by: Jeremy Allison <[email protected]> Reviewed-by: Volker Lendecke <[email protected]>
static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node) { percpu_ref_exit(&ref_node->refs); kfree(ref_node); }
0
[ "CWE-787" ]
linux
d1f82808877bb10d3deee7cf3374a4eb3fb582db
154,080,672,122,561,730,000,000,000,000,000,000,000
5
io_uring: truncate lengths larger than MAX_RW_COUNT on provide buffers Read and write operations are capped to MAX_RW_COUNT. Some read ops rely on that limit, and that is not guaranteed by the IORING_OP_PROVIDE_BUFFERS. Truncate those lengths when doing io_add_buffers, so buffer addresses still use the uncapped length. Also, take the chance and change struct io_buffer len member to __u32, so it matches struct io_provide_buffer len member. This fixes CVE-2021-3491, also reported as ZDI-CAN-13546. Fixes: ddf0322db79c ("io_uring: add IORING_OP_PROVIDE_BUFFERS") Reported-by: Billy Jheng Bing-Jhong (@st424204) Signed-off-by: Thadeu Lima de Souza Cascardo <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
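The core of the fix above is clamping each provided buffer length to MAX_RW_COUNT when buffers are registered, while the address of the next buffer still advances by the caller's original (uncapped) length. A userspace-style sketch with invented types; it is not the io_uring io_add_buffers() code, and only the clamping arithmetic is meant to match the description.

/* Sketch of the truncation described above: the length stored for I/O is
 * capped at MAX_RW_COUNT, while the buffer address stride keeps the
 * uncapped length supplied by the caller.  Types are invented. */
#include <stdint.h>
#include <stdio.h>

#define MAX_RW_COUNT ((uint32_t)0x7ffff000)  /* INT_MAX rounded down to a 4K page */

struct buf { uint64_t addr; uint32_t len; };

static void add_buffers(struct buf *out, uint64_t addr, uint32_t len, int nbufs)
{
    for (int i = 0; i < nbufs; i++) {
        out[i].addr = addr;
        out[i].len  = len > MAX_RW_COUNT ? MAX_RW_COUNT : len;  /* truncate */
        addr += len;                      /* stride keeps the uncapped length */
    }
}

int main(void)
{
    struct buf b[2];
    add_buffers(b, 0x1000, 0xffffffffu, 2);
    printf("len stored: %u (capped from 0xffffffff)\n", b[0].len);
    return 0;
}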
void compress_block( const astcenc_context& ctx, const astcenc_image& input_image, const image_block& blk, physical_compressed_block& pcb, compression_working_buffers& tmpbuf) { astcenc_profile decode_mode = ctx.config.profile; symbolic_compressed_block scb; error_weight_block& ewb = tmpbuf.ewb; const block_size_descriptor* bsd = ctx.bsd; float lowest_correl; TRACE_NODE(node0, "block"); trace_add_data("pos_x", blk.xpos); trace_add_data("pos_y", blk.ypos); trace_add_data("pos_z", blk.zpos); // Set stricter block targets for luminance data as we have more bits to play with bool block_is_l = blk.is_luminance(); float block_is_l_scale = block_is_l ? 1.0f / 1.5f : 1.0f; // Set slightly stricter block targets for lumalpha data as we have more bits to play with bool block_is_la = blk.is_luminancealpha(); float block_is_la_scale = block_is_la ? 1.0f / 1.05f : 1.0f; bool block_skip_two_plane = false; // Default max partition, but +1 if only have 1 or 2 active components int max_partitions = ctx.config.tune_partition_count_limit; if (block_is_l || block_is_la) { max_partitions = astc::min(max_partitions + 1, 4); } #if defined(ASTCENC_DIAGNOSTICS) // Do this early in diagnostic builds so we can dump uniform metrics // for every block. Do it later in release builds to avoid redundant work! float error_weight_sum = prepare_error_weight_block(ctx, input_image, *bsd, blk, ewb); float error_threshold = ctx.config.tune_db_limit * error_weight_sum * block_is_l_scale * block_is_la_scale; lowest_correl = prepare_block_statistics(bsd->texel_count, blk, ewb); trace_add_data("lowest_correl", lowest_correl); trace_add_data("tune_error_threshold", error_threshold); #endif // Detected a constant-color block if (all(blk.data_min == blk.data_max)) { TRACE_NODE(node1, "pass"); trace_add_data("partition_count", 0); trace_add_data("plane_count", 1); scb.partition_count = 0; // Encode as FP16 if using HDR if ((decode_mode == ASTCENC_PRF_HDR) || (decode_mode == ASTCENC_PRF_HDR_RGB_LDR_A)) { scb.block_type = SYM_BTYPE_CONST_F16; vint4 color_f16 = float_to_float16(blk.origin_texel); store(color_f16, scb.constant_color); } // Encode as UNORM16 if NOT using HDR else { scb.block_type = SYM_BTYPE_CONST_U16; vfloat4 color_f32 = clamp(0.0f, 1.0f, blk.origin_texel) * 65535.0f; vint4 color_u16 = float_to_int_rtn(color_f32); store(color_u16, scb.constant_color); } trace_add_data("exit", "quality hit"); symbolic_to_physical(*bsd, scb, pcb); return; } #if !defined(ASTCENC_DIAGNOSTICS) float error_weight_sum = prepare_error_weight_block(ctx, input_image, *bsd, blk, ewb); float error_threshold = ctx.config.tune_db_limit * error_weight_sum * block_is_l_scale * block_is_la_scale; #endif // Set SCB and mode errors to a very high error value scb.errorval = ERROR_CALC_DEFAULT; scb.block_type = SYM_BTYPE_ERROR; float best_errorvals_for_pcount[BLOCK_MAX_PARTITIONS] { ERROR_CALC_DEFAULT, ERROR_CALC_DEFAULT, ERROR_CALC_DEFAULT, ERROR_CALC_DEFAULT }; float exit_thresholds_for_pcount[BLOCK_MAX_PARTITIONS] { 0.0f, ctx.config.tune_2_partition_early_out_limit_factor, ctx.config.tune_3_partition_early_out_limit_factor, 0.0f }; // Trial using 1 plane of weights and 1 partition. // Most of the time we test it twice, first with a mode cutoff of 0 and then with the specified // mode cutoff. This causes an early-out that speeds up encoding of easy blocks. However, this // optimization is disabled for 4x4 and 5x4 blocks where it nearly always slows down the // compression and slightly reduces image quality. 
float errorval_mult[2] { 1.0f / ctx.config.tune_mode0_mse_overshoot, 1.0f }; static const float errorval_overshoot = 1.0f / ctx.config.tune_refinement_mse_overshoot; // Only enable MODE0 fast path (trial 0) if 2D and more than 25 texels int start_trial = 1; if ((bsd->texel_count >= TUNE_MIN_TEXELS_MODE0_FASTPATH) && (bsd->zdim == 1)) { start_trial = 0; } for (int i = start_trial; i < 2; i++) { TRACE_NODE(node1, "pass"); trace_add_data("partition_count", 1); trace_add_data("plane_count", 1); trace_add_data("search_mode", i); float errorval = compress_symbolic_block_for_partition_1plane( ctx.config, *bsd, blk, ewb, i == 0, error_threshold * errorval_mult[i] * errorval_overshoot, 1, 0, scb, tmpbuf); best_errorvals_for_pcount[0] = astc::min(best_errorvals_for_pcount[0], errorval); if (errorval < (error_threshold * errorval_mult[i])) { trace_add_data("exit", "quality hit"); goto END_OF_TESTS; } } #if !defined(ASTCENC_DIAGNOSTICS) lowest_correl = prepare_block_statistics(bsd->texel_count, blk, ewb); #endif block_skip_two_plane = lowest_correl > ctx.config.tune_2_plane_early_out_limit_correlation; // Test the four possible 1-partition, 2-planes modes. Do this in reverse, as // alpha is the most likely to be non-correlated if it is present in the data. for (int i = BLOCK_MAX_COMPONENTS - 1; i >= 0; i--) { TRACE_NODE(node1, "pass"); trace_add_data("partition_count", 1); trace_add_data("plane_count", 2); trace_add_data("plane_component", i); if (block_skip_two_plane) { trace_add_data("skip", "tune_2_plane_early_out_limit_correlation"); continue; } if (blk.grayscale && i != 3) { trace_add_data("skip", "grayscale block"); continue; } if (blk.is_constant_channel(i)) { trace_add_data("skip", "constant component"); continue; } float errorval = compress_symbolic_block_for_partition_2planes( ctx.config, *bsd, blk, ewb, error_threshold * errorval_overshoot, i, scb, tmpbuf); // If attempting two planes is much worse than the best one plane result // then further two plane searches are unlikely to help so move on ... 
if (errorval > (best_errorvals_for_pcount[0] * 2.0f)) { break; } if (errorval < error_threshold) { trace_add_data("exit", "quality hit"); goto END_OF_TESTS; } } // Find best blocks for 2, 3 and 4 partitions for (int partition_count = 2; partition_count <= max_partitions; partition_count++) { unsigned int partition_indices_1plane[2] { 0, 0 }; find_best_partition_candidates(*bsd, blk, ewb, partition_count, ctx.config.tune_partition_index_limit, partition_indices_1plane[0], partition_indices_1plane[1]); for (int i = 0; i < 2; i++) { TRACE_NODE(node1, "pass"); trace_add_data("partition_count", partition_count); trace_add_data("partition_index", partition_indices_1plane[i]); trace_add_data("plane_count", 1); trace_add_data("search_mode", i); float errorval = compress_symbolic_block_for_partition_1plane( ctx.config, *bsd, blk, ewb, false, error_threshold * errorval_overshoot, partition_count, partition_indices_1plane[i], scb, tmpbuf); best_errorvals_for_pcount[partition_count - 1] = astc::min(best_errorvals_for_pcount[partition_count - 1], errorval); if (errorval < error_threshold) { trace_add_data("exit", "quality hit"); goto END_OF_TESTS; } } // If using N partitions doesn't improve much over using N-1 partitions then skip trying N+1 float best_error = best_errorvals_for_pcount[partition_count - 1]; float best_error_in_prev = best_errorvals_for_pcount[partition_count - 2]; float best_error_scale = exit_thresholds_for_pcount[partition_count - 1]; if (best_error > (best_error_in_prev * best_error_scale)) { trace_add_data("skip", "tune_partition_early_out_limit_factor"); goto END_OF_TESTS; } } trace_add_data("exit", "quality not hit"); END_OF_TESTS: // If we still have an error block then convert to something we can encode // TODO: Do something more sensible here, such as average color block if (scb.block_type == SYM_BTYPE_ERROR) { #if !defined(NDEBUG) static bool printed_once = false; if (!printed_once) { printed_once = true; printf("WARN: At least one block failed to find a valid encoding.\n" " Try increasing compression quality settings.\n\n"); } #endif scb.block_type = SYM_BTYPE_CONST_U16; scb.block_mode = -2; vfloat4 color_f32 = clamp(0.0f, 1.0f, blk.origin_texel) * 65535.0f; vint4 color_u16 = float_to_int_rtn(color_f32); store(color_u16, scb.constant_color); } // Compress to a physical block symbolic_to_physical(*bsd, scb, pcb); }
0
[ "CWE-787" ]
astc-encoder
6ffb3058bfbcc836108c25274e955e399481e2b4
196,564,571,257,947,400,000,000,000,000,000,000,000
268
Provide a fallback for blocks which find no valid encoding
void server_connect_ref(SERVER_CONNECT_REC *conn) { conn->refcount++; }
0
[ "CWE-20" ]
irssi-proxy
85bbc05b21678e80423815d2ef1dfe26208491ab
114,459,069,664,927,250,000,000,000,000,000,000,000
4
Check if an SSL certificate matches the hostname of the server we are connecting to git-svn-id: http://svn.irssi.org/repos/irssi/trunk@5104 dbcabf3a-b0e7-0310-adc4-f8d773084564
CUser::~CUser() { // Delete networks while (!m_vIRCNetworks.empty()) { delete *m_vIRCNetworks.begin(); } // Delete clients while (!m_vClients.empty()) { CZNC::Get().GetManager().DelSockByAddr(m_vClients[0]); } m_vClients.clear(); // Delete modules (unloads all modules!) delete m_pModules; m_pModules = nullptr; CZNC::Get().GetManager().DelCronByAddr(m_pUserTimer); CZNC::Get().AddBytesRead(m_uBytesRead); CZNC::Get().AddBytesWritten(m_uBytesWritten); }
0
[ "CWE-20" ]
znc
64613bc8b6b4adf1e32231f9844d99cd512b8973
15,462,361,248,644,958,000,000,000,000,000,000,000
21
Don't crash if user specified invalid encoding. This is CVE-2019-9917
static int emulate_on_interception(struct kvm_vcpu *vcpu) { return kvm_emulate_instruction(vcpu, 0); }
0
[ "CWE-862" ]
kvm
0f923e07124df069ba68d8bb12324398f4b6b709
228,011,282,656,925,500,000,000,000,000,000,000,000
4
KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653) * Invert the mask of bits that we pick from L2 in nested_vmcb02_prepare_control * Invert and explicitly use VIRQ related bits bitmask in svm_clear_vintr This fixes a security issue that allowed a malicious L1 to run L2 with AVIC enabled, which allowed the L2 to exploit the uninitialized and enabled AVIC to read/write the host physical memory at some offsets. Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler") Signed-off-by: Maxim Levitsky <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
void setSocket(const std::shared_ptr<AsyncSSLSocket>& socket) { socket_ = socket; }
0
[ "CWE-125" ]
folly
c321eb588909646c15aefde035fd3133ba32cdee
291,088,324,243,652,800,000,000,000,000,000,000,000
3
Handle close_notify as standard writeErr in AsyncSSLSocket. Summary: Fixes CVE-2019-11934 Reviewed By: mingtaoy Differential Revision: D18020613 fbshipit-source-id: db82bb250e53f0d225f1280bd67bc74abd417836
static void __route4_delete_filter(struct route4_filter *f) { tcf_exts_destroy(&f->exts); tcf_exts_put_net(&f->exts); kfree(f); }
0
[ "CWE-416", "CWE-200" ]
linux
ef299cc3fa1a9e1288665a9fdc8bff55629fd359
299,398,749,902,079,640,000,000,000,000,000,000,000
6
net_sched: cls_route: remove the right filter from hashtable route4_change() allocates a new filter and copies values from the old one. After the new filter is inserted into the hash table, the old filter should be removed and freed, as the final step of the update. However, the current code mistakenly removes the new one. This looks apparently wrong to me, and it causes double "free" and use-after-free too, as reported by syzbot. Reported-and-tested-by: [email protected] Reported-and-tested-by: [email protected] Reported-and-tested-by: [email protected] Fixes: 1109c00547fc ("net: sched: RCU cls_route") Cc: Jamal Hadi Salim <[email protected]> Cc: Jiri Pirko <[email protected]> Cc: John Fastabend <[email protected]> Signed-off-by: Cong Wang <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static NTSTATUS dcesrv_lsa_CreateTrustedDomain_base(struct dcesrv_call_state *dce_call, TALLOC_CTX *mem_ctx, struct lsa_CreateTrustedDomainEx2 *r, int op, struct lsa_TrustDomainInfoAuthInfo *unencrypted_auth_info) { struct dcesrv_handle *policy_handle; struct lsa_policy_state *policy_state; struct lsa_trusted_domain_state *trusted_domain_state; struct dcesrv_handle *handle; struct ldb_message **msgs, *msg; const char *attrs[] = { NULL }; const char *netbios_name; const char *dns_name; DATA_BLOB trustAuthIncoming, trustAuthOutgoing, auth_blob; struct trustDomainPasswords auth_struct; int ret; NTSTATUS nt_status; struct ldb_context *sam_ldb; struct server_id *server_ids = NULL; uint32_t num_server_ids = 0; NTSTATUS status; bool ok; char *dns_encoded = NULL; char *netbios_encoded = NULL; char *sid_encoded = NULL; struct imessaging_context *imsg_ctx = dcesrv_imessaging_context(dce_call->conn); DCESRV_PULL_HANDLE(policy_handle, r->in.policy_handle, LSA_HANDLE_POLICY); ZERO_STRUCTP(r->out.trustdom_handle); policy_state = policy_handle->data; sam_ldb = policy_state->sam_ldb; netbios_name = r->in.info->netbios_name.string; if (!netbios_name) { return NT_STATUS_INVALID_PARAMETER; } dns_name = r->in.info->domain_name.string; if (dns_name == NULL) { return NT_STATUS_INVALID_PARAMETER; } if (r->in.info->sid == NULL) { return NT_STATUS_INVALID_SID; } /* * We expect S-1-5-21-A-B-C, but we don't * allow S-1-5-21-0-0-0 as this is used * for claims and compound identities. */ ok = dom_sid_is_valid_account_domain(r->in.info->sid); if (!ok) { return NT_STATUS_INVALID_PARAMETER; } dns_encoded = ldb_binary_encode_string(mem_ctx, dns_name); if (dns_encoded == NULL) { return NT_STATUS_NO_MEMORY; } netbios_encoded = ldb_binary_encode_string(mem_ctx, netbios_name); if (netbios_encoded == NULL) { return NT_STATUS_NO_MEMORY; } sid_encoded = ldap_encode_ndr_dom_sid(mem_ctx, r->in.info->sid); if (sid_encoded == NULL) { return NT_STATUS_NO_MEMORY; } trusted_domain_state = talloc_zero(mem_ctx, struct lsa_trusted_domain_state); if (!trusted_domain_state) { return NT_STATUS_NO_MEMORY; } trusted_domain_state->policy = policy_state; if (strcasecmp(netbios_name, "BUILTIN") == 0 || (strcasecmp(dns_name, "BUILTIN") == 0) || (dom_sid_in_domain(policy_state->builtin_sid, r->in.info->sid))) { return NT_STATUS_INVALID_PARAMETER; } if (strcasecmp(netbios_name, policy_state->domain_name) == 0 || strcasecmp(netbios_name, policy_state->domain_dns) == 0 || strcasecmp(dns_name, policy_state->domain_dns) == 0 || strcasecmp(dns_name, policy_state->domain_name) == 0 || (dom_sid_equal(policy_state->domain_sid, r->in.info->sid))) { return NT_STATUS_CURRENT_DOMAIN_NOT_ALLOWED; } /* While this is a REF pointer, some of the functions that wrap this don't provide this */ if (op == NDR_LSA_CREATETRUSTEDDOMAIN) { /* No secrets are created at this time, for this function */ auth_struct.outgoing.count = 0; auth_struct.incoming.count = 0; } else if (op == NDR_LSA_CREATETRUSTEDDOMAINEX2) { auth_blob = data_blob_const(r->in.auth_info_internal->auth_blob.data, r->in.auth_info_internal->auth_blob.size); nt_status = get_trustdom_auth_blob(dce_call, mem_ctx, &auth_blob, &auth_struct); if (!NT_STATUS_IS_OK(nt_status)) { return nt_status; } } else if (op == NDR_LSA_CREATETRUSTEDDOMAINEX) { if (unencrypted_auth_info->incoming_count > 1) { return NT_STATUS_INVALID_PARAMETER; } /* more investigation required here, do not create secrets for * now */ auth_struct.outgoing.count = 0; auth_struct.incoming.count = 0; } else { return NT_STATUS_INVALID_PARAMETER; } if 
(auth_struct.incoming.count) { nt_status = get_trustauth_inout_blob(dce_call, mem_ctx, &auth_struct.incoming, &trustAuthIncoming); if (!NT_STATUS_IS_OK(nt_status)) { return nt_status; } } else { trustAuthIncoming = data_blob(NULL, 0); } if (auth_struct.outgoing.count) { nt_status = get_trustauth_inout_blob(dce_call, mem_ctx, &auth_struct.outgoing, &trustAuthOutgoing); if (!NT_STATUS_IS_OK(nt_status)) { return nt_status; } } else { trustAuthOutgoing = data_blob(NULL, 0); } ret = ldb_transaction_start(sam_ldb); if (ret != LDB_SUCCESS) { return NT_STATUS_INTERNAL_DB_CORRUPTION; } /* search for the trusted_domain record */ ret = gendb_search(sam_ldb, mem_ctx, policy_state->system_dn, &msgs, attrs, "(&(objectClass=trustedDomain)(|" "(flatname=%s)(trustPartner=%s)" "(flatname=%s)(trustPartner=%s)" "(securityIdentifier=%s)))", dns_encoded, dns_encoded, netbios_encoded, netbios_encoded, sid_encoded); if (ret > 0) { ldb_transaction_cancel(sam_ldb); return NT_STATUS_OBJECT_NAME_COLLISION; } if (ret < 0) { ldb_transaction_cancel(sam_ldb); return NT_STATUS_INTERNAL_DB_CORRUPTION; } msg = ldb_msg_new(mem_ctx); if (msg == NULL) { return NT_STATUS_NO_MEMORY; } msg->dn = ldb_dn_copy(mem_ctx, policy_state->system_dn); if ( ! ldb_dn_add_child_fmt(msg->dn, "cn=%s", dns_name)) { ldb_transaction_cancel(sam_ldb); return NT_STATUS_NO_MEMORY; } ret = ldb_msg_add_string(msg, "objectClass", "trustedDomain"); if (ret != LDB_SUCCESS) { ldb_transaction_cancel(sam_ldb); return NT_STATUS_NO_MEMORY;; } ret = ldb_msg_add_string(msg, "flatname", netbios_name); if (ret != LDB_SUCCESS) { ldb_transaction_cancel(sam_ldb); return NT_STATUS_NO_MEMORY; } ret = ldb_msg_add_string(msg, "trustPartner", dns_name); if (ret != LDB_SUCCESS) { ldb_transaction_cancel(sam_ldb); return NT_STATUS_NO_MEMORY;; } ret = samdb_msg_add_dom_sid(sam_ldb, mem_ctx, msg, "securityIdentifier", r->in.info->sid); if (ret != LDB_SUCCESS) { ldb_transaction_cancel(sam_ldb); return NT_STATUS_NO_MEMORY;; } ret = samdb_msg_add_int(sam_ldb, mem_ctx, msg, "trustType", r->in.info->trust_type); if (ret != LDB_SUCCESS) { ldb_transaction_cancel(sam_ldb); return NT_STATUS_NO_MEMORY;; } ret = samdb_msg_add_int(sam_ldb, mem_ctx, msg, "trustAttributes", r->in.info->trust_attributes); if (ret != LDB_SUCCESS) { ldb_transaction_cancel(sam_ldb); return NT_STATUS_NO_MEMORY;; } ret = samdb_msg_add_int(sam_ldb, mem_ctx, msg, "trustDirection", r->in.info->trust_direction); if (ret != LDB_SUCCESS) { ldb_transaction_cancel(sam_ldb); return NT_STATUS_NO_MEMORY;; } if (trustAuthIncoming.data) { ret = ldb_msg_add_value(msg, "trustAuthIncoming", &trustAuthIncoming, NULL); if (ret != LDB_SUCCESS) { ldb_transaction_cancel(sam_ldb); return NT_STATUS_NO_MEMORY; } } if (trustAuthOutgoing.data) { ret = ldb_msg_add_value(msg, "trustAuthOutgoing", &trustAuthOutgoing, NULL); if (ret != LDB_SUCCESS) { ldb_transaction_cancel(sam_ldb); return NT_STATUS_NO_MEMORY; } } trusted_domain_state->trusted_domain_dn = talloc_reference(trusted_domain_state, msg->dn); /* create the trusted_domain */ ret = ldb_add(sam_ldb, msg); switch (ret) { case LDB_SUCCESS: break; case LDB_ERR_ENTRY_ALREADY_EXISTS: ldb_transaction_cancel(sam_ldb); DEBUG(0,("Failed to create trusted domain record %s: %s\n", ldb_dn_get_linearized(msg->dn), ldb_errstring(sam_ldb))); return NT_STATUS_DOMAIN_EXISTS; case LDB_ERR_INSUFFICIENT_ACCESS_RIGHTS: ldb_transaction_cancel(sam_ldb); DEBUG(0,("Failed to create trusted domain record %s: %s\n", ldb_dn_get_linearized(msg->dn), ldb_errstring(sam_ldb))); return NT_STATUS_ACCESS_DENIED; 
default: ldb_transaction_cancel(sam_ldb); DEBUG(0,("Failed to create user record %s: %s\n", ldb_dn_get_linearized(msg->dn), ldb_errstring(sam_ldb))); return NT_STATUS_INTERNAL_DB_CORRUPTION; } if (r->in.info->trust_direction & LSA_TRUST_DIRECTION_INBOUND) { struct ldb_dn *user_dn; /* Inbound trusts must also create a cn=users object to match */ nt_status = add_trust_user(mem_ctx, sam_ldb, policy_state->domain_dn, netbios_name, &auth_struct.incoming, &user_dn); if (!NT_STATUS_IS_OK(nt_status)) { ldb_transaction_cancel(sam_ldb); return nt_status; } /* save the trust user dn */ trusted_domain_state->trusted_domain_user_dn = talloc_steal(trusted_domain_state, user_dn); } ret = ldb_transaction_commit(sam_ldb); if (ret != LDB_SUCCESS) { return NT_STATUS_INTERNAL_DB_CORRUPTION; } /* * Notify winbindd that we have a new trust */ status = irpc_servers_byname(imsg_ctx, mem_ctx, "winbind_server", &num_server_ids, &server_ids); if (NT_STATUS_IS_OK(status) && num_server_ids >= 1) { imessaging_send(imsg_ctx, server_ids[0], MSG_WINBIND_RELOAD_TRUSTED_DOMAINS, NULL); } TALLOC_FREE(server_ids); handle = dcesrv_handle_create(dce_call, LSA_HANDLE_TRUSTED_DOMAIN); if (!handle) { return NT_STATUS_NO_MEMORY; } handle->data = talloc_steal(handle, trusted_domain_state); trusted_domain_state->access_mask = r->in.access_mask; trusted_domain_state->policy = talloc_reference(trusted_domain_state, policy_state); *r->out.trustdom_handle = handle->wire_handle; return NT_STATUS_OK; }
0
[ "CWE-200" ]
samba
0a3aa5f908e351201dc9c4d4807b09ed9eedff77
124,165,656,112,288,280,000,000,000,000,000,000,000
316
CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message This aims to minimise usage of the error-prone pattern of searching for a just-added message element in order to make modifications to it (and potentially finding the wrong element). BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009 Signed-off-by: Joseph Sutton <[email protected]>
st_init_strtable(void) { return st_init_table(&type_strhash); }
0
[ "CWE-125" ]
oniguruma
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
18,448,336,170,110,100,000,000,000,000,000,000,000
4
onig-5.9.2
dp_packet_l2_5(const struct dp_packet *b) { return b->l2_5_ofs != UINT16_MAX ? (char *) dp_packet_data(b) + b->l2_5_ofs : NULL; }
0
[ "CWE-400" ]
ovs
79349cbab0b2a755140eedb91833ad2760520a83
7,632,429,671,028,242,000,000,000,000,000,000,000
6
flow: Support extra padding length. Although not required, padding can be optionally added until the packet length is MTU bytes. A packet with extra padding currently fails sanity checks. Vulnerability: CVE-2020-35498 Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.") Reported-by: Joakim Hindersson <[email protected]> Acked-by: Ilya Maximets <[email protected]> Signed-off-by: Flavio Leitner <[email protected]> Signed-off-by: Ilya Maximets <[email protected]>
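The sanity check described above has to tolerate L2 padding: an Ethernet frame may carry more bytes than the IP total length says, and the surplus is padding rather than an error, while a frame carrying fewer bytes is genuinely malformed. A generic sketch of that relaxed check with simplified lengths; it is not the miniflow_extract() code.

/* Generic version of the relaxed check described above: available L3 bytes
 * greater than the IP total length are accepted (the surplus is padding),
 * fewer bytes than the IP total length are rejected. */
#include <stdio.h>

static int ip_size_ok(unsigned int l3_avail, unsigned int ip_tot_len)
{
    /* The too-strict variant required l3_avail == ip_tot_len. */
    return ip_tot_len >= 20 /* minimal IPv4 header */ && l3_avail >= ip_tot_len;
}

int main(void)
{
    printf("padded frame   : %s\n", ip_size_ok(64, 46) ? "accepted" : "dropped");
    printf("truncated frame: %s\n", ip_size_ok(30, 46) ? "accepted" : "dropped");
    return 0;
}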
bgp_show_neighbor (struct vty *vty, struct bgp *bgp, enum show_type type, union sockunion *su) { struct listnode *node, *nnode; struct peer *peer; int find = 0; for (ALL_LIST_ELEMENTS (bgp->peer, node, nnode, peer)) { switch (type) { case show_all: bgp_show_peer (vty, peer); break; case show_peer: if (sockunion_same (&peer->su, su)) { find = 1; bgp_show_peer (vty, peer); } break; } } if (type == show_peer && ! find) vty_out (vty, "%% No such neighbor%s", VTY_NEWLINE); return CMD_SUCCESS; }
0
[ "CWE-125" ]
frr
6d58272b4cf96f0daa846210dd2104877900f921
3,018,193,734,476,090,000,000,000,000,000,000,000
29
[bgpd] cleanup, compact and consolidate capability parsing code 2007-07-26 Paul Jakma <[email protected]> * (general) Clean up and compact capability parsing slightly. Consolidate validation of length and logging of generic TLV, and memcpy of capability data, thus removing such from cap specifc code (not always present or correct). * bgp_open.h: Add structures for the generic capability TLV header and for the data formats of the various specific capabilities we support. Hence remove the badly named, or else misdefined, struct capability. * bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data. Do the length checks *before* memcpy()'ing based on that length (stored capability - should have been validated anyway on input, but..). (bgp_afi_safi_valid_indices) new function to validate (afi,safi) which is about to be used as index into arrays, consolidates several instances of same, at least one of which appeared to be incomplete.. (bgp_capability_mp) Much condensed. (bgp_capability_orf_entry) New, process one ORF entry (bgp_capability_orf) Condensed. Fixed to process all ORF entries. (bgp_capability_restart) Condensed, and fixed to use a cap-specific type, rather than abusing capability_mp. (struct message capcode_str) added to aid generic logging. (size_t cap_minsizes[]) added to aid generic validation of capability length field. (bgp_capability_parse) Generic logging and validation of TLV consolidated here. Code compacted as much as possible. * bgp_packet.c: (bgp_open_receive) Capability parsers now use streams, so no more need here to manually fudge the input stream getp. (bgp_capability_msg_parse) use struct capability_mp_data. Validate lengths /before/ memcpy. Use bgp_afi_safi_valid_indices. (bgp_capability_receive) Exported for use by test harness. * bgp_vty.c: (bgp_show_summary) fix conversion warning (bgp_show_peer) ditto * bgp_debug.h: Fix storage 'extern' after type 'const'. * lib/log.c: (mes_lookup) warning about code not being in same-number array slot should be debug, not warning. E.g. BGP has several discontigious number spaces, allocating from different parts of a space is not uncommon (e.g. IANA assigned versus vendor-assigned code points in some number space).
dirserv_generate_networkstatus_vote_obj(crypto_pk_env_t *private_key, authority_cert_t *cert) { or_options_t *options = get_options(); networkstatus_t *v3_out = NULL; uint32_t addr; char *hostname = NULL, *client_versions = NULL, *server_versions = NULL; const char *contact; smartlist_t *routers, *routerstatuses; char identity_digest[DIGEST_LEN]; char signing_key_digest[DIGEST_LEN]; int naming = options->NamingAuthoritativeDir; int listbadexits = options->AuthDirListBadExits; int listbaddirs = options->AuthDirListBadDirs; int vote_on_hsdirs = options->VoteOnHidServDirectoriesV2; routerlist_t *rl = router_get_routerlist(); time_t now = time(NULL); time_t cutoff = now - ROUTER_MAX_AGE_TO_PUBLISH; networkstatus_voter_info_t *voter = NULL; vote_timing_t timing; digestmap_t *omit_as_sybil = NULL; const int vote_on_reachability = running_long_enough_to_decide_unreachable(); smartlist_t *microdescriptors = NULL; tor_assert(private_key); tor_assert(cert); if (resolve_my_address(LOG_WARN, options, &addr, &hostname)<0) { log_warn(LD_NET, "Couldn't resolve my hostname"); return NULL; } if (!strchr(hostname, '.')) { tor_free(hostname); hostname = tor_dup_ip(addr); } if (crypto_pk_get_digest(private_key, signing_key_digest)<0) { log_err(LD_BUG, "Error computing signing key digest"); return NULL; } if (crypto_pk_get_digest(cert->identity_key, identity_digest)<0) { log_err(LD_BUG, "Error computing identity key digest"); return NULL; } if (options->VersioningAuthoritativeDir) { client_versions = format_versions_list(options->RecommendedClientVersions); server_versions = format_versions_list(options->RecommendedServerVersions); } contact = get_options()->ContactInfo; if (!contact) contact = "(none)"; /* precompute this part, since we need it to decide what "stable" * means. 
*/ SMARTLIST_FOREACH(rl->routers, routerinfo_t *, ri, { dirserv_set_router_is_running(ri, now); }); dirserv_compute_performance_thresholds(rl); routers = smartlist_create(); smartlist_add_all(routers, rl->routers); routers_sort_by_identity(routers); omit_as_sybil = get_possible_sybil_list(routers); routerstatuses = smartlist_create(); microdescriptors = smartlist_create(); SMARTLIST_FOREACH_BEGIN(routers, routerinfo_t *, ri) { if (ri->cache_info.published_on >= cutoff) { routerstatus_t *rs; vote_routerstatus_t *vrs; microdesc_t *md; vrs = tor_malloc_zero(sizeof(vote_routerstatus_t)); rs = &vrs->status; set_routerstatus_from_routerinfo(rs, ri, now, naming, listbadexits, listbaddirs, vote_on_hsdirs); if (digestmap_get(omit_as_sybil, ri->cache_info.identity_digest)) clear_status_flags_on_sybil(rs); if (!vote_on_reachability) rs->is_running = 0; vrs->version = version_from_platform(ri->platform); md = dirvote_create_microdescriptor(ri); if (md) { char buf[128]; vote_microdesc_hash_t *h; dirvote_format_microdesc_vote_line(buf, sizeof(buf), md); h = tor_malloc(sizeof(vote_microdesc_hash_t)); h->microdesc_hash_line = tor_strdup(buf); h->next = NULL; vrs->microdesc = h; md->last_listed = now; smartlist_add(microdescriptors, md); } smartlist_add(routerstatuses, vrs); } } SMARTLIST_FOREACH_END(ri); { smartlist_t *added = microdescs_add_list_to_cache(get_microdesc_cache(), microdescriptors, SAVED_NOWHERE, 0); smartlist_free(added); smartlist_free(microdescriptors); } smartlist_free(routers); digestmap_free(omit_as_sybil, NULL); if (options->V3BandwidthsFile) { dirserv_read_measured_bandwidths(options->V3BandwidthsFile, routerstatuses); } v3_out = tor_malloc_zero(sizeof(networkstatus_t)); v3_out->type = NS_TYPE_VOTE; dirvote_get_preferred_voting_intervals(&timing); v3_out->published = now; { char tbuf[ISO_TIME_LEN+1]; networkstatus_t *current_consensus = networkstatus_get_live_consensus(now); long last_consensus_interval; /* only used to pick a valid_after */ if (current_consensus) last_consensus_interval = current_consensus->fresh_until - current_consensus->valid_after; else last_consensus_interval = options->TestingV3AuthInitialVotingInterval; v3_out->valid_after = dirvote_get_start_of_next_interval(now, (int)last_consensus_interval); format_iso_time(tbuf, v3_out->valid_after); log_notice(LD_DIR,"Choosing valid-after time in vote as %s: " "consensus_set=%d, last_interval=%d", tbuf, current_consensus?1:0, (int)last_consensus_interval); } v3_out->fresh_until = v3_out->valid_after + timing.vote_interval; v3_out->valid_until = v3_out->valid_after + (timing.vote_interval * timing.n_intervals_valid); v3_out->vote_seconds = timing.vote_delay; v3_out->dist_seconds = timing.dist_delay; tor_assert(v3_out->vote_seconds > 0); tor_assert(v3_out->dist_seconds > 0); tor_assert(timing.n_intervals_valid > 0); v3_out->client_versions = client_versions; v3_out->server_versions = server_versions; v3_out->known_flags = smartlist_create(); smartlist_split_string(v3_out->known_flags, "Authority Exit Fast Guard Stable V2Dir Valid", 0, SPLIT_SKIP_SPACE|SPLIT_IGNORE_BLANK, 0); if (vote_on_reachability) smartlist_add(v3_out->known_flags, tor_strdup("Running")); if (listbaddirs) smartlist_add(v3_out->known_flags, tor_strdup("BadDirectory")); if (listbadexits) smartlist_add(v3_out->known_flags, tor_strdup("BadExit")); if (naming) { smartlist_add(v3_out->known_flags, tor_strdup("Named")); smartlist_add(v3_out->known_flags, tor_strdup("Unnamed")); } if (vote_on_hsdirs) smartlist_add(v3_out->known_flags, tor_strdup("HSDir")); 
smartlist_sort_strings(v3_out->known_flags); if (options->ConsensusParams) { v3_out->net_params = smartlist_create(); smartlist_split_string(v3_out->net_params, options->ConsensusParams, NULL, 0, 0); smartlist_sort_strings(v3_out->net_params); } voter = tor_malloc_zero(sizeof(networkstatus_voter_info_t)); voter->nickname = tor_strdup(options->Nickname); memcpy(voter->identity_digest, identity_digest, DIGEST_LEN); voter->sigs = smartlist_create(); voter->address = hostname; voter->addr = addr; voter->dir_port = router_get_advertised_dir_port(options, 0); voter->or_port = router_get_advertised_or_port(options); voter->contact = tor_strdup(contact); if (options->V3AuthUseLegacyKey) { authority_cert_t *c = get_my_v3_legacy_cert(); if (c) { if (crypto_pk_get_digest(c->identity_key, voter->legacy_id_digest)) { log_warn(LD_BUG, "Unable to compute digest of legacy v3 identity key"); memset(voter->legacy_id_digest, 0, DIGEST_LEN); } } } v3_out->voters = smartlist_create(); smartlist_add(v3_out->voters, voter); v3_out->cert = authority_cert_dup(cert); v3_out->routerstatus_list = routerstatuses; /* Note: networkstatus_digest is unset; it won't get set until we actually * format the vote. */ return v3_out; }
0
[ "CWE-264" ]
tor
00fffbc1a15e2696a89c721d0c94dc333ff419ef
226,574,007,062,208,170,000,000,000,000,000,000,000
207
Don't give the Guard flag to relays without the CVE-2011-2768 fix
static inline int sctp_list_single_entry(struct list_head *head) { return (head->next != head) && (head->next == head->prev); }
0
[]
linux
196d67593439b03088913227093e374235596e33
164,546,069,216,297,010,000,000,000,000,000,000,000
4
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call The current SCTP stack is lacking a mechanism to have per association statistics. This is an implementation modeled after OpenSolaris' SCTP_GET_ASSOC_STATS. Userspace part will follow on lksctp if/when there is a general ACK on this. V4: - Move ipackets++ before q->immediate.func() for consistency reasons - Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid returning bogus RTO values - return asoc->rto_min when max_obs_rto value has not changed V3: - Increase ictrlchunks in sctp_assoc_bh_rcv() as well - Move ipackets++ to sctp_inq_push() - return 0 when no rto updates took place since the last call V2: - Implement partial retrieval of stat struct to cope for future expansion - Kill the rtxpackets counter as it cannot be precise anyway - Rename outseqtsns to outofseqtsns to make it clearer that these are out of sequence unexpected TSNs - Move asoc->ipackets++ under a lock to avoid potential miscounts - Fold asoc->opackets++ into the already existing asoc check - Kill unneeded (q->asoc) test when increasing rtxchunks - Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0) - Don't count SHUTDOWNs as SACKs - Move SCTP_GET_ASSOC_STATS to the private space API - Adjust the len check in sctp_getsockopt_assoc_stats() to allow for future struct growth - Move association statistics in their own struct - Update idupchunks when we send a SACK with dup TSNs - return min_rto in max_rto when RTO has not changed. Also return the transport when max_rto last changed. Signed-off: Michele Baldessari <[email protected]> Acked-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
ignore_globbed_names (names, name_func) char **names; sh_ignore_func_t *name_func; { char **newnames; int n, i; for (i = 0; names[i]; i++) ; newnames = strvec_create (i + 1); for (n = i = 0; names[i]; i++) { if ((*name_func) (names[i])) newnames[n++] = names[i]; else free (names[i]); } newnames[n] = (char *)NULL; if (n == 0) { names[0] = (char *)NULL; free (newnames); return; } /* Copy the acceptable names from NEWNAMES back to NAMES and set the new array end. */ for (n = 0; newnames[n]; n++) names[n] = newnames[n]; names[n] = (char *)NULL; free (newnames); }
0
[ "CWE-273", "CWE-787" ]
bash
951bdaad7a18cc0dc1036bba86b18b90874d39ff
214,286,071,751,599,500,000,000,000,000,000,000,000
35
commit bash-20190628 snapshot
static void kiocb_end_write(struct io_kiocb *req) { /* * Tell lockdep we inherited freeze protection from submission * thread. */ if (req->flags & REQ_F_ISREG) { struct super_block *sb = file_inode(req->file)->i_sb; __sb_writers_acquired(sb, SB_FREEZE_WRITE); sb_end_write(sb); } }
0
[ "CWE-787" ]
linux
d1f82808877bb10d3deee7cf3374a4eb3fb582db
315,520,559,310,571,960,000,000,000,000,000,000,000
13
io_uring: truncate lengths larger than MAX_RW_COUNT on provide buffers Read and write operations are capped to MAX_RW_COUNT. Some read ops rely on that limit, and that is not guaranteed by the IORING_OP_PROVIDE_BUFFERS. Truncate those lengths when doing io_add_buffers, so buffer addresses still use the uncapped length. Also, take the chance and change struct io_buffer len member to __u32, so it matches struct io_provide_buffer len member. This fixes CVE-2021-3491, also reported as ZDI-CAN-13546. Fixes: ddf0322db79c ("io_uring: add IORING_OP_PROVIDE_BUFFERS") Reported-by: Billy Jheng Bing-Jhong (@st424204) Signed-off-by: Thadeu Lima de Souza Cascardo <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
static GFINLINE void av1dmx_update_cts(GF_AV1DmxCtx *ctx) { assert(ctx->cur_fps.num); assert(ctx->cur_fps.den); if (ctx->timescale) { u64 inc = ctx->cur_fps.den; inc *= ctx->timescale; inc /= ctx->cur_fps.num; ctx->cts += inc; } else { ctx->cts += ctx->cur_fps.den; } }
0
[ "CWE-476", "CWE-787" ]
gpac
13dad7d5ef74ca2e6fe4010f5b03eb12e9bbe0ec
126,064,430,894,865,560,000,000,000,000,000,000,000
14
fixed #1719
bool check_grant_routine(THD *thd, ulong want_access, TABLE_LIST *procs, bool is_proc, bool no_errors) { TABLE_LIST *table; Security_context *sctx= thd->security_ctx; char *user= sctx->priv_user; char *host= sctx->priv_host; DBUG_ENTER("check_grant_routine"); want_access&= ~sctx->master_access; if (!want_access) DBUG_RETURN(0); // ok mysql_rwlock_rdlock(&LOCK_grant); for (table= procs; table; table= table->next_global) { GRANT_NAME *grant_proc; if ((grant_proc= routine_hash_search(host, sctx->get_ip()->ptr(), table->db, user, table->table_name, is_proc, 0))) table->grant.privilege|= grant_proc->privs; if (want_access & ~table->grant.privilege) { want_access &= ~table->grant.privilege; goto err; } } mysql_rwlock_unlock(&LOCK_grant); DBUG_RETURN(0); err: mysql_rwlock_unlock(&LOCK_grant); if (!no_errors) { char buff[1024]; const char *command=""; if (table) strxmov(buff, table->db, ".", table->table_name, NullS); if (want_access & EXECUTE_ACL) command= "execute"; else if (want_access & ALTER_PROC_ACL) command= "alter routine"; else if (want_access & GRANT_ACL) command= "grant"; my_error(ER_PROCACCESS_DENIED_ERROR, MYF(0), command, user, host, table ? buff : "unknown"); } DBUG_RETURN(1); }
0
[]
mysql-server
25d1b7e03b9b375a243fabdf0556c063c7282361
172,328,089,270,024,260,000,000,000,000,000,000,000
48
Bug #22722946: integer overflow may lead to wrong results in get_56_lenc_string
TPMI_DH_PCR_Unmarshal(TPMI_DH_PCR *target, BYTE **buffer, INT32 *size, BOOL allowNull) { TPM_RC rc = TPM_RC_SUCCESS; if (rc == TPM_RC_SUCCESS) { rc = TPM_HANDLE_Unmarshal(target, buffer, size); } if (rc == TPM_RC_SUCCESS) { BOOL isNotPcr = (*target > PCR_LAST); BOOL isNotLegalNull = (*target != TPM_RH_NULL) || !allowNull; if (isNotPcr && isNotLegalNull) { rc = TPM_RC_VALUE; } } return rc; }
1
[ "CWE-787" ]
libtpms
5cc98a62dc6f204dcf5b87c2ee83ac742a6a319b
333,809,706,986,758,370,000,000,000,000,000,000,000
17
tpm2: Restore original value if unmarshalled value was illegal Restore the original value of the memory location where data from a stream was unmarshalled and the unmarshalled value was found to be illegal. The goal is to not keep illegal values in memory. Signed-off-by: Stefan Berger <[email protected]>
static ut32 getthzeroimmed16(ut32 number) { ut32 res = 0; res |= (number & 0xf000) << 12; res |= (number & 0x0800) << 7; res |= (number & 0x0700) >> 4; res |= (number & 0x00ff) << 8; return res; }
0
[ "CWE-125", "CWE-787" ]
radare2
e5c14c167b0dcf0a53d76bd50bacbbcc0dfc1ae7
146,697,578,130,125,970,000,000,000,000,000,000,000
8
Fix #12417/#12418 (arm assembler heap overflows)
static const char *wsgi_set_error_override(cmd_parms *cmd, void *mconfig, const char *f) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->error_override = 0; else if (strcasecmp(f, "On") == 0) dconfig->error_override = 1; else return "WSGIErrorOverride must be one of: Off | On"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->error_override = 0; else if (strcasecmp(f, "On") == 0) sconfig->error_override = 1; else return "WSGIErrorOverride must be one of: Off | On"; } return NULL; }
0
[ "CWE-264" ]
mod_wsgi
d9d5fea585b23991f76532a9b07de7fcd3b649f4
283,408,875,405,061,000,000,000,000,000,000,000,000
29
Local privilege escalation when using daemon mode. (CVE-2014-0240)
dtls1_process_record(SSL *s) { int al; int clear=0; int enc_err; SSL_SESSION *sess; SSL3_RECORD *rr; unsigned int mac_size; unsigned char md[EVP_MAX_MD_SIZE]; int decryption_failed_or_bad_record_mac = 0; unsigned char *mac = NULL; int i; rr= &(s->s3->rrec); sess = s->session; /* At this point, s->packet_length == SSL3_RT_HEADER_LNGTH + rr->length, * and we have that many bytes in s->packet */ rr->input= &(s->packet[DTLS1_RT_HEADER_LENGTH]); /* ok, we can now read from 's->packet' data into 'rr' * rr->input points at rr->length bytes, which * need to be copied into rr->data by either * the decryption or by the decompression * When the data is 'copied' into the rr->data buffer, * rr->input will be pointed at the new buffer */ /* We now have - encrypted [ MAC [ compressed [ plain ] ] ] * rr->length bytes of encrypted compressed stuff. */ /* check is not needed I believe */ if (rr->length > SSL3_RT_MAX_ENCRYPTED_LENGTH) { al=SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_DTLS1_PROCESS_RECORD,SSL_R_ENCRYPTED_LENGTH_TOO_LONG); goto f_err; } /* decrypt in place in 'rr->input' */ rr->data=rr->input; rr->orig_len=rr->length; enc_err = s->method->ssl3_enc->enc(s,0); if (enc_err <= 0) { /* To minimize information leaked via timing, we will always * perform all computations before discarding the message. */ decryption_failed_or_bad_record_mac = 1; } #ifdef TLS_DEBUG printf("dec %d\n",rr->length); { unsigned int z; for (z=0; z<rr->length; z++) printf("%02X%c",rr->data[z],((z+1)%16)?' ':'\n'); } printf("\n"); #endif /* r->length is now the compressed data plus mac */ if ( (sess == NULL) || (s->enc_read_ctx == NULL) || (s->read_hash == NULL)) clear=1; if (!clear) { mac_size=EVP_MD_size(s->read_hash); if (rr->length > SSL3_RT_MAX_COMPRESSED_LENGTH+mac_size) { #if 0 /* OK only for stream ciphers (then rr->length is visible from ciphertext anyway) */ al=SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_DTLS1_PROCESS_RECORD,SSL_R_PRE_MAC_LENGTH_TOO_LONG); goto f_err; #else decryption_failed_or_bad_record_mac = 1; #endif } /* check the MAC for rr->input (it's in mac_size bytes at the tail) */ if (rr->length >= mac_size) { rr->length -= mac_size; mac = &rr->data[rr->length]; } else rr->length = 0; i=s->method->ssl3_enc->mac(s,md,0); if (i < 0 || mac == NULL || CRYPTO_memcmp(md,mac,mac_size) != 0) { decryption_failed_or_bad_record_mac = 1; } } if (decryption_failed_or_bad_record_mac) { /* decryption failed, silently discard message */ rr->length = 0; s->packet_length = 0; goto err; } /* r->length is now just compressed */ if (s->expand != NULL) { if (rr->length > SSL3_RT_MAX_COMPRESSED_LENGTH) { al=SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_DTLS1_PROCESS_RECORD,SSL_R_COMPRESSED_LENGTH_TOO_LONG); goto f_err; } if (!ssl3_do_uncompress(s)) { al=SSL_AD_DECOMPRESSION_FAILURE; SSLerr(SSL_F_DTLS1_PROCESS_RECORD,SSL_R_BAD_DECOMPRESSION); goto f_err; } } if (rr->length > SSL3_RT_MAX_PLAIN_LENGTH) { al=SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_DTLS1_PROCESS_RECORD,SSL_R_DATA_LENGTH_TOO_LONG); goto f_err; } rr->off=0; /* So at this point the following is true * ssl->s3->rrec.type is the type of record * ssl->s3->rrec.length == number of bytes in record * ssl->s3->rrec.off == offset to first valid byte * ssl->s3->rrec.data == where to take bytes from, increment * after use :-). */ /* we have pulled in a full packet so zero things */ s->packet_length=0; dtls1_record_bitmap_update(s, &(s->d1->bitmap));/* Mark receipt of record. */ return(1); f_err: ssl3_send_alert(s,SSL3_AL_FATAL,al); err: return(0); }
1
[ "CWE-310" ]
openssl
be88529753897c29c677d1becb321f0072c0659c
232,958,030,173,316,740,000,000,000,000,000,000,000
145
Update DTLS code to match CBC decoding in TLS. This change updates the DTLS code to match the constant-time CBC behaviour in the TLS. (cherry picked from commit 9f27de170d1b7bef3d46d41382dc4dafde8b3900) (cherry picked from commit 5e4ca556e970edb8a7f364fcb6ee6818a965a60b) Conflicts: ssl/d1_enc.c ssl/d1_pkt.c ssl/s3_pkt.c
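To illustrate the constant-time comparison idea this commit relies on (the MAC check via CRYPTO_memcmp with no early exit), here is a minimal, standalone C sketch. It is not OpenSSL's CRYPTO_memcmp source; the function and variable names are illustrative only.

#include <stddef.h>
#include <stdio.h>

/* Returns 0 only if the two buffers are equal; the running time depends
 * on len, not on where the first mismatching byte occurs. */
static int ct_memcmp(const void *a, const void *b, size_t len)
{
    const unsigned char *pa = a, *pb = b;
    unsigned char diff = 0;
    size_t i;

    for (i = 0; i < len; i++)
        diff |= pa[i] ^ pb[i];   /* accumulate differences, no early return */

    return diff;                 /* 0 iff every byte matched */
}

int main(void)
{
    unsigned char mac1[4] = { 1, 2, 3, 4 };
    unsigned char mac2[4] = { 1, 2, 9, 4 };

    printf("match: %d\n", ct_memcmp(mac1, mac1, sizeof(mac1)) == 0);
    printf("match: %d\n", ct_memcmp(mac1, mac2, sizeof(mac1)) == 0);
    return 0;
}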
static void dcb_flushapp(void)
{
	struct dcb_app_type *app;
	struct dcb_app_type *tmp;

	spin_lock(&dcb_lock);
	list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
		list_del(&app->list);
		kfree(app);
	}
	spin_unlock(&dcb_lock);
}
0
[ "CWE-399" ]
linux-2.6
29cd8ae0e1a39e239a3a7b67da1986add1199fc0
221,719,518,545,829,230,000,000,000,000,000,000,000
12
dcbnl: fix various netlink info leaks The dcb netlink interface leaks stack memory in various places: * perm_addr[] buffer is only filled at max with 12 of the 32 bytes but copied completely, * no in-kernel driver fills all fields of an IEEE 802.1Qaz subcommand, so we're leaking up to 58 bytes for ieee_ets structs, up to 136 bytes for ieee_pfc structs, etc., * the same is true for CEE -- no in-kernel driver fills the whole struct, Prevent all of the above stack info leaks by properly initializing the buffers/structures involved. Signed-off-by: Mathias Krause <[email protected]> Signed-off-by: David S. Miller <[email protected]>
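The leak pattern described in this commit message is a partially filled stack buffer being copied out in full. The following self-contained userspace sketch shows the mitigation (zero the whole buffer before filling it); it is not the kernel netlink code, and PERM_ADDR_LEN and fill_perm_addr() are illustrative names.

#include <stdio.h>
#include <string.h>

#define PERM_ADDR_LEN 32

/* Hypothetical "driver" that only fills the first 12 bytes. */
static void fill_perm_addr(unsigned char *addr)
{
    for (int i = 0; i < 12; i++)
        addr[i] = (unsigned char)i;
}

int main(void)
{
    unsigned char perm_addr[PERM_ADDR_LEN];

    /* Without this memset, bytes 12..31 would hold whatever was on the
     * stack, which is exactly the class of info leak the commit fixes. */
    memset(perm_addr, 0, sizeof(perm_addr));
    fill_perm_addr(perm_addr);

    for (size_t i = 0; i < sizeof(perm_addr); i++)
        printf("%02x", perm_addr[i]);
    printf("\n");
    return 0;
}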
ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
			    char *cipher_name, size_t *key_size)
{
	char dummy_key[ECRYPTFS_MAX_KEY_BYTES];
	char *full_alg_name;
	int rc;

	*key_tfm = NULL;
	if (*key_size > ECRYPTFS_MAX_KEY_BYTES) {
		rc = -EINVAL;
		printk(KERN_ERR "Requested key size is [%zd] bytes; maximum "
		       "allowable is [%d]\n", *key_size, ECRYPTFS_MAX_KEY_BYTES);
		goto out;
	}
	rc = ecryptfs_crypto_api_algify_cipher_name(&full_alg_name, cipher_name,
						    "ecb");
	if (rc)
		goto out;
	*key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC);
	kfree(full_alg_name);
	if (IS_ERR(*key_tfm)) {
		rc = PTR_ERR(*key_tfm);
		printk(KERN_ERR "Unable to allocate crypto cipher with name "
		       "[%s]; rc = [%d]\n", cipher_name, rc);
		goto out;
	}
	crypto_blkcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_WEAK_KEY);
	if (*key_size == 0) {
		struct blkcipher_alg *alg = crypto_blkcipher_alg(*key_tfm);

		*key_size = alg->max_keysize;
	}
	get_random_bytes(dummy_key, *key_size);
	rc = crypto_blkcipher_setkey(*key_tfm, dummy_key, *key_size);
	if (rc) {
		printk(KERN_ERR "Error attempting to set key of size [%zd] for "
		       "cipher [%s]; rc = [%d]\n", *key_size, cipher_name, rc);
		rc = -EINVAL;
		goto out;
	}
out:
	return rc;
}
0
[ "CWE-189" ]
linux-2.6
8faece5f906725c10e7a1f6caf84452abadbdc7b
154,034,022,067,648,360,000,000,000,000,000,000,000
43
eCryptfs: Allocate a variable number of pages for file headers When allocating the memory used to store the eCryptfs header contents, a single, zeroed page was being allocated with get_zeroed_page(). However, the size of an eCryptfs header is either PAGE_CACHE_SIZE or ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE (8192), whichever is larger, and is stored in the file's private_data->crypt_stat->num_header_bytes_at_front field. ecryptfs_write_metadata_to_contents() was using num_header_bytes_at_front to decide how many bytes should be written to the lower filesystem for the file header. Unfortunately, at least 8K was being written from the page, despite the chance of the single, zeroed page being smaller than 8K. This resulted in random areas of kernel memory being written between the 0x1000 and 0x1FFF bytes offsets in the eCryptfs file headers if PAGE_SIZE was 4K. This patch allocates a variable number of pages, calculated with num_header_bytes_at_front, and passes the number of allocated pages along to ecryptfs_write_metadata_to_contents(). Thanks to Florian Streibelt for reporting the data leak and working with me to find the problem. 2.6.28 is the only kernel release with this vulnerability. Corresponds to CVE-2009-0787 Signed-off-by: Tyler Hicks <[email protected]> Acked-by: Dustin Kirkland <[email protected]> Reviewed-by: Eric Sandeen <[email protected]> Reviewed-by: Eugene Teo <[email protected]> Cc: Greg KH <[email protected]> Cc: dann frazier <[email protected]> Cc: Serge E. Hallyn <[email protected]> Cc: Florian Streibelt <[email protected]> Cc: [email protected] Signed-off-by: Linus Torvalds <[email protected]>
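The fix sizes the header allocation from the actual header byte count instead of assuming a single page. A hedged sketch of the round-up arithmetic involved is below; the names and the 4K page size are assumptions for illustration, not the eCryptfs code.

#include <stddef.h>
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096u   /* assumed 4K pages for this example */

/* Number of whole pages needed to hold header_bytes of header data. */
static size_t pages_for_header(size_t header_bytes)
{
    return (header_bytes + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE;
}

int main(void)
{
    printf("%zu\n", pages_for_header(4096));  /* 1 page  */
    printf("%zu\n", pages_for_header(8192));  /* 2 pages */
    printf("%zu\n", pages_for_header(8193));  /* 3 pages */
    return 0;
}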
static void unlock_params(struct snd_pcm_runtime *runtime) { mutex_unlock(&runtime->oss.params_lock); }
0
[ "CWE-362" ]
linux
8423f0b6d513b259fdab9c9bf4aaa6188d054c2d
301,841,503,183,630,230,000,000,000,000,000,000,000
4
ALSA: pcm: oss: Fix race at SNDCTL_DSP_SYNC There is a small race window at snd_pcm_oss_sync() that is called from OSS PCM SNDCTL_DSP_SYNC ioctl; namely the function calls snd_pcm_oss_make_ready() at first, then takes the params_lock mutex for the rest. When the stream is set up again by another thread between them, it leads to inconsistency, and may result in unexpected results such as NULL dereference of OSS buffer as a fuzzer spotted recently. The fix is simply to cover snd_pcm_oss_make_ready() call into the same params_lock mutex with snd_pcm_oss_make_ready_locked() variant. Reported-and-tested-by: butt3rflyh4ck <[email protected]> Reviewed-by: Jaroslav Kysela <[email protected]> Cc: <[email protected]> Link: https://lore.kernel.org/r/CAFcO6XN7JDM4xSXGhtusQfS2mSBcx50VJKwQpCq=WeLt57aaZA@mail.gmail.com Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Takashi Iwai <[email protected]>
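The fix widens the critical section so that the "make ready" step and the rest of the sync run under the same params_lock. The pthread-based sketch below shows only the before/after locking shape under illustrative names; it is not the ALSA code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t params_lock = PTHREAD_MUTEX_INITIALIZER;
static int stream_ready;

static void make_ready(void)  { stream_ready = 1; }
static void sync_stream(void) { printf("sync, ready=%d\n", stream_ready); }

/* Racy shape: another thread may tear down and re-set up the stream
 * between the unlocked make_ready() and the locked sync. */
static void dsp_sync_racy(void)
{
    make_ready();
    pthread_mutex_lock(&params_lock);
    sync_stream();
    pthread_mutex_unlock(&params_lock);
}

/* Fixed shape: both steps happen inside one critical section. */
static void dsp_sync_fixed(void)
{
    pthread_mutex_lock(&params_lock);
    make_ready();
    sync_stream();
    pthread_mutex_unlock(&params_lock);
}

int main(void)
{
    dsp_sync_racy();
    dsp_sync_fixed();
    return 0;
}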
zmgr_start_xfrin_ifquota(dns_zonemgr_t *zmgr, dns_zone_t *zone) {
	dns_peer_t *peer = NULL;
	isc_netaddr_t masterip;
	uint32_t nxfrsin, nxfrsperns;
	dns_zone_t *x;
	uint32_t maxtransfersin, maxtransfersperns;
	isc_event_t *e;

	/*
	 * If we are exiting just pretend we got quota so the zone will
	 * be cleaned up in the zone's task context.
	 */
	LOCK_ZONE(zone);
	if (DNS_ZONE_FLAG(zone, DNS_ZONEFLG_EXITING)) {
		UNLOCK_ZONE(zone);
		goto gotquota;
	}

	/*
	 * Find any configured information about the server we'd
	 * like to transfer this zone from.
	 */
	isc_netaddr_fromsockaddr(&masterip, &zone->masteraddr);
	(void)dns_peerlist_peerbyaddr(zone->view->peers, &masterip, &peer);
	UNLOCK_ZONE(zone);

	/*
	 * Determine the total maximum number of simultaneous
	 * transfers allowed, and the maximum for this specific
	 * master.
	 */
	maxtransfersin = zmgr->transfersin;
	maxtransfersperns = zmgr->transfersperns;
	if (peer != NULL)
		(void)dns_peer_gettransfers(peer, &maxtransfersperns);

	/*
	 * Count the total number of transfers that are in progress,
	 * and the number of transfers in progress from this master.
	 * We linearly scan a list of all transfers; if this turns
	 * out to be too slow, we could hash on the master address.
	 */
	nxfrsin = nxfrsperns = 0;
	for (x = ISC_LIST_HEAD(zmgr->xfrin_in_progress);
	     x != NULL;
	     x = ISC_LIST_NEXT(x, statelink))
	{
		isc_netaddr_t xip;

		LOCK_ZONE(x);
		isc_netaddr_fromsockaddr(&xip, &x->masteraddr);
		UNLOCK_ZONE(x);

		nxfrsin++;
		if (isc_netaddr_equal(&xip, &masterip))
			nxfrsperns++;
	}

	/* Enforce quota. */
	if (nxfrsin >= maxtransfersin)
		return (ISC_R_QUOTA);

	if (nxfrsperns >= maxtransfersperns)
		return (ISC_R_QUOTA);

 gotquota:
	/*
	 * We have sufficient quota. Move the zone to the "xfrin_in_progress"
	 * list and send it an event to let it start the actual transfer in the
	 * context of its own task.
	 */
	e = isc_event_allocate(zmgr->mctx, zmgr, DNS_EVENT_ZONESTARTXFRIN,
			       got_transfer_quota, zone, sizeof(isc_event_t));
	if (e == NULL)
		return (ISC_R_NOMEMORY);

	LOCK_ZONE(zone);
	INSIST(zone->statelist == &zmgr->waiting_for_xfrin);
	ISC_LIST_UNLINK(zmgr->waiting_for_xfrin, zone, statelink);
	ISC_LIST_APPEND(zmgr->xfrin_in_progress, zone, statelink);
	zone->statelist = &zmgr->xfrin_in_progress;
	isc_task_send(zone->task, &e);
	dns_zone_log(zone, ISC_LOG_INFO, "Transfer started.");
	UNLOCK_ZONE(zone);

	return (ISC_R_SUCCESS);
}
0
[ "CWE-327" ]
bind9
f09352d20a9d360e50683cd1d2fc52ccedcd77a0
76,526,912,786,225,590,000,000,000,000,000,000,000
87
Update keyfetch_done compute_tag check If in keyfetch_done the compute_tag fails (because for example the algorithm is not supported), don't crash, but instead ignore the key.
void CLASS parse_smal (int offset, unsigned fsize)
{
  int ver;

  fseek (ifp, offset+2, SEEK_SET);
  order = 0x4949;
  ver = fgetc(ifp);
  if (ver == 6)
    fseek (ifp, 5, SEEK_CUR);
  if (get4() != fsize) return;
  if (ver > 6)
    data_offset = get4();
  raw_height = height = get2();
  raw_width = width = get2();
  strcpy (make, "SMaL");
  sprintf (model, "v%d %dx%d", ver, width, height);
  if (ver == 6)
    load_raw = &CLASS smal_v6_load_raw;
  if (ver == 9)
    load_raw = &CLASS smal_v9_load_raw;
}
0
[ "CWE-189" ]
rawstudio
983bda1f0fa5fa86884381208274198a620f006e
103,911,436,054,186,020,000,000,000,000,000,000,000
18
Avoid overflow in ljpeg_start().
smtp_proceed_wiz(struct smtp_session *s, const char *args)
{
	smtp_reply(s, "500 %s %s: this feature is not supported yet ;-)",
	    esc_code(ESC_STATUS_PERMFAIL, ESC_INVALID_COMMAND),
	    esc_description(ESC_INVALID_COMMAND));
}
0
[ "CWE-78", "CWE-252" ]
src
9dcfda045474d8903224d175907bfc29761dcb45
13,130,892,457,290,493,000,000,000,000,000,000,000
6
Fix a security vulnerability discovered by Qualys which can lead to a privileges escalation on mbox deliveries and unprivileged code execution on lmtp deliveries, due to a logic issue causing a sanity check to be missed. ok eric@, millert@
char **lxc_string_split(const char *string, char _sep)
{
	char *token, *str, *saveptr = NULL;
	char sep[2] = {_sep, '\0'};
	char **tmp = NULL, **result = NULL;
	size_t result_capacity = 0;
	size_t result_count = 0;
	int r, saved_errno;

	if (!string)
		return calloc(1, sizeof(char *));

	str = alloca(strlen(string) + 1);
	strcpy(str, string);
	for (; (token = strtok_r(str, sep, &saveptr)); str = NULL) {
		r = lxc_grow_array((void ***)&result, &result_capacity, result_count + 1, 16);
		if (r < 0)
			goto error_out;
		result[result_count] = strdup(token);
		if (!result[result_count])
			goto error_out;
		result_count++;
	}

	/* if we allocated too much, reduce it */
	tmp = realloc(result, (result_count + 1) * sizeof(char *));
	if (!tmp)
		goto error_out;
	result = tmp;

	/* Make sure we don't return uninitialized memory. */
	if (result_count == 0)
		*result = NULL;

	return result;

error_out:
	saved_errno = errno;
	lxc_free_array((void **)result, free);
	errno = saved_errno;
	return NULL;
}
0
[ "CWE-417" ]
lxc
5eb45428b312e978fb9e294dde16efb14dd9fa4d
147,937,742,932,900,230,000,000,000,000,000,000,000
39
CVE 2018-6556: verify netns fd in lxc-user-nic Signed-off-by: Christian Brauner <[email protected]>
GF_Err def_cont_box_dump(GF_Box *a, FILE *trace)
{
	char *name = "SubTrackDefinitionBox"; //only one using generic box container for now
	gf_isom_box_dump_start(a, name, trace);
	fprintf(trace, ">\n");
	gf_isom_box_dump_done(name, a, trace);
	return GF_OK;
}
0
[ "CWE-125" ]
gpac
bceb03fd2be95097a7b409ea59914f332fb6bc86
302,103,501,110,466,140,000,000,000,000,000,000,000
8
fixed 2 possible heap overflows (inc. #1088)
static void hidp_send_set_report(struct uhid_event *ev, void *user_data)
{
	struct input_device *idev = user_data;
	uint8_t hdr;
	bool sent;

	DBG("");

	switch (ev->u.output.rtype) {
	case UHID_FEATURE_REPORT:
		/* Send SET_REPORT on control channel */
		if (idev->report_req_pending) {
			DBG("Old GET_REPORT or SET_REPORT still pending");
			return;
		}

		hdr = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
		sent = hidp_send_ctrl_message(idev, hdr, ev->u.output.data,
							ev->u.output.size);
		if (sent) {
			idev->report_req_pending = hdr;
			idev->report_req_timer =
				g_timeout_add_seconds(REPORT_REQ_TIMEOUT,
						hidp_report_req_timeout, idev);
		}
		break;
	case UHID_OUTPUT_REPORT:
		/* Send DATA on interrupt channel */
		hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
		hidp_send_intr_message(idev, hdr, ev->u.output.data,
						ev->u.output.size);
		break;
	default:
		DBG("Unsupported HID report type %u", ev->u.output.rtype);
		return;
	}
}
0
[]
bluez
3cccdbab2324086588df4ccf5f892fb3ce1f1787
232,444,844,141,422,600,000,000,000,000,000,000,000
37
HID accepts bonded device connections only. This change adds a configuration for platforms to choose a more secure posture for the HID profile. While some older mice are known to not support pairing or encryption, some platform may choose a more secure posture by requiring the device to be bonded and require the connection to be encrypted when bonding is required. Reference: https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00352.html
print_version(const gs_main_instance *minst) { printf_program_ident(minst->heap, NULL, gs_revision); }
0
[]
ghostpdl
407cc61e87b0fd9d44d72ca740af7d3c85dee78d
55,938,915,081,514,360,000,000,000,000,000,000,000
4
"starting_arg_file" should only apply once. The "starting_arg_file == true" setting should apply to the *first* call to lib_file_open() in the context of a given call to runarg(). Previously, it remained set for the entire duration of the runarg() call, resulting in the current directory being searched for any resource files required by the job. We also want "starting_arg_file == false" when runarg() is called to execute Postscript from a buffer, rather than a file argument. There is a very small chance this may cause problems with some strange scripts or utilities, but I have been unable to prompt such an issue. If one does arise, we may have rethink this entirely. No cluster differences.
static void setup_namespaces(struct lo_data *lo, struct fuse_session *se)
{
    pid_t child;
    char template[] = "virtiofsd-XXXXXX";
    char *tmpdir;

    /*
     * Create a new pid namespace for *child* processes. We'll have to
     * fork in order to enter the new pid namespace. A new mount namespace
     * is also needed so that we can remount /proc for the new pid
     * namespace.
     *
     * Our UNIX domain sockets have been created. Now we can move to
     * an empty network namespace to prevent TCP/IP and other network
     * activity in case this process is compromised.
     */
    if (unshare(CLONE_NEWPID | CLONE_NEWNS | CLONE_NEWNET) != 0) {
        fuse_log(FUSE_LOG_ERR, "unshare(CLONE_NEWPID | CLONE_NEWNS): %m\n");
        exit(1);
    }

    child = fork();
    if (child < 0) {
        fuse_log(FUSE_LOG_ERR, "fork() failed: %m\n");
        exit(1);
    }
    if (child > 0) {
        pid_t waited;
        int wstatus;

        setup_wait_parent_capabilities();

        /* The parent waits for the child */
        do {
            waited = waitpid(child, &wstatus, 0);
        } while (waited < 0 && errno == EINTR && !se->exited);

        /* We were terminated by a signal, see fuse_signals.c */
        if (se->exited) {
            exit(0);
        }

        if (WIFEXITED(wstatus)) {
            exit(WEXITSTATUS(wstatus));
        }

        exit(1);
    }

    /* Send us SIGTERM when the parent thread terminates, see prctl(2) */
    prctl(PR_SET_PDEATHSIG, SIGTERM);

    /*
     * If the mounts have shared propagation then we want to opt out so our
     * mount changes don't affect the parent mount namespace.
     */
    if (mount(NULL, "/", NULL, MS_REC | MS_SLAVE, NULL) < 0) {
        fuse_log(FUSE_LOG_ERR, "mount(/, MS_REC|MS_SLAVE): %m\n");
        exit(1);
    }

    /* The child must remount /proc to use the new pid namespace */
    if (mount("proc", "/proc", "proc",
              MS_NODEV | MS_NOEXEC | MS_NOSUID | MS_RELATIME, NULL) < 0) {
        fuse_log(FUSE_LOG_ERR, "mount(/proc): %m\n");
        exit(1);
    }

    tmpdir = mkdtemp(template);
    if (!tmpdir) {
        fuse_log(FUSE_LOG_ERR, "tmpdir(%s): %m\n", template);
        exit(1);
    }

    if (mount("/proc/self/fd", tmpdir, NULL, MS_BIND, NULL) < 0) {
        fuse_log(FUSE_LOG_ERR, "mount(/proc/self/fd, %s, MS_BIND): %m\n",
                 tmpdir);
        exit(1);
    }

    /* Now we can get our /proc/self/fd directory file descriptor */
    lo->proc_self_fd = open(tmpdir, O_PATH);
    if (lo->proc_self_fd == -1) {
        fuse_log(FUSE_LOG_ERR, "open(%s, O_PATH): %m\n", tmpdir);
        exit(1);
    }

    if (umount2(tmpdir, MNT_DETACH) < 0) {
        fuse_log(FUSE_LOG_ERR, "umount2(%s, MNT_DETACH): %m\n", tmpdir);
        exit(1);
    }

    if (rmdir(tmpdir) < 0) {
        fuse_log(FUSE_LOG_ERR, "rmdir(%s): %m\n", tmpdir);
    }
}
1
[ "CWE-269" ]
qemu
ebf101955ce8f8d72fba103b5151115a4335de2c
273,053,890,801,849,470,000,000,000,000,000,000,000
96
virtiofsd: avoid /proc/self/fd tempdir In order to prevent /proc/self/fd escapes a temporary directory is created where /proc/self/fd is bind-mounted. This doesn't work on read-only file systems. Avoid the temporary directory by bind-mounting /proc/self/fd over /proc. This does not affect other processes since we remounted / with MS_REC | MS_SLAVE. /proc must exist and virtiofsd does not use it so it's safe to do this. Path traversal can be tested with the following function: static void test_proc_fd_escape(struct lo_data *lo) { int fd; int level = 0; ino_t last_ino = 0; fd = lo->proc_self_fd; for (;;) { struct stat st; if (fstat(fd, &st) != 0) { perror("fstat"); return; } if (last_ino && st.st_ino == last_ino) { fprintf(stderr, "inode number unchanged, stopping\n"); return; } last_ino = st.st_ino; fprintf(stderr, "Level %d dev %lu ino %lu\n", level, (unsigned long)st.st_dev, (unsigned long)last_ino); fd = openat(fd, "..", O_PATH | O_DIRECTORY | O_NOFOLLOW); level++; } } Before and after this patch only Level 0 is displayed. Without /proc/self/fd bind-mount protection it is possible to traverse parent directories. Fixes: 397ae982f4df4 ("virtiofsd: jail lo->proc_self_fd") Cc: Miklos Szeredi <[email protected]> Cc: Jens Freimann <[email protected]> Signed-off-by: Stefan Hajnoczi <[email protected]> Message-Id: <[email protected]> Reviewed-by: Dr. David Alan Gilbert <[email protected]> Tested-by: Jens Freimann <[email protected]> Reviewed-by: Jens Freimann <[email protected]> Signed-off-by: Dr. David Alan Gilbert <[email protected]>
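The patch replaces the mkdtemp-based bind mount with bind-mounting /proc/self/fd directly over /proc. The Linux-only sketch below shows that sequence in isolation; it assumes a private mount namespace and sufficient privileges, trims error handling, and is not the virtiofsd code itself.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mount.h>

/* Sketch of the commit's approach: no temporary directory needed. */
static int open_proc_self_fd(void)
{
    /* Keep mount changes private to this mount namespace. */
    if (mount(NULL, "/", NULL, MS_REC | MS_SLAVE, NULL) < 0)
        return -1;

    /* Hide the wider /proc and expose only the fd directory. */
    if (mount("/proc/self/fd", "/proc", NULL, MS_BIND, NULL) < 0)
        return -1;

    /* O_PATH handle used later with openat(fd, "N", ...). */
    return open("/proc", O_PATH);
}

int main(void)
{
    int fd = open_proc_self_fd();
    if (fd < 0)
        perror("open_proc_self_fd");
    else
        printf("proc_self_fd handle: %d\n", fd);
    return 0;
}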
static void dio_await_completion(struct dio *dio)
{
	struct bio *bio;
	do {
		bio = dio_await_one(dio);
		if (bio)
			dio_bio_complete(dio, bio);
	} while (bio);
}
0
[]
linux-2.6
848c4dd5153c7a0de55470ce99a8e13a63b4703f
118,868,956,511,143,920,000,000,000,000,000,000,000
9
dio: zero struct dio with kzalloc instead of manually This patch uses kzalloc to zero all of struct dio rather than manually trying to track which fields we rely on being zero. It passed aio+dio stress testing and some bug regression testing on ext3. This patch was introduced by Linus in the conversation that lead up to Badari's minimal fix to manually zero .map_bh.b_state in commit: 6a648fa72161d1f6468dabd96c5d3c0db04f598a It makes the code a bit smaller. Maybe a couple fewer cachelines to load, if we're lucky: text data bss dec hex filename 3285925 568506 1304616 5159047 4eb887 vmlinux 3285797 568506 1304616 5158919 4eb807 vmlinux.patched I was unable to measure a stable difference in the number of cpu cycles spent in blockdev_direct_IO() when pushing aio+dio 256K reads at ~340MB/s. So the resulting intent of the patch isn't a performance gain but to avoid exposing ourselves to the risk of finding another field like .map_bh.b_state where we rely on zeroing but don't enforce it in the code. Signed-off-by: Zach Brown <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
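The change swaps manual field-by-field zeroing for allocating the whole struct pre-zeroed. A userspace analog using calloc instead of malloc plus selective initialization is shown below; the struct and field names are illustrative, not the real struct dio.

#include <stdio.h>
#include <stdlib.h>

struct dio_like {
    int flags;
    long result;
    unsigned long map_state;   /* the kind of field that gets forgotten */
};

int main(void)
{
    /* Manual zeroing: every new field must be remembered here. */
    struct dio_like *a = malloc(sizeof(*a));
    if (!a) return 1;
    a->flags = 0;
    a->result = 0;
    /* a->map_state left uninitialized: the class of bug described above */

    /* Zeroed allocation: nothing to forget. */
    struct dio_like *b = calloc(1, sizeof(*b));
    if (!b) { free(a); return 1; }

    printf("b->map_state = %lu\n", b->map_state);
    free(a);
    free(b);
    return 0;
}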
void collect_list(char *input[]) {
	if (input[0] == NULL) {
		return;
	}
	char *temp = malloc (500);
	if (!temp) {
		return;
	}
	temp[0] = 0;
	int i;
	int conc = 0;
	int start, end = 0;
	int arrsz;
	for (arrsz = 1; input[arrsz] != NULL; arrsz++) {
		;
	}
	for (i = 0; input[i]; i++) {
		if (conc) {
			strcat (temp, ", ");
			strcat (temp, input[i]);
		}
		if (input[i][0] == '{') {
			conc = 1;
			strcat (temp, input[i]);
			start = i;
		}
		if ((conc) & (input[i][strlen (input[i]) - 1] == '}')) {
			conc = 0;
			end = i;
		}
	}
	if (end == 0) {
		free (temp);
		return;
	}
	input[start] = temp;
	for (i = start + 1; i < arrsz; i++) {
		input[i] = input[(end-start) + i];
	}
	input[i] = NULL;
}
0
[ "CWE-125", "CWE-787" ]
radare2
e5c14c167b0dcf0a53d76bd50bacbbcc0dfc1ae7
121,484,306,811,235,010,000,000,000,000,000,000,000
42
Fix #12417/#12418 (arm assembler heap overflows)
conn_seq_skew_set(struct conntrack *ct, const struct conn_key *key,
                  long long now, int seq_skew, bool seq_skew_dir)
{
    unsigned bucket = hash_to_bucket(conn_key_hash(key, ct->hash_basis));

    ct_lock_lock(&ct->buckets[bucket].lock);
    struct conn *conn = conn_lookup(ct, key, now);
    if (conn && seq_skew) {
        conn->seq_skew = seq_skew;
        conn->seq_skew_dir = seq_skew_dir;
    }
    ct_lock_unlock(&ct->buckets[bucket].lock);
}
0
[ "CWE-400" ]
ovs
abd7a457652e6734902720fe6a5dddb3fc0d1e3b
146,475,411,518,280,190,000,000,000,000,000,000,000
12
flow: Support extra padding length. Although not required, padding can be optionally added until the packet length is MTU bytes. A packet with extra padding currently fails sanity checks. Vulnerability: CVE-2020-35498 Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.") Reported-by: Joakim Hindersson <[email protected]> Acked-by: Ilya Maximets <[email protected]> Signed-off-by: Flavio Leitner <[email protected]> Signed-off-by: Ilya Maximets <[email protected]>
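The failure described here is a strict length check rejecting frames padded out to the MTU. The small sketch below illustrates the relaxed check in isolation; the parameter names are assumptions and the real parser fields differ.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* l3_avail: bytes available after the Ethernet header.
 * ip_total_len: length claimed by the IP header.
 * Padding makes l3_avail larger than ip_total_len, which is harmless;
 * only a header claiming more data than is present should be rejected. */
static bool l3_length_ok(size_t l3_avail, uint16_t ip_total_len)
{
    return l3_avail >= ip_total_len;   /* a strict == would reject padding */
}

int main(void)
{
    printf("%d\n", l3_length_ok(46, 46));  /* exact length: ok */
    printf("%d\n", l3_length_ok(46, 28));  /* padded small packet: ok */
    printf("%d\n", l3_length_ok(28, 46));  /* truncated packet: reject */
    return 0;
}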
off_t _q_iosend(FILE *outfp, FILE *infp, off_t nbytes) {
    if (nbytes == 0) return 0;

    unsigned char buf[QIOSEND_CHUNK_SIZE];

    off_t total = 0; // total size sent
    while (total < nbytes) {
        size_t chunksize; // this time sending size
        if (nbytes - total <= sizeof(buf)) chunksize = nbytes - total;
        else chunksize = sizeof(buf);

        // read
        size_t rsize = fread(buf, 1, chunksize, infp);
        if (rsize == 0) break;
        DEBUG("read %zu", rsize);

        // write
        size_t wsize = fwrite(buf, 1, rsize, outfp);
        if (wsize == 0) break;
        DEBUG("write %zu", wsize);

        total += wsize;
        if (rsize != wsize) {
            DEBUG("size mismatch. read:%zu, write:%zu", rsize, wsize);
            break;
        }
    }

    if (total > 0) return total;
    return -1;
}
0
[ "CWE-94" ]
qdecoder
ce7c8a7ac450a823a11b06508ef1eb7441241f81
297,796,283,114,731,370,000,000,000,000,000,000,000
30
security update: add check on improperly encoded input
regerror(int posix_ecode, const regex_t* reg ARG_UNUSED, char* buf, size_t size)
{
  char* s;
  char tbuf[35];
  size_t len;

  if (posix_ecode > 0
      && posix_ecode < (int )(sizeof(ESTRING) / sizeof(ESTRING[0]))) {
    s = ESTRING[posix_ecode];
  }
  else if (posix_ecode == 0) {
    s = "";
  }
  else {
    sprintf(tbuf, "undefined error code (%d)", posix_ecode);
    s = tbuf;
  }

  len = strlen(s) + 1; /* use strlen() because s is ascii encoding. */

  if (buf != NULL && size > 0) {
    strncpy(buf, s, size - 1);
    buf[size - 1] = '\0';
  }
  return len;
}
0
[ "CWE-125" ]
oniguruma
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
257,899,903,389,183,920,000,000,000,000,000,000,000
27
onig-5.9.2
void CClient::Connected() { DEBUG(GetSockName() << " == Connected();"); }
0
[ "CWE-476" ]
znc
2390ad111bde16a78c98ac44572090b33c3bd2d8
316,446,662,384,458,600,000,000,000,000,000,000,000
1
Fix null pointer dereference in echo-message The bug was introduced while fixing #1705. If a client did not enable echo-message, and doesn't have a network, it crashes. Thanks to LunarBNC for reporting this