Dataset columns (name, type, observed range):

func: string, length 0 to 484k
target: int64, 0 to 1
cwe: list, length 0 to 4
project: string, 799 distinct values
commit_id: string, length 40 (fixed)
hash: float64, min 1,215,700,430,453,689,100,000,000, max 340,281,914,521,452,260,000,000,000,000
size: int64, 1 to 24k
message: string, length 0 to 13.3k
int userns_exec_1(struct lxc_conf *conf, int (*fn)(void *), void *data) { int ret, pid; struct userns_fn_data d; char c = '1'; int p[2]; struct lxc_list *idmap; ret = pipe(p); if (ret < 0) { SYSERROR("opening pipe"); return -1; } d.fn = fn; d.arg = data; d.p[0] = p[0]; d.p[1] = p[1]; pid = lxc_clone(run_userns_fn, &d, CLONE_NEWUSER); if (pid < 0) goto err; close(p[0]); p[0] = -1; if ((idmap = idmap_add_id(conf, geteuid(), getegid())) == NULL) { ERROR("Error adding self to container uid/gid map"); goto err; } ret = lxc_map_ids(idmap, pid); lxc_free_idmap(idmap); free(idmap); if (ret) { ERROR("Error setting up child mappings"); goto err; } // kick the child if (write(p[1], &c, 1) != 1) { SYSERROR("writing to pipe to child"); goto err; } ret = wait_for_pid(pid); close(p[1]); return ret; err: if (p[0] != -1) close(p[0]); close(p[1]); return -1; }
target: 0
cwe: [ "CWE-59", "CWE-61" ]
project: lxc
commit_id: 592fd47a6245508b79fe6ac819fe6d3b2c1289be
hash: 31,254,189,121,326,140,000,000,000,000,000,000,000
size: 53
message:
CVE-2015-1335: Protect container mounts against symlinks When a container starts up, lxc sets up the container's inital fstree by doing a bunch of mounting, guided by the container configuration file. The container config is owned by the admin or user on the host, so we do not try to guard against bad entries. However, since the mount target is in the container, it's possible that the container admin could divert the mount with symbolic links. This could bypass proper container startup (i.e. confinement of a root-owned container by the restrictive apparmor policy, by diverting the required write to /proc/self/attr/current), or bypass the (path-based) apparmor policy by diverting, say, /proc to /mnt in the container. To prevent this, 1. do not allow mounts to paths containing symbolic links 2. do not allow bind mounts from relative paths containing symbolic links. Details: Define safe_mount which ensures that the container has not inserted any symbolic links into any mount targets for mounts to be done during container setup. The host's mount path may contain symbolic links. As it is under the control of the administrator, that's ok. So safe_mount begins the check for symbolic links after the rootfs->mount, by opening that directory. It opens each directory along the path using openat() relative to the parent directory using O_NOFOLLOW. When the target is reached, it mounts onto /proc/self/fd/<targetfd>. Use safe_mount() in mount_entry(), when mounting container proc, and when needed. In particular, safe_mount() need not be used in any case where: 1. the mount is done in the container's namespace 2. the mount is for the container's rootfs 3. the mount is relative to a tmpfs or proc/sysfs which we have just safe_mount()ed ourselves Since we were using proc/net as a temporary placeholder for /proc/sys/net during container startup, and proc/net is a symbolic link, use proc/tty instead. Update the lxc.container.conf manpage with details about the new restrictions. Finally, add a testcase to test some symbolic link possibilities. Reported-by: Roman Fiedler Signed-off-by: Serge Hallyn <[email protected]> Acked-by: Stéphane Graber <[email protected]>
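The openat()-based walk this message describes lends itself to a short illustration. Below is a minimal userspace sketch of the technique; the function name, error handling, and fixed-size buffers are illustrative assumptions, not lxc's actual safe_mount():

```c
#define _GNU_SOURCE             /* for O_PATH on glibc */
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <sys/mount.h>
#include <unistd.h>

/* Open each component of relpath under dirfd with O_NOFOLLOW so a
 * symlink anywhere below the starting directory aborts the walk, then
 * mount onto the held fd via /proc/self/fd/<fd> so the target cannot
 * be swapped out from under us. */
static int mount_nofollow(int dirfd, const char *relpath, const char *src,
                          const char *fstype, unsigned long flags)
{
    char path[PATH_MAX], procfd[64];
    int fd = dup(dirfd), nextfd, ret;

    if (fd < 0)
        return -1;
    snprintf(path, sizeof(path), "%s", relpath);
    for (char *comp = strtok(path, "/"); comp; comp = strtok(NULL, "/")) {
        nextfd = openat(fd, comp, O_PATH | O_NOFOLLOW | O_CLOEXEC);
        close(fd);
        if (nextfd < 0)
            return -1;          /* missing component or a symlink */
        fd = nextfd;
    }
    snprintf(procfd, sizeof(procfd), "/proc/self/fd/%d", fd);
    ret = mount(src, procfd, fstype, flags, NULL);
    close(fd);
    return ret;
}
```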
file_path_add(gs_main_instance * minst, gs_file_path * pfp, const char *dirs) { uint len = r_size(&pfp->list); const char *dpath = dirs; int code; if (dirs == 0) return 0; for (;;) { /* Find the end of the next directory name. */ const char *npath = dpath; while (*npath != 0 && *npath != gp_file_name_list_separator) npath++; if (npath > dpath) { if (len == r_size(&pfp->container)) { code = extend_path_list_container(minst, pfp); if (code < 0) { emprintf(minst->heap, "\nAdding path to search paths failed.\n"); return(code); } } make_const_string(&pfp->container.value.refs[len], avm_foreign | a_readonly, npath - dpath, (const byte *)dpath); ++len; } if (!*npath) break; dpath = npath + 1; } r_set_size(&pfp->list, len); return 0; }
target: 0
cwe: []
project: ghostpdl
commit_id: 6d444c273da5499a4cd72f21cb6d4c9a5256807d
hash: 16,005,364,848,029,091,000,000,000,000,000,000,000
size: 33
message:
Bug 697178: Add a file permissions callback For the rare occasions when the graphics library directly opens a file (currently for reading), this allows us to apply any restrictions on file access normally applied in the interpteter.
entry_guards_expand_sample(guard_selection_t *gs) { tor_assert(gs); const or_options_t *options = get_options(); if (live_consensus_is_missing(gs)) { log_info(LD_GUARD, "Not expanding the sample guard set; we have " "no live consensus."); return NULL; } int n_sampled = smartlist_len(gs->sampled_entry_guards); entry_guard_t *added_guard = NULL; int n_usable_filtered_guards = num_reachable_filtered_guards(gs, NULL); int n_guards = 0; smartlist_t *eligible_guards = get_eligible_guards(options, gs, &n_guards); const int max_sample = get_max_sample_size(gs, n_guards); const int min_filtered_sample = get_min_filtered_sample_size(); log_info(LD_GUARD, "Expanding the sample guard set. We have %d guards " "in the sample, and %d eligible guards to extend it with.", n_sampled, smartlist_len(eligible_guards)); while (n_usable_filtered_guards < min_filtered_sample) { /* Has our sample grown too large to expand? */ if (n_sampled >= max_sample) { log_info(LD_GUARD, "Not expanding the guard sample any further; " "just hit the maximum sample threshold of %d", max_sample); goto done; } /* Did we run out of guards? */ if (smartlist_len(eligible_guards) == 0) { /* LCOV_EXCL_START As long as MAX_SAMPLE_THRESHOLD makes can't be adjusted to allow all guards to be sampled, this can't be reached. */ log_info(LD_GUARD, "Not expanding the guard sample any further; " "just ran out of eligible guards"); goto done; /* LCOV_EXCL_STOP */ } /* Otherwise we can add at least one new guard. */ added_guard = select_and_add_guard_item_for_sample(gs, eligible_guards); if (!added_guard) goto done; // LCOV_EXCL_LINE -- only fails on BUG. ++n_sampled; if (added_guard->is_usable_filtered_guard) ++n_usable_filtered_guards; } done: smartlist_free(eligible_guards); return added_guard; }
target: 0
cwe: [ "CWE-200" ]
project: tor
commit_id: 665baf5ed5c6186d973c46cdea165c0548027350
hash: 216,187,392,325,787,960,000,000,000,000,000,000,000
size: 60
message:
Consider the exit family when applying guard restrictions. When the new path selection logic went into place, I accidentally dropped the code that considered the _family_ of the exit node when deciding if the guard was usable, and we didn't catch that during code review. This patch makes the guard_restriction_t code consider the exit family as well, and adds some (hopefully redundant) checks for the case where we lack a node_t for a guard but we have a bridge_info_t for it. Fixes bug 22753; bugfix on 0.3.0.1-alpha. Tracked as TROVE-2016-006 and CVE-2017-0377.
int ssl_read( ssl_context *ssl, unsigned char *buf, size_t len ) { int ret; size_t n; SSL_DEBUG_MSG( 2, ( "=> read" ) ); if( ssl->state != SSL_HANDSHAKE_OVER ) { if( ( ret = ssl_handshake( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_handshake", ret ); return( ret ); } } if( ssl->in_offt == NULL ) { if( ( ret = ssl_read_record( ssl ) ) != 0 ) { if( ret == POLARSSL_ERR_SSL_CONN_EOF ) return( 0 ); SSL_DEBUG_RET( 1, "ssl_read_record", ret ); return( ret ); } if( ssl->in_msglen == 0 && ssl->in_msgtype == SSL_MSG_APPLICATION_DATA ) { /* * OpenSSL sends empty messages to randomize the IV */ if( ( ret = ssl_read_record( ssl ) ) != 0 ) { if( ret == POLARSSL_ERR_SSL_CONN_EOF ) return( 0 ); SSL_DEBUG_RET( 1, "ssl_read_record", ret ); return( ret ); } } if( ssl->in_msgtype == SSL_MSG_HANDSHAKE ) { SSL_DEBUG_MSG( 1, ( "received handshake message" ) ); if( ssl->endpoint == SSL_IS_CLIENT && ( ssl->in_msg[0] != SSL_HS_HELLO_REQUEST || ssl->in_hslen != 4 ) ) { SSL_DEBUG_MSG( 1, ( "handshake received (not HelloRequest)" ) ); return( POLARSSL_ERR_SSL_UNEXPECTED_MESSAGE ); } if( ssl->disable_renegotiation == SSL_RENEGOTIATION_DISABLED || ( ssl->secure_renegotiation == SSL_LEGACY_RENEGOTIATION && ssl->allow_legacy_renegotiation == SSL_LEGACY_NO_RENEGOTIATION ) ) { SSL_DEBUG_MSG( 3, ( "ignoring renegotiation, sending alert" ) ); if( ssl->minor_ver == SSL_MINOR_VERSION_0 ) { /* * SSLv3 does not have a "no_renegotiation" alert */ if( ( ret = ssl_send_fatal_handshake_failure( ssl ) ) != 0 ) return( ret ); } else { if( ( ret = ssl_send_alert_message( ssl, SSL_ALERT_LEVEL_WARNING, SSL_ALERT_MSG_NO_RENEGOTIATION ) ) != 0 ) { return( ret ); } } } else { if( ( ret = ssl_renegotiate( ssl ) ) != 0 ) { SSL_DEBUG_RET( 1, "ssl_renegotiate", ret ); return( ret ); } return( POLARSSL_ERR_NET_WANT_READ ); } } else if( ssl->in_msgtype != SSL_MSG_APPLICATION_DATA ) { SSL_DEBUG_MSG( 1, ( "bad application data message" ) ); return( POLARSSL_ERR_SSL_UNEXPECTED_MESSAGE ); } ssl->in_offt = ssl->in_msg; } n = ( len < ssl->in_msglen ) ? len : ssl->in_msglen; memcpy( buf, ssl->in_offt, n ); ssl->in_msglen -= n; if( ssl->in_msglen == 0 ) /* all bytes consumed */ ssl->in_offt = NULL; else /* more data available */ ssl->in_offt += n; SSL_DEBUG_MSG( 2, ( "<= read" ) ); return( (int) n ); }
target: 0
cwe: [ "CWE-310" ]
project: polarssl
commit_id: 4582999be608c9794d4518ae336b265084db9f93
hash: 72,170,712,253,724,540,000,000,000,000,000,000,000
size: 116
message:
Fixed timing difference resulting from badly formatted padding.
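Fixes of this class are usually implemented by checking CBC padding without data-dependent early exits. A generic sketch of that technique (the general idea, not PolarSSL's exact code):

```c
#include <stddef.h>

/* Accumulate padding mismatches into a flag over a fixed-bound scan so
 * a badly formatted pad changes the result, not the execution time. */
static int cbc_padding_ok(const unsigned char *rec, size_t len)
{
    unsigned char pad = rec[len - 1];
    unsigned char bad = (size_t)pad + 1 > len;   /* claimed pad too long */
    size_t i;

    for (i = 0; i < 256 && i < len; i++) {
        /* all-ones mask iff byte i from the end lies inside the padding */
        unsigned char mask = (unsigned char)-(unsigned char)(i <= pad);
        bad |= mask & (rec[len - 1 - i] ^ pad);
    }
    return bad == 0;                             /* 1 = well-formed */
}
```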
bool Field_newdate::send_binary(Protocol *protocol) { MYSQL_TIME tm; Field_newdate::get_date(&tm,0); return protocol->store_date(&tm); }
target: 0
cwe: [ "CWE-416", "CWE-703" ]
project: server
commit_id: 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
hash: 260,518,879,921,684,850,000,000,000,000,000,000,000
size: 6
message:
MDEV-24176 Server crashes after insert in the table with virtual column generated using date_format() and if() vcol_info->expr is allocated on expr_arena at parsing stage. Since expr item is allocated on expr_arena all its containee items must be allocated on expr_arena too. Otherwise fix_session_expr() will encounter prematurely freed item. When table is reopened from cache vcol_info contains stale expression. We refresh expression via TABLE::vcol_fix_exprs() but first we must prepare a proper context (Vcol_expr_context) which meets some requirements: 1. As noted above expr update must be done on expr_arena as there may be new items created. It was a bug in fix_session_expr_for_read() and was just not reproduced because of no second refix. Now refix is done for more cases so it does reproduce. Tests affected: vcol.binlog 2. Also name resolution context must be narrowed to the single table. Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes 3. sql_mode must be clean and not fail expr update. sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc must not affect vcol expression update. If the table was created successfully any further evaluation must not fail. Tests affected: main.func_like Reviewed by: Sergei Golubchik <[email protected]>
void nfs_force_lookup_revalidate(struct inode *dir) { NFS_I(dir)->cache_change_attribute += 2; }
target: 0
cwe: [ "CWE-909" ]
project: linux
commit_id: ac795161c93699d600db16c1a8cc23a65a1eceaf
hash: 319,523,551,480,874,000,000,000,000,000,000,000,000
size: 4
message:
NFSv4: Handle case where the lookup of a directory fails If the application sets the O_DIRECTORY flag, and tries to open a regular file, nfs_atomic_open() will punt to doing a regular lookup. If the server then returns a regular file, we will happily return a file descriptor with uninitialised open state. The fix is to return the expected ENOTDIR error in these cases. Reported-by: Lyu Tao <[email protected]> Fixes: 0dd2b474d0b6 ("nfs: implement i_op->atomic_open()") Signed-off-by: Trond Myklebust <[email protected]> Signed-off-by: Anna Schumaker <[email protected]>
void Scanner::lex_c_comment() { loop: #line 3710 "src/parse/lex.cc" { unsigned char yych; if ((lim - cur) < 2) { if (!fill(2)) { error("unexpected end of input"); exit(1); } } yych = (unsigned char)*cur; if (yych <= '\f') { if (yych <= 0x00) goto yy408; if (yych == '\n') goto yy411; goto yy409; } else { if (yych <= '\r') goto yy412; if (yych == '*') goto yy413; goto yy409; } yy408: ++cur; #line 724 "../src/parse/lex.re" { fail_if_eof(); goto loop; } #line 3728 "src/parse/lex.cc" yy409: ++cur; yy410: #line 725 "../src/parse/lex.re" { goto loop; } #line 3734 "src/parse/lex.cc" yy411: ++cur; #line 723 "../src/parse/lex.re" { next_line(); goto loop; } #line 3739 "src/parse/lex.cc" yy412: yych = (unsigned char)*++cur; if (yych == '\n') goto yy411; goto yy410; yy413: yych = (unsigned char)*++cur; if (yych != '/') goto yy410; ++cur; #line 722 "../src/parse/lex.re" { return; } #line 3750 "src/parse/lex.cc" } #line 726 "../src/parse/lex.re" }
target: 0
cwe: [ "CWE-787" ]
project: re2c
commit_id: 039c18949190c5de5397eba504d2c75dad2ea9ca
hash: 291,726,248,868,417,300,000,000,000,000,000,000,000
size: 49
message:
Emit an error when repetition lower bound exceeds upper bound. Historically this was allowed and re2c swapped the bounds. However, it most likely indicates an error in user code and there is only a single occurrence in the tests (and the test in an artificial one), so although the change is backwards incompatible there is low chance of breaking real-world code. This fixes second test case in the bug #394 "Stack overflow due to recursion in src/dfa/dead_rules.cc" (the actual fix is to limit DFA size but the test also has counted repetition with swapped bounds).
void CLASS canon_600_correct() { int row, col, val; static const short mul[4][2] = { { 1141,1145 }, { 1128,1109 }, { 1178,1149 }, { 1128,1109 } }; for (row=0; row < height; row++) for (col=0; col < width; col++) { if ((val = BAYER(row,col) - black) < 0) val = 0; val = val * mul[row & 3][col & 1] >> 9; BAYER(row,col) = val; } canon_600_fixed_wb(1311); canon_600_auto_wb(); canon_600_coeff(); maximum = (0x3ff - black) * 1109 >> 9; black = 0; }
target: 0
cwe: [ "CWE-703" ]
project: LibRaw
commit_id: 11909cc59e712e09b508dda729b99aeaac2b29ad
hash: 176,596,090,531,784,920,000,000,000,000,000,000,000
size: 18
message:
cumulated data checks patch
static gboolean avdtp_parse_rej(struct avdtp *session, struct avdtp_stream *stream, uint8_t transaction, uint8_t signal_id, void *buf, int size) { struct avdtp_error err; uint8_t acp_seid; struct avdtp_local_sep *sep = stream ? stream->lsep : NULL; switch (signal_id) { case AVDTP_DISCOVER: if (!seid_rej_to_err(buf, size, &err)) return FALSE; error("DISCOVER request rejected: %s (%d)", avdtp_strerror(&err), err.err.error_code); return TRUE; case AVDTP_GET_CAPABILITIES: case AVDTP_GET_ALL_CAPABILITIES: if (!seid_rej_to_err(buf, size, &err)) return FALSE; error("GET_CAPABILITIES request rejected: %s (%d)", avdtp_strerror(&err), err.err.error_code); return TRUE; case AVDTP_OPEN: if (!seid_rej_to_err(buf, size, &err)) return FALSE; error("OPEN request rejected: %s (%d)", avdtp_strerror(&err), err.err.error_code); if (sep && sep->cfm && sep->cfm->open) sep->cfm->open(session, sep, stream, &err, sep->user_data); return TRUE; case AVDTP_SET_CONFIGURATION: if (!conf_rej_to_err(buf, size, &err)) return FALSE; error("SET_CONFIGURATION request rejected: %s (%d)", avdtp_strerror(&err), err.err.error_code); if (sep && sep->cfm && sep->cfm->set_configuration) sep->cfm->set_configuration(session, sep, stream, &err, sep->user_data); return TRUE; case AVDTP_RECONFIGURE: if (!conf_rej_to_err(buf, size, &err)) return FALSE; error("RECONFIGURE request rejected: %s (%d)", avdtp_strerror(&err), err.err.error_code); if (sep && sep->cfm && sep->cfm->reconfigure) sep->cfm->reconfigure(session, sep, stream, &err, sep->user_data); return TRUE; case AVDTP_START: if (!stream_rej_to_err(buf, size, &err, &acp_seid)) return FALSE; error("START request rejected: %s (%d)", avdtp_strerror(&err), err.err.error_code); if (sep && sep->cfm && sep->cfm->start) { sep->cfm->start(session, sep, stream, &err, sep->user_data); stream->starting = FALSE; } return TRUE; case AVDTP_SUSPEND: if (!stream_rej_to_err(buf, size, &err, &acp_seid)) return FALSE; error("SUSPEND request rejected: %s (%d)", avdtp_strerror(&err), err.err.error_code); if (sep && sep->cfm && sep->cfm->suspend) sep->cfm->suspend(session, sep, stream, &err, sep->user_data); return TRUE; case AVDTP_CLOSE: if (!stream_rej_to_err(buf, size, &err, &acp_seid)) return FALSE; error("CLOSE request rejected: %s (%d)", avdtp_strerror(&err), err.err.error_code); if (sep && sep->cfm && sep->cfm->close) { sep->cfm->close(session, sep, stream, &err, sep->user_data); stream->close_int = FALSE; } return TRUE; case AVDTP_ABORT: if (!stream_rej_to_err(buf, size, &err, &acp_seid)) return FALSE; error("ABORT request rejected: %s (%d)", avdtp_strerror(&err), err.err.error_code); if (sep && sep->cfm && sep->cfm->abort) sep->cfm->abort(session, sep, stream, &err, sep->user_data); return FALSE; case AVDTP_DELAY_REPORT: if (!stream_rej_to_err(buf, size, &err, &acp_seid)) return FALSE; error("DELAY_REPORT request rejected: %s (%d)", avdtp_strerror(&err), err.err.error_code); if (sep && sep->cfm && sep->cfm->delay_report) sep->cfm->delay_report(session, sep, stream, &err, sep->user_data); return TRUE; default: error("Unknown reject response signal id: %u", signal_id); return TRUE; } }
target: 0
cwe: [ "CWE-703" ]
project: bluez
commit_id: 7a80d2096f1b7125085e21448112aa02f49f5e9a
hash: 225,000,291,050,738,230,000,000,000,000,000,000,000
size: 104
message:
avdtp: Fix accepting invalid/malformed capabilities Check if capabilities are valid before attempting to copy them.
set_ics(E1000State *s, int index, uint32_t val) { DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR], s->mac_reg[IMS]); set_interrupt_cause(s, 0, val | s->mac_reg[ICR]); }
target: 0
cwe: [ "CWE-120" ]
project: qemu
commit_id: b0d9ffcd0251161c7c92f94804dcf599dfa3edeb
hash: 305,628,130,532,768,740,000,000,000,000,000,000,000
size: 6
message:
e1000: Discard packets that are too long if !SBP and !LPE The e1000_receive function for the e1000 needs to discard packets longer than 1522 bytes if the SBP and LPE flags are disabled. The linux driver assumes this behavior and allocates memory based on this assumption. Signed-off-by: Michael Contreras <[email protected]> Signed-off-by: Anthony Liguori <[email protected]>
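As arithmetic, the guard described above is tiny; here is a hypothetical standalone version (flag plumbing and naming are illustrative, not QEMU's e1000 code):

```c
#include <stddef.h>

#define E1000_MAX_STD_FRAME 1522   /* 1500 MTU + L2/VLAN/FCS overhead */

/* Drop oversized frames unless the guest set "store bad packets" (SBP)
 * or "long packet enable" (LPE), mirroring the rule in the message. */
static int e1000_should_discard(size_t frame_len, int sbp, int lpe)
{
    return frame_len > E1000_MAX_STD_FRAME && !sbp && !lpe;
}
```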
_equalTruncateStmt(const TruncateStmt *a, const TruncateStmt *b) { COMPARE_NODE_FIELD(relations); COMPARE_SCALAR_FIELD(restart_seqs); COMPARE_SCALAR_FIELD(behavior); return true; }
target: 0
cwe: [ "CWE-362" ]
project: postgres
commit_id: 5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
hash: 15,649,793,338,186,943,000,000,000,000,000,000,000
size: 8
message:
Avoid repeated name lookups during table and index DDL. If the name lookups come to different conclusions due to concurrent activity, we might perform some parts of the DDL on a different table than other parts. At least in the case of CREATE INDEX, this can be used to cause the permissions checks to be performed against a different table than the index creation, allowing for a privilege escalation attack. This changes the calling convention for DefineIndex, CreateTrigger, transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible (in 9.2 and newer), and AlterTable (in 9.1 and older). In addition, CheckRelationOwnership is removed in 9.2 and newer and the calling convention is changed in older branches. A field has also been added to the Constraint node (FkConstraint in 8.4). Third-party code calling these functions or using the Constraint node will require updating. Report by Andres Freund. Patch by Robert Haas and Andres Freund, reviewed by Tom Lane. Security: CVE-2014-0062
static void conn_llist_dtor(void *user, void *element) { struct connectdata *conn = element; (void)user; conn->bundle = NULL; }
target: 0
cwe: []
project: curl
commit_id: 058f98dc3fe595f21dc26a5b9b1699e519ba5705
hash: 116,082,916,530,904,150,000,000,000,000,000,000,000
size: 6
message:
conncache: include the zone id in the "bundle" hashkey Make connections to two separate IPv6 zone ids create separate connections. Reported-by: Harry Sintonen Bug: https://curl.se/docs/CVE-2022-27775.html Closes #8747
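One way to realize the described fix is to fold the zone id into the string used as the bundle's hash key; the key layout below is an assumption for illustration, not curl's actual format:

```c
#include <stdio.h>

/* Two link-local destinations that differ only in zone id, e.g.
 * fe80::1%eth0 vs fe80::1%eth1, now produce distinct bundle keys
 * and therefore distinct connection bundles. */
static void make_bundle_key(char *buf, size_t buflen, const char *host,
                            int port, unsigned int zoneid)
{
    snprintf(buf, buflen, "%s:%d:zone%u", host, port, zoneid);
}
```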
static inline int skb_inner_transport_offset(const struct sk_buff *skb) { return skb_inner_transport_header(skb) - skb->data;
target: 0
cwe: [ "CWE-20" ]
project: linux
commit_id: 2b16f048729bf35e6c28a40cbfad07239f9dcd90
hash: 167,748,812,328,760,590,000,000,000,000,000,000,000
size: 4
message:
net: create skb_gso_validate_mac_len() If you take a GSO skb, and split it into packets, will the MAC length (L2 + L3 + L4 headers + payload) of those packets be small enough to fit within a given length? Move skb_gso_mac_seglen() to skbuff.h with other related functions like skb_gso_network_seglen() so we can use it, and then create skb_gso_validate_mac_len to do the full calculation. Signed-off-by: Daniel Axtens <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int hiddev_fasync(int fd, struct file *file, int on) { struct hiddev_list *list = file->private_data; return fasync_helper(fd, file, on, &list->fasync); }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: linux
commit_id: 93a2001bdfd5376c3dc2158653034c20392d15c5
hash: 96,074,763,936,964,700,000,000,000,000,000,000,000
size: 6
message:
HID: hiddev: validate num_values for HIDIOCGUSAGES, HIDIOCSUSAGES commands This patch validates the num_values parameter from userland during the HIDIOCGUSAGES and HIDIOCSUSAGES commands. Previously, if the report id was set to HID_REPORT_ID_UNKNOWN, we would fail to validate the num_values parameter leading to a heap overflow. Cc: [email protected] Signed-off-by: Scott Bauer <[email protected]> Signed-off-by: Jiri Kosina <[email protected]>
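The shape of such a bounds check, written as a freestanding predicate (parameter names are illustrative, not the hiddev structures):

```c
/* Validate a user-supplied (field_index, usage_index, num_values)
 * triple against the report's limits before any element access. */
static int usages_in_bounds(unsigned field_index, unsigned num_fields,
                            unsigned usage_index, unsigned num_values,
                            unsigned report_count)
{
    if (field_index >= num_fields)
        return 0;
    if (usage_index >= report_count)
        return 0;
    /* subtract first so the comparison cannot wrap around */
    return num_values <= report_count - usage_index;
}
```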
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) { /* * We are starting a new run period: */ se->exec_start = rq_of(cfs_rq)->clock; }
target: 0
cwe: []
project: linux-2.6
commit_id: 6a6029b8cefe0ca7e82f27f3904dbedba3de4e06
hash: 164,010,406,815,698,610,000,000,000,000,000,000,000
size: 7
message:
sched: simplify sched_slice() Use the existing calc_delta_mine() calculation for sched_slice(). This saves a divide and simplifies the code because we share it with the other /cfs_rq->load users. It also improves code size: text data bss dec hex filename 42659 2740 144 45543 b1e7 sched.o.before 42093 2740 144 44977 afb1 sched.o.after Signed-off-by: Ingo Molnar <[email protected]> Signed-off-by: Peter Zijlstra <[email protected]>
static void nci_nfcee_discover_req(struct nci_dev *ndev, const void *opt) { struct nci_nfcee_discover_cmd cmd; __u8 action = (unsigned long)opt; cmd.discovery_action = action; nci_send_cmd(ndev, NCI_OP_NFCEE_DISCOVER_CMD, 1, &cmd); }
target: 0
cwe: []
project: linux
commit_id: 48b71a9e66c2eab60564b1b1c85f4928ed04e406
hash: 255,631,344,783,419,720,000,000,000,000,000,000,000
size: 9
message:
NFC: add NCI_UNREG flag to eliminate the race There are two sites that calls queue_work() after the destroy_workqueue() and lead to possible UAF. The first site is nci_send_cmd(), which can happen after the nci_close_device as below nfcmrvl_nci_unregister_dev | nfc_genl_dev_up nci_close_device | flush_workqueue | del_timer_sync | nci_unregister_device | nfc_get_device destroy_workqueue | nfc_dev_up nfc_unregister_device | nci_dev_up device_del | nci_open_device | __nci_request | nci_send_cmd | queue_work !!! Another site is nci_cmd_timer, awaked by the nci_cmd_work from the nci_send_cmd. ... | ... nci_unregister_device | queue_work destroy_workqueue | nfc_unregister_device | ... device_del | nci_cmd_work | mod_timer | ... | nci_cmd_timer | queue_work !!! For the above two UAF, the root cause is that the nfc_dev_up can race between the nci_unregister_device routine. Therefore, this patch introduce NCI_UNREG flag to easily eliminate the possible race. In addition, the mutex_lock in nci_close_device can act as a barrier. Signed-off-by: Lin Ma <[email protected]> Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation") Reviewed-by: Jakub Kicinski <[email protected]> Reviewed-by: Krzysztof Kozlowski <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
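The pattern is easier to see outside the kernel. A userspace analogue, under the assumption that one mutex guards both paths the way the message says nci_close_device's lock does:

```c
#include <pthread.h>
#include <stdbool.h>

struct dev {
    pthread_mutex_t lock;
    bool unregistering;              /* the NCI_UNREG analogue */
};

/* Open path: refuse to queue new work once teardown has begun. */
static int dev_open(struct dev *d)
{
    int ret = 0;
    pthread_mutex_lock(&d->lock);
    if (d->unregistering)
        ret = -1;
    /* else: safe to queue work here; teardown cannot start while the
     * lock is held, so the workqueue still exists. */
    pthread_mutex_unlock(&d->lock);
    return ret;
}

/* Teardown path: setting the flag under the lock is the barrier. */
static void dev_unregister(struct dev *d)
{
    pthread_mutex_lock(&d->lock);
    d->unregistering = true;
    pthread_mutex_unlock(&d->lock);
    /* destroying the workqueue is safe from here on */
}
```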
decompileINITARRAY(int n, SWF_ACTION *actions, int maxn) { struct SWF_ACTIONPUSHPARAM *nparam; nparam=pop(); push(newVar_N("","","","[", nparam->p.Integer,"]")); return 0; }
target: 0
cwe: [ "CWE-119", "CWE-125" ]
project: libming
commit_id: da9d86eab55cbf608d5c916b8b690f5b76bca462
hash: 82,123,587,377,201,790,000,000,000,000,000,000,000
size: 7
message:
decompileAction: Prevent heap buffer overflow and underflow with using OpCode
void lxc_putlock(struct lxc_lock *l) { if (!l) return; switch(l->type) { case LXC_LOCK_ANON_SEM: if (l->u.sem) { sem_destroy(l->u.sem); free(l->u.sem); l->u.sem = NULL; } break; case LXC_LOCK_FLOCK: if (l->u.f.fd != -1) { close(l->u.f.fd); l->u.f.fd = -1; } free(l->u.f.fname); l->u.f.fname = NULL; break; } free(l); }
target: 0
cwe: [ "CWE-59", "CWE-61" ]
project: lxc
commit_id: 72cf81f6a3404e35028567db2c99a90406e9c6e6
hash: 316,128,142,523,291,950,000,000,000,000,000,000,000
size: 23
message:
CVE-2015-1331: lxclock: use /run/lxc/lock rather than /run/lock/lxc This prevents an unprivileged user to use LXC to create arbitrary file on the filesystem. Signed-off-by: Serge Hallyn <[email protected]> Signed-off-by: Tyler Hicks <[email protected]> Acked-by: Stéphane Graber <[email protected]>
void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) { int i; u16 j; struct hc_sp_status_block_data sp_sb_data; int func = BP_FUNC(bp); #ifdef BNX2X_STOP_ON_ERROR u16 start = 0, end = 0; u8 cos; #endif if (IS_PF(bp) && disable_int) bnx2x_int_disable(bp); bp->stats_state = STATS_STATE_DISABLED; bp->eth_stats.unrecoverable_error++; DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); BNX2X_ERR("begin crash dump -----------------\n"); /* Indices */ /* Common */ if (IS_PF(bp)) { struct host_sp_status_block *def_sb = bp->def_status_blk; int data_size, cstorm_offset; BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", bp->def_idx, bp->def_att_idx, bp->attn_state, bp->spq_prod_idx, bp->stats_counter); BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", def_sb->atten_status_block.attn_bits, def_sb->atten_status_block.attn_bits_ack, def_sb->atten_status_block.status_block_id, def_sb->atten_status_block.attn_bits_index); BNX2X_ERR(" def ("); for (i = 0; i < HC_SP_SB_MAX_INDICES; i++) pr_cont("0x%x%s", def_sb->sp_sb.index_values[i], (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " "); data_size = sizeof(struct hc_sp_status_block_data) / sizeof(u32); cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func); for (i = 0; i < data_size; i++) *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset + i * sizeof(u32)); pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n", sp_sb_data.igu_sb_id, sp_sb_data.igu_seg_id, sp_sb_data.p_func.pf_id, sp_sb_data.p_func.vnic_id, sp_sb_data.p_func.vf_id, sp_sb_data.p_func.vf_valid, sp_sb_data.state); } for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; int loop; struct hc_status_block_data_e2 sb_data_e2; struct hc_status_block_data_e1x sb_data_e1x; struct hc_status_block_sm *hc_sm_p = CHIP_IS_E1x(bp) ? sb_data_e1x.common.state_machine : sb_data_e2.common.state_machine; struct hc_index_data *hc_index_p = CHIP_IS_E1x(bp) ? sb_data_e1x.index_data : sb_data_e2.index_data; u8 data_size, cos; u32 *sb_data_p; struct bnx2x_fp_txdata txdata; if (!bp->fp) break; if (!fp->rx_cons_sb) continue; /* Rx */ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", i, fp->rx_bd_prod, fp->rx_bd_cons, fp->rx_comp_prod, fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n", fp->rx_sge_prod, fp->last_max_sge, le16_to_cpu(fp->fp_hc_idx)); /* Tx */ for_each_cos_in_tx_queue(fp, cos) { if (!fp->txdata_ptr[cos]) break; txdata = *fp->txdata_ptr[cos]; if (!txdata.tx_cons_sb) continue; BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n", i, txdata.tx_pkt_prod, txdata.tx_pkt_cons, txdata.tx_bd_prod, txdata.tx_bd_cons, le16_to_cpu(*txdata.tx_cons_sb)); } loop = CHIP_IS_E1x(bp) ? HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2; /* host sb data */ if (IS_FCOE_FP(fp)) continue; BNX2X_ERR(" run indexes ("); for (j = 0; j < HC_SB_MAX_SM; j++) pr_cont("0x%x%s", fp->sb_running_index[j], (j == HC_SB_MAX_SM - 1) ? ")" : " "); BNX2X_ERR(" indexes ("); for (j = 0; j < loop; j++) pr_cont("0x%x%s", fp->sb_index_values[j], (j == loop - 1) ? ")" : " "); /* VF cannot access FW refelection for status block */ if (IS_VF(bp)) continue; /* fw sb data */ data_size = CHIP_IS_E1x(bp) ? sizeof(struct hc_status_block_data_e1x) : sizeof(struct hc_status_block_data_e2); data_size /= sizeof(u32); sb_data_p = CHIP_IS_E1x(bp) ? (u32 *)&sb_data_e1x : (u32 *)&sb_data_e2; /* copy sb data in here */ for (j = 0; j < data_size; j++) *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + j * sizeof(u32)); if (!CHIP_IS_E1x(bp)) { pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n", sb_data_e2.common.p_func.pf_id, sb_data_e2.common.p_func.vf_id, sb_data_e2.common.p_func.vf_valid, sb_data_e2.common.p_func.vnic_id, sb_data_e2.common.same_igu_sb_1b, sb_data_e2.common.state); } else { pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n", sb_data_e1x.common.p_func.pf_id, sb_data_e1x.common.p_func.vf_id, sb_data_e1x.common.p_func.vf_valid, sb_data_e1x.common.p_func.vnic_id, sb_data_e1x.common.same_igu_sb_1b, sb_data_e1x.common.state); } /* SB_SMs data */ for (j = 0; j < HC_SB_MAX_SM; j++) { pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n", j, hc_sm_p[j].__flags, hc_sm_p[j].igu_sb_id, hc_sm_p[j].igu_seg_id, hc_sm_p[j].time_to_expire, hc_sm_p[j].timer_value); } /* Indices data */ for (j = 0; j < loop; j++) { pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j, hc_index_p[j].flags, hc_index_p[j].timeout); } } #ifdef BNX2X_STOP_ON_ERROR if (IS_PF(bp)) { /* event queue */ BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod); for (i = 0; i < NUM_EQ_DESC; i++) { u32 *data = (u32 *)&bp->eq_ring[i].message.data; BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n", i, bp->eq_ring[i].message.opcode, bp->eq_ring[i].message.error); BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]); } } /* Rings */ /* Rx */ for_each_valid_rx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; if (!bp->fp) break; if (!fp->rx_cons_sb) continue; start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); for (j = start; j != end; j = RX_BD(j + 1)) { u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j]; struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j]; BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n", i, j, rx_bd[1], rx_bd[0], sw_bd->data); } start = RX_SGE(fp->rx_sge_prod); end = RX_SGE(fp->last_max_sge); for (j = start; j != end; j = RX_SGE(j + 1)) { u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n", i, j, rx_sge[1], rx_sge[0], sw_page->page); } start = RCQ_BD(fp->rx_comp_cons - 10); end = RCQ_BD(fp->rx_comp_cons + 503); for (j = start; j != end; j = RCQ_BD(j + 1)) { u32 *cqe = (u32 *)&fp->rx_comp_ring[j]; BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n", i, j, cqe[0], cqe[1], cqe[2], cqe[3]); } } /* Tx */ for_each_valid_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; if (!bp->fp) break; for_each_cos_in_tx_queue(fp, cos) { struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; if (!fp->txdata_ptr[cos]) break; if (!txdata->tx_cons_sb) continue; start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); for (j = start; j != end; j = TX_BD(j + 1)) { struct sw_tx_bd *sw_bd = &txdata->tx_buf_ring[j]; BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n", i, cos, j, sw_bd->skb, sw_bd->first_bd); } start = TX_BD(txdata->tx_bd_cons - 10); end = TX_BD(txdata->tx_bd_cons + 254); for (j = start; j != end; j = TX_BD(j + 1)) { u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j]; BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n", i, cos, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]); } } } #endif if (IS_PF(bp)) { bnx2x_fw_dump(bp); bnx2x_mc_assert(bp); } BNX2X_ERR("end crash dump -----------------\n"); }
target: 0
cwe: [ "CWE-20" ]
project: linux
commit_id: 8914a595110a6eca69a5e275b323f5d09e18f4f9
hash: 329,614,334,958,172,870,000,000,000,000,000,000,000
size: 282
message:
bnx2x: disable GSO where gso_size is too big for hardware If a bnx2x card is passed a GSO packet with a gso_size larger than ~9700 bytes, it will cause a firmware error that will bring the card down: bnx2x: [bnx2x_attn_int_deasserted3:4323(enP24p1s0f0)]MC assert! bnx2x: [bnx2x_mc_assert:720(enP24p1s0f0)]XSTORM_ASSERT_LIST_INDEX 0x2 bnx2x: [bnx2x_mc_assert:736(enP24p1s0f0)]XSTORM_ASSERT_INDEX 0x0 = 0x00000000 0x25e43e47 0x00463e01 0x00010052 bnx2x: [bnx2x_mc_assert:750(enP24p1s0f0)]Chip Revision: everest3, FW Version: 7_13_1 ... (dump of values continues) ... Detect when the mac length of a GSO packet is greater than the maximum packet size (9700 bytes) and disable GSO. Signed-off-by: Daniel Axtens <[email protected]> Reviewed-by: Eric Dumazet <[email protected]> Signed-off-by: David S. Miller <[email protected]>
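The arithmetic behind this check, as a hypothetical helper: each resegmented frame carries the full header stack plus one gso_size payload chunk, and that sum must fit the device limit (about 9700 bytes for this hardware, per the message):

```c
/* Returns nonzero when every segment produced from a GSO packet with
 * the given header length and gso_size fits in hw_max_frame bytes. */
static int gso_fits_hw_limit(unsigned int hdr_len, unsigned int gso_size,
                             unsigned int hw_max_frame)
{
    return hdr_len <= hw_max_frame && gso_size <= hw_max_frame - hdr_len;
}
```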
void SSL_set_msg_callback(SSL *ssl, void (*cb)(int write_p, int version, int content_type, const void *buf, size_t len, SSL *ssl, void *arg)) { SSL_callback_ctrl(ssl, SSL_CTRL_SET_MSG_CALLBACK, (void (*)(void))cb); }
target: 0
cwe: []
project: openssl
commit_id: ee2ffc279417f15fef3b1073c7dc81a908991516
hash: 264,076,298,048,043,540,000,000,000,000,000,000,000
size: 4
message:
Add Next Protocol Negotiation.
void CL_ServerStatus_f(void) { netadr_t to, *toptr = NULL; char *server; serverStatus_t *serverStatus; int argc; netadrtype_t family = NA_UNSPEC; argc = Cmd_Argc(); if ( argc != 2 && argc != 3 ) { if (clc.state != CA_ACTIVE || clc.demoplaying) { Com_Printf ("Not connected to a server.\n"); Com_Printf( "usage: serverstatus [-4|-6] server\n"); return; } toptr = &clc.serverAddress; } if(!toptr) { Com_Memset( &to, 0, sizeof(netadr_t) ); if(argc == 2) server = Cmd_Argv(1); else { if(!strcmp(Cmd_Argv(1), "-4")) family = NA_IP; else if(!strcmp(Cmd_Argv(1), "-6")) family = NA_IP6; else Com_Printf( "warning: only -4 or -6 as address type understood.\n"); server = Cmd_Argv(2); } toptr = &to; if ( !NET_StringToAdr( server, toptr, family ) ) return; } NET_OutOfBandPrint( NS_CLIENT, *toptr, "getstatus" ); serverStatus = CL_GetServerStatus( *toptr ); serverStatus->address = *toptr; serverStatus->print = qtrue; serverStatus->pending = qtrue; }
target: 0
cwe: [ "CWE-269" ]
project: ioq3
commit_id: 376267d534476a875d8b9228149c4ee18b74a4fd
hash: 230,219,069,618,342,900,000,000,000,000,000,000,000
size: 51
message:
Don't load .pk3s as .dlls, and don't load user config files from .pk3s.
static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; struct rfcomm_dlc *dlc = dev->dlc; u8 v24_sig; BT_DBG("tty %p dev %p set 0x%02x clear 0x%02x", tty, dev, set, clear); rfcomm_dlc_get_modem_status(dlc, &v24_sig); if (set & TIOCM_DSR || set & TIOCM_DTR) v24_sig |= RFCOMM_V24_RTC; if (set & TIOCM_RTS || set & TIOCM_CTS) v24_sig |= RFCOMM_V24_RTR; if (set & TIOCM_RI) v24_sig |= RFCOMM_V24_IC; if (set & TIOCM_CD) v24_sig |= RFCOMM_V24_DV; if (clear & TIOCM_DSR || clear & TIOCM_DTR) v24_sig &= ~RFCOMM_V24_RTC; if (clear & TIOCM_RTS || clear & TIOCM_CTS) v24_sig &= ~RFCOMM_V24_RTR; if (clear & TIOCM_RI) v24_sig &= ~RFCOMM_V24_IC; if (clear & TIOCM_CD) v24_sig &= ~RFCOMM_V24_DV; rfcomm_dlc_set_modem_status(dlc, v24_sig); return 0; }
target: 0
cwe: [ "CWE-200" ]
project: linux
commit_id: f9432c5ec8b1e9a09b9b0e5569e3c73db8de432a
hash: 70,355,863,323,121,570,000,000,000,000,000,000,000
size: 32
message:
Bluetooth: RFCOMM - Fix info leak in ioctl(RFCOMMGETDEVLIST) The RFCOMM code fails to initialize the two padding bytes of struct rfcomm_dev_list_req inserted for alignment before copying it to userland. Additionally there are two padding bytes in each instance of struct rfcomm_dev_info. The ioctl() that for disclosures two bytes plus dev_num times two bytes uninitialized kernel heap memory. Allocate the memory using kzalloc() to fix this issue. Signed-off-by: Mathias Krause <[email protected]> Cc: Marcel Holtmann <[email protected]> Cc: Gustavo Padovan <[email protected]> Cc: Johan Hedberg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
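The leak pattern and its fix are generic C, sketched below with an invented struct; the point is that zeroing the whole allocation covers the compiler-inserted padding that field-by-field assignment never touches:

```c
#include <stdlib.h>

struct dev_info {       /* illustrative struct with implicit padding */
    short id;           /* 2 bytes */
                        /* 2 padding bytes on typical ABIs */
    int   flags;        /* 4 bytes */
};

/* kzalloc analogue: the block, padding included, starts zeroed, so
 * copying the struct wholesale to userland discloses nothing. */
static struct dev_info *alloc_info(void)
{
    struct dev_info *p = calloc(1, sizeof(*p));
    if (p) {
        p->id = 1;
        p->flags = 0;
    }
    return p;
}
```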
int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout) { struct usb_ctrlrequest *dr; int ret; dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); if (!dr) return -ENOMEM; dr->bRequestType = requesttype; dr->bRequest = request; dr->wValue = cpu_to_le16(value); dr->wIndex = cpu_to_le16(index); dr->wLength = cpu_to_le16(size); ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); kfree(dr); return ret; }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: linux
commit_id: 2e1c42391ff2556387b3cb6308b24f6f65619feb
hash: 185,639,225,843,117,350,000,000,000,000,000,000,000
size: 23
message:
USB: core: harden cdc_parse_cdc_header Andrey Konovalov reported a possible out-of-bounds problem for the cdc_parse_cdc_header function. He writes: It looks like cdc_parse_cdc_header() doesn't validate buflen before accessing buffer[1], buffer[2] and so on. The only check present is while (buflen > 0). So fix this issue up by properly validating the buffer length matches what the descriptor says it is. Reported-by: Andrey Konovalov <[email protected]> Tested-by: Andrey Konovalov <[email protected]> Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
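The validation this message describes follows a standard descriptor-walk shape; a self-contained sketch of that shape (not the actual cdc_parse_cdc_header code):

```c
#include <stddef.h>

/* Never read past buflen: require at least a two-byte header, and
 * require the descriptor's own claimed length to fit what is left. */
static int walk_descriptors(const unsigned char *buffer, size_t buflen)
{
    while (buflen >= 2) {            /* bLength + bDescriptorType */
        unsigned char elength = buffer[0];
        if (elength < 2 || elength > buflen)
            return -1;               /* malformed or truncated */
        /* ... dispatch on buffer[1], reading at most elength bytes ... */
        buffer += elength;
        buflen -= elength;
    }
    return 0;
}
```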
static int decode_coeff_abs_level_greater2(thread_context* tctx, int cIdx, // int i,int n, int ctxSet) { logtrace(LogSlice,"# coeff_abs_level_greater2\n"); int ctxIdxInc = ctxSet; if (cIdx>0) ctxIdxInc+=4; int bit = decode_CABAC_bit(&tctx->cabac_decoder, &tctx->ctx_model[CONTEXT_MODEL_COEFF_ABS_LEVEL_GREATER2_FLAG + ctxIdxInc]); logtrace(LogSymbols,"$1 coeff_abs_level_greater2=%d\n",bit); return bit; }
target: 0
cwe: []
project: libde265
commit_id: e83f3798dd904aa579425c53020c67e03735138d
hash: 170,637,670,456,814,100,000,000,000,000,000,000,000
size: 17
message:
fix check for valid PPS idx (#298)
prefix_components (char *filename, bool checkdirs) { int count = 0; struct stat stat_buf; int stat_result; char *f = filename + FILE_SYSTEM_PREFIX_LEN (filename); if (*f) while (*++f) if (ISSLASH (f[0]) && ! ISSLASH (f[-1])) { if (checkdirs) { *f = '\0'; stat_result = safe_stat (filename, &stat_buf); *f = '/'; if (! (stat_result == 0 && S_ISDIR (stat_buf.st_mode))) break; } count++; } return count; }
target: 0
cwe: [ "CWE-476" ]
project: patch
commit_id: f290f48a621867084884bfff87f8093c15195e6a
hash: 228,428,185,138,818,300,000,000,000,000,000,000,000
size: 25
message:
Fix segfault with mangled rename patch http://savannah.gnu.org/bugs/?53132 * src/pch.c (intuit_diff_type): Ensure that two filenames are specified for renames and copies (fix the existing check).
bool CanonicalQuery::isSimpleIdQuery(const BSONObj& query) { bool hasID = false; BSONObjIterator it(query); while (it.more()) { BSONElement elt = it.next(); if (elt.fieldNameStringData() == "_id") { // Verify that the query on _id is a simple equality. hasID = true; if (elt.type() == Object) { // If the value is an object, it can't have a query operator // (must be a literal object match). if (elt.Obj().firstElementFieldName()[0] == '$') { return false; } } else if (!Indexability::isExactBoundsGenerating(elt)) { // The _id fild cannot be something like { _id : { $gt : ... // But it can be BinData. return false; } } else { return false; } } return hasID; }
target: 0
cwe: [ "CWE-755" ]
project: mongo
commit_id: c8ced6df8f620daaa2e539f192f2eef356c63e9c
hash: 250,983,924,630,274,900,000,000,000,000,000,000,000
size: 28
message:
SERVER-47773 Error consistently when tailable cursors and $near are used together
static void macvtap_sock_destruct(struct sock *sk) { skb_queue_purge(&sk->sk_receive_queue); }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: linux
commit_id: b92946e2919134ebe2a4083e4302236295ea2a73
hash: 312,191,320,812,454,900,000,000,000,000,000,000,000
size: 4
message:
macvtap: zerocopy: validate vectors before building skb There're several reasons that the vectors need to be validated: - Return error when caller provides vectors whose num is greater than UIO_MAXIOV. - Linearize part of skb when userspace provides vectors grater than MAX_SKB_FRAGS. - Return error when userspace provides vectors whose total length may exceed - MAX_SKB_FRAGS * PAGE_SIZE. Signed-off-by: Jason Wang <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]>
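Those three rules can be captured in one standalone validation pass; the constants stand in for UIO_MAXIOV, MAX_SKB_FRAGS, and PAGE_SIZE, and the function itself is illustrative:

```c
#include <stddef.h>
#include <sys/uio.h>

#define MAXIOV      1024   /* stand-in for UIO_MAXIOV */
#define MAX_FRAGS   17     /* stand-in for MAX_SKB_FRAGS */
#define PAGE_SZ     4096   /* stand-in for PAGE_SIZE */

/* Reject oversized vectors up front; flag when zerocopy must fall
 * back to partial linearization because there are too many segments. */
static int validate_vectors(const struct iovec *iov, size_t count,
                            int *need_linearize)
{
    const size_t max_total = (size_t)MAX_FRAGS * PAGE_SZ;
    size_t i, total = 0;

    if (count > MAXIOV)
        return -1;
    for (i = 0; i < count; i++) {
        if (iov[i].iov_len > max_total - total)
            return -1;               /* total length over the cap */
        total += iov[i].iov_len;
    }
    *need_linearize = (count > MAX_FRAGS);
    return 0;
}
```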
GF_Err HintFile(GF_ISOFile *file, u32 MTUSize, u32 max_ptime, u32 rtp_rate, u32 base_flags, Bool copy_data, Bool interleave, Bool regular_iod, Bool single_group, Bool hint_no_offset) { GF_ESD *esd; GF_InitialObjectDescriptor *iod; u32 i, val, res, streamType; u32 sl_mode, prev_ocr, single_ocr, nb_done, tot_bw, bw, flags, spec_type; GF_Err e; char szPayload[30]; GF_RTPHinter *hinter; Bool copy, has_iod, single_av; u8 init_payt = BASE_PAYT; u32 mtype; GF_SDP_IODProfile iod_mode = GF_SDP_IOD_NONE; u32 media_group = 0; u8 media_prio = 0; tot_bw = 0; prev_ocr = 0; single_ocr = 1; has_iod = 1; iod = (GF_InitialObjectDescriptor *) gf_isom_get_root_od(file); if (!iod) has_iod = 0; else { if (!gf_list_count(iod->ESDescriptors)) has_iod = 0; gf_odf_desc_del((GF_Descriptor *) iod); } spec_type = gf_isom_guess_specification(file); single_av = single_group ? 1 : gf_isom_is_single_av(file); /*first make sure we use a systems track as base OCR*/ for (i=0; i<gf_isom_get_track_count(file); i++) { res = gf_isom_get_media_type(file, i+1); if ((res==GF_ISOM_MEDIA_SCENE) || (res==GF_ISOM_MEDIA_OD)) { if (gf_isom_is_track_in_root_od(file, i+1)) { gf_isom_set_default_sync_track(file, i+1); break; } } } nb_done = 0; for (i=0; i<gf_isom_get_track_count(file); i++) { sl_mode = base_flags; copy = copy_data; /*skip emty tracks (mainly MPEG-4 interaction streams...*/ if (!gf_isom_get_sample_count(file, i+1)) continue; if (!gf_isom_is_track_enabled(file, i+1)) { fprintf(stderr, "Track ID %d disabled - skipping hint\n", gf_isom_get_track_id(file, i+1) ); continue; } mtype = gf_isom_get_media_type(file, i+1); switch (mtype) { case GF_ISOM_MEDIA_VISUAL: if (single_av) { media_group = 2; media_prio = 2; } break; case GF_ISOM_MEDIA_AUXV: if (single_av) { media_group = 2; media_prio = 3; } break; case GF_ISOM_MEDIA_PICT: if (single_av) { media_group = 2; media_prio = 4; } break; case GF_ISOM_MEDIA_AUDIO: if (single_av) { media_group = 2; media_prio = 1; } break; case GF_ISOM_MEDIA_HINT: continue; default: /*no hinting of systems track on isma*/ if (spec_type==GF_ISOM_BRAND_ISMA) continue; } mtype = gf_isom_get_media_subtype(file, i+1, 1); if ((mtype==GF_ISOM_SUBTYPE_MPEG4) || (mtype==GF_ISOM_SUBTYPE_MPEG4_CRYP) ) mtype = gf_isom_get_mpeg4_subtype(file, i+1, 1); if (!single_av) { /*one media per group only (we should prompt user for group selection)*/ media_group ++; media_prio = 1; } streamType = 0; esd = gf_isom_get_esd(file, i+1, 1); if (esd) { streamType = esd->decoderConfig->streamType; if (!prev_ocr) { prev_ocr = esd->OCRESID; if (!esd->OCRESID) prev_ocr = esd->ESID; } else if (esd->OCRESID && prev_ocr != esd->OCRESID) { single_ocr = 0; } /*OD MUST BE WITHOUT REFERENCES*/ if (streamType==1) copy = 1; } gf_odf_desc_del((GF_Descriptor *) esd); if (!regular_iod && gf_isom_is_track_in_root_od(file, i+1)) { /*single AU - check if base64 would fit in ESD (consider 33% overhead of base64), otherwise stream*/ if (gf_isom_get_sample_count(file, i+1)==1) { GF_ISOSample *samp = gf_isom_get_sample(file, i+1, 1, &val); if (streamType) { res = gf_hinter_can_embbed_data(samp->data, samp->dataLength, streamType); } else { /*not a system track, we shall hint it*/ res = 0; } if (samp) gf_isom_sample_del(&samp); if (res) continue; } } if (interleave) sl_mode |= GP_RTP_PCK_USE_INTERLEAVING; hinter = gf_hinter_track_new(file, i+1, MTUSize, max_ptime, rtp_rate, sl_mode, init_payt, copy, media_group, media_prio, &e); if (!hinter) { if (e) { fprintf(stderr, "Cannot create hinter (%s)\n", gf_error_to_string(e)); if (!nb_done) return e; } continue; } if (hint_no_offset) gf_hinter_track_force_no_offsets(hinter); bw = gf_hinter_track_get_bandwidth(hinter); tot_bw += bw; flags = gf_hinter_track_get_flags(hinter); //set extraction mode for AVC/SVC gf_isom_set_nalu_extract_mode(file, i+1, GF_ISOM_NALU_EXTRACT_LAYER_ONLY); gf_hinter_track_get_payload_name(hinter, szPayload); fprintf(stderr, "Hinting track ID %d - Type \"%s:%s\" (%s) - BW %d kbps\n", gf_isom_get_track_id(file, i+1), gf_4cc_to_str(mtype), gf_4cc_to_str(mtype), szPayload, bw); if (flags & GP_RTP_PCK_SYSTEMS_CAROUSEL) fprintf(stderr, "\tMPEG-4 Systems stream carousel enabled\n"); /* if (flags & GP_RTP_PCK_FORCE_MPEG4) fprintf(stderr, "\tMPEG4 transport forced\n"); if (flags & GP_RTP_PCK_USE_MULTI) fprintf(stderr, "\tRTP aggregation enabled\n"); */ e = gf_hinter_track_process(hinter); if (!e) e = gf_hinter_track_finalize(hinter, has_iod); gf_hinter_track_del(hinter); if (e) { fprintf(stderr, "Error while hinting (%s)\n", gf_error_to_string(e)); if (!nb_done) return e; } init_payt++; nb_done ++; } if (has_iod) { iod_mode = GF_SDP_IOD_ISMA; if (regular_iod) iod_mode = GF_SDP_IOD_REGULAR; } else { iod_mode = GF_SDP_IOD_NONE; } gf_hinter_finalize(file, iod_mode, tot_bw); if (!single_ocr) fprintf(stderr, "Warning: at least 2 timelines found in the file\nThis may not be supported by servers/players\n\n"); return GF_OK; }
target: 0
cwe: [ "CWE-476" ]
project: gpac
commit_id: 9eeac00b38348c664dfeae2525bba0cf1bc32349
hash: 220,294,580,854,900,180,000,000,000,000,000,000,000
size: 178
message:
fixed #1565
PHP_FUNCTION(ldap_mod_del) { php_ldap_do_modify(INTERNAL_FUNCTION_PARAM_PASSTHRU, LDAP_MOD_DELETE); }
target: 0
cwe: [ "CWE-476" ]
project: php-src
commit_id: 49782c54994ecca2ef2a061063bd5a7079c43527
hash: 133,648,206,558,701,600,000,000,000,000,000,000,000
size: 4
message:
Fix bug #76248 - Malicious LDAP-Server Response causes Crash
long BlockGroup::Parse() { const long status = m_block.Parse(m_pCluster); if (status) return status; m_block.SetKey((m_prev > 0) && (m_next <= 0)); return 0; }
target: 0
cwe: [ "CWE-20" ]
project: libvpx
commit_id: 34d54b04e98dd0bac32e9aab0fbda0bf501bc742
hash: 275,669,274,490,794,600,000,000,000,000,000,000,000
size: 10
message:
update libwebm to libwebm-1.0.0.27-358-gdbf1d10 changelog: https://chromium.googlesource.com/webm/libwebm/+log/libwebm-1.0.0.27-351-g9f23fbc..libwebm-1.0.0.27-358-gdbf1d10 Change-Id: I28a6b3ae02a53fb1f2029eee11e9449afb94c8e3
static int dread(DviContext *dvi, char *buffer, size_t len) { if(NEEDBYTES(dvi, len) && get_bytes(dvi, len) == -1) return -1; memcpy(buffer, dvi->buffer.data + dvi->buffer.pos, len); dvi->buffer.pos += len; return 0; }
target: 0
cwe: [ "CWE-20" ]
project: evince
commit_id: d4139205b010ed06310d14284e63114e88ec6de2
hash: 198,614,252,426,964,720,000,000,000,000,000,000,000
size: 8
message:
backends: Fix several security issues in the dvi-backend. See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643.
GF_Err adaf_dump(GF_Box *a, FILE * trace) { GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox *)a; if (!a) return GF_BAD_PARAM; gf_isom_box_dump_start(a, "AdobeDRMAUFormatBox ", trace); fprintf(trace, "SelectiveEncryption=\"%d\" IV_length=\"%d\">\n", ptr->selective_enc ? 1 : 0, ptr->IV_length); gf_isom_box_dump_done("AdobeDRMAUFormatBox", a, trace); return GF_OK; }
target: 0
cwe: [ "CWE-125" ]
project: gpac
commit_id: bceb03fd2be95097a7b409ea59914f332fb6bc86
hash: 15,896,345,017,880,102,000,000,000,000,000,000,000
size: 9
message:
fixed 2 possible heap overflows (inc. #1088)
xmlXPathNodeSetClear(xmlNodeSetPtr set, int hasNsNodes) { if ((set == NULL) || (set->nodeNr <= 0)) return; else if (hasNsNodes) { int i; xmlNodePtr node; for (i = 0; i < set->nodeNr; i++) { node = set->nodeTab[i]; if ((node != NULL) && (node->type == XML_NAMESPACE_DECL)) xmlXPathNodeSetFreeNs((xmlNsPtr) node); } } set->nodeNr = 0; }
target: 0
cwe: [ "CWE-119" ]
project: libxml2
commit_id: 91d19754d46acd4a639a8b9e31f50f31c78f8c9c
hash: 109,341,600,527,106,350,000,000,000,000,000,000,000
size: 17
message:
Fix the semantic of XPath axis for namespace/attribute context nodes The processing of namespace and attributes nodes was not compliant to the XPath-1.0 specification
static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1) { struct pbase_tree_cache *ent, *nent; void *data; unsigned long size; enum object_type type; int neigh; int my_ix = pbase_tree_cache_ix(sha1); int available_ix = -1; /* pbase-tree-cache acts as a limited hashtable. * your object will be found at your index or within a few * slots after that slot if it is cached. */ for (neigh = 0; neigh < 8; neigh++) { ent = pbase_tree_cache[my_ix]; if (ent && !hashcmp(ent->sha1, sha1)) { ent->ref++; return ent; } else if (((available_ix < 0) && (!ent || !ent->ref)) || ((0 <= available_ix) && (!ent && pbase_tree_cache[available_ix]))) available_ix = my_ix; if (!ent) break; my_ix = pbase_tree_cache_ix_incr(my_ix); } /* Did not find one. Either we got a bogus request or * we need to read and perhaps cache. */ data = read_sha1_file(sha1, &type, &size); if (!data) return NULL; if (type != OBJ_TREE) { free(data); return NULL; } /* We need to either cache or return a throwaway copy */ if (available_ix < 0) ent = NULL; else { ent = pbase_tree_cache[available_ix]; my_ix = available_ix; } if (!ent) { nent = xmalloc(sizeof(*nent)); nent->temporary = (available_ix < 0); } else { /* evict and reuse */ free(ent->tree_data); nent = ent; } hashcpy(nent->sha1, sha1); nent->tree_data = data; nent->tree_size = size; nent->ref = 1; if (!nent->temporary) pbase_tree_cache[my_ix] = nent; return nent; }
target: 0
cwe: [ "CWE-119", "CWE-787" ]
project: git
commit_id: de1e67d0703894cb6ea782e36abb63976ab07e60
hash: 141,273,634,612,118,200,000,000,000,000,000,000,000
size: 66
message:
list-objects: pass full pathname to callbacks When we find a blob at "a/b/c", we currently pass this to our show_object_fn callbacks as two components: "a/b/" and "c". Callbacks which want the full value then call path_name(), which concatenates the two. But this is an inefficient interface; the path is a strbuf, and we could simply append "c" to it temporarily, then roll back the length, without creating a new copy. So we could improve this by teaching the callsites of path_name() this trick (and there are only 3). But we can also notice that no callback actually cares about the broken-down representation, and simply pass each callback the full path "a/b/c" as a string. The callback code becomes even simpler, then, as we do not have to worry about freeing an allocated buffer, nor rolling back our modification to the strbuf. This is theoretically less efficient, as some callbacks would not bother to format the final path component. But in practice this is not measurable. Since we use the same strbuf over and over, our work to grow it is amortized, and we really only pay to memcpy a few bytes. Signed-off-by: Jeff King <[email protected]> Signed-off-by: Junio C Hamano <[email protected]>
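The append-then-rollback idiom the message contrasts with path_name() can be shown on a toy growable buffer (the struct and fixed size are illustrative; git's strbuf grows dynamically):

```c
#include <stdio.h>

struct buf { char s[4096]; size_t len; };

/* Temporarily extend the shared path with one component, hand the full
 * "a/b/c" string to the callback, then restore the saved length: no
 * allocation, no copy, no free. Assumes name fits the fixed buffer. */
static void with_component(struct buf *path, const char *name,
                           void (*show)(const char *))
{
    size_t saved = path->len;
    int n = snprintf(path->s + path->len,
                     sizeof(path->s) - path->len, "%s", name);
    if (n > 0)
        path->len += (size_t)n;
    show(path->s);
    path->len = saved;
    path->s[saved] = '\0';           /* roll back to "a/b/" */
}
```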
static void inline ipv6_store_devconf(struct ipv6_devconf *cnf, __s32 *array, int bytes) { memset(array, 0, bytes); array[DEVCONF_FORWARDING] = cnf->forwarding; array[DEVCONF_HOPLIMIT] = cnf->hop_limit; array[DEVCONF_MTU6] = cnf->mtu6; array[DEVCONF_ACCEPT_RA] = cnf->accept_ra; array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects; array[DEVCONF_AUTOCONF] = cnf->autoconf; array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits; array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits; array[DEVCONF_RTR_SOLICIT_INTERVAL] = cnf->rtr_solicit_interval; array[DEVCONF_RTR_SOLICIT_DELAY] = cnf->rtr_solicit_delay; array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version; #ifdef CONFIG_IPV6_PRIVACY array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr; array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft; array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft; array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry; array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor; #endif array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses; }
target: 0
cwe: [ "CWE-200" ]
project: linux-2.6
commit_id: 8a47077a0b5aa2649751c46e7a27884e6686ccbf
hash: 315,608,834,993,406,550,000,000,000,000,000,000,000
size: 24
message:
[NETLINK]: Missing padding fields in dumped structures Plug holes with padding fields and initialized them to zero. Signed-off-by: Patrick McHardy <[email protected]> Signed-off-by: David S. Miller <[email protected]>
dump_track_compose_groups(void) { int i; for (i = 0; i < (1<<17); i++) { if (compose_groups[i] == 0) continue; #undef printf printf("COMPOSE_GROUPS: %04x:%d\n", i, compose_groups[i]); } }
target: 0
cwe: [ "CWE-476" ]
project: ghostpdl
commit_id: 7870f4951bcc6a153f317e3439e14d0e929fd231
hash: 56,608,749,536,230,160,000,000,000,000,000,000,000
size: 12
message:
Bug 701795: Segv due to image mask issue
static void php_zlib_output_compression_start(TSRMLS_D) { zval *zoh; php_output_handler *h; switch (ZLIBG(output_compression)) { case 0: break; case 1: ZLIBG(output_compression) = PHP_OUTPUT_HANDLER_DEFAULT_SIZE; /* break omitted intentionally */ default: if ( php_zlib_output_encoding(TSRMLS_C) && (h = php_zlib_output_handler_init(ZEND_STRL(PHP_ZLIB_OUTPUT_HANDLER_NAME), ZLIBG(output_compression), PHP_OUTPUT_HANDLER_STDFLAGS TSRMLS_CC)) && (SUCCESS == php_output_handler_start(h TSRMLS_CC))) { if (ZLIBG(output_handler) && *ZLIBG(output_handler)) { MAKE_STD_ZVAL(zoh); ZVAL_STRING(zoh, ZLIBG(output_handler), 1); php_output_start_user(zoh, ZLIBG(output_compression), PHP_OUTPUT_HANDLER_STDFLAGS TSRMLS_CC); zval_ptr_dtor(&zoh); } } break; } }
target: 0
cwe: [ "CWE-20" ]
project: php-src
commit_id: 52b93f0cfd3cba7ff98cc5198df6ca4f23865f80
hash: 290,748,936,970,704,750,000,000,000,000,000,000,000
size: 25
message:
Fixed bug #69353 (Missing null byte checks for paths in various PHP extensions)
static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { struct vcpu_vmx *vmx = to_vmx(vcpu); switch (msr_index) { case MSR_IA32_VMX_BASIC: *pdata = vmx->nested.nested_vmx_basic; break; case MSR_IA32_VMX_TRUE_PINBASED_CTLS: case MSR_IA32_VMX_PINBASED_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_pinbased_ctls_low, vmx->nested.nested_vmx_pinbased_ctls_high); if (msr_index == MSR_IA32_VMX_PINBASED_CTLS) *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; break; case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: case MSR_IA32_VMX_PROCBASED_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_procbased_ctls_low, vmx->nested.nested_vmx_procbased_ctls_high); if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS) *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; break; case MSR_IA32_VMX_TRUE_EXIT_CTLS: case MSR_IA32_VMX_EXIT_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_exit_ctls_low, vmx->nested.nested_vmx_exit_ctls_high); if (msr_index == MSR_IA32_VMX_EXIT_CTLS) *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; break; case MSR_IA32_VMX_TRUE_ENTRY_CTLS: case MSR_IA32_VMX_ENTRY_CTLS: *pdata = vmx_control_msr( vmx->nested.nested_vmx_entry_ctls_low, vmx->nested.nested_vmx_entry_ctls_high); if (msr_index == MSR_IA32_VMX_ENTRY_CTLS) *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; break; case MSR_IA32_VMX_MISC: *pdata = vmx_control_msr( vmx->nested.nested_vmx_misc_low, vmx->nested.nested_vmx_misc_high); break; case MSR_IA32_VMX_CR0_FIXED0: *pdata = vmx->nested.nested_vmx_cr0_fixed0; break; case MSR_IA32_VMX_CR0_FIXED1: *pdata = vmx->nested.nested_vmx_cr0_fixed1; break; case MSR_IA32_VMX_CR4_FIXED0: *pdata = vmx->nested.nested_vmx_cr4_fixed0; break; case MSR_IA32_VMX_CR4_FIXED1: *pdata = vmx->nested.nested_vmx_cr4_fixed1; break; case MSR_IA32_VMX_VMCS_ENUM: *pdata = vmx->nested.nested_vmx_vmcs_enum; break; case MSR_IA32_VMX_PROCBASED_CTLS2: *pdata = vmx_control_msr( vmx->nested.nested_vmx_secondary_ctls_low, vmx->nested.nested_vmx_secondary_ctls_high); break; case MSR_IA32_VMX_EPT_VPID_CAP: *pdata = vmx->nested.nested_vmx_ept_caps | ((u64)vmx->nested.nested_vmx_vpid_caps << 32); break; case MSR_IA32_VMX_VMFUNC: *pdata = vmx->nested.nested_vmx_vmfunc_controls; break; default: return 1; } return 0; }
target: 0
cwe: [ "CWE-20", "CWE-617" ]
project: linux
commit_id: 3a8b0677fc6180a467e26cc32ce6b0c09a32f9bb
hash: 290,857,315,090,535,700,000,000,000,000,000,000,000
size: 78
message:
KVM: VMX: Do not BUG() on out-of-bounds guest IRQ The value of the guest_irq argument to vmx_update_pi_irte() is ultimately coming from a KVM_IRQFD API call. Do not BUG() in vmx_update_pi_irte() if the value is out-of bounds. (Especially, since KVM as a whole seems to hang after that.) Instead, print a message only once if we find that we don't have a route for a certain IRQ (which can be out-of-bounds or within the array). This fixes CVE-2017-1000252. Fixes: efc644048ecde54 ("KVM: x86: Update IRTE for posted-interrupts") Signed-off-by: Jan H. Schönherr <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
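The defensive shape the message describes, reduced to a freestanding function (the warn-once policy matches the message; names and everything else are illustrative):

```c
#include <stdio.h>

/* Treat an out-of-range guest IRQ as a recoverable error and warn only
 * once, instead of BUG()ing and hanging the host. */
static int update_irte_checked(unsigned int guest_irq,
                               unsigned int nr_rt_entries)
{
    static int warned;

    if (guest_irq >= nr_rt_entries) {
        if (!warned++)
            fprintf(stderr, "no route for irq %u (table size %u)\n",
                    guest_irq, nr_rt_entries);
        return -1;       /* was effectively BUG_ON() before the fix */
    }
    /* ... update the posted-interrupt IRTE for guest_irq ... */
    return 0;
}
```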
RZ_API const char *rz_bin_dwarf_get_lang_name(ut64 lang) { if (lang >= RZ_ARRAY_SIZE(dwarf_langs)) { return NULL; } return dwarf_langs[lang]; }
target: 0
cwe: [ "CWE-787" ]
project: rizin
commit_id: aa6917772d2f32e5a7daab25a46c72df0b5ea406
hash: 128,835,508,183,447,470,000,000,000,000,000,000,000
size: 6
message:
Fix oob write for dwarf with abbrev with count 0 (Fix #2083) (#2086)
str_to_key(unsigned char *str, unsigned char *key) { int i; key[0] = str[0] >> 1; key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2); key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3); key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4); key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5); key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6); key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7); key[7] = str[6] & 0x7F; for (i = 0; i < 8; i++) key[i] = (key[i] << 1); }
target: 0
cwe: [ "CWE-119", "CWE-703" ]
project: linux
commit_id: 06deeec77a5a689cc94b21a8a91a76e42176685d
hash: 89,705,900,275,548,760,000,000,000,000,000,000,000
size: 15
message:
cifs: Fix smbencrypt() to stop pointing a scatterlist at the stack smbencrypt() points a scatterlist to the stack, which is breaks if CONFIG_VMAP_STACK=y. Fix it by switching to crypto_cipher_encrypt_one(). The new code should be considerably faster as an added benefit. This code is nearly identical to some code that Eric Biggers suggested. Cc: [email protected] # 4.9 only Reported-by: Eric Biggers <[email protected]> Signed-off-by: Andy Lutomirski <[email protected]> Acked-by: Jeff Layton <[email protected]> Signed-off-by: Steve French <[email protected]>
get_ssa( Operation *op, BerElement *ber, Filter *f, const char **text ) { ber_tag_t tag; ber_len_t len; int rc; struct berval desc, value, nvalue; char *last; SubstringsAssertion ssa; *text = "error decoding filter"; Debug( LDAP_DEBUG_FILTER, "begin get_ssa\n", 0, 0, 0 ); if ( ber_scanf( ber, "{m" /*}*/, &desc ) == LBER_ERROR ) { return SLAPD_DISCONNECT; } *text = NULL; ssa.sa_desc = NULL; ssa.sa_initial.bv_val = NULL; ssa.sa_any = NULL; ssa.sa_final.bv_val = NULL; rc = slap_bv2ad( &desc, &ssa.sa_desc, text ); if( rc != LDAP_SUCCESS ) { f->f_choice |= SLAPD_FILTER_UNDEFINED; rc = slap_bv2undef_ad( &desc, &ssa.sa_desc, text, SLAP_AD_PROXIED|SLAP_AD_NOINSERT ); if( rc != LDAP_SUCCESS ) { Debug( LDAP_DEBUG_ANY, "get_ssa: conn %lu unknown attribute type=%s (%ld)\n", op->o_connid, desc.bv_val, (long) rc ); ssa.sa_desc = slap_bv2tmp_ad( &desc, op->o_tmpmemctx ); } } rc = LDAP_PROTOCOL_ERROR; /* If there is no substring matching rule, there's nothing * we can do with this filter. But we continue to parse it * for logging purposes. */ if ( ssa.sa_desc->ad_type->sat_substr == NULL ) { f->f_choice |= SLAPD_FILTER_UNDEFINED; Debug( LDAP_DEBUG_FILTER, "get_ssa: no substring matching rule for attributeType %s\n", desc.bv_val, 0, 0 ); } for ( tag = ber_first_element( ber, &len, &last ); tag != LBER_DEFAULT; tag = ber_next_element( ber, &len, last ) ) { unsigned usage; if ( ber_scanf( ber, "m", &value ) == LBER_ERROR ) { rc = SLAPD_DISCONNECT; goto return_error; } if ( value.bv_val == NULL || value.bv_len == 0 ) { rc = LDAP_INVALID_SYNTAX; goto return_error; } switch ( tag ) { case LDAP_SUBSTRING_INITIAL: if ( ssa.sa_initial.bv_val != NULL || ssa.sa_any != NULL || ssa.sa_final.bv_val != NULL ) { rc = LDAP_PROTOCOL_ERROR; goto return_error; } usage = SLAP_MR_SUBSTR_INITIAL; break; case LDAP_SUBSTRING_ANY: if ( ssa.sa_final.bv_val != NULL ) { rc = LDAP_PROTOCOL_ERROR; goto return_error; } usage = SLAP_MR_SUBSTR_ANY; break; case LDAP_SUBSTRING_FINAL: if ( ssa.sa_final.bv_val != NULL ) { rc = LDAP_PROTOCOL_ERROR; goto return_error; } usage = SLAP_MR_SUBSTR_FINAL; break; default: Debug( LDAP_DEBUG_FILTER, " unknown substring choice=%ld\n", (long) tag, 0, 0 ); rc = LDAP_PROTOCOL_ERROR; goto return_error; } /* validate/normalize using equality matching rule validator! 
*/ rc = asserted_value_validate_normalize( ssa.sa_desc, ssa.sa_desc->ad_type->sat_equality, usage, &value, &nvalue, text, op->o_tmpmemctx ); if( rc != LDAP_SUCCESS ) { f->f_choice |= SLAPD_FILTER_UNDEFINED; Debug( LDAP_DEBUG_FILTER, "get_ssa: illegal value for attributeType %s (%d) %s\n", desc.bv_val, rc, *text ); ber_dupbv_x( &nvalue, &value, op->o_tmpmemctx ); } switch ( tag ) { case LDAP_SUBSTRING_INITIAL: Debug( LDAP_DEBUG_FILTER, " INITIAL\n", 0, 0, 0 ); ssa.sa_initial = nvalue; break; case LDAP_SUBSTRING_ANY: Debug( LDAP_DEBUG_FILTER, " ANY\n", 0, 0, 0 ); ber_bvarray_add_x( &ssa.sa_any, &nvalue, op->o_tmpmemctx ); break; case LDAP_SUBSTRING_FINAL: Debug( LDAP_DEBUG_FILTER, " FINAL\n", 0, 0, 0 ); ssa.sa_final = nvalue; break; default: assert( 0 ); slap_sl_free( nvalue.bv_val, op->o_tmpmemctx ); rc = LDAP_PROTOCOL_ERROR; return_error: Debug( LDAP_DEBUG_FILTER, " error=%ld\n", (long) rc, 0, 0 ); slap_sl_free( ssa.sa_initial.bv_val, op->o_tmpmemctx ); ber_bvarray_free_x( ssa.sa_any, op->o_tmpmemctx ); if ( ssa.sa_desc->ad_flags & SLAP_DESC_TEMPORARY ) op->o_tmpfree( ssa.sa_desc, op->o_tmpmemctx ); slap_sl_free( ssa.sa_final.bv_val, op->o_tmpmemctx ); return rc; } *text = NULL; rc = LDAP_SUCCESS; } if( rc == LDAP_SUCCESS ) { f->f_sub = op->o_tmpalloc( sizeof( ssa ), op->o_tmpmemctx ); *f->f_sub = ssa; } Debug( LDAP_DEBUG_FILTER, "end get_ssa\n", 0, 0, 0 ); return rc /* LDAP_SUCCESS */ ; }
0
[ "CWE-674" ]
openldap
98464c11df8247d6a11b52e294ba5dd4f0380440
30,879,359,782,068,890,000,000,000,000,000,000,000
166
ITS#9202 limit depth of nested filters Using a hardcoded limit for now; no reasonable apps should ever run into it.
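A sketch of the hardcoded-limit approach, assuming a recursive-descent filter parser: thread a depth counter through the recursion and fail with a protocol error once it crosses the cap. The constant and the signature are illustrative, not the actual slapd code.

#define MAX_FILTER_DEPTH 5000

static int get_filter_depth(Operation *op, BerElement *ber, Filter **filt,
			    const char **text, int depth)
{
	if (depth > MAX_FILTER_DEPTH) {
		*text = "filter nested too deeply";
		return LDAP_PROTOCOL_ERROR;
	}
	/* ... decode one component; for AND/OR/NOT, recurse on each
	 * sub-filter with depth + 1 ... */
	return LDAP_SUCCESS;
}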
Network::ConnectionBalancer& connectionBalancer() override { return connection_balancer_; }
0
[ "CWE-400" ]
envoy
dfddb529e914d794ac552e906b13d71233609bf7
136,939,902,401,007,570,000,000,000,000,000,000,000
1
listener: Add configurable accepted connection limits (#153) Add support for per-listener limits on accepted connections. Signed-off-by: Tony Allen <[email protected]>
int audit_compare_dname_path(const char *dname, const char *path, int *dirlen) { int dlen, plen; const char *p; if (!dname || !path) return 1; dlen = strlen(dname); plen = strlen(path); if (plen < dlen) return 1; /* disregard trailing slashes */ p = path + plen - 1; while ((*p == '/') && (p > path)) p--; /* find last path component */ p = p - dlen + 1; if (p < path) return 1; else if (p > path) { if (*--p != '/') return 1; else p++; } /* return length of path's directory component */ if (dirlen) *dirlen = p - path; return strncmp(p, dname, dlen); }
0
[ "CWE-362" ]
linux-2.6
8f7b0ba1c853919b85b54774775f567f30006107
335,389,654,084,182,370,000,000,000,000,000,000,000
35
Fix inotify watch removal/umount races Inotify watch removals suck violently. To kick the watch out we need (in this order) inode->inotify_mutex and ih->mutex. That's fine if we have a hold on inode; however, for all other cases we need to make damn sure we don't race with umount. We can *NOT* just grab a reference to a watch - inotify_unmount_inodes() will happily sail past it and we'll end with reference to inode potentially outliving its superblock. Ideally we just want to grab an active reference to superblock if we can; that will make sure we won't go into inotify_umount_inodes() until we are done. Cleanup is just deactivate_super(). However, that leaves a messy case - what if we *are* racing with umount() and active references to superblock can't be acquired anymore? We can bump ->s_count, grab ->s_umount, which will almost certainly wait until the superblock is shut down and the watch in question is pining for fjords. That's fine, but there is a problem - we might have hit the window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock is past the point of no return and is heading for shutdown) and the moment when deactivate_super() acquires ->s_umount. We could just do drop_super() yield() and retry, but that's rather antisocial and this stuff is luser-triggerable. OTOH, having grabbed ->s_umount and having found that we'd got there first (i.e. that ->s_root is non-NULL) we know that we won't race with inotify_umount_inodes(). So we could grab a reference to watch and do the rest as above, just with drop_super() instead of deactivate_super(), right? Wrong. We had to drop ih->mutex before we could grab ->s_umount. So the watch could've been gone already. That still can be dealt with - we need to save watch->wd, do idr_find() and compare its result with our pointer. If they match, we either have the damn thing still alive or we'd lost not one but two races at once, the watch had been killed and a new one got created with the same ->wd at the same address. That couldn't have happened in inotify_destroy(), but inotify_rm_wd() could run into that. Still, "new one got created" is not a problem - we have every right to kill it or leave it alone, whatever's more convenient. So we can use idr_find(...) == watch && watch->inode->i_sb == sb as "grab it and kill it" check. If it's been our original watch, we are fine, if it's a newcomer - nevermind, just pretend that we'd won the race and kill the fscker anyway; we are safe since we know that its superblock won't be going away. And yes, this is far beyond mere "not very pretty"; so's the entire concept of inotify to start with. Signed-off-by: Al Viro <[email protected]> Acked-by: Greg KH <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static void __bfq_set_in_service_queue(struct bfq_data *bfqd, struct bfq_queue *bfqq) { if (bfqq) { bfq_clear_bfqq_fifo_expire(bfqq); bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8; if (time_is_before_jiffies(bfqq->last_wr_start_finish) && bfqq->wr_coeff > 1 && bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && time_is_before_jiffies(bfqq->budget_timeout)) { /* * For soft real-time queues, move the start * of the weight-raising period forward by the * time the queue has not received any * service. Otherwise, a relatively long * service delay is likely to cause the * weight-raising period of the queue to end, * because of the short duration of the * weight-raising period of a soft real-time * queue. It is worth noting that this move * is not so dangerous for the other queues, * because soft real-time queues are not * greedy. * * To not add a further variable, we use the * overloaded field budget_timeout to * determine for how long the queue has not * received service, i.e., how much time has * elapsed since the queue expired. However, * this is a little imprecise, because * budget_timeout is set to jiffies if bfqq * not only expires, but also remains with no * request. */ if (time_after(bfqq->budget_timeout, bfqq->last_wr_start_finish)) bfqq->last_wr_start_finish += jiffies - bfqq->budget_timeout; else bfqq->last_wr_start_finish = jiffies; } bfq_set_budget_timeout(bfqd, bfqq); bfq_log_bfqq(bfqd, bfqq, "set_in_service_queue, cur-budget = %d", bfqq->entity.budget); } bfqd->in_service_queue = bfqq; }
0
[ "CWE-416" ]
linux
2f95fa5c955d0a9987ffdc3a095e2f4e62c5f2a9
189,708,428,757,098,400,000,000,000,000,000,000,000
52
block, bfq: fix use-after-free in bfq_idle_slice_timer_body In bfq_idle_slice_timer func, bfqq = bfqd->in_service_queue is not in bfqd-lock critical section. The bfqq, which is not equal to NULL in bfq_idle_slice_timer, may be freed after passing to bfq_idle_slice_timer_body. So we will access the freed memory. In addition, considering the bfqq may be in race, we should firstly check whether bfqq is in service before doing something on it in bfq_idle_slice_timer_body func. If the bfqq in race is not in service, it means the bfqq has been expired through __bfq_bfqq_expire func, and wait_request flags has been cleared in __bfq_bfqd_reset_in_service func. So we do not need to re-clear the wait_request of bfqq which is not in service. KASAN log is given as follows: [13058.354613] ================================================================== [13058.354640] BUG: KASAN: use-after-free in bfq_idle_slice_timer+0xac/0x290 [13058.354644] Read of size 8 at addr ffffa02cf3e63f78 by task fork13/19767 [13058.354646] [13058.354655] CPU: 96 PID: 19767 Comm: fork13 [13058.354661] Call trace: [13058.354667] dump_backtrace+0x0/0x310 [13058.354672] show_stack+0x28/0x38 [13058.354681] dump_stack+0xd8/0x108 [13058.354687] print_address_description+0x68/0x2d0 [13058.354690] kasan_report+0x124/0x2e0 [13058.354697] __asan_load8+0x88/0xb0 [13058.354702] bfq_idle_slice_timer+0xac/0x290 [13058.354707] __hrtimer_run_queues+0x298/0x8b8 [13058.354710] hrtimer_interrupt+0x1b8/0x678 [13058.354716] arch_timer_handler_phys+0x4c/0x78 [13058.354722] handle_percpu_devid_irq+0xf0/0x558 [13058.354731] generic_handle_irq+0x50/0x70 [13058.354735] __handle_domain_irq+0x94/0x110 [13058.354739] gic_handle_irq+0x8c/0x1b0 [13058.354742] el1_irq+0xb8/0x140 [13058.354748] do_wp_page+0x260/0xe28 [13058.354752] __handle_mm_fault+0x8ec/0x9b0 [13058.354756] handle_mm_fault+0x280/0x460 [13058.354762] do_page_fault+0x3ec/0x890 [13058.354765] do_mem_abort+0xc0/0x1b0 [13058.354768] el0_da+0x24/0x28 [13058.354770] [13058.354773] Allocated by task 19731: [13058.354780] kasan_kmalloc+0xe0/0x190 [13058.354784] kasan_slab_alloc+0x14/0x20 [13058.354788] kmem_cache_alloc_node+0x130/0x440 [13058.354793] bfq_get_queue+0x138/0x858 [13058.354797] bfq_get_bfqq_handle_split+0xd4/0x328 [13058.354801] bfq_init_rq+0x1f4/0x1180 [13058.354806] bfq_insert_requests+0x264/0x1c98 [13058.354811] blk_mq_sched_insert_requests+0x1c4/0x488 [13058.354818] blk_mq_flush_plug_list+0x2d4/0x6e0 [13058.354826] blk_flush_plug_list+0x230/0x548 [13058.354830] blk_finish_plug+0x60/0x80 [13058.354838] read_pages+0xec/0x2c0 [13058.354842] __do_page_cache_readahead+0x374/0x438 [13058.354846] ondemand_readahead+0x24c/0x6b0 [13058.354851] page_cache_sync_readahead+0x17c/0x2f8 [13058.354858] generic_file_buffered_read+0x588/0xc58 [13058.354862] generic_file_read_iter+0x1b4/0x278 [13058.354965] ext4_file_read_iter+0xa8/0x1d8 [ext4] [13058.354972] __vfs_read+0x238/0x320 [13058.354976] vfs_read+0xbc/0x1c0 [13058.354980] ksys_read+0xdc/0x1b8 [13058.354984] __arm64_sys_read+0x50/0x60 [13058.354990] el0_svc_common+0xb4/0x1d8 [13058.354994] el0_svc_handler+0x50/0xa8 [13058.354998] el0_svc+0x8/0xc [13058.354999] [13058.355001] Freed by task 19731: [13058.355007] __kasan_slab_free+0x120/0x228 [13058.355010] kasan_slab_free+0x10/0x18 [13058.355014] kmem_cache_free+0x288/0x3f0 [13058.355018] bfq_put_queue+0x134/0x208 [13058.355022] bfq_exit_icq_bfqq+0x164/0x348 [13058.355026] bfq_exit_icq+0x28/0x40 [13058.355030] ioc_exit_icq+0xa0/0x150 [13058.355035] put_io_context_active+0x250/0x438 
[13058.355038] exit_io_context+0xd0/0x138 [13058.355045] do_exit+0x734/0xc58 [13058.355050] do_group_exit+0x78/0x220 [13058.355054] __wake_up_parent+0x0/0x50 [13058.355058] el0_svc_common+0xb4/0x1d8 [13058.355062] el0_svc_handler+0x50/0xa8 [13058.355066] el0_svc+0x8/0xc [13058.355067] [13058.355071] The buggy address belongs to the object at ffffa02cf3e63e70#012 which belongs to the cache bfq_queue of size 464 [13058.355075] The buggy address is located 264 bytes inside of#012 464-byte region [ffffa02cf3e63e70, ffffa02cf3e64040) [13058.355077] The buggy address belongs to the page: [13058.355083] page:ffff7e80b3cf9800 count:1 mapcount:0 mapping:ffff802db5c90780 index:0xffffa02cf3e606f0 compound_mapcount: 0 [13058.366175] flags: 0x2ffffe0000008100(slab|head) [13058.370781] raw: 2ffffe0000008100 ffff7e80b53b1408 ffffa02d730c1c90 ffff802db5c90780 [13058.370787] raw: ffffa02cf3e606f0 0000000000370023 00000001ffffffff 0000000000000000 [13058.370789] page dumped because: kasan: bad access detected [13058.370791] [13058.370792] Memory state around the buggy address: [13058.370797] ffffa02cf3e63e00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fb fb [13058.370801] ffffa02cf3e63e80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [13058.370805] >ffffa02cf3e63f00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [13058.370808] ^ [13058.370811] ffffa02cf3e63f80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [13058.370815] ffffa02cf3e64000: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc [13058.370817] ================================================================== [13058.370820] Disabling lock debugging due to kernel taint Here, we directly pass the bfqd to bfq_idle_slice_timer_body func. -- V2->V3: rewrite the comment as suggested by Paolo Valente V1->V2: add one comment, and add Fixes and Reported-by tag. Fixes: aee69d78d ("block, bfq: introduce the BFQ-v0 I/O scheduler as an extra scheduler") Acked-by: Paolo Valente <[email protected]> Reported-by: Wang Wang <[email protected]> Signed-off-by: Zhiqiang Liu <[email protected]> Signed-off-by: Feilong Lin <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
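The shape of the fix - hand bfq_idle_slice_timer_body() the bfqd instead of a bfqq pointer read outside the lock, then re-read in_service_queue only after taking bfqd->lock - sketched below with the expire/re-arm logic elided. Simplified from the upstream change.

static void bfq_idle_slice_timer_body_sketch(struct bfq_data *bfqd)
{
	struct bfq_queue *bfqq;
	unsigned long flags;

	spin_lock_irqsave(&bfqd->lock, flags);
	bfqq = bfqd->in_service_queue;	/* re-read under the lock */
	if (bfqq) {
		/* only a queue still in service is expired / re-armed;
		 * a queue that lost the race is simply left alone */
	}
	spin_unlock_irqrestore(&bfqd->lock, flags);
}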
int btd_adapter_disconnect_device(struct btd_adapter *adapter, const bdaddr_t *bdaddr, uint8_t bdaddr_type) { struct mgmt_cp_disconnect cp; memset(&cp, 0, sizeof(cp)); bacpy(&cp.addr.bdaddr, bdaddr); cp.addr.type = bdaddr_type; if (mgmt_send(adapter->mgmt, MGMT_OP_DISCONNECT, adapter->dev_id, sizeof(cp), &cp, disconnect_complete, adapter, NULL) > 0) return 0; return -EIO; }
0
[ "CWE-862", "CWE-863" ]
bluez
b497b5942a8beb8f89ca1c359c54ad67ec843055
182,682,165,176,245,500,000,000,000,000,000,000,000
18
adapter: Fix storing discoverable setting The discoverable setting shall only be stored when changed via the Discoverable property and not when a discovery client sets it, as that shall be considered temporary, just for the lifetime of the discovery.
static bool exclusive_event_installable(struct perf_event *event, struct perf_event_context *ctx) { struct perf_event *iter_event; struct pmu *pmu = event->pmu; if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) return true; list_for_each_entry(iter_event, &ctx->event_list, event_entry) { if (exclusive_event_match(iter_event, event)) return false; } return true; }
0
[ "CWE-416", "CWE-362" ]
linux
12ca6ad2e3a896256f086497a7c7406a547ee373
78,908,540,908,341,240,000,000,000,000,000,000,000
16
perf: Fix race in swevent hash There's a race on CPU unplug where we free the swevent hash array while it can still have events on. This will result in a use-after-free which is BAD. Simply do not free the hash array on unplug. This leaves the thing around and no use-after-free takes place. When the last swevent dies, we do a for_each_possible_cpu() iteration anyway to clean these up, at which time we'll free it, so no leakage will occur. Reported-by: Sasha Levin <[email protected]> Tested-by: Sasha Levin <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Cc: Frederic Weisbecker <[email protected]> Cc: Jiri Olsa <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Stephane Eranian <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Vince Weaver <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
size_t mingw_strftime(char *s, size_t max, const char *format, const struct tm *tm) { size_t ret = strftime(s, max, format, tm); if (!ret && errno == EINVAL) die("invalid strftime format: '%s'", format); return ret; }
0
[ "CWE-59", "CWE-61" ]
git
684dd4c2b414bcf648505e74498a608f28de4592
323,143,416,439,950,400,000,000,000,000,000,000,000
9
checkout: fix bug that makes checkout follow symlinks in leading path Before checking out a file, we have to confirm that all of its leading components are real existing directories. And to reduce the number of lstat() calls in this process, we cache the last leading path known to contain only directories. However, when a path collision occurs (e.g. when checking out case-sensitive files in case-insensitive file systems), a cached path might have its file type changed on disk, leaving the cache on an invalid state. Normally, this doesn't bring any bad consequences as we usually check out files in index order, and therefore, by the time the cached path becomes outdated, we no longer need it anyway (because all files in that directory would have already been written). But, there are some users of the checkout machinery that do not always follow the index order. In particular: checkout-index writes the paths in the same order that they appear on the CLI (or stdin); and the delayed checkout feature -- used when a long-running filter process replies with "status=delayed" -- postpones the checkout of some entries, thus modifying the checkout order. When we have to check out an out-of-order entry and the lstat() cache is invalid (due to a previous path collision), checkout_entry() may end up using the invalid data and trusting that the leading components are real directories when, in reality, they are not. In the best case scenario, where the directory was replaced by a regular file, the user will get an error: "fatal: unable to create file 'foo/bar': Not a directory". But if the directory was replaced by a symlink, checkout could actually end up following the symlink and writing the file at a wrong place, even outside the repository. Since delayed checkout is affected by this bug, it could be used by an attacker to write arbitrary files during the clone of a maliciously crafted repository. Some candidate solutions considered were to disable the lstat() cache during unordered checkouts or sort the entries before passing them to the checkout machinery. But both ideas include some performance penalty and they don't future-proof the code against new unordered use cases. Instead, we now manually reset the lstat cache whenever we successfully remove a directory. Note: We are not even checking whether the directory was the same as the lstat cache points to because we might face a scenario where the paths refer to the same location but differ due to case folding, precomposed UTF-8 issues, or the presence of `..` components in the path. Two regression tests, with case-collisions and utf8-collisions, are also added for both checkout-index and delayed checkout. Note: to make the previously mentioned clone attack unfeasible, it would be sufficient to reset the lstat cache only after the remove_subtree() call inside checkout_entry(). This is the place where we would remove a directory whose path collides with the path of another entry that we are currently trying to check out (possibly a symlink). However, in the interest of a thorough fix that does not leave Git open to similar-but-not-identical attack vectors, we decided to intercept all `rmdir()` calls in one fell swoop. This addresses CVE-2021-21300. Co-authored-by: Johannes Schindelin <[email protected]> Signed-off-by: Matheus Tavares <[email protected]>
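A sketch of the rmdir() interception described in the last paragraph: every successful directory removal resets the cached leading-path state, so a later out-of-order checkout can never trust stale data. invalidate_lstat_cache() stands in for the cache-reset helper; treat the exact names as assumptions.

#include <unistd.h>

static int rmdir_resetting_cache(const char *path)
{
	int ret = rmdir(path);

	if (!ret)
		invalidate_lstat_cache();	/* cached path may now be stale */
	return ret;
}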
static zend_object *spl_filesystem_object_new_ex(zend_class_entry *class_type) { spl_filesystem_object *intern; intern = ecalloc(1, sizeof(spl_filesystem_object) + zend_object_properties_size(class_type)); /* intern->type = SPL_FS_INFO; done by set 0 */ intern->file_class = spl_ce_SplFileObject; intern->info_class = spl_ce_SplFileInfo; zend_object_std_init(&intern->std, class_type); object_properties_init(&intern->std, class_type); intern->std.handlers = &spl_filesystem_object_handlers; return &intern->std; }
0
[ "CWE-74" ]
php-src
a5a15965da23c8e97657278fc8dfbf1dfb20c016
246,239,478,456,423,200,000,000,000,000,000,000,000
15
Fix #78863: DirectoryIterator class silently truncates after a null byte Since the constructor of DirectoryIterator and friends is supposed to accept paths (i.e. strings without NUL bytes), we must not accept arbitrary strings.
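The check the fix implies can be sketched in a single helper: a length-counted argument is a valid path only if strlen() over its bytes equals the declared length, i.e. there is no embedded NUL to truncate at. Illustrative only.

#include <stddef.h>
#include <string.h>

/* Nonzero when the length-counted buffer hides an embedded NUL byte,
 * in which case the argument must be rejected, not silently truncated. */
static int path_has_embedded_nul(const char *path, size_t len)
{
	return strlen(path) != len;
}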
miniflow_expand(const struct miniflow *src, struct flow *dst) { memset(dst, 0, sizeof *dst); flow_union_with_miniflow(dst, src); }
0
[ "CWE-400" ]
ovs
79349cbab0b2a755140eedb91833ad2760520a83
20,604,897,447,535,213,000,000,000,000,000,000,000
5
flow: Support extra padding length. Although not required, padding can be optionally added until the packet length is MTU bytes. A packet with extra padding currently fails sanity checks. Vulnerability: CVE-2020-35498 Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.") Reported-by: Joakim Hindersson <[email protected]> Acked-by: Ilya Maximets <[email protected]> Signed-off-by: Flavio Leitner <[email protected]> Signed-off-by: Ilya Maximets <[email protected]>
const set<CString>& CUser::GetAllowedHosts() const { return m_ssAllowedHosts; }
0
[ "CWE-20" ]
znc
64613bc8b6b4adf1e32231f9844d99cd512b8973
228,951,363,225,131,260,000,000,000,000,000,000,000
1
Don't crash if user specified invalid encoding. This is CVE-2019-9917
static inline void switch_fpu_finish(struct fpu *new_fpu) { u32 pkru_val = init_pkru_value; struct pkru_state *pk; if (!static_cpu_has(X86_FEATURE_FPU)) return; set_thread_flag(TIF_NEED_FPU_LOAD); if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) return; /* * PKRU state is switched eagerly because it needs to be valid before we * return to userland e.g. for a copy_to_user() operation. */ if (current->mm) { pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU); if (pk) pkru_val = pk->pkru; } __write_pkru(pkru_val); }
0
[ "CWE-119", "CWE-732", "CWE-787" ]
linux
59c4bd853abcea95eccc167a7d7fd5f1a5f47b98
68,248,021,913,808,370,000,000,000,000,000,000,000
24
x86/fpu: Don't cache access to fpu_fpregs_owner_ctx The state/owner of the FPU is saved to fpu_fpregs_owner_ctx by pointing to the context that is currently loaded. It never changed during the lifetime of a task - it remained stable/constant. After deferred FPU registers loading until return to userland was implemented, the content of fpu_fpregs_owner_ctx may change during preemption and must not be cached. This went unnoticed for some time and was now noticed, in particular since gcc 9 is caching that load in copy_fpstate_to_sigframe() and reusing it in the retry loop: copy_fpstate_to_sigframe() load fpu_fpregs_owner_ctx and save on stack fpregs_lock() copy_fpregs_to_sigframe() /* failed */ fpregs_unlock() *** PREEMPTION, another uses FPU, changes fpu_fpregs_owner_ctx *** fault_in_pages_writeable() /* succeed, retry */ fpregs_lock() __fpregs_load_activate() fpregs_state_valid() /* uses fpu_fpregs_owner_ctx from stack */ copy_fpregs_to_sigframe() /* succeeds, random FPU content */ This is a comparison of the assembly produced by gcc 9, without vs with this patch: | # arch/x86/kernel/fpu/signal.c:173: if (!access_ok(buf, size)) | cmpq %rdx, %rax # tmp183, _4 | jb .L190 #, |-# arch/x86/include/asm/fpu/internal.h:512: return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; |-#APP |-# 512 "arch/x86/include/asm/fpu/internal.h" 1 |- movq %gs:fpu_fpregs_owner_ctx,%rax #, pfo_ret__ |-# 0 "" 2 |-#NO_APP |- movq %rax, -88(%rbp) # pfo_ret__, %sfp … |-# arch/x86/include/asm/fpu/internal.h:512: return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; |- movq -88(%rbp), %rcx # %sfp, pfo_ret__ |- cmpq %rcx, -64(%rbp) # pfo_ret__, %sfp |+# arch/x86/include/asm/fpu/internal.h:512: return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; |+#APP |+# 512 "arch/x86/include/asm/fpu/internal.h" 1 |+ movq %gs:fpu_fpregs_owner_ctx(%rip),%rax # fpu_fpregs_owner_ctx, pfo_ret__ |+# 0 "" 2 |+# arch/x86/include/asm/fpu/internal.h:512: return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; |+#NO_APP |+ cmpq %rax, -64(%rbp) # pfo_ret__, %sfp Use this_cpu_read() instead this_cpu_read_stable() to avoid caching of fpu_fpregs_owner_ctx during preemption points. The Fixes: tag points to the commit where deferred FPU loading was added. Since this commit, the compiler is no longer allowed to move the load of fpu_fpregs_owner_ctx somewhere else / outside of the locked section. A task preemption will change its value and stale content will be observed. [ bp: Massage. ] Debugged-by: Austin Clements <[email protected]> Debugged-by: David Chase <[email protected]> Debugged-by: Ian Lance Taylor <[email protected]> Fixes: 5f409e20b7945 ("x86/fpu: Defer FPU state load until return to userspace") Signed-off-by: Sebastian Andrzej Siewior <[email protected]> Signed-off-by: Borislav Petkov <[email protected]> Reviewed-by: Rik van Riel <[email protected]> Tested-by: Borislav Petkov <[email protected]> Cc: Aubrey Li <[email protected]> Cc: Austin Clements <[email protected]> Cc: Barret Rhoden <[email protected]> Cc: Dave Hansen <[email protected]> Cc: David Chase <[email protected]> Cc: "H. Peter Anvin" <[email protected]> Cc: [email protected] Cc: Ingo Molnar <[email protected]> Cc: Josh Bleecher Snyder <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: x86-ml <[email protected]> Link: https://lkml.kernel.org/r/[email protected] Link: https://bugzilla.kernel.org/show_bug.cgi?id=205663
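The message shows the effect only at the assembly level; at the C level the change is a one-liner in fpregs_state_valid(), reconstructed here from the file/line annotations in the asm diff above (surrounding kernel context assumed).

static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
	/* this_cpu_read(), not this_cpu_read_stable(): the compiler may
	 * not cache fpu_fpregs_owner_ctx across preemption points. */
	return fpu == this_cpu_read(fpu_fpregs_owner_ctx) &&
	       cpu == fpu->last_cpu;
}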
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { pmd_t pmd = *pmdp; pmd_clear(mm, address, pmdp); return pmd; }
0
[ "CWE-264" ]
linux-2.6
1a5a9906d4e8d1976b701f889d8f35d54b928f25
163,726,713,593,665,170,000,000,000,000,000,000,000
8
mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem held in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. 
====== end quote ======= [[email protected]: checkpatch fixes] Reported-by: Ulrich Obergfell <[email protected]> Signed-off-by: Andrea Arcangeli <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Dave Jones <[email protected]> Acked-by: Larry Woodman <[email protected]> Acked-by: Rik van Riel <[email protected]> Cc: <[email protected]> [2.6.38+] Cc: Mark Salter <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
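The fix described in the quoted analysis boils down to reading the pmd once into a local and testing only that local, with a compiler barrier so the value cannot be reloaded from memory between the checks. A simplified sketch of the resulting helper (kernel context assumed):

static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd;	/* single read onto the local stack */

	barrier();		/* keep the compiler from reloading *pmd */

	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}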
static inline bool IsSafeAltName(const char* name, size_t length, bool utf8) { for (size_t i = 0; i < length; i++) { char c = name[i]; switch (c) { case '"': case '\\': // These mess with encoding rules. // Fall through. case ',': // Commas make it impossible to split the list of subject alternative // names unambiguously, which is why we have to escape. // Fall through. case '\'': // Single quotes are unlikely to appear in any legitimate values, but they // could be used to make a value look like it was escaped (i.e., enclosed // in single/double quotes). return false; default: if (utf8) { // In UTF8 strings, we require escaping for any ASCII control character, // but NOT for non-ASCII characters. Note that all bytes of any code // point that consists of more than a single byte have their MSB set. if (static_cast<unsigned char>(c) < ' ' || c == '\x7f') { return false; } } else { // Check if the char is a control character or non-ASCII character. Note // that char may or may not be a signed type. Regardless, non-ASCII // values will always be outside of this range. if (c < ' ' || c > '~') { return false; } } } } return true; }
0
[ "CWE-295" ]
node
466e5415a2b7b3574ab5403acb87e89a94a980d1
204,889,266,348,218,700,000,000,000,000,000,000,000
37
crypto,tls: implement safe x509 GeneralName format This change introduces JSON-compatible escaping rules for strings that include X.509 GeneralName components (see RFC 5280). This non-standard format avoids ambiguities and prevents injection attacks that could previously lead to X.509 certificates being accepted even though they were not valid for the target hostname. These changes affect the format of subject alternative names and the format of authority information access. The checkServerIdentity function has been modified to safely handle the new format, eliminating the possibility of injecting subject alternative names into the verification logic. Because each subject alternative name is only encoded as a JSON string literal if necessary for security purposes, this change will only be visible in rare cases. This addresses CVE-2021-44532. CVE-ID: CVE-2021-44532 PR-URL: https://github.com/nodejs-private/node-private/pull/300 Reviewed-By: Michael Dawson <[email protected]> Reviewed-By: Rich Trott <[email protected]>
static int cap_task_setrlimit(unsigned int resource, struct rlimit *new_rlim) { return 0; }
0
[]
linux-2.6
ee18d64c1f632043a02e6f5ba5e045bb26a5465f
187,609,585,767,027,470,000,000,000,000,000,000,000
4
KEYS: Add a keyctl to install a process's session keyring on its parent [try #6] Add a keyctl to install a process's session keyring onto its parent. This replaces the parent's session keyring. Because the COW credential code does not permit one process to change another process's credentials directly, the change is deferred until userspace next starts executing again. Normally this will be after a wait*() syscall. To support this, three new security hooks have been provided: cred_alloc_blank() to allocate unset security creds, cred_transfer() to fill in the blank security creds and key_session_to_parent() - which asks the LSM if the process may replace its parent's session keyring. The replacement may only happen if the process has the same ownership details as its parent, and the process has LINK permission on the session keyring, and the session keyring is owned by the process, and the LSM permits it. Note that this requires alteration to each architecture's notify_resume path. This has been done for all arches barring blackfin, m68k* and xtensa, all of which need assembly alteration to support TIF_NOTIFY_RESUME. This allows the replacement to be performed at the point the parent process resumes userspace execution. This allows the userspace AFS pioctl emulation to fully emulate newpag() and the VIOCSETTOK and VIOCSETTOK2 pioctls, all of which require the ability to alter the parent process's PAG membership. However, since kAFS doesn't use PAGs per se, but rather dumps the keys into the session keyring, the session keyring of the parent must be replaced if, for example, VIOCSETTOK is passed the newpag flag. This can be tested with the following program: #include <stdio.h> #include <stdlib.h> #include <keyutils.h> #define KEYCTL_SESSION_TO_PARENT 18 #define OSERROR(X, S) do { if ((long)(X) == -1) { perror(S); exit(1); } } while(0) int main(int argc, char **argv) { key_serial_t keyring, key; long ret; keyring = keyctl_join_session_keyring(argv[1]); OSERROR(keyring, "keyctl_join_session_keyring"); key = add_key("user", "a", "b", 1, keyring); OSERROR(key, "add_key"); ret = keyctl(KEYCTL_SESSION_TO_PARENT); OSERROR(ret, "KEYCTL_SESSION_TO_PARENT"); return 0; } Compiled and linked with -lkeyutils, you should see something like: [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 355907932 --alswrv 4043 -1 \_ keyring: _uid.4043 [dhowells@andromeda ~]$ /tmp/newpag [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: _ses 1055658746 --alswrv 4043 4043 \_ user: a [dhowells@andromeda ~]$ /tmp/newpag hello [dhowells@andromeda ~]$ keyctl show Session Keyring -3 --alswrv 4043 4043 keyring: hello 340417692 --alswrv 4043 4043 \_ user: a Where the test program creates a new session keyring, sticks a user key named 'a' into it and then installs it on its parent. Signed-off-by: David Howells <[email protected]> Signed-off-by: James Morris <[email protected]>
static void event_server_info(IRC_SERVER_REC *server, const char *data) { char *params, *ircd_version, *usermodes, *chanmodes; g_return_if_fail(server != NULL); params = event_get_params(data, 5, NULL, NULL, &ircd_version, &usermodes, &chanmodes); /* check if server understands I and e channel modes */ if (strchr(chanmodes, 'I') && strchr(chanmodes, 'e')) server->emode_known = TRUE; /* save server version */ g_free_not_null(server->version); server->version = g_strdup(ircd_version); g_free(params); }
0
[ "CWE-416" ]
irssi
43e44d553d44e313003cee87e6ea5e24d68b84a1
81,408,643,688,066,800,000,000,000,000,000,000,000
18
Merge branch 'security' into 'master' Security Closes GL#12, GL#13, GL#14, GL#15, GL#16 See merge request irssi/irssi!23
static inline MagickOffsetType ReadPixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PREAD) count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX)); #else count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); }
0
[ "CWE-119", "CWE-787" ]
ImageMagick
aecd0ada163a4d6c769cec178955d5f3e9316f2f
307,166,086,621,905,050,000,000,000,000,000,000,000
33
Set pixel cache to undefined if any resource limit is exceeded
static int ssl_parse_signature_algorithms_ext( ssl_context *ssl, const unsigned char *buf, size_t len ) { size_t sig_alg_list_size; const unsigned char *p; sig_alg_list_size = ( ( buf[0] << 8 ) | ( buf[1] ) ); if( sig_alg_list_size + 2 != len || sig_alg_list_size %2 != 0 ) { SSL_DEBUG_MSG( 1, ( "bad client hello message" ) ); return( POLARSSL_ERR_SSL_BAD_HS_CLIENT_HELLO ); } p = buf + 2; while( sig_alg_list_size > 0 ) { if( p[1] != SSL_SIG_RSA ) { sig_alg_list_size -= 2; p += 2; continue; } #if defined(POLARSSL_SHA4_C) if( p[0] == SSL_HASH_SHA512 ) { ssl->handshake->sig_alg = SSL_HASH_SHA512; break; } if( p[0] == SSL_HASH_SHA384 ) { ssl->handshake->sig_alg = SSL_HASH_SHA384; break; } #endif #if defined(POLARSSL_SHA2_C) if( p[0] == SSL_HASH_SHA256 ) { ssl->handshake->sig_alg = SSL_HASH_SHA256; break; } if( p[0] == SSL_HASH_SHA224 ) { ssl->handshake->sig_alg = SSL_HASH_SHA224; break; } #endif if( p[0] == SSL_HASH_SHA1 ) { ssl->handshake->sig_alg = SSL_HASH_SHA1; break; } if( p[0] == SSL_HASH_MD5 ) { ssl->handshake->sig_alg = SSL_HASH_MD5; break; } sig_alg_list_size -= 2; p += 2; } SSL_DEBUG_MSG( 3, ( "client hello v3, signature_algorithm ext: %d", ssl->handshake->sig_alg ) ); return( 0 ); }
0
[ "CWE-310" ]
polarssl
43f9799ce61c6392a014d0a2ea136b4b3a9ee194
10,573,084,394,159,315,000,000,000,000,000,000,000
68
RSA blinding on CRT operations to counter timing attacks
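The commit title is terse, so here is a sketch of what RSA blinding means in practice: multiply the input by r^e mod N before the CRT exponentiation and by r^-1 mod N afterwards, so timing no longer correlates with the attacker-chosen input. MPI call names follow PolarSSL conventions, but the routine itself is illustrative, not the upstream diff.

/*
 * Illustrative RSA blinding around a CRT private-key operation.
 * T is the input/output value; r is random with gcd(r, N) == 1 and
 * rinv = r^-1 mod N. Error checks omitted for brevity.
 */
static void rsa_blinded_private_sketch(rsa_context *ctx, mpi *T,
                                       mpi *r, mpi *rinv)
{
    mpi re;

    mpi_init(&re);

    mpi_exp_mod(&re, r, &ctx->E, &ctx->N, NULL);  /* re = r^e mod N    */
    mpi_mul_mpi(T, T, &re);                       /* blind: T *= r^e   */
    mpi_mod_mpi(T, T, &ctx->N);

    /* ... the usual CRT exponentiation runs on the blinded T ... */

    mpi_mul_mpi(T, T, rinv);                      /* unblind: T *= r^-1 */
    mpi_mod_mpi(T, T, &ctx->N);

    mpi_free(&re);
}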
static bool manager_check_idle(void *userdata) { Manager *m = userdata; Link *link; Iterator i; assert(m); HASHMAP_FOREACH(link, m->links, i) { /* we are not woken on udev activity, so let's just wait for the * pending udev event */ if (link->state == LINK_STATE_PENDING) return false; if (!link->network) continue; /* we are not woken on network activity, so let's stay around */ if (link_lldp_enabled(link) || link_ipv4ll_enabled(link) || link_dhcp4_server_enabled(link) || link_dhcp4_enabled(link) || link_dhcp6_enabled(link) || link_ipv6_accept_ra_enabled(link)) return false; } return true; }
0
[ "CWE-120" ]
systemd
f5a8c43f39937d97c9ed75e3fe8621945b42b0db
218,489,670,730,873,260,000,000,000,000,000,000,000
28
networkd: IPv6 router discovery - follow IPv6AcceptRouterAdvertisements= The previous behavior: When DHCPv6 was enabled, router discovery was performed first, and then DHCPv6 was enabled only if the relevant flags were passed in the Router Advertisement message. Moreover, router discovery was performed even if AcceptRouterAdvertisements=false; and even if router advertisements were accepted (by the kernel), the flags indicating that DHCPv6 should be performed were ignored. New behavior: If RouterAdvertisements are accepted, and either no routers are found, or an advertisement is received indicating DHCPv6 should be performed, the DHCPv6 client is started. Moreover, the DHCP option now truly enables the DHCPv6 client regardless of router discovery (though it will probably not be very useful to get a lease without any routes, this seems the more consistent approach). The recommended default setting should be to set DHCP=ipv4 and to leave IPv6AcceptRouterAdvertisements unset.
static void sanitize_dead_code(struct bpf_verifier_env *env) { struct bpf_insn_aux_data *aux_data = env->insn_aux_data; struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); struct bpf_insn *insn = env->prog->insnsi; const int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++) { if (aux_data[i].seen) continue; memcpy(insn + i, &trap, sizeof(trap)); } }
0
[ "CWE-125" ]
linux
b799207e1e1816b09e7a5920fbb2d5fcf6edd681
65,040,683,058,580,090,000,000,000,000,000,000,000
14
bpf: 32-bit RSH verification must truncate input before the ALU op When I wrote commit 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification"), I assumed that, in order to emulate 64-bit arithmetic with 32-bit logic, it is sufficient to just truncate the output to 32 bits; and so I just moved the register size coercion that used to be at the start of the function to the end of the function. That assumption is true for almost every op, but not for 32-bit right shifts, because those can propagate information towards the least significant bit. Fix it by always truncating inputs for 32-bit ops to 32 bits. Also get rid of the coerce_reg_to_size() after the ALU op, since that has no effect. Fixes: 468f6eafa6c4 ("bpf: fix 32-bit ALU op verification") Acked-by: Daniel Borkmann <[email protected]> Signed-off-by: Jann Horn <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]>
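The ordering the fix establishes, sketched against the verifier's own helper: truncate both inputs to 32 bits before modeling the operation, because a right shift propagates high bits toward the least significant bit; truncating only the output (the old behavior) is not enough. Simplified from the real verifier code.

static void adjust_scalar_32bit_sketch(struct bpf_reg_state *dst_reg,
				       struct bpf_reg_state *src_reg)
{
	coerce_reg_to_size(dst_reg, 4);	/* truncate BEFORE the ALU op */
	coerce_reg_to_size(src_reg, 4);
	/* ... now model BPF_RSH (and friends) on the truncated values;
	 * the coercion that used to run after the op is redundant. */
}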
bool tipc_link_is_blocked(struct tipc_link *l) { return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER); }
0
[ "CWE-787" ]
linux
9aa422ad326634b76309e8ff342c246800621216
33,142,046,434,042,680,000,000,000,000,000,000,000
4
tipc: improve size validations for received domain records The function tipc_mon_rcv() allows a node to receive and process domain_record structs from peer nodes to track their views of the network topology. This patch verifies that the number of members in a received domain record does not exceed the limit defined by MAX_MON_DOMAIN, something that may otherwise lead to a stack overflow. tipc_mon_rcv() is called from the function tipc_link_proto_rcv(), where we are reading a 32 bit message data length field into a uint16. To avert any risk of bit overflow, we add an extra sanity check for this in that function. We cannot see that happen with the current code, but future designers being unaware of this risk, may introduce it by allowing delivery of very large (> 64k) sk buffers from the bearer layer. This potential problem was identified by Eric Dumazet. This fixes CVE-2022-0435 Reported-by: Samuel Page <[email protected]> Reported-by: Eric Dumazet <[email protected]> Fixes: 35c55c9877f8 ("tipc: add neighbor monitoring framework") Signed-off-by: Jon Maloy <[email protected]> Reviewed-by: Xin Long <[email protected]> Reviewed-by: Samuel Page <[email protected]> Reviewed-by: Eric Dumazet <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
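A sketch of the validation the message describes: before copying a received record into the on-stack domain structure, bound the advertised member count by MAX_MON_DOMAIN and cross-check it against the length field. Field and helper names follow the tipc monitor code, but treat this as a sketch, not the exact diff.

static bool dom_rec_valid_sketch(struct tipc_mon_domain *arrv_dom, int dlen)
{
	u16 member_cnt = ntohs(arrv_dom->member_cnt);

	if (member_cnt > MAX_MON_DOMAIN)
		return false;	/* would overflow the stack copy */
	if (dlen < dom_rec_len(arrv_dom, 0) ||
	    dlen != dom_rec_len(arrv_dom, member_cnt))
		return false;	/* inconsistent length field */
	return true;
}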
static inline void exit_io_context(struct task_struct *task) { }
0
[ "CWE-20", "CWE-703", "CWE-400" ]
linux
b69f2292063d2caf37ca9aec7d63ded203701bf3
265,828,953,028,305,440,000,000,000,000,000,000,000
3
block: Fix io_context leak after failure of clone with CLONE_IO With CLONE_IO, parent's io_context->nr_tasks is incremented, but never decremented whenever copy_process() fails afterwards, which prevents exit_io_context() from calling IO schedulers exit functions. Give a task_struct to exit_io_context(), and call exit_io_context() instead of put_io_context() in copy_process() cleanup path. Signed-off-by: Louis Rilling <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
void KrecipesView::addCategoriesPanelAction( KAction * action ) { categoriesPanel->addAction( action ); }
0
[]
krecipes
cd1490fb5fe82cbe9172a43be13298001b446ecd
200,640,752,617,661,600,000,000,000,000,000,000,000
4
Use WebKit instead of KHTML for printing recipes, fixes sourceforge #2990118 and #2960140. svn path=/trunk/extragear/utils/krecipes/; revision=1137824
parse_service(const char *svc_name) { char lin[MAXBUF]; SERVICE *res; BACKEND *be; MATCHER *m; int ign_case; if((res = (SERVICE *)malloc(sizeof(SERVICE))) == NULL) conf_err("Service config: out of memory - aborted"); memset(res, 0, sizeof(SERVICE)); res->sess_type = SESS_NONE; res->dynscale = dynscale; pthread_mutex_init(&res->mut, NULL); if(svc_name) strncpy(res->name, svc_name, KEY_SIZE); #if OPENSSL_VERSION_NUMBER >= 0x10000000L if((res->sessions = LHM_lh_new(TABNODE, t)) == NULL) #else if((res->sessions = lh_new(LHASH_HASH_FN(t_hash), LHASH_COMP_FN(t_cmp))) == NULL) #endif conf_err("lh_new failed - aborted"); ign_case = ignore_case; while(conf_fgets(lin, MAXBUF)) { if(strlen(lin) > 0 && lin[strlen(lin) - 1] == '\n') lin[strlen(lin) - 1] = '\0'; if(!regexec(&URL, lin, 4, matches, 0)) { if(res->url) { for(m = res->url; m->next; m = m->next) ; if((m->next = (MATCHER *)malloc(sizeof(MATCHER))) == NULL) conf_err("URL config: out of memory - aborted"); m = m->next; } else { if((res->url = (MATCHER *)malloc(sizeof(MATCHER))) == NULL) conf_err("URL config: out of memory - aborted"); m = res->url; } memset(m, 0, sizeof(MATCHER)); lin[matches[1].rm_eo] = '\0'; if(regcomp(&m->pat, lin + matches[1].rm_so, REG_NEWLINE | REG_EXTENDED | (ign_case? REG_ICASE: 0))) conf_err("URL bad pattern - aborted"); } else if(!regexec(&HeadRequire, lin, 4, matches, 0)) { if(res->req_head) { for(m = res->req_head; m->next; m = m->next) ; if((m->next = (MATCHER *)malloc(sizeof(MATCHER))) == NULL) conf_err("HeadRequire config: out of memory - aborted"); m = m->next; } else { if((res->req_head = (MATCHER *)malloc(sizeof(MATCHER))) == NULL) conf_err("HeadRequire config: out of memory - aborted"); m = res->req_head; } memset(m, 0, sizeof(MATCHER)); lin[matches[1].rm_eo] = '\0'; if(regcomp(&m->pat, lin + matches[1].rm_so, REG_ICASE | REG_NEWLINE | REG_EXTENDED)) conf_err("HeadRequire bad pattern - aborted"); } else if(!regexec(&HeadDeny, lin, 4, matches, 0)) { if(res->deny_head) { for(m = res->deny_head; m->next; m = m->next) ; if((m->next = (MATCHER *)malloc(sizeof(MATCHER))) == NULL) conf_err("HeadDeny config: out of memory - aborted"); m = m->next; } else { if((res->deny_head = (MATCHER *)malloc(sizeof(MATCHER))) == NULL) conf_err("HeadDeny config: out of memory - aborted"); m = res->deny_head; } memset(m, 0, sizeof(MATCHER)); lin[matches[1].rm_eo] = '\0'; if(regcomp(&m->pat, lin + matches[1].rm_so, REG_ICASE | REG_NEWLINE | REG_EXTENDED)) conf_err("HeadDeny bad pattern - aborted"); } else if(!regexec(&Redirect, lin, 4, matches, 0)) { if(res->backends) { for(be = res->backends; be->next; be = be->next) ; if((be->next = (BACKEND *)malloc(sizeof(BACKEND))) == NULL) conf_err("Redirect config: out of memory - aborted"); be = be->next; } else { if((res->backends = (BACKEND *)malloc(sizeof(BACKEND))) == NULL) conf_err("Redirect config: out of memory - aborted"); be = res->backends; } memset(be, 0, sizeof(BACKEND)); be->be_type = 302; be->priority = 1; be->alive = 1; pthread_mutex_init(& be->mut, NULL); lin[matches[1].rm_eo] = '\0'; if((be->url = strdup(lin + matches[1].rm_so)) == NULL) conf_err("Redirector config: out of memory - aborted"); /* split the URL into its fields */ if(regexec(&LOCATION, be->url, 4, matches, 0)) conf_err("Redirect bad URL - aborted"); if((be->redir_req = matches[3].rm_eo - matches[3].rm_so) == 1) /* the path is a single '/', so remove it */ be->url[matches[3].rm_so] = '\0'; } else if(!regexec(&RedirectN, lin, 4, matches, 0)) { if(res->backends) { for(be = res->backends; be->next; be = be->next) ; 
if((be->next = (BACKEND *)malloc(sizeof(BACKEND))) == NULL) conf_err("Redirect config: out of memory - aborted"); be = be->next; } else { if((res->backends = (BACKEND *)malloc(sizeof(BACKEND))) == NULL) conf_err("Redirect config: out of memory - aborted"); be = res->backends; } memset(be, 0, sizeof(BACKEND)); be->be_type = atoi(lin + matches[1].rm_so); be->priority = 1; be->alive = 1; pthread_mutex_init(& be->mut, NULL); lin[matches[2].rm_eo] = '\0'; if((be->url = strdup(lin + matches[2].rm_so)) == NULL) conf_err("Redirector config: out of memory - aborted"); /* split the URL into its fields */ if(regexec(&LOCATION, be->url, 4, matches, 0)) conf_err("Redirect bad URL - aborted"); if((be->redir_req = matches[3].rm_eo - matches[3].rm_so) == 1) /* the path is a single '/', so remove it */ be->url[matches[3].rm_so] = '\0'; } else if(!regexec(&BackEnd, lin, 4, matches, 0)) { if(res->backends) { for(be = res->backends; be->next; be = be->next) ; be->next = parse_be(0); } else res->backends = parse_be(0); } else if(!regexec(&Emergency, lin, 4, matches, 0)) { res->emergency = parse_be(1); } else if(!regexec(&Session, lin, 4, matches, 0)) { parse_sess(res); } else if(!regexec(&DynScale, lin, 4, matches, 0)) { res->dynscale = atoi(lin + matches[1].rm_so); } else if(!regexec(&IgnoreCase, lin, 4, matches, 0)) { ign_case = atoi(lin + matches[1].rm_so); } else if(!regexec(&Disabled, lin, 4, matches, 0)) { res->disabled = atoi(lin + matches[1].rm_so); } else if(!regexec(&End, lin, 4, matches, 0)) { for(be = res->backends; be; be = be->next) res->tot_pri += be->priority; res->abs_pri = res->tot_pri; return res; } else { conf_err("unknown directive"); } } conf_err("Service premature EOF"); return NULL; }
0
[]
pound
a0c52c542ca9620a96750f9877b26bf4c84aef1b
4,233,580,930,030,371,600,000,000,000,000,000,000
156
SSL Compression Disable patch for 2.6f This patch disables SSL/TLS compression entirely. There is no config option. This prevents CRIME attacks against SSL. Note that HTTP compression is still an option. Test your server at https://www.ssllabs.com/ssldb/ Original patch by Hereward Cooper <[email protected]> Openssl 0.9.8 disabling ideas borrowed from Igor Sysoev's code in nginx.
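What "disables SSL/TLS compression entirely" looks like against the OpenSSL API, as a sketch: SSL_OP_NO_COMPRESSION exists from OpenSSL 1.0.0 on, while on 0.9.8 the global compression-method stack is emptied instead (the nginx-style approach credited above).

#include <openssl/ssl.h>

static void disable_tls_compression(SSL_CTX *ctx)
{
#ifdef SSL_OP_NO_COMPRESSION
    SSL_CTX_set_options(ctx, SSL_OP_NO_COMPRESSION);
#else
    /* 0.9.8: shrink the global compression-methods stack to zero */
    sk_SSL_COMP_zero(SSL_COMP_get_compression_methods());
#endif
}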
UnicodeString::UnicodeString(int32_t capacity, UChar32 c, int32_t count) { fUnion.fFields.fLengthAndFlags = 0; if(count <= 0 || (uint32_t)c > 0x10ffff) { // just allocate and do not do anything else allocate(capacity); } else if(c <= 0xffff) { int32_t length = count; if(capacity < length) { capacity = length; } if(allocate(capacity)) { UChar *array = getArrayStart(); UChar unit = (UChar)c; for(int32_t i = 0; i < length; ++i) { array[i] = unit; } setLength(length); } } else { // supplementary code point, write surrogate pairs if(count > (INT32_MAX / 2)) { // We would get more than 2G UChars. allocate(capacity); return; } int32_t length = count * 2; if(capacity < length) { capacity = length; } if(allocate(capacity)) { UChar *array = getArrayStart(); UChar lead = U16_LEAD(c); UChar trail = U16_TRAIL(c); for(int32_t i = 0; i < length; i += 2) { array[i] = lead; array[i + 1] = trail; } setLength(length); } } }
0
[ "CWE-190", "CWE-787" ]
icu
b7d08bc04a4296982fcef8b6b8a354a9e4e7afca
16,925,462,953,984,902,000,000,000,000,000,000,000
40
ICU-20958 Prevent SEGV_MAPERR in append See #971
std::string index(SHalfloop_iterator l) const { return SLI(l,verbose); }
0
[ "CWE-125" ]
cgal
5a1ab45058112f8647c14c02f58905ecc597ec76
290,958,106,733,336,250,000,000,000,000,000,000,000
2
Fix Nef_3
Status GetAxisForPackAndUnpack(InferenceContext* c, int32_t rank_after_pack, int32* axis) { TF_RETURN_IF_ERROR(c->GetAttr("axis", axis)); if (*axis < -1 * rank_after_pack || *axis >= rank_after_pack) { return errors::InvalidArgument("Invalid axis: ", *axis, "; must be in [", -1 * rank_after_pack, ",", rank_after_pack, ")"); } if (*axis < 0) *axis = (rank_after_pack + *axis); return Status::OK(); }
0
[ "CWE-703", "CWE-787" ]
tensorflow
c79ba87153ee343401dbe9d1954d7f79e521eb14
284,060,641,152,344,340,000,000,000,000,000,000,000
11
Make Transpose's shape inference function validate that negative `perm` values are within the tensor's rank. PiperOrigin-RevId: 403252853 Change-Id: Ia6b31b45b237312668bb31c2c3b3c7bbce2d2610
static int ZEND_FASTCALL ZEND_IS_IDENTICAL_SPEC_VAR_VAR_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); zend_free_op free_op1, free_op2; is_identical_function(&EX_T(opline->result.u.var).tmp_var, _get_zval_ptr_var(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC), _get_zval_ptr_var(&opline->op2, EX(Ts), &free_op2 TSRMLS_CC) TSRMLS_CC); if (free_op1.var) {zval_ptr_dtor(&free_op1.var);}; if (free_op2.var) {zval_ptr_dtor(&free_op2.var);}; ZEND_VM_NEXT_OPCODE(); }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
104,171,096,256,793,830,000,000,000,000,000,000,000
12
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
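Note: the macro this message mentions amounts to a simple embedded-NUL check; a minimal sketch (the macro name matches what PHP later shipped, cited here as an assumption):

#include <string.h>

/* A path argument arrives with an explicit length; if strlen() stops
 * short of that length, the string contains an embedded '\0'. */
#define CHECK_NULL_PATH(p, l) (strlen(p) == (size_t)(l))

/* usage sketch inside a file-opening function:
 *     if (!CHECK_NULL_PATH(filename, filename_len))
 *         RETURN_FALSE;
 */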
SSLNetVConnection::protocol_contains(std::string_view prefix) const { const char *retval = nullptr; std::string_view tag = map_tls_protocol_to_tag(getSSLProtocol()); if (prefix.size() <= tag.size() && strncmp(tag.data(), prefix.data(), prefix.size()) == 0) { retval = tag.data(); } else { retval = super::protocol_contains(prefix); } return retval; }
0
[ "CWE-284" ]
trafficserver
d3f36f79820ea10c26573c742b1bbc370c351716
173,122,298,791,034,520,000,000,000,000,000,000,000
11
Bug fix in origin connection handling (#8731) Co-authored-by: Takuya Kitano <[email protected]>
onig_get_syntax_op(OnigSyntaxType* syntax) { return syntax->op; }
0
[ "CWE-125" ]
oniguruma
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
337,737,244,650,794,800,000,000,000,000,000,000,000
4
onig-5.9.2
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) { struct net_device *dev = skb->dev; int fhoff, nhoff, ret; struct frag_hdr *fhdr; struct frag_queue *fq; struct ipv6hdr *hdr; u8 prevhdr; /* Jumbo payload inhibits frag. header */ if (ipv6_hdr(skb)->payload_len == 0) { pr_debug("payload len = 0\n"); return 0; } if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0) return 0; if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr))) return -ENOMEM; skb_set_transport_header(skb, fhoff); hdr = ipv6_hdr(skb); fhdr = (struct frag_hdr *)skb_transport_header(skb); fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); if (fq == NULL) { pr_debug("Can't find and can't create new queue\n"); return -ENOMEM; } spin_lock_bh(&fq->q.lock); if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) { ret = -EINVAL; goto out_unlock; } /* after queue has assumed skb ownership, only 0 or -EINPROGRESS * must be returned. */ ret = -EINPROGRESS; if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && fq->q.meat == fq->q.len && nf_ct_frag6_reasm(fq, skb, dev)) ret = 0; out_unlock: spin_unlock_bh(&fq->q.lock); inet_frag_put(&fq->q, &nf_frags); return ret; }
0
[ "CWE-787" ]
linux
9b57da0630c9fd36ed7a20fc0f98dc82cc0777fa
123,456,595,361,449,880,000,000,000,000,000,000,000
53
netfilter: ipv6: nf_defrag: drop mangled skb on reasm error Dmitry Vyukov reported GPF in network stack that Andrey traced down to negative nh offset in nf_ct_frag6_queue(). Problem is that all network headers before fragment header are pulled. Normal ipv6 reassembly will drop the skb when errors occur further down the line. netfilter doesn't do this, and instead passed the original fragment along. That was also fine back when netfilter ipv6 defrag worked with cloned fragments, as the original, pristine fragment was passed on. So we either have to undo the pull op, or discard such fragments. Since they're malformed after all (e.g. overlapping fragment) it seems preferable to just drop them. Same for temporary errors -- it doesn't make sense to accept (and perhaps forward!) only some fragments of the same datagram. Fixes: 029f7f3b8701cc7ac ("netfilter: ipv6: nf_defrag: avoid/free clone operations") Reported-by: Dmitry Vyukov <[email protected]> Debugged-by: Andrey Konovalov <[email protected]> Diagnosed-by: Eric Dumazet <[email protected]> Signed-off-by: Florian Westphal <[email protected]> Acked-by: Eric Dumazet <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
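Note: a sketch of the drop semantics the message describes, as seen from the defrag hook that calls nf_ct_frag6_gather() (surrounding hook code abbreviated):

err = nf_ct_frag6_gather(net, skb, user);

/* queued fragments return -EINPROGRESS; the queue now owns the skb */
if (err == -EINPROGRESS)
        return NF_STOLEN;

/* 0 lets a fully reassembled packet pass; any other error marks a
 * mangled or overlapping fragment, which is dropped rather than
 * passed along */
return err == 0 ? NF_ACCEPT : NF_DROP;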
image_is_64_bit(EFI_IMAGE_OPTIONAL_HEADER_UNION *PEHdr) { /* .Magic is the same offset in all cases */ if (PEHdr->Pe32Plus.OptionalHeader.Magic == EFI_IMAGE_NT_OPTIONAL_HDR64_MAGIC) return 1; return 0; }
0
[ "CWE-787" ]
shim
159151b6649008793d6204a34d7b9c41221fb4b0
108,723,114,576,730,220,000,000,000,000,000,000,000
8
Also avoid CVE-2022-28737 in verify_image() PR 446 ("Add verify_image") duplicates some of the code affected by Chris Coulson's defense in depth patch against CVE-2022-28737 ("pe: Perform image verification earlier when loading grub"). This patch makes the same change to the new function. Signed-off-by: Peter Jones <[email protected]>
flow_set_vlan_pcp(struct flow *flow, uint8_t pcp) { pcp &= 0x07; flow->vlans[0].tci &= ~htons(VLAN_PCP_MASK); flow->vlans[0].tci |= htons((pcp << VLAN_PCP_SHIFT) | VLAN_CFI); }
0
[ "CWE-400" ]
ovs
79cec1a736b91548ec882d840986a11affda1068
240,007,937,661,970,100,000,000,000,000,000,000,000
6
flow: Support extra padding length. Although not required, padding can be optionally added until the packet length is MTU bytes. A packet with extra padding currently fails sanity checks. Vulnerability: CVE-2020-35498 Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.") Reported-by: Joakim Hindersson <[email protected]> Acked-by: Ilya Maximets <[email protected]> Signed-off-by: Flavio Leitner <[email protected]> Signed-off-by: Ilya Maximets <[email protected]>
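Note: a hedged sketch of the length handling the message describes, in the spirit of the extractor's sanity checks (all names are assumptions): frames shorter than the L3 total length are still rejected, while trailing padding up to the MTU is tolerated and ignored.

if (size < ip_total_len) {
        return false;           /* genuinely truncated packet */
}
/* size > ip_total_len is legal: trailing padding up to the MTU.
 * Process only the bytes the IP header accounts for. */
size = ip_total_len;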
DwaCompressor::LossyDctDecoderBase::execute () { int numComp = _rowPtrs.size(); int lastNonZero = 0; int numBlocksX = (int) ceil ((float)_width / 8.0f); int numBlocksY = (int) ceil ((float)_height / 8.0f); int leftoverX = _width - (numBlocksX-1) * 8; int leftoverY = _height - (numBlocksY-1) * 8; int numFullBlocksX = (int)floor ((float)_width / 8.0f); unsigned short tmpShortNative = 0; unsigned short tmpShortXdr = 0; const char *tmpConstCharPtr = 0; unsigned short *currAcComp = (unsigned short *)_packedAc; std::vector<unsigned short *> currDcComp (_rowPtrs.size()); std::vector<SimdAlignedBuffer64us> halfZigBlock (_rowPtrs.size()); if (_type.size() != _rowPtrs.size()) throw Iex::BaseExc ("Row pointers and types mismatch in count"); if ((_rowPtrs.size() != 3) && (_rowPtrs.size() != 1)) throw Iex::NoImplExc ("Only 1 and 3 channel encoding is supported"); _dctData.resize(numComp); // // Allocate a temp aligned buffer to hold a rows worth of full // 8x8 half-float blocks // unsigned char *rowBlockHandle = new unsigned char [numComp * numBlocksX * 64 * sizeof(unsigned short) + _SSE_ALIGNMENT]; unsigned short *rowBlock[3]; rowBlock[0] = (unsigned short*)rowBlockHandle; for (int i = 0; i < _SSE_ALIGNMENT; ++i) { if (((size_t)(rowBlockHandle + i) & _SSE_ALIGNMENT_MASK) == 0) rowBlock[0] = (unsigned short *)(rowBlockHandle + i); } for (int comp = 1; comp < numComp; ++comp) rowBlock[comp] = rowBlock[comp - 1] + numBlocksX * 64; // // Pack DC components together by common plane, so we can get // a little more out of differencing them. We'll always have // one component per block, so we can computed offsets. // currDcComp[0] = (unsigned short *)_packedDc; for (unsigned int comp = 1; comp < numComp; ++comp) currDcComp[comp] = currDcComp[comp - 1] + numBlocksX * numBlocksY; for (int blocky = 0; blocky < numBlocksY; ++blocky) { int maxY = 8; if (blocky == numBlocksY-1) maxY = leftoverY; int maxX = 8; for (int blockx = 0; blockx < numBlocksX; ++blockx) { if (blockx == numBlocksX-1) maxX = leftoverX; // // If we can detect that the block is constant values // (all components only have DC values, and all AC is 0), // we can do everything only on 1 value, instead of all // 64. // // This won't really help for regular images, but it is // meant more for layers with large swaths of black // bool blockIsConstant = true; for (unsigned int comp = 0; comp < numComp; ++comp) { // // DC component is stored separately // #ifdef IMF_HAVE_SSE2 { __m128i *dst = (__m128i*)halfZigBlock[comp]._buffer; dst[7] = _mm_setzero_si128(); dst[6] = _mm_setzero_si128(); dst[5] = _mm_setzero_si128(); dst[4] = _mm_setzero_si128(); dst[3] = _mm_setzero_si128(); dst[2] = _mm_setzero_si128(); dst[1] = _mm_setzero_si128(); dst[0] = _mm_insert_epi16 (_mm_setzero_si128(), *currDcComp[comp]++, 0); } #else /* IMF_HAVE_SSE2 */ memset (halfZigBlock[comp]._buffer, 0, 64 * 2); halfZigBlock[comp]._buffer[0] = *currDcComp[comp]++; #endif /* IMF_HAVE_SSE2 */ _packedDcCount++; // // UnRLE the AC. 
This will modify currAcComp // lastNonZero = unRleAc (currAcComp, halfZigBlock[comp]._buffer); // // Convert from XDR to NATIVE // if (!_isNativeXdr) { for (int i = 0; i < 64; ++i) { tmpShortXdr = halfZigBlock[comp]._buffer[i]; tmpConstCharPtr = (const char *)&tmpShortXdr; Xdr::read<CharPtrIO> (tmpConstCharPtr, tmpShortNative); halfZigBlock[comp]._buffer[i] = tmpShortNative; } } if (lastNonZero == 0) { // // DC only case - AC components are all 0 // half h; h.setBits (halfZigBlock[comp]._buffer[0]); _dctData[comp]._buffer[0] = (float)h; dctInverse8x8DcOnly (_dctData[comp]._buffer); } else { // // We have some AC components that are non-zero. // Can't use the 'constant block' optimization // blockIsConstant = false; // // Un-Zig zag // (*fromHalfZigZag) (halfZigBlock[comp]._buffer, _dctData[comp]._buffer); // // Zig-Zag indices in normal layout are as follows: // // 0 1 3 6 10 15 21 28 // 2 4 7 11 16 22 29 36 // 5 8 12 17 23 30 37 43 // 9 13 18 24 31 38 44 49 // 14 19 25 32 39 45 50 54 // 20 26 33 40 46 51 55 58 // 27 34 41 47 52 56 59 61 // 35 42 48 53 57 60 62 63 // // If lastNonZero is less than the first item on // each row, we know that the whole row is zero and // can be skipped in the row-oriented part of the // iDCT. // // The unrolled logic here is: // // if lastNonZero < rowStartIdx[i], // zeroedRows = rowsEmpty[i] // // where: // // const int rowStartIdx[] = {2, 5, 9, 14, 20, 27, 35}; // const int rowsEmpty[] = {7, 6, 5, 4, 3, 2, 1}; // if (lastNonZero < 2) dctInverse8x8_7(_dctData[comp]._buffer); else if (lastNonZero < 5) dctInverse8x8_6(_dctData[comp]._buffer); else if (lastNonZero < 9) dctInverse8x8_5(_dctData[comp]._buffer); else if (lastNonZero < 14) dctInverse8x8_4(_dctData[comp]._buffer); else if (lastNonZero < 20) dctInverse8x8_3(_dctData[comp]._buffer); else if (lastNonZero < 27) dctInverse8x8_2(_dctData[comp]._buffer); else if (lastNonZero < 35) dctInverse8x8_1(_dctData[comp]._buffer); else dctInverse8x8_0(_dctData[comp]._buffer); } } // // Perform the CSC // if (numComp == 3) { if (!blockIsConstant) { csc709Inverse64 (_dctData[0]._buffer, _dctData[1]._buffer, _dctData[2]._buffer); } else { csc709Inverse (_dctData[0]._buffer[0], _dctData[1]._buffer[0], _dctData[2]._buffer[0]); } } // // Float -> Half conversion. // // If the block has a constant value, just convert the first pixel. // for (unsigned int comp = 0; comp < numComp; ++comp) { if (!blockIsConstant) { (*convertFloatToHalf64) (&rowBlock[comp][blockx*64], _dctData[comp]._buffer); } else { #if IMF_HAVE_SSE2 __m128i *dst = (__m128i*)&rowBlock[comp][blockx*64]; dst[0] = _mm_set1_epi16 (((half)_dctData[comp]._buffer[0]).bits()); dst[1] = dst[0]; dst[2] = dst[0]; dst[3] = dst[0]; dst[4] = dst[0]; dst[5] = dst[0]; dst[6] = dst[0]; dst[7] = dst[0]; #else /* IMF_HAVE_SSE2 */ unsigned short *dst = &rowBlock[comp][blockx*64]; dst[0] = ((half)_dctData[comp]._buffer[0]).bits(); for (int i = 1; i < 64; ++i) { dst[i] = dst[0]; } #endif /* IMF_HAVE_SSE2 */ } // blockIsConstant } // comp } // blockx // // At this point, we have half-float nonlinear value blocked // in rowBlock[][]. We need to unblock the data, transfer // back to linear, and write the results in the _rowPtrs[]. // // There is a fast-path for aligned rows, which helps // things a little. Since this fast path is only valid // for full 8-element wide blocks, the partial x blocks // are broken into a separate loop below. 
// // At the moment, the fast path requires: // * sse support // * aligned row pointers // * full 8-element wide blocks // for (int comp = 0; comp < numComp; ++comp) { // // Test if we can use the fast path // #ifdef IMF_HAVE_SSE2 bool fastPath = true; for (int y = 8 * blocky; y < 8 * blocky + maxY; ++y) { if ((size_t)_rowPtrs[comp][y] & _SSE_ALIGNMENT_MASK) fastPath = false; } if (fastPath) { // // Handle all the full X blocks, in a fast path with sse2 and // aligned row pointers // for (int y=8*blocky; y<8*blocky+maxY; ++y) { __m128i *dst = (__m128i *)_rowPtrs[comp][y]; __m128i *src = (__m128i *)&rowBlock[comp][(y & 0x7) * 8]; for (int blockx = 0; blockx < numFullBlocksX; ++blockx) { // // These may need some twiddling. // Run with multiples of 8 // _mm_prefetch ((char *)(src + 16), _MM_HINT_NTA); unsigned short i0 = _mm_extract_epi16 (*src, 0); unsigned short i1 = _mm_extract_epi16 (*src, 1); unsigned short i2 = _mm_extract_epi16 (*src, 2); unsigned short i3 = _mm_extract_epi16 (*src, 3); unsigned short i4 = _mm_extract_epi16 (*src, 4); unsigned short i5 = _mm_extract_epi16 (*src, 5); unsigned short i6 = _mm_extract_epi16 (*src, 6); unsigned short i7 = _mm_extract_epi16 (*src, 7); i0 = _toLinear[i0]; i1 = _toLinear[i1]; i2 = _toLinear[i2]; i3 = _toLinear[i3]; i4 = _toLinear[i4]; i5 = _toLinear[i5]; i6 = _toLinear[i6]; i7 = _toLinear[i7]; *dst = _mm_insert_epi16 (_mm_setzero_si128(), i0, 0); *dst = _mm_insert_epi16 (*dst, i1, 1); *dst = _mm_insert_epi16 (*dst, i2, 2); *dst = _mm_insert_epi16 (*dst, i3, 3); *dst = _mm_insert_epi16 (*dst, i4, 4); *dst = _mm_insert_epi16 (*dst, i5, 5); *dst = _mm_insert_epi16 (*dst, i6, 6); *dst = _mm_insert_epi16 (*dst, i7, 7); src += 8; dst++; } } } else { #endif /* IMF_HAVE_SSE2 */ // // Basic scalar kinda slow path for handling the full X blocks // for (int y = 8 * blocky; y < 8 * blocky + maxY; ++y) { unsigned short *dst = (unsigned short *)_rowPtrs[comp][y]; for (int blockx = 0; blockx < numFullBlocksX; ++blockx) { unsigned short *src = &rowBlock[comp][blockx * 64 + ((y & 0x7) * 8)]; dst[0] = _toLinear[src[0]]; dst[1] = _toLinear[src[1]]; dst[2] = _toLinear[src[2]]; dst[3] = _toLinear[src[3]]; dst[4] = _toLinear[src[4]]; dst[5] = _toLinear[src[5]]; dst[6] = _toLinear[src[6]]; dst[7] = _toLinear[src[7]]; dst += 8; } } #ifdef IMF_HAVE_SSE2 } #endif /* IMF_HAVE_SSE2 */ // // If we have partial X blocks, deal with all those now // Since this should be minimal work, there currently // is only one path that should work for everyone. // if (numFullBlocksX != numBlocksX) { for (int y = 8 * blocky; y < 8 * blocky + maxY; ++y) { unsigned short *src = (unsigned short *) &rowBlock[comp][numFullBlocksX * 64 + ((y & 0x7) * 8)]; unsigned short *dst = (unsigned short *)_rowPtrs[comp][y]; dst += 8 * numFullBlocksX; for (int x = 0; x < maxX; ++x) { *dst++ = _toLinear[*src++]; } } } } // comp } // blocky // // Walk over all the channels that are of type FLOAT. // Convert from HALF XDR back to FLOAT XDR. // for (unsigned int chan = 0; chan < numComp; ++chan) { if (_type[chan] != FLOAT) continue; std::vector<unsigned short> halfXdr (_width); for (int y=0; y<_height; ++y) { char *floatXdrPtr = _rowPtrs[chan][y]; memcpy(&halfXdr[0], floatXdrPtr, _width*sizeof(unsigned short)); const char *halfXdrPtr = (const char *)(&halfXdr[0]); for (int x=0; x<_width; ++x) { half tmpHalf; Xdr::read<CharPtrIO> (halfXdrPtr, tmpHalf); Xdr::write<CharPtrIO> (floatXdrPtr, (float)tmpHalf); // // Xdr::write and Xdr::read will advance the ptrs // } } } delete[] rowBlockHandle; }
0
[ "CWE-284" ]
openexr
49db4a4192482eec9c27669f75db144cf5434804
88,902,422,598,077,540,000,000,000,000,000,000,000
471
Add additional input validation in an attempt to resolve issue #232
xcf_load_old_paths (XcfInfo *info, GimpImage *image) { guint32 num_paths; guint32 last_selected_row; GimpVectors *active_vectors; info->cp += xcf_read_int32 (info->fp, &last_selected_row, 1); info->cp += xcf_read_int32 (info->fp, &num_paths, 1); while (num_paths-- > 0) xcf_load_old_path (info, image); active_vectors = GIMP_VECTORS (gimp_container_get_child_by_index (gimp_image_get_vectors (image), last_selected_row)); if (active_vectors) gimp_image_set_active_vectors (image, active_vectors); return TRUE; }
0
[ "CWE-416" ]
gimp
e82aaa4b4ee0703c879e35ea9321fff6be3e9b6f
182,796,726,158,912,550,000,000,000,000,000,000,000
22
Bug 767873 - (CVE-2016-4994) Multiple Use-After-Free when parsing... ...XCF channel and layer properties The properties PROP_ACTIVE_LAYER, PROP_FLOATING_SELECTION, PROP_ACTIVE_CHANNEL save the current object pointer in the @info structure. Others like PROP_SELECTION (for channel) and PROP_GROUP_ITEM (for layer) will delete the current object and create a new object, leaving the pointers in @info invalid (dangling). Therefore, if a property of the first type comes before one of the second, the result will be a UaF in the last lines of xcf_load_image (when it actually uses the pointers from @info). I wasn't able to exploit this bug because g_object_instance->c_class gets cleared by the last g_object_unref and GIMP_IS_{LAYER,CHANNEL} detects that and returns FALSE. (cherry picked from commit 6d804bf9ae77bc86a0a97f9b944a129844df9395)
long long Block::GetTimeCode(const Cluster* pCluster) const { if (pCluster == 0) return m_timecode; const long long tc0 = pCluster->GetTimeCode(); assert(tc0 >= 0); // Check if tc0 + m_timecode would overflow. if (tc0 < 0 || LLONG_MAX - tc0 < m_timecode) { return -1; } const long long tc = tc0 + m_timecode; return tc; // unscaled timecode units }
0
[ "CWE-20" ]
libvpx
34d54b04e98dd0bac32e9aab0fbda0bf501bc742
325,469,338,844,778,470,000,000,000,000,000,000,000
16
update libwebm to libwebm-1.0.0.27-358-gdbf1d10 changelog: https://chromium.googlesource.com/webm/libwebm/+log/libwebm-1.0.0.27-351-g9f23fbc..libwebm-1.0.0.27-358-gdbf1d10 Change-Id: I28a6b3ae02a53fb1f2029eee11e9449afb94c8e3
pk_transaction_distro_upgrade_cb (PkBackendJob *job, PkDistroUpgrade *item, PkTransaction *transaction) { PkUpdateStateEnum state; _cleanup_free_ gchar *name = NULL; _cleanup_free_ gchar *summary = NULL; g_return_if_fail (PK_IS_TRANSACTION (transaction)); g_return_if_fail (transaction->priv->tid != NULL); /* add to results */ pk_results_add_distro_upgrade (transaction->priv->results, item); /* get data */ g_object_get (item, "state", &state, "name", &name, "summary", &summary, NULL); /* emit */ g_debug ("emitting distro-upgrade %s, %s, %s", pk_distro_upgrade_enum_to_string (state), name, summary); g_dbus_connection_emit_signal (transaction->priv->connection, NULL, transaction->priv->tid, PK_DBUS_INTERFACE_TRANSACTION, "DistroUpgrade", g_variant_new ("(uss)", state, name, summary != NULL ? summary : ""), NULL); }
0
[ "CWE-287" ]
PackageKit
f176976e24e8c17b80eff222572275517c16bdad
191,081,136,309,641,500,000,000,000,000,000,000,000
36
Reinstallation and downgrade require authorization Added new policy actions: * org.freedesktop.packagekit.package-reinstall * org.freedesktop.packagekit.package-downgrade The first does not depend on or require any other actions to be authorized, except for org.freedesktop.packagekit.package-install-untrusted in the case of reinstallation of an untrusted package. The same applies to the second one, plus it implies the org.freedesktop.packagekit.package-install action (if the user is authorized to downgrade, he's authorized to install as well). Now the authorization can spawn up to 3 asynchronous calls to polkit for a single package, because each transaction flag (allow-downgrade, allow-reinstall) the client supplies needs to be checked separately.
unknown_nmi_error(unsigned char reason, struct pt_regs *regs) { if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) return; #ifdef CONFIG_MCA /* * Might actually be able to figure out what the guilty party * is: */ if (MCA_bus) { mca_handle_nmi(); return; } #endif pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", reason, smp_processor_id()); pr_emerg("Do you have a strange power saving mode enabled?\n"); if (unknown_nmi_panic || panic_on_unrecovered_nmi) panic("NMI: Not continuing"); pr_emerg("Dazed and confused, but trying to continue\n"); }
0
[ "CWE-400" ]
linux-stable-rt
e5d4e1c3ccee18c68f23d62ba77bda26e893d4f0
95,123,745,460,955,020,000,000,000,000,000,000,000
24
x86: Do not disable preemption in int3 on 32bit Preemption must be disabled before enabling interrupts in do_trap on x86_64 because the stack in use for int3 and debug is a per-CPU stack set by the IST. But 32bit does not have an IST and the stack still belongs to the current task and there is no problem in scheduling out the task. Keep preemption enabled on X86_32 when enabling interrupts for do_trap(). The name of the function is changed from preempt_conditional_sti/cli() to conditional_sti/cli_ist(), to annotate that this function is used when the stack is on the IST. Cc: [email protected] Signed-off-by: Steven Rostedt <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]>
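Note: a sketch consistent with the rename the message describes (exact bodies are assumptions; the point is that the preempt-count manipulation becomes 64-bit-only, since only x86_64 runs these traps on a per-CPU IST stack):

static void conditional_sti_ist(struct pt_regs *regs)
{
#ifdef CONFIG_X86_64
	/* int3/debug use a per-CPU IST stack: scheduling is not safe */
	preempt_disable();
#endif
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static void conditional_cli_ist(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
#ifdef CONFIG_X86_64
	preempt_enable_no_resched();
#endif
}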
static int mov_metadata_int8_bypass_padding(MOVContext *c, AVIOContext *pb, unsigned len, const char *key) { char buf[16]; /* bypass padding bytes */ avio_r8(pb); avio_r8(pb); avio_r8(pb); snprintf(buf, sizeof(buf), "%d", avio_r8(pb)); av_dict_set(&c->fc->metadata, key, buf, 0); return 0; }
0
[ "CWE-119", "CWE-787" ]
FFmpeg
689e59b7ffed34eba6159dcc78e87133862e3746
16,106,986,686,261,848,000,000,000,000,000,000,000
15
mov: reset dref_count on realloc to keep values consistent. This fixes a potential crash. Signed-off-by: Michael Niedermayer <[email protected]>
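Note: a hedged sketch of the realloc pattern the message implies (member names drefs/drefs_count follow FFmpeg's MOVStreamContext; treat them as assumptions): grow the array, zero the new tail, and only then publish the new count, so the two can never disagree.

MOVDref *drefs = av_realloc(sc->drefs, entries * sizeof(*sc->drefs));

if (!drefs)
    return AVERROR(ENOMEM);
sc->drefs = drefs;
/* assumes entries >= sc->drefs_count: clear only the newly grown tail */
memset(sc->drefs + sc->drefs_count, 0,
       (entries - sc->drefs_count) * sizeof(*sc->drefs));
sc->drefs_count = entries;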
~RequestNote() { delete errorReport; }
0
[ "CWE-59" ]
passenger
9dda49f4a3ebe9bafc48da1bd45799f30ce19566
49,565,086,088,977,440,000,000,000,000,000,000,000
3
Fixed a problem with graceful web server restarts. This problem was introduced in 4.0.6 during the attempt to fix issue #910.
GF_Err flac_dmx_configure_pid(GF_Filter *filter, GF_FilterPid *pid, Bool is_remove) { const GF_PropertyValue *p; GF_FLACDmxCtx *ctx = gf_filter_get_udta(filter); if (is_remove) { ctx->ipid = NULL; if (ctx->opid) { gf_filter_pid_remove(ctx->opid); ctx->opid = NULL; } return GF_OK; } if (! gf_filter_pid_check_caps(pid)) return GF_NOT_SUPPORTED; ctx->ipid = pid; p = gf_filter_pid_get_property(pid, GF_PROP_PID_TIMESCALE); if (p) ctx->timescale = p->value.uint; p = gf_filter_pid_get_property_str(pid, "nocts"); if (p && p->value.boolean) ctx->recompute_cts = GF_TRUE; else ctx->recompute_cts = GF_FALSE; if (ctx->timescale && !ctx->opid) { ctx->opid = gf_filter_pid_new(filter); gf_filter_pid_copy_properties(ctx->opid, ctx->ipid); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED, NULL); } return GF_OK; }
0
[ "CWE-787" ]
gpac
da69ad1f970a7e17c865eaec9af98cc84df10d5b
228,994,666,777,681,830,000,000,000,000,000,000,000
31
fixed 1718
mj_v_skip(int n, gx_device_printer *pdev, gp_file *stream) { /* This is a kind of magic number. */ static const int max_y_step = (256 * 15 + 255); int l = n - max_y_step; for (; l > 0; l -= max_y_step) { /* move 256 * 15 + 255 dots at once*/ gp_fwrite("\033(v\2\0\xff\x0f", sizeof(byte), 7, stream); } l += max_y_step; /* move to the end. */ { int n2 = l / 256; int n1 = l - n2 * 256; gp_fwrite("\033(v\2\0", sizeof(byte) ,5 ,stream); gp_fputc(n1, stream); gp_fputc(n2, stream); gp_fputc('\r', stream); } return 0; }
0
[ "CWE-120" ]
ghostpdl
849e74e5ab450dd581942192da7101e0664fa5af
84,657,932,766,978,400,000,000,000,000,000,000,000
21
Bug 701799: avoid out-of-range array access in mj_color_correct(). Code is obscure, so this fix merely avoids out-of-range access in the simplest way possible, without understanding what the code is trying to do. Fixes: ./sanbin/gs -sOutputFile=tmp -sDEVICE=mj6000c ../bug-701799.pdf
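Note: the message says the fix "merely avoids out-of-range access in the simplest way possible". A hedged sketch of that style of clamp before a lookup-table access (table name and range are assumptions):

static int
clamp_index(int v, int lo, int hi)
{
    if (v < lo)
        return lo;
    if (v > hi)
        return hi;
    return v;
}

/* usage sketch: an index derived from color math may stray out of range
 *     value = lookup_table[clamp_index(idx, 0, 255)];
 */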
void Item_field::set_field(Field *field_par) { field=result_field=field_par; // for easy coding with fields maybe_null=field->maybe_null(); decimals= field->decimals(); table_name= *field_par->table_name; field_name= field_par->field_name; db_name= field_par->table->s->db.str; alias_name_used= field_par->table->alias_name_used; unsigned_flag=test(field_par->flags & UNSIGNED_FLAG); collation.set(field_par->charset(), field_par->derivation(), field_par->repertoire()); fix_char_length(field_par->char_length()); max_length= adjust_max_effective_column_length(field_par, max_length); fixed= 1; if (field->table->s->tmp_table == SYSTEM_TMP_TABLE) any_privileges= 0; }
0
[]
server
b000e169562697aa072600695d4f0c0412f94f4f
129,470,617,939,154,530,000,000,000,000,000,000,000
20
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL)) based on: commit f7316aa0c9a Author: Ajo Robert <[email protected]> Date: Thu Aug 24 17:03:21 2017 +0530 Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL)) Backport of Bug#19143243 fix. A NAME_CONST item can return the NULL_ITEM type in case of incorrect arguments. NULL_ITEM has special processing in the Item_func_in function. In Item_func_in::fix_length_and_dec an array of possible comparators is created. Since the NAME_CONST function has the NULL_ITEM type, the corresponding array element is empty. Then NAME_CONST is wrapped in an ITEM_CACHE. ITEM_CACHE cannot return the proper type (NULL_ITEM) in Item_func_in::val_int(), so the NULL_ITEM ends up being compared with an empty comparator. The fix is to disable the caching of the Item_name_const item.
void cgit_snapshot_link(const char *name, const char *title, const char *class, const char *head, const char *rev, const char *archivename) { reporevlink("snapshot", name, title, class, head, rev, archivename); }
0
[]
cgit
513b3863d999f91b47d7e9f26710390db55f9463
12,225,074,665,269,607,000,000,000,000,000,000,000
6
ui-shared: prevent malicious filename from injecting headers
inline void Pad(const tflite::PadParams& op_params, const RuntimeShape& input_shape, const int32* input_data, const int32* pad_value_ptr, const RuntimeShape& output_shape, int32* output_data) { PadImpl(op_params, input_shape, input_data, pad_value_ptr, output_shape, output_data); }
0
[ "CWE-476", "CWE-369" ]
tensorflow
15691e456c7dc9bd6be203b09765b063bf4a380c
299,386,837,445,734,330,000,000,000,000,000,000,000
7
Prevent dereferencing of null pointers in TFLite's `add.cc`. PiperOrigin-RevId: 387244946 Change-Id: I56094233327fbd8439b92e1dbb1262176e00eeb9
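Note: a sketch of the null-guarded tensor access pattern this class of fix introduced in TFLite kernels, using the GetInputSafe/GetOutputSafe helpers:

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, /*index=*/0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, /*index=*/0, &output));
  // input/output are guaranteed non-null past this point
  return kTfLiteOk;
}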
evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd) { return evbuffer_write_atmost(buffer, fd, -1); }
0
[ "CWE-189" ]
libevent
20d6d4458bee5d88bda1511c225c25b2d3198d6c
94,432,373,477,869,460,000,000,000,000,000,000,000
4
Fix CVE-2014-6272 in Libevent 2.0 For this fix, we need to make sure that passing too-large inputs to the evbuffer functions can't make us do bad things with the heap. Also, lower the maximum chunk size to the lower of off_t, size_t maximum. This is necessary since otherwise we could get into an infinite loop if we make a chunk that 'misalign' cannot index into.
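Note: a hedged sketch of the overflow guard the message describes for additions to an evbuffer (the helper is an assumption; total_len matches libevent's buffer field):

#include <stdint.h>

/* True when adding datlen bytes would wrap the buffer's tracked length. */
static int evbuffer_len_would_overflow(size_t total_len, size_t datlen)
{
        return datlen > SIZE_MAX - total_len;
}

/* usage sketch in an evbuffer_add()-style path:
 *     if (evbuffer_len_would_overflow(buf->total_len, datlen))
 *             return -1;
 * single-chunk allocations are separately capped at the smaller of the
 * off_t and size_t maxima, as the message notes. */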
rfbHandleAuthResult(rfbClient* client) { uint32_t authResult=0, reasonLen=0; char *reason=NULL; if (!ReadFromRFBServer(client, (char *)&authResult, 4)) return FALSE; authResult = rfbClientSwap32IfLE(authResult); switch (authResult) { case rfbVncAuthOK: rfbClientLog("VNC authentication succeeded\n"); return TRUE; break; case rfbVncAuthFailed: if (client->major==3 && client->minor>7) { /* we have an error following */ if (!ReadFromRFBServer(client, (char *)&reasonLen, 4)) return FALSE; reasonLen = rfbClientSwap32IfLE(reasonLen); reason = malloc(reasonLen+1); if (!ReadFromRFBServer(client, reason, reasonLen)) { free(reason); return FALSE; } reason[reasonLen]=0; rfbClientLog("VNC connection failed: %s\n",reason); free(reason); return FALSE; } rfbClientLog("VNC authentication failed\n"); return FALSE; case rfbVncAuthTooMany: rfbClientLog("VNC authentication failed - too many tries\n"); return FALSE; } rfbClientLog("Unknown VNC authentication result: %d\n", (int)authResult); return FALSE; }
1
[ "CWE-787" ]
libvncserver
a83439b9fbe0f03c48eb94ed05729cb016f8b72f
51,055,663,918,022,560,000,000,000,000,000,000,000
38
LibVNCClient: fix three possible heap buffer overflows An attacker could feed `0xffffffff`, causing a `malloc(0)` for the buffers which are subsequently written to. Closes #247
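Note: the function above is the pre-fix version (target = 1). A hedged sketch of the hardened reason-string path (the concrete limit is an assumption): the idea is to reject a length like 0xffffffff before reasonLen + 1 can wrap, malloc(0), and overflow on the subsequent read/write.

reasonLen = rfbClientSwap32IfLE(reasonLen);
if (reasonLen > (1 << 20)) {
    rfbClientLog("VNC connection failed, unreasonable reason length %u\n",
                 (unsigned int)reasonLen);
    return FALSE;
}
reason = malloc((size_t)reasonLen + 1);
if (!reason)
    return FALSE;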
void slice_segment_header::reset() { pps = NULL; slice_index = 0; first_slice_segment_in_pic_flag = 0; no_output_of_prior_pics_flag = 0; slice_pic_parameter_set_id = 0; dependent_slice_segment_flag = 0; slice_segment_address = 0; slice_type = 0; pic_output_flag = 0; colour_plane_id = 0; slice_pic_order_cnt_lsb = 0; short_term_ref_pic_set_sps_flag = 0; slice_ref_pic_set.reset(); short_term_ref_pic_set_idx = 0; num_long_term_sps = 0; num_long_term_pics= 0; for (int i=0;i<MAX_NUM_REF_PICS;i++) { lt_idx_sps[i] = 0; poc_lsb_lt[i] = 0; used_by_curr_pic_lt_flag[i] = 0; delta_poc_msb_present_flag[i] = 0; delta_poc_msb_cycle_lt[i] = 0; } slice_temporal_mvp_enabled_flag = 0; slice_sao_luma_flag = 0; slice_sao_chroma_flag = 0; num_ref_idx_active_override_flag = 0; num_ref_idx_l0_active = 0; num_ref_idx_l1_active = 0; ref_pic_list_modification_flag_l0 = 0; ref_pic_list_modification_flag_l1 = 0; for (int i=0;i<16;i++) { list_entry_l0[i] = 0; list_entry_l1[i] = 0; } mvd_l1_zero_flag = 0; cabac_init_flag = 0; collocated_from_l0_flag = 0; collocated_ref_idx = 0; luma_log2_weight_denom = 0; ChromaLog2WeightDenom = 0; for (int i=0;i<2;i++) for (int j=0;j<16;j++) { luma_weight_flag[i][j] = 0; chroma_weight_flag[i][j] = 0; LumaWeight[i][j] = 0; luma_offset[i][j] = 0; ChromaWeight[i][j][0] = ChromaWeight[i][j][1] = 0; ChromaOffset[i][j][0] = ChromaOffset[i][j][1] = 0; } five_minus_max_num_merge_cand = 0; slice_qp_delta = 0; slice_cb_qp_offset = 0; slice_cr_qp_offset = 0; cu_chroma_qp_offset_enabled_flag = 0; deblocking_filter_override_flag = 0; slice_deblocking_filter_disabled_flag = 0; slice_beta_offset = 0; slice_tc_offset = 0; slice_loop_filter_across_slices_enabled_flag = 0; num_entry_point_offsets = 0; offset_len = 0; entry_point_offset.clear(); slice_segment_header_extension_length = 0; SliceAddrRS = 0; SliceQPY = 0; initType = 0; MaxNumMergeCand = 0; CurrRpsIdx = 0; CurrRps.reset(); NumPocTotalCurr = 0; for (int i=0;i<2;i++) for (int j=0;j<MAX_NUM_REF_PICS;j++) { RefPicList[i][j] = 0; RefPicList_POC[i][j] = 0; RefPicList_PicState[i][j] = 0; LongTermRefPic[i][j] = 0; } //context_model ctx_model_storage[CONTEXT_MODEL_TABLE_LENGTH]; RemoveReferencesList.clear(); ctx_model_storage_defined = false; }
0
[]
libde265
e83f3798dd904aa579425c53020c67e03735138d
9,434,955,028,214,127,000,000,000,000,000,000,000
109
fix check for valid PPS idx (#298)
MagickExport MagickBooleanType DrawGradientImage(Image *image, const DrawInfo *draw_info,ExceptionInfo *exception) { CacheView *image_view; const GradientInfo *gradient; const SegmentInfo *gradient_vector; double length; MagickBooleanType status; PixelInfo zero; PointInfo point; RectangleInfo bounding_box; ssize_t y; /* Draw linear or radial gradient on image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); gradient=(&draw_info->gradient); qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo), StopInfoCompare); gradient_vector=(&gradient->gradient_vector); point.x=gradient_vector->x2-gradient_vector->x1; point.y=gradient_vector->y2-gradient_vector->y1; length=sqrt(point.x*point.x+point.y*point.y); bounding_box=gradient->bounding_box; status=MagickTrue; GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,bounding_box.height-bounding_box.y,1) #endif for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++) { double alpha, offset; PixelInfo composite, pixel; Quantum *magick_restrict q; ssize_t i, x; ssize_t j; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; composite=zero; offset=GetStopColorOffset(gradient,0,y); if (gradient->type != RadialGradient) offset*=PerceptibleReciprocal(length); for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++) { GetPixelInfoPixel(image,q,&pixel); switch (gradient->spread) { case UndefinedSpread: case PadSpread: { if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) || (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5)))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type != RadialGradient) offset*=PerceptibleReciprocal(length); } for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if ((offset < 0.0) || (i == 0)) composite=gradient->stops[0].color; else if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops)) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } case ReflectSpread: { if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) || (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5)))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type != RadialGradient) offset*=PerceptibleReciprocal(length); } if (offset < 0.0) offset=(-offset); if ((ssize_t) fmod(offset,2.0) == 0) offset=fmod(offset,1.0); else offset=1.0-fmod(offset,1.0); for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite=gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } case RepeatSpread: { double repeat; 
MagickBooleanType antialias; antialias=MagickFalse; repeat=0.0; if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) || (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5)))) { offset=GetStopColorOffset(gradient,x,y); if (gradient->type == LinearGradient) { repeat=fmod(offset,length); if (repeat < 0.0) repeat=length-fmod(-repeat,length); else repeat=fmod(offset,length); antialias=(repeat < length) && ((repeat+1.0) > length) ? MagickTrue : MagickFalse; offset=PerceptibleReciprocal(length)*repeat; } else { repeat=fmod(offset,gradient->radius); if (repeat < 0.0) repeat=gradient->radius-fmod(-repeat,gradient->radius); else repeat=fmod(offset,gradient->radius); antialias=repeat+1.0 > gradient->radius ? MagickTrue : MagickFalse; offset=repeat*PerceptibleReciprocal(gradient->radius); } } for (i=0; i < (ssize_t) gradient->number_stops; i++) if (offset < gradient->stops[i].offset) break; if (i == 0) composite=gradient->stops[0].color; else if (i == (ssize_t) gradient->number_stops) composite=gradient->stops[gradient->number_stops-1].color; else { j=i; i--; alpha=(offset-gradient->stops[i].offset)/ (gradient->stops[j].offset-gradient->stops[i].offset); if (antialias != MagickFalse) { if (gradient->type == LinearGradient) alpha=length-repeat; else alpha=gradient->radius-repeat; i=0; j=(ssize_t) gradient->number_stops-1L; } CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha, &gradient->stops[j].color,alpha,&composite); } break; } } CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha, &pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); }
0
[]
ImageMagick
f4cdb3f3aab28273960ffacf1d356312b56ffd27
117,730,287,321,605,190,000,000,000,000,000,000,000
231
https://github.com/ImageMagick/ImageMagick/issues/3338
static struct inode *sock_alloc_inode(struct super_block *sb) { struct socket_alloc *ei; ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); if (!ei) return NULL; init_waitqueue_head(&ei->socket.wq.wait); ei->socket.wq.fasync_list = NULL; ei->socket.wq.flags = 0; ei->socket.state = SS_UNCONNECTED; ei->socket.flags = 0; ei->socket.ops = NULL; ei->socket.sk = NULL; ei->socket.file = NULL; return &ei->vfs_inode; }
0
[]
linux
d69e07793f891524c6bbf1e75b9ae69db4450953
202,969,234,705,980,950,000,000,000,000,000,000,000
19
net: disallow ancillary data for __sys_{send,recv}msg_file() Only io_uring uses (and added) these, and we want to disallow the use of sendmsg/recvmsg for anything but regular data transfers. Use the newly added prep helper to split the msghdr copy out from the core function, to check for msg_control and msg_controllen settings. If either is set, we return -EINVAL. Acked-by: David S. Miller <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
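Note: a sketch of the check the message describes, placed in the prep helper that copies in the user msghdr (the helper name is an assumption):

static int sendrecv_msghdr_check(const struct msghdr *msg)
{
	/* regular data transfers only: no SCM_RIGHTS or other cmsgs */
	if (msg->msg_control || msg->msg_controllen)
		return -EINVAL;
	return 0;
}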
static int pxa2xx_i2c_slave_init(I2CSlave *i2c) { /* Nothing to do. */ return 0; }
0
[ "CWE-119" ]
qemu
caa881abe0e01f9931125a0977ec33c5343e4aa7
302,007,676,579,676,700,000,000,000,000,000,000,000
5
pxa2xx: avoid buffer overrun on incoming migration CVE-2013-4533 s->rx_level is read from the wire and used to determine how many bytes to subsequently read into s->rx_fifo[]. If s->rx_level exceeds the length of s->rx_fifo[] the buffer can be overrun with arbitrary data from the wire. Fix this by validating rx_level against the size of s->rx_fifo. Cc: Don Koch <[email protected]> Reported-by: Michael Roth <[email protected]> Signed-off-by: Michael S. Tsirkin <[email protected]> Reviewed-by: Peter Maydell <[email protected]> Reviewed-by: Don Koch <[email protected]> Signed-off-by: Juan Quintela <[email protected]>
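Note: a hedged sketch of the validation the message describes inside the device's migration load handler (the element accessor and field width are assumptions):

s->rx_level = qemu_get_byte(f);
if (s->rx_level > ARRAY_SIZE(s->rx_fifo)) {
    /* rx_level comes off the wire: refuse rather than overrun rx_fifo[] */
    return -EINVAL;
}
for (i = 0; i < s->rx_level; i++) {
    s->rx_fifo[i] = qemu_get_byte(f);   /* assumed element width */
}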
callbacks_update_ruler_scales (void) { double xStart, xEnd, yStart, yEnd; xStart = screenRenderInfo.lowerLeftX; yStart = screenRenderInfo.lowerLeftY; xEnd = screenRenderInfo.lowerLeftX + (screenRenderInfo.displayWidth / screenRenderInfo.scaleFactorX); yEnd = screenRenderInfo.lowerLeftY + (screenRenderInfo.displayHeight / screenRenderInfo.scaleFactorY); /* mils can get super crowded with large boards, but inches are too large for most boards. So, we leave mils in for now and just switch to inches if the scale factor gets too small */ if (!((screen.unit == GERBV_MILS) && ((screenRenderInfo.scaleFactorX < 80)||(screenRenderInfo.scaleFactorY < 80)))) { xStart = callbacks_calculate_actual_distance (xStart); xEnd = callbacks_calculate_actual_distance (xEnd); yStart = callbacks_calculate_actual_distance (yStart); yEnd = callbacks_calculate_actual_distance (yEnd); } /* make sure the widgets actually exist before setting (in case this gets called before everything is realized */ if (screen.win.hRuler) gtk_ruler_set_range (GTK_RULER (screen.win.hRuler), xStart, xEnd, 0, xEnd - xStart); /* reverse y min and max, since the ruler starts at the top */ if (screen.win.vRuler) gtk_ruler_set_range (GTK_RULER (screen.win.vRuler), yEnd, yStart, 0, yEnd - yStart); }
0
[ "CWE-200" ]
gerbv
319a8af890e4d0a5c38e6d08f510da8eefc42537
80,287,876,116,658,770,000,000,000,000,000,000,000
24
Remove local alias to parameter array Normalizing access to `gerbv_simplified_amacro_t::parameter` as a step to fix CVE-2021-40402
decrypt (gcry_mpi_t output, gcry_mpi_t a, gcry_mpi_t b, ELG_secret_key *skey ) { gcry_mpi_t t1 = mpi_alloc_secure( mpi_get_nlimbs( skey->p ) ); mpi_normalize (a); mpi_normalize (b); /* output = b/(a^x) mod p */ mpi_powm( t1, a, skey->x, skey->p ); mpi_invm( t1, t1, skey->p ); mpi_mulm( output, b, t1, skey->p ); #if 0 if( DBG_CIPHER ) { log_mpidump ("elg decrypted x", skey->x); log_mpidump ("elg decrypted p", skey->p); log_mpidump ("elg decrypted a", a); log_mpidump ("elg decrypted b", b); log_mpidump ("elg decrypted M", output); } #endif mpi_free(t1); }
1
[ "CWE-200" ]
libgcrypt
410d70bad9a650e3837055e36f157894ae49a57d
261,666,230,547,360,070,000,000,000,000,000,000,000
23
cipher: Use ciphertext blinding for Elgamal decryption. * cipher/elgamal.c (USE_BLINDING): New. (decrypt): Rewrite to use ciphertext blinding. -- CVE-id: CVE-2014-3591 As a countermeasure to new side-channel attacks on sliding-window exponentiation we blind the ciphertext for Elgamal decryption. This is similar to what we are doing with RSA. This patch is a backport of the GnuPG 1.4 commit ff53cf06e966dce0daba5f2c84e03ab9db2c3c8b. Unfortunately, the performance impact of Elgamal blinding is quite noticeable (i5-2410M CPU @ 2.30GHz TP 220): Algorithm generate 100*priv 100*public ------------------------------------------------ ELG 1024 bit - 100ms 90ms ELG 2048 bit - 330ms 350ms ELG 3072 bit - 660ms 790ms Algorithm generate 100*priv 100*public ------------------------------------------------ ELG 1024 bit - 150ms 90ms ELG 2048 bit - 520ms 360ms ELG 3072 bit - 1100ms 800ms Signed-off-by: Werner Koch <[email protected]>
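Note: a hedged sketch of ciphertext blinding for the decrypt() shown above, using the same mpi helpers it already calls (the randomize helper is an assumption). The idea: never run the secret exponentiation on the attacker-controlled base a directly; fold a random r into the base first, then divide its contribution back out.

/* m = b / a^x mod p, computed on the blinded base (a*r) */
gcry_mpi_t r  = mpi_alloc_secure (mpi_get_nlimbs (skey->p));
gcry_mpi_t ar = mpi_alloc_secure (mpi_get_nlimbs (skey->p));
gcry_mpi_t t1 = mpi_alloc_secure (mpi_get_nlimbs (skey->p));
gcry_mpi_t t2 = mpi_alloc_secure (mpi_get_nlimbs (skey->p));

mpi_randomize (r, mpi_get_nbits (skey->p), GCRY_WEAK_RANDOM); /* assumed helper */
mpi_mulm (ar, a, r, skey->p);        /* ar = a*r mod p           */
mpi_powm (t1, ar, skey->x, skey->p); /* t1 = (a*r)^x = a^x * r^x */
mpi_powm (t2, r, skey->x, skey->p);  /* t2 = r^x                 */
mpi_invm (t1, t1, skey->p);          /* t1 = 1/(a^x * r^x)       */
mpi_mulm (t1, t1, t2, skey->p);      /* t1 = 1/a^x               */
mpi_mulm (output, b, t1, skey->p);   /* m  = b / a^x             */

mpi_free (t2); mpi_free (t1); mpi_free (ar); mpi_free (r);

This costs two modular exponentiations instead of one, which is consistent with the slowdown reported in the tables above.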
TABLE *get_null_ref_table() const { return null_ref_table; }
0
[ "CWE-617" ]
server
2e7891080667c59ac80f788eef4d59d447595772
70,646,341,186,603,840,000,000,000,000,000,000,000
1
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view This bug could manifest itself after pushing a where condition over a mergeable derived table / view / CTE DT into a grouping view / derived table / CTE V whose item list contained set functions with constant arguments such as MIN(2), SUM(1) etc. In such cases the field references used in the condition pushed into the view V that correspond set functions are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation of the virtual method const_item() for the class Item_direct_view_ref the wrapped set functions with constant arguments could be erroneously taken for constant items. This could lead to a wrong result set returned by the main select query in 10.2. In 10.4 where a possibility of pushing condition from HAVING into WHERE had been added this could cause a crash. Approved by Sergey Petrunya <[email protected]>
void dev_error(struct cgpu_info *dev, enum dev_reason reason) { dev->device_last_not_well = time(NULL); dev->device_not_well_reason = reason; switch (reason) { case REASON_THREAD_FAIL_INIT: dev->thread_fail_init_count++; break; case REASON_THREAD_ZERO_HASH: dev->thread_zero_hash_count++; break; case REASON_THREAD_FAIL_QUEUE: dev->thread_fail_queue_count++; break; case REASON_DEV_SICK_IDLE_60: dev->dev_sick_idle_60_count++; break; case REASON_DEV_DEAD_IDLE_600: dev->dev_dead_idle_600_count++; break; case REASON_DEV_NOSTART: dev->dev_nostart_count++; break; case REASON_DEV_OVER_HEAT: dev->dev_over_heat_count++; break; case REASON_DEV_THERMAL_CUTOFF: dev->dev_thermal_cutoff_count++; break; case REASON_DEV_COMMS_ERROR: dev->dev_comms_error_count++; break; case REASON_DEV_THROTTLE: dev->dev_throttle_count++; break; } }
0
[ "CWE-20", "CWE-703" ]
sgminer
910c36089940e81fb85c65b8e63dcd2fac71470c
183,454,579,470,284,050,000,000,000,000,000,000,000
38
stratum: parse_notify(): Don't die on malformed bbversion/prev_hash/nbit/ntime. Might have introduced a memory leak, don't have time to check. :( Should the other hex2bin()'s be checked? Thanks to Mick Ayzenberg <mick.dejavusecurity.com> for finding this.
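Note: a hedged sketch of the hex2bin() validation the message calls for (buffer names and the cleanup label are assumptions; the field widths are the standard stratum notify sizes):

bool ok = hex2bin(bbversion_bin, bbversion, 4) &&
          hex2bin(prevhash_bin, prev_hash, 32) &&
          hex2bin(nbit_bin, nbit, 4) &&
          hex2bin(ntime_bin, ntime, 4);
if (!ok) {
        applog(LOG_INFO, "Malformed mining.notify field, ignoring notify");
        goto out_unlock;   /* keep the miner alive instead of quit()ing */
}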
DeepScanLineInputFile::readPixelSampleCounts (int scanline1, int scanline2) { Int64 savedFilePos = 0; if(!_data->frameBufferValid) { throw IEX_NAMESPACE::ArgExc("readPixelSampleCounts called with no valid frame buffer"); } try { Lock lock (*_data->_streamData); savedFilePos = _data->_streamData->is->tellg(); int scanLineMin = min (scanline1, scanline2); int scanLineMax = max (scanline1, scanline2); if (scanLineMin < _data->minY || scanLineMax > _data->maxY) throw IEX_NAMESPACE::ArgExc ("Tried to read scan line sample counts outside " "the image file's data window."); for (int i = scanLineMin; i <= scanLineMax; i++) { // // if scanline is already read, it'll be in the cache // otherwise, read from file, store in cache and in caller's framebuffer // if (_data->gotSampleCount[i - _data->minY]) { fillSampleCountFromCache(i,_data); }else{ int lineBlockId = ( i - _data->minY ) / _data->linesInBuffer; readSampleCountForLineBlock ( _data->_streamData, _data, lineBlockId ); int minYInLineBuffer = lineBlockId * _data->linesInBuffer + _data->minY; int maxYInLineBuffer = min ( minYInLineBuffer + _data->linesInBuffer - 1, _data->maxY ); // // For each line within the block, get the count of bytes. // bytesPerDeepLineTable ( _data->header, minYInLineBuffer, maxYInLineBuffer, _data->sampleCountSliceBase, _data->sampleCountXStride, _data->sampleCountYStride, _data->bytesPerLine ); // // For each scanline within the block, get the offset. // offsetInLineBufferTable ( _data->bytesPerLine, minYInLineBuffer - _data->minY, maxYInLineBuffer - _data->minY, _data->linesInBuffer, _data->offsetInLineBuffer ); } } _data->_streamData->is->seekg(savedFilePos); } catch (IEX_NAMESPACE::BaseExc &e) { REPLACE_EXC (e, "Error reading sample count data from image " "file \"" << fileName() << "\". " << e.what()); _data->_streamData->is->seekg(savedFilePos); throw; } }
0
[ "CWE-125" ]
openexr
e79d2296496a50826a15c667bf92bdc5a05518b4
327,546,663,053,120,770,000,000,000,000,000,000,000
77
fix memory leaks and invalid memory accesses Signed-off-by: Peter Hillman <[email protected]>
rdpsnd_init_packet(uint8 type, uint16 size) { STREAM s; s = channel_init(rdpsnd_channel, size + 4); out_uint8(s, type); out_uint8(s, 0); /* protocol-mandated padding */ out_uint16_le(s, size); return s; }
0
[ "CWE-119", "CWE-125", "CWE-703", "CWE-787" ]
rdesktop
4dca546d04321a610c1835010b5dad85163b65e1
194,088,273,298,150,140,000,000,000,000,000,000,000
10
Malicious RDP server security fixes This commit includes fixes for a set of 21 vulnerabilities in rdesktop when a malicious RDP server is used. All vulnerabilities was identified and reported by Eyal Itkin. * Add rdp_protocol_error function that is used in several fixes * Refactor of process_bitmap_updates * Fix possible integer overflow in s_check_rem() on 32bit arch * Fix memory corruption in process_bitmap_data - CVE-2018-8794 * Fix remote code execution in process_bitmap_data - CVE-2018-8795 * Fix remote code execution in process_plane - CVE-2018-8797 * Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175 * Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175 * Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176 * Fix Denial of Service in sec_recv - CVE-2018-20176 * Fix minor information leak in rdpdr_process - CVE-2018-8791 * Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792 * Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793 * Fix Denial of Service in process_bitmap_data - CVE-2018-8796 * Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798 * Fix Denial of Service in process_secondary_order - CVE-2018-8799 * Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800 * Fix major information leak in ui_clip_handle_data - CVE-2018-20174 * Fix memory corruption in rdp_in_unistr - CVE-2018-20177 * Fix Denial of Service in process_demand_active - CVE-2018-20178 * Fix remote code execution in lspci_process - CVE-2018-20179 * Fix remote code execution in rdpsnddbg_process - CVE-2018-20180 * Fix remote code execution in seamless_process - CVE-2018-20181 * Fix remote code execution in seamless_process_line - CVE-2018-20182
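Note: a hedged sketch of the rdp_protocol_error() helper the message says was added, together with the s_check_rem() bound check it pairs with (the exact body is an assumption; error() is rdesktop's own logger):

void
rdp_protocol_error(const char *txt, STREAM s)
{
	/* the real helper may also hexdump the offending stream s */
	error("Protocol error: %s\n", txt);
	exit(1);
}

/* usage sketch: bound-check the stream before every multi-byte read */
if (!s_check_rem(s, 4))
	rdp_protocol_error("channel list would overrun stream", s);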
static inline uint64_t readNumber(const unsigned char *p, unsigned *off, unsigned len, char *ok) { uint64_t n=0; unsigned i, newoff, lim, p0 = p[*off], shift=0; lim = p0 - 0x60; if (lim > 0x10) { cli_errmsg("Invalid number type: %c\n", p0); *ok = 0; return 0; } newoff = *off +lim+1; if (newoff > len) { cli_errmsg("End of line encountered while reading number\n"); *ok = 0; return 0; } if (p0 == 0x60) { *off = newoff; return 0; } for (i=*off+1;i < newoff; i++) { uint64_t v = p[i]; if (UNLIKELY((v&0xf0) != 0x60)) { cli_errmsg("Invalid number part: %c\n", (char)v); *ok = 0; return 0; } v &= 0xf; v <<= shift; n |= v; shift += 4; } *off = newoff; return n; }
0
[ "CWE-189" ]
clamav-devel
3d664817f6ef833a17414a4ecea42004c35cc42f
234,314,027,890,622,780,000,000,000,000,000,000,000
38
fix recursion level crash (bb #3706). Thanks to Stephane Chazelas for the analysis.
void uncache_tmp_xattrs(void) { if (prior_xattr_count != (size_t)-1) { rsync_xa_list *xa_list_item = rsync_xal_l.items; rsync_xa_list *xa_list_start = xa_list_item + prior_xattr_count; xa_list_item += rsync_xal_l.count; rsync_xal_l.count = prior_xattr_count; while (xa_list_item-- > xa_list_start) { struct ht_int64_node *node; rsync_xa_list_ref *ref; rsync_xal_free(&xa_list_item->xa_items); if (rsync_xal_h == NULL) continue; node = hashtable_find(rsync_xal_h, xa_list_item->key, 0); if (node == NULL) continue; if (node->data == NULL) continue; ref = node->data; if (xa_list_item->ndx == ref->ndx) { /* xa_list_item is the first in the list. */ node->data = ref->next; free(ref); continue; } while (ref != NULL) { if (ref->next == NULL) { ref = NULL; break; } if (xa_list_item->ndx == ref->next->ndx) { ref->next = ref->next->next; free(ref); break; } ref = ref->next; } } prior_xattr_count = (size_t)-1; } }
0
[ "CWE-125" ]
rsync
47a63d90e71d3e19e0e96052bb8c6b9cb140ecc1
287,513,763,004,759,060,000,000,000,000,000,000,000
47
Enforce trailing \0 when receiving xattr name values. Fixes bug 13112.
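Note: a hedged sketch of the receive-side check the message describes, run right after the xattr name bytes arrive:

if (name_len < 1 || name[name_len - 1] != '\0') {
	rprintf(FERROR, "Invalid xattr name received (missing trailing \\0).\n");
	exit_cleanup(RERR_FILEIO);
}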
int __udp_lib_get_port(struct sock *sk, unsigned short snum, struct hlist_head udptable[], int *port_rover, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2 ) ) { struct hlist_node *node; struct hlist_head *head; struct sock *sk2; int error = 1; write_lock_bh(&udp_hash_lock); if (snum == 0) { int best_size_so_far, best, result, i; if (*port_rover > sysctl_local_port_range[1] || *port_rover < sysctl_local_port_range[0]) *port_rover = sysctl_local_port_range[0]; best_size_so_far = 32767; best = result = *port_rover; for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) { int size; head = &udptable[result & (UDP_HTABLE_SIZE - 1)]; if (hlist_empty(head)) { if (result > sysctl_local_port_range[1]) result = sysctl_local_port_range[0] + ((result - sysctl_local_port_range[0]) & (UDP_HTABLE_SIZE - 1)); goto gotit; } size = 0; sk_for_each(sk2, node, head) { if (++size >= best_size_so_far) goto next; } best_size_so_far = size; best = result; next: ; } result = best; for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) { if (result > sysctl_local_port_range[1]) result = sysctl_local_port_range[0] + ((result - sysctl_local_port_range[0]) & (UDP_HTABLE_SIZE - 1)); if (! __udp_lib_lport_inuse(result, udptable)) break; } if (i >= (1 << 16) / UDP_HTABLE_SIZE) goto fail; gotit: *port_rover = snum = result; } else { head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; sk_for_each(sk2, node, head) if (sk2->sk_hash == snum && sk2 != sk && (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && (*saddr_comp)(sk, sk2) ) goto fail; } inet_sk(sk)->num = snum; sk->sk_hash = snum; if (sk_unhashed(sk)) { head = &udptable[snum & (UDP_HTABLE_SIZE - 1)]; sk_add_node(sk, head); sock_prot_inc_use(sk->sk_prot); } error = 0; fail: write_unlock_bh(&udp_hash_lock); return error; }
1
[]
linux-2.6
32c1da70810017a98aa6c431a5494a302b6b9a30
203,541,452,275,097,500,000,000,000,000,000,000,000
78
[UDP]: Randomize port selection. This patch causes UDP port allocation to be randomized like TCP. The earlier code would always choose the same port (i.e., the first empty list). Signed-off-by: Stephen Hemminger <[email protected]> Signed-off-by: David S. Miller <[email protected]>
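Note: the function above is the pre-randomization version (target = 1); it scans from a fixed rover and prefers the emptiest chain. A hedged sketch of the randomized starting point the message describes (net_random() is the 2.6-era PRNG helper; surrounding loop omitted):

int low   = sysctl_local_port_range[0];
int high  = sysctl_local_port_range[1];
int range = high - low + 1;

/* begin the search at a random point in the range instead of a fixed
 * rover, so allocations do not always land on the first empty chain */
result = low + net_random() % range;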
static const char *req_filename_field(request_rec *r) { return r->filename; }
0
[ "CWE-20" ]
httpd
78eb3b9235515652ed141353d98c239237030410
330,476,379,507,598,000,000,000,000,000,000,000,000
4
*) SECURITY: CVE-2015-0228 (cve.mitre.org) mod_lua: A maliciously crafted websockets PING after a script calls r:wsupgrade() can cause a child process crash. [Edward Lu <Chaosed0 gmail.com>] Discovered by Guido Vranken <guidovranken gmail.com> Submitted by: Edward Lu Committed by: covener git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1657261 13f79535-47bb-0310-9956-ffa450edef68
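Note: a hedged sketch of the kind of frame validation involved (all names are assumptions). Per RFC 6455 section 5.5, control frames such as PING carry at most 125 payload bytes and must not be fragmented, so a reader can reject oversized PINGs outright instead of trusting the advertised length:

payload_len = frame_byte2 & 0x7f;   /* low 7 bits of the length field */
if (is_control_opcode(opcode) && (payload_len > 125 || !fin_bit)) {
    return APR_EINVAL;   /* malformed control frame: refuse to read payload */
}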