func
stringlengths
0
484k
target
int64
0
1
cwe
listlengths
0
4
project
stringclasses
799 values
commit_id
stringlengths
40
40
hash
float64
1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
size
int64
1
24k
message
stringlengths
0
13.3k
file_close(ref * pfile) { stream *s; if (file_is_valid(s, pfile)) { /* closing a closed file is a no-op */ if (sclose(s)) return_error(gs_error_ioerror); } return 0; }
0
[ "CWE-200" ]
ghostpdl
ab109aaeb3ddba59518b036fb288402a65cf7ce8
228,647,050,678,615,000,000,000,000,000,000,000,000
10
Bug 694724: Have filenameforall and getenv honor SAFER
int32_t dmar_assign_irte(const struct intr_source *intr_src, union dmar_ir_entry *irte, uint16_t idx_in, uint16_t *idx_out) { struct dmar_drhd_rt *dmar_unit; union dmar_ir_entry *ir_table, *ir_entry; union pci_bdf sid; uint64_t trigger_mode; int32_t ret = -EINVAL; if (intr_src->is_msi) { dmar_unit = device_to_dmaru((uint8_t)intr_src->src.msi.bits.b, intr_src->src.msi.fields.devfun); sid.value = (uint16_t)(intr_src->src.msi.value); trigger_mode = 0x0UL; } else { dmar_unit = ioapic_to_dmaru(intr_src->src.ioapic_id, &sid); trigger_mode = irte->bits.remap.trigger_mode; } if (is_dmar_unit_valid(dmar_unit, sid)) { dmar_enable_intr_remapping(dmar_unit); ir_table = (union dmar_ir_entry *)hpa2hva(dmar_unit->ir_table_addr); *idx_out = idx_in; if (idx_in == INVALID_IRTE_ID) { *idx_out = alloc_irtes(dmar_unit, 1U); } if (*idx_out < CONFIG_MAX_IR_ENTRIES) { ir_entry = ir_table + *idx_out; if (intr_src->pid_paddr != 0UL) { union dmar_ir_entry irte_pi; /* irte is in remapped mode format, convert to posted mode format */ irte_pi.value.lo_64 = 0UL; irte_pi.value.hi_64 = 0UL; irte_pi.bits.post.vector = irte->bits.remap.vector; irte_pi.bits.post.svt = 0x1UL; irte_pi.bits.post.sid = sid.value; irte_pi.bits.post.present = 0x1UL; irte_pi.bits.post.mode = 0x1UL; irte_pi.bits.post.pda_l = (intr_src->pid_paddr) >> 6U; irte_pi.bits.post.pda_h = (intr_src->pid_paddr) >> 32U; *ir_entry = irte_pi; } else { /* Fields that have not been initialized explicitly default to 0 */ irte->bits.remap.svt = 0x1UL; irte->bits.remap.sid = sid.value; irte->bits.remap.present = 0x1UL; irte->bits.remap.trigger_mode = trigger_mode; *ir_entry = *irte; } iommu_flush_cache(ir_entry, sizeof(union dmar_ir_entry)); dmar_invalid_iec(dmar_unit, *idx_out, 0U, false); } ret = 0; } return ret; }
0
[ "CWE-120", "CWE-787" ]
acrn-hypervisor
25c0e3817eb332660dd63d1d4522e63dcc94e79a
233,461,942,279,521,000,000,000,000,000,000,000,000
64
hv: validate input for dmar_free_irte function Malicious input 'index' may trigger buffer overflow on array 'irte_alloc_bitmap[]'. This patch validate that 'index' shall be less than 'CONFIG_MAX_IR_ENTRIES' and also remove unnecessary check on 'index' in 'ptirq_free_irte()' function with this fix. Tracked-On: #6132 Signed-off-by: Yonghua Huang <[email protected]>
static loff_t sisusb_lseek(struct file *file, loff_t offset, int orig) { struct sisusb_usb_data *sisusb; loff_t ret; sisusb = file->private_data; if (!sisusb) return -ENODEV; mutex_lock(&sisusb->lock); /* Sanity check */ if (!sisusb->present || !sisusb->ready || !sisusb->sisusb_dev) { mutex_unlock(&sisusb->lock); return -ENODEV; } ret = no_seek_end_llseek(file, offset, orig); mutex_unlock(&sisusb->lock); return ret; }
0
[ "CWE-476" ]
linux
9a5729f68d3a82786aea110b1bfe610be318f80a
84,803,831,077,931,155,000,000,000,000,000,000,000
22
USB: sisusbvga: fix oops in error path of sisusb_probe The pointer used to log a failure of usb_register_dev() must be set before the error is logged. v2: fix that minor is not available before registration Signed-off-by: oliver Neukum <[email protected]> Reported-by: [email protected] Fixes: 7b5cd5fefbe02 ("USB: SisUSB2VGA: Convert printk to dev_* macros") Cc: stable <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
QPDFWriter::writeObjectStream(QPDFObjectHandle object) { // Note: object might be null if this is a place-holder for an // object stream that we are generating from scratch. QPDFObjGen old_og = object.getObjGen(); assert(old_og.getGen() == 0); int old_id = old_og.getObj(); int new_id = this->m->obj_renumber[old_og]; std::vector<qpdf_offset_t> offsets; qpdf_offset_t first = 0; // Generate stream itself. We have to do this in two passes so we // can calculate offsets in the first pass. PointerHolder<Buffer> stream_buffer; int first_obj = -1; bool compressed = false; for (int pass = 1; pass <= 2; ++pass) { if (pass == 1) { pushDiscardFilter(); } else { // Adjust offsets to skip over comment before first object first = offsets.at(0); for (std::vector<qpdf_offset_t>::iterator iter = offsets.begin(); iter != offsets.end(); ++iter) { *iter -= first; } // Take one pass at writing pairs of numbers so we can get // their size information pushDiscardFilter(); writeObjectStreamOffsets(offsets, first_obj); first += this->m->pipeline->getCount(); popPipelineStack(); // Set up a stream to write the stream data into a buffer. Pipeline* next = pushPipeline(new Pl_Buffer("object stream")); if ((this->m->compress_streams || (this->m->stream_decode_level == qpdf_dl_none)) && (! this->m->qdf_mode)) { compressed = true; next = pushPipeline( new Pl_Flate("compress object stream", next, Pl_Flate::a_deflate)); } activatePipelineStack(); writeObjectStreamOffsets(offsets, first_obj); } int count = 0; for (std::set<QPDFObjGen>::iterator iter = this->m->object_stream_to_objects[old_id].begin(); iter != this->m->object_stream_to_objects[old_id].end(); ++iter, ++count) { QPDFObjGen obj = *iter; int new_obj = this->m->obj_renumber[obj]; if (first_obj == -1) { first_obj = new_obj; } if (this->m->qdf_mode) { writeString("%% Object stream: object " + QUtil::int_to_string(new_obj) + ", index " + QUtil::int_to_string(count)); if (! 
this->m->suppress_original_object_ids) { writeString("; original object ID: " + QUtil::int_to_string(obj.getObj())); // For compatibility, only write the generation if // non-zero. While object streams only allow // objects with generation 0, if we are generating // object streams, the old object could have a // non-zero generation. if (obj.getGen() != 0) { QTC::TC("qpdf", "QPDFWriter original obj non-zero gen"); writeString(" " + QUtil::int_to_string(obj.getGen())); } } writeString("\n"); } if (pass == 1) { offsets.push_back(this->m->pipeline->getCount()); // To avoid double-counting objects being written in // object streams for progress reporting, decrement in // pass 1. indicateProgress(true, false); } writeObject(this->m->pdf.getObjectByObjGen(obj), count); this->m->xref[new_obj] = QPDFXRefEntry(2, new_id, count); } // stream_buffer will be initialized only for pass 2 popPipelineStack(&stream_buffer); } // Write the object openObject(new_id); setDataKey(new_id); writeString("<<"); writeStringQDF("\n "); writeString(" /Type /ObjStm"); writeStringQDF("\n "); size_t length = stream_buffer->getSize(); adjustAESStreamLength(length); writeString(" /Length " + QUtil::uint_to_string(length)); writeStringQDF("\n "); if (compressed) { writeString(" /Filter /FlateDecode"); } writeString(" /N " + QUtil::uint_to_string(offsets.size())); writeStringQDF("\n "); writeString(" /First " + QUtil::int_to_string(first)); if (! object.isNull()) { // If the original object has an /Extends key, preserve it. 
QPDFObjectHandle dict = object.getDict(); QPDFObjectHandle extends = dict.getKey("/Extends"); if (extends.isIndirect()) { QTC::TC("qpdf", "QPDFWriter copy Extends"); writeStringQDF("\n "); writeString(" /Extends "); unparseChild(extends, 1, f_in_ostream); } } writeStringQDF("\n"); writeStringNoQDF(" "); writeString(">>\nstream\n"); if (this->m->encrypted) { QTC::TC("qpdf", "QPDFWriter encrypt object stream"); } pushEncryptionFilter(); writeBuffer(stream_buffer); popPipelineStack(); if (this->m->newline_before_endstream) { writeString("\n"); } writeString("endstream"); this->m->cur_data_key.clear(); closeObject(new_id); }
0
[ "CWE-787" ]
qpdf
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
35,625,794,004,386,817,000,000,000,000,000,000,000
157
Fix sign and conversion warnings (major) This makes all integer type conversions that have potential data loss explicit with calls that do range checks and raise an exception. After this commit, qpdf builds with no warnings when -Wsign-conversion -Wconversion is used with gcc or clang or when -W3 -Wd4800 is used with MSVC. This significantly reduces the likelihood of potential crashes from bogus integer values. There are some parts of the code that take int when they should take size_t or an offset. Such places would make qpdf not support files with more than 2^31 of something that usually wouldn't be so large. In the event that such a file shows up and is valid, at least qpdf would raise an error in the right spot so the issue could be legitimately addressed rather than failing in some weird way because of a silent overflow condition.
archive_read_data_skip(struct archive *_a) { struct archive_read *a = (struct archive_read *)_a; int r; const void *buff; size_t size; int64_t offset; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_DATA, "archive_read_data_skip"); if (a->format->read_data_skip != NULL) r = (a->format->read_data_skip)(a); else { while ((r = archive_read_data_block(&a->archive, &buff, &size, &offset)) == ARCHIVE_OK) ; } if (r == ARCHIVE_EOF) r = ARCHIVE_OK; a->archive.state = ARCHIVE_STATE_HEADER; return (r); }
0
[ "CWE-125" ]
libarchive
e6c9668f3202215ddb71617b41c19b6f05acf008
298,098,208,055,119,070,000,000,000,000,000,000,000
26
Add a check to archive_read_filter_consume to reject any attempts to move the file pointer by a negative amount. Note: Either this or commit 3865cf2 provides a fix for Issue 394.
void addCopy(const LowerCaseString& key, uint64_t value) override { header_map_->addCopy(key, value); header_map_->verifyByteSizeInternalForTest(); }
0
[]
envoy
2c60632d41555ec8b3d9ef5246242be637a2db0f
230,725,240,412,376,820,000,000,000,000,000,000,000
4
http: header map security fixes for duplicate headers (#197) Previously header matching did not match on all headers for non-inline headers. This patch changes the default behavior to always logically match on all headers. Multiple individual headers will be logically concatenated with ',' similar to what is done with inline headers. This makes the behavior effectively consistent. This behavior can be temporary reverted by setting the runtime value "envoy.reloadable_features.header_match_on_all_headers" to "false". Targeted fixes have been additionally performed on the following extensions which make them consider all duplicate headers by default as a comma concatenated list: 1) Any extension using CEL matching on headers. 2) The header to metadata filter. 3) The JWT filter. 4) The Lua filter. Like primary header matching used in routing, RBAC, etc. this behavior can be disabled by setting the runtime value "envoy.reloadable_features.header_match_on_all_headers" to false. Finally, the setCopy() header map API previously only set the first header in the case of duplicate non-inline headers. setCopy() now behaves similiarly to the other set*() APIs and replaces all found headers with a single value. This may have had security implications in the extauth filter which uses this API. This behavior can be disabled by setting the runtime value "envoy.reloadable_features.http_set_copy_replace_all_headers" to false. Fixes https://github.com/envoyproxy/envoy-setec/issues/188 Signed-off-by: Matt Klein <[email protected]>
EXPORTED int annotate_state_store(annotate_state_t *state, struct entryattlist *l) { int r = 0; struct entryattlist *e = l; struct attvaluelist *av; annotate_state_start(state); /* Build a list of callbacks for storing the annotations */ while (e) { int attribs; const annotate_entrydesc_t *desc = NULL; struct annotate_entry_list *nentry = NULL; /* See if we support this entry */ r = find_desc_store(state, e->entry, &desc); if (r) goto cleanup; /* Add this entry to our list only if it applies to our particular server type */ if ((desc->proxytype != PROXY_ONLY) || proxy_store_func) nentry = _annotate_state_add_entry(state, desc, e->entry); /* See if we are allowed to set the given attributes. */ attribs = desc->attribs; av = e->attvalues; while (av) { if (!strcmp(av->attrib, "value.shared")) { if (!(attribs & ATTRIB_VALUE_SHARED)) { r = IMAP_PERMISSION_DENIED; goto cleanup; } r = annotate_canon_value(&av->value, desc->type); if (r) goto cleanup; if (nentry) { buf_init_ro(&nentry->shared, av->value.s, av->value.len); nentry->have_shared = 1; } } else if (!strcmp(av->attrib, "content-type.shared") || !strcmp(av->attrib, "content-type.priv")) { syslog(LOG_WARNING, "annotatemore_store: client used " "deprecated attribute \"%s\", ignoring", av->attrib); } else if (!strcmp(av->attrib, "value.priv")) { if (!(attribs & ATTRIB_VALUE_PRIV)) { r = IMAP_PERMISSION_DENIED; goto cleanup; } r = annotate_canon_value(&av->value, desc->type); if (r) goto cleanup; if (nentry) { buf_init_ro(&nentry->priv, av->value.s, av->value.len); nentry->have_priv = 1; } } else { r = IMAP_PERMISSION_DENIED; goto cleanup; } av = av->next; } e = e->next; } if (state->which == ANNOTATION_SCOPE_SERVER) { r = _annotate_store_entries(state); } else if (state->which == ANNOTATION_SCOPE_MAILBOX) { if (proxy_store_func) { r = annotate_state_need_mbentry(state); if (r) goto cleanup; assert(state->mbentry); } else assert(state->mailbox); r = _annotate_store_entries(state); if (r) goto cleanup; state->count++; 
if (proxy_store_func && state->mbentry->server && !hash_lookup(state->mbentry->server, &state->server_table)) { hash_insert(state->mbentry->server, (void *)0xDEADBEEF, &state->server_table); } if (!r && !state->count) r = IMAP_MAILBOX_NONEXISTENT; if (proxy_store_func) { if (!r) { /* proxy command to backends */ struct proxy_rock prock = { NULL, NULL }; prock.mbox_pat = state->mbentry->ext_name; prock.entryatts = l; hash_enumerate(&state->server_table, store_proxy, &prock); } } } else if (state->which == ANNOTATION_SCOPE_MESSAGE) { r = _annotate_store_entries(state); if (r) goto cleanup; } cleanup: annotate_state_finish(state); return r; }
0
[ "CWE-732" ]
cyrus-imapd
621f9e41465b521399f691c241181300fab55995
63,002,952,487,324,540,000,000,000,000,000,000,000
120
annotate: don't allow everyone to write shared server entries
void ipc_print_size(int unit, char *msg, uint64_t size, const char *end, int width) { char format[32]; if (!msg) /* NULL */ ; else if (msg[strlen(msg) - 1] == '=') printf("%s", msg); else if (unit == IPC_UNIT_BYTES) printf(_("%s (bytes) = "), msg); else if (unit == IPC_UNIT_KB) printf(_("%s (kbytes) = "), msg); else printf("%s = ", msg); switch (unit) { case IPC_UNIT_DEFAULT: case IPC_UNIT_BYTES: sprintf(format, "%%%dju", width); printf(format, size); break; case IPC_UNIT_KB: sprintf(format, "%%%dju", width); printf(format, size / 1024); break; case IPC_UNIT_HUMAN: { char *tmp; sprintf(format, "%%%ds", width); printf(format, (tmp = size_to_human_string(SIZE_SUFFIX_1LETTER, size))); free(tmp); break; } default: /* impossible occurred */ abort(); } if (end) printf("%s", end); }
0
[ "CWE-190" ]
util-linux
1c9143d0c1f979c3daf10e1c37b5b1e916c22a1c
30,893,775,040,525,440,000,000,000,000,000,000,000
42
sys-utils/ipcutils: be careful when call calloc() for uint64 nmembs Fix: https://github.com/karelzak/util-linux/issues/1395 Signed-off-by: Karel Zak <[email protected]>
rl_save_prompt () { saved_local_prompt = local_prompt; saved_local_prefix = local_prompt_prefix; saved_prefix_length = prompt_prefix_length; saved_local_length = local_prompt_len; saved_last_invisible = prompt_last_invisible; saved_visible_length = prompt_visible_length; saved_invis_chars_first_line = prompt_invis_chars_first_line; saved_physical_chars = prompt_physical_chars; local_prompt = local_prompt_prefix = (char *)0; local_prompt_len = 0; prompt_last_invisible = prompt_visible_length = prompt_prefix_length = 0; prompt_invis_chars_first_line = prompt_physical_chars = 0; }
0
[]
bash
955543877583837c85470f7fb8a97b7aa8d45e6c
289,004,047,752,624,850,000,000,000,000,000,000,000
16
bash-4.4-rc2 release
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) { struct hrtimer_clock_base *base; for (;;) { base = timer->base; if (likely(base != NULL)) { spin_lock_irqsave(&base->cpu_base->lock, *flags); if (likely(base == timer->base)) return base; /* The timer has migrated to another CPU: */ spin_unlock_irqrestore(&base->cpu_base->lock, *flags); } cpu_relax(); } }
0
[ "CWE-189" ]
linux-2.6
13788ccc41ceea5893f9c747c59bc0b28f2416c2
127,314,044,178,947,930,000,000,000,000,000,000,000
17
[PATCH] hrtimer: prevent overrun DoS in hrtimer_forward() hrtimer_forward() does not check for the possible overflow of timer->expires. This can happen on 64 bit machines with large interval values and results currently in an endless loop in the softirq because the expiry value becomes negative and therefor the timer is expired all the time. Check for this condition and set the expiry value to the max. expiry time in the future. The fix should be applied to stable kernel series as well. Signed-off-by: Thomas Gleixner <[email protected]> Acked-by: Ingo Molnar <[email protected]> Cc: <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
TEST_F(RouterTest, PoolFailureWithPriority) { ON_CALL(callbacks_.route_->route_entry_, priority()) .WillByDefault(Return(Upstream::ResourcePriority::High)); EXPECT_CALL(cm_.thread_local_cluster_, httpConnPool(Upstream::ResourcePriority::High, _, &router_)); EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _)) .WillOnce(Invoke([&](Http::StreamDecoder&, Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* { callbacks.onPoolFailure(ConnectionPool::PoolFailureReason::RemoteConnectionFailure, "tls version mismatch", cm_.thread_local_cluster_.conn_pool_.host_); return nullptr; })); Http::TestResponseHeaderMapImpl response_headers{ {":status", "503"}, {"content-length", "139"}, {"content-type", "text/plain"}}; EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false)); EXPECT_CALL(callbacks_, encodeData(_, true)); EXPECT_CALL(callbacks_.stream_info_, setResponseFlag(StreamInfo::ResponseFlag::UpstreamConnectionFailure)); EXPECT_CALL(callbacks_.stream_info_, onUpstreamHostSelected(_)) .WillOnce(Invoke([&](const Upstream::HostDescriptionConstSharedPtr host) -> void { EXPECT_EQ(host_address_, host->address()); })); Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); router_.decodeHeaders(headers, true); EXPECT_TRUE(verifyHostUpstreamStats(0, 1)); // Pool failure, so upstream request was not initiated. EXPECT_EQ(0U, callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value()); EXPECT_EQ(callbacks_.details(), "upstream_reset_before_response_started{connection failure,tls version mismatch}"); }
0
[ "CWE-703" ]
envoy
18871dbfb168d3512a10c78dd267ff7c03f564c6
65,704,780,293,927,495,000,000,000,000,000,000,000
34
[1.18] CVE-2022-21655 Crash with direct_response Signed-off-by: Otto van der Schaaf <[email protected]>
_pixops_composite_real (guchar *dest_buf, int render_x0, int render_y0, int render_x1, int render_y1, int dest_rowstride, int dest_channels, gboolean dest_has_alpha, const guchar *src_buf, int src_width, int src_height, int src_rowstride, int src_channels, gboolean src_has_alpha, double scale_x, double scale_y, PixopsInterpType interp_type, int overall_alpha) { PixopsFilter filter; PixopsLineFunc line_func; #ifdef USE_MMX gboolean found_mmx = _pixops_have_mmx (); #endif g_return_if_fail (!(dest_channels == 3 && dest_has_alpha)); g_return_if_fail (!(src_channels == 3 && src_has_alpha)); if (scale_x == 0 || scale_y == 0) return; if (interp_type == PIXOPS_INTERP_NEAREST) { if (scale_x == 1.0 && scale_y == 1.0) pixops_composite_nearest_noscale (dest_buf, render_x0, render_y0, render_x1, render_y1, dest_rowstride, dest_channels, dest_has_alpha, src_buf, src_width, src_height, src_rowstride, src_channels, src_has_alpha, overall_alpha); else pixops_composite_nearest (dest_buf, render_x0, render_y0, render_x1, render_y1, dest_rowstride, dest_channels, dest_has_alpha, src_buf, src_width, src_height, src_rowstride, src_channels, src_has_alpha, scale_x, scale_y, overall_alpha); return; } filter.overall_alpha = overall_alpha / 255.; if (!make_weights (&filter, interp_type, scale_x, scale_y)) return; if (filter.x.n == 2 && filter.y.n == 2 && dest_channels == 4 && src_channels == 4 && src_has_alpha && !dest_has_alpha) { #ifdef USE_MMX if (found_mmx) line_func = composite_line_22_4a4_mmx_stub; else #endif line_func = composite_line_22_4a4; } else line_func = composite_line; pixops_process (dest_buf, render_x0, render_y0, render_x1, render_y1, dest_rowstride, dest_channels, dest_has_alpha, src_buf, src_width, src_height, src_rowstride, src_channels, src_has_alpha, scale_x, scale_y, 0, 0, 0, 0, 0, &filter, line_func, composite_pixel); g_free (filter.x.weights); g_free (filter.y.weights); }
0
[ "CWE-119" ]
gdk-pixbuf
19f9685dbff7d1f929c61cf99188df917a18811d
85,965,447,314,316,300,000,000,000,000,000,000,000
73
pixops: Fail make_weights functions on OOM The weights could grow very large under certain circumstances, in particular in security-relevant conditions, including the testsuite. By allowing the weight allocation to fail, this can be worked around. https://bugzilla.gnome.org/show_bug.cgi?id=754387
static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, unsigned int *timeouts) { return nf_generic_should_process(nf_ct_protonum(ct)); }
0
[ "CWE-20", "CWE-254", "CWE-787" ]
linux
db29a9508a9246e77087c5531e45b2c88ec6988b
282,202,583,077,554,280,000,000,000,000,000,000,000
5
netfilter: conntrack: disable generic tracking for known protocols Given following iptables ruleset: -P FORWARD DROP -A FORWARD -m sctp --dport 9 -j ACCEPT -A FORWARD -p tcp --dport 80 -j ACCEPT -A FORWARD -p tcp -m conntrack -m state ESTABLISHED,RELATED -j ACCEPT One would assume that this allows SCTP on port 9 and TCP on port 80. Unfortunately, if the SCTP conntrack module is not loaded, this allows *all* SCTP communication, to pass though, i.e. -p sctp -j ACCEPT, which we think is a security issue. This is because on the first SCTP packet on port 9, we create a dummy "generic l4" conntrack entry without any port information (since conntrack doesn't know how to extract this information). All subsequent packets that are unknown will then be in established state since they will fallback to proto_generic and will match the 'generic' entry. Our originally proposed version [1] completely disabled generic protocol tracking, but Jozsef suggests to not track protocols for which a more suitable helper is available, hence we now mitigate the issue for in tree known ct protocol helpers only, so that at least NAT and direction information will still be preserved for others. [1] http://www.spinics.net/lists/netfilter-devel/msg33430.html Joint work with Daniel Borkmann. Signed-off-by: Florian Westphal <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Acked-by: Jozsef Kadlecsik <[email protected]> Signed-off-by: Pablo Neira Ayuso <[email protected]>
Item_bool_rowready_func2* Ge_creator::create(THD *thd, Item *a, Item *b) const { return new(thd->mem_root) Item_func_ge(thd, a, b); }
0
[ "CWE-617" ]
server
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
34,176,650,511,819,973,000,000,000,000,000,000,000
4
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order... When doing condition pushdown from HAVING into WHERE, Item_equal::create_pushable_equalities() calls item->set_extraction_flag(IMMUTABLE_FL) for constant items. Then, Item::cleanup_excluding_immutables_processor() checks for this flag to see if it should call item->cleanup() or leave the item as-is. The failure happens when a constant item has a non-constant one inside it, like: (tbl.col=0 AND impossible_cond) item->walk(cleanup_excluding_immutables_processor) works in a bottom-up way so it 1. will call Item_func_eq(tbl.col=0)->cleanup() 2. will not call Item_cond_and->cleanup (as the AND is constant) This creates an item tree where a fixed Item has an un-fixed Item inside it which eventually causes an assertion failure. Fixed by introducing this rule: instead of just calling item->set_extraction_flag(IMMUTABLE_FL); we call Item::walk() to set the flag for all sub-items of the item.
router_add_exit_policy(routerinfo_t *router, directory_token_t *tok) { addr_policy_t *newe; newe = router_parse_addr_policy(tok); if (!newe) return -1; if (! router->exit_policy) router->exit_policy = smartlist_create(); if (((tok->tp == K_ACCEPT6 || tok->tp == K_REJECT6) && tor_addr_family(&newe->addr) == AF_INET) || ((tok->tp == K_ACCEPT || tok->tp == K_REJECT) && tor_addr_family(&newe->addr) == AF_INET6)) { log_warn(LD_DIR, "Mismatch between field type and address type in exit " "policy"); addr_policy_free(newe); return -1; } smartlist_add(router->exit_policy, newe); return 0; }
0
[ "CWE-399" ]
tor
57e35ad3d91724882c345ac709666a551a977f0f
99,306,528,084,952,400,000,000,000,000,000,000,000
24
Avoid possible segfault when handling networkstatus vote with bad flavor Fix for 6530; fix on 0.2.2.6-alpha.
AcceptConnReq(ptcplstn_t *pLstn, int *newSock, prop_t **peerName, prop_t **peerIP) { int sockflags; struct sockaddr_storage addr; socklen_t addrlen = sizeof(addr); int iNewSock = -1; DEFiRet; iNewSock = accept(pLstn->sock, (struct sockaddr*) &addr, &addrlen); if(iNewSock < 0) { if(errno == EAGAIN || errno == EWOULDBLOCK || errno == EMFILE) ABORT_FINALIZE(RS_RET_NO_MORE_DATA); ABORT_FINALIZE(RS_RET_ACCEPT_ERR); } if(pLstn->pSrv->bKeepAlive) EnableKeepAlive(pLstn, iNewSock);/* we ignore errors, best to do! */ CHKiRet(getPeerNames(peerName, peerIP, (struct sockaddr *) &addr, pLstn->pSrv->bUnixSocket)); /* set the new socket to non-blocking IO */ if((sockflags = fcntl(iNewSock, F_GETFL)) != -1) { sockflags |= O_NONBLOCK; /* SETFL could fail too, so get it caught by the subsequent * error check. */ sockflags = fcntl(iNewSock, F_SETFL, sockflags); } if(sockflags == -1) { DBGPRINTF("error %d setting fcntl(O_NONBLOCK) on tcp socket %d", errno, iNewSock); prop.Destruct(peerName); prop.Destruct(peerIP); ABORT_FINALIZE(RS_RET_IO_ERROR); } *newSock = iNewSock; finalize_it: if(iRet != RS_RET_OK) { /* the close may be redundant, but that doesn't hurt... */ if(iNewSock != -1) close(iNewSock); } RETiRet; }
0
[ "CWE-190" ]
rsyslog
0381a0de64a5a048c3d48b79055bd9848d0c7fc2
287,576,574,911,028,300,000,000,000,000,000,000,000
47
imptcp: fix Segmentation Fault when octet count is to high
static int tipc_nl_retrieve_key(struct nlattr **attrs, struct tipc_aead_key **key) { struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY]; if (!attr) return -ENODATA; *key = (struct tipc_aead_key *)nla_data(attr); if (nla_len(attr) < tipc_aead_key_size(*key)) return -EINVAL; return 0; }
1
[]
linux
0217ed2848e8538bcf9172d97ed2eeb4a26041bb
284,003,867,985,169,170,000,000,000,000,000,000,000
14
tipc: better validate user input in tipc_nl_retrieve_key() Before calling tipc_aead_key_size(ptr), we need to ensure we have enough data to dereference ptr->keylen. We probably also want to make sure tipc_aead_key_size() wont overflow with malicious ptr->keylen values. Syzbot reported: BUG: KMSAN: uninit-value in __tipc_nl_node_set_key net/tipc/node.c:2971 [inline] BUG: KMSAN: uninit-value in tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023 CPU: 0 PID: 21060 Comm: syz-executor.5 Not tainted 5.11.0-rc7-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:79 [inline] dump_stack+0x21c/0x280 lib/dump_stack.c:120 kmsan_report+0xfb/0x1e0 mm/kmsan/kmsan_report.c:118 __msan_warning+0x5f/0xa0 mm/kmsan/kmsan_instr.c:197 __tipc_nl_node_set_key net/tipc/node.c:2971 [inline] tipc_nl_node_set_key+0x9bf/0x13b0 net/tipc/node.c:3023 genl_family_rcv_msg_doit net/netlink/genetlink.c:739 [inline] genl_family_rcv_msg net/netlink/genetlink.c:783 [inline] genl_rcv_msg+0x1319/0x1610 net/netlink/genetlink.c:800 netlink_rcv_skb+0x6fa/0x810 net/netlink/af_netlink.c:2494 genl_rcv+0x63/0x80 net/netlink/genetlink.c:811 netlink_unicast_kernel net/netlink/af_netlink.c:1304 [inline] netlink_unicast+0x11d6/0x14a0 net/netlink/af_netlink.c:1330 netlink_sendmsg+0x1740/0x1840 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec net/socket.c:652 [inline] sock_sendmsg net/socket.c:672 [inline] ____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345 ___sys_sendmsg net/socket.c:2399 [inline] __sys_sendmsg+0x714/0x830 net/socket.c:2432 __compat_sys_sendmsg net/compat.c:347 [inline] __do_compat_sys_sendmsg net/compat.c:354 [inline] __se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351 __ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351 do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline] __do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141 do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166 
do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209 entry_SYSENTER_compat_after_hwframe+0x4d/0x5c RIP: 0023:0xf7f60549 Code: 03 74 c0 01 10 05 03 74 b8 01 10 06 03 74 b4 01 10 07 03 74 b0 01 10 08 03 74 d8 01 00 00 00 00 00 51 52 55 89 e5 0f 34 cd 80 <5d> 5a 59 c3 90 90 90 90 8d b4 26 00 00 00 00 8d b4 26 00 00 00 00 RSP: 002b:00000000f555a5fc EFLAGS: 00000296 ORIG_RAX: 0000000000000172 RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000020000200 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000 R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 Uninit was created at: kmsan_save_stack_with_flags mm/kmsan/kmsan.c:121 [inline] kmsan_internal_poison_shadow+0x5c/0xf0 mm/kmsan/kmsan.c:104 kmsan_slab_alloc+0x8d/0xe0 mm/kmsan/kmsan_hooks.c:76 slab_alloc_node mm/slub.c:2907 [inline] __kmalloc_node_track_caller+0xa37/0x1430 mm/slub.c:4527 __kmalloc_reserve net/core/skbuff.c:142 [inline] __alloc_skb+0x2f8/0xb30 net/core/skbuff.c:210 alloc_skb include/linux/skbuff.h:1099 [inline] netlink_alloc_large_skb net/netlink/af_netlink.c:1176 [inline] netlink_sendmsg+0xdbc/0x1840 net/netlink/af_netlink.c:1894 sock_sendmsg_nosec net/socket.c:652 [inline] sock_sendmsg net/socket.c:672 [inline] ____sys_sendmsg+0xcfc/0x12f0 net/socket.c:2345 ___sys_sendmsg net/socket.c:2399 [inline] __sys_sendmsg+0x714/0x830 net/socket.c:2432 __compat_sys_sendmsg net/compat.c:347 [inline] __do_compat_sys_sendmsg net/compat.c:354 [inline] __se_compat_sys_sendmsg+0xa7/0xc0 net/compat.c:351 __ia32_compat_sys_sendmsg+0x4a/0x70 net/compat.c:351 do_syscall_32_irqs_on arch/x86/entry/common.c:79 [inline] __do_fast_syscall_32+0x102/0x160 arch/x86/entry/common.c:141 do_fast_syscall_32+0x6a/0xc0 arch/x86/entry/common.c:166 do_SYSENTER_32+0x73/0x90 arch/x86/entry/common.c:209 entry_SYSENTER_compat_after_hwframe+0x4d/0x5c Fixes: e1f32190cf7d ("tipc: add 
support for AEAD key setting via netlink") Signed-off-by: Eric Dumazet <[email protected]> Cc: Tuong Lien <[email protected]> Cc: Jon Maloy <[email protected]> Cc: Ying Xue <[email protected]> Reported-by: syzbot <[email protected]> Signed-off-by: David S. Miller <[email protected]>
FLAC__bool read_subframe_lpc_(FLAC__StreamDecoder *decoder, unsigned channel, unsigned bps, const unsigned order, FLAC__bool do_full_decode) { FLAC__Subframe_LPC *subframe = &decoder->private_->frame.subframes[channel].data.lpc; FLAC__int32 i32; FLAC__uint32 u32; unsigned u; decoder->private_->frame.subframes[channel].type = FLAC__SUBFRAME_TYPE_LPC; subframe->residual = decoder->private_->residual[channel]; subframe->order = order; /* read warm-up samples */ for(u = 0; u < order; u++) { if(!FLAC__bitreader_read_raw_int32(decoder->private_->input, &i32, bps)) return false; /* read_callback_ sets the state for us */ subframe->warmup[u] = i32; } /* read qlp coeff precision */ if(!FLAC__bitreader_read_raw_uint32(decoder->private_->input, &u32, FLAC__SUBFRAME_LPC_QLP_COEFF_PRECISION_LEN)) return false; /* read_callback_ sets the state for us */ if(u32 == (1u << FLAC__SUBFRAME_LPC_QLP_COEFF_PRECISION_LEN) - 1) { send_error_to_client_(decoder, FLAC__STREAM_DECODER_ERROR_STATUS_LOST_SYNC); decoder->protected_->state = FLAC__STREAM_DECODER_SEARCH_FOR_FRAME_SYNC; return true; } subframe->qlp_coeff_precision = u32+1; /* read qlp shift */ if(!FLAC__bitreader_read_raw_int32(decoder->private_->input, &i32, FLAC__SUBFRAME_LPC_QLP_SHIFT_LEN)) return false; /* read_callback_ sets the state for us */ subframe->quantization_level = i32; /* read quantized lp coefficiencts */ for(u = 0; u < order; u++) { if(!FLAC__bitreader_read_raw_int32(decoder->private_->input, &i32, subframe->qlp_coeff_precision)) return false; /* read_callback_ sets the state for us */ subframe->qlp_coeff[u] = i32; } /* read entropy coding method info */ if(!FLAC__bitreader_read_raw_uint32(decoder->private_->input, &u32, FLAC__ENTROPY_CODING_METHOD_TYPE_LEN)) return false; /* read_callback_ sets the state for us */ subframe->entropy_coding_method.type = (FLAC__EntropyCodingMethodType)u32; switch(subframe->entropy_coding_method.type) { case FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE: case 
FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE2: if(!FLAC__bitreader_read_raw_uint32(decoder->private_->input, &u32, FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE_ORDER_LEN)) return false; /* read_callback_ sets the state for us */ subframe->entropy_coding_method.data.partitioned_rice.order = u32; subframe->entropy_coding_method.data.partitioned_rice.contents = &decoder->private_->partitioned_rice_contents[channel]; break; default: send_error_to_client_(decoder, FLAC__STREAM_DECODER_ERROR_STATUS_UNPARSEABLE_STREAM); decoder->protected_->state = FLAC__STREAM_DECODER_SEARCH_FOR_FRAME_SYNC; return true; } /* read residual */ switch(subframe->entropy_coding_method.type) { case FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE: case FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE2: if(!read_residual_partitioned_rice_(decoder, order, subframe->entropy_coding_method.data.partitioned_rice.order, &decoder->private_->partitioned_rice_contents[channel], decoder->private_->residual[channel], /*is_extended=*/subframe->entropy_coding_method.type == FLAC__ENTROPY_CODING_METHOD_PARTITIONED_RICE2)) return false; break; default: FLAC__ASSERT(0); } /* decode the subframe */ if(do_full_decode) { memcpy(decoder->private_->output[channel], subframe->warmup, sizeof(FLAC__int32) * order); /*@@@@@@ technically not pessimistic enough, should be more like if( (FLAC__uint64)order * ((((FLAC__uint64)1)<<bps)-1) * ((1<<subframe->qlp_coeff_precision)-1) < (((FLAC__uint64)-1) << 32) ) */ if(bps + subframe->qlp_coeff_precision + FLAC__bitmath_ilog2(order) <= 32) if(bps <= 16 && subframe->qlp_coeff_precision <= 16) decoder->private_->local_lpc_restore_signal_16bit(decoder->private_->residual[channel], decoder->private_->frame.header.blocksize-order, subframe->qlp_coeff, order, subframe->quantization_level, decoder->private_->output[channel]+order); else decoder->private_->local_lpc_restore_signal(decoder->private_->residual[channel], decoder->private_->frame.header.blocksize-order, subframe->qlp_coeff, order, 
subframe->quantization_level, decoder->private_->output[channel]+order); else decoder->private_->local_lpc_restore_signal_64bit(decoder->private_->residual[channel], decoder->private_->frame.header.blocksize-order, subframe->qlp_coeff, order, subframe->quantization_level, decoder->private_->output[channel]+order); } return true; }
0
[ "CWE-119" ]
flac
5b3033a2b355068c11fe637e14ac742d273f076e
184,015,670,015,576,500,000,000,000,000,000,000,000
87
src/libFLAC/stream_decoder.c : Fix buffer read overflow. This is CVE-2014-8962. Reported-by: Michele Spagnuolo, Google Security Team <[email protected]>
static struct buffer_head *minix_update_inode(struct inode *inode) { if (INODE_VERSION(inode) == MINIX_V1) return V1_minix_update_inode(inode); else return V2_minix_update_inode(inode); }
0
[ "CWE-189" ]
linux-2.6
f5fb09fa3392ad43fbcfc2f4580752f383ab5996
271,128,992,984,491,080,000,000,000,000,000,000,000
7
[PATCH] Fix for minix crash Mounting a (corrupt) minix filesystem with zero s_zmap_blocks gives a spectacular crash on my 2.6.17.8 system, no doubt because minix/inode.c does an unconditional minix_set_bit(0,sbi->s_zmap[0]->b_data); [[email protected]: make labels conistent while we're there] Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
mlx5_rx_queue_lwm_query(struct rte_eth_dev *dev, uint16_t *queue_id, uint8_t *lwm) { struct mlx5_priv *priv = dev->data->dev_private; unsigned int rxq_id, found = 0, n; struct mlx5_rxq_priv *rxq; if (!queue_id) return -EINVAL; /* Query all the Rx queues of the port in a circular way. */ for (rxq_id = *queue_id, n = 0; n < priv->rxqs_n; n++) { rxq = mlx5_rxq_get(dev, rxq_id); if (rxq && rxq->lwm_event_pending) { pthread_mutex_lock(&priv->sh->lwm_config_lock); rxq->lwm_event_pending = 0; pthread_mutex_unlock(&priv->sh->lwm_config_lock); *queue_id = rxq_id; found = 1; if (lwm) *lwm = mlx5_rxq_lwm_to_percentage(rxq); break; } rxq_id = (rxq_id + 1) % priv->rxqs_n; } return found; }
0
[]
dpdk
60b254e3923d007bcadbb8d410f95ad89a2f13fa
208,663,495,112,602,560,000,000,000,000,000,000,000
26
net/mlx5: fix Rx queue recovery mechanism The local variables are getting inconsistent in data receiving routines after queue error recovery. Receive queue consumer index is getting wrong, need to reset one to the size of the queue (as RQ was fully replenished in recovery procedure). In MPRQ case, also the local consumed strd variable should be reset. CVE-2022-28199 Fixes: 88c0733535d6 ("net/mlx5: extend Rx completion with error handling") Cc: [email protected] Signed-off-by: Alexander Kozyrev <[email protected]> Signed-off-by: Matan Azrad <[email protected]>
dse_add(Slapi_PBlock *pb) /* JCM There should only be one exit point from this function! */ { Slapi_Entry *e = NULL; /*The new entry to add*/ Slapi_Entry *e_copy = NULL; /* copy of added entry */ char *errbuf = NULL; int rc = LDAP_SUCCESS; int error = -1; int dont_write_file = 0; /* default */ struct dse *pdse; int returncode = LDAP_SUCCESS; char returntext[SLAPI_DSE_RETURNTEXT_SIZE] = ""; Slapi_DN *sdn = NULL; Slapi_DN parent; int need_be_postop = 0; PRBool global_lock_owned = PR_FALSE; /* * Get the database, the dn and the entry to add */ if (slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &pdse) < 0 || slapi_pblock_get(pb, SLAPI_ADD_TARGET_SDN, &sdn) < 0 || slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &e) < 0 || (NULL == pdse)) { rc = LDAP_OPERATIONS_ERROR; goto done; } slapi_pblock_get(pb, SLAPI_DSE_DONT_WRITE_WHEN_ADDING, &dont_write_file); if (!dont_write_file && dse_check_for_readonly_error(pb, pdse)) { return (error); /* result already sent */ } /* * Check to make sure the entry passes the schema check */ if (slapi_entry_schema_check(pb, e) != 0) { char *errtext; slapi_log_err(SLAPI_DSE_TRACELEVEL, "dse_add", "entry failed schema check\n"); slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &errtext); if (errtext && errtext[0]) { PL_strncpyz(returntext, errtext, sizeof(returntext)); } rc = LDAP_OBJECT_CLASS_VIOLATION; e = NULL; /* caller will free upon error */ goto done; } /* Check if the attribute values in the entry obey the syntaxes */ if (slapi_entry_syntax_check(pb, e, 0) != 0) { char *errtext; slapi_log_err(SLAPI_DSE_TRACELEVEL, "dse_add", "entry failed syntax check\n"); slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &errtext); if (errtext && errtext[0]) { PL_strncpyz(returntext, errtext, sizeof(returntext)); } rc = LDAP_INVALID_SYNTAX; e = NULL; /* caller will free upon error */ goto done; } /* * Attempt to find this dn. 
*/ { Slapi_Entry *existingentry = dse_get_entry_copy(pdse, sdn, DSE_USE_LOCK); if (existingentry != NULL) { /* * If we've reached this code, there is an entry * whose dn matches dn, so tell the user and return */ slapi_entry_free(existingentry); rc = LDAP_ALREADY_EXISTS; e = NULL; /* caller will free upon error */ goto done; } } /* * Get the parent dn and see if the corresponding entry exists. * If the parent does not exist, only allow the "root" user to * add the entry. */ slapi_sdn_init(&parent); slapi_sdn_get_parent(sdn, &parent); if (!slapi_sdn_isempty(&parent)) { Slapi_Entry *parententry = NULL; parententry = dse_get_entry_copy(pdse, &parent, DSE_USE_LOCK); if (parententry == NULL) { rc = LDAP_NO_SUCH_OBJECT; slapi_log_err(SLAPI_DSE_TRACELEVEL, " dse_add", "Narent does not exist\n"); slapi_sdn_done(&parent); e = NULL; /* caller will free upon error */ goto done; } rc = plugin_call_acl_plugin(pb, parententry, NULL, NULL, SLAPI_ACL_ADD, ACLPLUGIN_ACCESS_DEFAULT, &errbuf); slapi_entry_free(parententry); if (rc != LDAP_SUCCESS) { slapi_log_err(SLAPI_DSE_TRACELEVEL, "dse_add", "No access to parent\n"); if (errbuf && errbuf[0]) { PL_strncpyz(returntext, errbuf, sizeof(returntext)); } slapi_ch_free_string(&errbuf); slapi_sdn_done(&parent); e = NULL; /* caller will free upon error */ goto done; } } else { /* no parent */ int isroot; slapi_pblock_get(pb, SLAPI_REQUESTOR_ISROOT, &isroot); if (!isroot) { slapi_log_err(SLAPI_DSE_TRACELEVEL, "dse_add", "No parent and not root\n"); rc = LDAP_INSUFFICIENT_ACCESS; slapi_sdn_done(&parent); e = NULL; /* caller will free upon error */ goto done; } } slapi_sdn_done(&parent); /* * Before we add the entry, find out if the syntax of the aci * aci attribute values are correct or not. We don't want to add * the entry if the syntax is incorrect. 
*/ if (plugin_call_acl_verify_syntax(pb, e, &errbuf) != 0) { if (errbuf && errbuf[0]) { PL_strncpyz(returntext, errbuf, sizeof(returntext)); slapi_ch_free_string(&errbuf); } rc = LDAP_INVALID_SYNTAX; e = NULL; /* caller will free upon error */ goto done; } /* Possibly acquire the global backend lock */ if (global_backend_lock_requested()) { global_backend_lock_lock(); global_lock_owned = PR_TRUE; } if (dse_call_callback(pdse, pb, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, e, NULL, &returncode, returntext) != SLAPI_DSE_CALLBACK_OK) { if (!returncode) { slapi_log_err(SLAPI_LOG_ERR, "dse_add", "DSE PREOP callback returned error but did not set returncode\n"); returncode = LDAP_OPERATIONS_ERROR; } rc = returncode; e = NULL; /* caller will free upon error */ goto done; } /* next, give the be plugins a crack at it */ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &returncode); plugin_call_plugins(pb, SLAPI_PLUGIN_BE_PRE_ADD_FN); need_be_postop = 1; /* have to call be postops now */ if (!returncode) { slapi_pblock_get(pb, SLAPI_RESULT_CODE, &returncode); } if (!returncode) { /* finally, give the betxn plugins a crack at it */ plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_PRE_ADD_FN); if (!returncode) { slapi_pblock_get(pb, SLAPI_RESULT_CODE, &returncode); } } if (returncode) { if (!returntext[0]) { char *ldap_result_message = NULL; slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); if (ldap_result_message && ldap_result_message[0]) { PL_strncpyz(returntext, ldap_result_message, sizeof(returntext)); } } rc = returncode; e = NULL; /* caller will free upon error */ goto done; } /* * Check if we are adding a plugin */ if (dse_add_plugin(e, returntext)) { returncode = LDAP_UNWILLING_TO_PERFORM; goto done; } /* make copy for postop fns because add_entry_pb consumes the given entry */ e_copy = slapi_entry_dup(e); if (dse_add_entry_pb(pdse, e_copy, pb) != 0) { rc = LDAP_OPERATIONS_ERROR; e = NULL; /* caller will free upon error */ goto done; } /* The postop must be called after 
the write lock is released. */ dse_call_callback(pdse, pb, SLAPI_OPERATION_ADD, DSE_FLAG_POSTOP, e, NULL, &returncode, returntext); done: if (e) { slapi_pblock_set(pb, SLAPI_ENTRY_POST_OP, slapi_entry_dup(e)); } /* make sure OPRETURN and RESULT_CODE are set */ slapi_pblock_get(pb, SLAPI_PLUGIN_OPRETURN, &error); if (rc || returncode) { if (!error) { slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, rc ? &rc : &returncode); } if (!returncode) { returncode = rc; } } if (need_be_postop) { /* next, give the be txn plugins a crack at it */ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &returncode); plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_POST_ADD_FN); /* finally, give the be plugins a crack at it */ plugin_call_plugins(pb, SLAPI_PLUGIN_BE_POST_ADD_FN); if (!returncode) { slapi_pblock_get(pb, SLAPI_RESULT_CODE, &returncode); } } if (global_lock_owned) { global_backend_lock_unlock(); } slapi_send_ldap_result(pb, returncode, NULL, returntext[0] ? returntext : NULL, 0, NULL); return dse_add_return(rc, e); }
0
[ "CWE-200", "CWE-203" ]
389-ds-base
b6aae4d8e7c8a6ddd21646f94fef1bf7f22c3f32
3,425,452,480,417,442,700,000,000,000,000,000,000
228
Issue 4609 - CVE - info disclosure when authenticating Description: If you bind as a user that does not exist. Error 49 is returned instead of error 32. As error 32 discloses that the entry does not exist. When you bind as an entry that does not have userpassword set then error 48 (inappropriate auth) is returned, but this discloses that the entry does indeed exist. Instead we should always return error 49, even if the password is not set in the entry. This way we do not disclose to an attacker if the Bind DN exists or not. Relates: https://github.com/389ds/389-ds-base/issues/4609 Reviewed by: tbordaz(Thanks!)
sv_compquery (value) const char *value; { int nval = 100; if (value && *value) { nval = atoi (value); if (nval < 0) nval = 0; } rl_completion_query_items = nval; return 0; }
0
[]
bash
955543877583837c85470f7fb8a97b7aa8d45e6c
292,850,595,951,406,500,000,000,000,000,000,000,000
14
bash-4.4-rc2 release
static int crypt_check_data_device_size(struct crypt_device *cd) { int r; uint64_t size, size_min; /* Check data device size, require at least header or one sector */ size_min = crypt_get_data_offset(cd) << SECTOR_SHIFT ?: SECTOR_SIZE; r = device_size(cd->device, &size); if (r < 0) return r; if (size < size_min) { log_err(cd, _("Header detected but device %s is too small."), device_path(cd->device)); return -EINVAL; } return r; }
0
[ "CWE-345" ]
cryptsetup
0113ac2d889c5322659ad0596d4cfc6da53e356c
63,016,684,676,285,700,000,000,000,000,000,000,000
20
Fix CVE-2021-4122 - LUKS2 reencryption crash recovery attack Fix possible attacks against data confidentiality through LUKS2 online reencryption extension crash recovery. An attacker can modify on-disk metadata to simulate decryption in progress with crashed (unfinished) reencryption step and persistently decrypt part of the LUKS device. This attack requires repeated physical access to the LUKS device but no knowledge of user passphrases. The decryption step is performed after a valid user activates the device with a correct passphrase and modified metadata. There are no visible warnings for the user that such recovery happened (except using the luksDump command). The attack can also be reversed afterward (simulating crashed encryption from a plaintext) with possible modification of revealed plaintext. The problem was caused by reusing a mechanism designed for actual reencryption operation without reassessing the security impact for new encryption and decryption operations. While the reencryption requires calculating and verifying both key digests, no digest was needed to initiate decryption recovery if the destination is plaintext (no encryption key). Also, some metadata (like encryption cipher) is not protected, and an attacker could change it. Note that LUKS2 protects visible metadata only when a random change occurs. It does not protect against intentional modification but such modification must not cause a violation of data confidentiality. The fix introduces additional digest protection of reencryption metadata. The digest is calculated from known keys and critical reencryption metadata. Now an attacker cannot create correct metadata digest without knowledge of a passphrase for used keyslots. For more details, see LUKS2 On-Disk Format Specification version 1.1.0.
static inline int cidr(u128_t u) { uint64_t v; int n = 0; for(v = u.l; v > 0; v <<= 1) n++; for(v = u.h; v > 0; v <<= 1) n++; return n; }
0
[]
netmask
29a9c239bd1008363f5b34ffd6c2cef906f3660c
113,567,616,822,292,610,000,000,000,000,000,000,000
7
bump version to 2.4.4 * remove checks for negative unsigned ints, fixes #2 * harden error logging functions, fixes #3
bfad_im_vport_create(struct fc_vport *fc_vport, bool disable) { char *vname = fc_vport->symbolic_name; struct Scsi_Host *shost = fc_vport->shost; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_lport_cfg_s port_cfg; struct bfad_vport_s *vp; int status = 0, rc; unsigned long flags; memset(&port_cfg, 0, sizeof(port_cfg)); u64_to_wwn(fc_vport->node_name, (u8 *)&port_cfg.nwwn); u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn); if (strlen(vname) > 0) strcpy((char *)&port_cfg.sym_name, vname); port_cfg.roles = BFA_LPORT_ROLE_FCP_IM; spin_lock_irqsave(&bfad->bfad_lock, flags); list_for_each_entry(vp, &bfad->pbc_vport_list, list_entry) { if (port_cfg.pwwn == vp->fcs_vport.lport.port_cfg.pwwn) { port_cfg.preboot_vp = vp->fcs_vport.lport.port_cfg.preboot_vp; break; } } spin_unlock_irqrestore(&bfad->bfad_lock, flags); rc = bfad_vport_create(bfad, 0, &port_cfg, &fc_vport->dev); if (rc == BFA_STATUS_OK) { struct bfad_vport_s *vport; struct bfa_fcs_vport_s *fcs_vport; struct Scsi_Host *vshost; spin_lock_irqsave(&bfad->bfad_lock, flags); fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, port_cfg.pwwn); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (fcs_vport == NULL) return VPCERR_BAD_WWN; fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); if (disable) { spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_vport_stop(fcs_vport); spin_unlock_irqrestore(&bfad->bfad_lock, flags); fc_vport_set_state(fc_vport, FC_VPORT_DISABLED); } vport = fcs_vport->vport_drv; vshost = vport->drv_port.im_port->shost; fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn); fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn); fc_host_supported_classes(vshost) = FC_COS_CLASS3; memset(fc_host_supported_fc4s(vshost), 0, sizeof(fc_host_supported_fc4s(vshost))); /* For FCP type 0x08 */ if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM) fc_host_supported_fc4s(vshost)[2] = 1; /* For fibre channel services type 0x20 
*/ fc_host_supported_fc4s(vshost)[7] = 1; fc_host_supported_speeds(vshost) = bfad_im_supported_speeds(&bfad->bfa); fc_host_maxframe_size(vshost) = bfa_fcport_get_maxfrsize(&bfad->bfa); fc_vport->dd_data = vport; vport->drv_port.im_port->fc_vport = fc_vport; } else if (rc == BFA_STATUS_INVALID_WWN) return VPCERR_BAD_WWN; else if (rc == BFA_STATUS_VPORT_EXISTS) return VPCERR_BAD_WWN; else if (rc == BFA_STATUS_VPORT_MAX) return VPCERR_NO_FABRIC_SUPP; else if (rc == BFA_STATUS_VPORT_WWN_BP) return VPCERR_BAD_WWN; else return FC_VPORT_FAILED; return status; }
0
[ "CWE-400", "CWE-401" ]
linux
0e62395da2bd5166d7c9e14cbc7503b256a34cb0
63,876,084,689,527,575,000,000,000,000,000,000,000
87
scsi: bfa: release allocated memory in case of error In bfad_im_get_stats if bfa_port_get_stats fails, allocated memory needs to be released. Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Navid Emamdoost <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
static int event_enable_on_exec(struct perf_event *event, struct perf_event_context *ctx) { if (!event->attr.enable_on_exec) return 0; event->attr.enable_on_exec = 0; if (event->state >= PERF_EVENT_STATE_INACTIVE) return 0; __perf_event_mark_enabled(event); return 1; }
0
[ "CWE-703", "CWE-189" ]
linux
8176cced706b5e5d15887584150764894e94e02f
53,325,618,025,275,090,000,000,000,000,000,000,000
14
perf: Treat attr.config as u64 in perf_swevent_init() Trinity discovered that we fail to check all 64 bits of attr.config passed by user space, resulting to out-of-bounds access of the perf_swevent_enabled array in sw_perf_event_destroy(). Introduced in commit b0a873ebb ("perf: Register PMU implementations"). Signed-off-by: Tommi Rantala <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: [email protected] Cc: Paul Mackerras <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
void __init parse_efi_setup(u64 phys_addr, u32 data_len) { efi_setup = phys_addr + sizeof(struct setup_data); }
0
[ "CWE-388" ]
tip
4e78921ba4dd0aca1cc89168f45039add4183f8e
278,577,639,939,192,100,000,000,000,000,000,000,000
4
efi/x86/Add missing error handling to old_memmap 1:1 mapping code The old_memmap flow in efi_call_phys_prolog() performs numerous memory allocations, and either does not check for failure at all, or it does but fails to propagate it back to the caller, which may end up calling into the firmware with an incomplete 1:1 mapping. So let's fix this by returning NULL from efi_call_phys_prolog() on memory allocation failures only, and by handling this condition in the caller. Also, clean up any half baked sets of page tables that we may have created before returning with a NULL return value. Note that any failure at this level will trigger a panic() two levels up, so none of this makes a huge difference, but it is a nice cleanup nonetheless. [ardb: update commit log, add efi_call_phys_epilog() call on error path] Signed-off-by: Gen Zhang <[email protected]> Signed-off-by: Ard Biesheuvel <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Rob Bradford <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: [email protected] Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
lzw_result lzw_decode(struct lzw_ctx *ctx, const uint8_t ** const stack_pos_out) { lzw_result res; uint32_t code_new; uint32_t code_out; uint8_t last_value; uint8_t *stack_pos = ctx->stack_base; uint32_t clear_code = ctx->clear_code; uint32_t current_entry = ctx->current_entry; struct lzw_dictionary_entry * const table = ctx->table; /* Get a new code from the input */ res = lzw__next_code(&ctx->input, ctx->current_code_size, &code_new); if (res != LZW_OK) { return res; } /* Handle the new code */ if (code_new == clear_code) { /* Got Clear code */ return lzw__clear_codes(ctx, stack_pos_out); } else if (code_new == ctx->eoi_code) { /* Got End of Information code */ return LZW_EOI_CODE; } else if (code_new > current_entry) { /* Code is invalid */ return LZW_BAD_CODE; } else if (code_new < current_entry) { /* Code is in table */ code_out = code_new; last_value = table[code_new].first_value; } else { /* Code not in table */ *stack_pos++ = ctx->previous_code_first; code_out = ctx->previous_code; last_value = ctx->previous_code_first; } /* Add to the dictionary, only if there's space */ if (current_entry < (1 << LZW_CODE_MAX)) { struct lzw_dictionary_entry *entry = table + current_entry; entry->last_value = last_value; entry->first_value = ctx->previous_code_first; entry->previous_entry = ctx->previous_code; ctx->current_entry++; } /* Ensure code size is increased, if needed. */ if (current_entry == ctx->current_code_size_max) { if (ctx->current_code_size < LZW_CODE_MAX) { ctx->current_code_size++; ctx->current_code_size_max = (1 << ctx->current_code_size) - 1; } } /* Store details of this code as "previous code" to the context. */ ctx->previous_code_first = table[code_new].first_value; ctx->previous_code = code_new; /* Put rest of data for this code on output stack. * Note, in the case of "code not in table", the last entry of the * current code has already been placed on the stack above. 
*/ while (code_out > clear_code) { struct lzw_dictionary_entry *entry = table + code_out; *stack_pos++ = entry->last_value; code_out = entry->previous_entry; } *stack_pos++ = table[code_out].last_value; *stack_pos_out = stack_pos; return LZW_OK; }
1
[ "CWE-125", "CWE-787" ]
chafa
e6ce3746cdcf0836b9dae659a5aed15d73a080d8
160,716,953,605,882,430,000,000,000,000,000,000,000
77
libnsgif: fix oob in lzw_decode
auto join(const Range& range, const BasicCStringRef<wchar_t>& sep) -> ArgJoin<wchar_t, decltype(std::begin(range))> { return join(std::begin(range), std::end(range), sep); }
0
[ "CWE-134", "CWE-119", "CWE-787" ]
fmt
8cf30aa2be256eba07bb1cefb998c52326e846e7
84,225,324,220,804,750,000,000,000,000,000,000,000
4
Fix segfault on complex pointer formatting (#642)
static unsigned unfilterScanline(unsigned char* recon, const unsigned char* scanline, const unsigned char* precon, size_t bytewidth, unsigned char filterType, size_t length) { /* For PNG filter method 0 unfilter a PNG image scanline by scanline. when the pixels are smaller than 1 byte, the filter works byte per byte (bytewidth = 1) precon is the previous unfiltered scanline, recon the result, scanline the current one the incoming scanlines do NOT include the filtertype byte, that one is given in the parameter filterType instead recon and scanline MAY be the same memory address! precon must be disjoint. */ size_t i; switch(filterType) { case 0: for(i = 0; i < length; i++) recon[i] = scanline[i]; break; case 1: for(i = 0; i < bytewidth; i++) recon[i] = scanline[i]; for(i = bytewidth; i < length; i++) recon[i] = scanline[i] + recon[i - bytewidth]; break; case 2: if(precon) { for(i = 0; i < length; i++) recon[i] = scanline[i] + precon[i]; } else { for(i = 0; i < length; i++) recon[i] = scanline[i]; } break; case 3: if(precon) { for(i = 0; i < bytewidth; i++) recon[i] = scanline[i] + precon[i] / 2; for(i = bytewidth; i < length; i++) recon[i] = scanline[i] + ((recon[i - bytewidth] + precon[i]) / 2); } else { for(i = 0; i < bytewidth; i++) recon[i] = scanline[i]; for(i = bytewidth; i < length; i++) recon[i] = scanline[i] + recon[i - bytewidth] / 2; } break; case 4: if(precon) { for(i = 0; i < bytewidth; i++) { recon[i] = (scanline[i] + precon[i]); /*paethPredictor(0, precon[i], 0) is always precon[i]*/ } for(i = bytewidth; i < length; i++) { recon[i] = (scanline[i] + paethPredictor(recon[i - bytewidth], precon[i], precon[i - bytewidth])); } } else { for(i = 0; i < bytewidth; i++) { recon[i] = scanline[i]; } for(i = bytewidth; i < length; i++) { /*paethPredictor(recon[i - bytewidth], 0, 0) is always recon[i - bytewidth]*/ recon[i] = (scanline[i] + recon[i - bytewidth]); } } break; default: return 36; /*error: unexisting filter type given*/ } return 0; }
0
[ "CWE-401" ]
FreeRDP
9fee4ae076b1ec97b97efb79ece08d1dab4df29a
39,982,992,284,612,744,000,000,000,000,000,000,000
73
Fixed #5645: realloc return handling
TEST_F(OptimizePipeline, PushDownAddFieldsAndInternalizeProjection) { auto unpackSpecObj = fromjson( "{$_internalUnpackBucket: { exclude: [], timeField: 'time', metaField: 'myMeta', " "bucketMaxSpanSeconds: 3600}}"); auto addFieldsSpec = fromjson("{$addFields: {device: '$myMeta.a'}}"); auto projectSpecObj = fromjson("{$project: {_id: true, x: true, device: true}}"); auto pipeline = Pipeline::parse(makeVector(unpackSpecObj, addFieldsSpec, projectSpecObj), getExpCtx()); pipeline->optimizePipeline(); // We should push down the $addFields and internalize the $project. auto serialized = pipeline->serializeToBson(); ASSERT_EQ(2u, serialized.size()); ASSERT_BSONOBJ_EQ(fromjson("{$addFields: {device: '$meta.a'}}"), serialized[0]); ASSERT_BSONOBJ_EQ(fromjson("{$_internalUnpackBucket: { include: ['_id', 'device', 'x'], " "timeField: 'time', metaField: 'myMeta', bucketMaxSpanSeconds: " "3600, computedMetaProjFields: ['device']}}"), serialized[1]); }
0
[]
mongo
b3107d73a2c58d7e016b834dae0acfd01c0db8d7
46,977,243,234,375,070,000,000,000,000,000,000,000
21
SERVER-59299: Flatten top-level nested $match stages in doOptimizeAt (cherry picked from commit 4db5eceda2cff697f35c84cd08232bac8c33beec)
Magick_png_write_raw_profile(const ImageInfo *image_info,png_struct *ping, png_info *ping_info, unsigned char *profile_type, unsigned char *profile_description, unsigned char *profile_data, png_uint_32 length) { png_textp text; register ssize_t i; unsigned char *sp; png_charp dp; png_uint_32 allocated_length, description_length; unsigned char hex[16]={'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}; if (length > 1) { if (LocaleNCompare((char *) profile_type+1, "ng-chunk-",9) == 0) return; } if (image_info->verbose) { (void) printf("writing raw profile: type=%s, length=%.20g\n", (char *) profile_type, (double) length); } #if PNG_LIBPNG_VER >= 10400 text=(png_textp) png_malloc(ping,(png_alloc_size_t) sizeof(png_text)); #else text=(png_textp) png_malloc(ping,(png_size_t) sizeof(png_text)); #endif description_length=(png_uint_32) strlen((const char *) profile_description); allocated_length=(png_uint_32) (length*2 + (length >> 5) + 20 + description_length); #if PNG_LIBPNG_VER >= 10400 text[0].text=(png_charp) png_malloc(ping, (png_alloc_size_t) allocated_length); text[0].key=(png_charp) png_malloc(ping, (png_alloc_size_t) 80); #else text[0].text=(png_charp) png_malloc(ping, (png_size_t) allocated_length); text[0].key=(png_charp) png_malloc(ping, (png_size_t) 80); #endif text[0].key[0]='\0'; (void) ConcatenateMagickString(text[0].key, "Raw profile type ",MagickPathExtent); (void) ConcatenateMagickString(text[0].key,(const char *) profile_type,62); sp=profile_data; dp=text[0].text; *dp++='\n'; (void) CopyMagickString(dp,(const char *) profile_description, allocated_length); dp+=description_length; *dp++='\n'; (void) FormatLocaleString(dp,allocated_length- (png_size_t) (dp-text[0].text),"%8lu ",(unsigned long) length); dp+=8; for (i=0; i < (ssize_t) length; i++) { if (i%36 == 0) *dp++='\n'; *(dp++)=(char) hex[((*sp >> 4) & 0x0f)]; *(dp++)=(char) hex[((*sp++ ) & 0x0f)]; } *dp++='\n'; *dp='\0'; text[0].text_length=(png_size_t) (dp-text[0].text); 
text[0].compression=image_info->compression == NoCompression || (image_info->compression == UndefinedCompression && text[0].text_length < 128) ? -1 : 0; if (text[0].text_length <= allocated_length) png_set_text(ping,ping_info,text,1); png_free(ping,text[0].text); png_free(ping,text[0].key); png_free(ping,text); }
0
[ "CWE-416" ]
ImageMagick
916d7bbd2c66a286d379dbd94bc6035c8fab937c
188,147,178,923,437,720,000,000,000,000,000,000,000
88
Removed invalid free reported in #1791.
static inline int rq_prio(const struct i915_request *rq) { return rq->sched.attr.priority; }
0
[]
linux
bc8a76a152c5f9ef3b48104154a65a68a8b76946
114,164,387,313,520,040,000,000,000,000,000,000,000
4
drm/i915/gen9: Clear residual context state on context switch Intel ID: PSIRT-TA-201910-001 CVEID: CVE-2019-14615 Intel GPU Hardware prior to Gen11 does not clear EU state during a context switch. This can result in information leakage between contexts. For Gen8 and Gen9, hardware provides a mechanism for fast cleardown of the EU state, by issuing a PIPE_CONTROL with bit 27 set. We can use this in a context batch buffer to explicitly cleardown the state on every context switch. As this workaround is already in place for gen8, we can borrow the code verbatim for Gen9. Signed-off-by: Mika Kuoppala <[email protected]> Signed-off-by: Akeem G Abodunrin <[email protected]> Cc: Kumar Valsan Prathap <[email protected]> Cc: Chris Wilson <[email protected]> Cc: Balestrieri Francesco <[email protected]> Cc: Bloomfield Jon <[email protected]> Cc: Dutt Sudeep <[email protected]>
int main(void) { timelib_time time = timelib_strtotime("May 12"); printf ("%04d-%02d-%02d %02d:%02d:%02d.%-5d %+04d %1d", time.y, time.m, time.d, time.h, time.i, time.s, time.f, time.z, time.dst); if (time.have_relative) { printf ("%3dY %3dM %3dD / %3dH %3dM %3dS", time.relative.y, time.relative.m, time.relative.d, time.relative.h, time.relative.i, time.relative.s); } if (time.have_weekday_relative) { printf (" / %d", time.relative.weekday); } if (time.have_weeknr_day) { printf(" / %dW%d", time.relative.weeknr_day.weeknr, time.relative.weeknr_day.dayofweek); } return 0; }
0
[ "CWE-125" ]
php-src
5c0455bf2c8cd3c25401407f158e820aa3b239e1
166,366,902,792,720,720,000,000,000,000,000,000,000
18
Merge branch 'PHP-7.0' into PHP-7.1 * PHP-7.0: Fixed bug #75055 Out-Of-Bounds Read in timelib_meridian() Apply upstream patch for CVE-2016-1283
CImg<T>& draw_object3d(const float x0, const float y0, const float z0, const CImg<tp>& vertices, const CImgList<tf>& primitives, const CImgList<tc>& colors, const CImg<to>& opacities, const unsigned int render_type=4, const bool is_double_sided=false, const float focale=700, const float lightx=0, const float lighty=0, const float lightz=-5e8, const float specular_lightness=0.2f, const float specular_shininess=0.1f, const float g_opacity=1) { return draw_object3d(x0,y0,z0,vertices,primitives,colors,opacities,render_type, is_double_sided,focale,lightx,lighty,lightz, specular_lightness,specular_shininess,g_opacity,CImg<floatT>::empty()); }
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
233,149,263,711,203,560,000,000,000,000,000,000,000
12
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size.
xfs_da_get_buf( struct xfs_trans *trans, struct xfs_inode *dp, xfs_dablk_t bno, xfs_daddr_t mappedbno, struct xfs_buf **bpp, int whichfork) { struct xfs_buf *bp; struct xfs_buf_map map; struct xfs_buf_map *mapp; int nmap; int error; *bpp = NULL; mapp = &map; nmap = 1; error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork, &mapp, &nmap); if (error) { /* mapping a hole is not an error, but we don't continue */ if (error == -1) error = 0; goto out_free; } bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp, mapp, nmap, 0); error = bp ? bp->b_error : XFS_ERROR(EIO); if (error) { xfs_trans_brelse(trans, bp); goto out_free; } *bpp = bp; out_free: if (mapp != &map) kmem_free(mapp); return error; }
0
[ "CWE-399" ]
linux
c88547a8119e3b581318ab65e9b72f27f23e641d
215,767,002,941,368,700,000,000,000,000,000,000,000
42
xfs: fix directory hash ordering bug Commit f5ea1100 ("xfs: add CRCs to dir2/da node blocks") introduced in 3.10 incorrectly converted the btree hash index array pointer in xfs_da3_fixhashpath(). It resulted in the the current hash always being compared against the first entry in the btree rather than the current block index into the btree block's hash entry array. As a result, it was comparing the wrong hashes, and so could misorder the entries in the btree. For most cases, this doesn't cause any problems as it requires hash collisions to expose the ordering problem. However, when there are hash collisions within a directory there is a very good probability that the entries will be ordered incorrectly and that actually matters when duplicate hashes are placed into or removed from the btree block hash entry array. This bug results in an on-disk directory corruption and that results in directory verifier functions throwing corruption warnings into the logs. While no data or directory entries are lost, access to them may be compromised, and attempts to remove entries from a directory that has suffered from this corruption may result in a filesystem shutdown. xfs_repair will fix the directory hash ordering without data loss occuring. [dchinner: wrote useful a commit message] cc: <[email protected]> Reported-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: Mark Tinguely <[email protected]> Reviewed-by: Ben Myers <[email protected]> Signed-off-by: Dave Chinner <[email protected]>
print_ets_priority_assignment_table(netdissect_options *ndo, const u_char *ptr) { ND_PRINT((ndo, "\n\t Priority Assignment Table")); ND_PRINT((ndo, "\n\t Priority : 0 1 2 3 4 5 6 7")); ND_PRINT((ndo, "\n\t Value : %-3d %-3d %-3d %-3d %-3d %-3d %-3d %-3d", ptr[0]>>4,ptr[0]&0x0f,ptr[1]>>4,ptr[1]&0x0f,ptr[2]>>4, ptr[2] & 0x0f, ptr[3] >> 4, ptr[3] & 0x0f)); }
0
[ "CWE-399", "CWE-835" ]
tcpdump
34cec721d39c76be1e0a600829a7b17bdfb832b6
182,532,385,677,213,400,000,000,000,000,000,000,000
9
CVE-2017-12997/LLDP: Don't use an 8-bit loop counter. If you have a for (i = 0; i < N; i++) loop, you'd better make sure that i is big enough to hold N - not N-1, N. The TLV length here is 9 bits long, not 8 bits long, so an 8-bit loop counter will overflow and you can loop infinitely. This fixes an infinite loop discovered by Forcepoint's security researchers Otto Airamo & Antti Levomäki. Add tests using the capture files supplied by the reporter(s). Clean up the output a bit while we're at it.
static int check_if_fs_block(e2fsck_t ctx, blk64_t test_block) { ext2_filsys fs = ctx->fs; blk64_t first_block; dgrp_t i; first_block = fs->super->s_first_data_block; for (i = 0; i < fs->group_desc_count; i++) { /* Check superblocks/block group descriptors */ if (ext2fs_bg_has_super(fs, i)) { if (test_block >= first_block && (test_block <= first_block + fs->desc_blocks)) return 1; } /* Check the inode table */ if ((ext2fs_inode_table_loc(fs, i)) && (test_block >= ext2fs_inode_table_loc(fs, i)) && (test_block < (ext2fs_inode_table_loc(fs, i) + fs->inode_blocks_per_group))) return 1; /* Check the bitmap blocks */ if ((test_block == ext2fs_block_bitmap_loc(fs, i)) || (test_block == ext2fs_inode_bitmap_loc(fs, i))) return 1; first_block += fs->super->s_blocks_per_group; } return 0; }
0
[ "CWE-787" ]
e2fsprogs
71ba13755337e19c9a826dfc874562a36e1b24d3
333,150,298,019,765,230,000,000,000,000,000,000,000
32
e2fsck: don't try to rehash a deleted directory If directory has been deleted in pass1[bcd] processing, then we shouldn't try to rehash the directory in pass 3a when we try to rehash/reoptimize directories. Signed-off-by: Theodore Ts'o <[email protected]>
evdev_device_destroy(struct evdev_device *device) { struct evdev_dispatch *dispatch; dispatch = device->dispatch; if (dispatch) dispatch->interface->destroy(dispatch); if (device->base.group) libinput_device_group_unref(device->base.group); free(device->output_name); filter_destroy(device->pointer.filter); libinput_timer_destroy(&device->scroll.timer); libinput_timer_destroy(&device->middlebutton.timer); libinput_seat_unref(device->base.seat); libevdev_free(device->evdev); udev_device_unref(device->udev_device); free(device); }
1
[ "CWE-134" ]
libinput
a423d7d3269dc32a87384f79e29bb5ac021c83d1
233,009,499,611,614,730,000,000,000,000,000,000,000
20
evdev: strip the device name of format directives This fixes a format string vulnerabilty. evdev_log_message() composes a format string consisting of a fixed prefix (including the rendered device name) and the passed-in format buffer. This format string is then passed with the arguments to the actual log handler, which usually and eventually ends up being printf. If the device name contains a printf-style format directive, these ended up in the format string and thus get interpreted correctly, e.g. for a device "Foo%sBar" the log message vs printf invocation ends up being: evdev_log_message(device, "some message %s", "some argument"); printf("event9 - Foo%sBar: some message %s", "some argument"); This can enable an attacker to execute malicious code with the privileges of the process using libinput. To exploit this, an attacker needs to be able to create a kernel device with a malicious name, e.g. through /dev/uinput or a Bluetooth device. To fix this, convert any potential format directives in the device name by duplicating percentages. Pre-rendering the device to avoid the issue altogether would be nicer but the current log level hooks do not easily allow for this. The device name is the only user-controlled part of the format string. A second potential issue is the sysname of the device which is also sanitized. This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from Assured AB, and independently by Lukas Lamster. Fixes #752 Signed-off-by: Peter Hutterer <[email protected]>
static const char *unset_define(cmd_parms *cmd, void *dummy, const char *name) { int i; const char **defines; if (cmd->parent && ap_cstr_casecmp(cmd->parent->directive, "<VirtualHost")) { return apr_pstrcat(cmd->pool, cmd->cmd->name, " is not valid in ", cmd->parent->directive, " context", NULL); } if (ap_strchr_c(name, ':') != NULL) { return "Variable name must not contain ':'"; } if (!saved_server_config_defines) { init_config_defines(cmd->pool); } defines = (const char **)ap_server_config_defines->elts; for (i = 0; i < ap_server_config_defines->nelts; i++) { if (strcmp(defines[i], name) == 0) { defines[i] = *(const char **)apr_array_pop(ap_server_config_defines); break; } } if (server_config_defined_vars) { apr_table_unset(server_config_defined_vars, name); } return NULL; }
0
[ "CWE-416", "CWE-284" ]
httpd
4cc27823899e070268b906ca677ee838d07cf67a
337,708,904,166,541,930,000,000,000,000,000,000,000
32
core: Disallow Methods' registration at run time (.htaccess), they may be used only if registered at init time (httpd.conf). Calling ap_method_register() in children processes is not the right scope since it won't be shared for all requests. git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1807655 13f79535-47bb-0310-9956-ffa450edef68
fr_window_archive_remove (FrWindow *window, GList *file_list) { _archive_operation_started (window, FR_ACTION_DELETING_FILES); fr_window_clipboard_remove_file_list (window, file_list); fr_archive_remove (window->archive, file_list, window->priv->compression, window->priv->cancellable, archive_remove_ready_cb, window); }
0
[ "CWE-22" ]
file-roller
b147281293a8307808475e102a14857055f81631
203,171,988,638,855,440,000,000,000,000,000,000,000
12
libarchive: sanitize filenames before extracting
static int del_instruction_bp(struct task_struct *child, int slot) { switch (slot) { case 1: if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC12MODE) { /* address range - clear slots 1 & 2 */ child->thread.debug.iac2 = 0; dbcr_iac_range(child) &= ~DBCR_IAC12MODE; } child->thread.debug.iac1 = 0; child->thread.debug.dbcr0 &= ~DBCR0_IAC1; break; case 2: if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC12MODE) /* used in a range */ return -EINVAL; child->thread.debug.iac2 = 0; child->thread.debug.dbcr0 &= ~DBCR0_IAC2; break; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 case 3: if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC34MODE) { /* address range - clear slots 3 & 4 */ child->thread.debug.iac4 = 0; dbcr_iac_range(child) &= ~DBCR_IAC34MODE; } child->thread.debug.iac3 = 0; child->thread.debug.dbcr0 &= ~DBCR0_IAC3; break; case 4: if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC34MODE) /* Used in a range */ return -EINVAL; child->thread.debug.iac4 = 0; child->thread.debug.dbcr0 &= ~DBCR0_IAC4; break; #endif default: return -EINVAL; } return 0; }
0
[ "CWE-119", "CWE-787" ]
linux
c1fa0768a8713b135848f78fd43ffc208d8ded70
57,476,745,450,682,420,000,000,000,000,000,000,000
54
powerpc/tm: Flush TM only if CPU has TM feature Commit cd63f3c ("powerpc/tm: Fix saving of TM SPRs in core dump") added code to access TM SPRs in flush_tmregs_to_thread(). However flush_tmregs_to_thread() does not check if TM feature is available on CPU before trying to access TM SPRs in order to copy live state to thread structures. flush_tmregs_to_thread() is indeed guarded by CONFIG_PPC_TRANSACTIONAL_MEM but it might be the case that kernel was compiled with CONFIG_PPC_TRANSACTIONAL_MEM enabled and ran on a CPU without TM feature available, thus rendering the execution of TM instructions that are treated by the CPU as illegal instructions. The fix is just to add proper checking in flush_tmregs_to_thread() if CPU has the TM feature before accessing any TM-specific resource, returning immediately if TM is no available on the CPU. Adding that checking in flush_tmregs_to_thread() instead of in places where it is called, like in vsr_get() and vsr_set(), is better because avoids the same problem cropping up elsewhere. Cc: [email protected] # v4.13+ Fixes: cd63f3c ("powerpc/tm: Fix saving of TM SPRs in core dump") Signed-off-by: Gustavo Romero <[email protected]> Reviewed-by: Cyril Bur <[email protected]> Signed-off-by: Michael Ellerman <[email protected]>
static int check_revocation(X509_STORE_CTX *ctx) { int i, last, ok; if (!(ctx->param->flags & X509_V_FLAG_CRL_CHECK)) return 1; if (ctx->param->flags & X509_V_FLAG_CRL_CHECK_ALL) last = sk_X509_num(ctx->chain) - 1; else { /* If checking CRL paths this isn't the EE certificate */ if (ctx->parent) return 1; last = 0; } for (i = 0; i <= last; i++) { ctx->error_depth = i; ok = check_cert(ctx); if (!ok) return ok; } return 1; }
0
[ "CWE-119" ]
openssl
370ac320301e28bb615cee80124c042649c95d14
20,567,017,783,438,325,000,000,000,000,000,000,000
21
Fix length checks in X509_cmp_time to avoid out-of-bounds reads. Also tighten X509_cmp_time to reject more than three fractional seconds in the time; and to reject trailing garbage after the offset. CVE-2015-1789 Reviewed-by: Viktor Dukhovni <[email protected]> Reviewed-by: Richard Levitte <[email protected]>
static int sha1write_ewah_helper(void *f, const void *buf, size_t len) { /* sha1write will die on error */ sha1write(f, buf, len); return len; }
0
[ "CWE-119", "CWE-787" ]
git
de1e67d0703894cb6ea782e36abb63976ab07e60
99,111,532,716,796,560,000,000,000,000,000,000,000
6
list-objects: pass full pathname to callbacks When we find a blob at "a/b/c", we currently pass this to our show_object_fn callbacks as two components: "a/b/" and "c". Callbacks which want the full value then call path_name(), which concatenates the two. But this is an inefficient interface; the path is a strbuf, and we could simply append "c" to it temporarily, then roll back the length, without creating a new copy. So we could improve this by teaching the callsites of path_name() this trick (and there are only 3). But we can also notice that no callback actually cares about the broken-down representation, and simply pass each callback the full path "a/b/c" as a string. The callback code becomes even simpler, then, as we do not have to worry about freeing an allocated buffer, nor rolling back our modification to the strbuf. This is theoretically less efficient, as some callbacks would not bother to format the final path component. But in practice this is not measurable. Since we use the same strbuf over and over, our work to grow it is amortized, and we really only pay to memcpy a few bytes. Signed-off-by: Jeff King <[email protected]> Signed-off-by: Junio C Hamano <[email protected]>
static int io_req_defer(struct io_kiocb *req) { const struct io_uring_sqe *sqe = req->submit.sqe; struct io_uring_sqe *sqe_copy; struct io_ring_ctx *ctx = req->ctx; /* Still need defer if there is pending req in defer list. */ if (!req_need_defer(req) && list_empty(&ctx->defer_list)) return 0; sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL); if (!sqe_copy) return -EAGAIN; spin_lock_irq(&ctx->completion_lock); if (!req_need_defer(req) && list_empty(&ctx->defer_list)) { spin_unlock_irq(&ctx->completion_lock); kfree(sqe_copy); return 0; } memcpy(sqe_copy, sqe, sizeof(*sqe_copy)); req->flags |= REQ_F_FREE_SQE; req->submit.sqe = sqe_copy; trace_io_uring_defer(ctx, req, req->user_data); list_add_tail(&req->list, &ctx->defer_list); spin_unlock_irq(&ctx->completion_lock); return -EIOCBQUEUED; }
0
[]
linux
181e448d8709e517c9c7b523fcd209f24eb38ca7
283,246,668,487,464,280,000,000,000,000,000,000,000
30
io_uring: async workers should inherit the user creds If we don't inherit the original task creds, then we can confuse users like fuse that pass creds in the request header. See link below on identical aio issue. Link: https://lore.kernel.org/linux-fsdevel/[email protected]/T/#u Signed-off-by: Jens Axboe <[email protected]>
void __do_SAK(struct tty_struct *tty) { #ifdef TTY_SOFT_SAK tty_hangup(tty); #else struct task_struct *g, *p; struct pid *session; int i; struct file *filp; struct fdtable *fdt; if (!tty) return; session = tty->session; tty_ldisc_flush(tty); tty_driver_flush_buffer(tty); read_lock(&tasklist_lock); /* Kill the entire session */ do_each_pid_task(session, PIDTYPE_SID, p) { printk(KERN_NOTICE "SAK: killed process %d" " (%s): task_session(p)==tty->session\n", task_pid_nr(p), p->comm); send_sig(SIGKILL, p, 1); } while_each_pid_task(session, PIDTYPE_SID, p); /* Now kill any processes that happen to have the * tty open. */ do_each_thread(g, p) { if (p->signal->tty == tty) { printk(KERN_NOTICE "SAK: killed process %d" " (%s): task_session(p)==tty->session\n", task_pid_nr(p), p->comm); send_sig(SIGKILL, p, 1); continue; } task_lock(p); if (p->files) { /* * We don't take a ref to the file, so we must * hold ->file_lock instead. */ spin_lock(&p->files->file_lock); fdt = files_fdtable(p->files); for (i = 0; i < fdt->max_fds; i++) { filp = fcheck_files(p->files, i); if (!filp) continue; if (filp->f_op->read == tty_read && file_tty(filp) == tty) { printk(KERN_NOTICE "SAK: killed process %d" " (%s): fd#%d opened to the tty\n", task_pid_nr(p), p->comm, i); force_sig(SIGKILL, p); break; } } spin_unlock(&p->files->file_lock); } task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); #endif }
0
[ "CWE-703" ]
linux
c290f8358acaeffd8e0c551ddcc24d1206143376
219,579,124,427,884,700,000,000,000,000,000,000,000
66
TTY: drop driver reference in tty_open fail path When tty_driver_lookup_tty fails in tty_open, we forget to drop a reference to the tty driver. This was added by commit 4a2b5fddd5 (Move tty lookup/reopen to caller). Fix that by adding tty_driver_kref_put to the fail path. I will refactor the code later. This is for the ease of backporting to stable. Introduced-in: v2.6.28-rc2 Signed-off-by: Jiri Slaby <[email protected]> Cc: stable <[email protected]> Cc: Alan Cox <[email protected]> Acked-by: Sukadev Bhattiprolu <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
static void test_list_fields() { MYSQL_RES *result; int rc; myheader("test_list_fields"); rc= mysql_query(mysql, "drop table if exists t1"); myquery(rc); rc= mysql_query(mysql, "create table t1(c1 int primary key auto_increment, c2 char(10) default 'mysql')"); myquery(rc); result= mysql_list_fields(mysql, "t1", NULL); mytest(result); rc= my_process_result_set(result); DIE_UNLESS(rc == 0); verify_prepare_field(result, 0, "c1", "c1", MYSQL_TYPE_LONG, "t1", "t1", current_db, 11, "0"); verify_prepare_field(result, 1, "c2", "c2", MYSQL_TYPE_STRING, "t1", "t1", current_db, 10, "mysql"); mysql_free_result(result); myquery(mysql_query(mysql, "drop table t1")); }
0
[ "CWE-284", "CWE-295" ]
mysql-server
3bd5589e1a5a93f9c224badf983cd65c45215390
126,720,944,772,274,080,000,000,000,000,000,000,000
29
WL#6791 : Redefine client --ssl option to imply enforced encryption # Changed the meaning of the --ssl=1 option of all client binaries to mean force ssl, not try ssl and fail over to eunecrypted # Added a new MYSQL_OPT_SSL_ENFORCE mysql_options() option to specify that an ssl connection is required. # Added a new macro SSL_SET_OPTIONS() to the client SSL handling headers that sets all the relevant SSL options at once. # Revamped all of the current native clients to use the new macro # Removed some Windows line endings. # Added proper handling of the new option into the ssl helper headers. # If SSL is mandatory assume that the media is secure enough for the sha256 plugin to do unencrypted password exchange even before establishing a connection. # Set the default ssl cipher to DHE-RSA-AES256-SHA if none is specified. # updated test cases that require a non-default cipher to spawn a mysql command line tool binary since mysqltest has no support for specifying ciphers. # updated the replication slave connection code to always enforce SSL if any of the SSL config options is present. # test cases added and updated. # added a mysql_get_option() API to return mysql_options() values. Used the new API inside the sha256 plugin. # Fixed compilation warnings because of unused variables. # Fixed test failures (mysql_ssl and bug13115401) # Fixed whitespace issues. # Fully implemented the mysql_get_option() function. # Added a test case for mysql_get_option() # fixed some trailing whitespace issues # fixed some uint/int warnings in mysql_client_test.c # removed shared memory option from non-windows get_options tests # moved MYSQL_OPT_LOCAL_INFILE to the uint options
unsigned int fib6_tables_seq_read(struct net *net) { unsigned int h, fib_seq = 0; rcu_read_lock(); for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { struct hlist_head *head = &net->ipv6.fib_table_hash[h]; struct fib6_table *tb; hlist_for_each_entry_rcu(tb, head, tb6_hlist) fib_seq += tb->fib_seq; } rcu_read_unlock(); return fib_seq; }
0
[ "CWE-755" ]
linux
7b09c2d052db4b4ad0b27b97918b46a7746966fa
298,891,813,944,462,080,000,000,000,000,000,000,000
16
ipv6: fix a typo in fib6_rule_lookup() Yi Ren reported an issue discovered by syzkaller, and bisected to the cited commit. Many thanks to Yi, this trivial patch does not reflect the patient work that has been done. Fixes: d64a1f574a29 ("ipv6: honor RT6_LOOKUP_F_DST_NOREF in rule lookup logic") Signed-off-by: Eric Dumazet <[email protected]> Acked-by: Wei Wang <[email protected]> Bisected-and-reported-by: Yi Ren <[email protected]> Signed-off-by: Jakub Kicinski <[email protected]>
directory_fetches_dir_info_early(const or_options_t *options) { return directory_fetches_from_authorities(options); }
0
[]
tor
02e05bd74dbec614397b696cfcda6525562a4675
61,626,608,119,380,040,000,000,000,000,000,000,000
4
When examining descriptors as a dirserver, reject ones with bad versions This is an extra fix for bug 21278: it ensures that these descriptors and platforms will never be listed in a legit consensus.
static void oidc_store_access_token_expiry(request_rec *r, oidc_session_t *session, int expires_in) { if (expires_in != -1) { oidc_session_set(r, session, OIDC_ACCESSTOKEN_EXPIRES_SESSION_KEY, apr_psprintf(r->pool, "%" APR_TIME_T_FMT, apr_time_sec(apr_time_now()) + expires_in)); } }
0
[ "CWE-20" ]
mod_auth_openidc
612e309bfffd6f9b8ad7cdccda3019fc0865f3b4
84,786,935,758,190,270,000,000,000,000,000,000,000
8
don't echo query params on invalid requests to redirect URI; closes #212 thanks @LukasReschke; I'm sure there's some OWASP guideline that warns against this
evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen) { struct evbuffer_chain *chain, *tmp; int result = -1; EVBUFFER_LOCK(buf); if (buf->freeze_start) { goto done; } chain = buf->first; if (chain == NULL) { chain = evbuffer_chain_new(datlen); if (!chain) goto done; evbuffer_chain_insert(buf, chain); } /* we cannot touch immutable buffers */ if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) { /* If this chain is empty, we can treat it as * 'empty at the beginning' rather than 'empty at the end' */ if (chain->off == 0) chain->misalign = chain->buffer_len; if ((size_t)chain->misalign >= datlen) { /* we have enough space to fit everything */ memcpy(chain->buffer + chain->misalign - datlen, data, datlen); chain->off += datlen; chain->misalign -= datlen; buf->total_len += datlen; buf->n_add_for_cb += datlen; goto out; } else if (chain->misalign) { /* we can only fit some of the data. */ memcpy(chain->buffer, (char*)data + datlen - chain->misalign, (size_t)chain->misalign); chain->off += (size_t)chain->misalign; buf->total_len += (size_t)chain->misalign; buf->n_add_for_cb += (size_t)chain->misalign; datlen -= (size_t)chain->misalign; chain->misalign = 0; } } /* we need to add another chain */ if ((tmp = evbuffer_chain_new(datlen)) == NULL) goto done; buf->first = tmp; if (buf->last_with_datap == &buf->first) buf->last_with_datap = &tmp->next; tmp->next = chain; tmp->off = datlen; tmp->misalign = tmp->buffer_len - datlen; memcpy(tmp->buffer + tmp->misalign, data, datlen); buf->total_len += datlen; buf->n_add_for_cb += (size_t)chain->misalign; out: evbuffer_invoke_callbacks_(buf); result = 0; done: EVBUFFER_UNLOCK(buf); return result; }
1
[ "CWE-189" ]
libevent
841ecbd96105c84ac2e7c9594aeadbcc6fb38bc4
205,549,976,137,883,870,000,000,000,000,000,000,000
72
Fix CVE-2014-6272 in Libevent 2.1 For this fix, we need to make sure that passing too-large inputs to the evbuffer functions can't make us do bad things with the heap. Also, lower the maximum chunk size to the lower of off_t, size_t maximum. This is necessary since otherwise we could get into an infinite loop if we make a chunk that 'misalign' cannot index into.
request_is_satisfied (NautilusDirectory *directory, NautilusFile *file, Request request) { if (REQUEST_WANTS_TYPE (request, REQUEST_METAFILE) && !nautilus_directory_is_metadata_read (directory)) { return FALSE; } if (REQUEST_WANTS_TYPE (request, REQUEST_FILE_LIST) && !(directory->details->directory_loaded && directory->details->directory_loaded_sent_notification)) { return FALSE; } if (REQUEST_WANTS_TYPE (request, REQUEST_DIRECTORY_COUNT)) { if (has_problem (directory, file, lacks_directory_count)) { return FALSE; } } if (REQUEST_WANTS_TYPE (request, REQUEST_FILE_INFO)) { if (has_problem (directory, file, lacks_info)) { return FALSE; } } if (REQUEST_WANTS_TYPE (request, REQUEST_FILESYSTEM_INFO)) { if (has_problem (directory, file, lacks_filesystem_info)) { return FALSE; } } if (REQUEST_WANTS_TYPE (request, REQUEST_TOP_LEFT_TEXT)) { if (has_problem (directory, file, lacks_top_left)) { return FALSE; } } if (REQUEST_WANTS_TYPE (request, REQUEST_LARGE_TOP_LEFT_TEXT)) { if (has_problem (directory, file, lacks_large_top_left)) { return FALSE; } } if (REQUEST_WANTS_TYPE (request, REQUEST_DEEP_COUNT)) { if (has_problem (directory, file, lacks_deep_count)) { return FALSE; } } if (REQUEST_WANTS_TYPE (request, REQUEST_THUMBNAIL)) { if (has_problem (directory, file, lacks_thumbnail)) { return FALSE; } } if (REQUEST_WANTS_TYPE (request, REQUEST_MOUNT)) { if (has_problem (directory, file, lacks_mount)) { return FALSE; } } if (REQUEST_WANTS_TYPE (request, REQUEST_MIME_LIST)) { if (has_problem (directory, file, lacks_mime_list)) { return FALSE; } } if (REQUEST_WANTS_TYPE (request, REQUEST_LINK_INFO)) { if (has_problem (directory, file, lacks_link_info)) { return FALSE; } } return TRUE; }
0
[]
nautilus
7632a3e13874a2c5e8988428ca913620a25df983
333,077,908,743,512,000,000,000,000,000,000,000,000
77
Check for trusted desktop file launchers. 2009-02-24 Alexander Larsson <[email protected]> * libnautilus-private/nautilus-directory-async.c: Check for trusted desktop file launchers. * libnautilus-private/nautilus-file-private.h: * libnautilus-private/nautilus-file.c: * libnautilus-private/nautilus-file.h: Add nautilus_file_is_trusted_link. Allow unsetting of custom display name. * libnautilus-private/nautilus-mime-actions.c: Display dialog when trying to launch a non-trusted desktop file. svn path=/trunk/; revision=15003
gst_matroska_demux_parse_stream (GstMatroskaDemux * demux, GstEbmlRead * ebml, GstMatroskaTrackContext ** dest_context) { GstMatroskaTrackContext *context; GstCaps *caps = NULL; GstTagList *cached_taglist; GstFlowReturn ret; guint32 id, riff_fourcc = 0; guint16 riff_audio_fmt = 0; gchar *codec = NULL; DEBUG_ELEMENT_START (demux, ebml, "TrackEntry"); *dest_context = NULL; /* start with the master */ if ((ret = gst_ebml_read_master (ebml, &id)) != GST_FLOW_OK) { DEBUG_ELEMENT_STOP (demux, ebml, "TrackEntry", ret); return ret; } /* allocate generic... if we know the type, we'll g_renew() * with the precise type */ context = g_new0 (GstMatroskaTrackContext, 1); context->index_writer_id = -1; context->type = 0; /* no type yet */ context->default_duration = 0; context->pos = 0; context->set_discont = TRUE; context->timecodescale = 1.0; context->flags = GST_MATROSKA_TRACK_ENABLED | GST_MATROSKA_TRACK_DEFAULT | GST_MATROSKA_TRACK_LACING; context->from_time = GST_CLOCK_TIME_NONE; context->from_offset = -1; context->to_offset = G_MAXINT64; context->alignment = 1; context->dts_only = FALSE; context->intra_only = FALSE; context->tags = gst_tag_list_new_empty (); g_queue_init (&context->protection_event_queue); context->protection_info = NULL; GST_DEBUG_OBJECT (demux, "Parsing a TrackEntry (%d tracks parsed so far)", demux->common.num_streams); /* try reading the trackentry headers */ while (ret == GST_FLOW_OK && gst_ebml_read_has_remaining (ebml, 1, TRUE)) { if ((ret = gst_ebml_peek_id (ebml, &id)) != GST_FLOW_OK) break; switch (id) { /* track number (unique stream ID) */ case GST_MATROSKA_ID_TRACKNUMBER:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num == 0) { GST_ERROR_OBJECT (demux, "Invalid TrackNumber 0"); ret = GST_FLOW_ERROR; break; } GST_DEBUG_OBJECT (demux, "TrackNumber: %" G_GUINT64_FORMAT, num); context->num = num; break; } /* track UID (unique identifier) */ case GST_MATROSKA_ID_TRACKUID:{ guint64 num; if ((ret = 
gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num == 0) { GST_ERROR_OBJECT (demux, "Invalid TrackUID 0"); ret = GST_FLOW_ERROR; break; } GST_DEBUG_OBJECT (demux, "TrackUID: %" G_GUINT64_FORMAT, num); context->uid = num; break; } /* track type (video, audio, combined, subtitle, etc.) */ case GST_MATROSKA_ID_TRACKTYPE:{ guint64 track_type; if ((ret = gst_ebml_read_uint (ebml, &id, &track_type)) != GST_FLOW_OK) { break; } if (context->type != 0 && context->type != track_type) { GST_WARNING_OBJECT (demux, "More than one tracktype defined in a TrackEntry - skipping"); break; } else if (track_type < 1 || track_type > 254) { GST_WARNING_OBJECT (demux, "Invalid TrackType %" G_GUINT64_FORMAT, track_type); break; } GST_DEBUG_OBJECT (demux, "TrackType: %" G_GUINT64_FORMAT, track_type); /* ok, so we're actually going to reallocate this thing */ switch (track_type) { case GST_MATROSKA_TRACK_TYPE_VIDEO: gst_matroska_track_init_video_context (&context); break; case GST_MATROSKA_TRACK_TYPE_AUDIO: gst_matroska_track_init_audio_context (&context); break; case GST_MATROSKA_TRACK_TYPE_SUBTITLE: gst_matroska_track_init_subtitle_context (&context); break; case GST_MATROSKA_TRACK_TYPE_COMPLEX: case GST_MATROSKA_TRACK_TYPE_LOGO: case GST_MATROSKA_TRACK_TYPE_BUTTONS: case GST_MATROSKA_TRACK_TYPE_CONTROL: default: GST_WARNING_OBJECT (demux, "Unknown or unsupported TrackType %" G_GUINT64_FORMAT, track_type); context->type = 0; break; } break; } /* tracktype specific stuff for video */ case GST_MATROSKA_ID_TRACKVIDEO:{ GstMatroskaTrackVideoContext *videocontext; DEBUG_ELEMENT_START (demux, ebml, "TrackVideo"); if (!gst_matroska_track_init_video_context (&context)) { GST_WARNING_OBJECT (demux, "TrackVideo element in non-video track - ignoring track"); ret = GST_FLOW_ERROR; break; } else if ((ret = gst_ebml_read_master (ebml, &id)) != GST_FLOW_OK) { break; } videocontext = (GstMatroskaTrackVideoContext *) context; while (ret == GST_FLOW_OK && gst_ebml_read_has_remaining 
(ebml, 1, TRUE)) { if ((ret = gst_ebml_peek_id (ebml, &id)) != GST_FLOW_OK) break; switch (id) { /* Should be one level up but some broken muxers write it here. */ case GST_MATROSKA_ID_TRACKDEFAULTDURATION:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num == 0) { GST_WARNING_OBJECT (demux, "Invalid TrackDefaultDuration 0"); break; } GST_DEBUG_OBJECT (demux, "TrackDefaultDuration: %" G_GUINT64_FORMAT, num); context->default_duration = num; break; } /* video framerate */ /* NOTE: This one is here only for backward compatibility. * Use _TRACKDEFAULDURATION one level up. */ case GST_MATROSKA_ID_VIDEOFRAMERATE:{ gdouble num; if ((ret = gst_ebml_read_float (ebml, &id, &num)) != GST_FLOW_OK) break; if (num <= 0.0) { GST_WARNING_OBJECT (demux, "Invalid TrackVideoFPS %lf", num); break; } GST_DEBUG_OBJECT (demux, "TrackVideoFrameRate: %lf", num); if (context->default_duration == 0) context->default_duration = gst_gdouble_to_guint64 ((gdouble) GST_SECOND * (1.0 / num)); videocontext->default_fps = num; break; } /* width of the size to display the video at */ case GST_MATROSKA_ID_VIDEODISPLAYWIDTH:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num == 0) { GST_WARNING_OBJECT (demux, "Invalid TrackVideoDisplayWidth 0"); break; } GST_DEBUG_OBJECT (demux, "TrackVideoDisplayWidth: %" G_GUINT64_FORMAT, num); videocontext->display_width = num; break; } /* height of the size to display the video at */ case GST_MATROSKA_ID_VIDEODISPLAYHEIGHT:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num == 0) { GST_WARNING_OBJECT (demux, "Invalid TrackVideoDisplayHeight 0"); break; } GST_DEBUG_OBJECT (demux, "TrackVideoDisplayHeight: %" G_GUINT64_FORMAT, num); videocontext->display_height = num; break; } /* width of the video in the file */ case GST_MATROSKA_ID_VIDEOPIXELWIDTH:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if 
(num == 0) { GST_WARNING_OBJECT (demux, "Invalid TrackVideoPixelWidth 0"); break; } GST_DEBUG_OBJECT (demux, "TrackVideoPixelWidth: %" G_GUINT64_FORMAT, num); videocontext->pixel_width = num; break; } /* height of the video in the file */ case GST_MATROSKA_ID_VIDEOPIXELHEIGHT:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num == 0) { GST_WARNING_OBJECT (demux, "Invalid TrackVideoPixelHeight 0"); break; } GST_DEBUG_OBJECT (demux, "TrackVideoPixelHeight: %" G_GUINT64_FORMAT, num); videocontext->pixel_height = num; break; } /* whether the video is interlaced */ case GST_MATROSKA_ID_VIDEOFLAGINTERLACED:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num == 1) videocontext->interlace_mode = GST_MATROSKA_INTERLACE_MODE_INTERLACED; else if (num == 2) videocontext->interlace_mode = GST_MATROSKA_INTERLACE_MODE_PROGRESSIVE; else videocontext->interlace_mode = GST_MATROSKA_INTERLACE_MODE_UNKNOWN; GST_DEBUG_OBJECT (demux, "video track interlacing mode: %d", videocontext->interlace_mode); break; } /* interlaced field order */ case GST_MATROSKA_ID_VIDEOFIELDORDER:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (videocontext->interlace_mode != GST_MATROSKA_INTERLACE_MODE_INTERLACED) { GST_WARNING_OBJECT (demux, "FieldOrder element when not interlaced - ignoring"); break; } if (num == 0) /* turns out we're actually progressive */ videocontext->interlace_mode = GST_MATROSKA_INTERLACE_MODE_PROGRESSIVE; else if (num == 2) videocontext->field_order = GST_VIDEO_FIELD_ORDER_UNKNOWN; else if (num == 9) videocontext->field_order = GST_VIDEO_FIELD_ORDER_TOP_FIELD_FIRST; else if (num == 14) videocontext->field_order = GST_VIDEO_FIELD_ORDER_BOTTOM_FIELD_FIRST; else { GST_FIXME_OBJECT (demux, "Unknown or unsupported FieldOrder %" G_GUINT64_FORMAT, num); videocontext->field_order = GST_VIDEO_FIELD_ORDER_UNKNOWN; } GST_DEBUG_OBJECT (demux, "video track field order: 
%d", videocontext->field_order); break; } /* aspect ratio behaviour */ case GST_MATROSKA_ID_VIDEOASPECTRATIOTYPE:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num != GST_MATROSKA_ASPECT_RATIO_MODE_FREE && num != GST_MATROSKA_ASPECT_RATIO_MODE_KEEP && num != GST_MATROSKA_ASPECT_RATIO_MODE_FIXED) { GST_WARNING_OBJECT (demux, "Unknown TrackVideoAspectRatioType 0x%x", (guint) num); break; } GST_DEBUG_OBJECT (demux, "TrackVideoAspectRatioType: %" G_GUINT64_FORMAT, num); videocontext->asr_mode = num; break; } /* colourspace (only matters for raw video) fourcc */ case GST_MATROSKA_ID_VIDEOCOLOURSPACE:{ guint8 *data; guint64 datalen; if ((ret = gst_ebml_read_binary (ebml, &id, &data, &datalen)) != GST_FLOW_OK) break; if (datalen != 4) { g_free (data); GST_WARNING_OBJECT (demux, "Invalid TrackVideoColourSpace length %" G_GUINT64_FORMAT, datalen); break; } memcpy (&videocontext->fourcc, data, 4); GST_DEBUG_OBJECT (demux, "TrackVideoColourSpace: %" GST_FOURCC_FORMAT, GST_FOURCC_ARGS (videocontext->fourcc)); g_free (data); break; } /* color info */ case GST_MATROSKA_ID_VIDEOCOLOUR:{ ret = gst_matroska_demux_parse_colour (demux, ebml, videocontext); break; } case GST_MATROSKA_ID_VIDEOSTEREOMODE: { guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; GST_DEBUG_OBJECT (demux, "StereoMode: %" G_GUINT64_FORMAT, num); switch (num) { case GST_MATROSKA_STEREO_MODE_SBS_RL: videocontext->multiview_flags = GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_VIEW_FIRST; /* fall through */ case GST_MATROSKA_STEREO_MODE_SBS_LR: videocontext->multiview_mode = GST_VIDEO_MULTIVIEW_MODE_SIDE_BY_SIDE; break; case GST_MATROSKA_STEREO_MODE_TB_RL: videocontext->multiview_flags = GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_VIEW_FIRST; /* fall through */ case GST_MATROSKA_STEREO_MODE_TB_LR: videocontext->multiview_mode = GST_VIDEO_MULTIVIEW_MODE_TOP_BOTTOM; break; case GST_MATROSKA_STEREO_MODE_CHECKER_RL: videocontext->multiview_flags = 
GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_VIEW_FIRST; /* fall through */ case GST_MATROSKA_STEREO_MODE_CHECKER_LR: videocontext->multiview_mode = GST_VIDEO_MULTIVIEW_MODE_CHECKERBOARD; break; case GST_MATROSKA_STEREO_MODE_FBF_RL: videocontext->multiview_flags = GST_VIDEO_MULTIVIEW_FLAGS_RIGHT_VIEW_FIRST; /* fall through */ case GST_MATROSKA_STEREO_MODE_FBF_LR: videocontext->multiview_mode = GST_VIDEO_MULTIVIEW_MODE_FRAME_BY_FRAME; /* FIXME: In frame-by-frame mode, left/right frame buffers are * laced within one block, and we'll need to apply FIRST_IN_BUNDLE * accordingly. See http://www.matroska.org/technical/specs/index.html#StereoMode */ GST_FIXME_OBJECT (demux, "Frame-by-frame stereoscopic mode not fully implemented"); break; } break; } default: GST_WARNING_OBJECT (demux, "Unknown TrackVideo subelement 0x%x - ignoring", id); /* fall through */ case GST_MATROSKA_ID_VIDEODISPLAYUNIT: case GST_MATROSKA_ID_VIDEOPIXELCROPBOTTOM: case GST_MATROSKA_ID_VIDEOPIXELCROPTOP: case GST_MATROSKA_ID_VIDEOPIXELCROPLEFT: case GST_MATROSKA_ID_VIDEOPIXELCROPRIGHT: case GST_MATROSKA_ID_VIDEOGAMMAVALUE: ret = gst_ebml_read_skip (ebml); break; } } DEBUG_ELEMENT_STOP (demux, ebml, "TrackVideo", ret); break; } /* tracktype specific stuff for audio */ case GST_MATROSKA_ID_TRACKAUDIO:{ GstMatroskaTrackAudioContext *audiocontext; DEBUG_ELEMENT_START (demux, ebml, "TrackAudio"); if (!gst_matroska_track_init_audio_context (&context)) { GST_WARNING_OBJECT (demux, "TrackAudio element in non-audio track - ignoring track"); ret = GST_FLOW_ERROR; break; } if ((ret = gst_ebml_read_master (ebml, &id)) != GST_FLOW_OK) break; audiocontext = (GstMatroskaTrackAudioContext *) context; while (ret == GST_FLOW_OK && gst_ebml_read_has_remaining (ebml, 1, TRUE)) { if ((ret = gst_ebml_peek_id (ebml, &id)) != GST_FLOW_OK) break; switch (id) { /* samplerate */ case GST_MATROSKA_ID_AUDIOSAMPLINGFREQ:{ gdouble num; if ((ret = gst_ebml_read_float (ebml, &id, &num)) != GST_FLOW_OK) break; if (num <= 0.0) { GST_WARNING_OBJECT 
(demux, "Invalid TrackAudioSamplingFrequency %lf", num); break; } GST_DEBUG_OBJECT (demux, "TrackAudioSamplingFrequency: %lf", num); audiocontext->samplerate = num; break; } /* bitdepth */ case GST_MATROSKA_ID_AUDIOBITDEPTH:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num == 0) { GST_WARNING_OBJECT (demux, "Invalid TrackAudioBitDepth 0"); break; } GST_DEBUG_OBJECT (demux, "TrackAudioBitDepth: %" G_GUINT64_FORMAT, num); audiocontext->bitdepth = num; break; } /* channels */ case GST_MATROSKA_ID_AUDIOCHANNELS:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num == 0) { GST_WARNING_OBJECT (demux, "Invalid TrackAudioChannels 0"); break; } GST_DEBUG_OBJECT (demux, "TrackAudioChannels: %" G_GUINT64_FORMAT, num); audiocontext->channels = num; break; } default: GST_WARNING_OBJECT (demux, "Unknown TrackAudio subelement 0x%x - ignoring", id); /* fall through */ case GST_MATROSKA_ID_AUDIOCHANNELPOSITIONS: case GST_MATROSKA_ID_AUDIOOUTPUTSAMPLINGFREQ: ret = gst_ebml_read_skip (ebml); break; } } DEBUG_ELEMENT_STOP (demux, ebml, "TrackAudio", ret); break; } /* codec identifier */ case GST_MATROSKA_ID_CODECID:{ gchar *text; if ((ret = gst_ebml_read_ascii (ebml, &id, &text)) != GST_FLOW_OK) break; GST_DEBUG_OBJECT (demux, "CodecID: %s", GST_STR_NULL (text)); context->codec_id = text; break; } /* codec private data */ case GST_MATROSKA_ID_CODECPRIVATE:{ guint8 *data; guint64 size; if ((ret = gst_ebml_read_binary (ebml, &id, &data, &size)) != GST_FLOW_OK) break; context->codec_priv = data; context->codec_priv_size = size; GST_DEBUG_OBJECT (demux, "CodecPrivate of size %" G_GUINT64_FORMAT, size); break; } /* name of the codec */ case GST_MATROSKA_ID_CODECNAME:{ gchar *text; if ((ret = gst_ebml_read_utf8 (ebml, &id, &text)) != GST_FLOW_OK) break; GST_DEBUG_OBJECT (demux, "CodecName: %s", GST_STR_NULL (text)); context->codec_name = text; break; } /* codec delay */ case GST_MATROSKA_ID_CODECDELAY:{ 
guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; context->codec_delay = num; GST_DEBUG_OBJECT (demux, "CodecDelay: %" GST_TIME_FORMAT, GST_TIME_ARGS (num)); break; } /* codec delay */ case GST_MATROSKA_ID_SEEKPREROLL:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; context->seek_preroll = num; GST_DEBUG_OBJECT (demux, "SeekPreroll: %" GST_TIME_FORMAT, GST_TIME_ARGS (num)); break; } /* name of this track */ case GST_MATROSKA_ID_TRACKNAME:{ gchar *text; if ((ret = gst_ebml_read_utf8 (ebml, &id, &text)) != GST_FLOW_OK) break; context->name = text; GST_DEBUG_OBJECT (demux, "TrackName: %s", GST_STR_NULL (text)); break; } /* language (matters for audio/subtitles, mostly) */ case GST_MATROSKA_ID_TRACKLANGUAGE:{ gchar *text; if ((ret = gst_ebml_read_utf8 (ebml, &id, &text)) != GST_FLOW_OK) break; context->language = text; /* fre-ca => fre */ if (strlen (context->language) >= 4 && context->language[3] == '-') context->language[3] = '\0'; GST_DEBUG_OBJECT (demux, "TrackLanguage: %s", GST_STR_NULL (context->language)); break; } /* whether this is actually used */ case GST_MATROSKA_ID_TRACKFLAGENABLED:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num) context->flags |= GST_MATROSKA_TRACK_ENABLED; else context->flags &= ~GST_MATROSKA_TRACK_ENABLED; GST_DEBUG_OBJECT (demux, "TrackEnabled: %d", (context->flags & GST_MATROSKA_TRACK_ENABLED) ? 1 : 0); break; } /* whether it's the default for this track type */ case GST_MATROSKA_ID_TRACKFLAGDEFAULT:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num) context->flags |= GST_MATROSKA_TRACK_DEFAULT; else context->flags &= ~GST_MATROSKA_TRACK_DEFAULT; GST_DEBUG_OBJECT (demux, "TrackDefault: %d", (context->flags & GST_MATROSKA_TRACK_DEFAULT) ? 
1 : 0); break; } /* whether the track must be used during playback */ case GST_MATROSKA_ID_TRACKFLAGFORCED:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num) context->flags |= GST_MATROSKA_TRACK_FORCED; else context->flags &= ~GST_MATROSKA_TRACK_FORCED; GST_DEBUG_OBJECT (demux, "TrackForced: %d", (context->flags & GST_MATROSKA_TRACK_FORCED) ? 1 : 0); break; } /* lacing (like MPEG, where blocks don't end/start on frame * boundaries) */ case GST_MATROSKA_ID_TRACKFLAGLACING:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num) context->flags |= GST_MATROSKA_TRACK_LACING; else context->flags &= ~GST_MATROSKA_TRACK_LACING; GST_DEBUG_OBJECT (demux, "TrackLacing: %d", (context->flags & GST_MATROSKA_TRACK_LACING) ? 1 : 0); break; } /* default length (in time) of one data block in this track */ case GST_MATROSKA_ID_TRACKDEFAULTDURATION:{ guint64 num; if ((ret = gst_ebml_read_uint (ebml, &id, &num)) != GST_FLOW_OK) break; if (num == 0) { GST_WARNING_OBJECT (demux, "Invalid TrackDefaultDuration 0"); break; } GST_DEBUG_OBJECT (demux, "TrackDefaultDuration: %" G_GUINT64_FORMAT, num); context->default_duration = num; break; } case GST_MATROSKA_ID_CONTENTENCODINGS:{ ret = gst_matroska_read_common_read_track_encodings (&demux->common, ebml, context); break; } case GST_MATROSKA_ID_TRACKTIMECODESCALE:{ gdouble num; if ((ret = gst_ebml_read_float (ebml, &id, &num)) != GST_FLOW_OK) break; if (num <= 0.0) { GST_WARNING_OBJECT (demux, "Invalid TrackTimeCodeScale %lf", num); break; } GST_DEBUG_OBJECT (demux, "TrackTimeCodeScale: %lf", num); context->timecodescale = num; break; } default: GST_WARNING ("Unknown TrackEntry subelement 0x%x - ignoring", id); /* pass-through */ /* we ignore these because they're nothing useful (i.e. crap) * or simply not implemented yet. 
*/ case GST_MATROSKA_ID_TRACKMINCACHE: case GST_MATROSKA_ID_TRACKMAXCACHE: case GST_MATROSKA_ID_MAXBLOCKADDITIONID: case GST_MATROSKA_ID_TRACKATTACHMENTLINK: case GST_MATROSKA_ID_TRACKOVERLAY: case GST_MATROSKA_ID_TRACKTRANSLATE: case GST_MATROSKA_ID_TRACKOFFSET: case GST_MATROSKA_ID_CODECSETTINGS: case GST_MATROSKA_ID_CODECINFOURL: case GST_MATROSKA_ID_CODECDOWNLOADURL: case GST_MATROSKA_ID_CODECDECODEALL: ret = gst_ebml_read_skip (ebml); break; } } DEBUG_ELEMENT_STOP (demux, ebml, "TrackEntry", ret); /* Decode codec private data if necessary */ if (context->encodings && context->encodings->len > 0 && context->codec_priv && context->codec_priv_size > 0) { if (!gst_matroska_decode_data (context->encodings, &context->codec_priv, &context->codec_priv_size, GST_MATROSKA_TRACK_ENCODING_SCOPE_CODEC_DATA, TRUE)) { GST_WARNING_OBJECT (demux, "Decoding codec private data failed"); ret = GST_FLOW_ERROR; } } if (context->type == 0 || context->codec_id == NULL || (ret != GST_FLOW_OK && ret != GST_FLOW_EOS)) { if (ret == GST_FLOW_OK || ret == GST_FLOW_EOS) GST_WARNING_OBJECT (ebml, "Unknown stream/codec in track entry header"); gst_matroska_track_free (context); context = NULL; *dest_context = NULL; return ret; } /* check for a cached track taglist */ cached_taglist = (GstTagList *) g_hash_table_lookup (demux->common.cached_track_taglists, GUINT_TO_POINTER (context->uid)); if (cached_taglist) gst_tag_list_insert (context->tags, cached_taglist, GST_TAG_MERGE_APPEND); /* compute caps */ switch (context->type) { case GST_MATROSKA_TRACK_TYPE_VIDEO:{ GstMatroskaTrackVideoContext *videocontext = (GstMatroskaTrackVideoContext *) context; caps = gst_matroska_demux_video_caps (videocontext, context->codec_id, context->codec_priv, context->codec_priv_size, &codec, &riff_fourcc); if (codec) { gst_tag_list_add (context->tags, GST_TAG_MERGE_REPLACE, GST_TAG_VIDEO_CODEC, codec, NULL); context->tags_changed = TRUE; g_free (codec); } break; } case GST_MATROSKA_TRACK_TYPE_AUDIO:{ GstClockTime 
lead_in_ts = 0; GstMatroskaTrackAudioContext *audiocontext = (GstMatroskaTrackAudioContext *) context; caps = gst_matroska_demux_audio_caps (audiocontext, context->codec_id, context->codec_priv, context->codec_priv_size, &codec, &riff_audio_fmt, &lead_in_ts); if (lead_in_ts > demux->audio_lead_in_ts) { demux->audio_lead_in_ts = lead_in_ts; GST_DEBUG_OBJECT (demux, "Increased audio lead-in to %" GST_TIME_FORMAT, GST_TIME_ARGS (lead_in_ts)); } if (codec) { gst_tag_list_add (context->tags, GST_TAG_MERGE_REPLACE, GST_TAG_AUDIO_CODEC, codec, NULL); context->tags_changed = TRUE; g_free (codec); } break; } case GST_MATROSKA_TRACK_TYPE_SUBTITLE:{ GstMatroskaTrackSubtitleContext *subtitlecontext = (GstMatroskaTrackSubtitleContext *) context; caps = gst_matroska_demux_subtitle_caps (subtitlecontext, context->codec_id, context->codec_priv, context->codec_priv_size); break; } case GST_MATROSKA_TRACK_TYPE_COMPLEX: case GST_MATROSKA_TRACK_TYPE_LOGO: case GST_MATROSKA_TRACK_TYPE_BUTTONS: case GST_MATROSKA_TRACK_TYPE_CONTROL: default: /* we should already have quit by now */ g_assert_not_reached (); } if ((context->language == NULL || *context->language == '\0') && (context->type == GST_MATROSKA_TRACK_TYPE_AUDIO || context->type == GST_MATROSKA_TRACK_TYPE_SUBTITLE)) { GST_LOG ("stream %d: language=eng (assuming default)", context->index); context->language = g_strdup ("eng"); } if (context->language) { const gchar *lang; /* Matroska contains ISO 639-2B codes, we want ISO 639-1 */ lang = gst_tag_get_language_code (context->language); gst_tag_list_add (context->tags, GST_TAG_MERGE_REPLACE, GST_TAG_LANGUAGE_CODE, (lang) ? 
lang : context->language, NULL); if (context->name) { gst_tag_list_add (context->tags, GST_TAG_MERGE_REPLACE, GST_TAG_TITLE, context->name, NULL); } context->tags_changed = TRUE; } if (caps == NULL) { GST_WARNING_OBJECT (demux, "could not determine caps for stream with " "codec_id='%s'", context->codec_id); switch (context->type) { case GST_MATROSKA_TRACK_TYPE_VIDEO: caps = gst_caps_new_empty_simple ("video/x-unknown"); break; case GST_MATROSKA_TRACK_TYPE_AUDIO: caps = gst_caps_new_empty_simple ("audio/x-unknown"); break; case GST_MATROSKA_TRACK_TYPE_SUBTITLE: caps = gst_caps_new_empty_simple ("application/x-subtitle-unknown"); break; case GST_MATROSKA_TRACK_TYPE_COMPLEX: default: caps = gst_caps_new_empty_simple ("application/x-matroska-unknown"); break; } gst_caps_set_simple (caps, "codec-id", G_TYPE_STRING, context->codec_id, NULL); /* add any unrecognised riff fourcc / audio format, but after codec-id */ if (context->type == GST_MATROSKA_TRACK_TYPE_AUDIO && riff_audio_fmt != 0) gst_caps_set_simple (caps, "format", G_TYPE_INT, riff_audio_fmt, NULL); else if (context->type == GST_MATROSKA_TRACK_TYPE_VIDEO && riff_fourcc != 0) { gchar *fstr = g_strdup_printf ("%" GST_FOURCC_FORMAT, GST_FOURCC_ARGS (riff_fourcc)); gst_caps_set_simple (caps, "fourcc", G_TYPE_STRING, fstr, NULL); g_free (fstr); } } else if (context->stream_headers != NULL) { gst_matroska_demux_add_stream_headers_to_caps (demux, context->stream_headers, caps); } if (context->encodings) { GstMatroskaTrackEncoding *enc; guint i; for (i = 0; i < context->encodings->len; i++) { enc = &g_array_index (context->encodings, GstMatroskaTrackEncoding, i); if (enc->type == GST_MATROSKA_ENCODING_ENCRYPTION /* encryption */ ) { GstStructure *s = gst_caps_get_structure (caps, 0); if (!gst_structure_has_name (s, "application/x-webm-enc")) { gst_structure_set (s, "original-media-type", G_TYPE_STRING, gst_structure_get_name (s), NULL); gst_structure_set (s, "encryption-algorithm", G_TYPE_STRING, 
gst_matroska_track_encryption_algorithm_name (enc->enc_algo), NULL); gst_structure_set (s, "encoding-scope", G_TYPE_STRING, gst_matroska_track_encoding_scope_name (enc->scope), NULL); gst_structure_set (s, "cipher-mode", G_TYPE_STRING, gst_matroska_track_encryption_cipher_mode_name (enc->enc_cipher_mode), NULL); gst_structure_set_name (s, "application/x-webm-enc"); } } } } context->caps = caps; /* tadaah! */ *dest_context = context; return ret; }
0
[]
gst-plugins-good
9181191511f9c0be6a89c98b311f49d66bd46dc3
74,880,478,367,994,630,000,000,000,000,000,000,000
954
matroskademux: Fix extraction of multichannel WavPack The old code had a couple of issues that all lead to potential memory safety bugs. - Use a constant for the Wavpack4Header size instead of using sizeof. It's written out into the data and not from the struct and who knows what special alignment/padding requirements some C compilers have. - gst_buffer_set_size() does not realloc the buffer when setting a bigger size than allocated, it only allows growing up to the maximum allocated size. Instead use a GstAdapter to collect all the blocks and take out everything at once in the end. - Check that enough data is actually available in the input and otherwise handle it an error in all cases instead of silently ignoring it. Among other things this fixes out of bounds writes because the code assumed gst_buffer_set_size() can grow the buffer and simply wrote after the end of the buffer. Thanks to Natalie Silvanovich for reporting. Fixes https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/-/issues/859 Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/-/merge_requests/903>
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) { return false; }
0
[ "CWE-476" ]
linux
ac64115a66c18c01745bbd3c47a36b124e5fd8c0
112,613,547,845,533,980,000,000,000,000,000,000,000
4
KVM: PPC: Fix oops when checking KVM_CAP_PPC_HTM The following program causes a kernel oops: #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/ioctl.h> #include <linux/kvm.h> main() { int fd = open("/dev/kvm", O_RDWR); ioctl(fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_HTM); } This happens because when using the global KVM fd with KVM_CHECK_EXTENSION, kvm_vm_ioctl_check_extension() gets called with a NULL kvm argument, which gets dereferenced in is_kvmppc_hv_enabled(). Spotted while reading the code. Let's use the hv_enabled fallback variable, like everywhere else in this function. Fixes: 23528bb21ee2 ("KVM: PPC: Introduce KVM_CAP_PPC_HTM") Cc: [email protected] # v4.7+ Signed-off-by: Greg Kurz <[email protected]> Reviewed-by: David Gibson <[email protected]> Reviewed-by: Thomas Huth <[email protected]> Signed-off-by: Paul Mackerras <[email protected]>
void update_process_times(int user_tick) { struct task_struct *p = current; /* Note: this timer irq context must be accounted for as well. */ account_process_tick(p, user_tick); run_local_timers(); rcu_sched_clock_irq(user_tick); #ifdef CONFIG_IRQ_WORK if (in_irq()) irq_work_tick(); #endif scheduler_tick(); if (IS_ENABLED(CONFIG_POSIX_TIMERS)) run_posix_cpu_timers(); }
1
[ "CWE-200", "CWE-330" ]
linux
f227e3ec3b5cad859ad15666874405e8c1bbc1d4
33,405,367,387,359,627,000,000,000,000,000,000,000
16
random32: update the net random state on interrupt and activity This modifies the first 32 bits out of the 128 bits of a random CPU's net_rand_state on interrupt or CPU activity to complicate remote observations that could lead to guessing the network RNG's internal state. Note that depending on some network devices' interrupt rate moderation or binding, this re-seeding might happen on every packet or even almost never. In addition, with NOHZ some CPUs might not even get timer interrupts, leaving their local state rarely updated, while they are running networked processes making use of the random state. For this reason, we also perform this update in update_process_times() in order to at least update the state when there is user or system activity, since it's the only case we care about. Reported-by: Amit Klein <[email protected]> Suggested-by: Linus Torvalds <[email protected]> Cc: Eric Dumazet <[email protected]> Cc: "Jason A. Donenfeld" <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Kees Cook <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: <[email protected]> Signed-off-by: Willy Tarreau <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
CallResult<PseudoHandle<>> JSObject::getNamedOrIndexed( Handle<JSObject> selfHandle, Runtime *runtime, SymbolID name, PropOpFlags opFlags) { if (LLVM_UNLIKELY(selfHandle->flags_.indexedStorage)) { // Note that getStringView can be satisfied without materializing the // Identifier. const auto strView = runtime->getIdentifierTable().getStringView(runtime, name); if (auto nameAsIndex = toArrayIndex(strView)) { return getComputed_RJS( selfHandle, runtime, runtime->makeHandle(HermesValue::encodeNumberValue(*nameAsIndex))); } // Here we have indexed properties but the symbol was not index-like. // Fall through to getNamed(). } return getNamed_RJS(selfHandle, runtime, name, opFlags); }
0
[ "CWE-843", "CWE-125" ]
hermes
fe52854cdf6725c2eaa9e125995da76e6ceb27da
156,171,224,441,989,490,000,000,000,000,000,000,000
21
[CVE-2020-1911] Look up HostObject computed properties on the right object in the prototype chain. Summary: The change in the hermes repository fixes the security vulnerability CVE-2020-1911. This vulnerability only affects applications which allow evaluation of uncontrolled, untrusted JavaScript code not shipped with the app, so React Native apps will generally not be affected. This revision includes a test for the bug. The test is generic JSI code, so it is included in the hermes and react-native repositories. Changelog: [Internal] Reviewed By: tmikov Differential Revision: D23322992 fbshipit-source-id: 4e88c974afe1ad33a263f9cac03e9dc98d33649a
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp, size_t min_ss_size) { struct task_struct *t = current; if (oss) { memset(oss, 0, sizeof(stack_t)); oss->ss_sp = (void __user *) t->sas_ss_sp; oss->ss_size = t->sas_ss_size; oss->ss_flags = sas_ss_flags(sp) | (current->sas_ss_flags & SS_FLAG_BITS); } if (ss) { void __user *ss_sp = ss->ss_sp; size_t ss_size = ss->ss_size; unsigned ss_flags = ss->ss_flags; int ss_mode; if (unlikely(on_sig_stack(sp))) return -EPERM; ss_mode = ss_flags & ~SS_FLAG_BITS; if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK && ss_mode != 0)) return -EINVAL; if (ss_mode == SS_DISABLE) { ss_size = 0; ss_sp = NULL; } else { if (unlikely(ss_size < min_ss_size)) return -ENOMEM; } t->sas_ss_sp = (unsigned long) ss_sp; t->sas_ss_size = ss_size; t->sas_ss_flags = ss_flags; } return 0; }
0
[ "CWE-190" ]
linux
d1e7fd6462ca9fc76650fbe6ca800e35b24267da
322,723,707,172,343,580,000,000,000,000,000,000,000
41
signal: Extend exec_id to 64bits Replace the 32bit exec_id with a 64bit exec_id to make it impossible to wrap the exec_id counter. With care an attacker can cause exec_id wrap and send arbitrary signals to a newly exec'd parent. This bypasses the signal sending checks if the parent changes their credentials during exec. The severity of this problem can been seen that in my limited testing of a 32bit exec_id it can take as little as 19s to exec 65536 times. Which means that it can take as little as 14 days to wrap a 32bit exec_id. Adam Zabrocki has succeeded wrapping the self_exe_id in 7 days. Even my slower timing is in the uptime of a typical server. Which means self_exec_id is simply a speed bump today, and if exec gets noticably faster self_exec_id won't even be a speed bump. Extending self_exec_id to 64bits introduces a problem on 32bit architectures where reading self_exec_id is no longer atomic and can take two read instructions. Which means that is is possible to hit a window where the read value of exec_id does not match the written value. So with very lucky timing after this change this still remains expoiltable. I have updated the update of exec_id on exec to use WRITE_ONCE and the read of exec_id in do_notify_parent to use READ_ONCE to make it clear that there is no locking between these two locations. Link: https://lore.kernel.org/kernel-hardening/[email protected] Fixes: 2.3.23pre2 Cc: [email protected] Signed-off-by: "Eric W. Biederman" <[email protected]>
static inline unsigned long task_util(struct task_struct *p) { return READ_ONCE(p->se.avg.util_avg); }
0
[ "CWE-400", "CWE-703", "CWE-835" ]
linux
c40f7d74c741a907cfaeb73a7697081881c497d0
148,177,954,197,687,270,000,000,000,000,000,000,000
4
sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the scheduler under high loads, starting at around the v4.18 time frame, and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list manipulation. Do a (manual) revert of: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") It turns out that the list_del_leaf_cfs_rq() introduced by this commit is a surprising property that was not considered in followup commits such as: 9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list") As Vincent Guittot explains: "I think that there is a bigger problem with commit a9e7f6544b9c and cfs_rq throttling: Let take the example of the following topology TG2 --> TG1 --> root: 1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1 cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in one path because it has never been used and can't be throttled so tmp_alone_branch will point to leaf_cfs_rq_list at the end. 2) Then TG1 is throttled 3) and we add TG3 as a new child of TG1. 4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1 cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list. With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list. So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1 cfs_rq is removed from the list. Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list but tmp_alone_branch still points to TG3 cfs_rq because its throttled parent can't be enqueued when the lock is released. tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should. So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch points on another TG cfs_rq, the next TG cfs_rq that will be added, will be linked outside rq->leaf_cfs_rq_list - which is bad. 
In addition, we can break the ordering of the cfs_rq in rq->leaf_cfs_rq_list but this ordering is used to update and propagate the update from leaf down to root." Instead of trying to work through all these cases and trying to reproduce the very high loads that produced the lockup to begin with, simplify the code temporarily by reverting a9e7f6544b9c - which change was clearly not thought through completely. This (hopefully) gives us a kernel that doesn't lock up so people can continue to enjoy their holidays without worrying about regressions. ;-) [ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ] Analyzed-by: Xie XiuQi <[email protected]> Analyzed-by: Vincent Guittot <[email protected]> Reported-by: Zhipeng Xie <[email protected]> Reported-by: Sargun Dhillon <[email protected]> Reported-by: Xie XiuQi <[email protected]> Tested-by: Zhipeng Xie <[email protected]> Tested-by: Sargun Dhillon <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Acked-by: Vincent Guittot <[email protected]> Cc: <[email protected]> # v4.13+ Cc: Bin Li <[email protected]> Cc: Mike Galbraith <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Tejun Heo <[email protected]> Cc: Thomas Gleixner <[email protected]> Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path") Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
static int get_master_version_and_clock(MYSQL* mysql, Master_info* mi) { char err_buff[MAX_SLAVE_ERRMSG]; const char* errmsg= 0; int err_code= 0; int version_number=0; version_number= atoi(mysql->server_version); MYSQL_RES *master_res= 0; MYSQL_ROW master_row; DBUG_ENTER("get_master_version_and_clock"); /* Free old mi_description_event (that is needed if we are in a reconnection). */ DBUG_EXECUTE_IF("unrecognized_master_version", { version_number= 1; };); mysql_mutex_lock(&mi->data_lock); mi->set_mi_description_event(NULL); if (!my_isdigit(&my_charset_bin,*mysql->server_version)) { errmsg = "Master reported unrecognized MySQL version"; err_code= ER_SLAVE_FATAL_ERROR; sprintf(err_buff, ER(err_code), errmsg); } else { /* Note the following switch will bug when we have MySQL branch 30 ;) */ switch (version_number) { case 0: case 1: case 2: errmsg = "Master reported unrecognized MySQL version"; err_code= ER_SLAVE_FATAL_ERROR; sprintf(err_buff, ER(err_code), errmsg); break; case 3: mi->set_mi_description_event(new Format_description_log_event(1, mysql->server_version)); break; case 4: mi->set_mi_description_event(new Format_description_log_event(3, mysql->server_version)); break; default: /* Master is MySQL >=5.0. Give a default Format_desc event, so that we can take the early steps (like tests for "is this a 3.23 master") which we have to take before we receive the real master's Format_desc which will override this one. Note that the Format_desc we create below is garbage (it has the format of the *slave*); it's only good to help know if the master is 3.23, 4.0, etc. */ mi->set_mi_description_event(new Format_description_log_event(4, mysql->server_version)); break; } } /* This does not mean that a 5.0 slave will be able to read a 5.5 master; but as we don't know yet, we don't want to forbid this for now. If a 5.0 slave can't read a 5.5 master, this will show up when the slave can't read some events sent by the master, and there will be error messages. 
*/ if (errmsg) { /* unlock the mutex on master info structure */ mysql_mutex_unlock(&mi->data_lock); goto err; } /* as we are here, we tried to allocate the event */ if (mi->get_mi_description_event() == NULL) { mysql_mutex_unlock(&mi->data_lock); errmsg= "default Format_description_log_event"; err_code= ER_SLAVE_CREATE_EVENT_FAILURE; sprintf(err_buff, ER(err_code), errmsg); goto err; } /* FD_q's (A) is set initially from RL's (A): FD_q.(A) := RL.(A). It's necessary to adjust FD_q.(A) at this point because in the following course FD_q is going to be dumped to RL. Generally FD_q is derived from a received FD_m (roughly FD_q := FD_m) in queue_event and the master's (A) is installed. At one step with the assignment the Relay-Log's checksum alg is set to a new value: RL.(A) := FD_q.(A). If the slave service is stopped the last time assigned RL.(A) will be passed over to the restarting service (to the current execution point). RL.A is a "codec" to verify checksum in queue_event() almost all the time the first fake Rotate event. Starting from this point IO thread will executes the following checksum warmup sequence of actions: FD_q.A := RL.A, A_m^0 := master.@@global.binlog_checksum, {queue_event(R_f): verifies(R_f, A_m^0)}, {queue_event(FD_m): verifies(FD_m, FD_m.A), dump(FD_q), rotate(RL), FD_q := FD_m, RL.A := FD_q.A)} See legends definition on MYSQL_BIN_LOG::relay_log_checksum_alg docs lines (binlog.h). In above A_m^0 - the value of master's @@binlog_checksum determined in the upcoming handshake (stored in mi->checksum_alg_before_fd). After the warm-up sequence IO gets to "normal" checksum verification mode to use RL.A in {queue_event(E_m): verifies(E_m, RL.A)} until it has received a new FD_m. 
*/ mi->get_mi_description_event()->checksum_alg= mi->rli->relay_log.relay_log_checksum_alg; DBUG_ASSERT(mi->get_mi_description_event()->checksum_alg != BINLOG_CHECKSUM_ALG_UNDEF); DBUG_ASSERT(mi->rli->relay_log.relay_log_checksum_alg != BINLOG_CHECKSUM_ALG_UNDEF); mysql_mutex_unlock(&mi->data_lock); /* Compare the master and slave's clock. Do not die if master's clock is unavailable (very old master not supporting UNIX_TIMESTAMP()?). */ DBUG_EXECUTE_IF("dbug.before_get_UNIX_TIMESTAMP", { const char act[]= "now " "wait_for signal.get_unix_timestamp"; DBUG_ASSERT(opt_debug_sync_timeout > 0); DBUG_ASSERT(!debug_sync_set_action(current_thd, STRING_WITH_LEN(act))); };); master_res= NULL; if (!mysql_real_query(mysql, STRING_WITH_LEN("SELECT UNIX_TIMESTAMP()")) && (master_res= mysql_store_result(mysql)) && (master_row= mysql_fetch_row(master_res))) { mysql_mutex_lock(&mi->data_lock); mi->clock_diff_with_master= (long) (time((time_t*) 0) - strtoul(master_row[0], 0, 10)); mysql_mutex_unlock(&mi->data_lock); } else if (check_io_slave_killed(mi->info_thd, mi, NULL)) goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { mi->report(WARNING_LEVEL, mysql_errno(mysql), "Get master clock failed with error: %s", mysql_error(mysql)); goto network_err; } else { mysql_mutex_lock(&mi->data_lock); mi->clock_diff_with_master= 0; /* The "most sensible" value */ mysql_mutex_unlock(&mi->data_lock); sql_print_warning("\"SELECT UNIX_TIMESTAMP()\" failed on master, " "do not trust column Seconds_Behind_Master of SHOW " "SLAVE STATUS. Error: %s (%d)", mysql_error(mysql), mysql_errno(mysql)); } if (master_res) { mysql_free_result(master_res); master_res= NULL; } /* Check that the master's server id and ours are different. Because if they are equal (which can result from a simple copy of master's datadir to slave, thus copying some my.cnf), replication will work but all events will be skipped. Do not die if SHOW VARIABLES LIKE 'SERVER_ID' fails on master (very old master?). 
Note: we could have put a @@SERVER_ID in the previous SELECT UNIX_TIMESTAMP() instead, but this would not have worked on 3.23 masters. */ DBUG_EXECUTE_IF("dbug.before_get_SERVER_ID", { const char act[]= "now " "wait_for signal.get_server_id"; DBUG_ASSERT(opt_debug_sync_timeout > 0); DBUG_ASSERT(!debug_sync_set_action(current_thd, STRING_WITH_LEN(act))); };); master_res= NULL; master_row= NULL; if (!mysql_real_query(mysql, STRING_WITH_LEN("SHOW VARIABLES LIKE 'SERVER_ID'")) && (master_res= mysql_store_result(mysql)) && (master_row= mysql_fetch_row(master_res))) { if ((::server_id == (mi->master_id= strtoul(master_row[1], 0, 10))) && !mi->rli->replicate_same_server_id) { errmsg= "The slave I/O thread stops because master and slave have equal \ MySQL server ids; these ids must be different for replication to work (or \ the --replicate-same-server-id option must be used on slave but this does \ not always make sense; please check the manual before using it)."; err_code= ER_SLAVE_FATAL_ERROR; sprintf(err_buff, ER(err_code), errmsg); goto err; } } else if (mysql_errno(mysql)) { if (check_io_slave_killed(mi->info_thd, mi, NULL)) goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { mi->report(WARNING_LEVEL, mysql_errno(mysql), "Get master SERVER_ID failed with error: %s", mysql_error(mysql)); goto network_err; } /* Fatal error */ errmsg= "The slave I/O thread stops because a fatal error is encountered \ when it try to get the value of SERVER_ID variable from master."; err_code= mysql_errno(mysql); sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql)); goto err; } else if (!master_row && master_res) { mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE, "Unknown system variable 'SERVER_ID' on master, \ maybe it is a *VERY OLD MASTER*."); } if (master_res) { mysql_free_result(master_res); master_res= NULL; } if (mi->master_id == 0 && mi->ignore_server_ids->dynamic_ids.elements > 0) { errmsg= "Slave configured with server id filtering could not 
detect the master server id."; err_code= ER_SLAVE_FATAL_ERROR; sprintf(err_buff, ER(err_code), errmsg); goto err; } /* Check that the master's global character_set_server and ours are the same. Not fatal if query fails (old master?). Note that we don't check for equality of global character_set_client and collation_connection (neither do we prevent their setting in set_var.cc). That's because from what I (Guilhem) have tested, the global values of these 2 are never used (new connections don't use them). We don't test equality of global collation_database either as it's is going to be deprecated (made read-only) in 4.1 very soon. The test is only relevant if master < 5.0.3 (we'll test only if it's older than the 5 branch; < 5.0.3 was alpha...), as >= 5.0.3 master stores charset info in each binlog event. We don't do it for 3.23 because masters <3.23.50 hang on SELECT @@unknown_var (BUG#7965 - see changelog of 3.23.50). So finally we test only if master is 4.x. */ /* redundant with rest of code but safer against later additions */ if (*mysql->server_version == '3') goto err; if (*mysql->server_version == '4') { master_res= NULL; if (!mysql_real_query(mysql, STRING_WITH_LEN("SELECT @@GLOBAL.COLLATION_SERVER")) && (master_res= mysql_store_result(mysql)) && (master_row= mysql_fetch_row(master_res))) { if (strcmp(master_row[0], global_system_variables.collation_server->name)) { errmsg= "The slave I/O thread stops because master and slave have \ different values for the COLLATION_SERVER global variable. 
The values must \ be equal for the Statement-format replication to work"; err_code= ER_SLAVE_FATAL_ERROR; sprintf(err_buff, ER(err_code), errmsg); goto err; } } else if (check_io_slave_killed(mi->info_thd, mi, NULL)) goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { mi->report(WARNING_LEVEL, mysql_errno(mysql), "Get master COLLATION_SERVER failed with error: %s", mysql_error(mysql)); goto network_err; } else if (mysql_errno(mysql) != ER_UNKNOWN_SYSTEM_VARIABLE) { /* Fatal error */ errmsg= "The slave I/O thread stops because a fatal error is encountered \ when it try to get the value of COLLATION_SERVER global variable from master."; err_code= mysql_errno(mysql); sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql)); goto err; } else mi->report(WARNING_LEVEL, ER_UNKNOWN_SYSTEM_VARIABLE, "Unknown system variable 'COLLATION_SERVER' on master, \ maybe it is a *VERY OLD MASTER*. *NOTE*: slave may experience \ inconsistency if replicated data deals with collation."); if (master_res) { mysql_free_result(master_res); master_res= NULL; } } /* Perform analogous check for time zone. Theoretically we also should perform check here to verify that SYSTEM time zones are the same on slave and master, but we can't rely on value of @@system_time_zone variable (it is time zone abbreviation) since it determined at start time and so could differ for slave and master even if they are really in the same system time zone. So we are omiting this check and just relying on documentation. Also according to Monty there are many users who are using replication between servers in various time zones. Hence such check will broke everything for them. (And now everything will work for them because by default both their master and slave will have 'SYSTEM' time zone). This check is only necessary for 4.x masters (and < 5.0.4 masters but those were alpha). 
*/ if (*mysql->server_version == '4') { master_res= NULL; if (!mysql_real_query(mysql, STRING_WITH_LEN("SELECT @@GLOBAL.TIME_ZONE")) && (master_res= mysql_store_result(mysql)) && (master_row= mysql_fetch_row(master_res))) { if (strcmp(master_row[0], global_system_variables.time_zone->get_name()->ptr())) { errmsg= "The slave I/O thread stops because master and slave have \ different values for the TIME_ZONE global variable. The values must \ be equal for the Statement-format replication to work"; err_code= ER_SLAVE_FATAL_ERROR; sprintf(err_buff, ER(err_code), errmsg); goto err; } } else if (check_io_slave_killed(mi->info_thd, mi, NULL)) goto slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { mi->report(WARNING_LEVEL, mysql_errno(mysql), "Get master TIME_ZONE failed with error: %s", mysql_error(mysql)); goto network_err; } else { /* Fatal error */ errmsg= "The slave I/O thread stops because a fatal error is encountered \ when it try to get the value of TIME_ZONE global variable from master."; err_code= mysql_errno(mysql); sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql)); goto err; } if (master_res) { mysql_free_result(master_res); master_res= NULL; } } if (mi->heartbeat_period != 0.0) { char llbuf[22]; const char query_format[]= "SET @master_heartbeat_period= %s"; char query[sizeof(query_format) - 2 + sizeof(llbuf)]; /* the period is an ulonglong of nano-secs. 
*/ llstr((ulonglong) (mi->heartbeat_period*1000000000UL), llbuf); sprintf(query, query_format, llbuf); if (mysql_real_query(mysql, query, strlen(query))) { if (check_io_slave_killed(mi->info_thd, mi, NULL)) goto slave_killed_err; if (is_network_error(mysql_errno(mysql))) { mi->report(WARNING_LEVEL, mysql_errno(mysql), "SET @master_heartbeat_period to master failed with error: %s", mysql_error(mysql)); mysql_free_result(mysql_store_result(mysql)); goto network_err; } else { /* Fatal error */ errmsg= "The slave I/O thread stops because a fatal error is encountered " " when it tries to SET @master_heartbeat_period on master."; err_code= ER_SLAVE_FATAL_ERROR; sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql)); mysql_free_result(mysql_store_result(mysql)); goto err; } } mysql_free_result(mysql_store_result(mysql)); } /* Querying if master is capable to checksum and notifying it about own CRC-awareness. The master's side instant value of @@global.binlog_checksum is stored in the dump thread's uservar area as well as cached locally to become known in consensus by master and slave. */ if (DBUG_EVALUATE_IF("simulate_slave_unaware_checksum", 0, 1)) { int rc; const char query[]= "SET @master_binlog_checksum= @@global.binlog_checksum"; master_res= NULL; mi->checksum_alg_before_fd= BINLOG_CHECKSUM_ALG_UNDEF; //initially undefined /* @c checksum_alg_before_fd is queried from master in this block. If master is old checksum-unaware the value stays undefined. Once the first FD will be received its alg descriptor will replace the being queried one. 
*/ rc= mysql_real_query(mysql, query, strlen(query)); if (rc != 0) { mi->checksum_alg_before_fd= BINLOG_CHECKSUM_ALG_OFF; if (check_io_slave_killed(mi->info_thd, mi, NULL)) goto slave_killed_err; if (mysql_errno(mysql) == ER_UNKNOWN_SYSTEM_VARIABLE) { // this is tolerable as OM -> NS is supported mi->report(WARNING_LEVEL, mysql_errno(mysql), "Notifying master by %s failed with " "error: %s", query, mysql_error(mysql)); } else { if (is_network_error(mysql_errno(mysql))) { mi->report(WARNING_LEVEL, mysql_errno(mysql), "Notifying master by %s failed with " "error: %s", query, mysql_error(mysql)); mysql_free_result(mysql_store_result(mysql)); goto network_err; } else { errmsg= "The slave I/O thread stops because a fatal error is encountered " "when it tried to SET @master_binlog_checksum on master."; err_code= ER_SLAVE_FATAL_ERROR; sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql)); mysql_free_result(mysql_store_result(mysql)); goto err; } } } else { mysql_free_result(mysql_store_result(mysql)); if (!mysql_real_query(mysql, STRING_WITH_LEN("SELECT @master_binlog_checksum")) && (master_res= mysql_store_result(mysql)) && (master_row= mysql_fetch_row(master_res)) && (master_row[0] != NULL)) { mi->checksum_alg_before_fd= (uint8) find_type(master_row[0], &binlog_checksum_typelib, 1) - 1; DBUG_EXECUTE_IF("undefined_algorithm_on_slave", mi->checksum_alg_before_fd = BINLOG_CHECKSUM_ALG_UNDEF;); if(mi->checksum_alg_before_fd == BINLOG_CHECKSUM_ALG_UNDEF) { errmsg= "The slave I/O thread was stopped because a fatal error is encountered " "The checksum algorithm used by master is unknown to slave."; err_code= ER_SLAVE_FATAL_ERROR; sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql)); mysql_free_result(mysql_store_result(mysql)); goto err; } // valid outcome is either of DBUG_ASSERT(mi->checksum_alg_before_fd == BINLOG_CHECKSUM_ALG_OFF || mi->checksum_alg_before_fd == BINLOG_CHECKSUM_ALG_CRC32); } else if (check_io_slave_killed(mi->info_thd, mi, NULL)) goto 
slave_killed_err; else if (is_network_error(mysql_errno(mysql))) { mi->report(WARNING_LEVEL, mysql_errno(mysql), "Get master BINLOG_CHECKSUM failed with error: %s", mysql_error(mysql)); goto network_err; } else { errmsg= "The slave I/O thread stops because a fatal error is encountered " "when it tried to SELECT @master_binlog_checksum."; err_code= ER_SLAVE_FATAL_ERROR; sprintf(err_buff, "%s Error: %s", errmsg, mysql_error(mysql)); mysql_free_result(mysql_store_result(mysql)); goto err; } } if (master_res) { mysql_free_result(master_res); master_res= NULL; } } else mi->checksum_alg_before_fd= BINLOG_CHECKSUM_ALG_OFF; if (DBUG_EVALUATE_IF("simulate_slave_unaware_gtid", 0, 1)) { switch (io_thread_init_command(mi, "SELECT @@GLOBAL.GTID_MODE", ER_UNKNOWN_SYSTEM_VARIABLE, &master_res, &master_row)) { case COMMAND_STATUS_ERROR: DBUG_RETURN(2); case COMMAND_STATUS_ALLOWED_ERROR: // master is old and does not have @@GLOBAL.GTID_MODE mi->master_gtid_mode= 0; break; case COMMAND_STATUS_OK: int typelib_index= find_type(master_row[0], &gtid_mode_typelib, 1); mysql_free_result(master_res); if (typelib_index == 0) { mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, "The slave IO thread stops because the master has " "an unknown @@GLOBAL.GTID_MODE."); DBUG_RETURN(1); } mi->master_gtid_mode= typelib_index - 1; break; } if (mi->master_gtid_mode > gtid_mode + 1 || gtid_mode > mi->master_gtid_mode + 1) { mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, "The slave IO thread stops because the master has " "@@GLOBAL.GTID_MODE %s and this server has " "@@GLOBAL.GTID_MODE %s", gtid_mode_names[mi->master_gtid_mode], gtid_mode_names[gtid_mode]); DBUG_RETURN(1); } if (mi->is_auto_position() && mi->master_gtid_mode != 3) { mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, "The slave IO thread stops because the master has " "@@GLOBAL.GTID_MODE %s and we are trying to connect " "using MASTER_AUTO_POSITION.", gtid_mode_names[mi->master_gtid_mode]); DBUG_RETURN(1); } } err: if (errmsg) { if (master_res) 
mysql_free_result(master_res); DBUG_ASSERT(err_code != 0); mi->report(ERROR_LEVEL, err_code, "%s", err_buff); DBUG_RETURN(1); } DBUG_RETURN(0); network_err: if (master_res) mysql_free_result(master_res); DBUG_RETURN(2); slave_killed_err: if (master_res) mysql_free_result(master_res); DBUG_RETURN(2); }
0
[ "CWE-284", "CWE-295" ]
mysql-server
3bd5589e1a5a93f9c224badf983cd65c45215390
63,090,636,556,568,560,000,000,000,000,000,000,000
599
WL#6791 : Redefine client --ssl option to imply enforced encryption # Changed the meaning of the --ssl=1 option of all client binaries to mean force ssl, not try ssl and fail over to eunecrypted # Added a new MYSQL_OPT_SSL_ENFORCE mysql_options() option to specify that an ssl connection is required. # Added a new macro SSL_SET_OPTIONS() to the client SSL handling headers that sets all the relevant SSL options at once. # Revamped all of the current native clients to use the new macro # Removed some Windows line endings. # Added proper handling of the new option into the ssl helper headers. # If SSL is mandatory assume that the media is secure enough for the sha256 plugin to do unencrypted password exchange even before establishing a connection. # Set the default ssl cipher to DHE-RSA-AES256-SHA if none is specified. # updated test cases that require a non-default cipher to spawn a mysql command line tool binary since mysqltest has no support for specifying ciphers. # updated the replication slave connection code to always enforce SSL if any of the SSL config options is present. # test cases added and updated. # added a mysql_get_option() API to return mysql_options() values. Used the new API inside the sha256 plugin. # Fixed compilation warnings because of unused variables. # Fixed test failures (mysql_ssl and bug13115401) # Fixed whitespace issues. # Fully implemented the mysql_get_option() function. # Added a test case for mysql_get_option() # fixed some trailing whitespace issues # fixed some uint/int warnings in mysql_client_test.c # removed shared memory option from non-windows get_options tests # moved MYSQL_OPT_LOCAL_INFILE to the uint options
UserId PostgreSqlStorage::validateUser(const QString &user, const QString &password) { QSqlQuery query(logDb()); query.prepare(queryString("select_authuser")); query.bindValue(":username", user); query.bindValue(":password", cryptedPassword(password)); safeExec(query); if (query.first()) { return query.value(0).toInt(); } else { return 0; } }
0
[ "CWE-89" ]
quassel
aa1008be162cb27da938cce93ba533f54d228869
177,573,777,897,282,250,000,000,000,000,000,000,000
15
Fixing security vulnerability with Qt 4.8.5+ and PostgreSQL. Properly detects whether Qt performs slash escaping in SQL queries or not, and then configures PostgreSQL accordingly. This bug was a introduced due to a bugfix in Qt 4.8.5 disables slash escaping when binding queries: https://bugreports.qt-project.org/browse/QTBUG-30076 Thanks to brot and Tucos. [Fixes #1244]
static int aesni_cbc_hmac_sha1_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr) { EVP_AES_HMAC_SHA1 *key = data(ctx); switch (type) { case EVP_CTRL_AEAD_SET_MAC_KEY: { unsigned int i; unsigned char hmac_key[64]; memset(hmac_key, 0, sizeof(hmac_key)); if (arg > (int)sizeof(hmac_key)) { SHA1_Init(&key->head); SHA1_Update(&key->head, ptr, arg); SHA1_Final(hmac_key, &key->head); } else { memcpy(hmac_key, ptr, arg); } for (i = 0; i < sizeof(hmac_key); i++) hmac_key[i] ^= 0x36; /* ipad */ SHA1_Init(&key->head); SHA1_Update(&key->head, hmac_key, sizeof(hmac_key)); for (i = 0; i < sizeof(hmac_key); i++) hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */ SHA1_Init(&key->tail); SHA1_Update(&key->tail, hmac_key, sizeof(hmac_key)); OPENSSL_cleanse(hmac_key, sizeof(hmac_key)); return 1; } case EVP_CTRL_AEAD_TLS1_AAD: { unsigned char *p = ptr; unsigned int len; if (arg != EVP_AEAD_TLS1_AAD_LEN) return -1; len = p[arg - 2] << 8 | p[arg - 1]; if (ctx->encrypt) { key->payload_length = len; if ((key->aux.tls_ver = p[arg - 4] << 8 | p[arg - 3]) >= TLS1_1_VERSION) { len -= AES_BLOCK_SIZE; p[arg - 2] = len >> 8; p[arg - 1] = len; } key->md = key->head; SHA1_Update(&key->md, p, arg); return (int)(((len + SHA_DIGEST_LENGTH + AES_BLOCK_SIZE) & -AES_BLOCK_SIZE) - len); } else { memcpy(key->aux.tls_aad, ptr, arg); key->payload_length = arg; return SHA_DIGEST_LENGTH; } } default: return -1; } }
0
[ "CWE-310" ]
openssl
4159f311671cf3bac03815e5de44681eb758304a
80,015,426,840,685,070,000,000,000,000,000,000,000
70
Check that we have enough padding characters. Reviewed-by: Emilia Käsper <[email protected]> CVE-2016-2107 MR: #2572
void ion_buffer_destroy(struct ion_buffer *buffer) { if (WARN_ON(buffer->kmap_cnt > 0)) buffer->heap->ops->unmap_kernel(buffer->heap, buffer); buffer->heap->ops->unmap_dma(buffer->heap, buffer); buffer->heap->ops->free(buffer); vfree(buffer->pages); kfree(buffer); }
0
[ "CWE-416", "CWE-284" ]
linux
9590232bb4f4cc824f3425a6e1349afbe6d6d2b7
291,650,511,169,005,970,000,000,000,000,000,000,000
9
staging/android/ion : fix a race condition in the ion driver There is a use-after-free problem in the ion driver. This is caused by a race condition in the ion_ioctl() function. A handle has ref count of 1 and two tasks on different cpus calls ION_IOC_FREE simultaneously. cpu 0 cpu 1 ------------------------------------------------------- ion_handle_get_by_id() (ref == 2) ion_handle_get_by_id() (ref == 3) ion_free() (ref == 2) ion_handle_put() (ref == 1) ion_free() (ref == 0 so ion_handle_destroy() is called and the handle is freed.) ion_handle_put() is called and it decreases the slub's next free pointer The problem is detected as an unaligned access in the spin lock functions since it uses load exclusive instruction. In some cases it corrupts the slub's free pointer which causes a mis-aligned access to the next free pointer.(kmalloc returns a pointer like ffffc0745b4580aa). And it causes lots of other hard-to-debug problems. This symptom is caused since the first member in the ion_handle structure is the reference count and the ion driver decrements the reference after it has been freed. To fix this problem client->lock mutex is extended to protect all the codes that uses the handle. Signed-off-by: Eun Taik Lee <[email protected]> Reviewed-by: Laura Abbott <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb) { struct rtable *rt; struct flowi4 fl4; const struct iphdr *iph = (const struct iphdr *) skb->data; int oif = skb->dev->ifindex; u8 tos = RT_TOS(iph->tos); u8 prot = iph->protocol; u32 mark = skb->mark; rt = (struct rtable *) dst; __build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0); __ip_do_redirect(rt, skb, &fl4, true); }
0
[ "CWE-17" ]
linux
df4d92549f23e1c037e83323aff58a21b3de7fe0
18,690,591,146,979,014,000,000,000,000,000,000,000
15
ipv4: try to cache dst_entries which would cause a redirect Not caching dst_entries which cause redirects could be exploited by hosts on the same subnet, causing a severe DoS attack. This effect aggravated since commit f88649721268999 ("ipv4: fix dst race in sk_dst_get()"). Lookups causing redirects will be allocated with DST_NOCACHE set which will force dst_release to free them via RCU. Unfortunately waiting for RCU grace period just takes too long, we can end up with >1M dst_entries waiting to be released and the system will run OOM. rcuos threads cannot catch up under high softirq load. Attaching the flag to emit a redirect later on to the specific skb allows us to cache those dst_entries thus reducing the pressure on allocation and deallocation. This issue was discovered by Marcelo Leitner. Cc: Julian Anastasov <[email protected]> Signed-off-by: Marcelo Leitner <[email protected]> Signed-off-by: Florian Westphal <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: Julian Anastasov <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void MirrorJob::SetNewerThan(const char *f) { struct timespec ts; if(parse_datetime(&ts,f,0)) { newer_than=ts.tv_sec; return; } struct stat st; if(stat(f,&st)==-1) { perror(f); return; } newer_than=st.st_mtime; }
0
[ "CWE-20", "CWE-401" ]
lftp
a27e07d90a4608ceaf928b1babb27d4d803e1992
58,925,930,516,285,440,000,000,000,000,000,000,000
16
mirror: prepend ./ to rm and chmod arguments to avoid URL recognition (fix #452)
static int ext4_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) { handle_t *handle; struct inode *inode; int err, retries = 0; retry: handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); if (IS_DIRSYNC(dir)) ext4_handle_sync(handle); inode = ext4_new_inode (handle, dir, mode); err = PTR_ERR(inode); if (!IS_ERR(inode)) { inode->i_op = &ext4_file_inode_operations; inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); err = ext4_add_nondir(handle, dentry, inode); } ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; return err; }
0
[ "CWE-20" ]
linux-2.6
e6b8bc09ba2075cd91fbffefcd2778b1a00bd76f
243,142,511,089,158,330,000,000,000,000,000,000,000
30
ext4: Add sanity check to make_indexed_dir Make sure the rec_len field in the '..' entry is sane, lest we overrun the directory block and cause a kernel oops on a purposefully corrupted filesystem. Thanks to Sami Liedes for reporting this bug. http://bugzilla.kernel.org/show_bug.cgi?id=12430 Signed-off-by: "Theodore Ts'o" <[email protected]> Cc: [email protected]
int PKCS7_add_attribute(PKCS7_SIGNER_INFO *p7si, int nid, int atrtype, void *value) { return (add_attribute(&(p7si->unauth_attr), nid, atrtype, value)); }
0
[]
openssl
c0334c2c92dd1bc3ad8138ba6e74006c3631b0f9
39,313,430,913,168,270,000,000,000,000,000,000,000
5
PKCS#7: avoid NULL pointer dereferences with missing content In PKCS#7, the ASN.1 content component is optional. This typically applies to inner content (detached signatures), however we must also handle unexpected missing outer content correctly. This patch only addresses functions reachable from parsing, decryption and verification, and functions otherwise associated with reading potentially untrusted data. Correcting all low-level API calls requires further work. CVE-2015-0289 Thanks to Michal Zalewski (Google) for reporting this issue. Reviewed-by: Steve Henson <[email protected]>
nth_rtt(struct delegpt_addr* result_list, size_t num_results, size_t n) { int rtt_band; size_t i; int* rtt_list, *rtt_index; if(num_results < 1 || n >= num_results) { return -1; } rtt_list = calloc(num_results, sizeof(int)); if(!rtt_list) { log_err("malloc failure: allocating rtt_list"); return -1; } rtt_index = rtt_list; for(i=0; i<num_results && result_list; i++) { if(result_list->sel_rtt != -1) { *rtt_index = result_list->sel_rtt; rtt_index++; } result_list=result_list->next_result; } qsort(rtt_list, num_results, sizeof(*rtt_list), rtt_compare); log_assert(n > 0); rtt_band = rtt_list[n-1]; free(rtt_list); return rtt_band; }
0
[ "CWE-400" ]
unbound
ba0f382eee814e56900a535778d13206b86b6d49
95,492,616,393,351,850,000,000,000,000,000,000,000
32
- CVE-2020-12662 Unbound can be tricked into amplifying an incoming query into a large number of queries directed to a target. - CVE-2020-12663 Malformed answers from upstream name servers can be used to make Unbound unresponsive.
composite_line(int y, int start, CompositeDeepScanLine::Data * _Data, vector<const char *> & names, const vector<vector< vector<float *> > > & pointers, const vector<unsigned int> & total_sizes, const vector<unsigned int> & num_sources ) { vector<float> output_pixel(names.size()); //the pixel we'll output to vector<const float *> inputs(names.size()); DeepCompositing d; // fallback compositing engine DeepCompositing * comp= _Data->_comp ? _Data->_comp : &d; int pixel = (y-start)*(_Data->_dataWindow.max.x+1-_Data->_dataWindow.min.x); for(int x=_Data->_dataWindow.min.x;x<=_Data->_dataWindow.max.x;x++) { // set inputs[] to point to the first sample of the first part of each channel // if there's a zback, set all channel independently... if(_Data->_zback) { for(size_t channel=0;channel<names.size();channel++) { inputs[channel]=pointers[0][channel][pixel]; } }else{ // otherwise, set 0 and 1 to point to Z inputs[0]=pointers[0][0][pixel]; inputs[1]=pointers[0][0][pixel]; for(size_t channel=2;channel<names.size();channel++) { inputs[channel]=pointers[0][channel][pixel]; } } comp->composite_pixel(&output_pixel[0], &inputs[0], &names[0], static_cast<int>(names.size()), total_sizes[pixel], num_sources[pixel] ); size_t channel_number=0; // // write out composited value into internal frame buffer // for(FrameBuffer::Iterator it = _Data->_outputFrameBuffer.begin();it !=_Data->_outputFrameBuffer.end();it++) { float value = output_pixel[ _Data->_bufferMap[channel_number] ]; // value to write intptr_t base = reinterpret_cast<intptr_t>(it.slice().base); // cast to half float if necessary if(it.slice().type==OPENEXR_IMF_INTERNAL_NAMESPACE::FLOAT) { float* ptr = reinterpret_cast<float*>(base + y*it.slice().yStride + x*it.slice().xStride); *ptr = value; } else if(it.slice().type==HALF) { half* ptr = reinterpret_cast<half*>(base + y*it.slice().yStride + x*it.slice().xStride); *ptr = half(value); } channel_number++; } pixel++; }// next pixel on row }
0
[ "CWE-787" ]
openexr
7d0ef6617f5b5622276458cc5a21d8b859ca7c5b
45,851,358,346,062,390,000,000,000,000,000,000,000
83
enforce xSampling/ySampling==1 in CompositeDeepScanLine (#1209) Signed-off-by: Peter Hillman <[email protected]>
String* Item_user_var_as_out_param::val_str(String *str) { DBUG_ASSERT(0); return 0; }
0
[ "CWE-120" ]
server
eca207c46293bc72dd8d0d5622153fab4d3fccf1
3,391,680,406,339,697,600,000,000,000,000,000,000
5
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size. Precision should be kept below DECIMAL_MAX_SCALE for computations. It can be bigger in Item_decimal. I'd fix this too but it changes the existing behaviour so problemmatic to ix.
handle_connection(GArray *servers, int net, SERVER *serve, CLIENT *client) { int sock_flags_old; int sock_flags_new; if(serve->max_connections > 0 && g_hash_table_size(children) >= serve->max_connections) { msg(LOG_INFO, "Max connections reached"); goto handle_connection_out; } if((sock_flags_old = fcntl(net, F_GETFL, 0)) == -1) { err("fcntl F_GETFL"); } sock_flags_new = sock_flags_old & ~O_NONBLOCK; if (sock_flags_new != sock_flags_old && fcntl(net, F_SETFL, sock_flags_new) == -1) { err("fcntl F_SETFL ~O_NONBLOCK"); } if(!client) { client = g_new0(CLIENT, 1); client->server=serve; client->exportsize=OFFT_MAX; client->net=net; client->transactionlogfd = -1; } if (set_peername(net, client)) { goto handle_connection_out; } if (!authorized_client(client)) { msg(LOG_INFO, "Unauthorized client"); goto handle_connection_out; } msg(LOG_INFO, "Authorized client"); if (!dontfork) { pid_t pid; int i; sigset_t newset; sigset_t oldset; sigemptyset(&newset); sigaddset(&newset, SIGCHLD); sigaddset(&newset, SIGTERM); sigprocmask(SIG_BLOCK, &newset, &oldset); if ((pid = fork()) < 0) { msg(LOG_INFO, "Could not fork (%s)", strerror(errno)); sigprocmask(SIG_SETMASK, &oldset, NULL); goto handle_connection_out; } if (pid > 0) { /* parent */ pid_t *pidp; pidp = g_malloc(sizeof(pid_t)); *pidp = pid; g_hash_table_insert(children, pidp, pidp); sigprocmask(SIG_SETMASK, &oldset, NULL); goto handle_connection_out; } /* child */ signal(SIGCHLD, SIG_DFL); signal(SIGTERM, SIG_DFL); signal(SIGHUP, SIG_DFL); sigprocmask(SIG_SETMASK, &oldset, NULL); g_hash_table_destroy(children); children = NULL; for(i=0;i<servers->len;i++) { serve=&g_array_index(servers, SERVER, i); close(serve->socket); } /* FALSE does not free the actual data. This is required, because the client has a direct reference into that data, and otherwise we get a segfault... 
*/ g_array_free(servers, FALSE); for(i=0;i<modernsocks->len;i++) { close(g_array_index(modernsocks, int, i)); } g_array_free(modernsocks, TRUE); } msg(LOG_INFO, "Starting to serve"); serveconnection(client); exit(EXIT_SUCCESS); handle_connection_out: g_free(client); close(net); }
0
[ "CWE-399", "CWE-310" ]
nbd
741495cb08503fd32a9d22648e63b64390c601f4
255,204,032,700,084,000,000,000,000,000,000,000,000
91
nbd-server: handle modern-style negotiation in a child process Previously, the modern style negotiation was carried out in the root server (listener) process before forking the actual client handler. This made it possible for a malfunctioning or evil client to terminate the root process simply by querying a non-existent export or aborting in the middle of the negotation process (caused SIGPIPE in the server). This commit moves the negotiation process to the child to keep the root process up and running no matter what happens during the negotiation. See http://sourceforge.net/mailarchive/message.php?msg_id=30410146 Signed-off-by: Tuomas Räsänen <[email protected]>
static int sc_open_snapd_tool(const char *tool_name) { // +1 is for the case where the link is exactly PATH_MAX long but we also // want to store the terminating '\0'. The readlink system call doesn't add // terminating null, but our initialization of buf handles this for us. char buf[PATH_MAX + 1] = { 0 }; if (readlink("/proc/self/exe", buf, sizeof buf) < 0) { die("cannot readlink /proc/self/exe"); } if (buf[0] != '/') { // this shouldn't happen, but make sure have absolute path die("readlink /proc/self/exe returned relative path"); } char *dir_name = dirname(buf); int dir_fd SC_CLEANUP(sc_cleanup_close) = 1; dir_fd = open(dir_name, O_PATH | O_DIRECTORY | O_NOFOLLOW | O_CLOEXEC); if (dir_fd < 0) { die("cannot open path %s", dir_name); } int tool_fd = -1; tool_fd = openat(dir_fd, tool_name, O_PATH | O_NOFOLLOW | O_CLOEXEC); if (tool_fd < 0) { die("cannot open path %s/%s", dir_name, tool_name); } debug("opened %s executable as file descriptor %d", tool_name, tool_fd); return tool_fd; }
1
[ "CWE-94" ]
snapd
54e71e7750f73a28f5a47fe04dd058360e24c0e9
31,291,114,531,416,730,000,000,000,000,000,000,000
26
cmd/libsnap-confine-private: Defend against hardlink attacks When snap-confine goes to execute other helper binaries (snap-update-ns etc) via sc_open_snapd_tool(), these other binaries are located relative to the currently executing snap-confine process via /proc/self/exe. Since it is possible for regular users to hardlink setuid binaries when fs.protected_hardlinks is 0, it is possible to hardlink snap-confine to another location and then place an attacker controlled binary in place of snap-update-ns and have this executed as root by snap-confine. Protect against this by checking that snap-confine is located either within /usr/lib/snapd or within the core or snapd snaps as expected. This resolves CVE-2021-44730. Signed-off-by: Alex Murray <[email protected]>
TEST_F(Http1ServerConnectionImplTest, ConnectRequestNoContentLength) { initialize(); InSequence sequence; NiceMock<MockRequestDecoder> decoder; EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); TestRequestHeaderMapImpl expected_headers{ {":authority", "host:80"}, {":method", "CONNECT"}, }; EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_headers), false)); Buffer::OwnedImpl buffer("CONNECT host:80 HTTP/1.1\r\n\r\n"); auto status = codec_->dispatch(buffer); Buffer::OwnedImpl expected_data("abcd"); Buffer::OwnedImpl connect_payload("abcd"); EXPECT_CALL(decoder, decodeData(BufferEqual(&expected_data), false)); status = codec_->dispatch(connect_payload); EXPECT_TRUE(status.ok()); }
0
[ "CWE-770" ]
envoy
7ca28ff7d46454ae930e193d97b7d08156b1ba59
247,865,990,892,991,820,000,000,000,000,000,000,000
21
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145) Signed-off-by: antonio <[email protected]>
SPICE_GNUC_VISIBLE void spice_server_char_device_wakeup(SpiceCharDeviceInstance* sin) { if (!sin->st) { spice_warning("no RedCharDevice attached to instance %p", sin); return; } sin->st->wakeup(); }
0
[]
spice
ca5bbc5692e052159bce1a75f55dc60b36078749
77,184,349,024,624,150,000,000,000,000,000,000,000
8
With OpenSSL 1.1: Disable client-initiated renegotiation. Fixes issue #49 Fixes BZ#1904459 Signed-off-by: Julien Ropé <[email protected]> Reported-by: BlackKD Acked-by: Frediano Ziglio <[email protected]>
void PsdImage::setComment(const std::string& /*comment*/) { // not supported throw(Error(kerInvalidSettingForImage, "Image comment", "Photoshop")); }
0
[ "CWE-125" ]
exiv2
68966932510213b5656fcf433ab6d7e26f48e23b
301,145,635,459,936,700,000,000,000,000,000,000,000
5
PSD: Use Safe::add for preventing overflows in PSD files
static void io_sq_thread_finish(struct io_ring_ctx *ctx) { struct io_sq_data *sqd = ctx->sq_data; if (sqd) { complete(&sqd->startup); if (sqd->thread) { wait_for_completion(&ctx->sq_thread_comp); io_sq_thread_park(sqd); } mutex_lock(&sqd->ctx_lock); list_del(&ctx->sqd_list); io_sqd_update_thread_idle(sqd); mutex_unlock(&sqd->ctx_lock); if (sqd->thread) io_sq_thread_unpark(sqd); io_put_sq_data(sqd); ctx->sq_data = NULL; }
0
[ "CWE-667" ]
linux
3ebba796fa251d042be42b929a2d916ee5c34a49
7,132,504,189,233,719,000,000,000,000,000,000,000
23
io_uring: ensure that SQPOLL thread is started for exit If we create it in a disabled state because IORING_SETUP_R_DISABLED is set on ring creation, we need to ensure that we've kicked the thread if we're exiting before it's been explicitly disabled. Otherwise we can run into a deadlock where exit is waiting go park the SQPOLL thread, but the SQPOLL thread itself is waiting to get a signal to start. That results in the below trace of both tasks hung, waiting on each other: INFO: task syz-executor458:8401 blocked for more than 143 seconds. Not tainted 5.11.0-next-20210226-syzkaller #0 "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. task:syz-executor458 state:D stack:27536 pid: 8401 ppid: 8400 flags:0x00004004 Call Trace: context_switch kernel/sched/core.c:4324 [inline] __schedule+0x90c/0x21a0 kernel/sched/core.c:5075 schedule+0xcf/0x270 kernel/sched/core.c:5154 schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868 do_wait_for_common kernel/sched/completion.c:85 [inline] __wait_for_common kernel/sched/completion.c:106 [inline] wait_for_common kernel/sched/completion.c:117 [inline] wait_for_completion+0x168/0x270 kernel/sched/completion.c:138 io_sq_thread_park fs/io_uring.c:7115 [inline] io_sq_thread_park+0xd5/0x130 fs/io_uring.c:7103 io_uring_cancel_task_requests+0x24c/0xd90 fs/io_uring.c:8745 __io_uring_files_cancel+0x110/0x230 fs/io_uring.c:8840 io_uring_files_cancel include/linux/io_uring.h:47 [inline] do_exit+0x299/0x2a60 kernel/exit.c:780 do_group_exit+0x125/0x310 kernel/exit.c:922 __do_sys_exit_group kernel/exit.c:933 [inline] __se_sys_exit_group kernel/exit.c:931 [inline] __x64_sys_exit_group+0x3a/0x50 kernel/exit.c:931 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xae RIP: 0033:0x43e899 RSP: 002b:00007ffe89376d48 EFLAGS: 00000246 ORIG_RAX: 00000000000000e7 RAX: ffffffffffffffda RBX: 00000000004af2f0 RCX: 000000000043e899 RDX: 000000000000003c RSI: 00000000000000e7 RDI: 0000000000000000 RBP: 
0000000000000000 R08: ffffffffffffffc0 R09: 0000000010000000 R10: 0000000000008011 R11: 0000000000000246 R12: 00000000004af2f0 R13: 0000000000000001 R14: 0000000000000000 R15: 0000000000000001 INFO: task iou-sqp-8401:8402 can't die for more than 143 seconds. task:iou-sqp-8401 state:D stack:30272 pid: 8402 ppid: 8400 flags:0x00004004 Call Trace: context_switch kernel/sched/core.c:4324 [inline] __schedule+0x90c/0x21a0 kernel/sched/core.c:5075 schedule+0xcf/0x270 kernel/sched/core.c:5154 schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868 do_wait_for_common kernel/sched/completion.c:85 [inline] __wait_for_common kernel/sched/completion.c:106 [inline] wait_for_common kernel/sched/completion.c:117 [inline] wait_for_completion+0x168/0x270 kernel/sched/completion.c:138 io_sq_thread+0x27d/0x1ae0 fs/io_uring.c:6717 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294 INFO: task iou-sqp-8401:8402 blocked for more than 143 seconds. Reported-by: [email protected] Signed-off-by: Jens Axboe <[email protected]>
uint32_t ConnectionImpl::getHeadersSize() { return current_header_field_.size() + current_header_value_.size() + headersOrTrailers().byteSize(); }
0
[ "CWE-770" ]
envoy
7ca28ff7d46454ae930e193d97b7d08156b1ba59
161,733,430,145,834,190,000,000,000,000,000,000,000
4
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145) Signed-off-by: antonio <[email protected]>
mrb_init_gc(mrb_state *mrb) { struct RClass *gc; gc = mrb_define_module(mrb, "GC"); mrb_define_class_method(mrb, gc, "start", gc_start, MRB_ARGS_NONE()); mrb_define_class_method(mrb, gc, "enable", gc_enable, MRB_ARGS_NONE()); mrb_define_class_method(mrb, gc, "disable", gc_disable, MRB_ARGS_NONE()); mrb_define_class_method(mrb, gc, "interval_ratio", gc_interval_ratio_get, MRB_ARGS_NONE()); mrb_define_class_method(mrb, gc, "interval_ratio=", gc_interval_ratio_set, MRB_ARGS_REQ(1)); mrb_define_class_method(mrb, gc, "step_ratio", gc_step_ratio_get, MRB_ARGS_NONE()); mrb_define_class_method(mrb, gc, "step_ratio=", gc_step_ratio_set, MRB_ARGS_REQ(1)); mrb_define_class_method(mrb, gc, "generational_mode=", gc_generational_mode_set, MRB_ARGS_REQ(1)); mrb_define_class_method(mrb, gc, "generational_mode", gc_generational_mode_get, MRB_ARGS_NONE()); #ifdef GC_TEST #ifdef GC_DEBUG mrb_define_class_method(mrb, gc, "test", gc_test, MRB_ARGS_NONE()); #endif #endif }
0
[ "CWE-416" ]
mruby
5c114c91d4ff31859fcd84cf8bf349b737b90d99
123,104,976,648,730,440,000,000,000,000,000,000,000
21
Clear unused stack region that may refer freed objects; fix #3596
static void r_bin_mdmp_free_pe64_bin(void *pe_bin_) { struct Pe64_r_bin_mdmp_pe_bin *pe_bin = pe_bin_; if (pe_bin) { sdb_free (pe_bin->bin->kv); Pe64_r_bin_pe_free (pe_bin->bin); R_FREE (pe_bin); } }
0
[ "CWE-400", "CWE-703" ]
radare2
27fe8031782d3a06c3998eaa94354867864f9f1b
308,279,724,617,153,240,000,000,000,000,000,000,000
8
Fix DoS in the minidump parser ##crash * Reported by lazymio via huntr.dev * Reproducer: mdmp-dos
bool CWebServer::IsIdxForUser(const WebEmSession *pSession, const int Idx) { if (pSession->rights == 2) return true; if (pSession->rights == 0) return false; //viewer //User int iUser = FindUser(pSession->username.c_str()); if ((iUser < 0) || (iUser >= (int)m_users.size())) return false; if (m_users[iUser].TotSensors == 0) return true; // all sensors std::vector<std::vector<std::string> > result = m_sql.safe_query("SELECT DeviceRowID FROM SharedDevices WHERE (SharedUserID == '%d') AND (DeviceRowID == '%d')", m_users[iUser].ID, Idx); return (!result.empty()); }
0
[ "CWE-89" ]
domoticz
ee70db46f81afa582c96b887b73bcd2a86feda00
173,826,019,874,775,400,000,000,000,000,000,000,000
17
Fixed possible SQL Injection Vulnerability (Thanks to Fabio Carretto!)
static bool vsock_in_bound_table(struct vsock_sock *vsk) { bool ret; spin_lock_bh(&vsock_table_lock); ret = __vsock_in_bound_table(vsk); spin_unlock_bh(&vsock_table_lock); return ret; }
0
[ "CWE-20", "CWE-269" ]
linux
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
114,988,402,329,857,270,000,000,000,000,000,000,000
10
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void jpc_qmfb_split_col(jpc_fix_t *a, int numrows, int stride, int parity) { int bufsize = JPC_CEILDIVPOW2(numrows, 1); #if !defined(HAVE_VLA) jpc_fix_t splitbuf[QMFB_SPLITBUFSIZE]; #else jpc_fix_t splitbuf[bufsize]; #endif jpc_fix_t *buf = splitbuf; register jpc_fix_t *srcptr; register jpc_fix_t *dstptr; register int n; register int m; int hstartcol; #if !defined(HAVE_VLA) /* Get a buffer. */ if (bufsize > QMFB_SPLITBUFSIZE) { if (!(buf = jas_malloc(bufsize * sizeof(jpc_fix_t)))) { /* We have no choice but to commit suicide in this case. */ abort(); } } #endif if (numrows >= 2) { hstartcol = (numrows + 1 - parity) >> 1; m = (parity) ? hstartcol : (numrows - hstartcol); /* Save the samples destined for the highpass channel. */ n = m; dstptr = buf; srcptr = &a[(1 - parity) * stride]; while (n-- > 0) { *dstptr = *srcptr; ++dstptr; srcptr += stride << 1; } /* Copy the appropriate samples into the lowpass channel. */ dstptr = &a[(1 - parity) * stride]; srcptr = &a[(2 - parity) * stride]; n = numrows - m - (!parity); while (n-- > 0) { *dstptr = *srcptr; dstptr += stride; srcptr += stride << 1; } /* Copy the saved samples into the highpass channel. */ dstptr = &a[hstartcol * stride]; srcptr = buf; n = m; while (n-- > 0) { *dstptr = *srcptr; dstptr += stride; ++srcptr; } } #if !defined(HAVE_VLA) /* If the split buffer was allocated on the heap, free this memory. */ if (buf != splitbuf) { jas_free(buf); } #endif }
1
[ "CWE-119" ]
jasper
0d64bde2b3ba7e1450710d540136a8ce4199ef30
112,564,715,432,232,300,000,000,000,000,000,000,000
67
CVE-2014-8158
static int lstat_cache_matchlen(struct cache_def *cache, const char *name, int len, int *ret_flags, int track_flags, int prefix_len_stat_func) { int match_len, last_slash, last_slash_dir, previous_slash; int save_flags, ret; struct stat st; if (cache->track_flags != track_flags || cache->prefix_len_stat_func != prefix_len_stat_func) { /* * As a safeguard rule we clear the cache if the * values of track_flags and/or prefix_len_stat_func * does not match with the last supplied values. */ reset_lstat_cache(cache); cache->track_flags = track_flags; cache->prefix_len_stat_func = prefix_len_stat_func; match_len = last_slash = 0; } else { /* * Check to see if we have a match from the cache for * the 2 "excluding" path types. */ match_len = last_slash = longest_path_match(name, len, cache->path.buf, cache->path.len, &previous_slash); *ret_flags = cache->flags & track_flags & (FL_NOENT|FL_SYMLINK); if (!(track_flags & FL_FULLPATH) && match_len == len) match_len = last_slash = previous_slash; if (*ret_flags && match_len == cache->path.len) return match_len; /* * If we now have match_len > 0, we would know that * the matched part will always be a directory. * * Also, if we are tracking directories and 'name' is * a substring of the cache on a path component basis, * we can return immediately. */ *ret_flags = track_flags & FL_DIR; if (*ret_flags && len == match_len) return match_len; } /* * Okay, no match from the cache so far, so now we have to * check the rest of the path components. 
*/ *ret_flags = FL_DIR; last_slash_dir = last_slash; if (len > cache->path.len) strbuf_grow(&cache->path, len - cache->path.len); while (match_len < len) { do { cache->path.buf[match_len] = name[match_len]; match_len++; } while (match_len < len && name[match_len] != '/'); if (match_len >= len && !(track_flags & FL_FULLPATH)) break; last_slash = match_len; cache->path.buf[last_slash] = '\0'; if (last_slash <= prefix_len_stat_func) ret = stat(cache->path.buf, &st); else ret = lstat(cache->path.buf, &st); if (ret) { *ret_flags = FL_LSTATERR; if (errno == ENOENT) *ret_flags |= FL_NOENT; } else if (S_ISDIR(st.st_mode)) { last_slash_dir = last_slash; continue; } else if (S_ISLNK(st.st_mode)) { *ret_flags = FL_SYMLINK; } else { *ret_flags = FL_ERR; } break; } /* * At the end update the cache. Note that max 3 different * path types, FL_NOENT, FL_SYMLINK and FL_DIR, can be cached * for the moment! */ save_flags = *ret_flags & track_flags & (FL_NOENT|FL_SYMLINK); if (save_flags && last_slash > 0) { cache->path.buf[last_slash] = '\0'; cache->path.len = last_slash; cache->flags = save_flags; } else if ((track_flags & FL_DIR) && last_slash_dir > 0) { /* * We have a separate test for the directory case, * since it could be that we have found a symlink or a * non-existing directory and the track_flags says * that we cannot cache this fact, so the cache would * then have been left empty in this case. * * But if we are allowed to track real directories, we * can still cache the path components before the last * one (the found symlink or non-existing component). */ cache->path.buf[last_slash_dir] = '\0'; cache->path.len = last_slash_dir; cache->flags = FL_DIR; } else { reset_lstat_cache(cache); } return match_len; }
0
[ "CWE-59", "CWE-61" ]
git
684dd4c2b414bcf648505e74498a608f28de4592
335,248,407,679,593,300,000,000,000,000,000,000,000
116
checkout: fix bug that makes checkout follow symlinks in leading path Before checking out a file, we have to confirm that all of its leading components are real existing directories. And to reduce the number of lstat() calls in this process, we cache the last leading path known to contain only directories. However, when a path collision occurs (e.g. when checking out case-sensitive files in case-insensitive file systems), a cached path might have its file type changed on disk, leaving the cache on an invalid state. Normally, this doesn't bring any bad consequences as we usually check out files in index order, and therefore, by the time the cached path becomes outdated, we no longer need it anyway (because all files in that directory would have already been written). But, there are some users of the checkout machinery that do not always follow the index order. In particular: checkout-index writes the paths in the same order that they appear on the CLI (or stdin); and the delayed checkout feature -- used when a long-running filter process replies with "status=delayed" -- postpones the checkout of some entries, thus modifying the checkout order. When we have to check out an out-of-order entry and the lstat() cache is invalid (due to a previous path collision), checkout_entry() may end up using the invalid data and thrusting that the leading components are real directories when, in reality, they are not. In the best case scenario, where the directory was replaced by a regular file, the user will get an error: "fatal: unable to create file 'foo/bar': Not a directory". But if the directory was replaced by a symlink, checkout could actually end up following the symlink and writing the file at a wrong place, even outside the repository. Since delayed checkout is affected by this bug, it could be used by an attacker to write arbitrary files during the clone of a maliciously crafted repository. 
Some candidate solutions considered were to disable the lstat() cache during unordered checkouts or sort the entries before passing them to the checkout machinery. But both ideas include some performance penalty and they don't future-proof the code against new unordered use cases. Instead, we now manually reset the lstat cache whenever we successfully remove a directory. Note: We are not even checking whether the directory was the same as the lstat cache points to because we might face a scenario where the paths refer to the same location but differ due to case folding, precomposed UTF-8 issues, or the presence of `..` components in the path. Two regression tests, with case-collisions and utf8-collisions, are also added for both checkout-index and delayed checkout. Note: to make the previously mentioned clone attack unfeasible, it would be sufficient to reset the lstat cache only after the remove_subtree() call inside checkout_entry(). This is the place where we would remove a directory whose path collides with the path of another entry that we are currently trying to check out (possibly a symlink). However, in the interest of a thorough fix that does not leave Git open to similar-but-not-identical attack vectors, we decided to intercept all `rmdir()` calls in one fell swoop. This addresses CVE-2021-21300. Co-authored-by: Johannes Schindelin <[email protected]> Signed-off-by: Matheus Tavares <[email protected]>
void CairoImage::setImage (cairo_surface_t *image) { if (this->image) cairo_surface_destroy (this->image); this->image = cairo_surface_reference (image); }
0
[]
poppler
abf167af8b15e5f3b510275ce619e6fdb42edd40
318,451,778,393,966,180,000,000,000,000,000,000,000
5
Implement tiling/patterns in SplashOutputDev Fixes bug 13518
jas_matrix_t *jas_matrix_create(int numrows, int numcols) { jas_matrix_t *matrix; int i; if (numrows < 0 || numcols < 0) { return 0; } if (!(matrix = jas_malloc(sizeof(jas_matrix_t)))) { return 0; } matrix->flags_ = 0; matrix->numrows_ = numrows; matrix->numcols_ = numcols; matrix->rows_ = 0; matrix->maxrows_ = numrows; matrix->data_ = 0; matrix->datasize_ = numrows * numcols; if (matrix->maxrows_ > 0) { if (!(matrix->rows_ = jas_alloc2(matrix->maxrows_, sizeof(jas_seqent_t *)))) { jas_matrix_destroy(matrix); return 0; } } if (matrix->datasize_ > 0) { if (!(matrix->data_ = jas_alloc2(matrix->datasize_, sizeof(jas_seqent_t)))) { jas_matrix_destroy(matrix); return 0; } } for (i = 0; i < numrows; ++i) { matrix->rows_[i] = &matrix->data_[i * matrix->numcols_]; } for (i = 0; i < matrix->datasize_; ++i) { matrix->data_[i] = 0; } matrix->xstart_ = 0; matrix->ystart_ = 0; matrix->xend_ = matrix->numcols_; matrix->yend_ = matrix->numrows_; return matrix; }
1
[ "CWE-190" ]
jasper
988f8365f7d8ad8073b6786e433d34c553ecf568
50,172,536,044,950,610,000,000,000,000,000,000,000
51
Fixed an integer overflow problem.
string t_go_generator::type_to_enum(t_type* type) { type = get_true_type(type); if (type->is_base_type()) { t_base_type::t_base tbase = ((t_base_type*)type)->get_base(); switch (tbase) { case t_base_type::TYPE_VOID: throw "NO T_VOID CONSTRUCT"; case t_base_type::TYPE_STRING: /* this is wrong, binary is still a string type internally if (((t_base_type*)type)->is_binary()) { return "thrift.BINARY"; } */ return "thrift.STRING"; case t_base_type::TYPE_BOOL: return "thrift.BOOL"; case t_base_type::TYPE_I8: return "thrift.BYTE"; case t_base_type::TYPE_I16: return "thrift.I16"; case t_base_type::TYPE_I32: return "thrift.I32"; case t_base_type::TYPE_I64: return "thrift.I64"; case t_base_type::TYPE_DOUBLE: return "thrift.DOUBLE"; } } else if (type->is_enum()) { return "thrift.I32"; } else if (type->is_struct() || type->is_xception()) { return "thrift.STRUCT"; } else if (type->is_map()) { return "thrift.MAP"; } else if (type->is_set()) { return "thrift.SET"; } else if (type->is_list()) { return "thrift.LIST"; } throw "INVALID TYPE IN type_to_enum: " + type->get_name(); }
0
[ "CWE-77" ]
thrift
2007783e874d524a46b818598a45078448ecc53e
1,337,754,148,378,799,000,000,000,000,000,000,000
50
THRIFT-3893 Command injection in format_go_output Client: Go Patch: Jens Geyer
real_uptime(const routerinfo_t *router, time_t now) { if (now < router->cache_info.published_on) return router->uptime; else return router->uptime + (now - router->cache_info.published_on); }
0
[]
tor
02e05bd74dbec614397b696cfcda6525562a4675
219,920,705,486,932,340,000,000,000,000,000,000,000
7
When examining descriptors as a dirserver, reject ones with bad versions This is an extra fix for bug 21278: it ensures that these descriptors and platforms will never be listed in a legit consensus.
log_message(__attribute__((unused)) const struct mg_connection *conn, const char *message) { puts(message); return 1; }
0
[ "CWE-787" ]
rsyslog
89955b0bcb1ff105e1374aad7e0e993faa6a038f
274,843,733,577,849,200,000,000,000,000,000,000,000
5
net bugfix: potential buffer overrun
stop_redo_ins(void) { block_redo = FALSE; }
0
[ "CWE-78" ]
vim
53575521406739cf20bbe4e384d88e7dca11f040
41,354,604,167,981,983,000,000,000,000,000,000,000
4
patch 8.1.1365: source command doesn't check for the sandbox Problem: Source command doesn't check for the sandbox. (Armin Razmjou) Solution: Check for the sandbox when sourcing a file.
TEST_P(Http2FloodMitigationTest, PriorityIdleStream) { beginSession(); floodServer(Http2Frame::makePriorityFrame(0, 1), "http2.inbound_priority_frames_flood"); }
0
[ "CWE-400", "CWE-703" ]
envoy
afc39bea36fd436e54262f150c009e8d72db5014
314,708,865,129,407,770,000,000,000,000,000,000,000
5
Track byteSize of HeaderMap internally. Introduces a cached byte size updated internally in HeaderMap. The value is stored as an optional, and is cleared whenever a non-const pointer or reference to a HeaderEntry is accessed. The cached value can be set with refreshByteSize() which performs an iteration over the HeaderMap to sum the size of each key and value in the HeaderMap. Signed-off-by: Asra Ali <[email protected]>
cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) { struct cpuacct *ca = cgroup_ca(cgrp); free_percpu(ca->cpuusage); kfree(ca); }
0
[]
linux-2.6
8f1bc385cfbab474db6c27b5af1e439614f3025c
138,153,645,374,871,920,000,000,000,000,000,000,000
7
sched: fair: weight calculations In order to level the hierarchy, we need to calculate load based on the root view. That is, each task's load is in the same unit. A / \ B 1 / \ 2 3 To compute 1's load we do: weight(1) -------------- rq_weight(A) To compute 2's load we do: weight(2) weight(B) ------------ * ----------- rq_weight(B) rw_weight(A) This yields load fractions in comparable units. The consequence is that it changes virtual time. We used to have: time_{i} vtime_{i} = ------------ weight_{i} vtime = \Sum vtime_{i} = time / rq_weight. But with the new way of load calculation we get that vtime equals time. Signed-off-by: Peter Zijlstra <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
irc_server_get_prefix_char_index (struct t_irc_server *server, char prefix_char) { const char *prefix_chars; char *pos; if (server) { prefix_chars = irc_server_get_prefix_chars (server); pos = strchr (prefix_chars, prefix_char); if (pos) return pos - prefix_chars; } return -1; }
0
[ "CWE-120", "CWE-787" ]
weechat
40ccacb4330a64802b1f1e28ed9a6b6d3ca9197f
160,381,135,832,606,100,000,000,000,000,000,000,000
16
irc: fix crash when a new message 005 is received with longer nick prefixes Thanks to Stuart Nevans Locke for reporting the issue.
void MainWindow::changeDeinterlacer(bool checked, const char* method) { if (checked) { MLT.videoWidget()->setProperty("deinterlace_method", method); if (MLT.consumer()) { MLT.consumer()->set("deinterlace_method", method); MLT.refreshConsumer(); } } Settings.setPlayerDeinterlacer(method); }
0
[ "CWE-89", "CWE-327", "CWE-295" ]
shotcut
f008adc039642307f6ee3378d378cdb842e52c1d
117,686,082,234,929,670,000,000,000,000,000,000,000
11
fix upgrade check is not using TLS correctly
static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier, ARMPACKey *key, bool data) { ARMMMUIdx mmu_idx = arm_stage1_mmu_idx(env); ARMVAParameters param = aa64_va_parameters(env, ptr, mmu_idx, data); uint64_t pac, ext_ptr, ext, test; int bot_bit, top_bit; /* If tagged pointers are in use, use ptr<55>, otherwise ptr<63>. */ if (param.tbi) { ext = sextract64(ptr, 55, 1); } else { ext = sextract64(ptr, 63, 1); } /* Build a pointer with known good extension bits. */ top_bit = 64 - 8 * param.tbi; bot_bit = 64 - param.tsz; ext_ptr = deposit64(ptr, bot_bit, top_bit - bot_bit, ext); pac = pauth_computepac(ext_ptr, modifier, *key); /* * Check if the ptr has good extension bits and corrupt the * pointer authentication code if not. */ test = sextract64(ptr, bot_bit, top_bit - bot_bit); if (test != 0 && test != -1) { pac ^= MAKE_64BIT_MASK(top_bit - 1, 1); } /* * Preserve the determination between upper and lower at bit 55, * and insert pointer authentication code. */ if (param.tbi) { ptr &= ~MAKE_64BIT_MASK(bot_bit, 55 - bot_bit + 1); pac &= MAKE_64BIT_MASK(bot_bit, 54 - bot_bit + 1); } else { ptr &= MAKE_64BIT_MASK(0, bot_bit); pac &= ~(MAKE_64BIT_MASK(55, 1) | MAKE_64BIT_MASK(0, bot_bit)); } ext &= MAKE_64BIT_MASK(55, 1); return pac | ext | ptr; }
0
[]
qemu
de0b1bae6461f67243282555475f88b2384a1eb9
297,222,922,252,852,300,000,000,000,000,000,000,000
45
target/arm: Fix PAuth sbox functions In the PAC computation, sbox was applied over wrong bits. As this is a 4-bit sbox, bit index should be incremented by 4 instead of 16. Test vector from QARMA paper (https://eprint.iacr.org/2016/444.pdf) was used to verify one computation of the pauth_computepac() function which uses sbox2. Launchpad: https://bugs.launchpad.net/bugs/1859713 Reviewed-by: Richard Henderson <[email protected]> Signed-off-by: Vincent DEHORS <[email protected]> Signed-off-by: Adrien GRASSEIN <[email protected]> Message-id: [email protected] Reviewed-by: Peter Maydell <[email protected]> Signed-off-by: Peter Maydell <[email protected]>
static int dio_complete(struct dio *dio, loff_t offset, int ret) { ssize_t transferred = 0; /* * AIO submission can race with bio completion to get here while * expecting to have the last io completed by bio completion. * In that case -EIOCBQUEUED is in fact not an error we want * to preserve through this call. */ if (ret == -EIOCBQUEUED) ret = 0; if (dio->result) { transferred = dio->result; /* Check for short read case */ if ((dio->rw == READ) && ((offset + transferred) > dio->i_size)) transferred = dio->i_size - offset; } if (dio->end_io && dio->result) dio->end_io(dio->iocb, offset, transferred, dio->map_bh.b_private); if (dio->lock_type == DIO_LOCKING) /* lockdep: non-owner release */ up_read_non_owner(&dio->inode->i_alloc_sem); if (ret == 0) ret = dio->page_errors; if (ret == 0) ret = dio->io_error; if (ret == 0) ret = transferred; return ret; }
0
[]
linux-2.6
848c4dd5153c7a0de55470ce99a8e13a63b4703f
121,016,058,585,493,910,000,000,000,000,000,000,000
37
dio: zero struct dio with kzalloc instead of manually This patch uses kzalloc to zero all of struct dio rather than manually trying to track which fields we rely on being zero. It passed aio+dio stress testing and some bug regression testing on ext3. This patch was introduced by Linus in the conversation that lead up to Badari's minimal fix to manually zero .map_bh.b_state in commit: 6a648fa72161d1f6468dabd96c5d3c0db04f598a It makes the code a bit smaller. Maybe a couple fewer cachelines to load, if we're lucky: text data bss dec hex filename 3285925 568506 1304616 5159047 4eb887 vmlinux 3285797 568506 1304616 5158919 4eb807 vmlinux.patched I was unable to measure a stable difference in the number of cpu cycles spent in blockdev_direct_IO() when pushing aio+dio 256K reads at ~340MB/s. So the resulting intent of the patch isn't a performance gain but to avoid exposing ourselves to the risk of finding another field like .map_bh.b_state where we rely on zeroing but don't enforce it in the code. Signed-off-by: Zach Brown <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
static void openssl_init(RedLinkInfo *link) { unsigned long f4 = RSA_F4; link->tiTicketing.bn = BN_new(); if (!link->tiTicketing.bn) { red_dump_openssl_errors(); spice_error("OpenSSL BIGNUMS alloc failed"); } BN_set_word(link->tiTicketing.bn, f4); }
0
[]
spice
ca5bbc5692e052159bce1a75f55dc60b36078749
53,470,443,655,271,810,000,000,000,000,000,000,000
12
With OpenSSL 1.1: Disable client-initiated renegotiation. Fixes issue #49 Fixes BZ#1904459 Signed-off-by: Julien Ropé <[email protected]> Reported-by: BlackKD Acked-by: Frediano Ziglio <[email protected]>
static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { int subprog, target_insn; target_insn = *insn_idx + insn->imm + 1; subprog = find_subprog(env, target_insn); if (subprog < 0) { verbose(env, "verifier bug. No program starts at insn %d\n", target_insn); return -EFAULT; } return __check_func_call(env, insn, insn_idx, subprog, set_callee_state); }
0
[ "CWE-125" ]
bpf
049c4e13714ecbca567b4d5f6d563f05d431c80e
278,157,303,789,884,900,000,000,000,000,000,000,000
15
bpf: Fix alu32 const subreg bound tracking on bitwise operations Fix a bug in the verifier's scalar32_min_max_*() functions which leads to incorrect tracking of 32 bit bounds for the simulation of and/or/xor bitops. When both the src & dst subreg is a known constant, then the assumption is that scalar_min_max_*() will take care to update bounds correctly. However, this is not the case, for example, consider a register R2 which has a tnum of 0xffffffff00000000, meaning, lower 32 bits are known constant and in this case of value 0x00000001. R2 is then and'ed with a register R3 which is a 64 bit known constant, here, 0x100000002. What can be seen in line '10:' is that 32 bit bounds reach an invalid state where {u,s}32_min_value > {u,s}32_max_value. The reason is scalar32_min_max_*() delegates 32 bit bounds updates to scalar_min_max_*(), however, that really only takes place when both the 64 bit src & dst register is a known constant. Given scalar32_min_max_*() is intended to be designed as closely as possible to scalar_min_max_*(), update the 32 bit bounds in this situation through __mark_reg32_known() which will set all {u,s}32_{min,max}_value to the correct constant, which is 0x00000000 after the fix (given 0x00000001 & 0x00000002 in 32 bit space). This is possible given var32_off already holds the final value as dst_reg->var_off is updated before calling scalar32_min_max_*(). Before fix, invalid tracking of R2: [...] 
9: R0_w=inv1337 R1=ctx(id=0,off=0,imm=0) R2_w=inv(id=0,smin_value=-9223372036854775807 (0x8000000000000001),smax_value=9223372032559808513 (0x7fffffff00000001),umin_value=1,umax_value=0xffffffff00000001,var_off=(0x1; 0xffffffff00000000),s32_min_value=1,s32_max_value=1,u32_min_value=1,u32_max_value=1) R3_w=inv4294967298 R10=fp0 9: (5f) r2 &= r3 10: R0_w=inv1337 R1=ctx(id=0,off=0,imm=0) R2_w=inv(id=0,smin_value=0,smax_value=4294967296 (0x100000000),umin_value=0,umax_value=0x100000000,var_off=(0x0; 0x100000000),s32_min_value=1,s32_max_value=0,u32_min_value=1,u32_max_value=0) R3_w=inv4294967298 R10=fp0 [...] After fix, correct tracking of R2: [...] 9: R0_w=inv1337 R1=ctx(id=0,off=0,imm=0) R2_w=inv(id=0,smin_value=-9223372036854775807 (0x8000000000000001),smax_value=9223372032559808513 (0x7fffffff00000001),umin_value=1,umax_value=0xffffffff00000001,var_off=(0x1; 0xffffffff00000000),s32_min_value=1,s32_max_value=1,u32_min_value=1,u32_max_value=1) R3_w=inv4294967298 R10=fp0 9: (5f) r2 &= r3 10: R0_w=inv1337 R1=ctx(id=0,off=0,imm=0) R2_w=inv(id=0,smin_value=0,smax_value=4294967296 (0x100000000),umin_value=0,umax_value=0x100000000,var_off=(0x0; 0x100000000),s32_min_value=0,s32_max_value=0,u32_min_value=0,u32_max_value=0) R3_w=inv4294967298 R10=fp0 [...] Fixes: 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking") Fixes: 2921c90d4718 ("bpf: Fix a verifier failure with xor") Reported-by: Manfred Paul (@_manfp) Reported-by: Thadeu Lima de Souza Cascardo <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Reviewed-by: John Fastabend <[email protected]> Acked-by: Alexei Starovoitov <[email protected]>
/*
 * Constructor for the ISO-BMFF 'name' box.
 *
 * ISOM_DECL_BOX_ALLOC declares a local GF_NameBox pointer named 'tmp'
 * allocated and tagged with the GF_ISOM_BOX_TYPE_NAME box type; the
 * allocation/initialization details live inside the macro and are not
 * visible here.  Returns the new box as a generic GF_Box pointer
 * (presumably NULL on allocation failure — confirm against the macro
 * definition).
 */
GF_Box *name_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_NameBox, GF_ISOM_BOX_TYPE_NAME);
	return (GF_Box *)tmp;
}
0
[ "CWE-787" ]
gpac
388ecce75d05e11fc8496aa4857b91245007d26e
185,430,293,651,278,340,000,000,000,000,000,000,000
5
fixed #1587
/*
 * Test driver for this cJSON unit-test suite: registers each test case
 * with the Unity framework and reports the aggregate result.  Test order
 * is significant to the harness output, so cases are listed explicitly.
 */
int CJSON_CDECL main(void)
{
    UNITY_BEGIN();

    RUN_TEST(cjson_array_foreach_should_loop_over_arrays);
    RUN_TEST(cjson_array_foreach_should_not_dereference_null_pointer);
    RUN_TEST(cjson_get_object_item_should_get_object_items);
    RUN_TEST(cjson_get_object_item_case_sensitive_should_get_object_items);
    RUN_TEST(typecheck_functions_should_check_type);
    RUN_TEST(cjson_should_not_parse_to_deeply_nested_jsons);
    RUN_TEST(cjson_set_number_value_should_set_numbers);
    RUN_TEST(cjson_detach_item_via_pointer_should_detach_items);
    RUN_TEST(cjson_replace_item_via_pointer_should_replace_items);
    RUN_TEST(cjson_replace_item_in_object_should_preserve_name);
    RUN_TEST(cjson_functions_shouldnt_crash_with_null_pointers);
    RUN_TEST(ensure_should_fail_on_failed_realloc);
    RUN_TEST(skip_utf8_bom_should_skip_bom);
    RUN_TEST(skip_utf8_bom_should_not_skip_bom_if_not_at_beginning);
    RUN_TEST(cjson_get_string_value_should_get_a_string);
    RUN_TEST(cjson_create_string_reference_should_create_a_string_reference);
    RUN_TEST(cjson_create_object_reference_should_create_an_object_reference);
    RUN_TEST(cjson_create_array_reference_should_create_an_array_reference);
    RUN_TEST(cjson_add_item_to_object_should_not_use_after_free_when_string_is_aliased);

    /* UNITY_END() presumably returns the number of failed tests
     * (0 on full success) — Unity convention, confirm against unity.h. */
    return UNITY_END();
}
1
[ "CWE-754", "CWE-787" ]
cJSON
be749d7efa7c9021da746e685bd6dec79f9dd99b
319,360,317,052,336,300,000,000,000,000,000,000,000
26
Fix crash of cJSON_GetObjectItemCaseSensitive when calling it on arrays
/*
 * vring_used_idx_set - store @val into the virtqueue's used-ring 'idx'
 * field.  Computes the physical address of VRingUsed.idx inside the used
 * ring and writes the 16-bit value there through stw_phys() on the
 * system address space.
 */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;

    /* Physical address of the 'idx' field within the used ring */
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(&address_space_memory, pa, val);
}
0
[ "CWE-94" ]
qemu
cc45995294b92d95319b4782750a3580cabdbc0c
193,322,799,884,319,150,000,000,000,000,000,000,000
6
virtio: out-of-bounds buffer write on invalid state load CVE-2013-4151 QEMU 1.0 out-of-bounds buffer write in virtio_load@hw/virtio/virtio.c So we have this code since way back when: num = qemu_get_be32(f); for (i = 0; i < num; i++) { vdev->vq[i].vring.num = qemu_get_be32(f); array of vqs has size VIRTIO_PCI_QUEUE_MAX, so on invalid input this will write beyond end of buffer. Signed-off-by: Michael S. Tsirkin <[email protected]> Reviewed-by: Michael Roth <[email protected]> Signed-off-by: Juan Quintela <[email protected]>
/*
 * Map a SPOOLSS printer-notification field constant to the matching
 * registered hf_ index used for dissection.  Returns -1 when the field
 * has no registered hf entry.
 *
 * NOTE(review): the return type appears to have been stripped in this
 * extract; the function body implies 'int'.
 */
printer_notify_hf_index(int field)
{
	int result = -1;

	switch(field) {
	case PRINTER_NOTIFY_SERVER_NAME:
		result = hf_servername;
		break;
	case PRINTER_NOTIFY_PRINTER_NAME:
		result = hf_printername;
		break;
	case PRINTER_NOTIFY_SHARE_NAME:
		result = hf_sharename;
		break;
	case PRINTER_NOTIFY_PORT_NAME:
		result = hf_portname;
		break;
	case PRINTER_NOTIFY_DRIVER_NAME:
		result = hf_drivername;
		break;
	case PRINTER_NOTIFY_COMMENT:
		result = hf_printercomment;
		break;
	case PRINTER_NOTIFY_LOCATION:
		result = hf_printerlocation;
		break;
	case PRINTER_NOTIFY_SEPFILE:
		result = hf_sepfile;
		break;
	case PRINTER_NOTIFY_PRINT_PROCESSOR:
		result = hf_printprocessor;
		break;
	case PRINTER_NOTIFY_PARAMETERS:
		result = hf_parameters;
		break;
	case PRINTER_NOTIFY_DATATYPE:
		/* NOTE(review): assigns hf_parameters, identical to the
		 * PRINTER_NOTIFY_PARAMETERS case above — looks like a
		 * copy/paste slip; presumably a dedicated hf_datatype
		 * field was intended.  Confirm against the dissector's
		 * hf registrations before changing. */
		result = hf_parameters;
		break;
	}

	return result;
}
0
[ "CWE-399" ]
wireshark
b4d16b4495b732888e12baf5b8a7e9bf2665e22b
326,223,743,599,785,830,000,000,000,000,000,000,000
42
SPOOLSS: Try to avoid an infinite loop. Use tvb_reported_length_remaining in dissect_spoolss_uint16uni. Make sure our offset always increments in dissect_spoolss_keybuffer. Change-Id: I7017c9685bb2fa27161d80a03b8fca4ef630e793 Reviewed-on: https://code.wireshark.org/review/14687 Reviewed-by: Gerald Combs <[email protected]> Petri-Dish: Gerald Combs <[email protected]> Tested-by: Petri Dish Buildbot <[email protected]> Reviewed-by: Michael Mann <[email protected]>