Columns:
  func        string    lengths 0 – 484k
  target      int64     0 – 1
  cwe         sequence  lengths 0 – 4
  project     string    799 classes
  commit_id   string    lengths 40 – 40
  hash        float64   1,215,700,430,453,689,100,000,000B – 340,281,914,521,452,260,000,000,000,000B
  size        int64     1 – 24k
  message     string    lengths 0 – 13.3k
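Each record in the rows below carries one function body (func), a binary vulnerability label (target), zero or more CWE identifiers (cwe), and commit metadata (project, commit_id, hash, size, message). A minimal sketch of how a table with this schema could be loaded and inspected with the Hugging Face `datasets` library follows; the dataset path "user/vulnerability-functions" is a placeholder and not taken from this page.

```python
# Sketch only: assumes a dataset with the columns listed above is hosted on the
# Hugging Face Hub under a hypothetical path. Column names match the schema here.
from datasets import load_dataset

ds = load_dataset("user/vulnerability-functions", split="train")  # hypothetical path

# Print a short summary of the first few records: project, commit, label, CWE tags.
for row in ds.select(range(3)):
    label = "vulnerable" if row["target"] == 1 else "non-vulnerable"
    cwes = ", ".join(row["cwe"]) or "none"
    print(f'{row["project"]} @ {row["commit_id"][:12]}: {label} '
          f'(CWE: {cwes}), size={row["size"]}')
```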
routerlist_insert_old(routerlist_t *rl, routerinfo_t *ri) { { /* XXXX remove this code if it slows us down. */ routerinfo_t *ri_generated = router_get_my_routerinfo(); tor_assert(ri_generated != ri); } tor_assert(ri->cache_info.routerlist_index == -1); if (should_cache_old_descriptors() && ri->purpose == ROUTER_PURPOSE_GENERAL && !sdmap_get(rl->desc_digest_map, ri->cache_info.signed_descriptor_digest)) { signed_descriptor_t *sd = signed_descriptor_from_routerinfo(ri); sdmap_set(rl->desc_digest_map, sd->signed_descriptor_digest, sd); smartlist_add(rl->old_routers, sd); sd->routerlist_index = smartlist_len(rl->old_routers)-1; if (!tor_digest_is_zero(sd->extra_info_digest)) sdmap_set(rl->desc_by_eid_map, sd->extra_info_digest, sd); } else { routerinfo_free(ri); } #ifdef DEBUG_ROUTERLIST routerlist_assert_ok(rl); #endif }
0
[ "CWE-399" ]
tor
308f6dad20675c42b29862f4269ad1fbfb00dc9a
38,252,064,961,915,465,000,000,000,000,000,000,000
26
Mitigate a side-channel leak of which relays Tor chooses for a circuit Tor's and OpenSSL's current design guarantee that there are other leaks, but this one is likely to be more easily exploitable, and is easy to fix.
static void irda_connect_confirm(void *instance, void *sap, struct qos_info *qos, __u32 max_sdu_size, __u8 max_header_size, struct sk_buff *skb) { struct irda_sock *self; struct sock *sk; self = instance; pr_debug("%s(%p)\n", __func__, self); sk = instance; if (sk == NULL) { dev_kfree_skb(skb); return; } dev_kfree_skb(skb); // Should be ??? skb_queue_tail(&sk->sk_receive_queue, skb); /* How much header space do we need to reserve */ self->max_header_size = max_header_size; /* IrTTP max SDU size in transmit direction */ self->max_sdu_size_tx = max_sdu_size; /* Find out what the largest chunk of data that we can transmit is */ switch (sk->sk_type) { case SOCK_STREAM: if (max_sdu_size != 0) { net_err_ratelimited("%s: max_sdu_size must be 0\n", __func__); return; } self->max_data_size = irttp_get_max_seg_size(self->tsap); break; case SOCK_SEQPACKET: if (max_sdu_size == 0) { net_err_ratelimited("%s: max_sdu_size cannot be 0\n", __func__); return; } self->max_data_size = max_sdu_size; break; default: self->max_data_size = irttp_get_max_seg_size(self->tsap); } pr_debug("%s(), max_data_size=%d\n", __func__, self->max_data_size); memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); /* We are now connected! */ sk->sk_state = TCP_ESTABLISHED; sk->sk_state_change(sk); }
0
[]
net
79462ad02e861803b3840cc782248c7359451cd9
30,193,918,944,917,280,000,000,000,000,000,000,000
58
net: add validation for the socket syscall protocol argument 郭永刚 reported that one could simply crash the kernel as root by using a simple program: int socket_fd; struct sockaddr_in addr; addr.sin_port = 0; addr.sin_addr.s_addr = INADDR_ANY; addr.sin_family = 10; socket_fd = socket(10,3,0x40000000); connect(socket_fd , &addr,16); AF_INET, AF_INET6 sockets actually only support 8-bit protocol identifiers. inet_sock's skc_protocol field thus is sized accordingly, thus larger protocol identifiers simply cut off the higher bits and store a zero in the protocol fields. This could lead to e.g. NULL function pointer because as a result of the cut off inet_num is zero and we call down to inet_autobind, which is NULL for raw sockets. kernel: Call Trace: kernel: [<ffffffff816db90e>] ? inet_autobind+0x2e/0x70 kernel: [<ffffffff816db9a4>] inet_dgram_connect+0x54/0x80 kernel: [<ffffffff81645069>] SYSC_connect+0xd9/0x110 kernel: [<ffffffff810ac51b>] ? ptrace_notify+0x5b/0x80 kernel: [<ffffffff810236d8>] ? syscall_trace_enter_phase2+0x108/0x200 kernel: [<ffffffff81645e0e>] SyS_connect+0xe/0x10 kernel: [<ffffffff81779515>] tracesys_phase2+0x84/0x89 I found no particular commit which introduced this problem. CVE: CVE-2015-8543 Cc: Cong Wang <[email protected]> Reported-by: 郭永刚 <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void Scanner::try_lex_string_in_code(char quote) { // We need to lex string literals in code blocks because they may contain closing // brace symbol that would otherwise be erroneously lexed as a real closing brace. // // However, single quote in Rust may be either the beginning of a char literal as in // '\u{1F600}', or a standalone one as in 'label. In the latter case trying to lex a // generic string literal will consume a fragment of the file until the next single // quote (if any) and result in either a spurios parse error, or incorrect generated // code. Therefore in Rust we try to lex a char literal, or else consume the quote. if (globopts->lang != LANG_RUST || quote != '\'') { lex_string(quote); return; } // Rust spec (literals): https://doc.rust-lang.org/reference/tokens.html#literals // Rust spec (input encoding): https://doc.rust-lang.org/reference/input-format.html #line 3452 "src/parse/lex.cc" { unsigned char yych; static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; if ((lim - cur) < 5) { if (!fill(5)) { error("unexpected end of input"); exit(1); } } yych = (unsigned char)*(mar = cur); if (yych <= 0xDF) { if (yych <= '\\') { if (yych <= '[') goto yy520; goto yy522; } else { if (yych <= 0x7F) goto yy520; if (yych >= 0xC2) goto yy523; } } else { if (yych <= 0xF0) { if (yych <= 0xE0) goto yy524; if (yych <= 0xEF) goto yy525; goto yy526; } else { if (yych <= 0xF3) goto yy527; if (yych <= 0xF4) goto yy528; } } yy519: #line 700 "../src/parse/lex.re" { return; } #line 3512 "src/parse/lex.cc" yy520: yych = (unsigned char)*++cur; if (yych == '\'') goto yy529; yy521: cur = mar; goto yy519; yy522: yych = (unsigned char)*++cur; if (yych <= 'm') { if (yych <= '\'') { if (yych == '"') goto yy520; if (yych <= '&') goto yy521; goto yy530; } else { if (yych <= '0') { if (yych <= '/') goto yy521; goto yy520; } else { if (yych == '\\') goto yy520; goto yy521; } } } else { if (yych <= 's') { if (yych <= 'n') goto yy520; if (yych == 'r') goto yy520; goto yy521; } else { if (yych <= 'u') { if (yych <= 't') goto yy520; goto yy531; } else { if (yych == 'x') goto yy532; goto yy521; } } } yy523: yych = (unsigned char)*++cur; if (yych <= 0x7F) goto yy521; if (yych <= 0xBF) goto yy520; goto yy521; yy524: yych = (unsigned char)*++cur; if (yych <= 0x9F) goto yy521; if (yych <= 0xBF) goto yy523; goto yy521; yy525: yych = (unsigned char)*++cur; if (yych <= 0x7F) goto yy521; if (yych <= 0xBF) goto yy523; goto yy521; yy526: yych = (unsigned char)*++cur; if (yych <= 0x8F) goto yy521; if (yych <= 0xBF) goto yy525; goto yy521; yy527: yych = (unsigned char)*++cur; if (yych <= 0x7F) goto yy521; if (yych <= 0xBF) goto yy525; goto yy521; yy528: yych = (unsigned char)*++cur; if (yych <= 0x7F) goto yy521; if (yych <= 0x8F) goto 
yy525; goto yy521; yy529: ++cur; goto yy519; yy530: yych = (unsigned char)*++cur; if (yych == '\'') goto yy529; goto yy519; yy531: yych = (unsigned char)*++cur; if (yych == '{') goto yy533; goto yy521; yy532: yych = (unsigned char)*++cur; if (yych == '\'') goto yy521; goto yy535; yy533: yych = (unsigned char)*++cur; if (yych == '}') goto yy521; goto yy537; yy534: ++cur; if (lim <= cur) { if (!fill(1)) { error("unexpected end of input"); exit(1); } } yych = (unsigned char)*cur; yy535: if (yybm[0+yych] & 128) { goto yy534; } if (yych == '\'') goto yy529; goto yy521; yy536: ++cur; if ((lim - cur) < 2) { if (!fill(2)) { error("unexpected end of input"); exit(1); } } yych = (unsigned char)*cur; yy537: if (yych <= 'F') { if (yych <= '/') goto yy521; if (yych <= '9') goto yy536; if (yych <= '@') goto yy521; goto yy536; } else { if (yych <= 'f') { if (yych <= '`') goto yy521; goto yy536; } else { if (yych == '}') goto yy520; goto yy521; } } } #line 701 "../src/parse/lex.re" }
1
[ "CWE-787" ]
re2c
039c18949190c5de5397eba504d2c75dad2ea9ca
327,776,780,290,780,700,000,000,000,000,000,000,000
199
Emit an error when repetition lower bound exceeds upper bound. Historically this was allowed and re2c swapped the bounds. However, it most likely indicates an error in user code and there is only a single occurrence in the tests (and the test in an artificial one), so although the change is backwards incompatible there is low chance of breaking real-world code. This fixes second test case in the bug #394 "Stack overflow due to recursion in src/dfa/dead_rules.cc" (the actual fix is to limit DFA size but the test also has counted repetition with swapped bounds).
rsvg_new_text (void) { RsvgNodeText *text; text = g_new (RsvgNodeText, 1); _rsvg_node_init (&text->super, RSVG_NODE_TYPE_TEXT); text->super.draw = _rsvg_node_text_draw; text->super.set_atts = _rsvg_node_text_set_atts; text->x = text->y = text->dx = text->dy = _rsvg_css_parse_length ("0"); return &text->super; }
0
[]
librsvg
34c95743ca692ea0e44778e41a7c0a129363de84
314,835,663,861,775,260,000,000,000,000,000,000,000
10
Store node type separately in RsvgNode The node name (formerly RsvgNode:type) cannot be used to infer the sub-type of RsvgNode that we're dealing with, since for unknown elements we put type = node-name. This lead to a (potentially exploitable) crash e.g. when the element name started with "fe" which tricked the old code into considering it as a RsvgFilterPrimitive. CVE-2011-3146 https://bugzilla.gnome.org/show_bug.cgi?id=658014
compileTranslationTable (const char *tableList) { /*compile source tables into a table in memory */ int k; char mainTable[MAXSTRING]; char subTable[MAXSTRING]; int listLength; int currentListPos = 0; errorCount = 0; warningCount = 0; fileCount = 0; table = NULL; characterClasses = NULL; ruleNames = NULL; if (tableList == NULL) return NULL; if (!opcodeLengths[0]) { TranslationTableOpcode opcode; for (opcode = 0; opcode < CTO_None; opcode++) opcodeLengths[opcode] = strlen (opcodeNames[opcode]); } allocateHeader (NULL); /*Compile things that are necesary for the proper operation of liblouis or liblouisxml or liblouisutdml */ compileString ("space \\s 0"); compileString ("noback sign \\x0000 0"); compileString ("space \\x00a0 a unbreakable space"); compileString ("space \\x001b 1b escape"); compileString ("space \\xffff 123456789abcdef ENDSEGMENT"); listLength = strlen (tableList); for (k = currentListPos; k < listLength; k++) if (tableList[k] == ',') break; if (k == listLength) { /* Only one file */ strcpy (tablePath, tableList); for (k = strlen (tablePath); k >= 0; k--) if (tablePath[k] == '\\' || tablePath[k] == '/') break; strcpy (mainTable, &tablePath[k + 1]); tablePath[++k] = 0; if (!compileFile (mainTable)) goto cleanup; } else { /* Compile a list of files */ currentListPos = k + 1; strncpy (tablePath, tableList, k); tablePath[k] = 0; for (k = strlen (tablePath); k >= 0; k--) if (tablePath[k] == '\\' || tablePath[k] == '/') break; strcpy (mainTable, &tablePath[k + 1]); tablePath[++k] = 0; if (!compileFile (mainTable)) goto cleanup; while (currentListPos < listLength) { for (k = currentListPos; k < listLength; k++) if (tableList[k] == ',') break; strncpy (subTable, &tableList[currentListPos], k - currentListPos); subTable[k - currentListPos] = 0; if (!compileFile (subTable)) goto cleanup; currentListPos = k + 1; } } /*Clean up after compiling files*/ cleanup: if (characterClasses) deallocateCharacterClasses (); if (ruleNames) deallocateRuleNames (); if (warningCount) lou_logPrint ("%d warnings issued", warningCount); if (!errorCount) { setDefaults (); table->tableSize = tableSize; table->bytesUsed = tableUsed; } else { if (!(errorCount == 1 && fileCount == 1)) lou_logPrint ("%d errors found.", errorCount); if (table) free (table); table = NULL; } return (void *) table; }
1
[]
liblouis
dc97ef791a4fae9da11592c79f9f79e010596e0c
13,278,298,906,072,000,000,000,000,000,000,000,000
93
Merge branch 'table_resolver'
static void TIFFUnmapBlob(thandle_t image,tdata_t base,toff_t size) { (void) image; (void) base; (void) size; }
0
[ "CWE-125" ]
ImageMagick
803bc34ebe023f209f745baf8a112610ff77cc8c
43,348,304,741,100,760,000,000,000,000,000,000,000
6
Prevent possible buffer overflow when reading TIFF images (bug report from Shi Pu of MS509 Team)
void JBIG2Stream::discardSegment(unsigned int segNum) { for (auto it = globalSegments.begin(); it != globalSegments.end(); ++it) { if ((*it)->getSegNum() == segNum) { globalSegments.erase(it); return; } } for (auto it = segments.begin(); it != segments.end(); ++it) { if ((*it)->getSegNum() == segNum) { segments.erase(it); return; } } }
0
[ "CWE-476", "CWE-190" ]
poppler
27354e9d9696ee2bc063910a6c9a6b27c5184a52
6,294,741,209,778,948,000,000,000,000,000,000,000
15
JBIG2Stream: Fix crash on broken file https://github.com/jeffssh/CVE-2021-30860 Thanks to David Warren for the heads up
static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct stub_device *sdev = dev_get_drvdata(dev); int sockfd = 0; struct socket *socket; int rv; struct task_struct *tcp_rx = NULL; struct task_struct *tcp_tx = NULL; if (!sdev) { dev_err(dev, "sdev is null\n"); return -ENODEV; } rv = sscanf(buf, "%d", &sockfd); if (rv != 1) return -EINVAL; if (sockfd != -1) { int err; dev_info(dev, "stub up\n"); spin_lock_irq(&sdev->ud.lock); if (sdev->ud.status != SDEV_ST_AVAILABLE) { dev_err(dev, "not ready\n"); goto err; } socket = sockfd_lookup(sockfd, &err); if (!socket) { dev_err(dev, "failed to lookup sock"); goto err; } if (socket->type != SOCK_STREAM) { dev_err(dev, "Expecting SOCK_STREAM - found %d", socket->type); goto sock_err; } /* unlock and create threads and get tasks */ spin_unlock_irq(&sdev->ud.lock); tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx"); if (IS_ERR(tcp_rx)) { sockfd_put(socket); return -EINVAL; } tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx"); if (IS_ERR(tcp_tx)) { kthread_stop(tcp_rx); sockfd_put(socket); return -EINVAL; } /* get task structs now */ get_task_struct(tcp_rx); get_task_struct(tcp_tx); /* lock and update sdev->ud state */ spin_lock_irq(&sdev->ud.lock); sdev->ud.tcp_socket = socket; sdev->ud.sockfd = sockfd; sdev->ud.tcp_rx = tcp_rx; sdev->ud.tcp_tx = tcp_tx; sdev->ud.status = SDEV_ST_USED; spin_unlock_irq(&sdev->ud.lock); wake_up_process(sdev->ud.tcp_rx); wake_up_process(sdev->ud.tcp_tx); } else { dev_info(dev, "stub down\n"); spin_lock_irq(&sdev->ud.lock); if (sdev->ud.status != SDEV_ST_USED) goto err; spin_unlock_irq(&sdev->ud.lock); usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN); } return count; sock_err: sockfd_put(socket); err: spin_unlock_irq(&sdev->ud.lock); return -EINVAL; }
0
[ "CWE-362" ]
linux
9380afd6df70e24eacbdbde33afc6a3950965d22
151,624,572,692,064,030,000,000,000,000,000,000,000
93
usbip: fix stub_dev usbip_sockfd_store() races leading to gpf usbip_sockfd_store() is invoked when user requests attach (import) detach (unimport) usb device from usbip host. vhci_hcd sends import request and usbip_sockfd_store() exports the device if it is free for export. Export and unexport are governed by local state and shared state - Shared state (usbip device status, sockfd) - sockfd and Device status are used to determine if stub should be brought up or shut down. - Local state (tcp_socket, rx and tx thread task_struct ptrs) A valid tcp_socket controls rx and tx thread operations while the device is in exported state. - While the device is exported, device status is marked used and socket, sockfd, and thread pointers are valid. Export sequence (stub-up) includes validating the socket and creating receive (rx) and transmit (tx) threads to talk to the client to provide access to the exported device. rx and tx threads depends on local and shared state to be correct and in sync. Unexport (stub-down) sequence shuts the socket down and stops the rx and tx threads. Stub-down sequence relies on local and shared states to be in sync. There are races in updating the local and shared status in the current stub-up sequence resulting in crashes. These stem from starting rx and tx threads before local and global state is updated correctly to be in sync. 1. Doesn't handle kthread_create() error and saves invalid ptr in local state that drives rx and tx threads. 2. Updates tcp_socket and sockfd, starts stub_rx and stub_tx threads before updating usbip_device status to SDEV_ST_USED. This opens up a race condition between the threads and usbip_sockfd_store() stub up and down handling. Fix the above problems: - Stop using kthread_get_run() macro to create/start threads. - Create threads and get task struct reference. - Add kthread_create() failure handling and bail out. - Hold usbip_device lock to update local and shared states after creating rx and tx threads. - Update usbip_device status to SDEV_ST_USED. - Update usbip_device tcp_socket, sockfd, tcp_rx, and tcp_tx - Start threads after usbip_device (tcp_socket, sockfd, tcp_rx, tcp_tx, and status) is complete. Credit goes to syzbot and Tetsuo Handa for finding and root-causing the kthread_get_run() improper error handling problem and others. This is a hard problem to find and debug since the races aren't seen in a normal case. Fuzzing forces the race window to be small enough for the kthread_get_run() error path bug and starting threads before updating the local and shared state bug in the stub-up sequence. Tested with syzbot reproducer: - https://syzkaller.appspot.com/text?tag=ReproC&x=14801034d00000 Fixes: 9720b4bc76a83807 ("staging/usbip: convert to kthread") Cc: [email protected] Reported-by: syzbot <[email protected]> Reported-by: syzbot <[email protected]> Reported-by: syzbot <[email protected]> Reported-by: Tetsuo Handa <[email protected]> Signed-off-by: Shuah Khan <[email protected]> Link: https://lore.kernel.org/r/268a0668144d5ff36ec7d87fdfa90faf583b7ccc.1615171203.git.skhan@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman <[email protected]>
static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num, unsigned size) { struct queued_ctx ctx; struct usb_device *udev = testdev_to_usbdev(dev); void *buf; dma_addr_t buf_dma; int i; int retval = -ENOMEM; init_completion(&ctx.complete); atomic_set(&ctx.pending, 1); /* One more than the actual value */ ctx.num = num; ctx.status = 0; buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma); if (!buf) return retval; memset(buf, 0, size); /* Allocate and init the urbs we'll queue */ ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL); if (!ctx.urbs) goto free_buf; for (i = 0; i < num; i++) { ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL); if (!ctx.urbs[i]) goto free_urbs; usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size, unlink_queued_callback, &ctx); ctx.urbs[i]->transfer_dma = buf_dma; ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP; if (usb_pipeout(ctx.urbs[i]->pipe)) { simple_fill_buf(ctx.urbs[i]); ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET; } } /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */ for (i = 0; i < num; i++) { atomic_inc(&ctx.pending); retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL); if (retval != 0) { dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n", i, retval); atomic_dec(&ctx.pending); ctx.status = retval; break; } } if (i == num) { usb_unlink_urb(ctx.urbs[num - 4]); usb_unlink_urb(ctx.urbs[num - 2]); } else { while (--i >= 0) usb_unlink_urb(ctx.urbs[i]); } if (atomic_dec_and_test(&ctx.pending)) /* The extra count */ complete(&ctx.complete); wait_for_completion(&ctx.complete); retval = ctx.status; free_urbs: for (i = 0; i < num; i++) usb_free_urb(ctx.urbs[i]); kfree(ctx.urbs); free_buf: usb_free_coherent(udev, size, buf, buf_dma); return retval; }
0
[ "CWE-476" ]
linux
7c80f9e4a588f1925b07134bb2e3689335f6c6d8
41,004,979,396,866,767,000,000,000,000,000,000,000
72
usb: usbtest: fix NULL pointer dereference If the usbtest driver encounters a device with an IN bulk endpoint but no OUT bulk endpoint, it will try to dereference a NULL pointer (out->desc.bEndpointAddress). The problem can be solved by adding a missing test. Signed-off-by: Alan Stern <[email protected]> Reported-by: Andrey Konovalov <[email protected]> Tested-by: Andrey Konovalov <[email protected]> Signed-off-by: Felipe Balbi <[email protected]>
void passing_segment(Vertex_handle v, IT) { v->mark() = true; }
0
[ "CWE-269" ]
cgal
618b409b0fbcef7cb536a4134ae3a424ef5aae45
240,131,562,589,061,700,000,000,000,000,000,000,000
2
Fix Nef_2 and Nef_S2 IO
static double mp_vector_off(_cimg_math_parser& mp) { const unsigned int ptr = (unsigned int)mp.opcode[2] + 1, siz = (unsigned int)mp.opcode[3]; const int off = (int)_mp_arg(4); return off>=0 && off<(int)siz?mp.mem[ptr + off]:cimg::type<double>::nan();
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
323,266,361,065,079,200,000,000,000,000,000,000,000
7
Fix other issues in 'CImg<T>::load_bmp()'.
static int _get_lderrno(LDAP *ldap) { #if !HAVE_NSLDAP #if LDAP_API_VERSION > 2000 || HAVE_ORALDAP int lderr; /* New versions of OpenLDAP do it this way */ ldap_get_option(ldap, LDAP_OPT_ERROR_NUMBER, &lderr); return lderr; #else return ldap->ld_errno; #endif #else return ldap_get_lderrno(ldap, NULL, NULL); #endif }
0
[ "CWE-476" ]
php-src
49782c54994ecca2ef2a061063bd5a7079c43527
142,340,944,000,994,430,000,000,000,000,000,000,000
16
Fix bug #76248 - Malicious LDAP-Server Response causes Crash
*/ PHP_FUNCTION(date_timezone_set) { zval *object; zval *timezone_object; php_date_obj *dateobj; php_timezone_obj *tzobj; if (zend_parse_method_parameters(ZEND_NUM_ARGS() TSRMLS_CC, getThis(), "OO", &object, date_ce_date, &timezone_object, date_ce_timezone) == FAILURE) { RETURN_FALSE; } dateobj = (php_date_obj *) zend_object_store_get_object(object TSRMLS_CC); DATE_CHECK_INITIALIZED(dateobj->time, DateTime); tzobj = (php_timezone_obj *) zend_object_store_get_object(timezone_object TSRMLS_CC); switch (tzobj->type) { case TIMELIB_ZONETYPE_OFFSET: timelib_set_timezone_from_offset(dateobj->time, tzobj->tzi.utc_offset); break; case TIMELIB_ZONETYPE_ABBR: timelib_set_timezone_from_abbr(dateobj->time, tzobj->tzi.z); break; case TIMELIB_ZONETYPE_ID: timelib_set_timezone(dateobj->time, tzobj->tzi.tz); break; } timelib_unixtime2local(dateobj->time, dateobj->time->sse); RETURN_ZVAL(object, 1, 0);
0
[]
php-src
7b1898183032eeabc64a086ff040af991cebcd93
194,919,221,813,484,200,000,000,000,000,000,000,000
29
Fix bug #68942 (Use after free vulnerability in unserialize() with DateTimeZone) Conflicts: ext/date/php_date.c
int digest_file_window(struct digest *d, const char *filename, unsigned char *hash, const unsigned char *sig, loff_t start, loff_t size) { int fd, ret; ret = digest_init(d); if (ret) return ret; fd = open(filename, O_RDONLY); if (fd < 0) { perror(filename); return -errno; } ret = digest_update_from_fd(d, fd, start, size); if (ret) goto out; if (sig) ret = digest_verify(d, sig); else ret = digest_final(d, hash); out: close(fd); return ret; }
0
[ "CWE-200" ]
barebox
0a9f9a7410681e55362f8311537ebc7be9ad0fbe
231,828,478,775,561,150,000,000,000,000,000,000,000
30
crypto: digest: use crypto_memneq() When verifying a digest it is important not to leak timing information through memcmp(). Use crypto_memneq() instead. Signed-off-by: Sascha Hauer <[email protected]>
static void load_creator_from_old_format( FILE *fp, xref_t *xref, const char *buf, size_t buf_size) { int i, n_eles, length, is_escaped, obj_id; char *c, *ascii, *start, *s, *saved_buf_search, *obj; size_t obj_size; pdf_creator_t *info; info = new_creator(&n_eles); /* Mark the end of buf, so that we do not crawl past it */ if (buf_size < 1) return; const char *buf_end = buf + buf_size - 1; /* Treat 'end' as either the end of 'buf' or the end of 'obj'. Obj is if * the creator element (e.g., ModDate, Producer, etc) is an object and not * part of 'buf'. */ const char *end = buf_end; for (i=0; i<n_eles; ++i) { if (!(c = strstr(buf, info[i].key))) continue; /* Find the value (skipping whitespace) */ c += strlen(info[i].key); while (isspace(*c)) ++c; if (c >= buf_end) { FAIL("Failed to locate space, likely a corrupt PDF."); } /* If looking at the start of a pdf token, we have gone too far */ if (*c == '/') continue; /* If the value is a number and not a '(' then the data is located in * an object we need to fetch, and not inline */ obj = saved_buf_search = NULL; obj_size = 0; end = buf_end; /* Init to be the buffer, this might not be an obj. */ if (isdigit(*c)) { obj_id = atoi(c); saved_buf_search = c; s = saved_buf_search; obj = get_object(fp, obj_id, xref, &obj_size, NULL); end = obj + obj_size; c = obj; /* Iterate to '(' */ while (c && (*c != '(') && (c < end)) ++c; if (c >= end) { FAIL("Failed to locate a '(' character. " "This might be a corrupt PDF.\n"); } /* Advance the search to the next token */ while (s && (*s == '/') && (s < buf_end)) ++s; if (s >= buf_end) { FAIL("Failed to locate a '/' character. " "This might be a corrupt PDF.\n"); } saved_buf_search = s; } /* Find the end of the value */ start = c; length = is_escaped = 0; while (c && ((*c != '\r') && (*c != '\n') && (*c != '<'))) { /* Bail out if we see an un-escaped ')' closing character */ if (!is_escaped && (*c == ')')) break; else if (*c == '\\') is_escaped = 1; else is_escaped = 0; ++c; ++length; if (c > end) { FAIL("Failed to locate the end of a value. " "This might be a corrupt PDF.\n"); } } if (length == 0) continue; /* Add 1 to length so it gets the closing ')' when we copy */ if (length) length += 1; length = (length > KV_MAX_VALUE_LENGTH) ? KV_MAX_VALUE_LENGTH : length; strncpy(info[i].value, start, length); info[i].value[KV_MAX_VALUE_LENGTH - 1] = '\0'; /* Restore where we were searching from */ if (saved_buf_search) { /* Release memory from get_object() called earlier */ free(obj); c = saved_buf_search; } } /* For all creation information tags */ /* Go through the values and convert if encoded */ for (i = 0; i < n_eles; ++i) { const size_t val_str_len = strnlen(info[i].value, KV_MAX_VALUE_LENGTH); if ((ascii = decode_text_string(info[i].value, val_str_len))) { strncpy(info[i].value, ascii, val_str_len); free(ascii); } } xref->creator = info; xref->n_creator_entries = n_eles; }
0
[ "CWE-787" ]
pdfresurrect
1b422459f07353adce2878806d5247d9e91fb397
140,589,812,648,370,320,000,000,000,000,000,000,000
125
Update header validation checks. Thanks to yifengchen-cc for identifying this.
int run_plugin_auth(MYSQL *mysql, char *data, uint data_len, const char *data_plugin, const char *db) { const char *auth_plugin_name; auth_plugin_t *auth_plugin; MCPVIO_EXT mpvio; ulong pkt_length; int res; DBUG_ENTER ("run_plugin_auth"); /* determine the default/initial plugin to use */ if (mysql->options.extension && mysql->options.extension->default_auth && mysql->server_capabilities & CLIENT_PLUGIN_AUTH) { auth_plugin_name= mysql->options.extension->default_auth; if (!(auth_plugin= (auth_plugin_t*) mysql_client_find_plugin(mysql, auth_plugin_name, MYSQL_CLIENT_AUTHENTICATION_PLUGIN))) DBUG_RETURN (1); /* oops, not found */ } else { auth_plugin= mysql->server_capabilities & CLIENT_PROTOCOL_41 ? &native_password_client_plugin : &old_password_client_plugin; auth_plugin_name= auth_plugin->name; } DBUG_PRINT ("info", ("using plugin %s", auth_plugin_name)); mysql->net.last_errno= 0; /* just in case */ if (data_plugin && strcmp(data_plugin, auth_plugin_name)) { /* data was prepared for a different plugin, don't show it to this one */ data= 0; data_len= 0; } mpvio.mysql_change_user= data_plugin == 0; mpvio.cached_server_reply.pkt= (uchar*)data; mpvio.cached_server_reply.pkt_len= data_len; mpvio.read_packet= client_mpvio_read_packet; mpvio.write_packet= client_mpvio_write_packet; mpvio.info= client_mpvio_info; mpvio.mysql= mysql; mpvio.packets_read= mpvio.packets_written= 0; mpvio.db= db; mpvio.plugin= auth_plugin; res= auth_plugin->authenticate_user((struct st_plugin_vio *)&mpvio, mysql); DBUG_PRINT ("info", ("authenticate_user returned %s", res == CR_OK ? "CR_OK" : res == CR_ERROR ? "CR_ERROR" : res == CR_OK_HANDSHAKE_COMPLETE ? "CR_OK_HANDSHAKE_COMPLETE" : "error")); compile_time_assert(CR_OK == -1); compile_time_assert(CR_ERROR == 0); if (res > CR_OK && mysql->net.read_pos[0] != 254) { /* the plugin returned an error. 
write it down in mysql, unless the error code is CR_ERROR and mysql->net.last_errno is already set (the plugin has done it) */ DBUG_PRINT ("info", ("res=%d", res)); if (res > CR_ERROR) set_mysql_error(mysql, res, unknown_sqlstate); else if (!mysql->net.last_errno) set_mysql_error(mysql, CR_UNKNOWN_ERROR, unknown_sqlstate); DBUG_RETURN (1); } /* read the OK packet (or use the cached value in mysql->net.read_pos */ if (res == CR_OK) pkt_length= (*mysql->methods->read_change_user_result)(mysql); else /* res == CR_OK_HANDSHAKE_COMPLETE */ pkt_length= mpvio.last_read_packet_len; DBUG_PRINT ("info", ("OK packet length=%lu", pkt_length)); if (pkt_length == packet_error) { if (mysql->net.last_errno == CR_SERVER_LOST) set_mysql_extended_error(mysql, CR_SERVER_LOST, unknown_sqlstate, ER(CR_SERVER_LOST_EXTENDED), "reading authorization packet", errno); DBUG_RETURN (1); } if (mysql->net.read_pos[0] == 254) { /* The server asked to use a different authentication plugin */ if (pkt_length == 1) { /* old "use short scramble" packet */ DBUG_PRINT ("info", ("old use short scramble packet from server")); auth_plugin_name= old_password_plugin_name; mpvio.cached_server_reply.pkt= (uchar*)mysql->scramble; mpvio.cached_server_reply.pkt_len= SCRAMBLE_LENGTH + 1; } else { /* new "use different plugin" packet */ uint len; auth_plugin_name= (char*)mysql->net.read_pos + 1; len= strlen(auth_plugin_name); /* safe as my_net_read always appends \0 */ mpvio.cached_server_reply.pkt_len= pkt_length - len - 2; mpvio.cached_server_reply.pkt= mysql->net.read_pos + len + 2; DBUG_PRINT ("info", ("change plugin packet from server for plugin %s", auth_plugin_name)); } if (!(auth_plugin= (auth_plugin_t *) mysql_client_find_plugin(mysql, auth_plugin_name, MYSQL_CLIENT_AUTHENTICATION_PLUGIN))) DBUG_RETURN (1); mpvio.plugin= auth_plugin; res= auth_plugin->authenticate_user((struct st_plugin_vio *)&mpvio, mysql); DBUG_PRINT ("info", ("second authenticate_user returned %s", res == CR_OK ? "CR_OK" : res == CR_ERROR ? "CR_ERROR" : res == CR_OK_HANDSHAKE_COMPLETE ? "CR_OK_HANDSHAKE_COMPLETE" : "error")); if (res > CR_OK) { if (res > CR_ERROR) set_mysql_error(mysql, res, unknown_sqlstate); else if (!mysql->net.last_errno) set_mysql_error(mysql, CR_UNKNOWN_ERROR, unknown_sqlstate); DBUG_RETURN (1); } if (res != CR_OK_HANDSHAKE_COMPLETE) { /* Read what server thinks about out new auth message report */ if (cli_safe_read(mysql) == packet_error) { if (mysql->net.last_errno == CR_SERVER_LOST) set_mysql_extended_error(mysql, CR_SERVER_LOST, unknown_sqlstate, ER(CR_SERVER_LOST_EXTENDED), "reading final connect information", errno); DBUG_RETURN (1); } } } /* net->read_pos[0] should always be 0 here if the server implements the protocol correctly */ DBUG_RETURN (mysql->net.read_pos[0] != 0); }
0
[ "CWE-254" ]
server
f0d774d48416bb06063184380b684380ca005a41
252,665,685,865,972,950,000,000,000,000,000,000,000
155
MDEV-9212 ssl-validate-cert incorrect hostname check Reimplement ssl_verify_server_cert() using the logic from https://wiki.openssl.org/index.php/Hostname_validation The bug was discovered by Alex Gaynor.
static unsigned pirq_from_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_PIRQ); return info->u.pirq.pirq; }
0
[ "CWE-400", "CWE-703" ]
linux
e99502f76271d6bc4e374fe368c50c67a1fd3070
133,450,379,776,999,430,000,000,000,000,000,000,000
9
xen/events: defer eoi in case of excessive number of events In case rogue guests are sending events at high frequency it might happen that xen_evtchn_do_upcall() won't stop processing events in dom0. As this is done in irq handling a crash might be the result. In order to avoid that, delay further inter-domain events after some time in xen_evtchn_do_upcall() by forcing eoi processing into a worker on the same cpu, thus inhibiting new events coming in. The time after which eoi processing is to be delayed is configurable via a new module parameter "event_loop_timeout" which specifies the maximum event loop time in jiffies (default: 2, the value was chosen after some tests showing that a value of 2 was the lowest with an only slight drop of dom0 network throughput while multiple guests performed an event storm). How long eoi processing will be delayed can be specified via another parameter "event_eoi_delay" (again in jiffies, default 10, again the value was chosen after testing with different delay values). This is part of XSA-332. Cc: [email protected] Reported-by: Julien Grall <[email protected]> Signed-off-by: Juergen Gross <[email protected]> Reviewed-by: Stefano Stabellini <[email protected]> Reviewed-by: Wei Liu <[email protected]>
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { int ret; lock_sock(sk); ret = tcp_sendpage_locked(sk, page, offset, size, flags); release_sock(sk); return ret; }
0
[ "CWE-190" ]
net
3b4929f65b0d8249f19a50245cd88ed1a2f78cff
220,053,079,984,018,570,000,000,000,000,000,000,000
11
tcp: limit payload size of sacked skbs Jonathan Looney reported that TCP can trigger the following crash in tcp_shifted_skb() : BUG_ON(tcp_skb_pcount(skb) < pcount); This can happen if the remote peer has advertized the smallest MSS that linux TCP accepts : 48 An skb can hold 17 fragments, and each fragment can hold 32KB on x86, or 64KB on PowerPC. This means that the 16bit witdh of TCP_SKB_CB(skb)->tcp_gso_segs can overflow. Note that tcp_sendmsg() builds skbs with less than 64KB of payload, so this problem needs SACK to be enabled. SACK blocks allow TCP to coalesce multiple skbs in the retransmit queue, thus filling the 17 fragments to maximal capacity. CVE-2019-11477 -- u16 overflow of TCP_SKB_CB(skb)->tcp_gso_segs Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Jonathan Looney <[email protected]> Acked-by: Neal Cardwell <[email protected]> Reviewed-by: Tyler Hicks <[email protected]> Cc: Yuchung Cheng <[email protected]> Cc: Bruce Curtis <[email protected]> Cc: Jonathan Lemon <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void raw_dispatch(const MessagesMap_t *entry, const uint8_t *msg, uint32_t msg_size, uint32_t frame_length) { static RawMessage raw_msg; raw_msg.buffer = msg; raw_msg.length = msg_size; if(entry->process_func) { ((raw_msg_handler_t)entry->process_func)(&raw_msg, frame_length); } }
0
[ "CWE-787" ]
keepkey-firmware
b222c66cdd7c3203d917c80ba615082d309d80c3
166,692,923,894,419,290,000,000,000,000,000,000,000
12
board: factor out tiny_dispatch And add stronger checks on what tiny_msg's are allowed to be decoded.
static CodecStats& atomicGet(AtomicPtr& ptr, Stats::Scope& scope) { return *ptr.get([&scope]() -> CodecStats* { return new CodecStats{ALL_HTTP2_CODEC_STATS(POOL_COUNTER_PREFIX(scope, "http2."), POOL_GAUGE_PREFIX(scope, "http2."))}; }); }
0
[ "CWE-400" ]
envoy
0e49a495826ea9e29134c1bd54fdeb31a034f40c
129,236,794,956,903,830,000,000,000,000,000,000,000
6
http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein <[email protected]>
static unsigned char *PopRunlengthPacket(Image *image,unsigned char *pixels, size_t length,PixelPacket pixel,IndexPacket index) { if (image->storage_class != DirectClass) { unsigned int value; value=(unsigned int) index; switch (image->depth) { case 32: { *pixels++=(unsigned char) (value >> 24); *pixels++=(unsigned char) (value >> 16); } case 16: *pixels++=(unsigned char) (value >> 8); case 8: { *pixels++=(unsigned char) value; break; } default: (void) ThrowMagickException(&image->exception,GetMagickModule(), CorruptImageError,"ImageDepthNotSupported","`%s'",image->filename); } switch (image->depth) { case 32: { unsigned int value; if (image->matte != MagickFalse) { value=ScaleQuantumToLong(pixel.opacity); pixels=PopLongPixel(MSBEndian,value,pixels); } break; } case 16: { unsigned short value; if (image->matte != MagickFalse) { value=ScaleQuantumToShort(pixel.opacity); pixels=PopShortPixel(MSBEndian,value,pixels); } break; } case 8: { unsigned char value; if (image->matte != MagickFalse) { value=(unsigned char) ScaleQuantumToChar(pixel.opacity); pixels=PopCharPixel(value,pixels); } break; } default: (void) ThrowMagickException(&image->exception,GetMagickModule(), CorruptImageError,"ImageDepthNotSupported","`%s'",image->filename); } *pixels++=(unsigned char) length; return(pixels); } switch (image->depth) { case 32: { unsigned int value; value=ScaleQuantumToLong(pixel.red); pixels=PopLongPixel(MSBEndian,value,pixels); if (IsGrayColorspace(image->colorspace) == MagickFalse) { value=ScaleQuantumToLong(pixel.green); pixels=PopLongPixel(MSBEndian,value,pixels); value=ScaleQuantumToLong(pixel.blue); pixels=PopLongPixel(MSBEndian,value,pixels); } if (image->colorspace == CMYKColorspace) { value=ScaleQuantumToLong(index); pixels=PopLongPixel(MSBEndian,value,pixels); } if (image->matte != MagickFalse) { value=ScaleQuantumToLong(pixel.opacity); pixels=PopLongPixel(MSBEndian,value,pixels); } break; } case 16: { unsigned short value; value=ScaleQuantumToShort(pixel.red); pixels=PopShortPixel(MSBEndian,value,pixels); if (IsGrayColorspace(image->colorspace) == MagickFalse) { value=ScaleQuantumToShort(pixel.green); pixels=PopShortPixel(MSBEndian,value,pixels); value=ScaleQuantumToShort(pixel.blue); pixels=PopShortPixel(MSBEndian,value,pixels); } if (image->colorspace == CMYKColorspace) { value=ScaleQuantumToShort(index); pixels=PopShortPixel(MSBEndian,value,pixels); } if (image->matte != MagickFalse) { value=ScaleQuantumToShort(pixel.opacity); pixels=PopShortPixel(MSBEndian,value,pixels); } break; } case 8: { unsigned char value; value=(unsigned char) ScaleQuantumToChar(pixel.red); pixels=PopCharPixel(value,pixels); if (IsGrayColorspace(image->colorspace) == MagickFalse) { value=(unsigned char) ScaleQuantumToChar(pixel.green); pixels=PopCharPixel(value,pixels); value=(unsigned char) ScaleQuantumToChar(pixel.blue); pixels=PopCharPixel(value,pixels); } if (image->colorspace == CMYKColorspace) { value=(unsigned char) ScaleQuantumToChar(index); pixels=PopCharPixel(value,pixels); } if (image->matte != MagickFalse) { value=(unsigned char) ScaleQuantumToChar(pixel.opacity); pixels=PopCharPixel(value,pixels); } break; } default: (void) ThrowMagickException(&image->exception,GetMagickModule(), CorruptImageError,"ImageDepthNotSupported","`%s'",image->filename); } *pixels++=(unsigned char) length; return(pixels); }
0
[ "CWE-772" ]
ImageMagick6
ae3eecad2f59e27123c1a6c891be75d06fc03656
223,392,069,594,394,550,000,000,000,000,000,000,000
159
https://github.com/ImageMagick/ImageMagick/issues/1191
ssize_t get_exactly(const size_t want, ceph::bufferlist& dst) override { return get_decoratee().get_exactly(want, dst); }
0
[ "CWE-770" ]
ceph
ab29bed2fc9f961fe895de1086a8208e21ddaddc
325,171,028,766,702,420,000,000,000,000,000,000,000
3
rgw: fix issues with 'enforce bounds' patch The patch to enforce bounds on max-keys/max-uploads/max-parts had a few issues that would prevent us from compiling it. Instead of changing the code provided by the submitter, we're addressing them in a separate commit to maintain the DCO. Signed-off-by: Joao Eduardo Luis <[email protected]> Signed-off-by: Abhishek Lekshmanan <[email protected]> (cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a) mimic specific fixes: As the largeish change from master g_conf() isn't in mimic yet, use the g_conf global structure, also make rgw_op use the value from req_info ceph context as we do for all the requests
static ssize_t size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cacheinfo *this_leaf = dev_get_drvdata(dev); return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10); }
0
[ "CWE-787" ]
linux
aa838896d87af561a33ecefea1caa4c15a68bc47
167,130,415,390,190,100,000,000,000,000,000,000,000
7
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions Convert the various sprintf fmaily calls in sysfs device show functions to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety. Done with: $ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 . And cocci script: $ cat sysfs_emit_dev.cocci @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - sprintf(buf, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - strcpy(buf, chr); + sysfs_emit(buf, chr); ...> } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - sprintf(buf, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... - len += scnprintf(buf + len, PAGE_SIZE - len, + len += sysfs_emit_at(buf, len, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { ... - strcpy(buf, chr); - return strlen(buf); + return sysfs_emit(buf, chr); } Signed-off-by: Joe Perches <[email protected]> Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com Signed-off-by: Greg Kroah-Hartman <[email protected]>
static gboolean input_device_auto_reconnect(gpointer user_data) { struct input_device *idev = user_data; DBG("path=%s, attempt=%d", idev->path, idev->reconnect_attempt); /* Stop the recurrent reconnection attempts if the device is * reconnected or is marked for removal. */ if (device_is_temporary(idev->device) || btd_device_is_connected(idev->device)) goto bail; /* Only attempt an auto-reconnect for at most 3 minutes (6 * 30s). */ if (idev->reconnect_attempt >= 6) goto bail; /* Check if the profile is already connected. */ if (idev->ctrl_io) goto bail; if (is_connected(idev)) goto bail; idev->reconnect_attempt++; dev_connect(idev); return TRUE; bail: idev->reconnect_timer = 0; return FALSE; }
0
[]
bluez
3cccdbab2324086588df4ccf5f892fb3ce1f1787
332,313,456,995,848,660,000,000,000,000,000,000,000
33
HID accepts bonded device connections only. This change adds a configuration for platforms to choose a more secure posture for the HID profile. While some older mice are known to not support pairing or encryption, some platform may choose a more secure posture by requiring the device to be bonded and require the connection to be encrypted when bonding is required. Reference: https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00352.html
iter_state_to_string(enum iter_state state) { switch (state) { case INIT_REQUEST_STATE : return "INIT REQUEST STATE"; case INIT_REQUEST_2_STATE : return "INIT REQUEST STATE (stage 2)"; case INIT_REQUEST_3_STATE: return "INIT REQUEST STATE (stage 3)"; case QUERYTARGETS_STATE : return "QUERY TARGETS STATE"; case PRIME_RESP_STATE : return "PRIME RESPONSE STATE"; case COLLECT_CLASS_STATE : return "COLLECT CLASS STATE"; case DSNS_FIND_STATE : return "DSNS FIND STATE"; case QUERY_RESP_STATE : return "QUERY RESPONSE STATE"; case FINISHED_STATE : return "FINISHED RESPONSE STATE"; default : return "UNKNOWN ITER STATE"; } }
0
[ "CWE-400" ]
unbound
ba0f382eee814e56900a535778d13206b86b6d49
115,658,364,174,873,300,000,000,000,000,000,000,000
26
- CVE-2020-12662 Unbound can be tricked into amplifying an incoming query into a large number of queries directed to a target. - CVE-2020-12663 Malformed answers from upstream name servers can be used to make Unbound unresponsive.
SparseFeatureReader(const Tensor& indices_t, const Tensor& values_t, int64 batch_size) : values_(values_t.flat<ValuesType>()) { row_splits_.reserve(batch_size + 1); row_splits_.push_back(0); auto indices = indices_t.matrix<int64>(); int64 num_values = values_.size(); int64 i = 0; // value index for (int row = 0; row < batch_size; row++) { while (i < num_values && indices(i, 0) <= row) ++i; row_splits_.push_back(i); } }
0
[ "CWE-125", "CWE-369" ]
tensorflow
44b7f486c0143f68b56c34e2d01e146ee445134a
336,799,617,101,973,470,000,000,000,000,000,000,000
13
Fix out of bounds read in `ragged_cross_op.cc`. PiperOrigin-RevId: 369757702 Change-Id: Ie6e5d2c21513a8d56bf41fcf35960caf76e890f9
static void check_image(size_t pageno, QPDFObjectHandle page) { QPDFObjectHandle image = page.getKey("/Resources").getKey("/XObject").getKey("/Im1"); ImageChecker ic(pageno); image.pipeStreamData(&ic, 0, qpdf_dl_specialized); }
0
[ "CWE-787" ]
qpdf
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
65,934,335,381,530,130,000,000,000,000,000,000,000
7
Fix sign and conversion warnings (major) This makes all integer type conversions that have potential data loss explicit with calls that do range checks and raise an exception. After this commit, qpdf builds with no warnings when -Wsign-conversion -Wconversion is used with gcc or clang or when -W3 -Wd4800 is used with MSVC. This significantly reduces the likelihood of potential crashes from bogus integer values. There are some parts of the code that take int when they should take size_t or an offset. Such places would make qpdf not support files with more than 2^31 of something that usually wouldn't be so large. In the event that such a file shows up and is valid, at least qpdf would raise an error in the right spot so the issue could be legitimately addressed rather than failing in some weird way because of a silent overflow condition.
mesh_state_delete(struct module_qstate* qstate) { struct mesh_area* mesh; struct mesh_state_ref* super, ref; struct mesh_state* mstate; if(!qstate) return; mstate = qstate->mesh_info; mesh = mstate->s.env->mesh; mesh_detach_subs(&mstate->s); if(mstate->list_select == mesh_forever_list) { mesh->num_forever_states --; mesh_list_remove(mstate, &mesh->forever_first, &mesh->forever_last); } else if(mstate->list_select == mesh_jostle_list) { mesh_list_remove(mstate, &mesh->jostle_first, &mesh->jostle_last); } if(!mstate->reply_list && !mstate->cb_list && mstate->super_set.count == 0) { log_assert(mesh->num_detached_states > 0); mesh->num_detached_states--; } if(mstate->reply_list || mstate->cb_list) { log_assert(mesh->num_reply_states > 0); mesh->num_reply_states--; } ref.node.key = &ref; ref.s = mstate; RBTREE_FOR(super, struct mesh_state_ref*, &mstate->super_set) { (void)rbtree_delete(&super->s->sub_set, &ref); } (void)rbtree_delete(&mesh->run, mstate); (void)rbtree_delete(&mesh->all, mstate); mesh_state_cleanup(mstate); }
0
[ "CWE-613", "CWE-703" ]
unbound
f6753a0f1018133df552347a199e0362fc1dac68
305,270,626,307,401,540,000,000,000,000,000,000,000
36
- Fix the novel ghost domain issues CVE-2022-30698 and CVE-2022-30699.
void Magick::Image::whiteThresholdChannel(const ChannelType channel_, const std::string &threshold_) { modifyImage(); GetPPException; GetAndSetPPChannelMask(channel_); WhiteThresholdImage(image(),threshold_.c_str(),exceptionInfo); RestorePPChannelMask; ThrowImageException; }
0
[ "CWE-416" ]
ImageMagick
8c35502217c1879cb8257c617007282eee3fe1cc
156,914,411,168,928,370,000,000,000,000,000,000,000
10
Added missing return to avoid use after free.
char * exif_dump_data(int *dump_free, int format, int components, int length, int motorola_intel, char *value_ptr) /* {{{ */ { char *dump; int len; *dump_free = 0; if (format == TAG_FMT_STRING) { return value_ptr ? value_ptr : "<no data>"; } if (format == TAG_FMT_UNDEFINED) { return "<undefined>\n"; } if (format == TAG_FMT_IFD) { return ""; } if (format == TAG_FMT_SINGLE || format == TAG_FMT_DOUBLE) { return "<not implemented>"; } *dump_free = 1; if (components > 1) { len = spprintf(&dump, 0, "(%d,%d) {", components, length); } else { len = spprintf(&dump, 0, "{"); } while(components > 0) { switch(format) { case TAG_FMT_BYTE: case TAG_FMT_UNDEFINED: case TAG_FMT_STRING: case TAG_FMT_SBYTE: dump = erealloc(dump, len + 4 + 1); snprintf(dump + len, 4 + 1, "0x%02X", *value_ptr); len += 4; value_ptr++; break; case TAG_FMT_USHORT: case TAG_FMT_SSHORT: dump = erealloc(dump, len + 6 + 1); snprintf(dump + len, 6 + 1, "0x%04X", php_ifd_get16s(value_ptr, motorola_intel)); len += 6; value_ptr += 2; break; case TAG_FMT_ULONG: case TAG_FMT_SLONG: dump = erealloc(dump, len + 6 + 1); snprintf(dump + len, 6 + 1, "0x%04X", php_ifd_get32s(value_ptr, motorola_intel)); len += 6; value_ptr += 4; break; case TAG_FMT_URATIONAL: case TAG_FMT_SRATIONAL: dump = erealloc(dump, len + 13 + 1); snprintf(dump + len, 13 + 1, "0x%04X/0x%04X", php_ifd_get32s(value_ptr, motorola_intel), php_ifd_get32s(value_ptr+4, motorola_intel)); len += 13; value_ptr += 8; break; } if (components > 0) { dump = erealloc(dump, len + 2 + 1); snprintf(dump + len, 2 + 1, ", "); len += 2; components--; } else{ break; } } dump = erealloc(dump, len + 1 + 1); snprintf(dump + len, 1 + 1, "}"); return dump; }
0
[ "CWE-125" ]
php-src
887a7b571407f7a49a5e7cf1e612d21ef83fedb4
226,139,405,103,187,420,000,000,000,000,000,000,000
70
Fixed bug #77831 - Heap-buffer-overflow in exif_iif_add_value in EXIF
static int neightbl_fill_param_info(struct neigh_table *tbl, struct neigh_parms *parms, struct sk_buff *skb, struct netlink_callback *cb) { struct ndtmsg *ndtmsg; struct nlmsghdr *nlh; nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg), NLM_F_MULTI); ndtmsg = NLMSG_DATA(nlh); read_lock_bh(&tbl->lock); ndtmsg->ndtm_family = tbl->family; RTA_PUT_STRING(skb, NDTA_NAME, tbl->id); if (neightbl_fill_parms(skb, parms) < 0) goto rtattr_failure; read_unlock_bh(&tbl->lock); return NLMSG_END(skb, nlh); rtattr_failure: read_unlock_bh(&tbl->lock); return NLMSG_CANCEL(skb, nlh); nlmsg_failure: return -1; }
1
[ "CWE-200" ]
linux-2.6
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
165,521,843,735,665,700,000,000,000,000,000,000,000
30
[NETLINK]: Missing initializations in dumped data Mostly missing initialization of padding fields of 1 or 2 bytes length, two instances of uninitialized nlmsgerr->msg of 16 bytes length. Signed-off-by: Patrick McHardy <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int __f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl, struct page *ipage) { int name_index; void *value = NULL; size_t size = 0; int error; switch (type) { case ACL_TYPE_ACCESS: name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { error = posix_acl_update_mode(inode, &inode->i_mode, &acl); if (error) return error; set_acl_inode(inode, inode->i_mode); } break; case ACL_TYPE_DEFAULT: name_index = F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT; if (!S_ISDIR(inode->i_mode)) return acl ? -EACCES : 0; break; default: return -EINVAL; } if (acl) { value = f2fs_acl_to_disk(acl, &size); if (IS_ERR(value)) { clear_inode_flag(inode, FI_ACL_MODE); return (int)PTR_ERR(value); } } error = f2fs_setxattr(inode, name_index, "", value, size, ipage, 0); kfree(value); if (!error) set_cached_acl(inode, type, acl); clear_inode_flag(inode, FI_ACL_MODE); return error; }
0
[ "CWE-862", "CWE-285" ]
linux
073931017b49d9458aa351605b43a7e34598caef
49,693,915,810,803,590,000,000,000,000,000,000,000
46
posix_acl: Clear SGID bit when setting file permissions When file permissions are modified via chmod(2) and the user is not in the owning group or capable of CAP_FSETID, the setgid bit is cleared in inode_change_ok(). Setting a POSIX ACL via setxattr(2) sets the file permissions as well as the new ACL, but doesn't clear the setgid bit in a similar way; this allows to bypass the check in chmod(2). Fix that. References: CVE-2016-7097 Reviewed-by: Christoph Hellwig <[email protected]> Reviewed-by: Jeff Layton <[email protected]> Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Andreas Gruenbacher <[email protected]>
boost::intrusive_ptr<Expression> ExpressionRegexFind::parse(ExpressionContext* const expCtx, BSONElement expr, const VariablesParseState& vpsIn) { auto opName = "$regexFind"_sd; auto [input, regex, options] = CommonRegexParse(expCtx, expr, vpsIn, opName); return new ExpressionRegexFind( expCtx, std::move(input), std::move(regex), std::move(options), opName); }
0
[]
mongo
1772b9a0393b55e6a280a35e8f0a1f75c014f301
59,886,930,669,959,970,000,000,000,000,000,000,000
8
SERVER-49404 Enforce additional checks in $arrayToObject
static MagickBooleanType WriteSGIImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { CompressionType compression; const char *value; MagickBooleanType status; MagickOffsetType scene; MagickSizeType number_pixels; MemoryInfo *pixel_info; SGIInfo iris_info; register const Quantum *p; register ssize_t i, x; register unsigned char *q; ssize_t y, z; unsigned char *pixels, *packets; /* Open output image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->columns > 65535UL) || (image->rows > 65535UL)) ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit"); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); scene=0; do { /* Initialize SGI raster file header. */ (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) ResetMagickMemory(&iris_info,0,sizeof(iris_info)); iris_info.magic=0x01DA; compression=image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; if (image->depth > 8) compression=NoCompression; if (compression == NoCompression) iris_info.storage=(unsigned char) 0x00; else iris_info.storage=(unsigned char) 0x01; iris_info.bytes_per_pixel=(unsigned char) (image->depth > 8 ? 2 : 1); iris_info.dimension=3; iris_info.columns=(unsigned short) image->columns; iris_info.rows=(unsigned short) image->rows; if (image->alpha_trait != UndefinedPixelTrait) iris_info.depth=4; else { if ((image_info->type != TrueColorType) && (SetImageGray(image,exception) != MagickFalse)) { iris_info.dimension=2; iris_info.depth=1; } else iris_info.depth=3; } iris_info.minimum_value=0; iris_info.maximum_value=(size_t) (image->depth <= 8 ? 1UL*ScaleQuantumToChar(QuantumRange) : 1UL*ScaleQuantumToShort(QuantumRange)); /* Write SGI header. */ (void) WriteBlobMSBShort(image,iris_info.magic); (void) WriteBlobByte(image,iris_info.storage); (void) WriteBlobByte(image,iris_info.bytes_per_pixel); (void) WriteBlobMSBShort(image,iris_info.dimension); (void) WriteBlobMSBShort(image,iris_info.columns); (void) WriteBlobMSBShort(image,iris_info.rows); (void) WriteBlobMSBShort(image,iris_info.depth); (void) WriteBlobMSBLong(image,(unsigned int) iris_info.minimum_value); (void) WriteBlobMSBLong(image,(unsigned int) iris_info.maximum_value); (void) WriteBlobMSBLong(image,(unsigned int) iris_info.sans); value=GetImageProperty(image,"label",exception); if (value != (const char *) NULL) (void) CopyMagickString(iris_info.name,value,sizeof(iris_info.name)); (void) WriteBlob(image,sizeof(iris_info.name),(unsigned char *) iris_info.name); (void) WriteBlobMSBLong(image,(unsigned int) iris_info.pixel_format); (void) WriteBlob(image,sizeof(iris_info.filler),iris_info.filler); /* Allocate SGI pixels. 
*/ number_pixels=(MagickSizeType) image->columns*image->rows; if ((4*iris_info.bytes_per_pixel*number_pixels) != ((MagickSizeType) (size_t) (4*iris_info.bytes_per_pixel*number_pixels))) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); pixel_info=AcquireVirtualMemory((size_t) number_pixels,4* iris_info.bytes_per_pixel*sizeof(*pixels)); if (pixel_info == (MemoryInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); /* Convert image pixels to uncompressed SGI pixels. */ for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (image->depth <= 8) for (x=0; x < (ssize_t) image->columns; x++) { register unsigned char *q; q=(unsigned char *) pixels; q+=((iris_info.rows-1)-y)*(4*iris_info.columns)+4*x; *q++=ScaleQuantumToChar(GetPixelRed(image,p)); *q++=ScaleQuantumToChar(GetPixelGreen(image,p)); *q++=ScaleQuantumToChar(GetPixelBlue(image,p)); *q++=ScaleQuantumToChar(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } else for (x=0; x < (ssize_t) image->columns; x++) { register unsigned short *q; q=(unsigned short *) pixels; q+=((iris_info.rows-1)-y)*(4*iris_info.columns)+4*x; *q++=ScaleQuantumToShort(GetPixelRed(image,p)); *q++=ScaleQuantumToShort(GetPixelGreen(image,p)); *q++=ScaleQuantumToShort(GetPixelBlue(image,p)); *q++=ScaleQuantumToShort(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } switch (compression) { case NoCompression: { /* Write uncompressed SGI pixels. */ for (z=0; z < (ssize_t) iris_info.depth; z++) { for (y=0; y < (ssize_t) iris_info.rows; y++) { if (image->depth <= 8) for (x=0; x < (ssize_t) iris_info.columns; x++) { register unsigned char *q; q=(unsigned char *) pixels; q+=y*(4*iris_info.columns)+4*x+z; (void) WriteBlobByte(image,*q); } else for (x=0; x < (ssize_t) iris_info.columns; x++) { register unsigned short *q; q=(unsigned short *) pixels; q+=y*(4*iris_info.columns)+4*x+z; (void) WriteBlobMSBShort(image,*q); } } } break; } default: { MemoryInfo *packet_info; size_t length, number_packets, *runlength; ssize_t offset, *offsets; /* Convert SGI uncompressed pixels. 
*/ offsets=(ssize_t *) AcquireQuantumMemory(iris_info.rows, iris_info.depth*sizeof(*offsets)); runlength=(size_t *) AcquireQuantumMemory(iris_info.rows, iris_info.depth*sizeof(*runlength)); packet_info=AcquireVirtualMemory((2*(size_t) iris_info.columns+10)* image->rows,4*sizeof(*packets)); if ((offsets == (ssize_t *) NULL) || (runlength == (size_t *) NULL) || (packet_info == (MemoryInfo *) NULL)) { if (offsets != (ssize_t *) NULL) offsets=(ssize_t *) RelinquishMagickMemory(offsets); if (runlength != (size_t *) NULL) runlength=(size_t *) RelinquishMagickMemory(runlength); if (packet_info != (MemoryInfo *) NULL) packet_info=RelinquishVirtualMemory(packet_info); ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); } packets=(unsigned char *) GetVirtualMemoryBlob(packet_info); offset=512+4*2*((ssize_t) iris_info.rows*iris_info.depth); number_packets=0; q=pixels; for (y=0; y < (ssize_t) iris_info.rows; y++) { for (z=0; z < (ssize_t) iris_info.depth; z++) { length=SGIEncode(q+z,(size_t) iris_info.columns,packets+ number_packets); number_packets+=length; offsets[y+z*iris_info.rows]=offset; runlength[y+z*iris_info.rows]=(size_t) length; offset+=(ssize_t) length; } q+=(iris_info.columns*4); } /* Write out line start and length tables and runlength-encoded pixels. */ for (i=0; i < (ssize_t) (iris_info.rows*iris_info.depth); i++) (void) WriteBlobMSBLong(image,(unsigned int) offsets[i]); for (i=0; i < (ssize_t) (iris_info.rows*iris_info.depth); i++) (void) WriteBlobMSBLong(image,(unsigned int) runlength[i]); (void) WriteBlob(image,number_packets,packets); /* Relinquish resources. */ offsets=(ssize_t *) RelinquishMagickMemory(offsets); runlength=(size_t *) RelinquishMagickMemory(runlength); packet_info=RelinquishVirtualMemory(packet_info); break; } } pixel_info=RelinquishVirtualMemory(pixel_info); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); status=SetImageProgress(image,SaveImagesTag,scene++, GetImageListLength(image)); if (status == MagickFalse) break; } while (image_info->adjoin != MagickFalse); (void) CloseBlob(image); return(MagickTrue); }
0
[ "CWE-125" ]
ImageMagick
7afcf9f71043df15508e46f079387bd4689a738d
209,953,745,655,143,800,000,000,000,000,000,000,000
289
Prevent buffer overflow in BMP & SGI coders (bug report from pwchen&rayzhong of tencent)
print_value(const oid * objid, size_t objidlen, const netsnmp_variable_list * variable) { fprint_value(stdout, objid, objidlen, variable); }
0
[ "CWE-59", "CWE-61" ]
net-snmp
4fd9a450444a434a993bc72f7c3486ccce41f602
148,721,499,248,892,470,000,000,000,000,000,000,000
5
CHANGES: snmpd: Stop reading and writing the mib_indexes/* files Caching directory contents is something the operating system should do and is not something Net-SNMP should do. Instead of storing a copy of the directory contents in ${tmp_dir}/mib_indexes/${n}, always scan a MIB directory.
std::string read_map(const std::string& name) { std::string res; std::string map_location = get_wml_location("maps/" + name); if(!map_location.empty()) { res = read_file(map_location); } if (res.empty()) { res = read_file(get_user_data_dir() + "/editor/maps/" + name); } return res; }
0
[ "CWE-200" ]
wesnoth
af61f9fdd15cd439da9e2fe5fa39d174c923eaae
128,705,188,463,230,370,000,000,000,000,000,000,000
14
fs: Use game data path to resolve ./ in the absence of a current_dir Fixes a file content disclosure bug (#22042) affecting functionality relying on the get_wml_location() function and not passing a non-empty value for the current_dir parameter. See <https://gna.org/bugs/?22042> for details. This is a candidate for the 1.10 and 1.12 branches. (Backported from master, commit 314425ab0e57b32909d324f7d4bf213d62cbd3b5.)
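The commit message above describes a file content disclosure caused by resolving a relative name ("./") without anchoring it to the game data directory. The C sketch below shows one common way to confine such a lookup to a trusted base directory; it is an illustration only, and resolve_under_base and its exact behaviour are assumptions, not code from the wesnoth tree.

/* Illustrative sketch, not a dataset row: confine a user-supplied relative
 * name to a trusted base directory. Hypothetical names throughout. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a heap-allocated resolved path, or NULL if "name" escapes "base". */
static char *resolve_under_base(const char *base, const char *name)
{
    char joined[PATH_MAX];
    char resolved[PATH_MAX];
    char base_real[PATH_MAX];

    if (snprintf(joined, sizeof joined, "%s/%s", base, name) >= (int)sizeof joined)
        return NULL;                      /* would truncate */
    if (realpath(base, base_real) == NULL)
        return NULL;
    if (realpath(joined, resolved) == NULL)
        return NULL;                      /* nonexistent or unresolvable */
    size_t blen = strlen(base_real);
    if (strncmp(resolved, base_real, blen) != 0 ||
        (resolved[blen] != '/' && resolved[blen] != '\0'))
        return NULL;                      /* "../" escaped the data dir */
    return strdup(resolved);
}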
onig_is_code_in_cc_len(int elen, OnigCodePoint code, CClassNode* cc) { int found; if (elen > 1 || (code >= SINGLE_BYTE_SIZE)) { if (IS_NULL(cc->mbuf)) { found = 0; } else { found = (onig_is_in_code_range(cc->mbuf->p, code) != 0 ? 1 : 0); } } else { found = (BITSET_AT(cc->bs, code) == 0 ? 0 : 1); } if (IS_NCCLASS_NOT(cc)) return !found; else return found; }
0
[ "CWE-125" ]
php-src
c6e34d91b88638966662caac62c4d0e90538e317
162,569,028,748,918,100,000,000,000,000,000,000,000
21
Fix bug #77371 (heap buffer overflow in mb regex functions - compile_string_node)
mesalink_close(struct Curl_easy *data, struct connectdata *conn, int sockindex) { struct ssl_connect_data *connssl = &conn->ssl[sockindex]; (void) data; if(BACKEND->handle) { (void)SSL_shutdown(BACKEND->handle); SSL_free(BACKEND->handle); BACKEND->handle = NULL; } if(BACKEND->ctx) { SSL_CTX_free(BACKEND->ctx); BACKEND->ctx = NULL; } }
0
[ "CWE-290" ]
curl
b09c8ee15771c614c4bf3ddac893cdb12187c844
72,603,266,854,869,045,000,000,000,000,000,000,000
16
vtls: add 'isproxy' argument to Curl_ssl_get/addsessionid() To make sure we set and extract the correct session. Reported-by: Mingtao Yang Bug: https://curl.se/docs/CVE-2021-22890.html CVE-2021-22890
Expression_Obj Parser::lex_almost_any_value_chars() { const char* match = lex < one_plus < alternatives < sequence < exactly <'\\'>, any_char >, sequence < negate < sequence < exactly < url_kwd >, exactly <'('> > >, neg_class_char < almost_any_value_class > >, sequence < exactly <'/'>, negate < alternatives < exactly <'/'>, exactly <'*'> > > >, sequence < exactly <'\\'>, exactly <'#'>, negate < exactly <'{'> > >, sequence < exactly <'!'>, negate < alpha > > > > >(false); if (match) { return SASS_MEMORY_NEW(String_Constant, pstate, lexed); } return {}; }
0
[ "CWE-125" ]
libsass
eb15533b07773c30dc03c9d742865604f47120ef
92,274,722,133,024,060,000,000,000,000,000,000,000
51
Fix memory leak in `parse_ie_keyword_arg` `kwd_arg` would never get freed when there was a parse error in `parse_ie_keyword_arg`. Closes #2656
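The libsass message above is about an allocation that leaked whenever the parser hit an error before reaching the success path. A minimal C sketch of that pattern and the fix, with hypothetical names (the real code manipulates AST nodes rather than strings):

/* Illustrative sketch, not a dataset row: release the buffer on the error
 * path, not only on success. */
#include <stdlib.h>
#include <string.h>

static char *parse_keyword_arg(const char *input)
{
    char *kwd_arg = strdup(input);       /* allocated up front */
    if (kwd_arg == NULL)
        return NULL;
    if (strchr(kwd_arg, '=') == NULL) {  /* parse error */
        free(kwd_arg);                   /* the fix: free before bailing out */
        return NULL;
    }
    return kwd_arg;                      /* caller frees on success */
}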
ex_rewind(exarg_T *eap) { do_argfile(eap, 0); }
0
[ "CWE-416", "CWE-125" ]
vim
6f98371532fcff911b462d51bc64f2ce8a6ae682
312,951,016,329,057,760,000,000,000,000,000,000,000
4
patch 8.2.3884: crash when clearing the argument list while using it Problem: Crash when clearing the argument list while using it. Solution: Lock the argument list for ":all".
getresponse( int opcode, int associd, u_short *rstatus, int *rsize, const char **rdata, int timeo ) { struct ntp_control rpkt; struct sock_timeval tvo; u_short offsets[MAXFRAGS+1]; u_short counts[MAXFRAGS+1]; u_short offset; u_short count; size_t numfrags; size_t f; size_t ff; int seenlastfrag; int shouldbesize; fd_set fds; int n; int errcode; /* * This is pretty tricky. We may get between 1 and MAXFRAG packets * back in response to the request. We peel the data out of * each packet and collect it in one long block. When the last * packet in the sequence is received we'll know how much data we * should have had. Note we use one long time out, should reconsider. */ *rsize = 0; if (rstatus) *rstatus = 0; *rdata = (char *)pktdata; numfrags = 0; seenlastfrag = 0; FD_ZERO(&fds); /* * Loop until we have an error or a complete response. Nearly all * code paths to loop again use continue. */ for (;;) { if (numfrags == 0) tvo = tvout; else tvo = tvsout; FD_SET(sockfd, &fds); n = select(sockfd + 1, &fds, NULL, NULL, &tvo); if (n == -1) { warning("select fails"); return -1; } if (n == 0) { /* * Timed out. Return what we have */ if (numfrags == 0) { if (timeo) fprintf(stderr, "%s: timed out, nothing received\n", currenthost); return ERR_TIMEOUT; } if (timeo) fprintf(stderr, "%s: timed out with incomplete data\n", currenthost); if (debug) { fprintf(stderr, "ERR_INCOMPLETE: Received fragments:\n"); for (f = 0; f < numfrags; f++) fprintf(stderr, "%2u: %5d %5d\t%3d octets\n", (u_int)f, offsets[f], offsets[f] + counts[f], counts[f]); fprintf(stderr, "last fragment %sreceived\n", (seenlastfrag) ? "" : "not "); } return ERR_INCOMPLETE; } n = recv(sockfd, (char *)&rpkt, sizeof(rpkt), 0); if (n == -1) { warning("read"); return -1; } if (debug >= 4) { printf("Response packet:\n"); dump_hex_printable(&rpkt, n); } /* * Check for format errors. Bug proofing. */ if (n < (int)CTL_HEADER_LEN) { if (debug) printf("Short (%d byte) packet received\n", n); continue; } if (PKT_VERSION(rpkt.li_vn_mode) > NTP_VERSION || PKT_VERSION(rpkt.li_vn_mode) < NTP_OLDVERSION) { if (debug) printf("Packet received with version %d\n", PKT_VERSION(rpkt.li_vn_mode)); continue; } if (PKT_MODE(rpkt.li_vn_mode) != MODE_CONTROL) { if (debug) printf("Packet received with mode %d\n", PKT_MODE(rpkt.li_vn_mode)); continue; } if (!CTL_ISRESPONSE(rpkt.r_m_e_op)) { if (debug) printf("Received request packet, wanted response\n"); continue; } /* * Check opcode and sequence number for a match. * Could be old data getting to us. */ if (ntohs(rpkt.sequence) != sequence) { if (debug) printf("Received sequnce number %d, wanted %d\n", ntohs(rpkt.sequence), sequence); continue; } if (CTL_OP(rpkt.r_m_e_op) != opcode) { if (debug) printf( "Received opcode %d, wanted %d (sequence number okay)\n", CTL_OP(rpkt.r_m_e_op), opcode); continue; } /* * Check the error code. If non-zero, return it. */ if (CTL_ISERROR(rpkt.r_m_e_op)) { errcode = (ntohs(rpkt.status) >> 8) & 0xff; if (CTL_ISMORE(rpkt.r_m_e_op)) TRACE(1, ("Error code %d received on not-final packet\n", errcode)); if (errcode == CERR_UNSPEC) return ERR_UNSPEC; return errcode; } /* * Check the association ID to make sure it matches what * we sent. */ if (ntohs(rpkt.associd) != associd) { TRACE(1, ("Association ID %d doesn't match expected %d\n", ntohs(rpkt.associd), associd)); /* * Hack for silly fuzzballs which, at the time of writing, * return an assID of sys.peer when queried for system variables. */ #ifdef notdef continue; #endif } /* * Collect offset and count. Make sure they make sense. 
*/ offset = ntohs(rpkt.offset); count = ntohs(rpkt.count); /* * validate received payload size is padded to next 32-bit * boundary and no smaller than claimed by rpkt.count */ if (n & 0x3) { TRACE(1, ("Response packet not padded, size = %d\n", n)); continue; } shouldbesize = (CTL_HEADER_LEN + count + 3) & ~3; if (n < shouldbesize) { printf("Response packet claims %u octets payload, above %ld received\n", count, (long)n - CTL_HEADER_LEN); return ERR_INCOMPLETE; } if (debug >= 3 && shouldbesize > n) { u_int32 key; u_int32 *lpkt; int maclen; /* * Usually we ignore authentication, but for debugging purposes * we watch it here. */ /* round to 8 octet boundary */ shouldbesize = (shouldbesize + 7) & ~7; maclen = n - shouldbesize; if (maclen >= (int)MIN_MAC_LEN) { printf( "Packet shows signs of authentication (total %d, data %d, mac %d)\n", n, shouldbesize, maclen); lpkt = (u_int32 *)&rpkt; printf("%08lx %08lx %08lx %08lx %08lx %08lx\n", (u_long)ntohl(lpkt[(n - maclen)/sizeof(u_int32) - 3]), (u_long)ntohl(lpkt[(n - maclen)/sizeof(u_int32) - 2]), (u_long)ntohl(lpkt[(n - maclen)/sizeof(u_int32) - 1]), (u_long)ntohl(lpkt[(n - maclen)/sizeof(u_int32)]), (u_long)ntohl(lpkt[(n - maclen)/sizeof(u_int32) + 1]), (u_long)ntohl(lpkt[(n - maclen)/sizeof(u_int32) + 2])); key = ntohl(lpkt[(n - maclen) / sizeof(u_int32)]); printf("Authenticated with keyid %lu\n", (u_long)key); if (key != 0 && key != info_auth_keyid) { printf("We don't know that key\n"); } else { if (authdecrypt(key, (u_int32 *)&rpkt, n - maclen, maclen)) { printf("Auth okay!\n"); } else { printf("Auth failed!\n"); } } } } TRACE(2, ("Got packet, size = %d\n", n)); if (count > (n - CTL_HEADER_LEN)) { TRACE(1, ("Received count of %u octets, data in packet is %ld\n", count, (long)n - CTL_HEADER_LEN)); continue; } if (count == 0 && CTL_ISMORE(rpkt.r_m_e_op)) { TRACE(1, ("Received count of 0 in non-final fragment\n")); continue; } if (offset + count > sizeof(pktdata)) { TRACE(1, ("Offset %u, count %u, too big for buffer\n", offset, count)); return ERR_TOOMUCH; } if (seenlastfrag && !CTL_ISMORE(rpkt.r_m_e_op)) { TRACE(1, ("Received second last fragment packet\n")); continue; } /* * So far, so good. Record this fragment, making sure it doesn't * overlap anything. */ TRACE(2, ("Packet okay\n")); if (numfrags > (MAXFRAGS - 1)) { TRACE(2, ("Number of fragments exceeds maximum %d\n", MAXFRAGS - 1)); return ERR_TOOMUCH; } /* * Find the position for the fragment relative to any * previously received. */ for (f = 0; f < numfrags && offsets[f] < offset; f++) { /* empty body */ ; } if (f < numfrags && offset == offsets[f]) { TRACE(1, ("duplicate %u octets at %u ignored, prior %u at %u\n", count, offset, counts[f], offsets[f])); continue; } if (f > 0 && (offsets[f-1] + counts[f-1]) > offset) { TRACE(1, ("received frag at %u overlaps with %u octet frag at %u\n", offset, counts[f-1], offsets[f-1])); continue; } if (f < numfrags && (offset + count) > offsets[f]) { TRACE(1, ("received %u octet frag at %u overlaps with frag at %u\n", count, offset, offsets[f])); continue; } for (ff = numfrags; ff > f; ff--) { offsets[ff] = offsets[ff-1]; counts[ff] = counts[ff-1]; } offsets[f] = offset; counts[f] = count; numfrags++; /* * Got that stuffed in right. Figure out if this was the last. * Record status info out of the last packet. */ if (!CTL_ISMORE(rpkt.r_m_e_op)) { seenlastfrag = 1; if (rstatus != 0) *rstatus = ntohs(rpkt.status); } /* * Copy the data into the data buffer. 
*/ memcpy((char *)pktdata + offset, &rpkt.u, count); /* * If we've seen the last fragment, look for holes in the sequence. * If there aren't any, we're done. */ if (seenlastfrag && offsets[0] == 0) { for (f = 1; f < numfrags; f++) if (offsets[f-1] + counts[f-1] != offsets[f]) break; if (f == numfrags) { *rsize = offsets[f-1] + counts[f-1]; TRACE(1, ("%lu packets reassembled into response\n", (u_long)numfrags)); return 0; } } } /* giant for (;;) collecting response packets */ } /* getresponse() */
0
[ "CWE-20" ]
ntp
07a5b8141e354a998a52994c3c9cd547927e56ce
246,943,399,583,534,250,000,000,000,000,000,000,000
343
[TALOS-CAN-0063] avoid buffer overrun in ntpq
int LocatePatch(cmsIT8* it8, const char* cPatch) { int i; const char *data; TABLE* t = GetTable(it8); for (i=0; i < t-> nPatches; i++) { data = GetData(it8, i, t->SampleID); if (data != NULL) { if (cmsstrcasecmp(data, cPatch) == 0) return i; } } // SynError(it8, "Couldn't find patch '%s'\n", cPatch); return -1; }
0
[]
Little-CMS
65e2f1df3495edc984f7e0d7b7b24e29d851e240
250,160,026,499,974,070,000,000,000,000,000,000,000
20
Fix some warnings from static analysis
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, struct inode *inode, unsigned int from, unsigned int to, int new) { int ret = 0; struct buffer_head *head, *bh, *wait[2], **wait_bh = wait; unsigned int block_end, block_start; unsigned int bsize = i_blocksize(inode); if (!page_has_buffers(page)) create_empty_buffers(page, bsize, 0); head = page_buffers(page); for (bh = head, block_start = 0; bh != head || !block_start; bh = bh->b_this_page, block_start += bsize) { block_end = block_start + bsize; clear_buffer_new(bh); /* * Ignore blocks outside of our i/o range - * they may belong to unallocated clusters. */ if (block_start >= to || block_end <= from) { if (PageUptodate(page)) set_buffer_uptodate(bh); continue; } /* * For an allocating write with cluster size >= page * size, we always write the entire page. */ if (new) set_buffer_new(bh); if (!buffer_mapped(bh)) { map_bh(bh, inode->i_sb, *p_blkno); clean_bdev_bh_alias(bh); } if (PageUptodate(page)) { if (!buffer_uptodate(bh)) set_buffer_uptodate(bh); } else if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_new(bh) && ocfs2_should_read_blk(inode, page, block_start) && (block_start < from || block_end > to)) { ll_rw_block(REQ_OP_READ, 0, 1, &bh); *wait_bh++=bh; } *p_blkno = *p_blkno + 1; } /* * If we issued read requests - let them complete. */ while(wait_bh > wait) { wait_on_buffer(*--wait_bh); if (!buffer_uptodate(*wait_bh)) ret = -EIO; } if (ret == 0 || !new) return ret; /* * If we get -EIO above, zero out any newly allocated blocks * to avoid exposing stale data. */ bh = head; block_start = 0; do { block_end = block_start + bsize; if (block_end <= from) goto next_bh; if (block_start >= to) break; zero_user(page, block_start, bh->b_size); set_buffer_uptodate(bh); mark_buffer_dirty(bh); next_bh: block_start = block_end; bh = bh->b_this_page; } while (bh != head); return ret; }
0
[ "CWE-362" ]
linux
3e4c56d41eef5595035872a2ec5a483f42e8917f
271,781,297,336,558,100,000,000,000,000,000,000,000
91
ocfs2: ip_alloc_sem should be taken in ocfs2_get_block() ip_alloc_sem should be taken in ocfs2_get_block() when reading file in DIRECT mode to prevent concurrent access to extent tree with ocfs2_dio_end_io_write(), which may cause BUGON in the following situation: read file 'A' end_io of writing file 'A' vfs_read __vfs_read ocfs2_file_read_iter generic_file_read_iter ocfs2_direct_IO __blockdev_direct_IO do_blockdev_direct_IO do_direct_IO get_more_blocks ocfs2_get_block ocfs2_extent_map_get_blocks ocfs2_get_clusters ocfs2_get_clusters_nocache() ocfs2_search_extent_list return the index of record which contains the v_cluster, that is v_cluster > rec[i]->e_cpos. ocfs2_dio_end_io ocfs2_dio_end_io_write down_write(&oi->ip_alloc_sem); ocfs2_mark_extent_written ocfs2_change_extent_flag ocfs2_split_extent ... --> modify the rec[i]->e_cpos, resulting in v_cluster < rec[i]->e_cpos. BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos)) [[email protected]: v3] Link: http://lkml.kernel.org/r/[email protected] Link: http://lkml.kernel.org/r/[email protected] Fixes: c15471f79506 ("ocfs2: fix sparse file & data ordering issue in direct io") Signed-off-by: Alex Chen <[email protected]> Reviewed-by: Jun Piao <[email protected]> Reviewed-by: Joseph Qi <[email protected]> Reviewed-by: Gang He <[email protected]> Acked-by: Changwei Ge <[email protected]> Cc: Mark Fasheh <[email protected]> Cc: Joel Becker <[email protected]> Cc: Junxiao Bi <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
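The ocfs2 commit message above explains a read/modify race on the extent tree: the direct-IO read path looked up extents without holding the lock that the write-completion path takes while splitting extents. The sketch below shows the general locking shape using POSIX rwlocks in place of the kernel rw_semaphore; the array standing in for the extent tree and all names are assumptions.

/* Illustrative sketch, not a dataset row: readers take the same rwlock that
 * the end-io write path holds, so lookups never race with tree changes. */
#include <pthread.h>

static pthread_rwlock_t alloc_sem = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long extent_map[256];    /* stand-in for the extent tree */

unsigned long lookup_block(unsigned int v_cluster)
{
    unsigned long blkno;
    pthread_rwlock_rdlock(&alloc_sem);   /* what the fix adds on the read path */
    blkno = extent_map[v_cluster % 256];
    pthread_rwlock_unlock(&alloc_sem);
    return blkno;
}

void mark_extent_written(unsigned int v_cluster, unsigned long blkno)
{
    pthread_rwlock_wrlock(&alloc_sem);   /* the end-io path already held this */
    extent_map[v_cluster % 256] = blkno;
    pthread_rwlock_unlock(&alloc_sem);
}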
static NTSTATUS fcb_or_dos_open(struct smb_request *req, connection_struct *conn, files_struct *fsp_to_dup_into, const struct smb_filename *smb_fname, struct file_id id, uint16 file_pid, uint64_t vuid, uint32 access_mask, uint32 share_access, uint32 create_options) { files_struct *fsp; DEBUG(5,("fcb_or_dos_open: attempting old open semantics for " "file %s.\n", smb_fname_str_dbg(smb_fname))); for(fsp = file_find_di_first(conn->sconn, id); fsp; fsp = file_find_di_next(fsp)) { DEBUG(10,("fcb_or_dos_open: checking file %s, fd = %d, " "vuid = %llu, file_pid = %u, private_options = 0x%x " "access_mask = 0x%x\n", fsp_str_dbg(fsp), fsp->fh->fd, (unsigned long long)fsp->vuid, (unsigned int)fsp->file_pid, (unsigned int)fsp->fh->private_options, (unsigned int)fsp->access_mask )); if (fsp != fsp_to_dup_into && fsp->fh->fd != -1 && fsp->vuid == vuid && fsp->file_pid == file_pid && (fsp->fh->private_options & (NTCREATEX_OPTIONS_PRIVATE_DENY_DOS | NTCREATEX_OPTIONS_PRIVATE_DENY_FCB)) && (fsp->access_mask & FILE_WRITE_DATA) && strequal(fsp->fsp_name->base_name, smb_fname->base_name) && strequal(fsp->fsp_name->stream_name, smb_fname->stream_name)) { DEBUG(10,("fcb_or_dos_open: file match\n")); break; } } if (!fsp) { return NT_STATUS_NOT_FOUND; } /* quite an insane set of semantics ... */ if (is_executable(smb_fname->base_name) && (fsp->fh->private_options & NTCREATEX_OPTIONS_PRIVATE_DENY_DOS)) { DEBUG(10,("fcb_or_dos_open: file fail due to is_executable.\n")); return NT_STATUS_INVALID_PARAMETER; } /* We need to duplicate this fsp. */ return dup_file_fsp(req, fsp, access_mask, share_access, create_options, fsp_to_dup_into); }
0
[]
samba
60f922bf1bd8816eacbb32c24793ad1f97a1d9f2
301,483,066,640,694,460,000,000,000,000,000,000,000
57
Fix bug #10229 - No access check verification on stream files. https://bugzilla.samba.org/show_bug.cgi?id=10229 We need to check if the requested access mask could be used to open the underlying file (if it existed), as we're passing in zero for the access mask to the base filename. Signed-off-by: Jeremy Allison <[email protected]> Reviewed-by: Stefan Metzmacher <[email protected]> Reviewed-by: David Disseldorp <[email protected]>
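The samba fix described above adds an access check against the underlying base file before a deny-DOS/FCB open on a stream is satisfied, instead of passing a zero access mask. The sketch below uses POSIX access() as a rough stand-in for the SMB access-mask check; the function name and semantics are assumptions, not samba code.

/* Illustrative sketch, not a dataset row: deny the stream open if the base
 * file itself would deny the requested access. */
#include <unistd.h>

int stream_open_allowed(const char *base_path, int want_write)
{
    int mode = R_OK | (want_write ? W_OK : 0);
    return access(base_path, mode) == 0;
}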
nfa_max_width(nfa_state_T *startstate, int depth) { int l, r; nfa_state_T *state = startstate; int len = 0; // detect looping in a NFA_SPLIT if (depth > 4) return -1; while (state != NULL) { switch (state->c) { case NFA_END_INVISIBLE: case NFA_END_INVISIBLE_NEG: // the end, return what we have return len; case NFA_SPLIT: // two alternatives, use the maximum l = nfa_max_width(state->out, depth + 1); r = nfa_max_width(state->out1, depth + 1); if (l < 0 || r < 0) return -1; return len + (l > r ? l : r); case NFA_ANY: case NFA_START_COLL: case NFA_START_NEG_COLL: // matches some character, including composing chars if (enc_utf8) len += MB_MAXBYTES; else if (has_mbyte) len += 2; else ++len; if (state->c != NFA_ANY) { // skip over the characters state = state->out1->out; continue; } break; case NFA_DIGIT: case NFA_WHITE: case NFA_HEX: case NFA_OCTAL: // ascii ++len; break; case NFA_IDENT: case NFA_SIDENT: case NFA_KWORD: case NFA_SKWORD: case NFA_FNAME: case NFA_SFNAME: case NFA_PRINT: case NFA_SPRINT: case NFA_NWHITE: case NFA_NDIGIT: case NFA_NHEX: case NFA_NOCTAL: case NFA_WORD: case NFA_NWORD: case NFA_HEAD: case NFA_NHEAD: case NFA_ALPHA: case NFA_NALPHA: case NFA_LOWER: case NFA_NLOWER: case NFA_UPPER: case NFA_NUPPER: case NFA_LOWER_IC: case NFA_NLOWER_IC: case NFA_UPPER_IC: case NFA_NUPPER_IC: case NFA_ANY_COMPOSING: // possibly non-ascii if (has_mbyte) len += 3; else ++len; break; case NFA_START_INVISIBLE: case NFA_START_INVISIBLE_NEG: case NFA_START_INVISIBLE_BEFORE: case NFA_START_INVISIBLE_BEFORE_NEG: // zero-width, out1 points to the END state state = state->out1->out; continue; case NFA_BACKREF1: case NFA_BACKREF2: case NFA_BACKREF3: case NFA_BACKREF4: case NFA_BACKREF5: case NFA_BACKREF6: case NFA_BACKREF7: case NFA_BACKREF8: case NFA_BACKREF9: #ifdef FEAT_SYN_HL case NFA_ZREF1: case NFA_ZREF2: case NFA_ZREF3: case NFA_ZREF4: case NFA_ZREF5: case NFA_ZREF6: case NFA_ZREF7: case NFA_ZREF8: case NFA_ZREF9: #endif case NFA_NEWL: case NFA_SKIP: // unknown width return -1; case NFA_BOL: case NFA_EOL: case NFA_BOF: case NFA_EOF: case NFA_BOW: case NFA_EOW: case NFA_MOPEN: case NFA_MOPEN1: case NFA_MOPEN2: case NFA_MOPEN3: case NFA_MOPEN4: case NFA_MOPEN5: case NFA_MOPEN6: case NFA_MOPEN7: case NFA_MOPEN8: case NFA_MOPEN9: #ifdef FEAT_SYN_HL case NFA_ZOPEN: case NFA_ZOPEN1: case NFA_ZOPEN2: case NFA_ZOPEN3: case NFA_ZOPEN4: case NFA_ZOPEN5: case NFA_ZOPEN6: case NFA_ZOPEN7: case NFA_ZOPEN8: case NFA_ZOPEN9: case NFA_ZCLOSE: case NFA_ZCLOSE1: case NFA_ZCLOSE2: case NFA_ZCLOSE3: case NFA_ZCLOSE4: case NFA_ZCLOSE5: case NFA_ZCLOSE6: case NFA_ZCLOSE7: case NFA_ZCLOSE8: case NFA_ZCLOSE9: #endif case NFA_MCLOSE: case NFA_MCLOSE1: case NFA_MCLOSE2: case NFA_MCLOSE3: case NFA_MCLOSE4: case NFA_MCLOSE5: case NFA_MCLOSE6: case NFA_MCLOSE7: case NFA_MCLOSE8: case NFA_MCLOSE9: case NFA_NOPEN: case NFA_NCLOSE: case NFA_LNUM_GT: case NFA_LNUM_LT: case NFA_COL_GT: case NFA_COL_LT: case NFA_VCOL_GT: case NFA_VCOL_LT: case NFA_MARK_GT: case NFA_MARK_LT: case NFA_VISUAL: case NFA_LNUM: case NFA_CURSOR: case NFA_COL: case NFA_VCOL: case NFA_MARK: case NFA_ZSTART: case NFA_ZEND: case NFA_OPT_CHARS: case NFA_EMPTY: case NFA_START_PATTERN: case NFA_END_PATTERN: case NFA_COMPOSING: case NFA_END_COMPOSING: // zero-width break; default: if (state->c < 0) // don't know what this is return -1; // normal character len += MB_CHAR2LEN(state->c); break; } // normal way to continue state = state->out; } // unrecognized, "cannot happen" return -1; }
0
[ "CWE-122" ]
vim
65b605665997fad54ef39a93199e305af2fe4d7f
308,180,214,366,810,300,000,000,000,000,000,000,000
213
patch 8.2.3409: reading beyond end of line with invalid utf-8 character Problem: Reading beyond end of line with invalid utf-8 character. Solution: Check for NUL when advancing.
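The vim patch message above concerns stepping over a multi-byte character whose claimed length runs past the end of the line; the fix is to check for the terminating NUL while advancing. A simplified C sketch of that check, with a stand-in for utf_ptr2len() and hypothetical names:

/* Illustrative sketch, not a dataset row: never advance past NUL even when
 * the lead byte claims a longer sequence. */
static int utf8_claimed_len(unsigned char lead)
{
    if (lead < 0x80) return 1;
    if ((lead & 0xE0) == 0xC0) return 2;
    if ((lead & 0xF0) == 0xE0) return 3;
    if ((lead & 0xF8) == 0xF0) return 4;
    return 1;                      /* invalid lead byte: treat as one byte */
}

const unsigned char *advance_char(const unsigned char *p)
{
    int len = utf8_claimed_len(*p);
    while (len-- > 0) {
        if (*p == '\0')            /* the added check: stop at end of line */
            return p;
        ++p;
    }
    return p;
}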
int recv_files(int f_in, int f_out, char *local_name) { int fd1,fd2; STRUCT_STAT st; int iflags, xlen; char *fname, fbuf[MAXPATHLEN]; char xname[MAXPATHLEN]; char fnametmp[MAXPATHLEN]; char *fnamecmp, *partialptr; char fnamecmpbuf[MAXPATHLEN]; uchar fnamecmp_type; struct file_struct *file; int itemizing = am_server ? logfile_format_has_i : stdout_format_has_i; enum logcode log_code = log_before_transfer ? FLOG : FINFO; int max_phase = protocol_version >= 29 ? 2 : 1; int dflt_perms = (ACCESSPERMS & ~orig_umask); #ifdef SUPPORT_ACLS const char *parent_dirname = ""; #endif int ndx, recv_ok; if (DEBUG_GTE(RECV, 1)) rprintf(FINFO, "recv_files(%d) starting\n", cur_flist->used); if (delay_updates) delayed_bits = bitbag_create(cur_flist->used + 1); while (1) { cleanup_disable(); /* This call also sets cur_flist. */ ndx = read_ndx_and_attrs(f_in, f_out, &iflags, &fnamecmp_type, xname, &xlen); if (ndx == NDX_DONE) { if (!am_server && INFO_GTE(PROGRESS, 2) && cur_flist) { set_current_file_index(NULL, 0); end_progress(0); } if (inc_recurse && first_flist) { if (read_batch) { ndx = first_flist->used + first_flist->ndx_start; gen_wants_ndx(ndx, first_flist->flist_num); } flist_free(first_flist); if (first_flist) continue; } else if (read_batch && first_flist) { ndx = first_flist->used; gen_wants_ndx(ndx, first_flist->flist_num); } if (++phase > max_phase) break; if (DEBUG_GTE(RECV, 1)) rprintf(FINFO, "recv_files phase=%d\n", phase); if (phase == 2 && delay_updates) handle_delayed_updates(local_name); write_int(f_out, NDX_DONE); continue; } if (ndx - cur_flist->ndx_start >= 0) file = cur_flist->files[ndx - cur_flist->ndx_start]; else file = dir_flist->files[cur_flist->parent_ndx]; fname = local_name ? local_name : f_name(file, fbuf); if (DEBUG_GTE(RECV, 1)) rprintf(FINFO, "recv_files(%s)\n", fname); #ifdef SUPPORT_XATTRS if (preserve_xattrs && iflags & ITEM_REPORT_XATTR && do_xfers && !(want_xattr_optim && BITS_SET(iflags, ITEM_XNAME_FOLLOWS|ITEM_LOCAL_CHANGE))) recv_xattr_request(file, f_in); #endif if (!(iflags & ITEM_TRANSFER)) { maybe_log_item(file, iflags, itemizing, xname); #ifdef SUPPORT_XATTRS if (preserve_xattrs && iflags & ITEM_REPORT_XATTR && do_xfers && !BITS_SET(iflags, ITEM_XNAME_FOLLOWS|ITEM_LOCAL_CHANGE)) set_file_attrs(fname, file, NULL, fname, 0); #endif if (iflags & ITEM_IS_NEW) { stats.created_files++; if (S_ISREG(file->mode)) { /* Nothing further to count. 
*/ } else if (S_ISDIR(file->mode)) stats.created_dirs++; #ifdef SUPPORT_LINKS else if (S_ISLNK(file->mode)) stats.created_symlinks++; #endif else if (IS_DEVICE(file->mode)) stats.created_devices++; else stats.created_specials++; } continue; } if (phase == 2) { rprintf(FERROR, "got transfer request in phase 2 [%s]\n", who_am_i()); exit_cleanup(RERR_PROTOCOL); } if (file->flags & FLAG_FILE_SENT) { if (csum_length == SHORT_SUM_LENGTH) { if (keep_partial && !partial_dir) make_backups = -make_backups; /* prevents double backup */ if (append_mode) sparse_files = -sparse_files; append_mode = -append_mode; csum_length = SUM_LENGTH; redoing = 1; } } else { if (csum_length != SHORT_SUM_LENGTH) { if (keep_partial && !partial_dir) make_backups = -make_backups; if (append_mode) sparse_files = -sparse_files; append_mode = -append_mode; csum_length = SHORT_SUM_LENGTH; redoing = 0; } if (iflags & ITEM_IS_NEW) stats.created_files++; } if (!am_server && INFO_GTE(PROGRESS, 1)) set_current_file_index(file, ndx); stats.xferred_files++; stats.total_transferred_size += F_LENGTH(file); cleanup_got_literal = 0; if (daemon_filter_list.head && check_filter(&daemon_filter_list, FLOG, fname, 0) < 0) { rprintf(FERROR, "attempt to hack rsync failed.\n"); exit_cleanup(RERR_PROTOCOL); } if (read_batch) { int wanted = redoing ? we_want_redo(ndx) : gen_wants_ndx(ndx, cur_flist->flist_num); if (!wanted) { rprintf(FINFO, "(Skipping batched update for%s \"%s\")\n", redoing ? " resend of" : "", fname); discard_receive_data(f_in, F_LENGTH(file)); file->flags |= FLAG_FILE_SENT; continue; } } remember_initial_stats(); if (!do_xfers) { /* log the transfer */ log_item(FCLIENT, file, iflags, NULL); if (read_batch) discard_receive_data(f_in, F_LENGTH(file)); continue; } if (write_batch < 0) { log_item(FCLIENT, file, iflags, NULL); if (!am_server) discard_receive_data(f_in, F_LENGTH(file)); if (inc_recurse) send_msg_int(MSG_SUCCESS, ndx); continue; } partialptr = partial_dir ? partial_dir_fname(fname) : fname; if (protocol_version >= 29) { switch (fnamecmp_type) { case FNAMECMP_FNAME: fnamecmp = fname; break; case FNAMECMP_PARTIAL_DIR: fnamecmp = partialptr; break; case FNAMECMP_BACKUP: fnamecmp = get_backup_name(fname); break; case FNAMECMP_FUZZY: if (file->dirname) { pathjoin(fnamecmpbuf, sizeof fnamecmpbuf, file->dirname, xname); fnamecmp = fnamecmpbuf; } else fnamecmp = xname; break; default: if (fnamecmp_type > FNAMECMP_FUZZY && fnamecmp_type-FNAMECMP_FUZZY <= basis_dir_cnt) { fnamecmp_type -= FNAMECMP_FUZZY + 1; if (file->dirname) { stringjoin(fnamecmpbuf, sizeof fnamecmpbuf, basis_dir[fnamecmp_type], "/", file->dirname, "/", xname, NULL); } else pathjoin(fnamecmpbuf, sizeof fnamecmpbuf, basis_dir[fnamecmp_type], xname); } else if (fnamecmp_type >= basis_dir_cnt) { rprintf(FERROR, "invalid basis_dir index: %d.\n", fnamecmp_type); exit_cleanup(RERR_PROTOCOL); } else pathjoin(fnamecmpbuf, sizeof fnamecmpbuf, basis_dir[fnamecmp_type], fname); fnamecmp = fnamecmpbuf; break; } if (!fnamecmp || (daemon_filter_list.head && check_filter(&daemon_filter_list, FLOG, fname, 0) < 0)) { fnamecmp = fname; fnamecmp_type = FNAMECMP_FNAME; } } else { /* Reminder: --inplace && --partial-dir are never * enabled at the same time. 
*/ if (inplace && make_backups > 0) { if (!(fnamecmp = get_backup_name(fname))) fnamecmp = fname; else fnamecmp_type = FNAMECMP_BACKUP; } else if (partial_dir && partialptr) fnamecmp = partialptr; else fnamecmp = fname; } /* open the file */ fd1 = do_open(fnamecmp, O_RDONLY, 0); if (fd1 == -1 && protocol_version < 29) { if (fnamecmp != fname) { fnamecmp = fname; fd1 = do_open(fnamecmp, O_RDONLY, 0); } if (fd1 == -1 && basis_dir[0]) { /* pre-29 allowed only one alternate basis */ pathjoin(fnamecmpbuf, sizeof fnamecmpbuf, basis_dir[0], fname); fnamecmp = fnamecmpbuf; fd1 = do_open(fnamecmp, O_RDONLY, 0); } } updating_basis_or_equiv = inplace && (fnamecmp == fname || fnamecmp_type == FNAMECMP_BACKUP); if (fd1 == -1) { st.st_mode = 0; st.st_size = 0; } else if (do_fstat(fd1,&st) != 0) { rsyserr(FERROR_XFER, errno, "fstat %s failed", full_fname(fnamecmp)); discard_receive_data(f_in, F_LENGTH(file)); close(fd1); if (inc_recurse) send_msg_int(MSG_NO_SEND, ndx); continue; } if (fd1 != -1 && S_ISDIR(st.st_mode) && fnamecmp == fname) { /* this special handling for directories * wouldn't be necessary if robust_rename() * and the underlying robust_unlink could cope * with directories */ rprintf(FERROR_XFER, "recv_files: %s is a directory\n", full_fname(fnamecmp)); discard_receive_data(f_in, F_LENGTH(file)); close(fd1); if (inc_recurse) send_msg_int(MSG_NO_SEND, ndx); continue; } if (fd1 != -1 && !S_ISREG(st.st_mode)) { close(fd1); fd1 = -1; } /* If we're not preserving permissions, change the file-list's * mode based on the local permissions and some heuristics. */ if (!preserve_perms) { int exists = fd1 != -1; #ifdef SUPPORT_ACLS const char *dn = file->dirname ? file->dirname : "."; if (parent_dirname != dn && strcmp(parent_dirname, dn) != 0) { dflt_perms = default_perms_for_dir(dn); parent_dirname = dn; } #endif file->mode = dest_mode(file->mode, st.st_mode, dflt_perms, exists); } /* We now check to see if we are writing the file "inplace" */ if (inplace) { fd2 = do_open(fname, O_WRONLY|O_CREAT, 0600); if (fd2 == -1) { rsyserr(FERROR_XFER, errno, "open %s failed", full_fname(fname)); } else if (updating_basis_or_equiv) cleanup_set(NULL, NULL, file, fd1, fd2); } else { fd2 = open_tmpfile(fnametmp, fname, file); if (fd2 != -1) cleanup_set(fnametmp, partialptr, file, fd1, fd2); } if (fd2 == -1) { discard_receive_data(f_in, F_LENGTH(file)); if (fd1 != -1) close(fd1); if (inc_recurse) send_msg_int(MSG_NO_SEND, ndx); continue; } /* log the transfer */ if (log_before_transfer) log_item(FCLIENT, file, iflags, NULL); else if (!am_server && INFO_GTE(NAME, 1) && INFO_EQ(PROGRESS, 1)) rprintf(FINFO, "%s\n", fname); /* recv file data */ recv_ok = receive_data(f_in, fnamecmp, fd1, st.st_size, fname, fd2, F_LENGTH(file)); log_item(log_code, file, iflags, NULL); if (fd1 != -1) close(fd1); if (close(fd2) < 0) { rsyserr(FERROR, errno, "close failed on %s", full_fname(fnametmp)); exit_cleanup(RERR_FILEIO); } if ((recv_ok && (!delay_updates || !partialptr)) || inplace) { if (partialptr == fname) partialptr = NULL; if (!finish_transfer(fname, fnametmp, fnamecmp, partialptr, file, recv_ok, 1)) recv_ok = -1; else if (fnamecmp == partialptr) { do_unlink(partialptr); handle_partial_dir(partialptr, PDIR_DELETE); } } else if (keep_partial && partialptr) { if (!handle_partial_dir(partialptr, PDIR_CREATE)) { rprintf(FERROR, "Unable to create partial-dir for %s -- discarding %s.\n", local_name ? local_name : f_name(file, NULL), recv_ok ? 
"completed file" : "partial file"); do_unlink(fnametmp); recv_ok = -1; } else if (!finish_transfer(partialptr, fnametmp, fnamecmp, NULL, file, recv_ok, !partial_dir)) recv_ok = -1; else if (delay_updates && recv_ok) { bitbag_set_bit(delayed_bits, ndx); recv_ok = 2; } else partialptr = NULL; } else do_unlink(fnametmp); cleanup_disable(); if (read_batch) file->flags |= FLAG_FILE_SENT; switch (recv_ok) { case 2: break; case 1: if (remove_source_files || inc_recurse || (preserve_hard_links && F_IS_HLINKED(file))) send_msg_int(MSG_SUCCESS, ndx); break; case 0: { enum logcode msgtype = redoing ? FERROR_XFER : FWARNING; if (msgtype == FERROR_XFER || INFO_GTE(NAME, 1)) { char *errstr, *redostr, *keptstr; if (!(keep_partial && partialptr) && !inplace) keptstr = "discarded"; else if (partial_dir) keptstr = "put into partial-dir"; else keptstr = "retained"; if (msgtype == FERROR_XFER) { errstr = "ERROR"; redostr = ""; } else { errstr = "WARNING"; redostr = read_batch ? " (may try again)" : " (will try again)"; } rprintf(msgtype, "%s: %s failed verification -- update %s%s.\n", errstr, local_name ? f_name(file, NULL) : fname, keptstr, redostr); } if (!redoing) { if (read_batch) flist_ndx_push(&batch_redo_list, ndx); send_msg_int(MSG_REDO, ndx); file->flags |= FLAG_FILE_SENT; } else if (inc_recurse) send_msg_int(MSG_NO_SEND, ndx); break; } case -1: if (inc_recurse) send_msg_int(MSG_NO_SEND, ndx); break; } } if (make_backups < 0) make_backups = -make_backups; if (phase == 2 && delay_updates) /* for protocol_version < 29 */ handle_delayed_updates(local_name); if (DEBUG_GTE(RECV, 1)) rprintf(FINFO,"recv_files finished\n"); return 0; }
1
[ "CWE-862" ]
rsync
3e06d40029cfdce9d0f73d87cfd4edaf54be9c51
273,331,075,093,136,400,000,000,000,000,000,000,000
436
Check fname in recv_files sooner.
const char* muxing_app() const { return muxing_app_; }
0
[ "CWE-20" ]
libvpx
f00890eecdf8365ea125ac16769a83aa6b68792d
259,884,615,176,450,340,000,000,000,000,000,000,000
1
update libwebm to libwebm-1.0.0.27-352-g6ab9fcf https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d
static inline MagickRealType GetPixelLuminance( const Image *magick_restrict image,const Quantum *magick_restrict pixel) { MagickRealType intensity; if (image->colorspace != sRGBColorspace) { intensity=(MagickRealType) ( 0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+ 0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+ 0.072186f*pixel[image->channel_map[BluePixelChannel].offset]); return(intensity); } intensity=(MagickRealType) (0.212656f*DecodePixelGamma((MagickRealType) pixel[image->channel_map[RedPixelChannel].offset])+0.715158f* DecodePixelGamma((MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset])+0.072186f* DecodePixelGamma((MagickRealType) pixel[image->channel_map[BluePixelChannel].offset])); return(intensity); }
0
[ "CWE-20", "CWE-125" ]
ImageMagick
8187d2d8fd010d2d6b1a3a8edd935beec404dddc
246,712,228,277,729,000,000,000,000,000,000,000,000
22
https://github.com/ImageMagick/ImageMagick/issues/1610
int TS_CONF_set_ess_cert_id_chain(CONF *conf, const char *section, TS_RESP_CTX *ctx) { return TS_CONF_add_flag(conf, section, ENV_ESS_CERT_ID_CHAIN, TS_ESS_CERT_ID_CHAIN, ctx); }
0
[]
openssl
c7235be6e36c4bef84594aa3b2f0561db84b63d8
66,759,616,209,290,450,000,000,000,000,000,000,000
6
RFC 3161 compliant time stamp request creation, response generation and response verification. Submitted by: Zoltan Glozik <[email protected]> Reviewed by: Ulf Moeller
bool MYSQL_BIN_LOG::write(Log_event *event_info) { THD *thd= event_info->thd; bool error= 1; DBUG_ENTER("MYSQL_BIN_LOG::write(Log_event *)"); binlog_cache_data *cache_data= 0; bool is_trans_cache= FALSE; if (thd->binlog_evt_union.do_union) { /* In Stored function; Remember that function call caused an update. We will log the function call to the binary log on function exit */ thd->binlog_evt_union.unioned_events= TRUE; thd->binlog_evt_union.unioned_events_trans |= event_info->use_trans_cache(); DBUG_RETURN(0); } /* We only end the statement if we are in a top-level statement. If we are inside a stored function, we do not end the statement since this will close all tables on the slave. */ bool const end_stmt= thd->locked_tables_mode && thd->lex->requires_prelocking(); if (thd->binlog_flush_pending_rows_event(end_stmt, event_info->use_trans_cache())) DBUG_RETURN(error); /* In most cases this is only called if 'is_open()' is true; in fact this is mostly called if is_open() *was* true a few instructions before, but it could have changed since. */ if (likely(is_open())) { #ifdef HAVE_REPLICATION /* In the future we need to add to the following if tests like "do the involved tables match (to be implemented) binlog_[wild_]{do|ignore}_table?" (WL#1049)" */ const char *local_db= event_info->get_db(); if ((thd && !(thd->variables.option_bits & OPTION_BIN_LOG)) || (thd->lex->sql_command != SQLCOM_ROLLBACK_TO_SAVEPOINT && thd->lex->sql_command != SQLCOM_SAVEPOINT && !binlog_filter->db_ok(local_db))) DBUG_RETURN(0); #endif /* HAVE_REPLICATION */ IO_CACHE *file= NULL; if (event_info->use_direct_logging()) { file= &log_file; mysql_mutex_lock(&LOCK_log); } else { if (thd->binlog_setup_trx_data()) goto err; binlog_cache_mngr *const cache_mngr= (binlog_cache_mngr*) thd_get_ha_data(thd, binlog_hton); is_trans_cache= use_trans_cache(thd, event_info->use_trans_cache()); file= cache_mngr->get_binlog_cache_log(is_trans_cache); cache_data= cache_mngr->get_binlog_cache_data(is_trans_cache); if (thd->lex->stmt_accessed_non_trans_temp_table()) cache_data->set_changes_to_non_trans_temp_table(); thd->binlog_start_trans_and_stmt(); } DBUG_PRINT("info",("event type: %d",event_info->get_type_code())); /* No check for auto events flag here - this write method should never be called if auto-events are enabled. Write first log events which describe the 'run environment' of the SQL command. If row-based binlogging, Insert_id, Rand and other kind of "setting context" events are not needed. */ if (thd) { if (!thd->is_current_stmt_binlog_format_row()) { if (thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt) { Intvar_log_event e(thd,(uchar) LAST_INSERT_ID_EVENT, thd->first_successful_insert_id_in_prev_stmt_for_binlog); if (e.write(file)) goto err; } if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0) { DBUG_PRINT("info",("number of auto_inc intervals: %u", thd->auto_inc_intervals_in_cur_stmt_for_binlog. nb_elements())); Intvar_log_event e(thd, (uchar) INSERT_ID_EVENT, thd->auto_inc_intervals_in_cur_stmt_for_binlog. 
minimum()); if (e.write(file)) goto err; } if (thd->rand_used) { Rand_log_event e(thd,thd->rand_saved_seed1,thd->rand_saved_seed2); if (e.write(file)) goto err; } if (thd->user_var_events.elements) { for (uint i= 0; i < thd->user_var_events.elements; i++) { BINLOG_USER_VAR_EVENT *user_var_event; get_dynamic(&thd->user_var_events,(uchar*) &user_var_event, i); /* setting flags for user var log event */ uchar flags= User_var_log_event::UNDEF_F; if (user_var_event->unsigned_flag) flags|= User_var_log_event::UNSIGNED_F; User_var_log_event e(thd, user_var_event->user_var_event->name.str, user_var_event->user_var_event->name.length, user_var_event->value, user_var_event->length, user_var_event->type, user_var_event->charset_number, flags); if (e.write(file)) goto err; } } } } /* Write the event. */ if (event_info->write(file) || DBUG_EVALUATE_IF("injecting_fault_writing", 1, 0)) goto err; error= 0; err: if (event_info->use_direct_logging()) { if (!error) { bool synced; if ((error= flush_and_sync(&synced))) { mysql_mutex_unlock(&LOCK_log); } else if ((error= RUN_HOOK(binlog_storage, after_flush, (thd, log_file_name, file->pos_in_file, synced)))) { sql_print_error("Failed to run 'after_flush' hooks"); mysql_mutex_unlock(&LOCK_log); } else { bool check_purge; signal_update(); error= rotate(false, &check_purge); mysql_mutex_unlock(&LOCK_log); if (!error && check_purge) purge(); } } else { mysql_mutex_unlock(&LOCK_log); } } if (error) { set_write_error(thd, is_trans_cache); if (check_write_error(thd) && cache_data && stmt_has_updated_non_trans_table(thd)) cache_data->set_incident(); } } DBUG_RETURN(error); }
0
[ "CWE-264" ]
mysql-server
48bd8b16fe382be302c6f0b45931be5aa6f29a0e
184,412,300,548,071,800,000,000,000,000,000,000,000
191
Bug#24388753: PRIVILEGE ESCALATION USING MYSQLD_SAFE [This is the 5.5/5.6 version of the bugfix]. The problem was that it was possible to write log files ending in .ini/.cnf that later could be parsed as an options file. This made it possible for users to specify startup options without the permissions to do so. This patch fixes the problem by disallowing general query log and slow query log to be written to files ending in .ini and .cnf.
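The MySQL message above states the mitigation directly: refuse general/slow query log file names ending in .ini or .cnf, because such files could later be parsed as option files. A small C sketch of that name check, with hypothetical helper names:

/* Illustrative sketch, not a dataset row: reject log file names that could
 * later be read back as an options file. */
#include <string.h>
#include <strings.h>

static int has_suffix_nocase(const char *name, const char *suf)
{
    size_t n = strlen(name), s = strlen(suf);
    return n >= s && strcasecmp(name + n - s, suf) == 0;
}

int log_file_name_allowed(const char *name)
{
    return !has_suffix_nocase(name, ".ini") && !has_suffix_nocase(name, ".cnf");
}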
struct json_parser *json_parser_init_flags(struct istream *input, enum json_parser_flags flags) { struct json_parser *parser; parser = i_new(struct json_parser, 1); parser->input = input; parser->flags = flags; parser->value = str_new(default_pool, 128); i_array_init(&parser->nesting, 8); i_stream_ref(input); if ((flags & JSON_PARSER_NO_ROOT_OBJECT) != 0) parser->state = JSON_STATE_VALUE; return parser; }
0
[]
core
973769d74433de3c56c4ffdf4f343cb35d98e4f7
237,993,781,533,343,470,000,000,000,000,000,000,000
16
lib: json - Escape invalid UTF-8 as unicode bytes This prevents dovecot from crashing if invalid UTF-8 input is given.
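The dovecot change above makes the JSON encoder escape bytes it cannot treat as valid UTF-8 instead of crashing. The sketch below is a deliberately simplified stand-in that escapes every control or non-ASCII byte as \u00XX; dovecot's real encoder is more precise about which sequences are valid, so treat this only as the general idea.

/* Illustrative sketch, not a dataset row: escape problem bytes rather than
 * passing them through to the JSON output. */
#include <stdio.h>

void json_escape_bytes(FILE *out, const unsigned char *s, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        unsigned char c = s[i];
        if (c == '"' || c == '\\')
            fprintf(out, "\\%c", c);
        else if (c < 0x20 || c >= 0x80)      /* control or non-ASCII byte */
            fprintf(out, "\\u%04x", c);      /* escape instead of crashing */
        else
            fputc(c, out);
    }
}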
static void exclusive_event_destroy(struct perf_event *event) { struct pmu *pmu = event->pmu; if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE)) return; /* see comment in exclusive_event_init() */ if (event->attach_state & PERF_ATTACH_TASK) atomic_dec(&pmu->exclusive_cnt); else atomic_inc(&pmu->exclusive_cnt); }
0
[ "CWE-416", "CWE-362" ]
linux
12ca6ad2e3a896256f086497a7c7406a547ee373
92,140,696,613,038,580,000,000,000,000,000,000,000
13
perf: Fix race in swevent hash There's a race on CPU unplug where we free the swevent hash array while it can still have events on. This will result in a use-after-free which is BAD. Simply do not free the hash array on unplug. This leaves the thing around and no use-after-free takes place. When the last swevent dies, we do a for_each_possible_cpu() iteration anyway to clean these up, at which time we'll free it, so no leakage will occur. Reported-by: Sasha Levin <[email protected]> Tested-by: Sasha Levin <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Cc: Frederic Weisbecker <[email protected]> Cc: Jiri Olsa <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Stephane Eranian <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Vince Weaver <[email protected]> Signed-off-by: Ingo Molnar <[email protected]>
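The perf commit above fixes a use-after-free by simply not freeing the swevent hash on CPU unplug; the array is released only when the last event goes away. The sketch below reduces that to a reference-counted table in plain C. The counter stands in for the kernel's accounting and is not thread-safe as written; all names are hypothetical.

/* Illustrative sketch, not a dataset row: hot-unplug leaves the table alone;
 * only the final put frees it. */
#include <stdlib.h>

struct swevent_table {
    int refcount;
    void *buckets;
};

void table_put(struct swevent_table *t)
{
    if (--t->refcount == 0) {     /* only the last user frees */
        free(t->buckets);
        free(t);
    }
}

void cpu_unplug(struct swevent_table *t)
{
    (void)t;                      /* the fix: do not free here */
}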
static int http_close(URLContext *h) { int ret = 0; HTTPContext *s = h->priv_data; #if CONFIG_ZLIB inflateEnd(&s->inflate_stream); av_freep(&s->inflate_buffer); #endif /* CONFIG_ZLIB */ if (!s->end_chunked_post) /* Close the write direction by sending the end of chunked encoding. */ ret = http_shutdown(h, h->flags); if (s->hd) ffurl_closep(&s->hd); av_dict_free(&s->chained_options); return ret; }
0
[ "CWE-119", "CWE-787" ]
FFmpeg
2a05c8f813de6f2278827734bf8102291e7484aa
179,324,194,456,414,040,000,000,000,000,000,000,000
19
http: make length/offset-related variables unsigned. Fixes #5992, reported and found by Paul Cher <[email protected]>.
static int fts3IncrmergeWriter( Fts3Table *p, /* Fts3 table handle */ sqlite3_int64 iAbsLevel, /* Absolute level of input segments */ int iIdx, /* Index of new output segment */ Fts3MultiSegReader *pCsr, /* Cursor that data will be read from */ IncrmergeWriter *pWriter /* Populate this object */ ){ int rc; /* Return Code */ int i; /* Iterator variable */ int nLeafEst = 0; /* Blocks allocated for leaf nodes */ sqlite3_stmt *pLeafEst = 0; /* SQL used to determine nLeafEst */ sqlite3_stmt *pFirstBlock = 0; /* SQL used to determine first block */ /* Calculate nLeafEst. */ rc = fts3SqlStmt(p, SQL_MAX_LEAF_NODE_ESTIMATE, &pLeafEst, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pLeafEst, 1, iAbsLevel); sqlite3_bind_int64(pLeafEst, 2, pCsr->nSegment); if( SQLITE_ROW==sqlite3_step(pLeafEst) ){ nLeafEst = sqlite3_column_int(pLeafEst, 0); } rc = sqlite3_reset(pLeafEst); } if( rc!=SQLITE_OK ) return rc; /* Calculate the first block to use in the output segment */ rc = fts3SqlStmt(p, SQL_NEXT_SEGMENTS_ID, &pFirstBlock, 0); if( rc==SQLITE_OK ){ if( SQLITE_ROW==sqlite3_step(pFirstBlock) ){ pWriter->iStart = sqlite3_column_int64(pFirstBlock, 0); pWriter->iEnd = pWriter->iStart - 1; pWriter->iEnd += nLeafEst * FTS_MAX_APPENDABLE_HEIGHT; } rc = sqlite3_reset(pFirstBlock); } if( rc!=SQLITE_OK ) return rc; /* Insert the marker in the %_segments table to make sure nobody tries ** to steal the space just allocated. This is also used to identify ** appendable segments. */ rc = fts3WriteSegment(p, pWriter->iEnd, 0, 0); if( rc!=SQLITE_OK ) return rc; pWriter->iAbsLevel = iAbsLevel; pWriter->nLeafEst = nLeafEst; pWriter->iIdx = iIdx; /* Set up the array of NodeWriter objects */ for(i=0; i<FTS_MAX_APPENDABLE_HEIGHT; i++){ pWriter->aNodeWriter[i].iBlock = pWriter->iStart + i*pWriter->nLeafEst; } return SQLITE_OK; }
0
[ "CWE-787" ]
sqlite
c72f2fb7feff582444b8ffdc6c900c69847ce8a9
233,896,572,108,380,660,000,000,000,000,000,000,000
53
More improvements to shadow table corruption detection in FTS3. FossilOrigin-Name: 51525f9c3235967bc00a090e84c70a6400698c897aa4742e817121c725b8c99d
e_mail_parser_inline_pgp_encrypted_class_init (EMailParserExtensionClass *class) { class->mime_types = parser_mime_types; class->priority = G_PRIORITY_LOW; class->parse = empe_inlinepgp_encrypted_parse; }
0
[ "CWE-347" ]
evolution
f66cd3e1db301d264563b4222a3574e2e58e2b85
219,115,509,697,028,900,000,000,000,000,000,000,000
6
eds-I#3 - [GPG] Mails that are not encrypted look encrypted Related to https://gitlab.gnome.org/GNOME/evolution-data-server/issues/3
XListFonts( register Display *dpy, _Xconst char *pattern, /* null-terminated */ int maxNames, int *actualCount) /* RETURN */ { register long nbytes; register unsigned i; register int length; char **flist = NULL; char *ch = NULL; char *chstart; char *chend; int count = 0; xListFontsReply rep; register xListFontsReq *req; unsigned long rlen = 0; if (strlen(pattern) >= USHRT_MAX) return NULL; LockDisplay(dpy); GetReq(ListFonts, req); req->maxNames = maxNames; nbytes = req->nbytes = pattern ? (CARD16) strlen (pattern) : 0; req->length += (nbytes + 3) >> 2; _XSend (dpy, pattern, nbytes); /* use _XSend instead of Data, since following _XReply will flush buffer */ if (!_XReply (dpy, (xReply *)&rep, 0, xFalse)) { *actualCount = 0; UnlockDisplay(dpy); SyncHandle(); return (char **) NULL; } if (rep.nFonts) { flist = Xmallocarray (rep.nFonts, sizeof(char *)); if (rep.length > 0 && rep.length < (INT_MAX >> 2)) { rlen = rep.length << 2; ch = Xmalloc(rlen + 1); /* +1 to leave room for last null-terminator */ } if ((! flist) || (! ch)) { Xfree(flist); Xfree(ch); _XEatDataWords(dpy, rep.length); *actualCount = 0; UnlockDisplay(dpy); SyncHandle(); return (char **) NULL; } _XReadPad (dpy, ch, rlen); /* * unpack into null terminated strings. */ chstart = ch; chend = ch + rlen; length = *(unsigned char *)ch; *ch = 1; /* make sure it is non-zero for XFreeFontNames */ for (i = 0; i < rep.nFonts; i++) { if (ch + length < chend) { flist[i] = ch + 1; /* skip over length */ ch += length + 1; /* find next length ... */ length = *(unsigned char *)ch; *ch = '\0'; /* and replace with null-termination */ count++; } else { Xfree(chstart); Xfree(flist); flist = NULL; count = 0; break; } } } *actualCount = count; UnlockDisplay(dpy); SyncHandle(); return (flist); }
0
[ "CWE-120" ]
libx11
8d2e02ae650f00c4a53deb625211a0527126c605
116,906,902,990,878,700,000,000,000,000,000,000,000
83
Reject string longer than USHRT_MAX before sending them on the wire The X protocol uses CARD16 values to represent the length so this would overflow. CVE-2021-31535 Signed-off-by: Matthieu Herrb <[email protected]>
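The libX11 fix above (visible at the top of XListFonts) validates a string length before it is narrowed into the protocol's 16-bit CARD16 field. The same guard in isolation, as a small C sketch with hypothetical names:

/* Illustrative sketch, not a dataset row: refuse any length that would wrap
 * when stored in a 16-bit protocol field. */
#include <limits.h>
#include <string.h>

int request_length_ok(const char *pattern)
{
    size_t n = strlen(pattern);
    return n < USHRT_MAX;          /* otherwise the CARD16 length would wrap */
}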
static void iscsi_timed_check_events(void *opaque) { IscsiLun *iscsilun = opaque; qemu_mutex_lock(&iscsilun->mutex); /* check for timed out requests */ iscsi_service(iscsilun->iscsi, 0); if (iscsilun->request_timed_out) { iscsilun->request_timed_out = false; iscsi_reconnect(iscsilun->iscsi); } /* newer versions of libiscsi may return zero events. Ensure we are able * to return to service once this situation changes. */ iscsi_set_events(iscsilun); qemu_mutex_unlock(&iscsilun->mutex); timer_mod(iscsilun->event_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + EVENT_INTERVAL); }
0
[ "CWE-125" ]
qemu
ff0507c239a246fd7215b31c5658fc6a3ee1e4c5
202,345,793,795,509,300,000,000,000,000,000,000,000
23
block/iscsi:fix heap-buffer-overflow in iscsi_aio_ioctl_cb There is an overflow, the source 'datain.data[2]' is 100 bytes, but the 'ss' is 252 bytes.This may cause a security issue because we can access a lot of unrelated memory data. The len for sbp copy data should take the minimum of mx_sb_len and sb_len_wr, not the maximum. If we use iscsi device for VM backend storage, ASAN show stack: READ of size 252 at 0xfffd149dcfc4 thread T0 #0 0xaaad433d0d34 in __asan_memcpy (aarch64-softmmu/qemu-system-aarch64+0x2cb0d34) #1 0xaaad45f9d6d0 in iscsi_aio_ioctl_cb /qemu/block/iscsi.c:996:9 #2 0xfffd1af0e2dc (/usr/lib64/iscsi/libiscsi.so.8+0xe2dc) #3 0xfffd1af0d174 (/usr/lib64/iscsi/libiscsi.so.8+0xd174) #4 0xfffd1af19fac (/usr/lib64/iscsi/libiscsi.so.8+0x19fac) #5 0xaaad45f9acc8 in iscsi_process_read /qemu/block/iscsi.c:403:5 #6 0xaaad4623733c in aio_dispatch_handler /qemu/util/aio-posix.c:467:9 #7 0xaaad4622f350 in aio_dispatch_handlers /qemu/util/aio-posix.c:510:20 #8 0xaaad4622f350 in aio_dispatch /qemu/util/aio-posix.c:520 #9 0xaaad46215944 in aio_ctx_dispatch /qemu/util/async.c:298:5 #10 0xfffd1bed12f4 in g_main_context_dispatch (/lib64/libglib-2.0.so.0+0x512f4) #11 0xaaad46227de0 in glib_pollfds_poll /qemu/util/main-loop.c:219:9 #12 0xaaad46227de0 in os_host_main_loop_wait /qemu/util/main-loop.c:242 #13 0xaaad46227de0 in main_loop_wait /qemu/util/main-loop.c:518 #14 0xaaad43d9d60c in qemu_main_loop /qemu/softmmu/vl.c:1662:9 #15 0xaaad4607a5b0 in main /qemu/softmmu/main.c:49:5 #16 0xfffd1a460b9c in __libc_start_main (/lib64/libc.so.6+0x20b9c) #17 0xaaad43320740 in _start (aarch64-softmmu/qemu-system-aarch64+0x2c00740) 0xfffd149dcfc4 is located 0 bytes to the right of 100-byte region [0xfffd149dcf60,0xfffd149dcfc4) allocated by thread T0 here: #0 0xaaad433d1e70 in __interceptor_malloc (aarch64-softmmu/qemu-system-aarch64+0x2cb1e70) #1 0xfffd1af0e254 (/usr/lib64/iscsi/libiscsi.so.8+0xe254) #2 0xfffd1af0d174 (/usr/lib64/iscsi/libiscsi.so.8+0xd174) #3 0xfffd1af19fac (/usr/lib64/iscsi/libiscsi.so.8+0x19fac) #4 0xaaad45f9acc8 in iscsi_process_read /qemu/block/iscsi.c:403:5 #5 0xaaad4623733c in aio_dispatch_handler /qemu/util/aio-posix.c:467:9 #6 0xaaad4622f350 in aio_dispatch_handlers /qemu/util/aio-posix.c:510:20 #7 0xaaad4622f350 in aio_dispatch /qemu/util/aio-posix.c:520 #8 0xaaad46215944 in aio_ctx_dispatch /qemu/util/async.c:298:5 #9 0xfffd1bed12f4 in g_main_context_dispatch (/lib64/libglib-2.0.so.0+0x512f4) #10 0xaaad46227de0 in glib_pollfds_poll /qemu/util/main-loop.c:219:9 #11 0xaaad46227de0 in os_host_main_loop_wait /qemu/util/main-loop.c:242 #12 0xaaad46227de0 in main_loop_wait /qemu/util/main-loop.c:518 #13 0xaaad43d9d60c in qemu_main_loop /qemu/softmmu/vl.c:1662:9 #14 0xaaad4607a5b0 in main /qemu/softmmu/main.c:49:5 #15 0xfffd1a460b9c in __libc_start_main (/lib64/libc.so.6+0x20b9c) #16 0xaaad43320740 in _start (aarch64-softmmu/qemu-system-aarch64+0x2c00740) Reported-by: Euler Robot <[email protected]> Signed-off-by: Chen Qun <[email protected]> Reviewed-by: Stefan Hajnoczi <[email protected]> Message-id: [email protected] Reviewed-by: Daniel P. Berrangé <[email protected]> Signed-off-by: Peter Maydell <[email protected]>
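The QEMU message above pins the bug to a single expression: the sense-data copy used the maximum of the destination capacity and the bytes actually produced instead of the minimum. A minimal C sketch of the corrected copy; the names are hypothetical, borrowed loosely from the SG_IO fields mentioned in the message.

/* Illustrative sketch, not a dataset row: clamp the copy to both the buffer
 * size and the data actually produced. */
#include <string.h>

size_t copy_sense(unsigned char *dst, size_t mx_sb_len,
                  const unsigned char *src, size_t sb_len_wr)
{
    size_t n = mx_sb_len < sb_len_wr ? mx_sb_len : sb_len_wr;  /* min, not max */
    memcpy(dst, src, n);
    return n;
}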
tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free) { MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); SortTuple stup; if (!tuplesort_gettuple_common(state, forward, &stup, should_free)) stup.tuple = NULL; MemoryContextSwitchTo(oldcontext); return stup.tuple; }
0
[ "CWE-209" ]
postgres
804b6b6db4dcfc590a468e7be390738f9f7755fb
77,316,442,145,180,370,000,000,000,000,000,000,000
12
Fix column-privilege leak in error-message paths While building error messages to return to the user, BuildIndexValueDescription, ExecBuildSlotValueDescription and ri_ReportViolation would happily include the entire key or entire row in the result returned to the user, even if the user didn't have access to view all of the columns being included. Instead, include only those columns which the user is providing or which the user has select rights on. If the user does not have any rights to view the table or any of the columns involved then no detail is provided and a NULL value is returned from BuildIndexValueDescription and ExecBuildSlotValueDescription. Note that, for key cases, the user must have access to all of the columns for the key to be shown; a partial key will not be returned. Further, in master only, do not return any data for cases where row security is enabled on the relation and row security should be applied for the user. This required a bit of refactoring and moving of things around related to RLS- note the addition of utils/misc/rls.c. Back-patch all the way, as column-level privileges are now in all supported versions. This has been assigned CVE-2014-8161, but since the issue and the patch have already been publicized on pgsql-hackers, there's no point in trying to hide this commit.
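The PostgreSQL fix above limits error-detail strings to columns the user can actually read and omits the detail when nothing is visible. The sketch below shows that filtering shape in plain C; the permission callback and the output format are assumptions, not the real BuildIndexValueDescription code.

/* Illustrative sketch, not a dataset row: only echo columns the caller may
 * read when building an error detail. */
#include <stdio.h>

typedef int (*can_read_fn)(int column_no);

void print_error_detail(FILE *out, const char *const *names,
                        const char *const *values, int ncols,
                        can_read_fn can_read)
{
    int printed = 0;
    for (int i = 0; i < ncols; i++) {
        if (!can_read(i))
            continue;             /* never echo a column the user cannot SELECT */
        fprintf(out, "%s%s=%s", printed ? ", " : "DETAIL: ", names[i], values[i]);
        printed = 1;
    }
    if (!printed)
        fprintf(out, "DETAIL: (details omitted)");  /* no readable columns */
    fputc('\n', out);
}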
static int mxf_probe(AVProbeData *p) { const uint8_t *bufp = p->buf; const uint8_t *end = p->buf + p->buf_size; if (p->buf_size < sizeof(mxf_header_partition_pack_key)) return 0; /* Must skip Run-In Sequence and search for MXF header partition pack key SMPTE 377M 5.5 */ end -= sizeof(mxf_header_partition_pack_key); for (; bufp < end;) { if (!((bufp[13] - 1) & 0xF2)){ if (AV_RN32(bufp ) == AV_RN32(mxf_header_partition_pack_key ) && AV_RN32(bufp+ 4) == AV_RN32(mxf_header_partition_pack_key+ 4) && AV_RN32(bufp+ 8) == AV_RN32(mxf_header_partition_pack_key+ 8) && AV_RN16(bufp+12) == AV_RN16(mxf_header_partition_pack_key+12)) return AVPROBE_SCORE_MAX; bufp ++; } else bufp += 10; } return 0; }
0
[ "CWE-703", "CWE-834" ]
FFmpeg
900f39692ca0337a98a7cf047e4e2611071810c2
127,607,476,186,933,010,000,000,000,000,000,000,000
24
avformat/mxfdec: Fix DoS issues in mxf_read_index_entry_array() Fixes: 20170829A.mxf Co-Author: 张洪亮(望初)" <[email protected]> Found-by: Xiaohei and Wangchu from Alibaba Security Team Signed-off-by: Michael Niedermayer <[email protected]>
napi_status napi_create_array_with_length(napi_env env, size_t length, napi_value* result) { CHECK_ENV(env); CHECK_ARG(env, result); *result = v8impl::JsValueFromV8LocalValue( v8::Array::New(env->isolate, length)); return napi_clear_last_error(env); }
0
[ "CWE-191" ]
node
656260b4b65fec3b10f6da3fdc9f11fb941aafb5
45,306,015,157,429,580,000,000,000,000,000,000,000
11
napi: fix memory corruption vulnerability Fixes: https://hackerone.com/reports/784186 CVE-ID: CVE-2020-8174 PR-URL: https://github.com/nodejs-private/node-private/pull/195 Reviewed-By: Anna Henningsen <[email protected]> Reviewed-By: Gabriel Schulhof <[email protected]> Reviewed-By: Michael Dawson <[email protected]> Reviewed-By: Colin Ihrig <[email protected]> Reviewed-By: Rich Trott <[email protected]>
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid, u32 group, gfp_t allocation, int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), void *filter_data) { struct net *net = sock_net(ssk); struct netlink_broadcast_data info; struct sock *sk; skb = netlink_trim(skb, allocation); info.exclude_sk = ssk; info.net = net; info.portid = portid; info.group = group; info.failure = 0; info.delivery_failure = 0; info.congested = 0; info.delivered = 0; info.allocation = allocation; info.skb = skb; info.skb2 = NULL; info.tx_filter = filter; info.tx_data = filter_data; /* While we sleep in clone, do not allow to change socket list */ netlink_lock_table(); sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) do_one_broadcast(sk, &info); consume_skb(skb); netlink_unlock_table(); if (info.delivery_failure) { kfree_skb(info.skb2); return -ENOBUFS; } consume_skb(info.skb2); if (info.delivered) { if (info.congested && (allocation & __GFP_WAIT)) yield(); return 0; } return -ESRCH; }
0
[ "CWE-20", "CWE-269" ]
linux
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
338,188,482,453,512,380,000,000,000,000,000,000,000
49
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
spi_to_spi_gpio(const struct spi_device *spi) { const struct spi_bitbang *bang; struct spi_gpio *spi_gpio; bang = spi_master_get_devdata(spi->master); spi_gpio = container_of(bang, struct spi_gpio, bitbang); return spi_gpio; }
0
[ "CWE-400", "CWE-401" ]
linux
d3b0ffa1d75d5305ebe34735598993afbb8a869d
207,008,335,852,968,850,000,000,000,000,000,000,000
9
spi: gpio: prevent memory leak in spi_gpio_probe In spi_gpio_probe an SPI master is allocated via spi_alloc_master, but this controller should be released if devm_add_action_or_reset fails, otherwise memory leaks. In order to avoid leak spi_contriller_put must be called in case of failure for devm_add_action_or_reset. Fixes: 8b797490b4db ("spi: gpio: Make sure spi_master_put() is called in every error path") Signed-off-by: Navid Emamdoost <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Mark Brown <[email protected]>
QPDFWriter::writeString(std::string const& str) { this->pipeline->write(QUtil::unsigned_char_pointer(str), str.length()); }
0
[ "CWE-399", "CWE-835" ]
qpdf
8249a26d69f72b9cda584c14cc3f12769985e481
9,006,440,885,218,340,000,000,000,000,000,000,000
4
Fix infinite loop in QPDFWriter (fixes #143)
GF_Err iref_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read_ex(s, bs, s->type); }
0
[ "CWE-401", "CWE-787" ]
gpac
ec64c7b8966d7e4642d12debb888be5acf18efb9
151,005,275,945,796,140,000,000,000,000,000,000,000
4
fixed #1786 (fuzz)
/* Gets the number of bytes needed to convert a Latin-1 string to UTF-8 */ static size_t LATIN1_to_UTF8_len(const char *text) { size_t bytes = 1; while (*text) { Uint8 ch = *(const Uint8 *)text++; if (ch <= 0x7F) { bytes += 1; } else { bytes += 2; } } return bytes;
0
[ "CWE-190", "CWE-787" ]
SDL_ttf
db1b41ab8bde6723c24b866e466cad78c2fa0448
121,901,548,710,799,980,000,000,000,000,000,000,000
13
More integer overflow (see bug #187) Make sure that 'width + alignment' doesn't overflow, otherwise it could create a SDL_Surface of 'width' but with wrong 'pitch'
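A hedged sketch of the guard named in the commit message above, assuming alignment is a small non-negative pad; the pitch rule is simplified and this is not the actual SDL_ttf fix.

#include <limits.h>

/* Refuse the surface when width + alignment would overflow, so the pitch
 * derived from it cannot silently disagree with the stored width. */
static int safe_pitch(int width, int alignment, int *pitch_out)
{
    if (width < 0 || alignment < 0 || width > INT_MAX - alignment)
        return -1;                      /* would overflow: caller must bail out */
    *pitch_out = width + alignment;     /* simplified pitch calculation */
    return 0;
}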
Bool gf_hevc_slice_is_intra(HEVCState *hevc) { switch (hevc->s_info.nal_unit_type) { case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_CRA: return GF_TRUE; default: return GF_FALSE; } }
0
[ "CWE-190", "CWE-787" ]
gpac
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
139,908,372,287,360,770,000,000,000,000,000,000,000
14
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
static void __exit sm712fb_exit(void) { pci_unregister_driver(&smtcfb_driver); }
0
[ "CWE-787" ]
linux-fbdev
bd771cf5c4254511cc4abb88f3dab3bd58bdf8e8
277,682,735,760,229,000,000,000,000,000,000,000,000
4
video: fbdev: sm712fb: Fix crash in smtcfb_read() Zheyu Ma reported this crash in the sm712fb driver when reading three bytes from the framebuffer: BUG: unable to handle page fault for address: ffffc90001ffffff RIP: 0010:smtcfb_read+0x230/0x3e0 Call Trace: vfs_read+0x198/0xa00 ? do_sys_openat2+0x27d/0x350 ? __fget_light+0x54/0x340 ksys_read+0xce/0x190 do_syscall_64+0x43/0x90 Fix it by removing the open-coded endianess fixup-code and by moving the pointer post decrement out the fb_readl() function. Reported-by: Zheyu Ma <[email protected]> Signed-off-by: Helge Deller <[email protected]> Tested-by: Zheyu Ma <[email protected]> Cc: [email protected]
evdev_init_sendevents(struct evdev_device *device, struct evdev_dispatch *dispatch) { device->base.config.sendevents = &dispatch->sendevents.config; dispatch->sendevents.current_mode = LIBINPUT_CONFIG_SEND_EVENTS_ENABLED; dispatch->sendevents.config.get_modes = evdev_sendevents_get_modes; dispatch->sendevents.config.set_mode = evdev_sendevents_set_mode; dispatch->sendevents.config.get_mode = evdev_sendevents_get_mode; dispatch->sendevents.config.get_default_mode = evdev_sendevents_get_default_mode; }
0
[ "CWE-134" ]
libinput
a423d7d3269dc32a87384f79e29bb5ac021c83d1
25,128,846,315,787,880,000,000,000,000,000,000,000
11
evdev: strip the device name of format directives This fixes a format string vulnerabilty. evdev_log_message() composes a format string consisting of a fixed prefix (including the rendered device name) and the passed-in format buffer. This format string is then passed with the arguments to the actual log handler, which usually and eventually ends up being printf. If the device name contains a printf-style format directive, these ended up in the format string and thus get interpreted correctly, e.g. for a device "Foo%sBar" the log message vs printf invocation ends up being: evdev_log_message(device, "some message %s", "some argument"); printf("event9 - Foo%sBar: some message %s", "some argument"); This can enable an attacker to execute malicious code with the privileges of the process using libinput. To exploit this, an attacker needs to be able to create a kernel device with a malicious name, e.g. through /dev/uinput or a Bluetooth device. To fix this, convert any potential format directives in the device name by duplicating percentages. Pre-rendering the device to avoid the issue altogether would be nicer but the current log level hooks do not easily allow for this. The device name is the only user-controlled part of the format string. A second potential issue is the sysname of the device which is also sanitized. This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from Assured AB, and independently by Lukas Lamster. Fixes #752 Signed-off-by: Peter Hutterer <[email protected]>
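A sketch of the percent-doubling mitigation the message describes, assuming the sanitized name is later embedded in a printf-style format string; the function name is illustrative, not libinput's.

#include <stdlib.h>
#include <string.h>

/* Duplicate every '%' so a device name such as "Foo%sBar" is rendered
 * literally instead of being interpreted as a format directive. */
static char *sanitize_device_name(const char *name)
{
    size_t len = strlen(name);
    char *out = malloc(2 * len + 1);    /* worst case: every byte is '%' */
    size_t i, j = 0;

    if (out == NULL)
        return NULL;
    for (i = 0; i < len; i++) {
        out[j++] = name[i];
        if (name[i] == '%')
            out[j++] = '%';             /* "%s" becomes "%%s" */
    }
    out[j] = '\0';
    return out;
}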
static void handle_wl_output_scale(void *data, struct wl_output *output, int32_t factor) { struct swaylock_surface *surface = data; surface->scale = factor; if (surface->state->run_display) { damage_surface(surface); } }
0
[ "CWE-703" ]
swaylock
1d1c75b6316d21933069a9d201f966d84099f6ca
177,342,468,647,126,970,000,000,000,000,000,000,000
8
Add support for ext-session-lock-v1 This is a new protocol to lock the session [1]. It should be more reliable than layer-shell + input-inhibitor. [1]: https://gitlab.freedesktop.org/wayland/wayland-protocols/-/merge_requests/131
static RedLinkInfo *reds_init_client_ssl_connection(RedsState *reds, int socket) { RedLinkInfo *link; RedStreamSslStatus ssl_status; link = reds_init_client_connection(reds, socket); if (link == NULL) { return NULL; } ssl_status = red_stream_enable_ssl(link->stream, reds->ctx); switch (ssl_status) { case RED_STREAM_SSL_STATUS_OK: reds_handle_new_link(link); return link; case RED_STREAM_SSL_STATUS_ERROR: goto error; case RED_STREAM_SSL_STATUS_WAIT_FOR_READ: link->stream->watch = reds_core_watch_add(reds, link->stream->socket, SPICE_WATCH_EVENT_READ, reds_handle_ssl_accept, link); break; case RED_STREAM_SSL_STATUS_WAIT_FOR_WRITE: link->stream->watch = reds_core_watch_add(reds, link->stream->socket, SPICE_WATCH_EVENT_WRITE, reds_handle_ssl_accept, link); break; } return link; error: /* close the stream but do not close the socket, this API is * supposed to not close it if it fails */ link->stream->socket = -1; reds_link_free(link); return NULL; }
0
[]
spice
ca5bbc5692e052159bce1a75f55dc60b36078749
5,013,509,788,085,723,400,000,000,000,000,000,000
37
With OpenSSL 1.1: Disable client-initiated renegotiation. Fixes issue #49 Fixes BZ#1904459 Signed-off-by: Julien Ropé <[email protected]> Reported-by: BlackKD Acked-by: Frediano Ziglio <[email protected]>
static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, void *ptr) { struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; struct sctp_sockaddr_entry *addr = NULL; struct sctp_sockaddr_entry *temp; struct net *net = dev_net(ifa->idev->dev); int found = 0; switch (ev) { case NETDEV_UP: addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); if (addr) { addr->a.v6.sin6_family = AF_INET6; addr->a.v6.sin6_port = 0; addr->a.v6.sin6_addr = ifa->addr; addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; addr->valid = 1; spin_lock_bh(&net->sctp.local_addr_lock); list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list); sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW); spin_unlock_bh(&net->sctp.local_addr_lock); } break; case NETDEV_DOWN: spin_lock_bh(&net->sctp.local_addr_lock); list_for_each_entry_safe(addr, temp, &net->sctp.local_addr_list, list) { if (addr->a.sa.sa_family == AF_INET6 && ipv6_addr_equal(&addr->a.v6.sin6_addr, &ifa->addr)) { sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL); found = 1; addr->valid = 0; list_del_rcu(&addr->list); break; } } spin_unlock_bh(&net->sctp.local_addr_lock); if (found) kfree_rcu(addr, rcu); break; } return NOTIFY_DONE; }
0
[ "CWE-310" ]
net
95ee62083cb6453e056562d91f597552021e6ae7
148,054,563,526,573,050,000,000,000,000,000,000,000
46
net: sctp: fix ipv6 ipsec encryption bug in sctp_v6_xmit Alan Chester reported an issue with IPv6 on SCTP that IPsec traffic is not being encrypted, whereas on IPv4 it is. Setting up an AH + ESP transport does not seem to have the desired effect: SCTP + IPv4: 22:14:20.809645 IP (tos 0x2,ECT(0), ttl 64, id 0, offset 0, flags [DF], proto AH (51), length 116) 192.168.0.2 > 192.168.0.5: AH(spi=0x00000042,sumlen=16,seq=0x1): ESP(spi=0x00000044,seq=0x1), length 72 22:14:20.813270 IP (tos 0x2,ECT(0), ttl 64, id 0, offset 0, flags [DF], proto AH (51), length 340) 192.168.0.5 > 192.168.0.2: AH(spi=0x00000043,sumlen=16,seq=0x1): SCTP + IPv6: 22:31:19.215029 IP6 (class 0x02, hlim 64, next-header SCTP (132) payload length: 364) fe80::222:15ff:fe87:7fc.3333 > fe80::92e6:baff:fe0d:5a54.36767: sctp 1) [INIT ACK] [init tag: 747759530] [rwnd: 62464] [OS: 10] [MIS: 10] Moreover, Alan says: This problem was seen with both Racoon and Racoon2. Other people have seen this with OpenSwan. When IPsec is configured to encrypt all upper layer protocols the SCTP connection does not initialize. After using Wireshark to follow packets, this is because the SCTP packet leaves Box A unencrypted and Box B believes all upper layer protocols are to be encrypted so it drops this packet, causing the SCTP connection to fail to initialize. When IPsec is configured to encrypt just SCTP, the SCTP packets are observed unencrypted. In fact, using `socat sctp6-listen:3333 -` on one end and transferring "plaintext" string on the other end, results in cleartext on the wire where SCTP eventually does not report any errors, thus in the latter case that Alan reports, the non-paranoid user might think he's communicating over an encrypted transport on SCTP although he's not (tcpdump ... -X): ... 0x0030: 5d70 8e1a 0003 001a 177d eb6c 0000 0000 ]p.......}.l.... 0x0040: 0000 0000 706c 6169 6e74 6578 740a 0000 ....plaintext... Only in /proc/net/xfrm_stat we can see XfrmInTmplMismatch increasing on the receiver side. Initial follow-up analysis from Alan's bug report was done by Alexey Dobriyan. Also thanks to Vlad Yasevich for feedback on this. SCTP has its own implementation of sctp_v6_xmit() not calling inet6_csk_xmit(). This has the implication that it probably never really got updated along with changes in inet6_csk_xmit() and therefore does not seem to invoke xfrm handlers. SCTP's IPv4 xmit however, properly calls ip_queue_xmit() to do the work. Since a call to inet6_csk_xmit() would solve this problem, but result in unecessary route lookups, let us just use the cached flowi6 instead that we got through sctp_v6_get_dst(). Since all SCTP packets are being sent through sctp_packet_transmit(), we do the route lookup / flow caching in sctp_transport_route(), hold it in tp->dst and skb_dst_set() right after that. If we would alter fl6->daddr in sctp_v6_xmit() to np->opt->srcrt, we possibly could run into the same effect of not having xfrm layer pick it up, hence, use fl6_update_dst() in sctp_v6_get_dst() instead to get the correct source routed dst entry, which we assign to the skb. Also source address routing example from 625034113 ("sctp: fix sctp to work with ipv6 source address routing") still works with this patch! Nevertheless, in RFC5095 it is actually 'recommended' to not use that anyway due to traffic amplification [1]. So it seems we're not supposed to do that anyway in sctp_v6_xmit(). 
Moreover, if we overwrite the flow destination here, the lower IPv6 layer will be unable to put the correct destination address into IP header, as routing header is added in ipv6_push_nfrag_opts() but then probably with wrong final destination. Things aside, result of this patch is that we do not have any XfrmInTmplMismatch increase plus on the wire with this patch it now looks like: SCTP + IPv6: 08:17:47.074080 IP6 2620:52:0:102f:7a2b:cbff:fe27:1b0a > 2620:52:0:102f:213:72ff:fe32:7eba: AH(spi=0x00005fb4,seq=0x1): ESP(spi=0x00005fb5,seq=0x1), length 72 08:17:47.074264 IP6 2620:52:0:102f:213:72ff:fe32:7eba > 2620:52:0:102f:7a2b:cbff:fe27:1b0a: AH(spi=0x00003d54,seq=0x1): ESP(spi=0x00003d55,seq=0x1), length 296 This fixes Kernel Bugzilla 24412. This security issue seems to be present since 2.6.18 kernels. Lets just hope some big passive adversary in the wild didn't have its fun with that. lksctp-tools IPv6 regression test suite passes as well with this patch. [1] http://www.secdev.org/conf/IPv6_RH_security-csw07.pdf Reported-by: Alan Chester <[email protected]> Reported-by: Alexey Dobriyan <[email protected]> Signed-off-by: Daniel Borkmann <[email protected]> Cc: Steffen Klassert <[email protected]> Cc: Hannes Frederic Sowa <[email protected]> Acked-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
void textview_show_error(TextView *textview) { GtkTextView *text; GtkTextBuffer *buffer; GtkTextIter iter; textview_set_font(textview, NULL); textview_clear(textview); text = GTK_TEXT_VIEW(textview->text); buffer = gtk_text_view_get_buffer(text); gtk_text_buffer_get_start_iter(buffer, &iter); TEXTVIEW_INSERT(_("\n" " This message can't be displayed.\n" " This is probably due to a network error.\n" "\n" " Use ")); TEXTVIEW_INSERT_LINK(_("'Network Log'"), "sc://view_log", NULL); TEXTVIEW_INSERT(_(" in the Tools menu for more information.")); textview_show_icon(textview, GTK_STOCK_DIALOG_ERROR); }
0
[ "CWE-601" ]
claws
ac286a71ed78429e16c612161251b9ea90ccd431
9,182,311,373,386,555,000,000,000,000,000,000,000
22
harden link checker before accepting click
static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, int spi) { struct bpf_verifier_state *st = env->cur_state; int first_idx = st->first_insn_idx; int last_idx = env->insn_idx; struct bpf_func_state *func; struct bpf_reg_state *reg; u32 reg_mask = regno >= 0 ? 1u << regno : 0; u64 stack_mask = spi >= 0 ? 1ull << spi : 0; bool skip_first = true; bool new_marks = false; int i, err; if (!env->allow_ptr_leaks) /* backtracking is root only for now */ return 0; func = st->frame[st->curframe]; if (regno >= 0) { reg = &func->regs[regno]; if (reg->type != SCALAR_VALUE) { WARN_ONCE(1, "backtracing misuse"); return -EFAULT; } if (!reg->precise) new_marks = true; else reg_mask = 0; reg->precise = true; } while (spi >= 0) { if (func->stack[spi].slot_type[0] != STACK_SPILL) { stack_mask = 0; break; } reg = &func->stack[spi].spilled_ptr; if (reg->type != SCALAR_VALUE) { stack_mask = 0; break; } if (!reg->precise) new_marks = true; else stack_mask = 0; reg->precise = true; break; } if (!new_marks) return 0; if (!reg_mask && !stack_mask) return 0; for (;;) { DECLARE_BITMAP(mask, 64); u32 history = st->jmp_history_cnt; if (env->log.level & BPF_LOG_LEVEL) verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx); for (i = last_idx;;) { if (skip_first) { err = 0; skip_first = false; } else { err = backtrack_insn(env, i, &reg_mask, &stack_mask); } if (err == -ENOTSUPP) { mark_all_scalars_precise(env, st); return 0; } else if (err) { return err; } if (!reg_mask && !stack_mask) /* Found assignment(s) into tracked register in this state. * Since this state is already marked, just return. * Nothing to be tracked further in the parent state. */ return 0; if (i == first_idx) break; i = get_prev_insn_idx(st, i, &history); if (i >= env->prog->len) { /* This can happen if backtracking reached insn 0 * and there are still reg_mask or stack_mask * to backtrack. * It means the backtracking missed the spot where * particular register was initialized with a constant. */ verbose(env, "BUG backtracking idx %d\n", i); WARN_ONCE(1, "verifier backtracking bug"); return -EFAULT; } } st = st->parent; if (!st) break; new_marks = false; func = st->frame[st->curframe]; bitmap_from_u64(mask, reg_mask); for_each_set_bit(i, mask, 32) { reg = &func->regs[i]; if (reg->type != SCALAR_VALUE) { reg_mask &= ~(1u << i); continue; } if (!reg->precise) new_marks = true; reg->precise = true; } bitmap_from_u64(mask, stack_mask); for_each_set_bit(i, mask, 64) { if (i >= func->allocated_stack / BPF_REG_SIZE) { /* the sequence of instructions: * 2: (bf) r3 = r10 * 3: (7b) *(u64 *)(r3 -8) = r0 * 4: (79) r4 = *(u64 *)(r10 -8) * doesn't contain jmps. It's backtracked * as a single block. * During backtracking insn 3 is not recognized as * stack access, so at the end of backtracking * stack slot fp-8 is still marked in stack_mask. * However the parent state may not have accessed * fp-8 and it's "unallocated" stack space. * In such case fallback to conservative. */ mark_all_scalars_precise(env, st); return 0; } if (func->stack[i].slot_type[0] != STACK_SPILL) { stack_mask &= ~(1ull << i); continue; } reg = &func->stack[i].spilled_ptr; if (reg->type != SCALAR_VALUE) { stack_mask &= ~(1ull << i); continue; } if (!reg->precise) new_marks = true; reg->precise = true; } if (env->log.level & BPF_LOG_LEVEL) { print_verifier_state(env, func); verbose(env, "parent %s regs=%x stack=%llx marks\n", new_marks ? 
"didn't have" : "already had", reg_mask, stack_mask); } if (!reg_mask && !stack_mask) break; if (!new_marks) break; last_idx = st->last_insn_idx; first_idx = st->first_insn_idx; } return 0; }
0
[]
linux
294f2fc6da27620a506e6c050241655459ccd6bd
3,851,777,118,315,206,000,000,000,000,000,000,000
162
bpf: Verifer, adjust_scalar_min_max_vals to always call update_reg_bounds() Currently, for all op verification we call __red_deduce_bounds() and __red_bound_offset() but we only call __update_reg_bounds() in bitwise ops. However, we could benefit from calling __update_reg_bounds() in BPF_ADD, BPF_SUB, and BPF_MUL cases as well. For example, a register with state 'R1_w=invP0' when we subtract from it, w1 -= 2 Before coerce we will now have an smin_value=S64_MIN, smax_value=U64_MAX and unsigned bounds umin_value=0, umax_value=U64_MAX. These will then be clamped to S32_MIN, U32_MAX values by coerce in the case of alu32 op as done in above example. However tnum will be a constant because the ALU op is done on a constant. Without update_reg_bounds() we have a scenario where tnum is a const but our unsigned bounds do not reflect this. By calling update_reg_bounds after coerce to 32bit we further refine the umin_value to U64_MAX in the alu64 case or U32_MAX in the alu32 case above. Signed-off-by: John Fastabend <[email protected]> Signed-off-by: Alexei Starovoitov <[email protected]> Link: https://lore.kernel.org/bpf/158507151689.15666.566796274289413203.stgit@john-Precision-5820-Tower
RGWOp *RGWHandler_REST_Obj_SWIFT::op_get() { return get_obj_op(true); }
0
[ "CWE-617" ]
ceph
f44a8ae8aa27ecef69528db9aec220f12492810e
225,871,141,364,726,040,000,000,000,000,000,000,000
4
rgw: RGWSwiftWebsiteHandler::is_web_dir checks empty subdir_name checking for empty name avoids later assertion in RGWObjectCtx::set_atomic Fixes: CVE-2021-3531 Reviewed-by: Casey Bodley <[email protected]> Signed-off-by: Casey Bodley <[email protected]> (cherry picked from commit 7196a469b4470f3c8628489df9a41ec8b00a5610)
static loff_t snd_disconnect_llseek(struct file *file, loff_t offset, int orig) { return -ENODEV; }
0
[ "CWE-416" ]
linux
2a3f7221acddfe1caa9ff09b3a8158c39b2fdeac
197,374,088,536,846,440,000,000,000,000,000,000,000
4
ALSA: core: Fix card races between register and disconnect There is a small race window in the card disconnection code that allows the registration of another card with the very same card id. This leads to a warning in procfs creation as caught by syzkaller. The problem is that we delete snd_cards and snd_cards_lock entries at the very beginning of the disconnection procedure. This makes the slot available to be assigned for another card object while the disconnection procedure is being processed. Then it becomes possible to issue a procfs registration with the existing file name although we check the conflict beforehand. The fix is simply to move the snd_cards and snd_cards_lock clearances at the end of the disconnection procedure. The references to these entries are merely either from the global proc files like /proc/asound/cards or from the card registration / disconnection, so it should be fine to shift at the very end. Reported-by: [email protected] Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
void sk_common_release(struct sock *sk) { if (sk->sk_prot->destroy) sk->sk_prot->destroy(sk); /* * Observation: when sk_common_release is called, processes have * no access to socket. But net still has. * Step one, detach it from networking: * * A. Remove from hash tables. */ sk->sk_prot->unhash(sk); /* * In this point socket cannot receive new packets, but it is possible * that some packets are in flight because some CPU runs receiver and * did hash table lookup before we unhashed socket. They will achieve * receive queue and will be purged by socket destructor. * * Also we still have packets pending on receive queue and probably, * our own packets waiting in device queues. sock_destroy will drain * receive queue, but transmitted packets will delay socket destruction * until the last reference will be released. */ sock_orphan(sk); xfrm_sk_free_policy(sk); sk_refcnt_debug_release(sk); sock_put(sk); }
0
[]
net
35306eb23814444bd4021f8a1c3047d3cb0c8b2b
61,413,773,306,342,160,000,000,000,000,000,000,000
35
af_unix: fix races in sk_peer_pid and sk_peer_cred accesses Jann Horn reported that SO_PEERCRED and SO_PEERGROUPS implementations are racy, as af_unix can concurrently change sk_peer_pid and sk_peer_cred. In order to fix this issue, this patch adds a new spinlock that needs to be used whenever these fields are read or written. Jann also pointed out that l2cap_sock_get_peer_pid_cb() is currently reading sk->sk_peer_pid which makes no sense, as this field is only possibly set by AF_UNIX sockets. We will have to clean this in a separate patch. This could be done by reverting b48596d1dc25 "Bluetooth: L2CAP: Add get_peer_pid callback" or implementing what was truly expected. Fixes: 109f6e39fa07 ("af_unix: Allow SO_PEERCRED to work across namespaces.") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Jann Horn <[email protected]> Cc: Eric W. Biederman <[email protected]> Cc: Luiz Augusto von Dentz <[email protected]> Cc: Marcel Holtmann <[email protected]> Signed-off-by: David S. Miller <[email protected]>
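A sketch of the locking discipline the message calls for — every read and write of the peer-credential fields goes through one dedicated lock — shown here with pthreads rather than the kernel spinlock; the structure is a stand-in, not struct sock.

#include <pthread.h>
#include <sys/types.h>

struct peer_ids {
    pthread_mutex_t lock;   /* guards peer_pid and peer_uid */
    pid_t peer_pid;
    uid_t peer_uid;
};

static void peer_ids_set(struct peer_ids *p, pid_t pid, uid_t uid)
{
    pthread_mutex_lock(&p->lock);
    p->peer_pid = pid;
    p->peer_uid = uid;
    pthread_mutex_unlock(&p->lock);
}

static pid_t peer_ids_get_pid(struct peer_ids *p)
{
    pthread_mutex_lock(&p->lock);       /* readers take the lock as well */
    pid_t pid = p->peer_pid;
    pthread_mutex_unlock(&p->lock);
    return pid;
}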
rpc_C_VerifyRecover (CK_X_FUNCTION_LIST *self, p11_rpc_message *msg) { CK_SESSION_HANDLE session; CK_BYTE_PTR signature; CK_ULONG signature_len; CK_BYTE_PTR data; CK_ULONG data_len; BEGIN_CALL (VerifyRecover); IN_ULONG (session); IN_BYTE_ARRAY (signature, signature_len); IN_BYTE_BUFFER (data, data_len); PROCESS_CALL ((self, session, signature, signature_len, data, &data_len)); OUT_BYTE_ARRAY (data, data_len); END_CALL; }
0
[ "CWE-190" ]
p11-kit
5307a1d21a50cacd06f471a873a018d23ba4b963
256,995,887,067,967,900,000,000,000,000,000,000,000
17
Check for arithmetic overflows before allocating
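The message above only names the pattern, so this is a generic sketch of an overflow-checked allocation rather than p11-kit's own helper.

#include <stdint.h>
#include <stdlib.h>

/* Refuse to allocate when count * size would wrap around a size_t. */
static void *alloc_array_checked(size_t count, size_t size)
{
    if (size != 0 && count > SIZE_MAX / size)
        return NULL;
    return malloc(count * size);
}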
flatpak_bwrap_append_args (FlatpakBwrap *bwrap, GPtrArray *other_array) { flatpak_bwrap_append_argsv (bwrap, (char **) other_array->pdata, other_array->len); }
0
[ "CWE-94", "CWE-74" ]
flatpak
6d1773d2a54dde9b099043f07a2094a4f1c2f486
304,405,174,509,165,600,000,000,000,000,000,000,000
7
run: Convert all environment variables into bwrap arguments This avoids some of them being filtered out by a setuid bwrap. It also means that if they came from an untrusted source, they cannot be used to inject arbitrary code into a non-setuid bwrap via mechanisms like LD_PRELOAD. Because they get bundled into a memfd or temporary file, they do not actually appear in argv, ensuring that they remain inaccessible to processes running under a different uid (which is important if their values are tokens or other secrets). Signed-off-by: Simon McVittie <[email protected]> Part-of: https://github.com/flatpak/flatpak/security/advisories/GHSA-4ppf-fxf6-vxg2
void OSDService::agent_entry() { dout(10) << __func__ << " start" << dendl; agent_lock.Lock(); while (!agent_stop_flag) { if (agent_queue.empty()) { dout(20) << __func__ << " empty queue" << dendl; agent_cond.Wait(agent_lock); continue; } uint64_t level = agent_queue.rbegin()->first; set<PGRef>& top = agent_queue.rbegin()->second; dout(10) << __func__ << " tiers " << agent_queue.size() << ", top is " << level << " with pgs " << top.size() << ", ops " << agent_ops << "/" << cct->_conf->osd_agent_max_ops << (agent_active ? " active" : " NOT ACTIVE") << dendl; dout(20) << __func__ << " oids " << agent_oids << dendl; int max = cct->_conf->osd_agent_max_ops - agent_ops; int agent_flush_quota = max; if (!flush_mode_high_count) agent_flush_quota = cct->_conf->osd_agent_max_low_ops - agent_ops; if (agent_flush_quota <= 0 || top.empty() || !agent_active) { agent_cond.Wait(agent_lock); continue; } if (!agent_valid_iterator || agent_queue_pos == top.end()) { agent_queue_pos = top.begin(); agent_valid_iterator = true; } PGRef pg = *agent_queue_pos; dout(10) << "high_count " << flush_mode_high_count << " agent_ops " << agent_ops << " flush_quota " << agent_flush_quota << dendl; agent_lock.Unlock(); if (!pg->agent_work(max, agent_flush_quota)) { dout(10) << __func__ << " " << pg->get_pgid() << " no agent_work, delay for " << cct->_conf->osd_agent_delay_time << " seconds" << dendl; osd->logger->inc(l_osd_tier_delay); // Queue a timer to call agent_choose_mode for this pg in 5 seconds agent_timer_lock.Lock(); Context *cb = new AgentTimeoutCB(pg); agent_timer.add_event_after(cct->_conf->osd_agent_delay_time, cb); agent_timer_lock.Unlock(); } agent_lock.Lock(); } agent_lock.Unlock(); dout(10) << __func__ << " finish" << dendl; }
0
[ "CWE-287", "CWE-284" ]
ceph
5ead97120e07054d80623dada90a5cc764c28468
73,567,545,470,723,240,000,000,000,000,000,000,000
57
auth/cephx: add authorizer challenge Allow the accepting side of a connection to reject an initial authorizer with a random challenge. The connecting side then has to respond with an updated authorizer proving they are able to decrypt the service's challenge and that the new authorizer was produced for this specific connection instance. The accepting side requires this challenge and response unconditionally if the client side advertises they have the feature bit. Servers wishing to require this improved level of authentication simply have to require the appropriate feature. Signed-off-by: Sage Weil <[email protected]> (cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b) # Conflicts: # src/auth/Auth.h # src/auth/cephx/CephxProtocol.cc # src/auth/cephx/CephxProtocol.h # src/auth/none/AuthNoneProtocol.h # src/msg/Dispatcher.h # src/msg/async/AsyncConnection.cc - const_iterator - ::decode vs decode - AsyncConnection ctor arg noise - get_random_bytes(), not cct->random()
nautilus_file_invalidate_attributes (NautilusFile *file, NautilusFileAttributes file_attributes) { /* Cancel possible in-progress loads of any of these attributes */ nautilus_directory_cancel_loading_file_attributes (file->details->directory, file, file_attributes); /* Actually invalidate the values */ nautilus_file_invalidate_attributes_internal (file, file_attributes); nautilus_directory_add_file_to_work_queue (file->details->directory, file); /* Kick off I/O if necessary */ nautilus_directory_async_state_changed (file->details->directory); }
0
[]
nautilus
7632a3e13874a2c5e8988428ca913620a25df983
286,187,970,677,181,260,000,000,000,000,000,000,000
16
Check for trusted desktop file launchers. 2009-02-24 Alexander Larsson <[email protected]> * libnautilus-private/nautilus-directory-async.c: Check for trusted desktop file launchers. * libnautilus-private/nautilus-file-private.h: * libnautilus-private/nautilus-file.c: * libnautilus-private/nautilus-file.h: Add nautilus_file_is_trusted_link. Allow unsetting of custom display name. * libnautilus-private/nautilus-mime-actions.c: Display dialog when trying to launch a non-trusted desktop file. svn path=/trunk/; revision=15003
static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev, u16 phyIdReg0, u16 phyIdReg1) { enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN; u32 oui; u16 model; int i; if (phyIdReg0 == 0xffff) return result; if (phyIdReg1 == 0xffff) return result; /* oui is split between two registers */ oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; /* Scan table for this PHY */ for (i = 0; i < MAX_PHY_DEV_TYPES; i++) { if ((oui == PHY_DEVICES[i].phyIdOUI) && (model == PHY_DEVICES[i].phyIdModel)) { netdev_info(qdev->ndev, "Phy: %s\n", PHY_DEVICES[i].name); result = PHY_DEVICES[i].phyDevice; break; } } return result; }
0
[ "CWE-401" ]
linux
1acb8f2a7a9f10543868ddd737e37424d5c36cf4
7,524,858,427,995,455,000,000,000,000,000,000,000
32
net: qlogic: Fix memory leak in ql_alloc_large_buffers In ql_alloc_large_buffers, a new skb is allocated via netdev_alloc_skb. This skb should be released if pci_dma_mapping_error fails. Fixes: 0f8ab89e825f ("qla3xxx: Check return code from pci_map_single() in ql_release_to_lrg_buf_free_list(), ql_populate_free_queue(), ql_alloc_large_buffers(), and ql3xxx_send()") Signed-off-by: Navid Emamdoost <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static void gp_query_free(struct gp_query *q, bool free_buffer) { if (!q) { return; } if (free_buffer) { free(q->buffer); } free(q); }
0
[ "CWE-667" ]
gssproxy
cb761412e299ef907f22cd7c4146d50c8a792003
335,477,894,849,075,370,000,000,000,000,000,000,000
12
Unlock cond_mutex before pthread exit in gp_worker_main() Signed-off-by: GuiYao <[email protected]> [[email protected]: whitespace, tweak commit message] Reviewed-by: Robbie Harwood <[email protected]>
ex_put(exarg_T *eap) { /* ":0put" works like ":1put!". */ if (eap->line2 == 0) { eap->line2 = 1; eap->forceit = TRUE; } curwin->w_cursor.lnum = eap->line2; do_put(eap->regname, eap->forceit ? BACKWARD : FORWARD, 1L, PUT_LINE|PUT_CURSLINE); }
0
[ "CWE-78" ]
vim
8c62a08faf89663e5633dc5036cd8695c80f1075
321,405,320,342,203,200,000,000,000,000,000,000,000
12
patch 8.1.0881: can execute shell commands in rvim through interfaces Problem: Can execute shell commands in rvim through interfaces. Solution: Disable using interfaces in restricted mode. Allow for writing file with writefile(), histadd() and a few others.
Error HeifContext::add_exif_metadata(std::shared_ptr<Image> master_image, const void* data, int size) { // find location of TIFF header uint32_t offset = 0; const char * tiffmagic1 = "MM\0*"; const char * tiffmagic2 = "II*\0"; while (offset+4 < (unsigned int)size) { if (!memcmp( (uint8_t *) data + offset, tiffmagic1, 4 )) break; if (!memcmp( (uint8_t *) data + offset, tiffmagic2, 4 )) break; offset++; } if (offset >= (unsigned int)size) { return Error(heif_error_Usage_error, heif_suberror_Invalid_parameter_value, "Could not find location of TIFF header in Exif metadata."); } // create an infe box describing what kind of data we are storing (this also creates a new ID) auto metadata_infe_box = m_heif_file->add_new_infe_box("Exif"); metadata_infe_box->set_hidden_item(true); heif_item_id metadata_id = metadata_infe_box->get_item_ID(); // we assign this data to the image m_heif_file->add_iref_reference(metadata_id, fourcc("cdsc"), { master_image->get_id() }); // copy the Exif data into the file, store the pointer to it in an iloc box entry std::vector<uint8_t> data_array; data_array.resize(size+4); data_array[0] = (uint8_t) ((offset >> 24) & 0xFF); data_array[1] = (uint8_t) ((offset >> 16) & 0xFF); data_array[2] = (uint8_t) ((offset >> 8) & 0xFF); data_array[3] = (uint8_t) ((offset) & 0xFF); memcpy(data_array.data()+4, data, size); m_heif_file->append_iloc_data(metadata_id, data_array); return Error::Ok; }
0
[ "CWE-125" ]
libheif
f7399b62d7fbc596f1b2871578c1d2053bedf1dd
205,897,947,178,222,600,000,000,000,000,000,000,000
47
Handle case where referenced "iref" box doesn't exist (fixes #138).
verify_s4u2self_reply(krb5_context context, krb5_keyblock *subkey, krb5_pa_s4u_x509_user *req_s4u_user, krb5_pa_data **rep_padata, krb5_pa_data **enc_padata) { krb5_error_code code; krb5_pa_data *rep_s4u_padata, *enc_s4u_padata; krb5_pa_s4u_x509_user *rep_s4u_user = NULL; krb5_data data, *datap = NULL; krb5_keyusage usage; krb5_boolean valid; krb5_boolean not_newer; assert(req_s4u_user != NULL); switch (subkey->enctype) { case ENCTYPE_DES_CBC_CRC: case ENCTYPE_DES_CBC_MD4: case ENCTYPE_DES_CBC_MD5: case ENCTYPE_DES3_CBC_SHA1: case ENCTYPE_DES3_CBC_RAW: case ENCTYPE_ARCFOUR_HMAC: case ENCTYPE_ARCFOUR_HMAC_EXP : not_newer = TRUE; break; default: not_newer = FALSE; break; } enc_s4u_padata = krb5int_find_pa_data(context, enc_padata, KRB5_PADATA_S4U_X509_USER); /* XXX this will break newer enctypes with a MIT 1.7 KDC */ rep_s4u_padata = krb5int_find_pa_data(context, rep_padata, KRB5_PADATA_S4U_X509_USER); if (rep_s4u_padata == NULL) { if (not_newer == FALSE || enc_s4u_padata != NULL) return KRB5_KDCREP_MODIFIED; else return 0; } data.length = rep_s4u_padata->length; data.data = (char *)rep_s4u_padata->contents; code = decode_krb5_pa_s4u_x509_user(&data, &rep_s4u_user); if (code != 0) goto cleanup; if (rep_s4u_user->user_id.nonce != req_s4u_user->user_id.nonce) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } code = encode_krb5_s4u_userid(&rep_s4u_user->user_id, &datap); if (code != 0) goto cleanup; if (rep_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REPLY; else usage = KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST; code = krb5_c_verify_checksum(context, subkey, usage, datap, &rep_s4u_user->cksum, &valid); if (code != 0) goto cleanup; if (valid == FALSE) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } /* * KDCs that support KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE also return * S4U enc_padata for older (pre-AES) encryption types only. */ if (not_newer) { if (enc_s4u_padata == NULL) { if (rep_s4u_user->user_id.options & KRB5_S4U_OPTS_USE_REPLY_KEY_USAGE) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } } else { if (enc_s4u_padata->length != req_s4u_user->cksum.length + rep_s4u_user->cksum.length) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } if (memcmp(enc_s4u_padata->contents, req_s4u_user->cksum.contents, req_s4u_user->cksum.length) || memcmp(&enc_s4u_padata->contents[req_s4u_user->cksum.length], rep_s4u_user->cksum.contents, rep_s4u_user->cksum.length)) { code = KRB5_KDCREP_MODIFIED; goto cleanup; } } } else if (!krb5_c_is_keyed_cksum(rep_s4u_user->cksum.checksum_type)) { code = KRB5KRB_AP_ERR_INAPP_CKSUM; goto cleanup; } cleanup: krb5_free_pa_s4u_x509_user(context, rep_s4u_user); krb5_free_data(context, datap); return code; }
0
[ "CWE-617", "CWE-703" ]
krb5
5e6d1796106df8ba6bc1973ee0917c170d929086
204,909,687,527,899,050,000,000,000,000,000,000,000
114
Ignore password attributes for S4U2Self requests For consistency with Windows KDCs, allow protocol transition to work even if the password has expired or needs changing. Also, when looking up an enterprise principal with an AS request, treat ERR_KEY_EXP as confirmation that the client is present in the realm. [[email protected]: added comment in kdc_process_s4u2self_req(); edited commit message] ticket: 8763 (new) tags: pullup target_version: 1.17
set_no_hlsearch(int flag) { no_hlsearch = flag; # ifdef FEAT_EVAL set_vim_var_nr(VV_HLSEARCH, !no_hlsearch && p_hls); # endif }
0
[ "CWE-78" ]
vim
8c62a08faf89663e5633dc5036cd8695c80f1075
83,720,133,173,427,220,000,000,000,000,000,000,000
7
patch 8.1.0881: can execute shell commands in rvim through interfaces Problem: Can execute shell commands in rvim through interfaces. Solution: Disable using interfaces in restricted mode. Allow for writing file with writefile(), histadd() and a few others.
static int cgroup_freeze_show(struct seq_file *seq, void *v) { struct cgroup *cgrp = seq_css(seq)->cgroup; seq_printf(seq, "%d\n", cgrp->freezer.freeze); return 0; }
0
[ "CWE-416" ]
linux
a06247c6804f1a7c86a2e5398a4c1f1db1471848
293,191,535,410,307,500,000,000,000,000,000,000,000
8
psi: Fix uaf issue when psi trigger is destroyed while being polled With write operation on psi files replacing old trigger with a new one, the lifetime of its waitqueue is totally arbitrary. Overwriting an existing trigger causes its waitqueue to be freed and pending poll() will stumble on trigger->event_wait which was destroyed. Fix this by disallowing to redefine an existing psi trigger. If a write operation is used on a file descriptor with an already existing psi trigger, the operation will fail with EBUSY error. Also bypass a check for psi_disabled in the psi_trigger_destroy as the flag can be flipped after the trigger is created, leading to a memory leak. Fixes: 0e94682b73bf ("psi: introduce psi monitor") Reported-by: [email protected] Suggested-by: Linus Torvalds <[email protected]> Analyzed-by: Eric Biggers <[email protected]> Signed-off-by: Suren Baghdasaryan <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Reviewed-by: Eric Biggers <[email protected]> Acked-by: Johannes Weiner <[email protected]> Cc: [email protected] Link: https://lore.kernel.org/r/[email protected]
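A sketch of the EBUSY rule described in the message above — a second trigger definition on the same file descriptor is rejected instead of freeing the one that may still be polled; the types are hypothetical stand-ins for the kernel's psi structures.

#include <errno.h>
#include <stddef.h>

struct psi_handle {
    void *trigger;              /* NULL until a trigger has been armed */
};

static int psi_define_trigger(struct psi_handle *h, void *new_trigger)
{
    if (h->trigger != NULL)
        return -EBUSY;          /* redefining a live trigger is refused */
    h->trigger = new_trigger;
    return 0;
}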
TEST_P(RedirectIntegrationTest, InvalidRedirect) { useAccessLog("%RESPONSE_CODE% %RESPONSE_CODE_DETAILS% %RESP(test-header)%"); initialize(); redirect_response_.setLocation("invalid_url"); // Send the same request as above, only send an invalid URL as the response. // The request should not be redirected. codec_client_ = makeHttpConnection(lookupPort("http")); default_request_headers_.setHost("handle.internal.redirect"); auto response = sendRequestAndWaitForResponse(default_request_headers_, 0, redirect_response_, 0); EXPECT_EQ("302", response->headers().getStatusValue()); EXPECT_EQ( 1, test_server_->counter("cluster.cluster_0.upstream_internal_redirect_failed_total")->value()); EXPECT_EQ(1, test_server_->counter("http.config_test.downstream_rq_3xx")->value()); EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("302 via_upstream test-header-value\n")); EXPECT_EQ("test-header-value", response->headers().get(test_header_key_)[0]->value().getStringView()); }
0
[ "CWE-703" ]
envoy
18871dbfb168d3512a10c78dd267ff7c03f564c6
153,435,556,870,126,130,000,000,000,000,000,000,000
21
[1.18] CVE-2022-21655 Crash with direct_response Signed-off-by: Otto van der Schaaf <[email protected]>
struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user) { struct sk_buff *clone; struct net_device *dev = skb->dev; struct frag_hdr *fhdr; struct nf_ct_frag6_queue *fq; struct ipv6hdr *hdr; int fhoff, nhoff; u8 prevhdr; struct sk_buff *ret_skb = NULL; /* Jumbo payload inhibits frag. header */ if (ipv6_hdr(skb)->payload_len == 0) { pr_debug("payload len = 0\n"); return skb; } if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0) return skb; clone = skb_clone(skb, GFP_ATOMIC); if (clone == NULL) { pr_debug("Can't clone skb\n"); return skb; } NFCT_FRAG6_CB(clone)->orig = skb; if (!pskb_may_pull(clone, fhoff + sizeof(*fhdr))) { pr_debug("message is too short.\n"); goto ret_orig; } skb_set_transport_header(clone, fhoff); hdr = ipv6_hdr(clone); fhdr = (struct frag_hdr *)skb_transport_header(clone); if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh) nf_ct_frag6_evictor(); fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr); if (fq == NULL) { pr_debug("Can't find and can't create new queue\n"); goto ret_orig; } spin_lock_bh(&fq->q.lock); if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) { spin_unlock_bh(&fq->q.lock); pr_debug("Can't insert skb to queue\n"); fq_put(fq); goto ret_orig; } if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && fq->q.meat == fq->q.len) { ret_skb = nf_ct_frag6_reasm(fq, dev); if (ret_skb == NULL) pr_debug("Can't reassemble fragmented packets\n"); } spin_unlock_bh(&fq->q.lock); fq_put(fq); return ret_skb; ret_orig: kfree_skb(clone); return skb; }
0
[]
linux-2.6
9e2dcf72023d1447f09c47d77c99b0c49659e5ce
281,737,547,013,594,000,000,000,000,000,000,000,000
70
netfilter: nf_conntrack_reasm: properly handle packets fragmented into a single fragment When an ICMPV6_PKT_TOOBIG message is received with a MTU below 1280, all further packets include a fragment header. Unlike regular defragmentation, conntrack also needs to "reassemble" those fragments in order to obtain a packet without the fragment header for connection tracking. Currently nf_conntrack_reasm checks whether a fragment has either IP6_MF set or an offset != 0, which makes it ignore those fragments. Remove the invalid check and make reassembly handle fragment queues containing only a single fragment. Reported-and-tested-by: Ulrich Weber <[email protected]> Signed-off-by: Patrick McHardy <[email protected]>
static int cli_full_connection_state_destructor( struct cli_full_connection_state *s) { if (s->cli != NULL) { cli_shutdown(s->cli); s->cli = NULL; } return 0; }
0
[ "CWE-94" ]
samba
94295b7aa22d2544af5323bca70d3dcb97fd7c64
289,204,393,311,398,530,000,000,000,000,000,000,000
9
CVE-2016-2019: s3:libsmb: add comment regarding smbXcli_session_is_guest() with mandatory signing BUG: https://bugzilla.samba.org/show_bug.cgi?id=11860 Signed-off-by: Stefan Metzmacher <[email protected]>
static int ZEND_FASTCALL ZEND_IS_NOT_EQUAL_SPEC_CONST_CONST_HANDLER(ZEND_OPCODE_HANDLER_ARGS) { zend_op *opline = EX(opline); zval *result = &EX_T(opline->result.u.var).tmp_var; compare_function(result, &opline->op1.u.constant, &opline->op2.u.constant TSRMLS_CC); ZVAL_BOOL(result, (Z_LVAL_P(result) != 0)); ZEND_VM_NEXT_OPCODE(); }
0
[]
php-src
ce96fd6b0761d98353761bf78d5bfb55291179fd
271,887,936,884,949,670,000,000,000,000,000,000,000
14
- fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus
mprint(struct magic_set *ms, struct magic *m) { uint64_t v; float vf; double vd; int64_t t = 0; char buf[128], tbuf[26]; union VALUETYPE *p = &ms->ms_value; switch (m->type) { case FILE_BYTE: v = file_signextend(ms, m, (uint64_t)p->b); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%c", (unsigned char)v); if (file_printf(ms, F(m->desc, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(m->desc, "%c"), (unsigned char) v) == -1) return -1; break; } t = ms->offset + sizeof(char); break; case FILE_SHORT: case FILE_BESHORT: case FILE_LESHORT: v = file_signextend(ms, m, (uint64_t)p->h); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%hu", (unsigned short)v); if (file_printf(ms, F(m->desc, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(m->desc, "%hu"), (unsigned short) v) == -1) return -1; break; } t = ms->offset + sizeof(short); break; case FILE_LONG: case FILE_BELONG: case FILE_LELONG: case FILE_MELONG: v = file_signextend(ms, m, (uint64_t)p->l); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%u", (uint32_t)v); if (file_printf(ms, F(m->desc, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(m->desc, "%u"), (uint32_t) v) == -1) return -1; break; } t = ms->offset + sizeof(int32_t); break; case FILE_QUAD: case FILE_BEQUAD: case FILE_LEQUAD: v = file_signextend(ms, m, p->q); switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%llu", (unsigned long long)v); if (file_printf(ms, F(m->desc, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(m->desc, "%llu"), (unsigned long long) v) == -1) return -1; break; } t = ms->offset + sizeof(int64_t); break; case FILE_STRING: case FILE_PSTRING: case FILE_BESTRING16: case FILE_LESTRING16: if (m->reln == '=' || m->reln == '!') { if (file_printf(ms, F(m->desc, "%s"), m->value.s) == -1) return -1; t = ms->offset + m->vallen; } else { char *str = p->s; /* compute t before we mangle the string? 
*/ t = ms->offset + strlen(str); if (*m->value.s == '\0') str[strcspn(str, "\n")] = '\0'; if (m->str_flags & STRING_TRIM) { char *last; while (isspace((unsigned char)*str)) str++; last = str; while (*last) last++; --last; while (isspace((unsigned char)*last)) last--; *++last = '\0'; } if (file_printf(ms, F(m->desc, "%s"), str) == -1) return -1; if (m->type == FILE_PSTRING) t += file_pstring_length_size(m); } break; case FILE_DATE: case FILE_BEDATE: case FILE_LEDATE: case FILE_MEDATE: if (file_printf(ms, F(m->desc, "%s"), file_fmttime(p->l, FILE_T_LOCAL, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint32_t); break; case FILE_LDATE: case FILE_BELDATE: case FILE_LELDATE: case FILE_MELDATE: if (file_printf(ms, F(m->desc, "%s"), file_fmttime(p->l, 0, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint32_t); break; case FILE_QDATE: case FILE_BEQDATE: case FILE_LEQDATE: if (file_printf(ms, F(m->desc, "%s"), file_fmttime(p->q, FILE_T_LOCAL, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint64_t); break; case FILE_QLDATE: case FILE_BEQLDATE: case FILE_LEQLDATE: if (file_printf(ms, F(m->desc, "%s"), file_fmttime(p->q, 0, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint64_t); break; case FILE_QWDATE: case FILE_BEQWDATE: case FILE_LEQWDATE: if (file_printf(ms, F(m->desc, "%s"), file_fmttime(p->q, FILE_T_WINDOWS, tbuf)) == -1) return -1; t = ms->offset + sizeof(uint64_t); break; case FILE_FLOAT: case FILE_BEFLOAT: case FILE_LEFLOAT: vf = p->f; switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%g", vf); if (file_printf(ms, F(m->desc, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(m->desc, "%g"), vf) == -1) return -1; break; } t = ms->offset + sizeof(float); break; case FILE_DOUBLE: case FILE_BEDOUBLE: case FILE_LEDOUBLE: vd = p->d; switch (check_fmt(ms, m)) { case -1: return -1; case 1: (void)snprintf(buf, sizeof(buf), "%g", vd); if (file_printf(ms, F(m->desc, "%s"), buf) == -1) return -1; break; default: if (file_printf(ms, F(m->desc, "%g"), vd) == -1) return -1; break; } t = ms->offset + sizeof(double); break; case FILE_REGEX: { char *cp; int rval; cp = strndup((const char *)ms->search.s, ms->search.rm_len); if (cp == NULL) { file_oomem(ms, ms->search.rm_len); return -1; } rval = file_printf(ms, F(m->desc, "%s"), cp); free(cp); if (rval == -1) return -1; if ((m->str_flags & REGEX_OFFSET_START)) t = ms->search.offset; else t = ms->search.offset + ms->search.rm_len; break; } case FILE_SEARCH: if (file_printf(ms, F(m->desc, "%s"), m->value.s) == -1) return -1; if ((m->str_flags & REGEX_OFFSET_START)) t = ms->search.offset; else t = ms->search.offset + m->vallen; break; case FILE_DEFAULT: case FILE_CLEAR: if (file_printf(ms, "%s", m->desc) == -1) return -1; t = ms->offset; break; case FILE_INDIRECT: case FILE_USE: case FILE_NAME: t = ms->offset; break; default: file_magerror(ms, "invalid m->type (%d) in mprint()", m->type); return -1; } return (int32_t)t; }
0
[ "CWE-755" ]
file
3c081560c23f20b2985c285338b52c7aae9fdb0f
62,553,514,924,111,200,000,000,000,000,000,000,000
273
prevent infinite recursion.
cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, char *mount_data_global, const char *devname) { int rc; int xid; struct smb_vol *volume_info; struct cifsSesInfo *pSesInfo; struct cifsTconInfo *tcon; struct TCP_Server_Info *srvTcp; char *full_path; char *mount_data = mount_data_global; #ifdef CONFIG_CIFS_DFS_UPCALL struct dfs_info3_param *referrals = NULL; unsigned int num_referrals = 0; int referral_walks_count = 0; try_mount_again: #endif rc = 0; tcon = NULL; pSesInfo = NULL; srvTcp = NULL; full_path = NULL; xid = GetXid(); volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL); if (!volume_info) { rc = -ENOMEM; goto out; } if (cifs_parse_mount_options(mount_data, devname, volume_info)) { rc = -EINVAL; goto out; } if (volume_info->nullauth) { cFYI(1, "null user"); volume_info->username = ""; } else if (volume_info->username) { /* BB fixme parse for domain name here */ cFYI(1, "Username: %s", volume_info->username); } else { cifserror("No username specified"); /* In userspace mount helper we can get user name from alternate locations such as env variables and files on disk */ rc = -EINVAL; goto out; } /* this is needed for ASCII cp to Unicode converts */ if (volume_info->iocharset == NULL) { /* load_nls_default cannot return null */ volume_info->local_nls = load_nls_default(); } else { volume_info->local_nls = load_nls(volume_info->iocharset); if (volume_info->local_nls == NULL) { cERROR(1, "CIFS mount error: iocharset %s not found", volume_info->iocharset); rc = -ELIBACC; goto out; } } cifs_sb->local_nls = volume_info->local_nls; /* get a reference to a tcp session */ srvTcp = cifs_get_tcp_session(volume_info); if (IS_ERR(srvTcp)) { rc = PTR_ERR(srvTcp); goto out; } /* get a reference to a SMB session */ pSesInfo = cifs_get_smb_ses(srvTcp, volume_info); if (IS_ERR(pSesInfo)) { rc = PTR_ERR(pSesInfo); pSesInfo = NULL; goto mount_fail_check; } setup_cifs_sb(volume_info, cifs_sb); if (pSesInfo->capabilities & CAP_LARGE_FILES) sb->s_maxbytes = MAX_LFS_FILESIZE; else sb->s_maxbytes = MAX_NON_LFS; /* BB FIXME fix time_gran to be larger for LANMAN sessions */ sb->s_time_gran = 100; /* search for existing tcon to this server share */ tcon = cifs_get_tcon(pSesInfo, volume_info); if (IS_ERR(tcon)) { rc = PTR_ERR(tcon); tcon = NULL; goto remote_path_check; } cifs_sb->tcon = tcon; /* do not care if following two calls succeed - informational */ if (!tcon->ipc) { CIFSSMBQFSDeviceInfo(xid, tcon); CIFSSMBQFSAttributeInfo(xid, tcon); } /* tell server which Unix caps we support */ if (tcon->ses->capabilities & CAP_UNIX) /* reset of caps checks mount to see if unix extensions disabled for just this mount */ reset_cifs_unix_caps(xid, tcon, sb, volume_info); else tcon->unix_ext = 0; /* server does not support them */ /* convert forward to back slashes in prepath here if needed */ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0) convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb)); if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) { cifs_sb->rsize = 1024 * 127; cFYI(DBG2, "no very large read support, rsize now 127K"); } if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X)) cifs_sb->wsize = min(cifs_sb->wsize, (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)); if (!(tcon->ses->capabilities & CAP_LARGE_READ_X)) cifs_sb->rsize = min(cifs_sb->rsize, (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)); remote_path_check: /* check if a whole path (including prepath) is not remote */ if (!rc && cifs_sb->prepathlen && tcon) { /* build_path_to_root works only when we have a 
valid tcon */ full_path = cifs_build_path_to_root(cifs_sb); if (full_path == NULL) { rc = -ENOMEM; goto mount_fail_check; } rc = is_path_accessible(xid, tcon, cifs_sb, full_path); if (rc != -EREMOTE) { kfree(full_path); goto mount_fail_check; } kfree(full_path); } /* get referral if needed */ if (rc == -EREMOTE) { #ifdef CONFIG_CIFS_DFS_UPCALL if (referral_walks_count > MAX_NESTED_LINKS) { /* * BB: when we implement proper loop detection, * we will remove this check. But now we need it * to prevent an indefinite loop if 'DFS tree' is * misconfigured (i.e. has loops). */ rc = -ELOOP; goto mount_fail_check; } /* convert forward to back slashes in prepath here if needed */ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0) convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb)); full_path = build_unc_path_to_root(volume_info, cifs_sb); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto mount_fail_check; } cFYI(1, "Getting referral for: %s", full_path); rc = get_dfs_path(xid, pSesInfo , full_path + 1, cifs_sb->local_nls, &num_referrals, &referrals, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (!rc && num_referrals > 0) { char *fake_devname = NULL; if (mount_data != mount_data_global) kfree(mount_data); mount_data = cifs_compose_mount_options( cifs_sb->mountdata, full_path + 1, referrals, &fake_devname); free_dfs_info_array(referrals, num_referrals); kfree(fake_devname); kfree(full_path); if (IS_ERR(mount_data)) { rc = PTR_ERR(mount_data); mount_data = NULL; goto mount_fail_check; } if (tcon) cifs_put_tcon(tcon); else if (pSesInfo) cifs_put_smb_ses(pSesInfo); cleanup_volume_info(&volume_info); referral_walks_count++; FreeXid(xid); goto try_mount_again; } #else /* No DFS support, return error on mount */ rc = -EOPNOTSUPP; #endif } mount_fail_check: /* on error free sesinfo and tcon struct if needed */ if (rc) { if (mount_data != mount_data_global) kfree(mount_data); /* If find_unc succeeded then rc == 0 so we can not end */ /* up accidently freeing someone elses tcon struct */ if (tcon) cifs_put_tcon(tcon); else if (pSesInfo) cifs_put_smb_ses(pSesInfo); else cifs_put_tcp_session(srvTcp); goto out; } /* volume_info->password is freed above when existing session found (in which case it is not needed anymore) but when new sesion is created the password ptr is put in the new session structure (in which case the password will be freed at unmount time) */ out: /* zero out password before freeing */ cleanup_volume_info(&volume_info); FreeXid(xid); return rc; }
0
[ "CWE-284", "CWE-264" ]
linux
4ff67b720c02c36e54d55b88c2931879b7db1cd2
128,261,764,310,867,990,000,000,000,000,000,000,000
233
cifs: clean up cifs_find_smb_ses (try #2) This patch replaces the earlier patch by the same name. The only difference is that MAX_PASSWORD_SIZE has been increased to attempt to match the limits that windows enforces. Do a better job of matching sessions by authtype. Matching by username for a Kerberos session is incorrect, and anonymous sessions need special handling. Also, in the case where we do match by username, we also need to match by password. That ensures that someone else doesn't "borrow" an existing session without needing to know the password. Finally, passwords can be longer than 16 bytes. Bump MAX_PASSWORD_SIZE to 512 to match the size that the userspace mount helper allows. Signed-off-by: Jeff Layton <[email protected]> Signed-off-by: Steve French <[email protected]>
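The matching policy this message describes can be sketched in standalone C. The sketch below is illustrative only and is not the kernel's cifs_find_smb_ses(): every struct, field, and helper name is hypothetical, and where the real code keys Kerberos sessions on a credential identifier the sketch simply notes that in a comment. The point it demonstrates is the policy from the message: never mix auth types, treat anonymous sessions specially, and for password-based auth require both username and password to match before reusing a session.

/*
 * Illustrative sketch only -- not fs/cifs code. All names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum auth_type { AUTH_ANONYMOUS, AUTH_NTLMSSP, AUTH_KERBEROS };

struct smb_session {            /* hypothetical cached session */
    enum auth_type auth;
    char user[64];
    char password[512];         /* sized like the raised MAX_PASSWORD_SIZE */
};

struct mount_request {          /* hypothetical new mount parameters */
    enum auth_type auth;
    const char *user;
    const char *password;
};

static bool session_matches(const struct smb_session *ses,
                            const struct mount_request *req)
{
    if (ses->auth != req->auth)
        return false;           /* never mix auth types */

    switch (req->auth) {
    case AUTH_ANONYMOUS:
        return true;            /* anonymous only ever matches anonymous */
    case AUTH_KERBEROS:
        return true;            /* real code would compare a credential id here,
                                   not the username */
    default:
        /* password-based auth: require username AND password to match */
        return strcmp(ses->user, req->user) == 0 &&
               strcmp(ses->password, req->password) == 0;
    }
}

int main(void)
{
    struct smb_session cached = { AUTH_NTLMSSP, "alice", "secret" };
    struct mount_request req  = { AUTH_NTLMSSP, "alice", "wrong" };

    printf("reuse session: %s\n", session_matches(&cached, &req) ? "yes" : "no");
    return 0;
}

With a mismatched password the sketch refuses to reuse the cached session, which is exactly the "borrowing" the message wants to prevent.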
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
{
    if (xdp_prog) {
        struct xdp_buff xdp;
        u32 act;
        int err;

        /* run the generic XDP program and act on its verdict */
        act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
        if (act != XDP_PASS) {
            switch (act) {
            case XDP_REDIRECT:
                err = xdp_do_generic_redirect(skb->dev, skb,
                                              &xdp, xdp_prog);
                if (err)
                    goto out_redir;
                break;
            case XDP_TX:
                generic_xdp_tx(skb, xdp_prog);
                break;
            }
            return XDP_DROP;
        }
    }
    return XDP_PASS;
out_redir:
    /* redirect failed: drop the skb */
    kfree_skb(skb);
    return XDP_DROP;
}
0
[ "CWE-416" ]
linux
a4270d6795b0580287453ea55974d948393e66ef
338,282,723,578,482,760,000,000,000,000,000,000,000
28
net-gro: fix use-after-free read in napi_gro_frags() If a network driver provides to napi_gro_frags() an skb with a page fragment of exactly 14 bytes, the call to gro_pull_from_frag0() will 'consume' the fragment by calling skb_frag_unref(skb, 0), and the page might be freed and reused. Reading eth->h_proto at the end of napi_frags_skb() might read mangled data, or crash under specific debugging features. BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline] BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841 Read of size 2 at addr ffff88809366840c by task syz-executor599/8957 CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x172/0x1f0 lib/dump_stack.c:113 print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188 __kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317 kasan_report+0x12/0x20 mm/kasan/common.c:614 __asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142 napi_frags_skb net/core/dev.c:5833 [inline] napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841 tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991 tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037 call_write_iter include/linux/fs.h:1872 [inline] do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693 do_iter_write fs/read_write.c:970 [inline] do_iter_write+0x184/0x610 fs/read_write.c:951 vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015 do_writev+0x15b/0x330 fs/read_write.c:1058 Fixes: a50e233c50db ("net-gro: restore frag0 optimization") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: syzbot <[email protected]> Signed-off-by: David S. Miller <[email protected]>
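The hazard class described here is a read-after-consume: a header pointer aliases a page fragment, the fragment is released (and possibly reused), and a field of the header is read afterwards. The standalone userspace sketch below is not the net/core/dev.c code; it only illustrates that pattern and the copy-before-release discipline that avoids it. Buffer layout and values are made up for the example.

/*
 * Userspace illustration only -- not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct eth_hdr {
    uint8_t  dst[6];
    uint8_t  src[6];
    uint16_t proto;
} __attribute__((packed));      /* 14 bytes, like an Ethernet header */

int main(void)
{
    uint8_t *frag = malloc(sizeof(struct eth_hdr));
    if (!frag)
        return 1;
    memset(frag, 0, sizeof(struct eth_hdr));
    ((struct eth_hdr *)frag)->proto = 0x0800;   /* stand-in protocol value */

    const struct eth_hdr *eth = (const struct eth_hdr *)frag;

    /* SAFE: snapshot what we still need while the fragment is valid. */
    uint16_t proto = eth->proto;

    free(frag);   /* fragment "consumed"; in the kernel case the page may be reused */

    /*
     * BUGGY (do not do this): reading eth->proto here would be a
     * use-after-free, the same pattern KASAN flagged in napi_frags_skb().
     */

    printf("proto read before release: 0x%04x\n", proto);
    return 0;
}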
apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m, stream_ev_callback *on_resume,
                                            void *on_ctx)
{
    h2_stream *stream;
    int n, id;

    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
                  "h2_mplx(%ld): dispatch events", m->id);
    apr_atomic_set32(&m->event_pending, 0);

    /* update input windows for streams */
    h2_ihash_iter(m->streams, report_consumption_iter, m);
    purge_streams(m, 1);

    n = h2_ififo_count(m->readyq);
    while (n > 0 && (h2_ififo_try_pull(m->readyq, &id) == APR_SUCCESS)) {
        --n;
        stream = h2_ihash_get(m->streams, id);
        if (stream) {
            on_resume(on_ctx, stream);
        }
    }

    return APR_SUCCESS;
}
0
[ "CWE-444" ]
mod_h2
825de6a46027b2f4c30d7ff5a0c8b852d639c207
256,019,800,683,841,700,000,000,000,000,000,000,000
27
* Fixed keepalives counter on slave connections.
void KrecipesView::createShoppingListFromDiet( void )
{
	shoppingListPanel->createShopping( dietPanel->dietList() );
	slotSetPanel( ShoppingP );
}
0
[]
krecipes
cd1490fb5fe82cbe9172a43be13298001b446ecd
233,973,924,670,498,770,000,000,000,000,000,000,000
5
Use WebKit instead of KHTML for printing recipes, fixes sourceforge #2990118 and #2960140. svn path=/trunk/extragear/utils/krecipes/; revision=1137824
//! Return a reference to the element at \c offset, clamping the index into [0, size()-1].
T& _at(const int offset) {
  const unsigned int siz = (unsigned int)size();
  return (*this)[offset<0?0:(unsigned int)offset>=siz?siz - 1:offset];
}
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
233,418,845,464,528,870,000,000,000,000,000,000,000
4
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size.
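The check named in this message can be sketched independently of CImg: before trusting width/height fields read from a header, verify that the pixel data they imply actually fits in the bytes remaining in the file. The sketch below is not the load_bmp()/load_pandore() code; the header layout and the numbers in main() are made up, and the division-based comparison is just one overflow-safe way to phrase the bound.

/*
 * Sketch only -- not CImg code.
 */
#include <stdint.h>
#include <stdio.h>

static int dimensions_fit_file(uint64_t width, uint64_t height,
                               uint64_t bytes_per_pixel,
                               uint64_t data_offset, uint64_t file_size)
{
    uint64_t avail;

    if (width == 0 || height == 0 || bytes_per_pixel == 0)
        return 0;
    if (file_size < data_offset)
        return 0;
    avail = file_size - data_offset;

    /* divide instead of multiplying the attacker-controlled values,
       so the comparison itself cannot overflow */
    if (width > avail / bytes_per_pixel)
        return 0;
    if (height > (avail / bytes_per_pixel) / width)
        return 0;
    return 1;
}

int main(void)
{
    /* 100 x 100 x 3 bytes = 30000 bytes needed, but only 1024 are available */
    printf("%s\n", dimensions_fit_file(100, 100, 3, 54, 1078) ? "ok" : "reject");
    /* 16 x 16 x 3 bytes = 768 bytes needed, which fits */
    printf("%s\n", dimensions_fit_file(16, 16, 3, 54, 1078) ? "ok" : "reject");
    return 0;
}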
int wc_ecc_verify_hash_ex(mp_int *r, mp_int *s, const byte* hash, word32 hashlen, int* res, ecc_key* key) { int err; #ifdef WOLFSSL_ATECC508A byte sigRS[ATECC_KEY_SIZE*2]; #elif !defined(WOLFSSL_SP_MATH) int did_init = 0; ecc_point *mG = NULL, *mQ = NULL; mp_int v; mp_int w; mp_int u1; mp_int u2; mp_int* e; #if !defined(WOLFSSL_ASYNC_CRYPT) || !defined(HAVE_CAVIUM_V) mp_int e_lcl; #endif DECLARE_CURVE_SPECS(ECC_CURVE_FIELD_COUNT) #endif if (r == NULL || s == NULL || hash == NULL || res == NULL || key == NULL) return ECC_BAD_ARG_E; /* default to invalid signature */ *res = 0; /* is the IDX valid ? */ if (wc_ecc_is_valid_idx(key->idx) != 1) { return ECC_BAD_ARG_E; } #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_ECC) && \ defined(WOLFSSL_ASYNC_CRYPT_TEST) if (key->asyncDev.marker == WOLFSSL_ASYNC_MARKER_ECC) { if (wc_AsyncTestInit(&key->asyncDev, ASYNC_TEST_ECC_VERIFY)) { WC_ASYNC_TEST* testDev = &key->asyncDev.test; testDev->eccVerify.r = r; testDev->eccVerify.s = s; testDev->eccVerify.hash = hash; testDev->eccVerify.hashlen = hashlen; testDev->eccVerify.stat = res; testDev->eccVerify.key = key; return WC_PENDING_E; } } #endif #ifdef WOLFSSL_ATECC508A /* Extract R and S */ err = mp_to_unsigned_bin(r, &sigRS[0]); if (err != MP_OKAY) { return err; } err = mp_to_unsigned_bin(s, &sigRS[ATECC_KEY_SIZE]); if (err != MP_OKAY) { return err; } err = atcatls_verify(hash, sigRS, key->pubkey_raw, (bool*)res); if (err != ATCA_SUCCESS) { return BAD_COND_E; } #else /* checking if private key with no public part */ if (key->type == ECC_PRIVATEKEY_ONLY) { WOLFSSL_MSG("Verify called with private key, generating public part"); err = wc_ecc_make_pub_ex(key, NULL, NULL); if (err != MP_OKAY) { WOLFSSL_MSG("Unable to extract public key"); return err; } } #ifdef WOLFSSL_SP_MATH if (key->idx != ECC_CUSTOM_IDX && ecc_sets[key->idx].id == ECC_SECP256R1) { return sp_ecc_verify_256(hash, hashlen, key->pubkey.x, key->pubkey.y, key->pubkey.z, r, s, res, key->heap); } else return WC_KEY_SIZE_E; #else #ifdef WOLFSSL_HAVE_SP_ECC #ifndef WOLFSSL_SP_NO_256 #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_ECC) && \ defined(WOLFSSL_ASYNC_CRYPT_TEST) if (key->asyncDev.marker != WOLFSSL_ASYNC_MARKER_ECC) #endif { if (key->idx != ECC_CUSTOM_IDX && ecc_sets[key->idx].id == ECC_SECP256R1) return sp_ecc_verify_256(hash, hashlen, key->pubkey.x, key->pubkey.y, key->pubkey.z,r, s, res, key->heap); } #endif #endif #if defined(WOLFSSL_ASYNC_CRYPT) && defined(HAVE_CAVIUM_V) err = wc_ecc_alloc_mpint(key, &key->e); if (err != 0) return err; e = key->e; #else e = &e_lcl; #endif err = mp_init(e); if (err != MP_OKAY) return MEMORY_E; /* read in the specs for this curve */ err = wc_ecc_curve_load(key->dp, &curve, ECC_CURVE_FIELD_ALL); /* check for zero */ if (err == MP_OKAY) { if (mp_iszero(r) == MP_YES || mp_iszero(s) == MP_YES || mp_cmp(r, curve->order) != MP_LT || mp_cmp(s, curve->order) != MP_LT) { err = MP_ZERO_E; } } /* read hash */ if (err == MP_OKAY) { /* we may need to truncate if hash is longer than key size */ unsigned int orderBits = mp_count_bits(curve->order); /* truncate down to byte size, may be all that's needed */ if ( (WOLFSSL_BIT_SIZE * hashlen) > orderBits) hashlen = (orderBits + WOLFSSL_BIT_SIZE - 1) / WOLFSSL_BIT_SIZE; err = mp_read_unsigned_bin(e, hash, hashlen); /* may still need bit truncation too */ if (err == MP_OKAY && (WOLFSSL_BIT_SIZE * hashlen) > orderBits) mp_rshb(e, WOLFSSL_BIT_SIZE - (orderBits & 0x7)); } /* check for async hardware acceleration */ #if defined(WOLFSSL_ASYNC_CRYPT) && 
defined(WC_ASYNC_ENABLE_ECC) if (key->asyncDev.marker == WOLFSSL_ASYNC_MARKER_ECC) { #if defined(HAVE_CAVIUM_V) || defined(HAVE_INTEL_QA) #ifdef HAVE_CAVIUM_V if (NitroxEccIsCurveSupported(key)) #endif { word32 keySz = key->dp->size; err = wc_mp_to_bigint_sz(e, &e->raw, keySz); if (err == MP_OKAY) err = wc_mp_to_bigint_sz(key->pubkey.x, &key->pubkey.x->raw, keySz); if (err == MP_OKAY) err = wc_mp_to_bigint_sz(key->pubkey.y, &key->pubkey.y->raw, keySz); if (err == MP_OKAY) #ifdef HAVE_CAVIUM_V err = NitroxEcdsaVerify(key, &e->raw, &key->pubkey.x->raw, &key->pubkey.y->raw, &r->raw, &s->raw, &curve->prime->raw, &curve->order->raw, res); #else err = IntelQaEcdsaVerify(&key->asyncDev, &e->raw, &key->pubkey.x->raw, &key->pubkey.y->raw, &r->raw, &s->raw, &curve->Af->raw, &curve->Bf->raw, &curve->prime->raw, &curve->order->raw, &curve->Gx->raw, &curve->Gy->raw, res); #endif #ifndef HAVE_CAVIUM_V mp_clear(e); #endif wc_ecc_curve_free(curve); return err; } #endif /* HAVE_CAVIUM_V || HAVE_INTEL_QA */ } #endif /* WOLFSSL_ASYNC_CRYPT */ /* allocate ints */ if (err == MP_OKAY) { if ((err = mp_init_multi(&v, &w, &u1, &u2, NULL, NULL)) != MP_OKAY) { err = MEMORY_E; } did_init = 1; } /* allocate points */ if (err == MP_OKAY) { mG = wc_ecc_new_point_h(key->heap); mQ = wc_ecc_new_point_h(key->heap); if (mQ == NULL || mG == NULL) err = MEMORY_E; } /* w = s^-1 mod n */ if (err == MP_OKAY) err = mp_invmod(s, curve->order, &w); /* u1 = ew */ if (err == MP_OKAY) err = mp_mulmod(e, &w, curve->order, &u1); /* u2 = rw */ if (err == MP_OKAY) err = mp_mulmod(r, &w, curve->order, &u2); /* find mG and mQ */ if (err == MP_OKAY) err = mp_copy(curve->Gx, mG->x); if (err == MP_OKAY) err = mp_copy(curve->Gy, mG->y); if (err == MP_OKAY) err = mp_set(mG->z, 1); if (err == MP_OKAY) err = mp_copy(key->pubkey.x, mQ->x); if (err == MP_OKAY) err = mp_copy(key->pubkey.y, mQ->y); if (err == MP_OKAY) err = mp_copy(key->pubkey.z, mQ->z); #ifdef FREESCALE_LTC_ECC /* use PKHA to compute u1*mG + u2*mQ */ if (err == MP_OKAY) err = wc_ecc_mulmod_ex(&u1, mG, mG, curve->Af, curve->prime, 0, key->heap); if (err == MP_OKAY) err = wc_ecc_mulmod_ex(&u2, mQ, mQ, curve->Af, curve->prime, 0, key->heap); if (err == MP_OKAY) err = wc_ecc_point_add(mG, mQ, mG, curve->prime); #else #ifndef ECC_SHAMIR { mp_digit mp = 0; /* compute u1*mG + u2*mQ = mG */ if (err == MP_OKAY) { err = wc_ecc_mulmod_ex(&u1, mG, mG, curve->Af, curve->prime, 0, key->heap); } if (err == MP_OKAY) { err = wc_ecc_mulmod_ex(&u2, mQ, mQ, curve->Af, curve->prime, 0, key->heap); } /* find the montgomery mp */ if (err == MP_OKAY) err = mp_montgomery_setup(curve->prime, &mp); /* add them */ if (err == MP_OKAY) err = ecc_projective_add_point(mQ, mG, mG, curve->Af, curve->prime, mp); /* reduce */ if (err == MP_OKAY) err = ecc_map(mG, curve->prime, mp); } #else /* use Shamir's trick to compute u1*mG + u2*mQ using half the doubles */ if (err == MP_OKAY) { err = ecc_mul2add(mG, &u1, mQ, &u2, mG, curve->Af, curve->prime, key->heap); } #endif /* ECC_SHAMIR */ #endif /* FREESCALE_LTC_ECC */ /* v = X_x1 mod n */ if (err == MP_OKAY) err = mp_mod(mG->x, curve->order, &v); /* does v == r */ if (err == MP_OKAY) { if (mp_cmp(&v, r) == MP_EQ) *res = 1; } /* cleanup */ wc_ecc_del_point_h(mG, key->heap); wc_ecc_del_point_h(mQ, key->heap); mp_clear(e); if (did_init) { mp_clear(&v); mp_clear(&w); mp_clear(&u1); mp_clear(&u2); } wc_ecc_curve_free(curve); #endif /* WOLFSSL_SP_MATH */ #endif /* WOLFSSL_ATECC508A */ return err; }
0
[ "CWE-200" ]
wolfssl
9b9568d500f31f964af26ba8d01e542e1f27e5ca
35,211,831,911,074,315,000,000,000,000,000,000,000
291
Change ECDSA signing to use blinding.
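The technique named here, scalar blinding in ECDSA signing, rests on a simple algebraic identity: with a random blind b, computing s = (b*k)^-1 * (b*(e + r*d)) mod n gives the same result as the unblinded k^-1 * (e + r*d) mod n, while the modular inversion never operates on the secret nonce k directly. The toy program below only checks that identity over a 64-bit prime; it is not wolfSSL code, the numbers are far too small for real cryptography, and it assumes a compiler with unsigned __int128 (GCC/Clang).

/*
 * Toy demonstration of the blinding identity -- not wolfSSL code.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t n)
{
    return (unsigned __int128)a * b % n;
}

static uint64_t powmod(uint64_t a, uint64_t e, uint64_t n)
{
    uint64_t r = 1;
    a %= n;
    while (e) {
        if (e & 1)
            r = mulmod(r, a, n);
        a = mulmod(a, a, n);
        e >>= 1;
    }
    return r;
}

static uint64_t invmod(uint64_t a, uint64_t n)   /* n prime: Fermat's little theorem */
{
    return powmod(a, n - 2, n);
}

int main(void)
{
    const uint64_t n = 0xffffffffffffffc5ULL;  /* largest 64-bit prime, as a toy order */
    uint64_t d = 123456789, e = 987654321;     /* "private key" and hashed message */
    uint64_t k = 1111111111, r = 222222222;    /* nonce and x-coordinate stand-ins */
    uint64_t b = 333333333;                    /* random blinding factor */

    /* e + r*d mod n (constants are small enough that the addition cannot wrap) */
    uint64_t t = (mulmod(r, d, n) + e) % n;

    uint64_t s_plain = mulmod(invmod(k, n), t, n);
    uint64_t s_blind = mulmod(invmod(mulmod(b, k, n), n), mulmod(b, t, n), n);

    printf("plain   s = %llu\n", (unsigned long long)s_plain);
    printf("blinded s = %llu\n", (unsigned long long)s_blind);
    printf("equal: %s\n", s_plain == s_blind ? "yes" : "no");
    return 0;
}

The two signatures agree, so the blinded computation changes only which intermediate values depend on the secret, not the result.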
onig_compile(regex_t* reg, const UChar* pattern, const UChar* pattern_end, OnigErrorInfo* einfo) { int r; Node* root; ScanEnv scan_env; #ifdef USE_CALL UnsetAddrList uslist = {0}; #endif root = 0; if (IS_NOT_NULL(einfo)) { einfo->enc = reg->enc; einfo->par = (UChar* )NULL; } #ifdef ONIG_DEBUG fprintf(DBGFP, "\nPATTERN: /"); print_enc_string(DBGFP, reg->enc, pattern, pattern_end); #endif if (reg->ops_alloc == 0) { r = ops_init(reg, OPS_INIT_SIZE); if (r != 0) goto end; } else reg->ops_used = 0; r = onig_parse_tree(&root, pattern, pattern_end, reg, &scan_env); if (r != 0) goto err; r = reduce_string_list(root, reg->enc); if (r != 0) goto err; /* mixed use named group and no-named group */ if (scan_env.num_named > 0 && IS_SYNTAX_BV(scan_env.syntax, ONIG_SYN_CAPTURE_ONLY_NAMED_GROUP) && ! OPTON_CAPTURE_GROUP(reg->options)) { if (scan_env.num_named != scan_env.num_mem) r = disable_noname_group_capture(&root, reg, &scan_env); else r = numbered_ref_check(root); if (r != 0) goto err; } r = check_backrefs(root, &scan_env); if (r != 0) goto err; #ifdef USE_CALL if (scan_env.num_call > 0) { r = unset_addr_list_init(&uslist, scan_env.num_call); if (r != 0) goto err; scan_env.unset_addr_list = &uslist; r = tune_call(root, &scan_env, 0); if (r != 0) goto err_unset; r = tune_call2(root); if (r != 0) goto err_unset; r = recursive_call_check_trav(root, &scan_env, 0); if (r < 0) goto err_unset; r = infinite_recursive_call_check_trav(root, &scan_env); if (r != 0) goto err_unset; tune_called_state(root, 0); } reg->num_call = scan_env.num_call; #endif #ifdef ONIG_DEBUG_PARSE fprintf(DBGFP, "MAX PARSE DEPTH: %d\n", scan_env.max_parse_depth); fprintf(DBGFP, "TREE (parsed)\n"); print_tree(DBGFP, root); fprintf(DBGFP, "\n"); #endif r = tune_tree(root, reg, 0, &scan_env); if (r != 0) goto err_unset; if (scan_env.backref_num != 0) { set_parent_node_trav(root, NULL_NODE); r = set_empty_repeat_node_trav(root, NULL_NODE, &scan_env); if (r != 0) goto err_unset; set_empty_status_check_trav(root, &scan_env); } #ifdef ONIG_DEBUG_PARSE fprintf(DBGFP, "TREE (after tune)\n"); print_tree(DBGFP, root); fprintf(DBGFP, "\n"); #endif reg->capture_history = scan_env.cap_history; reg->push_mem_start = scan_env.backtrack_mem | scan_env.cap_history; #ifdef USE_CALLOUT if (IS_NOT_NULL(reg->extp) && reg->extp->callout_num != 0) { reg->push_mem_end = reg->push_mem_start; } else { if (MEM_STATUS_IS_ALL_ON(reg->push_mem_start)) reg->push_mem_end = scan_env.backrefed_mem | scan_env.cap_history; else reg->push_mem_end = reg->push_mem_start & (scan_env.backrefed_mem | scan_env.cap_history); } #else if (MEM_STATUS_IS_ALL_ON(reg->push_mem_start)) reg->push_mem_end = scan_env.backrefed_mem | scan_env.cap_history; else reg->push_mem_end = reg->push_mem_start & (scan_env.backrefed_mem | scan_env.cap_history); #endif clear_optimize_info(reg); #ifndef ONIG_DONT_OPTIMIZE r = set_optimize_info_from_tree(root, reg, &scan_env); if (r != 0) goto err_unset; #endif if (IS_NOT_NULL(scan_env.mem_env_dynamic)) { xfree(scan_env.mem_env_dynamic); scan_env.mem_env_dynamic = (MemEnv* )NULL; } r = compile_tree(root, reg, &scan_env); if (r == 0) { if (scan_env.keep_num > 0) { r = add_op(reg, OP_UPDATE_VAR); if (r != 0) goto err; COP(reg)->update_var.type = UPDATE_VAR_KEEP_FROM_STACK_LAST; COP(reg)->update_var.id = 0; /* not used */ COP(reg)->update_var.clear = FALSE; } r = add_op(reg, OP_END); if (r != 0) goto err; #ifdef USE_CALL if (scan_env.num_call > 0) { r = fix_unset_addr_list(&uslist, reg); unset_addr_list_end(&uslist); if (r != 0) goto err; } #endif 
set_addr_in_repeat_range(reg); if ((reg->push_mem_end != 0) #ifdef USE_REPEAT_AND_EMPTY_CHECK_LOCAL_VAR || (reg->num_repeat != 0) || (reg->num_empty_check != 0) #endif #ifdef USE_CALLOUT || (IS_NOT_NULL(reg->extp) && reg->extp->callout_num != 0) #endif #ifdef USE_CALL || scan_env.num_call > 0 #endif ) reg->stack_pop_level = STACK_POP_LEVEL_ALL; else { if (reg->push_mem_start != 0) reg->stack_pop_level = STACK_POP_LEVEL_MEM_START; else reg->stack_pop_level = STACK_POP_LEVEL_FREE; } r = ops_make_string_pool(reg); if (r != 0) goto err; } #ifdef USE_CALL else if (scan_env.num_call > 0) { unset_addr_list_end(&uslist); } #endif onig_node_free(root); #ifdef ONIG_DEBUG_COMPILE onig_print_names(DBGFP, reg); onig_print_compiled_byte_code_list(DBGFP, reg); #endif #ifdef USE_DIRECT_THREADED_CODE /* opcode -> opaddr */ onig_init_for_match_at(reg); #endif end: return r; err_unset: #ifdef USE_CALL if (scan_env.num_call > 0) { unset_addr_list_end(&uslist); } #endif err: if (IS_NOT_NULL(scan_env.error)) { if (IS_NOT_NULL(einfo)) { einfo->par = scan_env.error; einfo->par_end = scan_env.error_end; } } onig_node_free(root); if (IS_NOT_NULL(scan_env.mem_env_dynamic)) xfree(scan_env.mem_env_dynamic); return r; }
0
[ "CWE-787" ]
oniguruma
cbe9f8bd9cfc6c3c87a60fbae58fa1a85db59df0
296,941,457,804,526,460,000,000,000,000,000,000,000
211
#207: Out-of-bounds write
MagickExport Image *PolynomialImage(const Image *images, const size_t number_terms,const double *terms,ExceptionInfo *exception) { #define PolynomialImageTag "Polynomial/Image" CacheView *polynomial_view; Image *image; MagickBooleanType status; MagickOffsetType progress; PixelChannels **magick_restrict polynomial_pixels; size_t number_images; ssize_t y; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImageCanvas(images,exception); if (image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { image=DestroyImage(image); return((Image *) NULL); } number_images=GetImageListLength(images); polynomial_pixels=AcquirePixelThreadSet(images); if (polynomial_pixels == (PixelChannels **) NULL) { image=DestroyImage(image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Polynomial image pixels. */ status=MagickTrue; progress=0; polynomial_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); ssize_t i, x; PixelChannels *polynomial_pixel; Quantum *magick_restrict q; ssize_t j; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } polynomial_pixel=polynomial_pixels[id]; for (j=0; j < (ssize_t) image->columns; j++) for (i=0; i < MaxPixelChannels; i++) polynomial_pixel[j].channel[i]=0.0; next=images; for (j=0; j < (ssize_t) number_images; j++) { const Quantum *p; if (j >= (ssize_t) number_terms) continue; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); break; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(next); i++) { MagickRealType coefficient, degree; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(next,channel); PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (polynomial_traits == UndefinedPixelTrait)) continue; if ((traits & UpdatePixelTrait) == 0) continue; coefficient=(MagickRealType) terms[2*j]; degree=(MagickRealType) terms[(j << 1)+1]; polynomial_pixel[x].channel[i]+=coefficient* pow(QuantumScale*GetPixelChannel(image,channel,p),degree); } p+=GetPixelChannels(next); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]); } q+=GetPixelChannels(image); } if 
(SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(images,PolynomialImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } polynomial_view=DestroyCacheView(polynomial_view); polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels); if (status == MagickFalse) image=DestroyImage(image); return(image); }
0
[]
ImageMagick
4717744e4bb27de8ea978e51c6d5bcddf62ffe49
118,572,384,476,724,950,000,000,000,000,000,000,000
180
https://github.com/ImageMagick/ImageMagick/issues/3332