Dataset columns (name, type, min – max as shown by the viewer):

func        stringlengths    0 – 484k
target      int64            0 – 1
cwe         listlengths      0 – 4
project     stringclasses    799 values
commit_id   stringlengths    40 – 40
hash        float64          1,215,700,430,453,689,100,000,000B – 340,281,914,521,452,260,000,000,000,000B
size        int64            1 – 24k
message     stringlengths    0 – 13.3k
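Each block below is one record: the function source, then its labeled fields. As a minimal sketch of what one record carries (our own struct and comments, inferred from the column stats above, not an official schema), in C:

    #include <stdint.h>

    /* Hypothetical model of one record; names mirror the columns above. */
    struct record {
        const char *func;          /* function source text */
        int64_t     target;        /* vulnerability label, 0 or 1 */
        const char *cwe[4];        /* up to 4 CWE ids, e.g. "CWE-190" */
        const char *project;      /* e.g. "php-src"; 799 distinct values */
        char        commit_id[41]; /* 40-hex-char git commit id + NUL */
        double      hash;          /* stored as float64 per the stats */
        int64_t     size;          /* 1 to 24k */
        const char *message;       /* commit message, up to 13.3k chars */
    };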
static zend_never_inline void ZEND_FASTCALL _zend_hash_iterators_remove(HashTable *ht) { HashTableIterator *iter = EG(ht_iterators); HashTableIterator *end = iter + EG(ht_iterators_used); while (iter != end) { if (iter->ht == ht) { iter->ht = HT_POISONED_PTR; } iter++; } }
target: 0
cwe: [ "CWE-190" ]
project: php-src
commit_id: 4cc0286f2f3780abc6084bcdae5dce595daa3c12
hash: 36,122,030,635,507,343,000,000,000,000,000,000,000
size: 12
Fix #73832 - leave the table in a safe state if the size is too big.
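The CWE-190 tag and this message point at an overflow-prone resize path. A generic sketch of the "leave the table in a safe state" idea (ours, not PHP's actual patch): validate the new size and allocate before mutating any table fields, so failure leaves the old table intact.

    #include <stdint.h>
    #include <stdlib.h>

    struct table { void **slots; size_t cap; };

    /* Illustrative only: guard the doubling against overflow (CWE-190)
     * and commit new state only after every fallible step succeeded. */
    static int table_grow(struct table *t)
    {
        if (t->cap > SIZE_MAX / 2 / sizeof(void *))
            return -1;                 /* too big: table left untouched */
        size_t ncap = t->cap ? t->cap * 2 : 8;
        void **p = realloc(t->slots, ncap * sizeof(void *));
        if (p == NULL)
            return -1;                 /* alloc failed: table still valid */
        t->slots = p;
        t->cap = ncap;
        return 0;
    }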
static struct snd_seq_queue *queue_list_remove(int id, int client) { struct snd_seq_queue *q; unsigned long flags; spin_lock_irqsave(&queue_list_lock, flags); q = queue_list[id]; if (q) { spin_lock(&q->owner_lock); if (q->owner == client) { /* found */ q->klocked = 1; spin_unlock(&q->owner_lock); queue_list[id] = NULL; num_queues--; spin_unlock_irqrestore(&queue_list_lock, flags); return q; } spin_unlock(&q->owner_lock); } spin_unlock_irqrestore(&queue_list_lock, flags); return NULL; }
target: 0
cwe: [ "CWE-362" ]
project: linux
commit_id: 3567eb6af614dac436c4b16a8d426f9faed639b3
hash: 101,038,602,837,664,010,000,000,000,000,000,000,000
size: 23
ALSA: seq: Fix race at timer setup and close ALSA sequencer code has an open race between the timer setup ioctl and the close of the client. This was triggered by syzkaller fuzzer, and a use-after-free was caught there as a result. This patch papers over it by adding a proper queue->timer_mutex lock around the timer-related calls in the relevant code path. Reported-by: Dmitry Vyukov <[email protected]> Tested-by: Dmitry Vyukov <[email protected]> Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
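The fix described is lock-widening: a per-queue timer_mutex makes timer setup and client close mutually exclusive. A user-space analogue with pthreads (names ours, not the kernel's):

    #include <pthread.h>

    struct seq_timer;                   /* opaque for this sketch */

    struct seq_queue {
        pthread_mutex_t   timer_mutex;  /* analogue of queue->timer_mutex */
        struct seq_timer *timer;
    };

    /* Both paths take the same mutex, so neither can observe the
     * other's half-finished state (CWE-362). */
    static void queue_timer_set(struct seq_queue *q, struct seq_timer *t)
    {
        pthread_mutex_lock(&q->timer_mutex);
        q->timer = t;
        pthread_mutex_unlock(&q->timer_mutex);
    }

    static struct seq_timer *queue_timer_take(struct seq_queue *q)
    {
        pthread_mutex_lock(&q->timer_mutex);
        struct seq_timer *t = q->timer;
        q->timer = NULL;
        pthread_mutex_unlock(&q->timer_mutex);
        return t;
    }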
BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, BaseType_t * pxHigherPriorityTaskWoken ) { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; UBaseType_t uxSavedInterruptStatus; configASSERT( pxStreamBuffer ); uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) { ( void ) xTaskNotifyFromISR( ( pxStreamBuffer )->xTaskWaitingToReceive, ( uint32_t ) 0, eNoAction, pxHigherPriorityTaskWoken ); ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; xReturn = pdTRUE; } else { xReturn = pdFALSE; } } portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xReturn; }
target: 0
cwe: [ "CWE-190" ]
project: FreeRTOS-Kernel
commit_id: d05b9c123f2bf9090bce386a244fc934ae44db5b
hash: 74,654,353,282,095,520,000,000,000,000,000,000,000
size: 29
Add addition overflow check for stream buffer (#226)
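The message names the exact pattern: check that an unsigned addition cannot wrap before trusting the sum. A self-contained version of such a check (ours; the actual patch applies it to the stream-buffer sizes):

    #include <stdbool.h>
    #include <stddef.h>

    /* Unsigned additions wrap silently in C, so a + b < a detects
     * overflow (CWE-190); callers reject the request instead of
     * using the wrapped sum. */
    static bool add_overflows(size_t a, size_t b)
    {
        return a + b < a;
    }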
int SN_Client_Unsubscribe(MqttClient *client, SN_Unsubscribe *unsubscribe) { int rc, len; /* Validate required arguments */ if (client == NULL || unsubscribe == NULL) { return MQTT_CODE_ERROR_BAD_ARG; } if (unsubscribe->stat == MQTT_MSG_BEGIN) { #ifdef WOLFMQTT_MULTITHREAD /* Lock send socket mutex */ rc = wm_SemLock(&client->lockSend); if (rc != 0) { return rc; } #endif /* Encode the subscribe packet */ rc = SN_Encode_Unsubscribe(client->tx_buf, client->tx_buf_len, unsubscribe); #ifdef WOLFMQTT_DEBUG_CLIENT PRINTF("MqttClient_EncodePacket: Len %d, Type %s (%d)", rc, SN_Packet_TypeDesc(SN_MSG_TYPE_UNSUBSCRIBE), SN_MSG_TYPE_UNSUBSCRIBE); #endif if (rc <= 0) { #ifdef WOLFMQTT_MULTITHREAD wm_SemUnlock(&client->lockSend); #endif return rc; } len = rc; #ifdef WOLFMQTT_MULTITHREAD rc = wm_SemLock(&client->lockClient); if (rc == 0) { /* inform other threads of expected response */ rc = MqttClient_RespList_Add(client, (MqttPacketType)SN_MSG_TYPE_UNSUBACK, 0, &unsubscribe->pendResp, &unsubscribe->ack); wm_SemUnlock(&client->lockClient); } if (rc != 0) { wm_SemUnlock(&client->lockSend); return rc; /* Error locking client */ } #endif /* Send unsubscribe packet */ rc = MqttPacket_Write(client, client->tx_buf, len); #ifdef WOLFMQTT_MULTITHREAD wm_SemUnlock(&client->lockSend); #endif if (rc != len) { #ifdef WOLFMQTT_MULTITHREAD if (wm_SemLock(&client->lockClient) == 0) { MqttClient_RespList_Remove(client, &unsubscribe->pendResp); wm_SemUnlock(&client->lockClient); } #endif } unsubscribe->stat = MQTT_MSG_WAIT; } /* Wait for unsubscribe ack packet */ rc = SN_Client_WaitType(client, &unsubscribe->ack, SN_MSG_TYPE_UNSUBACK, unsubscribe->packet_id, client->cmd_timeout_ms); #ifdef WOLFMQTT_NONBLOCK if (rc == MQTT_CODE_CONTINUE) return rc; #endif #ifdef WOLFMQTT_MULTITHREAD if (wm_SemLock(&client->lockClient) == 0) { MqttClient_RespList_Remove(client, &unsubscribe->pendResp); wm_SemUnlock(&client->lockClient); } #endif /* reset state */ unsubscribe->stat = MQTT_MSG_BEGIN; return rc; }
target: 0
cwe: [ "CWE-787" ]
project: wolfMQTT
commit_id: 84d4b53122e0fa0280c7872350b89d5777dabbb2
hash: 39,558,803,972,797,633,000,000,000,000,000,000,000
size: 85
Fix wolfmqtt-fuzzer: Null-dereference WRITE in MqttProps_Free
char *ndpi_get_proto_name(struct ndpi_detection_module_struct *ndpi_str, u_int16_t proto_id) { if((proto_id >= ndpi_str->ndpi_num_supported_protocols) || (proto_id >= (NDPI_MAX_SUPPORTED_PROTOCOLS + NDPI_MAX_NUM_CUSTOM_PROTOCOLS)) || (ndpi_str->proto_defaults[proto_id].protoName == NULL)) proto_id = NDPI_PROTOCOL_UNKNOWN; return(ndpi_str->proto_defaults[proto_id].protoName); }
target: 0
cwe: [ "CWE-416", "CWE-787" ]
project: nDPI
commit_id: 6a9f5e4f7c3fd5ddab3e6727b071904d76773952
hash: 8,907,373,266,710,657,000,000,000,000,000,000,000
size: 9
Fixed use after free caused by dangling pointer * This fix also improved RCE Injection detection Signed-off-by: Toni Uhlig <[email protected]>
winlink_clear_flags(struct winlink *wl) { struct winlink *loop; wl->window->flags &= ~WINDOW_ALERTFLAGS; TAILQ_FOREACH(loop, &wl->window->winlinks, wentry) { if ((loop->flags & WINLINK_ALERTFLAGS) != 0) { loop->flags &= ~WINLINK_ALERTFLAGS; server_status_session(loop->session); } } }
target: 0
cwe: []
project: src
commit_id: b32e1d34e10a0da806823f57f02a4ae6e93d756e
hash: 273,128,156,973,462,000,000,000,000,000,000,000,000
size: 12
evbuffer_new and bufferevent_new can both fail (when malloc fails) and return NULL. GitHub issue 1547.
*/ private int mconvert(struct magic_set *ms, struct magic *m, int flip) { union VALUETYPE *p = &ms->ms_value; uint8_t type; switch (type = cvt_flip(m->type, flip)) { case FILE_BYTE: cvt_8(p, m); return 1; case FILE_SHORT: cvt_16(p, m); return 1; case FILE_LONG: case FILE_DATE: case FILE_LDATE: cvt_32(p, m); return 1; case FILE_QUAD: case FILE_QDATE: case FILE_QLDATE: case FILE_QWDATE: cvt_64(p, m); return 1; case FILE_STRING: case FILE_BESTRING16: case FILE_LESTRING16: { /* Null terminate and eat *trailing* return */ p->s[sizeof(p->s) - 1] = '\0'; return 1; } case FILE_PSTRING: { char *ptr1 = p->s, *ptr2 = ptr1 + file_pstring_length_size(m); size_t len = file_pstring_get_length(m, ptr1); if (len >= sizeof(p->s)) len = sizeof(p->s) - 1; while (len--) *ptr1++ = *ptr2++; *ptr1 = '\0'; return 1; } case FILE_BESHORT: p->h = (short)((p->hs[0]<<8)|(p->hs[1])); cvt_16(p, m); return 1; case FILE_BELONG: case FILE_BEDATE: case FILE_BELDATE: p->l = (int32_t) ((p->hl[0]<<24)|(p->hl[1]<<16)|(p->hl[2]<<8)|(p->hl[3])); if (type == FILE_BELONG) cvt_32(p, m); return 1; case FILE_BEQUAD: case FILE_BEQDATE: case FILE_BEQLDATE: case FILE_BEQWDATE: p->q = (uint64_t) (((uint64_t)p->hq[0]<<56)|((uint64_t)p->hq[1]<<48)| ((uint64_t)p->hq[2]<<40)|((uint64_t)p->hq[3]<<32)| ((uint64_t)p->hq[4]<<24)|((uint64_t)p->hq[5]<<16)| ((uint64_t)p->hq[6]<<8)|((uint64_t)p->hq[7])); if (type == FILE_BEQUAD) cvt_64(p, m); return 1; case FILE_LESHORT: p->h = (short)((p->hs[1]<<8)|(p->hs[0])); cvt_16(p, m); return 1; case FILE_LELONG: case FILE_LEDATE: case FILE_LELDATE: p->l = (int32_t) ((p->hl[3]<<24)|(p->hl[2]<<16)|(p->hl[1]<<8)|(p->hl[0])); if (type == FILE_LELONG) cvt_32(p, m); return 1; case FILE_LEQUAD: case FILE_LEQDATE: case FILE_LEQLDATE: case FILE_LEQWDATE: p->q = (uint64_t) (((uint64_t)p->hq[7]<<56)|((uint64_t)p->hq[6]<<48)| ((uint64_t)p->hq[5]<<40)|((uint64_t)p->hq[4]<<32)| ((uint64_t)p->hq[3]<<24)|((uint64_t)p->hq[2]<<16)| ((uint64_t)p->hq[1]<<8)|((uint64_t)p->hq[0])); if (type == FILE_LEQUAD) cvt_64(p, m); return 1; case FILE_MELONG: case FILE_MEDATE: case FILE_MELDATE: p->l = (int32_t) ((p->hl[1]<<24)|(p->hl[0]<<16)|(p->hl[3]<<8)|(p->hl[2])); if (type == FILE_MELONG) cvt_32(p, m); return 1; case FILE_FLOAT: cvt_float(p, m); return 1; case FILE_BEFLOAT: p->l = ((uint32_t)p->hl[0]<<24)|((uint32_t)p->hl[1]<<16)| ((uint32_t)p->hl[2]<<8) |((uint32_t)p->hl[3]); cvt_float(p, m); return 1; case FILE_LEFLOAT: p->l = ((uint32_t)p->hl[3]<<24)|((uint32_t)p->hl[2]<<16)| ((uint32_t)p->hl[1]<<8) |((uint32_t)p->hl[0]); cvt_float(p, m); return 1; case FILE_DOUBLE: cvt_double(p, m); return 1; case FILE_BEDOUBLE: p->q = ((uint64_t)p->hq[0]<<56)|((uint64_t)p->hq[1]<<48)| ((uint64_t)p->hq[2]<<40)|((uint64_t)p->hq[3]<<32)| ((uint64_t)p->hq[4]<<24)|((uint64_t)p->hq[5]<<16)| ((uint64_t)p->hq[6]<<8) |((uint64_t)p->hq[7]); cvt_double(p, m); return 1; case FILE_LEDOUBLE: p->q = ((uint64_t)p->hq[7]<<56)|((uint64_t)p->hq[6]<<48)| ((uint64_t)p->hq[5]<<40)|((uint64_t)p->hq[4]<<32)| ((uint64_t)p->hq[3]<<24)|((uint64_t)p->hq[2]<<16)| ((uint64_t)p->hq[1]<<8) |((uint64_t)p->hq[0]); cvt_double(p, m); return 1; case FILE_REGEX: case FILE_SEARCH: case FILE_DEFAULT: case FILE_CLEAR: case FILE_NAME: case FILE_USE: return 1; default: file_magerror(ms, "invalid type %d in mconvert()", m->type); return 0;
target: 1
cwe: [ "CWE-119", "CWE-787" ]
project: file
commit_id: 27a14bc7ba285a0a5ebfdb55e54001aa11932b08
hash: 63,127,382,122,265,860,000,000,000,000,000,000,000
size: 138
Correctly compute the truncated pascal string size (Francisco Alonso and Jan Kaluza at RedHat)
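The FILE_PSTRING branch above copies from ptr2 = ptr1 + file_pstring_length_size(m) but clamps len against sizeof(p->s) alone, ignoring those prefix bytes. A paraphrase of the corrected clamp (our helper, not file's exact patch):

    #include <stddef.h>

    /* The usable room in a cap-byte buffer whose payload starts prefix
     * bytes in is cap - prefix - 1 (one byte kept for the NUL);
     * clamping against cap alone lets the copy run past the end
     * (CWE-119). Assumes prefix + 1 <= cap. */
    static size_t pstring_clamp(size_t len, size_t cap, size_t prefix)
    {
        size_t room = cap - prefix - 1;
        return len > room ? room : len;
    }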
void HttpRequestEntry::resetHttpHeaderProcessor() { proc_ = make_unique<HttpHeaderProcessor>(HttpHeaderProcessor::CLIENT_PARSER); }
target: 0
cwe: [ "CWE-532" ]
project: aria2
commit_id: 37368130ca7de5491a75fd18a20c5c5cc641824a
hash: 68,102,335,698,546,590,000,000,000,000,000,000,000
size: 4
Mask headers
static void clean_dir(const char *name, int *pidarr, int start_pid, int max_pids) { DIR *dir; if (!(dir = opendir(name))) { fwarning("cannot clean %s directory\n", name); return; // we live to fight another day! } // clean leftover files struct dirent *entry; char *end; while ((entry = readdir(dir)) != NULL) { pid_t pid = strtol(entry->d_name, &end, 10); pid %= max_pids; if (end == entry->d_name || *end) continue; if (pid < start_pid) continue; if (pidarr[pid] == 0) delete_run_files(pid); } closedir(dir); }
target: 0
cwe: [ "CWE-284", "CWE-732" ]
project: firejail
commit_id: eecf35c2f8249489a1d3e512bb07f0d427183134
hash: 189,733,277,119,609,200,000,000,000,000,000,000,000
size: 23
mount runtime seccomp files read-only (#2602) avoid creating locations in the file system that are both writable and executable (in this case for processes with euid of the user). for the same reason also remove user owned libfiles when it is not needed any more
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_fw_dump_desc *desc; unsigned int delay = 0; bool monitor_only = false; if (trigger) { u16 occurrences = le16_to_cpu(trigger->occurrences) - 1; if (!le16_to_cpu(trigger->occurrences)) return 0; if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) { IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig); iwl_force_nmi(fwrt->trans); return 0; } trigger->occurrences = cpu_to_le16(occurrences); monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY; /* convert msec to usec */ delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC; } desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); if (!desc) return -ENOMEM; desc->len = len; desc->trig_desc.type = cpu_to_le32(trig); memcpy(desc->trig_desc.data, str, len); return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay); }
target: 0
cwe: [ "CWE-400", "CWE-401" ]
project: linux
commit_id: b4b814fec1a5a849383f7b3886b654a13abbda7d
hash: 185,875,951,263,182,930,000,000,000,000,000,000,000
size: 40
iwlwifi: dbg_ini: fix memory leak in alloc_sgtable In alloc_sgtable if alloc_page fails, the alocated table should be released. Signed-off-by: Navid Emamdoost <[email protected]> Signed-off-by: Luca Coelho <[email protected]>
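The message describes the classic partial-failure pattern: a multi-step allocation must unwind everything already acquired before returning. Generic shape of that cleanup (not the iwlwifi code itself):

    #include <stdlib.h>

    /* Allocate n buffers of sz bytes; on any failure free what was
     * already allocated so nothing leaks (CWE-401). */
    static void **alloc_all(size_t n, size_t sz)
    {
        void **v = calloc(n, sizeof(*v));
        if (!v)
            return NULL;
        for (size_t i = 0; i < n; i++) {
            v[i] = malloc(sz);
            if (!v[i]) {
                while (i--)        /* unwind buffers 0..i-1 */
                    free(v[i]);
                free(v);           /* and the table itself */
                return NULL;
            }
        }
        return v;
    }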
UnregisterExprContextCallback(ExprContext *econtext, ExprContextCallbackFunction function, Datum arg) { ExprContext_CB **prev_callback; ExprContext_CB *ecxt_callback; prev_callback = &econtext->ecxt_callbacks; while ((ecxt_callback = *prev_callback) != NULL) { if (ecxt_callback->function == function && ecxt_callback->arg == arg) { *prev_callback = ecxt_callback->next; pfree(ecxt_callback); } else prev_callback = &ecxt_callback->next; } }
target: 0
cwe: [ "CWE-209" ]
project: postgres
commit_id: 804b6b6db4dcfc590a468e7be390738f9f7755fb
hash: 211,025,534,246,208,900,000,000,000,000,000,000,000
size: 20
Fix column-privilege leak in error-message paths While building error messages to return to the user, BuildIndexValueDescription, ExecBuildSlotValueDescription and ri_ReportViolation would happily include the entire key or entire row in the result returned to the user, even if the user didn't have access to view all of the columns being included. Instead, include only those columns which the user is providing or which the user has select rights on. If the user does not have any rights to view the table or any of the columns involved then no detail is provided and a NULL value is returned from BuildIndexValueDescription and ExecBuildSlotValueDescription. Note that, for key cases, the user must have access to all of the columns for the key to be shown; a partial key will not be returned. Further, in master only, do not return any data for cases where row security is enabled on the relation and row security should be applied for the user. This required a bit of refactoring and moving of things around related to RLS- note the addition of utils/misc/rls.c. Back-patch all the way, as column-level privileges are now in all supported versions. This has been assigned CVE-2014-8161, but since the issue and the patch have already been publicized on pgsql-hackers, there's no point in trying to hide this commit.
virtual Item *get_equal_const_item(THD *thd, const Context &ctx, Item *const_item) { return const_item; }
target: 0
cwe: [ "CWE-416", "CWE-703" ]
project: server
commit_id: 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917
hash: 46,070,298,289,268,710,000,000,000,000,000,000,000
size: 5
MDEV-24176 Server crashes after insert in the table with virtual column generated using date_format() and if() vcol_info->expr is allocated on expr_arena at parsing stage. Since expr item is allocated on expr_arena all its containee items must be allocated on expr_arena too. Otherwise fix_session_expr() will encounter prematurely freed item. When table is reopened from cache vcol_info contains stale expression. We refresh expression via TABLE::vcol_fix_exprs() but first we must prepare a proper context (Vcol_expr_context) which meets some requirements: 1. As noted above expr update must be done on expr_arena as there may be new items created. It was a bug in fix_session_expr_for_read() and was just not reproduced because of no second refix. Now refix is done for more cases so it does reproduce. Tests affected: vcol.binlog 2. Also name resolution context must be narrowed to the single table. Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes 3. sql_mode must be clean and not fail expr update. sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc must not affect vcol expression update. If the table was created successfully any further evaluation must not fail. Tests affected: main.func_like Reviewed by: Sergei Golubchik <[email protected]>
static int ctrl_fill_mcgrp_info(const struct genl_family *family, const struct genl_multicast_group *grp, int grp_id, u32 portid, u32 seq, u32 flags, struct sk_buff *skb, u8 cmd) { void *hdr; struct nlattr *nla_grps; struct nlattr *nest; hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd); if (hdr == NULL) return -1; if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) || nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id)) goto nla_put_failure; nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS); if (nla_grps == NULL) goto nla_put_failure; nest = nla_nest_start(skb, 1); if (nest == NULL) goto nla_put_failure; if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp_id) || nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME, grp->name)) goto nla_put_failure; nla_nest_end(skb, nest); nla_nest_end(skb, nla_grps); genlmsg_end(skb, hdr); return 0; nla_put_failure: genlmsg_cancel(skb, hdr); return -EMSGSIZE; }
target: 0
cwe: [ "CWE-399", "CWE-401" ]
project: linux
commit_id: ceabee6c59943bdd5e1da1a6a20dc7ee5f8113a2
hash: 277,365,859,039,728,870,000,000,000,000,000,000,000
size: 40
genetlink: Fix a memory leak on error path In genl_register_family(), when idr_alloc() fails, we forget to free the memory we possibly allocate for family->attrbuf. Reported-by: Hulk Robot <[email protected]> Fixes: 2ae0f17df1cd ("genetlink: use idr to track families") Signed-off-by: YueHaibing <[email protected]> Reviewed-by: Kirill Tkhai <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id, unsigned long *visited, bool validate) { struct uac_clock_source_descriptor *source; struct uac_clock_selector_descriptor *selector; struct uac_clock_multiplier_descriptor *multiplier; entity_id &= 0xff; if (test_and_set_bit(entity_id, visited)) { usb_audio_warn(chip, "%s(): recursive clock topology detected, id %d.\n", __func__, entity_id); return -EINVAL; } /* first, see if the ID we're looking for is a clock source already */ source = snd_usb_find_clock_source(chip->ctrl_intf, entity_id); if (source) { entity_id = source->bClockID; if (validate && !uac_clock_source_is_valid(chip, entity_id)) { usb_audio_err(chip, "clock source %d is not valid, cannot use\n", entity_id); return -ENXIO; } return entity_id; } selector = snd_usb_find_clock_selector(chip->ctrl_intf, entity_id); if (selector) { int ret, i, cur; /* the entity ID we are looking for is a selector. * find out what it currently selects */ ret = uac_clock_selector_get_val(chip, selector->bClockID); if (ret < 0) return ret; /* Selector values are one-based */ if (ret > selector->bNrInPins || ret < 1) { usb_audio_err(chip, "%s(): selector reported illegal value, id %d, ret %d\n", __func__, selector->bClockID, ret); return -EINVAL; } cur = ret; ret = __uac_clock_find_source(chip, selector->baCSourceID[ret - 1], visited, validate); if (!validate || ret > 0 || !chip->autoclock) return ret; /* The current clock source is invalid, try others. */ for (i = 1; i <= selector->bNrInPins; i++) { int err; if (i == cur) continue; ret = __uac_clock_find_source(chip, selector->baCSourceID[i - 1], visited, true); if (ret < 0) continue; err = uac_clock_selector_set_val(chip, entity_id, i); if (err < 0) continue; usb_audio_info(chip, "found and selected valid clock source %d\n", ret); return ret; } return -ENXIO; } /* FIXME: multipliers only act as pass-thru element for now */ multiplier = snd_usb_find_clock_multiplier(chip->ctrl_intf, entity_id); if (multiplier) return __uac_clock_find_source(chip, multiplier->bCSourceID, visited, validate); return -EINVAL; }
target: 0
cwe: []
project: sound
commit_id: 447d6275f0c21f6cc97a88b3a0c601436a4cdf2a
hash: 181,084,047,437,838,550,000,000,000,000,000,000,000
size: 89
ALSA: usb-audio: Add sanity checks for endpoint accesses Add some sanity check codes before actually accessing the endpoint via get_endpoint() in order to avoid the invalid access through a malformed USB descriptor. Mostly just checking bNumEndpoints, but in one place (snd_microii_spdif_default_get()), the validity of iface and altsetting index is checked as well. Bugzilla: https://bugzilla.suse.com/show_bug.cgi?id=971125 Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
static int diffie_hellman_sha1(LIBSSH2_SESSION *session, _libssh2_bn *g, _libssh2_bn *p, int group_order, unsigned char packet_type_init, unsigned char packet_type_reply, unsigned char *midhash, unsigned long midhash_len, kmdhgGPshakex_state_t *exchange_state) { int ret = 0; int rc; libssh2_sha1_ctx exchange_hash_ctx; if (exchange_state->state == libssh2_NB_state_idle) { /* Setup initial values */ exchange_state->e_packet = NULL; exchange_state->s_packet = NULL; exchange_state->k_value = NULL; exchange_state->ctx = _libssh2_bn_ctx_new(); exchange_state->x = _libssh2_bn_init(); /* Random from client */ exchange_state->e = _libssh2_bn_init(); /* g^x mod p */ exchange_state->f = _libssh2_bn_init_from_bin(); /* g^(Random from server) mod p */ exchange_state->k = _libssh2_bn_init(); /* The shared secret: f^x mod p */ /* Zero the whole thing out */ memset(&exchange_state->req_state, 0, sizeof(packet_require_state_t)); /* Generate x and e */ _libssh2_bn_rand(exchange_state->x, group_order, 0, -1); _libssh2_bn_mod_exp(exchange_state->e, g, exchange_state->x, p, exchange_state->ctx); /* Send KEX init */ /* packet_type(1) + String Length(4) + leading 0(1) */ exchange_state->e_packet_len = _libssh2_bn_bytes(exchange_state->e) + 6; if (_libssh2_bn_bits(exchange_state->e) % 8) { /* Leading 00 not needed */ exchange_state->e_packet_len--; } exchange_state->e_packet = LIBSSH2_ALLOC(session, exchange_state->e_packet_len); if (!exchange_state->e_packet) { ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Out of memory error"); goto clean_exit; } exchange_state->e_packet[0] = packet_type_init; _libssh2_htonu32(exchange_state->e_packet + 1, exchange_state->e_packet_len - 5); if (_libssh2_bn_bits(exchange_state->e) % 8) { _libssh2_bn_to_bin(exchange_state->e, exchange_state->e_packet + 5); } else { exchange_state->e_packet[5] = 0; _libssh2_bn_to_bin(exchange_state->e, exchange_state->e_packet + 6); } _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Sending KEX packet %d", (int) packet_type_init); exchange_state->state = libssh2_NB_state_created; } if (exchange_state->state == libssh2_NB_state_created) { rc = _libssh2_transport_send(session, exchange_state->e_packet, exchange_state->e_packet_len, NULL, 0); if (rc == LIBSSH2_ERROR_EAGAIN) { return rc; } else if (rc) { ret = _libssh2_error(session, rc, "Unable to send KEX init message"); goto clean_exit; } exchange_state->state = libssh2_NB_state_sent; } if (exchange_state->state == libssh2_NB_state_sent) { if (session->burn_optimistic_kexinit) { /* The first KEX packet to come along will be the guess initially * sent by the server. That guess turned out to be wrong so we * need to silently ignore it */ int burn_type; _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Waiting for badly guessed KEX packet (to be ignored)"); burn_type = _libssh2_packet_burn(session, &exchange_state->burn_state); if (burn_type == LIBSSH2_ERROR_EAGAIN) { return burn_type; } else if (burn_type <= 0) { /* Failed to receive a packet */ ret = burn_type; goto clean_exit; } session->burn_optimistic_kexinit = 0; _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Burnt packet of type: %02x", (unsigned int) burn_type); } exchange_state->state = libssh2_NB_state_sent1; } if (exchange_state->state == libssh2_NB_state_sent1) { /* Wait for KEX reply */ rc = _libssh2_packet_require(session, packet_type_reply, &exchange_state->s_packet, &exchange_state->s_packet_len, 0, NULL, 0, &exchange_state->req_state); if (rc == LIBSSH2_ERROR_EAGAIN) { return rc; } if (rc) { ret = _libssh2_error(session, LIBSSH2_ERROR_TIMEOUT, "Timed out waiting for KEX reply"); goto clean_exit; } /* Parse KEXDH_REPLY */ exchange_state->s = exchange_state->s_packet + 1; session->server_hostkey_len = _libssh2_ntohu32(exchange_state->s); exchange_state->s += 4; if (session->server_hostkey) LIBSSH2_FREE(session, session->server_hostkey); session->server_hostkey = LIBSSH2_ALLOC(session, session->server_hostkey_len); if (!session->server_hostkey) { ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate memory for a copy " "of the host key"); goto clean_exit; } memcpy(session->server_hostkey, exchange_state->s, session->server_hostkey_len); exchange_state->s += session->server_hostkey_len; #if LIBSSH2_MD5 { libssh2_md5_ctx fingerprint_ctx; if (libssh2_md5_init(&fingerprint_ctx)) { libssh2_md5_update(fingerprint_ctx, session->server_hostkey, session->server_hostkey_len); libssh2_md5_final(fingerprint_ctx, session->server_hostkey_md5); session->server_hostkey_md5_valid = TRUE; } else { session->server_hostkey_md5_valid = FALSE; } } #ifdef LIBSSH2DEBUG { char fingerprint[50], *fprint = fingerprint; int i; for(i = 0; i < 16; i++, fprint += 3) { snprintf(fprint, 4, "%02x:", session->server_hostkey_md5[i]); } *(--fprint) = '\0'; _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Server's MD5 Fingerprint: %s", fingerprint); } #endif /* LIBSSH2DEBUG */ #endif /* ! LIBSSH2_MD5 */ { libssh2_sha1_ctx fingerprint_ctx; if (libssh2_sha1_init(&fingerprint_ctx)) { libssh2_sha1_update(fingerprint_ctx, session->server_hostkey, session->server_hostkey_len); libssh2_sha1_final(fingerprint_ctx, session->server_hostkey_sha1); session->server_hostkey_sha1_valid = TRUE; } else { session->server_hostkey_sha1_valid = FALSE; } } #ifdef LIBSSH2DEBUG { char fingerprint[64], *fprint = fingerprint; int i; for(i = 0; i < 20; i++, fprint += 3) { snprintf(fprint, 4, "%02x:", session->server_hostkey_sha1[i]); } *(--fprint) = '\0'; _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Server's SHA1 Fingerprint: %s", fingerprint); } #endif /* LIBSSH2DEBUG */ if (session->hostkey->init(session, session->server_hostkey, session->server_hostkey_len, &session->server_hostkey_abstract)) { ret = _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_INIT, "Unable to initialize hostkey importer"); goto clean_exit; } exchange_state->f_value_len = _libssh2_ntohu32(exchange_state->s); exchange_state->s += 4; exchange_state->f_value = exchange_state->s; exchange_state->s += exchange_state->f_value_len; _libssh2_bn_from_bin(exchange_state->f, exchange_state->f_value_len, exchange_state->f_value); exchange_state->h_sig_len = _libssh2_ntohu32(exchange_state->s); exchange_state->s += 4; exchange_state->h_sig = exchange_state->s; /* Compute the shared secret */ _libssh2_bn_mod_exp(exchange_state->k, exchange_state->f, exchange_state->x, p, exchange_state->ctx); exchange_state->k_value_len = _libssh2_bn_bytes(exchange_state->k) + 5; if (_libssh2_bn_bits(exchange_state->k) % 8) { /* don't need leading 00 */ exchange_state->k_value_len--; } exchange_state->k_value = LIBSSH2_ALLOC(session, exchange_state->k_value_len); if (!exchange_state->k_value) { ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate buffer for K"); goto clean_exit; } _libssh2_htonu32(exchange_state->k_value, exchange_state->k_value_len - 4); if (_libssh2_bn_bits(exchange_state->k) % 8) { _libssh2_bn_to_bin(exchange_state->k, exchange_state->k_value + 4); } else { exchange_state->k_value[4] = 0; _libssh2_bn_to_bin(exchange_state->k, exchange_state->k_value + 5); } exchange_state->exchange_hash = (void*)&exchange_hash_ctx; libssh2_sha1_init(&exchange_hash_ctx); if (session->local.banner) { _libssh2_htonu32(exchange_state->h_sig_comp, strlen((char *) session->local.banner) - 2); libssh2_sha1_update(exchange_hash_ctx, exchange_state->h_sig_comp, 4); libssh2_sha1_update(exchange_hash_ctx, (char *) session->local.banner, strlen((char *) session->local.banner) - 2); } else { _libssh2_htonu32(exchange_state->h_sig_comp, sizeof(LIBSSH2_SSH_DEFAULT_BANNER) - 1); libssh2_sha1_update(exchange_hash_ctx, exchange_state->h_sig_comp, 4); libssh2_sha1_update(exchange_hash_ctx, LIBSSH2_SSH_DEFAULT_BANNER, sizeof(LIBSSH2_SSH_DEFAULT_BANNER) - 1); } _libssh2_htonu32(exchange_state->h_sig_comp, strlen((char *) session->remote.banner)); libssh2_sha1_update(exchange_hash_ctx, exchange_state->h_sig_comp, 4); libssh2_sha1_update(exchange_hash_ctx, session->remote.banner, strlen((char *) session->remote.banner)); _libssh2_htonu32(exchange_state->h_sig_comp, session->local.kexinit_len); libssh2_sha1_update(exchange_hash_ctx, exchange_state->h_sig_comp, 4); libssh2_sha1_update(exchange_hash_ctx, session->local.kexinit, session->local.kexinit_len); _libssh2_htonu32(exchange_state->h_sig_comp, session->remote.kexinit_len); libssh2_sha1_update(exchange_hash_ctx, exchange_state->h_sig_comp, 4); libssh2_sha1_update(exchange_hash_ctx, session->remote.kexinit, session->remote.kexinit_len); _libssh2_htonu32(exchange_state->h_sig_comp, session->server_hostkey_len); libssh2_sha1_update(exchange_hash_ctx, exchange_state->h_sig_comp, 4); libssh2_sha1_update(exchange_hash_ctx, session->server_hostkey, session->server_hostkey_len); if (packet_type_init == SSH_MSG_KEX_DH_GEX_INIT) { /* diffie-hellman-group-exchange hashes additional fields */ #ifdef LIBSSH2_DH_GEX_NEW _libssh2_htonu32(exchange_state->h_sig_comp, LIBSSH2_DH_GEX_MINGROUP); _libssh2_htonu32(exchange_state->h_sig_comp + 4, LIBSSH2_DH_GEX_OPTGROUP); _libssh2_htonu32(exchange_state->h_sig_comp + 8, LIBSSH2_DH_GEX_MAXGROUP); libssh2_sha1_update(exchange_hash_ctx, exchange_state->h_sig_comp, 12); #else _libssh2_htonu32(exchange_state->h_sig_comp, LIBSSH2_DH_GEX_OPTGROUP); libssh2_sha1_update(exchange_hash_ctx, exchange_state->h_sig_comp, 4); #endif } if (midhash) { libssh2_sha1_update(exchange_hash_ctx, midhash, midhash_len); } libssh2_sha1_update(exchange_hash_ctx, exchange_state->e_packet + 1, exchange_state->e_packet_len - 1); _libssh2_htonu32(exchange_state->h_sig_comp, exchange_state->f_value_len); libssh2_sha1_update(exchange_hash_ctx, exchange_state->h_sig_comp, 4); libssh2_sha1_update(exchange_hash_ctx, exchange_state->f_value, exchange_state->f_value_len); libssh2_sha1_update(exchange_hash_ctx, exchange_state->k_value, exchange_state->k_value_len); libssh2_sha1_final(exchange_hash_ctx, exchange_state->h_sig_comp); if (session->hostkey-> sig_verify(session, exchange_state->h_sig, exchange_state->h_sig_len, exchange_state->h_sig_comp, 20, &session->server_hostkey_abstract)) { ret = _libssh2_error(session, LIBSSH2_ERROR_HOSTKEY_SIGN, "Unable to verify hostkey signature"); goto clean_exit; } _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Sending NEWKEYS message"); exchange_state->c = SSH_MSG_NEWKEYS; exchange_state->state = libssh2_NB_state_sent2; } if (exchange_state->state == libssh2_NB_state_sent2) { rc = _libssh2_transport_send(session, &exchange_state->c, 1, NULL, 0); if (rc == LIBSSH2_ERROR_EAGAIN) { return rc; } else if (rc) { ret = _libssh2_error(session, rc, "Unable to send NEWKEYS message"); goto clean_exit; } exchange_state->state = libssh2_NB_state_sent3; } if (exchange_state->state == libssh2_NB_state_sent3) { rc = _libssh2_packet_require(session, SSH_MSG_NEWKEYS, &exchange_state->tmp, &exchange_state->tmp_len, 0, NULL, 0, &exchange_state->req_state); if (rc == LIBSSH2_ERROR_EAGAIN) { return rc; } else if (rc) { ret = _libssh2_error(session, rc, "Timed out waiting for NEWKEYS"); goto clean_exit; } /* The first key exchange has been performed, switch to active crypt/comp/mac mode */ session->state |= LIBSSH2_STATE_NEWKEYS; _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Received NEWKEYS message"); /* This will actually end up being just packet_type(1) for this packet type anyway */ LIBSSH2_FREE(session, exchange_state->tmp); if (!session->session_id) { session->session_id = LIBSSH2_ALLOC(session, SHA_DIGEST_LENGTH); if (!session->session_id) { ret = _libssh2_error(session, LIBSSH2_ERROR_ALLOC, "Unable to allocate buffer for SHA digest"); goto clean_exit; } memcpy(session->session_id, exchange_state->h_sig_comp, SHA_DIGEST_LENGTH); session->session_id_len = SHA_DIGEST_LENGTH; _libssh2_debug(session, LIBSSH2_TRACE_KEX, "session_id calculated"); } /* Cleanup any existing cipher */ if (session->local.crypt->dtor) { session->local.crypt->dtor(session, &session->local.crypt_abstract); } /* Calculate IV/Secret/Key for each direction */ if (session->local.crypt->init) { unsigned char *iv = NULL, *secret = NULL; int free_iv = 0, free_secret = 0; LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA1_HASH(iv, session->local.crypt-> iv_len, "A"); if (!iv) { ret = -1; goto clean_exit; } LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA1_HASH(secret, session->local.crypt-> secret_len, "C"); if (!secret) { LIBSSH2_FREE(session, iv); ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } if (session->local.crypt-> init(session, session->local.crypt, iv, &free_iv, secret, &free_secret, 1, &session->local.crypt_abstract)) { LIBSSH2_FREE(session, iv); LIBSSH2_FREE(session, secret); ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } if (free_iv) { memset(iv, 0, session->local.crypt->iv_len); LIBSSH2_FREE(session, iv); } if (free_secret) { memset(secret, 0, session->local.crypt->secret_len); LIBSSH2_FREE(session, secret); } } _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Client to Server IV and Key calculated"); if (session->remote.crypt->dtor) { /* Cleanup any existing cipher */ session->remote.crypt->dtor(session, &session->remote.crypt_abstract); } if (session->remote.crypt->init) { unsigned char *iv = NULL, *secret = NULL; int free_iv = 0, free_secret = 0; LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA1_HASH(iv, session->remote.crypt-> iv_len, "B"); if (!iv) { ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA1_HASH(secret, session->remote.crypt-> secret_len, "D"); if (!secret) { LIBSSH2_FREE(session, iv); ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } if (session->remote.crypt-> init(session, session->remote.crypt, iv, &free_iv, secret, &free_secret, 0, &session->remote.crypt_abstract)) { LIBSSH2_FREE(session, iv); LIBSSH2_FREE(session, secret); ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } if (free_iv) { memset(iv, 0, session->remote.crypt->iv_len); LIBSSH2_FREE(session, iv); } if (free_secret) { memset(secret, 0, session->remote.crypt->secret_len); LIBSSH2_FREE(session, secret); } } _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Server to Client IV and Key calculated"); if (session->local.mac->dtor) { session->local.mac->dtor(session, &session->local.mac_abstract); } if (session->local.mac->init) { unsigned char *key = NULL; int free_key = 0; LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA1_HASH(key, session->local.mac-> key_len, "E"); if (!key) { ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } session->local.mac->init(session, key, &free_key, &session->local.mac_abstract); if (free_key) { memset(key, 0, session->local.mac->key_len); LIBSSH2_FREE(session, key); } } _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Client to Server HMAC Key calculated"); if (session->remote.mac->dtor) { session->remote.mac->dtor(session, &session->remote.mac_abstract); } if (session->remote.mac->init) { unsigned char *key = NULL; int free_key = 0; LIBSSH2_KEX_METHOD_DIFFIE_HELLMAN_SHA1_HASH(key, session->remote.mac-> key_len, "F"); if (!key) { ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } session->remote.mac->init(session, key, &free_key, &session->remote.mac_abstract); if (free_key) { memset(key, 0, session->remote.mac->key_len); LIBSSH2_FREE(session, key); } } _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Server to Client HMAC Key calculated"); /* Initialize compression for each direction */ /* Cleanup any existing compression */ if (session->local.comp && session->local.comp->dtor) { session->local.comp->dtor(session, 1, &session->local.comp_abstract); } if (session->local.comp && session->local.comp->init) { if (session->local.comp->init(session, 1, &session->local.comp_abstract)) { ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } } _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Client to Server compression initialized"); if (session->remote.comp && session->remote.comp->dtor) { session->remote.comp->dtor(session, 0, &session->remote.comp_abstract); } if (session->remote.comp && session->remote.comp->init) { if (session->remote.comp->init(session, 0, &session->remote.comp_abstract)) { ret = LIBSSH2_ERROR_KEX_FAILURE; goto clean_exit; } } _libssh2_debug(session, LIBSSH2_TRACE_KEX, "Server to Client compression initialized"); } clean_exit: _libssh2_bn_free(exchange_state->x); exchange_state->x = NULL; _libssh2_bn_free(exchange_state->e); exchange_state->e = NULL; _libssh2_bn_free(exchange_state->f); exchange_state->f = NULL; _libssh2_bn_free(exchange_state->k); exchange_state->k = NULL; _libssh2_bn_ctx_free(exchange_state->ctx); exchange_state->ctx = NULL; if (exchange_state->e_packet) { LIBSSH2_FREE(session, exchange_state->e_packet); exchange_state->e_packet = NULL; } if (exchange_state->s_packet) { LIBSSH2_FREE(session, exchange_state->s_packet); exchange_state->s_packet = NULL; } if (exchange_state->k_value) { LIBSSH2_FREE(session, exchange_state->k_value); exchange_state->k_value = NULL; } exchange_state->state = libssh2_NB_state_idle; return ret; }
target: 0
cwe: [ "CWE-200" ]
project: libssh2
commit_id: ca5222ea819cc5ed797860070b4c6c1aeeb28420
hash: 309,445,912,498,133,860,000,000,000,000,000,000,000
size: 613
diffie_hellman_sha256: convert bytes to bits As otherwise we get far too small numbers. Reported-by: Andreas Schneider CVE-2016-0787
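Per the message, the root cause is a units bug: group_order is a byte count, but the bignum RNG expects bits, so the generated secret had only group_order bits of entropy (CVE-2016-0787, CWE-200). The conversion the fix introduces, shown in isolation as our own helper:

    /* group_order is in bytes; the random-number size parameter is
     * in bits. Passing the byte count yields far too small numbers. */
    static int dh_secret_bits(int group_order_bytes)
    {
        return group_order_bytes * 8;  /* bytes -> bits */
    }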
xmlDOMWrapNSNormDeclareNsForced(xmlDocPtr doc, xmlNodePtr elem, const xmlChar *nsName, const xmlChar *prefix, int checkShadow) { xmlNsPtr ret; char buf[50]; const xmlChar *pref; int counter = 0; if ((doc == NULL) || (elem == NULL) || (elem->type != XML_ELEMENT_NODE)) return(NULL); /* * Create a ns-decl on @anchor. */ pref = prefix; while (1) { /* * Lookup whether the prefix is unused in elem's ns-decls. */ if ((elem->nsDef != NULL) && (xmlTreeNSListLookupByPrefix(elem->nsDef, pref) != NULL)) goto ns_next_prefix; if (checkShadow && elem->parent && ((xmlNodePtr) elem->parent->doc != elem->parent)) { /* * Does it shadow ancestor ns-decls? */ if (xmlSearchNsByPrefixStrict(doc, elem->parent, pref, NULL) == 1) goto ns_next_prefix; } ret = xmlNewNs(NULL, nsName, pref); if (ret == NULL) return (NULL); if (elem->nsDef == NULL) elem->nsDef = ret; else { xmlNsPtr ns2 = elem->nsDef; while (ns2->next != NULL) ns2 = ns2->next; ns2->next = ret; } return (ret); ns_next_prefix: counter++; if (counter > 1000) return (NULL); if (prefix == NULL) { snprintf((char *) buf, sizeof(buf), "ns_%d", counter); } else snprintf((char *) buf, sizeof(buf), "%.30s_%d", (char *)prefix, counter); pref = BAD_CAST buf; } }
target: 0
cwe: [ "CWE-20" ]
project: libxml2
commit_id: bdd66182ef53fe1f7209ab6535fda56366bd7ac9
hash: 295,693,437,405,918,640,000,000,000,000,000,000,000
size: 58
Avoid building recursive entities For https://bugzilla.gnome.org/show_bug.cgi?id=762100 When we detect a recusive entity we should really not build the associated data, moreover if someone bypass libxml2 fatal errors and still tries to serialize a broken entity make sure we don't risk to get ito a recursion * parser.c: xmlParserEntityCheck() don't build if entity loop were found and remove the associated text content * tree.c: xmlStringGetNodeList() avoid a potential recursion
dtls1_process_record(SSL *s) { int al; int clear=0; int enc_err; SSL_SESSION *sess; SSL3_RECORD *rr; unsigned int mac_size; unsigned char md[EVP_MAX_MD_SIZE]; int decryption_failed_or_bad_record_mac = 0; unsigned char *mac = NULL; rr= &(s->s3->rrec); sess = s->session; /* At this point, s->packet_length == SSL3_RT_HEADER_LNGTH + rr->length, * and we have that many bytes in s->packet */ rr->input= &(s->packet[DTLS1_RT_HEADER_LENGTH]); /* ok, we can now read from 's->packet' data into 'rr' * rr->input points at rr->length bytes, which * need to be copied into rr->data by either * the decryption or by the decompression * When the data is 'copied' into the rr->data buffer, * rr->input will be pointed at the new buffer */ /* We now have - encrypted [ MAC [ compressed [ plain ] ] ] * rr->length bytes of encrypted compressed stuff. */ /* check is not needed I believe */ if (rr->length > SSL3_RT_MAX_ENCRYPTED_LENGTH) { al=SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_DTLS1_PROCESS_RECORD,SSL_R_ENCRYPTED_LENGTH_TOO_LONG); goto f_err; } /* decrypt in place in 'rr->input' */ rr->data=rr->input; enc_err = s->method->ssl3_enc->enc(s,0); if (enc_err <= 0) { /* To minimize information leaked via timing, we will always * perform all computations before discarding the message. */ decryption_failed_or_bad_record_mac = 1; } #ifdef TLS_DEBUG printf("dec %d\n",rr->length); { unsigned int z; for (z=0; z<rr->length; z++) printf("%02X%c",rr->data[z],((z+1)%16)?' ':'\n'); } printf("\n"); #endif /* r->length is now the compressed data plus mac */ if ( (sess == NULL) || (s->enc_read_ctx == NULL) || (s->read_hash == NULL)) clear=1; if (!clear) { mac_size=EVP_MD_size(s->read_hash); if (rr->length > SSL3_RT_MAX_COMPRESSED_LENGTH+mac_size) { #if 0 /* OK only for stream ciphers (then rr->length is visible from ciphertext anyway) */ al=SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_DTLS1_PROCESS_RECORD,SSL_R_PRE_MAC_LENGTH_TOO_LONG); goto f_err; #else decryption_failed_or_bad_record_mac = 1; #endif } /* check the MAC for rr->input (it's in mac_size bytes at the tail) */ if (rr->length >= mac_size) { rr->length -= mac_size; mac = &rr->data[rr->length]; } else rr->length = 0; i=s->method->ssl3_enc->mac(s,md,0); if (i < 0 || mac == NULL || CRYPTO_memcmp(md,mac,mac_size) != 0) { decryption_failed_or_bad_record_mac = 1; } } if (decryption_failed_or_bad_record_mac) { /* decryption failed, silently discard message */ rr->length = 0; s->packet_length = 0; goto err; } /* r->length is now just compressed */ if (s->expand != NULL) { if (rr->length > SSL3_RT_MAX_COMPRESSED_LENGTH) { al=SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_DTLS1_PROCESS_RECORD,SSL_R_COMPRESSED_LENGTH_TOO_LONG); goto f_err; } if (!ssl3_do_uncompress(s)) { al=SSL_AD_DECOMPRESSION_FAILURE; SSLerr(SSL_F_DTLS1_PROCESS_RECORD,SSL_R_BAD_DECOMPRESSION); goto f_err; } } if (rr->length > SSL3_RT_MAX_PLAIN_LENGTH) { al=SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_DTLS1_PROCESS_RECORD,SSL_R_DATA_LENGTH_TOO_LONG); goto f_err; } rr->off=0; /* So at this point the following is true * ssl->s3->rrec.type is the type of record * ssl->s3->rrec.length == number of bytes in record * ssl->s3->rrec.off == offset to first valid byte * ssl->s3->rrec.data == where to take bytes from, increment * after use :-). */ /* we have pulled in a full packet so zero things */ s->packet_length=0; dtls1_record_bitmap_update(s, &(s->d1->bitmap));/* Mark receipt of record. */ return(1); f_err: ssl3_send_alert(s,SSL3_AL_FATAL,al); err: return(0); }
target: 1
cwe: [ "CWE-310" ]
project: openssl
commit_id: 2928cb4c82d6516d9e65ede4901a5957d8c39c32
hash: 26,858,544,350,272,930,000,000,000,000,000,000,000
size: 143
Fixups.
void sctp_ootb_pkt_free(struct sctp_packet *packet) { sctp_transport_free(packet->transport); }
target: 0
cwe: [ "CWE-20" ]
project: linux-2.6
commit_id: ba0166708ef4da7eeb61dd92bbba4d5a749d6561
hash: 46,040,856,499,317,340,000,000,000,000,000,000,000
size: 4
sctp: Fix kernel panic while process protocol violation parameter Since call to function sctp_sf_abort_violation() need paramter 'arg' with 'struct sctp_chunk' type, it will read the chunk type and chunk length from the chunk_hdr member of chunk. But call to sctp_sf_violation_paramlen() always with 'struct sctp_paramhdr' type's parameter, it will be passed to sctp_sf_abort_violation(). This may cause kernel panic. sctp_sf_violation_paramlen() |-- sctp_sf_abort_violation() |-- sctp_make_abort_violation() This patch fixed this problem. This patch also fix two place which called sctp_sf_violation_paramlen() with wrong paramter type. Signed-off-by: Wei Yongjun <[email protected]> Signed-off-by: Vlad Yasevich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
GF_FilterPid *gf_filter_get_opid(GF_Filter *filter, u32 idx) { return gf_list_get(filter->output_pids, idx); }
target: 0
cwe: [ "CWE-787" ]
project: gpac
commit_id: da37ec8582266983d0ec4b7550ec907401ec441e
hash: 4,863,285,797,849,462,000,000,000,000,000,000,000
size: 4
fixed crashes for very long path - cf #1908
static ssize_t oom_adjust_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task; char buffer[PROC_NUMBUF]; int oom_adjust; unsigned long flags; int err; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) { err = -EFAULT; goto out; } err = kstrtoint(strstrip(buffer), 0, &oom_adjust); if (err) goto out; if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) && oom_adjust != OOM_DISABLE) { err = -EINVAL; goto out; } task = get_proc_task(file->f_path.dentry->d_inode); if (!task) { err = -ESRCH; goto out; } task_lock(task); if (!task->mm) { err = -EINVAL; goto err_task_lock; } if (!lock_task_sighand(task, &flags)) { err = -ESRCH; goto err_task_lock; } if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) { err = -EACCES; goto err_sighand; } /* * Warn that /proc/pid/oom_adj is deprecated, see * Documentation/feature-removal-schedule.txt. */ printk_once(KERN_WARNING "%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n", current->comm, task_pid_nr(current), task_pid_nr(task), task_pid_nr(task)); task->signal->oom_adj = oom_adjust; /* * Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum * value is always attainable. */ if (task->signal->oom_adj == OOM_ADJUST_MAX) task->signal->oom_score_adj = OOM_SCORE_ADJ_MAX; else task->signal->oom_score_adj = (oom_adjust * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE; trace_oom_score_adj_update(task); err_sighand: unlock_task_sighand(task, &flags); err_task_lock: task_unlock(task); put_task_struct(task); out: return err < 0 ? err : count; }
target: 0
cwe: []
project: linux
commit_id: 0499680a42141d86417a8fbaa8c8db806bea1201
hash: 314,916,748,429,160,900,000,000,000,000,000,000,000
size: 74
procfs: add hidepid= and gid= mount options Add support for mount options to restrict access to /proc/PID/ directories. The default backward-compatible "relaxed" behaviour is left untouched. The first mount option is called "hidepid" and its value defines how much info about processes we want to be available for non-owners: hidepid=0 (default) means the old behavior - anybody may read all world-readable /proc/PID/* files. hidepid=1 means users may not access any /proc/<pid>/ directories, but their own. Sensitive files like cmdline, sched*, status are now protected against other users. As permission checking done in proc_pid_permission() and files' permissions are left untouched, programs expecting specific files' modes are not confused. hidepid=2 means hidepid=1 plus all /proc/PID/ will be invisible to other users. It doesn't mean that it hides whether a process exists (it can be learned by other means, e.g. by kill -0 $PID), but it hides process' euid and egid. It compicates intruder's task of gathering info about running processes, whether some daemon runs with elevated privileges, whether another user runs some sensitive program, whether other users run any program at all, etc. gid=XXX defines a group that will be able to gather all processes' info (as in hidepid=0 mode). This group should be used instead of putting nonroot user in sudoers file or something. However, untrusted users (like daemons, etc.) which are not supposed to monitor the tasks in the whole system should not be added to the group. hidepid=1 or higher is designed to restrict access to procfs files, which might reveal some sensitive private information like precise keystrokes timings: http://www.openwall.com/lists/oss-security/2011/11/05/3 hidepid=1/2 doesn't break monitoring userspace tools. ps, top, pgrep, and conky gracefully handle EPERM/ENOENT and behave as if the current user is the only user running processes. pstree shows the process subtree which contains "pstree" process. Note: the patch doesn't deal with setuid/setgid issues of keeping preopened descriptors of procfs files (like https://lkml.org/lkml/2011/2/7/368). We rely on that the leaked information like the scheduling counters of setuid apps doesn't threaten anybody's privacy - only the user started the setuid program may read the counters. Signed-off-by: Vasiliy Kulikov <[email protected]> Cc: Alexey Dobriyan <[email protected]> Cc: Al Viro <[email protected]> Cc: Randy Dunlap <[email protected]> Cc: "H. Peter Anvin" <[email protected]> Cc: Greg KH <[email protected]> Cc: Theodore Tso <[email protected]> Cc: Alan Cox <[email protected]> Cc: James Morris <[email protected]> Cc: Oleg Nesterov <[email protected]> Cc: Hugh Dickins <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
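Since this message is effectively documentation for the new mount options, a usage sketch: remounting /proc with hidepid=2 plus a monitoring group via mount(2). The gid value is a placeholder, and this needs root.

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
        /* hidepid=2: other users' /proc/PID/ dirs become invisible;
         * members of gid=1000 (placeholder) still see everything,
         * per the commit message above. */
        if (mount("proc", "/proc", "proc", MS_REMOUNT,
                  "hidepid=2,gid=1000") != 0) {
            perror("remount /proc");
            return 1;
        }
        return 0;
    }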
smtp_tx_rcpt_to(struct smtp_tx *tx, const char *line) { char *opt, *p; char *copy; char tmp[SMTP_LINE_MAX]; (void)strlcpy(tmp, line, sizeof tmp); copy = tmp; if (tx->rcptcount >= env->sc_session_max_rcpt) { smtp_reply(tx->session, "451 %s %s: Too many recipients", esc_code(ESC_STATUS_TEMPFAIL, ESC_TOO_MANY_RECIPIENTS), esc_description(ESC_TOO_MANY_RECIPIENTS)); return; } if (smtp_mailaddr(&tx->evp.rcpt, copy, 0, &copy, tx->session->smtpname) == 0) { smtp_reply(tx->session, "501 %s Recipient address syntax error", esc_code(ESC_STATUS_PERMFAIL, ESC_BAD_DESTINATION_MAILBOX_ADDRESS_SYNTAX)); return; } while ((opt = strsep(&copy, " "))) { if (*opt == '\0') continue; if (ADVERTISE_EXT_DSN(tx->session) && strncasecmp(opt, "NOTIFY=", 7) == 0) { opt += 7; while ((p = strsep(&opt, ","))) { if (strcasecmp(p, "SUCCESS") == 0) tx->evp.dsn_notify |= DSN_SUCCESS; else if (strcasecmp(p, "FAILURE") == 0) tx->evp.dsn_notify |= DSN_FAILURE; else if (strcasecmp(p, "DELAY") == 0) tx->evp.dsn_notify |= DSN_DELAY; else if (strcasecmp(p, "NEVER") == 0) tx->evp.dsn_notify |= DSN_NEVER; } if (tx->evp.dsn_notify & DSN_NEVER && tx->evp.dsn_notify & (DSN_SUCCESS | DSN_FAILURE | DSN_DELAY)) { smtp_reply(tx->session, "553 NOTIFY option NEVER cannot be" " combined with other options"); return; } } else if (ADVERTISE_EXT_DSN(tx->session) && strncasecmp(opt, "ORCPT=", 6) == 0) { opt += 6; if (!text_to_mailaddr(&tx->evp.dsn_orcpt, opt)) { smtp_reply(tx->session, "553 ORCPT address syntax error"); return; } } else { smtp_reply(tx->session, "503 Unsupported option %s", opt); return; } } m_create(p_lka, IMSG_SMTP_EXPAND_RCPT, 0, 0, -1); m_add_id(p_lka, tx->session->id); m_add_envelope(p_lka, &tx->evp); m_close(p_lka); tree_xset(&wait_lka_rcpt, tx->session->id, tx->session); }
target: 0
cwe: [ "CWE-78", "CWE-252" ]
project: src
commit_id: 9dcfda045474d8903224d175907bfc29761dcb45
hash: 199,461,024,994,857,200,000,000,000,000,000,000,000
size: 69
Fix a security vulnerability discovered by Qualys which can lead to a privileges escalation on mbox deliveries and unprivileged code execution on lmtp deliveries, due to a logic issue causing a sanity check to be missed. ok eric@, millert@
delete_all_jobs (running_only) int running_only; { register int i; sigset_t set, oset; BLOCK_CHILD (set, oset); /* XXX - need to set j_lastj, j_firstj appropriately if running_only != 0. */ if (js.j_jobslots) { js.j_current = js.j_previous = NO_JOB; /* XXX could use js.j_firstj here */ for (i = 0; i < js.j_jobslots; i++) { #if defined (DEBUG) if (i < js.j_firstj && jobs[i]) itrace("delete_all_jobs: job %d non-null before js.j_firstj (%d)", i, js.j_firstj); if (i > js.j_lastj && jobs[i]) itrace("delete_all_jobs: job %d non-null after js.j_lastj (%d)", i, js.j_lastj); #endif if (jobs[i] && (running_only == 0 || (running_only && RUNNING(i)))) /* We don't want to add any of these pids to bgpids. If running_only is non-zero, we don't want to add running jobs to the list. If we are interested in all jobs, not just running jobs, and we are going to clear the bgpids list below (bgp_clear()), we don't need to bother. */ delete_job (i, DEL_WARNSTOPPED|DEL_NOBGPID); } if (running_only == 0) { free ((char *)jobs); js.j_jobslots = 0; js.j_firstj = js.j_lastj = js.j_njobs = 0; } } if (running_only == 0) bgp_clear (); UNBLOCK_CHILD (oset); }
target: 0
cwe: []
project: bash
commit_id: 955543877583837c85470f7fb8a97b7aa8d45e6c
hash: 60,778,616,869,904,040,000,000,000,000,000,000,000
size: 43
bash-4.4-rc2 release
static int _ffs_name_dev(struct ffs_dev *dev, const char *name) { struct ffs_dev *existing; existing = _ffs_do_find_dev(name); if (existing) return -EBUSY; dev->name = name; return 0; }
target: 0
cwe: [ "CWE-416", "CWE-362" ]
project: linux
commit_id: 38740a5b87d53ceb89eb2c970150f6e94e00373a
hash: 63,401,033,153,227,150,000,000,000,000,000,000,000
size: 12
usb: gadget: f_fs: Fix use-after-free When using asynchronous read or write operations on the USB endpoints the issuer of the IO request is notified by calling the ki_complete() callback of the submitted kiocb when the URB has been completed. Calling this ki_complete() callback will free kiocb. Make sure that the structure is no longer accessed beyond that point, otherwise undefined behaviour might occur. Fixes: 2e4c7553cd6f ("usb: gadget: f_fs: add aio support") Cc: <[email protected]> # v3.15+ Signed-off-by: Lars-Peter Clausen <[email protected]> Signed-off-by: Felipe Balbi <[email protected]>
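The rule the message states — ki_complete() frees the kiocb, so nothing may touch it afterwards — generalizes to any completion callback that owns its argument. Sketch of the safe shape (our types, not the kernel code):

    struct io_req {
        void (*complete)(struct io_req *);  /* frees the request */
        int   status;
    };

    /* Copy out anything needed *before* invoking the callback: once
     * req->complete(req) runs, req is freed and any later access is
     * a use-after-free (CWE-416). */
    static int finish_request(struct io_req *req)
    {
        int status = req->status;  /* saved while req is still alive */
        req->complete(req);        /* may free req */
        return status;             /* do NOT read req->status here */
    }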
ldns_rr_list2buffer_str_fmt(ldns_buffer *output, const ldns_output_format *fmt, const ldns_rr_list *list) { uint16_t i; for(i = 0; i < ldns_rr_list_rr_count(list); i++) { (void) ldns_rr2buffer_str_fmt(output, fmt, ldns_rr_list_rr(list, i)); } return ldns_buffer_status(output); }
target: 0
cwe: [ "CWE-415" ]
project: ldns
commit_id: 070b4595981f48a21cc6b4f5047fdc2d09d3da91
hash: 107,111,136,846,202,300,000,000,000,000,000,000,000
size: 11
CAA and URI
static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len) { struct dev_data *dev = ep->driver_data; if (dev->setup_out_ready) { DBG (dev, "ep0 request busy!\n"); return -EBUSY; } if (len > sizeof (dev->rbuf)) req->buf = kmalloc(len, GFP_ATOMIC); if (req->buf == NULL) { req->buf = dev->rbuf; return -ENOMEM; } req->complete = ep0_complete; req->length = len; req->zero = 0; return 0; }
target: 0
cwe: [ "CWE-763" ]
project: linux
commit_id: 501e38a5531efbd77d5c73c0ba838a889bfc1d74
hash: 168,074,889,734,849,990,000,000,000,000,000,000,000
size: 19
usb: gadget: clear related members when goto fail dev->config and dev->hs_config and dev->dev need to be cleaned if dev_config fails to avoid UAF. Acked-by: Alan Stern <[email protected]> Signed-off-by: Hangyu Hua <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Greg Kroah-Hartman <[email protected]>
static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_sr_msg *sr = &req->sr_msg; if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL; if (sr->msg_flags & MSG_DONTWAIT) req->flags |= REQ_F_NOWAIT; #ifdef CONFIG_COMPAT if (req->ctx->compat) sr->msg_flags |= MSG_CMSG_COMPAT; #endif return 0; }
target: 0
cwe: [ "CWE-787" ]
project: linux
commit_id: d1f82808877bb10d3deee7cf3374a4eb3fb582db
hash: 306,634,143,777,475,120,000,000,000,000,000,000,000
size: 19
io_uring: truncate lengths larger than MAX_RW_COUNT on provide buffers Read and write operations are capped to MAX_RW_COUNT. Some read ops rely on that limit, and that is not guaranteed by the IORING_OP_PROVIDE_BUFFERS. Truncate those lengths when doing io_add_buffers, so buffer addresses still use the uncapped length. Also, take the chance and change struct io_buffer len member to __u32, so it matches struct io_provide_buffer len member. This fixes CVE-2021-3491, also reported as ZDI-CAN-13546. Fixes: ddf0322db79c ("io_uring: add IORING_OP_PROVIDE_BUFFERS") Reported-by: Billy Jheng Bing-Jhong (@st424204) Signed-off-by: Thadeu Lima de Souza Cascardo <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
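The fix is a truncation at buffer-registration time: lengths above MAX_RW_COUNT are capped so later read paths keep their invariant, while, per the message, buffer addresses still advance by the uncapped length. Shape of that clamp (the kernel uses min_t; this is a plain-C rendering with our names):

    /* Cap a user-supplied buffer length at the read/write maximum;
     * only the usable length is adjusted, not the buffer address. */
    static unsigned int clamp_buf_len(unsigned long len,
                                      unsigned long max_rw)
    {
        return (unsigned int)(len > max_rw ? max_rw : len);
    }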
static void ext4_da_update_reserve_space(struct inode *inode, int used) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); int total, mdb, mdb_free; spin_lock(&EXT4_I(inode)->i_block_reservation_lock); /* recalculate the number of metablocks still need to be reserved */ total = EXT4_I(inode)->i_reserved_data_blocks - used; mdb = ext4_calc_metadata_amount(inode, total); /* figure out how many metablocks to release */ BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks); mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb; if (mdb_free) { /* Account for allocated meta_blocks */ mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks; /* update fs dirty blocks counter */ percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free); EXT4_I(inode)->i_allocated_meta_blocks = 0; EXT4_I(inode)->i_reserved_meta_blocks = mdb; } /* update per-inode reservations */ BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks); EXT4_I(inode)->i_reserved_data_blocks -= used; spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); }
target: 0
cwe: [ "CWE-399" ]
project: linux-2.6
commit_id: 06a279d636734da32bb62dd2f7b0ade666f65d7c
hash: 303,019,751,156,189,950,000,000,000,000,000,000,000
size: 30
ext4: only use i_size_high for regular files Directories are not allowed to be bigger than 2GB, so don't use i_size_high for anything other than regular files. E2fsck should complain about these inodes, but the simplest thing to do for the kernel is to only use i_size_high for regular files. This prevents an intentially corrupted filesystem from causing the kernel to burn a huge amount of CPU and issuing error messages such as: EXT4-fs warning (device loop0): ext4_block_to_path: block 135090028 > max Thanks to David Maciejak from Fortinet's FortiGuard Global Security Research Team for reporting this issue. http://bugzilla.kernel.org/show_bug.cgi?id=12375 Signed-off-by: "Theodore Ts'o" <[email protected]> Cc: [email protected]
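The message reduces to one conditional: honor the high 32 bits of the on-disk size only for regular files. A standalone rendering of that idea (ours, not the exact ext4 patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* Directories are capped at 2GB, so the high size word is
     * meaningful only for regular files; anything else uses just
     * the low 32 bits. */
    static uint64_t inode_size(uint32_t size_lo, uint32_t size_high,
                               bool is_regular_file)
    {
        if (is_regular_file)
            return ((uint64_t)size_high << 32) | size_lo;
        return size_lo;
    }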
sf_error_number (int errnum) { static const char *bad_errnum = "No error defined for this error number. This is a bug in libsndfile." ; int k ; if (errnum == SFE_MAX_ERROR) return SndfileErrors [0].str ; if (errnum < 0 || errnum > SFE_MAX_ERROR) { /* This really shouldn't happen in release versions. */ printf ("Not a valid error number (%d).\n", errnum) ; return bad_errnum ; } ; for (k = 0 ; SndfileErrors [k].str ; k++) if (errnum == SndfileErrors [k].error) return SndfileErrors [k].str ; return bad_errnum ; } /* sf_error_number */
0
[ "CWE-119", "CWE-787" ]
libsndfile
708e996c87c5fae77b104ccfeb8f6db784c32074
54,607,940,370,796,050,000,000,000,000,000,000,000
20
src/ : Move to a variable length header buffer Previously, the `psf->header` buffer was a fixed length specified by `SF_HEADER_LEN` which was set to `12292`. This was problematic for two reasons; this value was un-necessarily large for the majority of files and too small for some others. Now the size of the header buffer starts at 256 bytes and grows as necessary up to a maximum of 100k.
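A buffer that starts small and grows on demand, with a hard ceiling, avoids both the wasted fixed allocation and the too-small case. A hedged sketch of such a scheme (the constants and names mirror the description above but are assumptions, not libsndfile's actual API):

#include <stdlib.h>

#define HEADER_START_LEN 256
#define HEADER_MAX_LEN   (100 * 1024)

typedef struct {
    unsigned char *ptr;
    size_t alloc; /* bytes currently allocated */
} header_buf;

/* Grow geometrically up to HEADER_MAX_LEN; returns 0 on success. */
static int header_reserve(header_buf *h, size_t needed)
{
    size_t alloc;
    unsigned char *p;

    if (needed > HEADER_MAX_LEN)
        return -1; /* reject absurd headers outright */
    if (needed <= h->alloc)
        return 0;
    alloc = h->alloc ? h->alloc : HEADER_START_LEN;
    while (alloc < needed)
        alloc *= 2;
    if (alloc > HEADER_MAX_LEN)
        alloc = HEADER_MAX_LEN;
    p = realloc(h->ptr, alloc);
    if (p == NULL)
        return -1;
    h->ptr = p;
    h->alloc = alloc;
    return 0;
}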
static void find_compressor(char * compressor_name, int len, MOVTrack *track) { AVDictionaryEntry *encoder; int xdcam_res = (track->par->width == 1280 && track->par->height == 720) || (track->par->width == 1440 && track->par->height == 1080) || (track->par->width == 1920 && track->par->height == 1080); if (track->mode == MODE_MOV && (encoder = av_dict_get(track->st->metadata, "encoder", NULL, 0))) { av_strlcpy(compressor_name, encoder->value, 32); } else if (track->par->codec_id == AV_CODEC_ID_MPEG2VIDEO && xdcam_res) { int interlaced = track->par->field_order > AV_FIELD_PROGRESSIVE; AVStream *st = track->st; int rate = defined_frame_rate(NULL, st); av_strlcatf(compressor_name, len, "XDCAM"); if (track->par->format == AV_PIX_FMT_YUV422P) { av_strlcatf(compressor_name, len, " HD422"); } else if(track->par->width == 1440) { av_strlcatf(compressor_name, len, " HD"); } else av_strlcatf(compressor_name, len, " EX"); av_strlcatf(compressor_name, len, " %d%c", track->par->height, interlaced ? 'i' : 'p'); av_strlcatf(compressor_name, len, "%d", rate * (interlaced + 1)); } }
0
[ "CWE-125" ]
FFmpeg
95556e27e2c1d56d9e18f5db34d6f756f3011148
29,327,538,710,397,003,000,000,000,000,000,000,000
27
avformat/movenc: Do not pass AVCodecParameters in avpriv_request_sample Fixes: out of array read Fixes: ffmpeg_crash_8.avi Found-by: Thuan Pham, Marcel Böhme, Andrew Santosa and Alexandru Razvan Caciulescu with AFLSmart Signed-off-by: Michael Niedermayer <[email protected]>
parse_char_class(Node** np, OnigToken* tok, UChar** src, UChar* end, ScanEnv* env) { int r, neg, len, fetched, and_start; OnigCodePoint v, vs; UChar *p; Node* node; CClassNode *cc, *prev_cc; CClassNode work_cc; enum CCSTATE state; enum CCVALTYPE val_type, in_type; int val_israw, in_israw; prev_cc = (CClassNode* )NULL; *np = NULL_NODE; r = fetch_token_in_cc(tok, src, end, env); if (r == TK_CHAR && tok->u.c == '^' && tok->escaped == 0) { neg = 1; r = fetch_token_in_cc(tok, src, end, env); } else { neg = 0; } if (r < 0) return r; if (r == TK_CC_CLOSE) { if (! code_exist_check((OnigCodePoint )']', *src, env->pattern_end, 1, env)) return ONIGERR_EMPTY_CHAR_CLASS; CC_ESC_WARN(env, (UChar* )"]"); r = tok->type = TK_CHAR; /* allow []...] */ } *np = node = node_new_cclass(); CHECK_NULL_RETURN_MEMERR(node); cc = NCCLASS(node); and_start = 0; state = CCS_START; p = *src; while (r != TK_CC_CLOSE) { fetched = 0; switch (r) { case TK_CHAR: len = ONIGENC_CODE_TO_MBCLEN(env->enc, tok->u.c); if (len > 1) { in_type = CCV_CODE_POINT; } else if (len < 0) { r = len; goto err; } else { sb_char: in_type = CCV_SB; } v = (OnigCodePoint )tok->u.c; in_israw = 0; goto val_entry2; break; case TK_RAW_BYTE: /* tok->base != 0 : octal or hexadec. */ if (! ONIGENC_IS_SINGLEBYTE(env->enc) && tok->base != 0) { UChar buf[ONIGENC_CODE_TO_MBC_MAXLEN]; UChar* bufe = buf + ONIGENC_CODE_TO_MBC_MAXLEN; UChar* psave = p; int i, base = tok->base; buf[0] = tok->u.c; for (i = 1; i < ONIGENC_MBC_MAXLEN(env->enc); i++) { r = fetch_token_in_cc(tok, &p, end, env); if (r < 0) goto err; if (r != TK_RAW_BYTE || tok->base != base) { fetched = 1; break; } buf[i] = tok->u.c; } if (i < ONIGENC_MBC_MINLEN(env->enc)) { r = ONIGERR_TOO_SHORT_MULTI_BYTE_STRING; goto err; } len = enclen(env->enc, buf); if (i < len) { r = ONIGERR_TOO_SHORT_MULTI_BYTE_STRING; goto err; } else if (i > len) { /* fetch back */ p = psave; for (i = 1; i < len; i++) { r = fetch_token_in_cc(tok, &p, end, env); } fetched = 0; } if (i == 1) { v = (OnigCodePoint )buf[0]; goto raw_single; } else { v = ONIGENC_MBC_TO_CODE(env->enc, buf, bufe); in_type = CCV_CODE_POINT; } } else { v = (OnigCodePoint )tok->u.c; raw_single: in_type = CCV_SB; } in_israw = 1; goto val_entry2; break; case TK_CODE_POINT: v = tok->u.code; in_israw = 1; val_entry: len = ONIGENC_CODE_TO_MBCLEN(env->enc, v); if (len < 0) { r = len; goto err; } in_type = (len == 1 ? 
CCV_SB : CCV_CODE_POINT); val_entry2: r = next_state_val(cc, &vs, v, &val_israw, in_israw, in_type, &val_type, &state, env); if (r != 0) goto err; break; case TK_POSIX_BRACKET_OPEN: r = parse_posix_bracket(cc, &p, end, env); if (r < 0) goto err; if (r == 1) { /* is not POSIX bracket */ CC_ESC_WARN(env, (UChar* )"["); p = tok->backp; v = (OnigCodePoint )tok->u.c; in_israw = 0; goto val_entry; } goto next_class; break; case TK_CHAR_TYPE: r = add_ctype_to_cc(cc, tok->u.prop.ctype, tok->u.prop.not, env); if (r != 0) return r; next_class: r = next_state_class(cc, &vs, &val_type, &state, env); if (r != 0) goto err; break; case TK_CHAR_PROPERTY: { int ctype; ctype = fetch_char_property_to_ctype(&p, end, env); if (ctype < 0) return ctype; r = add_ctype_to_cc(cc, ctype, tok->u.prop.not, env); if (r != 0) return r; goto next_class; } break; case TK_CC_RANGE: if (state == CCS_VALUE) { r = fetch_token_in_cc(tok, &p, end, env); if (r < 0) goto err; fetched = 1; if (r == TK_CC_CLOSE) { /* allow [x-] */ range_end_val: v = (OnigCodePoint )'-'; in_israw = 0; goto val_entry; } else if (r == TK_CC_AND) { CC_ESC_WARN(env, (UChar* )"-"); goto range_end_val; } state = CCS_RANGE; } else if (state == CCS_START) { /* [-xa] is allowed */ v = (OnigCodePoint )tok->u.c; in_israw = 0; r = fetch_token_in_cc(tok, &p, end, env); if (r < 0) goto err; fetched = 1; /* [--x] or [a&&-x] is warned. */ if (r == TK_CC_RANGE || and_start != 0) CC_ESC_WARN(env, (UChar* )"-"); goto val_entry; } else if (state == CCS_RANGE) { CC_ESC_WARN(env, (UChar* )"-"); goto sb_char; /* [!--x] is allowed */ } else { /* CCS_COMPLETE */ r = fetch_token_in_cc(tok, &p, end, env); if (r < 0) goto err; fetched = 1; if (r == TK_CC_CLOSE) goto range_end_val; /* allow [a-b-] */ else if (r == TK_CC_AND) { CC_ESC_WARN(env, (UChar* )"-"); goto range_end_val; } if (IS_SYNTAX_BV(env->syntax, ONIG_SYN_ALLOW_DOUBLE_RANGE_OP_IN_CC)) { CC_ESC_WARN(env, (UChar* )"-"); goto sb_char; /* [0-9-a] is allowed as [0-9\-a] */ } r = ONIGERR_UNMATCHED_RANGE_SPECIFIER_IN_CHAR_CLASS; goto err; } break; case TK_CC_CC_OPEN: /* [ */ { Node *anode; CClassNode* acc; r = parse_char_class(&anode, tok, &p, end, env); if (r != 0) goto cc_open_err; acc = NCCLASS(anode); r = or_cclass(cc, acc, env->enc); onig_node_free(anode); cc_open_err: if (r != 0) goto err; } break; case TK_CC_AND: /* && */ { if (state == CCS_VALUE) { r = next_state_val(cc, &vs, 0, &val_israw, 0, val_type, &val_type, &state, env); if (r != 0) goto err; } /* initialize local variables */ and_start = 1; state = CCS_START; if (IS_NOT_NULL(prev_cc)) { r = and_cclass(prev_cc, cc, env->enc); if (r != 0) goto err; bbuf_free(cc->mbuf); } else { prev_cc = cc; cc = &work_cc; } initialize_cclass(cc); } break; case TK_EOT: r = ONIGERR_PREMATURE_END_OF_CHAR_CLASS; goto err; break; default: r = ONIGERR_PARSER_BUG; goto err; break; } if (fetched) r = tok->type; else { r = fetch_token_in_cc(tok, &p, end, env); if (r < 0) goto err; } } if (state == CCS_VALUE) { r = next_state_val(cc, &vs, 0, &val_israw, 0, val_type, &val_type, &state, env); if (r != 0) goto err; } if (IS_NOT_NULL(prev_cc)) { r = and_cclass(prev_cc, cc, env->enc); if (r != 0) goto err; bbuf_free(cc->mbuf); cc = prev_cc; } if (neg != 0) NCCLASS_SET_NOT(cc); else NCCLASS_CLEAR_NOT(cc); if (IS_NCCLASS_NOT(cc) && IS_SYNTAX_BV(env->syntax, ONIG_SYN_NOT_NEWLINE_IN_NEGATIVE_CC)) { int is_empty; is_empty = (IS_NULL(cc->mbuf) ? 
1 : 0); if (is_empty != 0) BITSET_IS_EMPTY(cc->bs, is_empty); if (is_empty == 0) { #define NEWLINE_CODE 0x0a if (ONIGENC_IS_CODE_NEWLINE(env->enc, NEWLINE_CODE)) { if (ONIGENC_CODE_TO_MBCLEN(env->enc, NEWLINE_CODE) == 1) BITSET_SET_BIT(cc->bs, NEWLINE_CODE); else add_code_range(&(cc->mbuf), env, NEWLINE_CODE, NEWLINE_CODE); } } } *src = p; return 0; err: if (cc != NCCLASS(*np)) bbuf_free(cc->mbuf); onig_node_free(*np); return r; }
0
[ "CWE-125" ]
oniguruma
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
71,030,220,576,810,230,000,000,000,000,000,000,000
325
onig-5.9.2
txid_snapshot_send(PG_FUNCTION_ARGS) { TxidSnapshot *snap = (TxidSnapshot *) PG_GETARG_VARLENA_P(0); StringInfoData buf; uint32 i; pq_begintypsend(&buf); pq_sendint(&buf, snap->nxip, 4); pq_sendint64(&buf, snap->xmin); pq_sendint64(&buf, snap->xmax); for (i = 0; i < snap->nxip; i++) pq_sendint64(&buf, snap->xip[i]); PG_RETURN_BYTEA_P(pq_endtypsend(&buf)); }
0
[ "CWE-703", "CWE-189" ]
postgres
31400a673325147e1205326008e32135a78b4d8a
149,909,729,717,829,080,000,000,000,000,000,000,000
14
Predict integer overflow to avoid buffer overruns. Several functions, mostly type input functions, calculated an allocation size such that the calculation wrapped to a small positive value when arguments implied a sufficiently-large requirement. Writes past the end of the inadvertent small allocation followed shortly thereafter. Coverity identified the path_in() vulnerability; code inspection led to the rest. In passing, add check_stack_depth() to prevent stack overflow in related functions. Back-patch to 8.4 (all supported versions). The non-comment hstore changes touch code that did not exist in 8.4, so that part stops at 9.0. Noah Misch and Heikki Linnakangas, reviewed by Tom Lane. Security: CVE-2014-0064
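The pattern this commit applies is to predict the overflow before computing the allocation size, instead of detecting it afterwards. A generic sketch of the check (not PostgreSQL's actual MaxAllocSize machinery):

#include <stdint.h>
#include <stdlib.h>

/* Allocate a header plus nxip fixed-size elements, rejecting counts
 * whose size computation would wrap to a small value. */
static void *alloc_array(size_t header, size_t nxip, size_t elem)
{
    if (elem != 0 && nxip > (SIZE_MAX - header) / elem)
        return NULL; /* would overflow: refuse instead of under-allocating */
    return malloc(header + nxip * elem);
}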
static void gprinter_cleanup(void) { if (major) { unregister_chrdev_region(MKDEV(major, 0), minors); major = minors = 0; } class_destroy(usb_gadget_class); usb_gadget_class = NULL; }
0
[ "CWE-416" ]
linux
e8d5f92b8d30bb4ade76494490c3c065e12411b1
154,299,547,391,631,790,000,000,000,000,000,000,000
9
usb: gadget: function: printer: fix use-after-free in __lock_acquire Fix this by increase object reference count. BUG: KASAN: use-after-free in __lock_acquire+0x3fd4/0x4180 kernel/locking/lockdep.c:3831 Read of size 8 at addr ffff8880683b0018 by task syz-executor.0/3377 CPU: 1 PID: 3377 Comm: syz-executor.0 Not tainted 5.6.11 #1 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0xce/0x128 lib/dump_stack.c:118 print_address_description.constprop.4+0x21/0x3c0 mm/kasan/report.c:374 __kasan_report+0x131/0x1b0 mm/kasan/report.c:506 kasan_report+0x12/0x20 mm/kasan/common.c:641 __asan_report_load8_noabort+0x14/0x20 mm/kasan/generic_report.c:135 __lock_acquire+0x3fd4/0x4180 kernel/locking/lockdep.c:3831 lock_acquire+0x127/0x350 kernel/locking/lockdep.c:4488 __raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline] _raw_spin_lock_irqsave+0x35/0x50 kernel/locking/spinlock.c:159 printer_ioctl+0x4a/0x110 drivers/usb/gadget/function/f_printer.c:723 vfs_ioctl fs/ioctl.c:47 [inline] ksys_ioctl+0xfb/0x130 fs/ioctl.c:763 __do_sys_ioctl fs/ioctl.c:772 [inline] __se_sys_ioctl fs/ioctl.c:770 [inline] __x64_sys_ioctl+0x73/0xb0 fs/ioctl.c:770 do_syscall_64+0x9e/0x510 arch/x86/entry/common.c:294 entry_SYSCALL_64_after_hwframe+0x49/0xbe RIP: 0033:0x4531a9 Code: ed 60 fc ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 bb 60 fc ff c3 66 2e 0f 1f 84 00 00 00 00 RSP: 002b:00007fd14ad72c78 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 RAX: ffffffffffffffda RBX: 000000000073bfa8 RCX: 00000000004531a9 RDX: fffffffffffffff9 RSI: 000000000000009e RDI: 0000000000000003 RBP: 0000000000000003 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00000000004bbd61 R13: 00000000004d0a98 R14: 00007fd14ad736d4 R15: 00000000ffffffff Allocated by task 2393: save_stack+0x21/0x90 mm/kasan/common.c:72 set_track mm/kasan/common.c:80 [inline] __kasan_kmalloc.constprop.3+0xa7/0xd0 mm/kasan/common.c:515 kasan_kmalloc+0x9/0x10 mm/kasan/common.c:529 kmem_cache_alloc_trace+0xfa/0x2d0 mm/slub.c:2813 kmalloc include/linux/slab.h:555 [inline] kzalloc include/linux/slab.h:669 [inline] gprinter_alloc+0xa1/0x870 drivers/usb/gadget/function/f_printer.c:1416 usb_get_function+0x58/0xc0 drivers/usb/gadget/functions.c:61 config_usb_cfg_link+0x1ed/0x3e0 drivers/usb/gadget/configfs.c:444 configfs_symlink+0x527/0x11d0 fs/configfs/symlink.c:202 vfs_symlink+0x33d/0x5b0 fs/namei.c:4201 do_symlinkat+0x11b/0x1d0 fs/namei.c:4228 __do_sys_symlinkat fs/namei.c:4242 [inline] __se_sys_symlinkat fs/namei.c:4239 [inline] __x64_sys_symlinkat+0x73/0xb0 fs/namei.c:4239 do_syscall_64+0x9e/0x510 arch/x86/entry/common.c:294 entry_SYSCALL_64_after_hwframe+0x49/0xbe Freed by task 3368: save_stack+0x21/0x90 mm/kasan/common.c:72 set_track mm/kasan/common.c:80 [inline] kasan_set_free_info mm/kasan/common.c:337 [inline] __kasan_slab_free+0x135/0x190 mm/kasan/common.c:476 kasan_slab_free+0xe/0x10 mm/kasan/common.c:485 slab_free_hook mm/slub.c:1444 [inline] slab_free_freelist_hook mm/slub.c:1477 [inline] slab_free mm/slub.c:3034 [inline] kfree+0xf7/0x410 mm/slub.c:3995 gprinter_free+0x49/0xd0 drivers/usb/gadget/function/f_printer.c:1353 usb_put_function+0x38/0x50 drivers/usb/gadget/functions.c:87 config_usb_cfg_unlink+0x2db/0x3b0 drivers/usb/gadget/configfs.c:485 configfs_unlink+0x3b9/0x7f0 fs/configfs/symlink.c:250 vfs_unlink+0x287/0x570 
fs/namei.c:4073 do_unlinkat+0x4f9/0x620 fs/namei.c:4137 __do_sys_unlink fs/namei.c:4184 [inline] __se_sys_unlink fs/namei.c:4182 [inline] __x64_sys_unlink+0x42/0x50 fs/namei.c:4182 do_syscall_64+0x9e/0x510 arch/x86/entry/common.c:294 entry_SYSCALL_64_after_hwframe+0x49/0xbe The buggy address belongs to the object at ffff8880683b0000 which belongs to the cache kmalloc-1k of size 1024 The buggy address is located 24 bytes inside of 1024-byte region [ffff8880683b0000, ffff8880683b0400) The buggy address belongs to the page: page:ffffea0001a0ec00 refcount:1 mapcount:0 mapping:ffff88806c00e300 index:0xffff8880683b1800 compound_mapcount: 0 flags: 0x100000000010200(slab|head) raw: 0100000000010200 0000000000000000 0000000600000001 ffff88806c00e300 raw: ffff8880683b1800 000000008010000a 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Reported-by: Kyungtae Kim <[email protected]> Signed-off-by: Zqiang <[email protected]> Signed-off-by: Felipe Balbi <[email protected]>
print_emulators(const gs_main_instance *minst) { outprintf(minst->heap, "%s", help_emulators); { const byte *s; for (s = gs_emulators; s[0] != 0; s += strlen((const char *)s) + 1) outprintf(minst->heap, " %s", s); } outprintf(minst->heap, "\n"); }
0
[]
ghostpdl
407cc61e87b0fd9d44d72ca740af7d3c85dee78d
297,002,064,081,782,130,000,000,000,000,000,000,000
11
"starting_arg_file" should only apply once. The "starting_arg_file == true" setting should apply to the *first* call to lib_file_open() in the context of a given call to runarg(). Previously, it remained set for the entire duration of the runarg() call, resulting in the current directory being searched for any resource files required by the job. We also want "starting_arg_file == false" when runarg() is called to execute Postscript from a buffer, rather than a file argument. There is a very small chance this may cause problems with some strange scripts or utilities, but I have been unable to prompt such an issue. If one does arise, we may have rethink this entirely. No cluster differences.
static void xdr_buf_try_expand(struct xdr_buf *buf, unsigned int len) { struct kvec *head = buf->head; struct kvec *tail = buf->tail; unsigned int sum = head->iov_len + buf->page_len + tail->iov_len; unsigned int free_space, newlen; if (sum > buf->len) { free_space = min_t(unsigned int, sum - buf->len, len); newlen = xdr_buf_pages_fill_sparse(buf, buf->len + free_space, GFP_KERNEL); free_space = newlen - buf->len; buf->len = newlen; len -= free_space; if (!len) return; } if (buf->buflen > sum) { /* Expand the tail buffer */ free_space = min_t(unsigned int, buf->buflen - sum, len); tail->iov_len += free_space; buf->len += free_space; } }
0
[ "CWE-119", "CWE-787" ]
linux
6d1c0f3d28f98ea2736128ed3e46821496dc3a8c
24,509,649,115,672,070,000,000,000,000,000,000,000
25
sunrpc: Avoid a KASAN slab-out-of-bounds bug in xdr_set_page_base() This seems to happen fairly easily during READ_PLUS testing on NFS v4.2. I found that we could end up accessing xdr->buf->pages[pgnr] with a pgnr greater than the number of pages in the array. So let's just return early if we're setting base to a point at the end of the page data and let xdr_set_tail_base() handle setting up the buffer pointers instead. Signed-off-by: Anna Schumaker <[email protected]> Fixes: 8d86e373b0ef ("SUNRPC: Clean up helpers xdr_set_iov() and xdr_set_page_base()") Signed-off-by: Trond Myklebust <[email protected]>
void mobi_free_opf_manifest(OPFmanifest *manifest) { if (manifest) { mobi_free_opf_struct_3el(manifest->item, id, href, media_type); free(manifest); } }
0
[ "CWE-476" ]
libmobi
c0699c8693c47f14a2e57dec7292e862ac7adf9c
136,106,440,643,417,330,000,000,000,000,000,000,000
6
Fix undefined behavior when passing null to strdup
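Since strdup(NULL) is undefined behavior, the usual fix is a null-tolerant wrapper at every call site that may receive a missing field. A minimal sketch:

#include <stdlib.h>
#include <string.h>

/* Return NULL for NULL input instead of invoking undefined behavior. */
static char *strdup_or_null(const char *s)
{
    return s != NULL ? strdup(s) : NULL;
}

With this shape, an optional field that was never set simply stays NULL in the copy instead of crashing the parser.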
static __be16 ipx_map_frame_type(unsigned char type) { __be16 rc = 0; switch (type) { case IPX_FRAME_ETHERII: rc = htons(ETH_P_IPX); break; case IPX_FRAME_8022: rc = htons(ETH_P_802_2); break; case IPX_FRAME_SNAP: rc = htons(ETH_P_SNAP); break; case IPX_FRAME_8023: rc = htons(ETH_P_802_3); break; } return rc; }
0
[ "CWE-416" ]
linux
ee0d8d8482345ff97a75a7d747efc309f13b0d80
196,981,260,989,645,240,000,000,000,000,000,000,000
13
ipx: call ipxitf_put() in ioctl error path We should call ipxitf_put() if the copy_to_user() fails. Reported-by: 李强 <[email protected]> Signed-off-by: Dan Carpenter <[email protected]> Signed-off-by: David S. Miller <[email protected]>
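The bug class here is a reference acquired at the top of an ioctl that is only released on the success path. A standalone sketch of the fixed shape (iface_get/iface_put and copy_out are stand-ins for the kernel helpers, not the real IPX code):

#include <string.h>

struct iface {
    int refcount;
    char name[16];
};

static void iface_get(struct iface *i) { i->refcount++; }
static void iface_put(struct iface *i) { i->refcount--; }

/* Stand-in for copy_to_user(); returns nonzero on fault. */
static int copy_out(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);
    return 0;
}

/* The reference taken by iface_get() is dropped on every exit path,
 * including the copy-out failure path that the original code missed. */
static int get_iface_name(struct iface *i, char *ubuf)
{
    int rc = 0;

    iface_get(i);
    if (copy_out(ubuf, i->name, sizeof(i->name)))
        rc = -1; /* -EFAULT in the kernel */
    iface_put(i); /* release on success and failure alike */
    return rc;
}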
static int cert_self_signed(X509 *x) { /* * FIXME: x509v3_cache_extensions() needs to detect more failures and not * set EXFLAG_SET when that happens. Especially, if the failures are * parse errors, rather than memory pressure! */ X509_check_purpose(x, -1, 0); if (x->ex_flags & EXFLAG_SS) return 1; else return 0; }
0
[]
openssl
33cc5dde478ba5ad79f8fd4acd8737f0e60e236e
139,581,969,949,389,090,000,000,000,000,000,000,000
13
Compat self-signed trust with reject-only aux data When auxiliary data contains only reject entries, continue to trust self-signed objects just as when no auxiliary data is present. This makes it possible to reject specific uses without changing what's accepted (and thus overring the underlying EKU). Added new supported certs and doubled test count from 38 to 76. Reviewed-by: Dr. Stephen Henson <[email protected]>
#define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0)
0
[ "CWE-476" ]
linux
4969c06a0d83c9c3dc50b8efcdc8eeedfce896f6
95,595,523,993,300,890,000,000,000,000,000,000,000
1
f2fs: support swap file w/ DIO Signed-off-by: Jaegeuk Kim <[email protected]>
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { if (__kvm_set_xcr(vcpu, index, xcr)) { kvm_inject_gp(vcpu, 0); return 1; } return 0; }
0
[ "CWE-200" ]
kvm
831d9d02f9522e739825a51a11e3bc5aa531a905
187,513,307,244,600,700,000,000,000,000,000,000,000
8
KVM: x86: fix information leak to userland Structures kvm_vcpu_events, kvm_debugregs, kvm_pit_state2 and kvm_clock_data are copied to userland with some padding and reserved fields uninitialized. This leaks the contents of kernel stack memory. We have to initialize them to zero. In patch v1 Jan Kiszka suggested to fill reserved fields with zeros instead of memset'ting the whole struct. It makes sense as these fields are explicitly marked as padding. No more fields need zeroing. KVM-Stable-Tag. Signed-off-by: Vasiliy Kulikov <[email protected]> Signed-off-by: Marcelo Tosatti <[email protected]>
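The standard remedy for this leak class is to zero the whole structure before filling the meaningful fields, so compiler-inserted padding and reserved members carry no stack garbage when copied to userspace. A sketch (the struct here is illustrative, not the real KVM ABI):

#include <string.h>
#include <stdint.h>

struct clock_data {
    uint64_t clock;
    uint32_t flags;
    uint32_t pad[9]; /* reserved: must be deterministic when copied out */
};

static void fill_clock_data(struct clock_data *d, uint64_t now)
{
    memset(d, 0, sizeof(*d)); /* clear padding and reserved fields first */
    d->clock = now;
    d->flags = 0;
}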
static int hls_slice_header(HEVCContext *s) { GetBitContext *gb = &s->HEVClc->gb; SliceHeader *sh = &s->sh; int i, ret; // Coded parameters sh->first_slice_in_pic_flag = get_bits1(gb); if (s->ref && sh->first_slice_in_pic_flag) { av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n"); return 1; // This slice will be skiped later, do not corrupt state } if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) { s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; if (IS_IDR(s)) ff_hevc_clear_refs(s); } sh->no_output_of_prior_pics_flag = 0; if (IS_IRAP(s)) sh->no_output_of_prior_pics_flag = get_bits1(gb); sh->pps_id = get_ue_golomb_long(gb); if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) { av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id); return AVERROR_INVALIDDATA; } if (!sh->first_slice_in_pic_flag && s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) { av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n"); return AVERROR_INVALIDDATA; } s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data; if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1) sh->no_output_of_prior_pics_flag = 1; if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) { const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data; const HEVCSPS *last_sps = s->ps.sps; enum AVPixelFormat pix_fmt; if (last_sps && IS_IRAP(s) && s->nal_unit_type != HEVC_NAL_CRA_NUT) { if (sps->width != last_sps->width || sps->height != last_sps->height || sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering != last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering) sh->no_output_of_prior_pics_flag = 0; } ff_hevc_clear_refs(s); pix_fmt = get_format(s, sps); if (pix_fmt < 0) return pix_fmt; ret = set_sps(s, sps, pix_fmt); if (ret < 0) return ret; s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; } sh->dependent_slice_segment_flag = 0; if (!sh->first_slice_in_pic_flag) { int slice_address_length; if (s->ps.pps->dependent_slice_segments_enabled_flag) sh->dependent_slice_segment_flag = get_bits1(gb); slice_address_length = av_ceil_log2(s->ps.sps->ctb_width * s->ps.sps->ctb_height); sh->slice_segment_addr = get_bitsz(gb, slice_address_length); if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) { av_log(s->avctx, AV_LOG_ERROR, "Invalid slice segment address: %u.\n", sh->slice_segment_addr); return AVERROR_INVALIDDATA; } if (!sh->dependent_slice_segment_flag) { sh->slice_addr = sh->slice_segment_addr; s->slice_idx++; } } else { sh->slice_segment_addr = sh->slice_addr = 0; s->slice_idx = 0; s->slice_initialized = 0; } if (!sh->dependent_slice_segment_flag) { s->slice_initialized = 0; for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++) skip_bits(gb, 1); // slice_reserved_undetermined_flag[] sh->slice_type = get_ue_golomb_long(gb); if (!(sh->slice_type == HEVC_SLICE_I || sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B)) { av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n", sh->slice_type); return AVERROR_INVALIDDATA; } if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) { av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n"); return AVERROR_INVALIDDATA; } // when flag is not present, picture is inferred to be output sh->pic_output_flag = 1; if (s->ps.pps->output_flag_present_flag) sh->pic_output_flag = get_bits1(gb); if (s->ps.sps->separate_colour_plane_flag) sh->colour_plane_id = get_bits(gb, 
2); if (!IS_IDR(s)) { int poc, pos; sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb); poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type); if (!sh->first_slice_in_pic_flag && poc != s->poc) { av_log(s->avctx, AV_LOG_WARNING, "Ignoring POC change between slices: %d -> %d\n", s->poc, poc); if (s->avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; poc = s->poc; } s->poc = poc; sh->short_term_ref_pic_set_sps_flag = get_bits1(gb); pos = get_bits_left(gb); if (!sh->short_term_ref_pic_set_sps_flag) { ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1); if (ret < 0) return ret; sh->short_term_rps = &sh->slice_rps; } else { int numbits, rps_idx; if (!s->ps.sps->nb_st_rps) { av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n"); return AVERROR_INVALIDDATA; } numbits = av_ceil_log2(s->ps.sps->nb_st_rps); rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0; sh->short_term_rps = &s->ps.sps->st_rps[rps_idx]; } sh->short_term_ref_pic_set_size = pos - get_bits_left(gb); pos = get_bits_left(gb); ret = decode_lt_rps(s, &sh->long_term_rps, gb); if (ret < 0) { av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n"); if (s->avctx->err_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; } sh->long_term_ref_pic_set_size = pos - get_bits_left(gb); if (s->ps.sps->sps_temporal_mvp_enabled_flag) sh->slice_temporal_mvp_enabled_flag = get_bits1(gb); else sh->slice_temporal_mvp_enabled_flag = 0; } else { s->sh.short_term_rps = NULL; s->poc = 0; } /* 8.3.1 */ if (sh->first_slice_in_pic_flag && s->temporal_id == 0 && s->nal_unit_type != HEVC_NAL_TRAIL_N && s->nal_unit_type != HEVC_NAL_TSA_N && s->nal_unit_type != HEVC_NAL_STSA_N && s->nal_unit_type != HEVC_NAL_RADL_N && s->nal_unit_type != HEVC_NAL_RADL_R && s->nal_unit_type != HEVC_NAL_RASL_N && s->nal_unit_type != HEVC_NAL_RASL_R) s->pocTid0 = s->poc; if (s->ps.sps->sao_enabled) { sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb); if (s->ps.sps->chroma_format_idc) { sh->slice_sample_adaptive_offset_flag[1] = sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb); } } else { sh->slice_sample_adaptive_offset_flag[0] = 0; sh->slice_sample_adaptive_offset_flag[1] = 0; sh->slice_sample_adaptive_offset_flag[2] = 0; } sh->nb_refs[L0] = sh->nb_refs[L1] = 0; if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) { int nb_refs; sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active; if (sh->slice_type == HEVC_SLICE_B) sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active; if (get_bits1(gb)) { // num_ref_idx_active_override_flag sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1; if (sh->slice_type == HEVC_SLICE_B) sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1; } if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) { av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n", sh->nb_refs[L0], sh->nb_refs[L1]); return AVERROR_INVALIDDATA; } sh->rpl_modification_flag[0] = 0; sh->rpl_modification_flag[1] = 0; nb_refs = ff_hevc_frame_nb_refs(s); if (!nb_refs) { av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n"); return AVERROR_INVALIDDATA; } if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) { sh->rpl_modification_flag[0] = get_bits1(gb); if (sh->rpl_modification_flag[0]) { for (i = 0; i < sh->nb_refs[L0]; i++) sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs)); } if (sh->slice_type == HEVC_SLICE_B) { sh->rpl_modification_flag[1] = get_bits1(gb); if (sh->rpl_modification_flag[1] 
== 1) for (i = 0; i < sh->nb_refs[L1]; i++) sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs)); } } if (sh->slice_type == HEVC_SLICE_B) sh->mvd_l1_zero_flag = get_bits1(gb); if (s->ps.pps->cabac_init_present_flag) sh->cabac_init_flag = get_bits1(gb); else sh->cabac_init_flag = 0; sh->collocated_ref_idx = 0; if (sh->slice_temporal_mvp_enabled_flag) { sh->collocated_list = L0; if (sh->slice_type == HEVC_SLICE_B) sh->collocated_list = !get_bits1(gb); if (sh->nb_refs[sh->collocated_list] > 1) { sh->collocated_ref_idx = get_ue_golomb_long(gb); if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) { av_log(s->avctx, AV_LOG_ERROR, "Invalid collocated_ref_idx: %d.\n", sh->collocated_ref_idx); return AVERROR_INVALIDDATA; } } } if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) || (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) { int ret = pred_weight_table(s, gb); if (ret < 0) return ret; } sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb); if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) { av_log(s->avctx, AV_LOG_ERROR, "Invalid number of merging MVP candidates: %d.\n", sh->max_num_merge_cand); return AVERROR_INVALIDDATA; } } sh->slice_qp_delta = get_se_golomb(gb); if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) { sh->slice_cb_qp_offset = get_se_golomb(gb); sh->slice_cr_qp_offset = get_se_golomb(gb); } else { sh->slice_cb_qp_offset = 0; sh->slice_cr_qp_offset = 0; } if (s->ps.pps->chroma_qp_offset_list_enabled_flag) sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb); else sh->cu_chroma_qp_offset_enabled_flag = 0; if (s->ps.pps->deblocking_filter_control_present_flag) { int deblocking_filter_override_flag = 0; if (s->ps.pps->deblocking_filter_override_enabled_flag) deblocking_filter_override_flag = get_bits1(gb); if (deblocking_filter_override_flag) { sh->disable_deblocking_filter_flag = get_bits1(gb); if (!sh->disable_deblocking_filter_flag) { int beta_offset_div2 = get_se_golomb(gb); int tc_offset_div2 = get_se_golomb(gb) ; if (beta_offset_div2 < -6 || beta_offset_div2 > 6 || tc_offset_div2 < -6 || tc_offset_div2 > 6) { av_log(s->avctx, AV_LOG_ERROR, "Invalid deblock filter offsets: %d, %d\n", beta_offset_div2, tc_offset_div2); return AVERROR_INVALIDDATA; } sh->beta_offset = beta_offset_div2 * 2; sh->tc_offset = tc_offset_div2 * 2; } } else { sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf; sh->beta_offset = s->ps.pps->beta_offset; sh->tc_offset = s->ps.pps->tc_offset; } } else { sh->disable_deblocking_filter_flag = 0; sh->beta_offset = 0; sh->tc_offset = 0; } if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag && (sh->slice_sample_adaptive_offset_flag[0] || sh->slice_sample_adaptive_offset_flag[1] || !sh->disable_deblocking_filter_flag)) { sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb); } else { sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag; } } else if (!s->slice_initialized) { av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n"); return AVERROR_INVALIDDATA; } sh->num_entry_point_offsets = 0; if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) { unsigned num_entry_point_offsets = get_ue_golomb_long(gb); // It would be possible to bound this tighter but this here is simpler if (num_entry_point_offsets > get_bits_left(gb)) { av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets); return AVERROR_INVALIDDATA; } 
sh->num_entry_point_offsets = num_entry_point_offsets; if (sh->num_entry_point_offsets > 0) { int offset_len = get_ue_golomb_long(gb) + 1; if (offset_len < 1 || offset_len > 32) { sh->num_entry_point_offsets = 0; av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len); return AVERROR_INVALIDDATA; } av_freep(&sh->entry_point_offset); av_freep(&sh->offset); av_freep(&sh->size); sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned)); sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int)); sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int)); if (!sh->entry_point_offset || !sh->offset || !sh->size) { sh->num_entry_point_offsets = 0; av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n"); return AVERROR(ENOMEM); } for (i = 0; i < sh->num_entry_point_offsets; i++) { unsigned val = get_bits_long(gb, offset_len); sh->entry_point_offset[i] = val + 1; // +1; // +1 to get the size } if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) { s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here s->threads_number = 1; } else s->enable_parallel_tiles = 0; } else s->enable_parallel_tiles = 0; } if (s->ps.pps->slice_header_extension_present_flag) { unsigned int length = get_ue_golomb_long(gb); if (length*8LL > get_bits_left(gb)) { av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n"); return AVERROR_INVALIDDATA; } for (i = 0; i < length; i++) skip_bits(gb, 8); // slice_header_extension_data_byte } // Inferred parameters sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta; if (sh->slice_qp > 51 || sh->slice_qp < -s->ps.sps->qp_bd_offset) { av_log(s->avctx, AV_LOG_ERROR, "The slice_qp %d is outside the valid range " "[%d, 51].\n", sh->slice_qp, -s->ps.sps->qp_bd_offset); return AVERROR_INVALIDDATA; } sh->slice_ctb_addr_rs = sh->slice_segment_addr; if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) { av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n"); return AVERROR_INVALIDDATA; } if (get_bits_left(gb) < 0) { av_log(s->avctx, AV_LOG_ERROR, "Overread slice header by %d bits\n", -get_bits_left(gb)); return AVERROR_INVALIDDATA; } s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag; if (!s->ps.pps->cu_qp_delta_enabled_flag) s->HEVClc->qp_y = s->sh.slice_qp; s->slice_initialized = 1; s->HEVClc->tu.cu_qp_offset_cb = 0; s->HEVClc->tu.cu_qp_offset_cr = 0; return 0; }
0
[ "CWE-476" ]
FFmpeg
9ccc633068c6fe76989f487c8932bd11886ad65b
329,455,107,854,606,400,000,000,000,000,000,000,000
425
avcodec/hevcdec: Avoid only partly skipping duplicate first slices Fixes: NULL pointer dereference and out of array access Fixes: 13871/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_HEVC_fuzzer-5746167087890432 Fixes: 13845/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_HEVC_fuzzer-5650370728034304 This also fixes the return code for explode mode Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg Reviewed-by: James Almer <[email protected]> Signed-off-by: Michael Niedermayer <[email protected]> (cherry picked from commit 54655623a82632e7624714d7b2a3e039dc5faa7e) Signed-off-by: Michael Niedermayer <[email protected]>
static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) { struct nfs_server *server = NFS_SERVER(dir); struct nfs_renameargs *arg = msg->rpc_argp; struct nfs_renameres *res = msg->rpc_resp; msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; arg->bitmask = server->attr_bitmask; res->server = server; nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1); }
0
[ "CWE-703", "CWE-189" ]
linux
20e0fa98b751facf9a1101edaefbc19c82616a68
185,109,178,259,754,940,000,000,000,000,000,000,000
11
Fix length of buffer copied in __nfs4_get_acl_uncached _copy_from_pages(), used to copy data from the temporary buffer to the user-passed buffer, is passed the wrong size parameter when copying data. res.acl_len contains both the bitmap and acl lengths while acl_len contains the acl length after adjusting for the bitmap size. Signed-off-by: Sachin Prabhu <[email protected]> Signed-off-by: Trond Myklebust <[email protected]>
static void tight_pack24(VncState *vs, uint8_t *buf, size_t count, size_t *ret) { uint8_t *buf8; uint32_t pix; int rshift, gshift, bshift; buf8 = buf; if (1 /* FIXME */) { rshift = vs->client_pf.rshift; gshift = vs->client_pf.gshift; bshift = vs->client_pf.bshift; } else { rshift = 24 - vs->client_pf.rshift; gshift = 24 - vs->client_pf.gshift; bshift = 24 - vs->client_pf.bshift; } if (ret) { *ret = count * 3; } while (count--) { pix = ldl_he_p(buf8); *buf++ = (char)(pix >> rshift); *buf++ = (char)(pix >> gshift); *buf++ = (char)(pix >> bshift); buf8 += 4; } }
0
[ "CWE-401" ]
qemu
6bf21f3d83e95bcc4ba35a7a07cc6655e8b010b0
84,359,316,032,238,930,000,000,000,000,000,000,000
30
vnc: fix memory leak when vnc disconnect Currently when qemu receives a vnc connect, it creates a 'VncState' to represent this connection. In 'vnc_worker_thread_loop' it creates a local 'VncState'. The connection 'VncState' and local 'VncState' exchange data in 'vnc_async_encoding_start' and 'vnc_async_encoding_end'. In 'zrle_compress_data' it calls 'deflateInit2' to allocate the libz library opaque data. The 'VncState' used in 'zrle_compress_data' is the local 'VncState'. In 'vnc_zrle_clear' it calls 'deflateEnd' to free the libz library opaque data. The 'VncState' used in 'vnc_zrle_clear' is the connection 'VncState'. In the current implementation there is a memory leak when the vnc client disconnects. Following is the asan output backtrack: Direct leak of 29760 byte(s) in 5 object(s) allocated from: 0 0xffffa67ef3c3 in __interceptor_calloc (/lib64/libasan.so.4+0xd33c3) 1 0xffffa65071cb in g_malloc0 (/lib64/libglib-2.0.so.0+0x571cb) 2 0xffffa5e968f7 in deflateInit2_ (/lib64/libz.so.1+0x78f7) 3 0xaaaacec58613 in zrle_compress_data ui/vnc-enc-zrle.c:87 4 0xaaaacec58613 in zrle_send_framebuffer_update ui/vnc-enc-zrle.c:344 5 0xaaaacec34e77 in vnc_send_framebuffer_update ui/vnc.c:919 6 0xaaaacec5e023 in vnc_worker_thread_loop ui/vnc-jobs.c:271 7 0xaaaacec5e5e7 in vnc_worker_thread ui/vnc-jobs.c:340 8 0xaaaacee4d3c3 in qemu_thread_start util/qemu-thread-posix.c:502 9 0xffffa544e8bb in start_thread (/lib64/libpthread.so.0+0x78bb) 10 0xffffa53965cb in thread_start (/lib64/libc.so.6+0xd55cb) This is because the opaque allocated in 'deflateInit2' is not freed in 'deflateEnd'. The reason is that 'deflateEnd' calls 'deflateStateCheck', which checks whether 's->strm != strm' (libz's data structure). This check will be true, so 'deflateEnd' just returns 'Z_STREAM_ERROR' and does not free the data allocated in 'deflateInit2'. The reason this happens is that the 'VncState' contains the whole 'VncZrle', so when calling 'deflateInit2', the 's->strm' will be the local address. So 's->strm != strm' will be true. To fix this issue, we need to make the 'zrle' member of 'VncState' a pointer. Then the connection 'VncState' and local 'VncState' exchange mechanism will work as expected. The 'tight' member of 'VncState' has the same issue; turn it into a pointer as well. Reported-by: Ying Fang <[email protected]> Signed-off-by: Li Qiang <[email protected]> Message-id: [email protected] Signed-off-by: Gerd Hoffmann <[email protected]>
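zlib's deflateEnd() refuses to tear down a stream whose address differs from the one deflateInit2() recorded, which is exactly why the copied-struct design leaked. A sketch of the safe pairing, keeping the z_stream at one stable address for both calls (the wrapper type is an assumption, not QEMU's actual structure):

#include <zlib.h>
#include <string.h>

typedef struct {
    z_stream strm;
    int inited;
} zrle_state;

static int zrle_state_init(zrle_state *z, int level)
{
    memset(&z->strm, 0, sizeof(z->strm)); /* zalloc/zfree/opaque = Z_NULL */
    if (deflateInit2(&z->strm, level, Z_DEFLATED, MAX_WBITS,
                     MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY) != Z_OK)
        return -1;
    z->inited = 1;
    return 0;
}

static void zrle_state_fini(zrle_state *z)
{
    if (z->inited) {
        deflateEnd(&z->strm); /* same &z->strm as the init call */
        z->inited = 0;
    }
}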
static int auth_context_match( git_http_auth_context **out, http_subtransport *t, bool (*scheme_match)(git_http_auth_scheme *scheme, void *data), void *data) { git_http_auth_scheme *scheme = NULL; git_http_auth_context *context = NULL, *c; size_t i; *out = NULL; for (i = 0; i < ARRAY_SIZE(auth_schemes); i++) { if (scheme_match(&auth_schemes[i], data)) { scheme = &auth_schemes[i]; break; } } if (!scheme) return 0; /* See if authentication has already started for this scheme */ git_vector_foreach(&t->auth_contexts, i, c) { if (c->type == scheme->type) { context = c; break; } } if (!context) { if (scheme->init_context(&context, &t->connection_data) < 0) return -1; else if (!context) return 0; else if (git_vector_insert(&t->auth_contexts, context) < 0) return -1; } *out = context; return 0; }
0
[ "CWE-284" ]
libgit2
b5c6a1b407b7f8b952bded2789593b68b1876211
85,238,215,445,673,490,000,000,000,000,000,000,000
43
http: check certificate validity before clobbering the error variable
time_t base64totime_t(char* s, database* db, const char* field_name){ if(strcmp(s,"0")==0){ return 0; } byte* b=decode_base64(s,strlen(s),NULL); char* endp; if (b==NULL) { /* Should we print error here? */ return 0; } else { time_t t = strtol((char *)b,&endp,10); if (endp[0]!='\0') { LOG_DB_FORMAT_LINE(LOG_LEVEL_WARNING, could not read '%s' from database: strtoll failed for '%s' (base64 encoded value: '%s'), field_name, b, s) free(b); return 0; } log_msg(LOG_LEVEL_DEBUG, "base64totime_t: converted '%s': '%s' to %lld (base64 encoded value '%s')", field_name, b, (long long) t, s); free(b); return t; } }
0
[ "CWE-787" ]
aide
175d1f2626f4500b4fc5ecb7167bba9956b174bc
92,383,623,962,279,880,000,000,000,000,000,000,000
28
Precalculate buffer size in base64 functions Aide uses a fixed size (16k bytes) for the return buffer in encode_base64/decode_base64 functions. This results in a segfault if aide processes a file with too large extended attribute value or ACL. Fix this issue by precalculating the size of the return buffer depending on the input in the encode_base64/decode_base64 functions. This addresses CVE-2021-45417. Thanks to David Bouman for reporting this vulnerability and reviewing this patch.
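Both base64 directions have exact, cheap size formulas, so the output buffer can always be sized from the input instead of a fixed 16k guess. A sketch with an overflow guard (helper names are illustrative, not aide's API):

#include <stdint.h>
#include <stddef.h>

/* Encoded size for n input bytes, including the NUL terminator;
 * returns 0 if the computation would overflow. */
static size_t b64_encoded_size(size_t n)
{
    if (n > (SIZE_MAX - 1) / 4 * 3 - 2)
        return 0;
    return ((n + 2) / 3) * 4 + 1;
}

/* Upper bound on decoded size for n base64 characters. */
static size_t b64_decoded_size(size_t n)
{
    return (n / 4) * 3 + 3;
}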
MagickExport int LocaleUppercase(const int c) { if (c == EOF) return(c); #if defined(MAGICKCORE_LOCALE_SUPPORT) if (c_locale != (locale_t) NULL) return(toupper_l((int) ((unsigned char) c),c_locale)); #endif return(toupper((int) ((unsigned char) c))); }
0
[ "CWE-125" ]
ImageMagick
58d9c46929ca0828edde34d263700c3a5fe8dc3c
295,579,304,606,533,900,000,000,000,000,000,000,000
10
...
static void collect_procs_anon(struct page *page, struct list_head *to_kill, struct to_kill **tkc, int force_early) { struct vm_area_struct *vma; struct task_struct *tsk; struct anon_vma *av; pgoff_t pgoff; av = page_lock_anon_vma_read(page); if (av == NULL) /* Not actually mapped anymore */ return; pgoff = page_to_pgoff(page); read_lock(&tasklist_lock); for_each_process (tsk) { struct anon_vma_chain *vmac; struct task_struct *t = task_early_kill(tsk, force_early); if (!t) continue; anon_vma_interval_tree_foreach(vmac, &av->rb_root, pgoff, pgoff) { vma = vmac->vma; if (!page_mapped_in_vma(page, vma)) continue; if (vma->vm_mm == t->mm) add_to_kill(t, page, vma, to_kill, tkc); } } read_unlock(&tasklist_lock); page_unlock_anon_vma_read(av); }
0
[]
linux
46612b751c4941c5c0472ddf04027e877ae5990f
55,963,580,416,884,240,000,000,000,000,000,000,000
32
mm: hwpoison: fix thp split handling in soft_offline_in_use_page() When soft_offline_in_use_page() runs on a thp tail page after pmd is split, we trigger the following VM_BUG_ON_PAGE(): Memory failure: 0x3755ff: non anonymous thp __get_any_page: 0x3755ff: unknown zero refcount page type 2fffff80000000 Soft offlining pfn 0x34d805 at process virtual address 0x20fff000 page:ffffea000d360140 count:0 mapcount:0 mapping:0000000000000000 index:0x1 flags: 0x2fffff80000000() raw: 002fffff80000000 ffffea000d360108 ffffea000d360188 0000000000000000 raw: 0000000000000001 0000000000000000 00000000ffffffff 0000000000000000 page dumped because: VM_BUG_ON_PAGE(page_ref_count(page) == 0) ------------[ cut here ]------------ kernel BUG at ./include/linux/mm.h:519! soft_offline_in_use_page() passed refcount and page lock from tail page to head page, which is not needed because we can pass any subpage to split_huge_page(). Naoya had fixed a similar issue in c3901e722b29 ("mm: hwpoison: fix thp split handling in memory_failure()"). But he missed fixing soft offline. Link: http://lkml.kernel.org/r/[email protected] Fixes: 61f5d698cc97 ("mm: re-enable THP") Signed-off-by: zhongjiang <[email protected]> Acked-by: Naoya Horiguchi <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Kirill A. Shutemov <[email protected]> Cc: Andrea Arcangeli <[email protected]> Cc: <[email protected]> [4.5+] Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
libssh2_channel_window_read_ex(LIBSSH2_CHANNEL *channel, unsigned long *read_avail, unsigned long *window_size_initial) { if(!channel) return 0; /* no channel, no window! */ if(window_size_initial) { *window_size_initial = channel->remote.window_size_initial; } if(read_avail) { size_t bytes_queued = 0; LIBSSH2_PACKET *next_packet; LIBSSH2_PACKET *packet = _libssh2_list_first(&channel->session->packets); while(packet) { unsigned char packet_type; next_packet = _libssh2_list_next(&packet->node); if(packet->data_len < 1) { packet = next_packet; _libssh2_debug(channel->session, LIBSSH2_TRACE_ERROR, "Unexpected packet length"); continue; } packet_type = packet->data[0]; if(((packet_type == SSH_MSG_CHANNEL_DATA) || (packet_type == SSH_MSG_CHANNEL_EXTENDED_DATA)) && ((packet->data_len >= 5) && (_libssh2_ntohu32(packet->data + 1) == channel->local.id))) { bytes_queued += packet->data_len - packet->data_head; } packet = next_packet; } *read_avail = bytes_queued; } return channel->remote.window_size; }
0
[ "CWE-787" ]
libssh2
dc109a7f518757741590bb993c0c8412928ccec2
328,853,801,401,399,900,000,000,000,000,000,000,000
45
Security fixes (#315) * Bounds checks Fixes for CVEs https://www.libssh2.org/CVE-2019-3863.html https://www.libssh2.org/CVE-2019-3856.html * Packet length bounds check CVE https://www.libssh2.org/CVE-2019-3855.html * Response length check CVE https://www.libssh2.org/CVE-2019-3859.html * Bounds check CVE https://www.libssh2.org/CVE-2019-3857.html * Bounds checking CVE https://www.libssh2.org/CVE-2019-3859.html and additional data validation * Check bounds before reading into buffers * Bounds checking CVE https://www.libssh2.org/CVE-2019-3859.html * declare SIZE_MAX and UINT_MAX if needed
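Most of these CVE fixes reduce to the same discipline: prove that enough bytes remain before reading each field. A minimal sketch of a bounds-checked big-endian u32 reader in that spirit (not libssh2's actual helper):

#include <stdint.h>
#include <stddef.h>

/* Read a big-endian u32 at *off, advancing *off; returns -1 if fewer
 * than 4 bytes remain (the checks are ordered to avoid underflow). */
static int read_u32(const unsigned char *p, size_t len, size_t *off,
                    uint32_t *out)
{
    if (*off > len || len - *off < 4)
        return -1;
    *out = ((uint32_t)p[*off] << 24) | ((uint32_t)p[*off + 1] << 16) |
           ((uint32_t)p[*off + 2] << 8) | (uint32_t)p[*off + 3];
    *off += 4;
    return 0;
}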
void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp) { struct device *device = *devp; if (device) { device_unregister(device); *devp = NULL; } if (*cdevp) { cdev_del(*cdevp); *cdevp = NULL; } }
0
[ "CWE-284", "CWE-264" ]
linux
e6bd18f57aad1a2d1ef40e646d03ed0f2515c9e3
254,383,787,128,188,100,000,000,000,000,000,000,000
14
IB/security: Restrict use of the write() interface The drivers/infiniband stack uses write() as a replacement for bi-directional ioctl(). This is not safe. There are ways to trigger write calls that result in the return structure that is normally written to user space being shunted off to user specified kernel memory instead. For the immediate repair, detect and deny suspicious accesses to the write API. For long term, update the user space libraries and the kernel API to something that doesn't present the same security vulnerabilities (likely a structured ioctl() interface). The impacted uAPI interfaces are generally only available if hardware from drivers/infiniband is installed in the system. Reported-by: Jann Horn <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Signed-off-by: Jason Gunthorpe <[email protected]> [ Expanded check to all known write() entry points ] Cc: [email protected] Signed-off-by: Doug Ledford <[email protected]>
static void list_slab_objects(struct kmem_cache *s, struct page *page, const char *text) { #ifdef CONFIG_SLUB_DEBUG void *addr = page_address(page); void *p; unsigned long *map; slab_err(s, page, text, s->name); slab_lock(page); map = get_map(s, page); for_each_object(p, s, addr, page->objects) { if (!test_bit(slab_index(p, s, addr), map)) { pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr); print_tracking(s, p); } } put_map(map); slab_unlock(page); #endif }
0
[]
linux
fd4d9c7d0c71866ec0c2825189ebd2ce35bd95b8
98,894,920,109,244,960,000,000,000,000,000,000,000
24
mm: slub: add missing TID bump in kmem_cache_alloc_bulk() When kmem_cache_alloc_bulk() attempts to allocate N objects from a percpu freelist of length M, and N > M > 0, it will first remove the M elements from the percpu freelist, then call ___slab_alloc() to allocate the next element and repopulate the percpu freelist. ___slab_alloc() can re-enable IRQs via allocate_slab(), so the TID must be bumped before ___slab_alloc() to properly commit the freelist head change. Fix it by unconditionally bumping c->tid when entering the slowpath. Cc: [email protected] Fixes: ebe909e0fdb3 ("slub: improve bulk alloc strategy") Signed-off-by: Jann Horn <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
evbuffer_drain(struct evbuffer *buf, size_t len) { struct evbuffer_chain *chain, *next; size_t remaining, old_len; int result = 0; EVBUFFER_LOCK(buf); old_len = buf->total_len; if (old_len == 0) goto done; if (buf->freeze_start) { result = -1; goto done; } if (len >= old_len && !HAS_PINNED_R(buf)) { len = old_len; for (chain = buf->first; chain != NULL; chain = next) { next = chain->next; evbuffer_chain_free(chain); } ZERO_CHAIN(buf); } else { if (len >= old_len) len = old_len; buf->total_len -= len; remaining = len; for (chain = buf->first; remaining >= chain->off; chain = next) { next = chain->next; remaining -= chain->off; if (chain == *buf->last_with_datap) { buf->last_with_datap = &buf->first; } if (&chain->next == buf->last_with_datap) buf->last_with_datap = &buf->first; if (CHAIN_PINNED_R(chain)) { EVUTIL_ASSERT(remaining == 0); chain->misalign += chain->off; chain->off = 0; break; } else evbuffer_chain_free(chain); } buf->first = chain; EVUTIL_ASSERT(chain && remaining <= chain->off); chain->misalign += remaining; chain->off -= remaining; } buf->n_del_for_cb += len; /* Tell someone about changes in this buffer */ evbuffer_invoke_callbacks_(buf); done: EVBUFFER_UNLOCK(buf); return result; }
0
[ "CWE-189" ]
libevent
841ecbd96105c84ac2e7c9594aeadbcc6fb38bc4
252,913,564,695,345,800,000,000,000,000,000,000,000
66
Fix CVE-2014-6272 in Libevent 2.1 For this fix, we need to make sure that passing too-large inputs to the evbuffer functions can't make us do bad things with the heap. Also, lower the maximum chunk size to the lower of off_t, size_t maximum. This is necessary since otherwise we could get into an infinite loop if we make a chunk that 'misalign' cannot index into.
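The core of the fix is refusing any length whose addition to the running total would wrap, before touching the chain bookkeeping. A sketch of the check (names are illustrative, not libevent's internals):

#include <stdint.h>
#include <stddef.h>

/* Grow a running total only if the addition cannot wrap; a wrapped
 * total is what lets over-large inputs corrupt downstream math.
 * Assumes the invariant *total <= hard_max already holds. */
static int checked_add_len(size_t *total, size_t datlen, size_t hard_max)
{
    if (datlen > hard_max - *total)
        return -1;
    *total += datlen;
    return 0;
}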
d_lite_hash(VALUE self) { st_index_t v, h[4]; get_d1(self); h[0] = m_nth(dat); h[1] = m_jd(dat); h[2] = m_df(dat); h[3] = m_sf(dat); v = rb_memhash(h, sizeof(h)); return ST2FIX(v); }
0
[]
date
3959accef8da5c128f8a8e2fd54e932a4fb253b0
250,168,188,193,732,460,000,000,000,000,000,000,000
12
Add length limit option for methods that parse date strings `Date.parse` now raises an ArgumentError when a given date string is longer than 128 characters. You can configure the limit by giving `limit` keyword arguments like `Date.parse(str, limit: 1000)`. If you pass `limit: nil`, the limit is disabled. Not only `Date.parse` but also the following methods are changed. * Date._parse * Date.parse * DateTime.parse * Date._iso8601 * Date.iso8601 * DateTime.iso8601 * Date._rfc3339 * Date.rfc3339 * DateTime.rfc3339 * Date._xmlschema * Date.xmlschema * DateTime.xmlschema * Date._rfc2822 * Date.rfc2822 * DateTime.rfc2822 * Date._rfc822 * Date.rfc822 * DateTime.rfc822 * Date._jisx0301 * Date.jisx0301 * DateTime.jisx0301
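The change itself is Ruby-side, but the guard is trivial in any language: bound the input length before handing it to a backtracking parser. A loose C sketch of the same check (names and the default are taken from the description above; everything else is an assumption):

#include <string.h>

#define PARSE_LIMIT_DEFAULT 128

/* Return nonzero if s is acceptable; limit < 0 disables the check,
 * mirroring the `limit: nil` behaviour described above. */
static int within_parse_limit(const char *s, long limit)
{
    return limit < 0 || strlen(s) <= (size_t)limit;
}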
int tls_choose_sigalg(SSL *s, int fatalerrs) { const SIGALG_LOOKUP *lu = NULL; int sig_idx = -1; s->s3.tmp.cert = NULL; s->s3.tmp.sigalg = NULL; if (SSL_IS_TLS13(s)) { lu = find_sig_alg(s, NULL, NULL); if (lu == NULL) { if (!fatalerrs) return 1; SSLfatal(s, SSL_AD_HANDSHAKE_FAILURE, SSL_F_TLS_CHOOSE_SIGALG, SSL_R_NO_SUITABLE_SIGNATURE_ALGORITHM); return 0; } } else { /* If ciphersuite doesn't require a cert nothing to do */ if (!(s->s3.tmp.new_cipher->algorithm_auth & SSL_aCERT)) return 1; if (!s->server && !ssl_has_cert(s, s->cert->key - s->cert->pkeys)) return 1; if (SSL_USE_SIGALGS(s)) { size_t i; if (s->s3.tmp.peer_sigalgs != NULL) { #ifndef OPENSSL_NO_EC int curve = -1; /* For Suite B need to match signature algorithm to curve */ if (tls1_suiteb(s)) curve = evp_pkey_get_EC_KEY_curve_nid(s->cert->pkeys[SSL_PKEY_ECC] .privatekey); #endif /* * Find highest preference signature algorithm matching * cert type */ for (i = 0; i < s->shared_sigalgslen; i++) { lu = s->shared_sigalgs[i]; if (s->server) { if ((sig_idx = tls12_get_cert_sigalg_idx(s, lu)) == -1) continue; } else { int cc_idx = s->cert->key - s->cert->pkeys; sig_idx = lu->sig_idx; if (cc_idx != sig_idx) continue; } /* Check that we have a cert, and sig_algs_cert */ if (!has_usable_cert(s, lu, sig_idx)) continue; if (lu->sig == EVP_PKEY_RSA_PSS) { /* validate that key is large enough for the signature algorithm */ EVP_PKEY *pkey = s->cert->pkeys[sig_idx].privatekey; if (!rsa_pss_check_min_key_size(s->ctx, pkey, lu)) continue; } #ifndef OPENSSL_NO_EC if (curve == -1 || lu->curve == curve) #endif break; } #ifndef OPENSSL_NO_GOST /* * Some Windows-based implementations do not send GOST algorithms indication * in supported_algorithms extension, so when we have GOST-based ciphersuite, * we have to assume GOST support. */ if (i == s->shared_sigalgslen && s->s3.tmp.new_cipher->algorithm_auth & (SSL_aGOST01 | SSL_aGOST12)) { if ((lu = tls1_get_legacy_sigalg(s, -1)) == NULL) { if (!fatalerrs) return 1; SSLfatal(s, SSL_AD_HANDSHAKE_FAILURE, SSL_F_TLS_CHOOSE_SIGALG, SSL_R_NO_SUITABLE_SIGNATURE_ALGORITHM); return 0; } else { i = 0; sig_idx = lu->sig_idx; } } #endif if (i == s->shared_sigalgslen) { if (!fatalerrs) return 1; SSLfatal(s, SSL_AD_HANDSHAKE_FAILURE, SSL_F_TLS_CHOOSE_SIGALG, SSL_R_NO_SUITABLE_SIGNATURE_ALGORITHM); return 0; } } else { /* * If we have no sigalg use defaults */ const uint16_t *sent_sigs; size_t sent_sigslen; if ((lu = tls1_get_legacy_sigalg(s, -1)) == NULL) { if (!fatalerrs) return 1; SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS_CHOOSE_SIGALG, SSL_R_NO_SUITABLE_SIGNATURE_ALGORITHM); return 0; } /* Check signature matches a type we sent */ sent_sigslen = tls12_get_psigalgs(s, 1, &sent_sigs); for (i = 0; i < sent_sigslen; i++, sent_sigs++) { if (lu->sigalg == *sent_sigs && has_usable_cert(s, lu, lu->sig_idx)) break; } if (i == sent_sigslen) { if (!fatalerrs) return 1; SSLfatal(s, SSL_AD_ILLEGAL_PARAMETER, SSL_F_TLS_CHOOSE_SIGALG, SSL_R_WRONG_SIGNATURE_TYPE); return 0; } } } else { if ((lu = tls1_get_legacy_sigalg(s, -1)) == NULL) { if (!fatalerrs) return 1; SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_TLS_CHOOSE_SIGALG, SSL_R_NO_SUITABLE_SIGNATURE_ALGORITHM); return 0; } } } if (sig_idx == -1) sig_idx = lu->sig_idx; s->s3.tmp.cert = &s->cert->pkeys[sig_idx]; s->cert->key = s->s3.tmp.cert; s->s3.tmp.sigalg = lu; return 1; }
0
[ "CWE-476" ]
openssl
a87f3fe01a5a894aa27ccd6a239155fd129988e4
309,168,679,855,519,580,000,000,000,000,000,000,000
145
Fix NULL dereference in SSL_check_chain() for TLS 1.3 In the tls1_check_sig_alg() helper function, we loop through the list of "signature_algorithms_cert" values received from the client and attempt to look up each one in turn in our internal table that maps wire codepoint to string-form name, digest and/or signature NID, etc., in order to compare the signature scheme from the peer's list against what is used to sign the certificates in the certificate chain we're checking. Unfortunately, when the peer sends a value that we don't support, the lookup returns NULL, but we unconditionally dereference the lookup result for the comparison, leading to an application crash triggerable by an unauthenticated client. Since we will not be able to say anything about algorithms we don't recognize, treat NULL return from lookup as "does not match". We currently only apply the "signature_algorithm_cert" checks on TLS 1.3 connections, so previous TLS versions are unaffected. SSL_check_chain() is not called directly from libssl, but may be used by the application inside a callback (e.g., client_hello or cert callback) to verify that a candidate certificate chain will be acceptable to the client. CVE-2020-1967 Reviewed-by: Matt Caswell <[email protected]>
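The defensive rule in the fix is that a failed table lookup means "we can say nothing", so it must compare as a non-match rather than be dereferenced. A self-contained sketch of that shape (table contents and types are illustrative, not OpenSSL's):

#include <stdint.h>
#include <stddef.h>

typedef struct {
    uint16_t codepoint; /* wire value from the peer */
    int sig_nid;        /* our internal identifier */
} sigalg_lookup;

static const sigalg_lookup table[] = {
    { 0x0403, 1 }, { 0x0804, 2 }, /* values illustrative only */
};

static const sigalg_lookup *lookup_sigalg(uint16_t cp)
{
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (table[i].codepoint == cp)
            return &table[i];
    return NULL; /* unknown codepoint from the peer */
}

/* NULL lookup == "does not match": never dereference it. */
static int sig_matches(uint16_t peer_cp, int our_nid)
{
    const sigalg_lookup *lu = lookup_sigalg(peer_cp);
    return lu != NULL && lu->sig_nid == our_nid;
}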
hfs_attr_walk_zlib_rsrc(const TSK_FS_ATTR * fs_attr, int flags, TSK_FS_FILE_WALK_CB a_action, void *ptr) { return hfs_attr_walk_compressed_rsrc( fs_attr, flags, a_action, ptr, hfs_read_zlib_block_table, hfs_decompress_zlib_block ); }
0
[ "CWE-190", "CWE-284" ]
sleuthkit
114cd3d0aac8bd1aeaf4b33840feb0163d342d5b
240,279,933,617,576,380,000,000,000,000,000,000,000
9
hfs: fix keylen check in hfs_cat_traverse() If key->key_len is 65535, calculating "uint16_t keylen' would cause an overflow: uint16_t keylen; ... keylen = 2 + tsk_getu16(hfs->fs_info.endian, key->key_len) so the code bypasses the sanity check "if (keylen > nodesize)" which results in crash later: ./toolfs/fstools/fls -b 512 -f hfs <image> ================================================================= ==16==ERROR: AddressSanitizer: SEGV on unknown address 0x6210000256a4 (pc 0x00000054812b bp 0x7ffca548a8f0 sp 0x7ffca548a480 T0) ==16==The signal is caused by a READ memory access. #0 0x54812a in hfs_dir_open_meta_cb /fuzzing/sleuthkit/tsk/fs/hfs_dent.c:237:20 #1 0x51a96c in hfs_cat_traverse /fuzzing/sleuthkit/tsk/fs/hfs.c:1082:21 #2 0x547785 in hfs_dir_open_meta /fuzzing/sleuthkit/tsk/fs/hfs_dent.c:480:9 #3 0x50f57d in tsk_fs_dir_open_meta /fuzzing/sleuthkit/tsk/fs/fs_dir.c:290:14 #4 0x54af17 in tsk_fs_path2inum /fuzzing/sleuthkit/tsk/fs/ifind_lib.c:237:23 #5 0x522266 in hfs_open /fuzzing/sleuthkit/tsk/fs/hfs.c:6579:9 #6 0x508e89 in main /fuzzing/sleuthkit/tools/fstools/fls.cpp:267:19 #7 0x7f9daf67c2b0 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x202b0) #8 0x41d679 in _start (/fuzzing/sleuthkit/tools/fstools/fls+0x41d679) Make 'keylen' int type to prevent the overflow and fix that. Now, I get proper error message instead of crash: ./toolfs/fstools/fls -b 512 -f hfs <image> General file system error (hfs_cat_traverse: length of key 3 in leaf node 1 too large (65537 vs 4096))
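The overflow is in the width of the intermediate, not in the check itself: with uint16_t, 2 + 65535 wraps to 1 and sails past the nodesize comparison. Doing the addition in a wider type is the whole fix, as this sketch shows:

#include <stdint.h>

/* keylen is computed in 32 bits, so 2 + 65535 stays 65537 and the
 * sanity check against nodesize actually fires. */
static int key_fits(uint16_t key_len, uint32_t nodesize)
{
    uint32_t keylen = 2u + (uint32_t)key_len; /* cannot wrap */
    return keylen <= nodesize;
}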
struct nfs_server *nfs_create_server(const struct nfs_mount_data *data, struct nfs_fh *mntfh) { struct nfs_server *server; struct nfs_fattr fattr; int error; server = nfs_alloc_server(); if (!server) return ERR_PTR(-ENOMEM); /* Get a client representation */ error = nfs_init_server(server, data); if (error < 0) goto error; BUG_ON(!server->nfs_client); BUG_ON(!server->nfs_client->rpc_ops); BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); /* Probe the root fh to retrieve its FSID */ error = nfs_probe_fsinfo(server, mntfh, &fattr); if (error < 0) goto error; if (server->nfs_client->rpc_ops->version == 3) { if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN) server->namelen = NFS3_MAXNAMLEN; if (!(data->flags & NFS_MOUNT_NORDIRPLUS)) server->caps |= NFS_CAP_READDIRPLUS; } else { if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) server->namelen = NFS2_MAXNAMLEN; } if (!(fattr.valid & NFS_ATTR_FATTR)) { error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr); if (error < 0) { dprintk("nfs_create_server: getattr error = %d\n", -error); goto error; } } memcpy(&server->fsid, &fattr.fsid, sizeof(server->fsid)); dprintk("Server FSID: %llx:%llx\n", (unsigned long long) server->fsid.major, (unsigned long long) server->fsid.minor); BUG_ON(!server->nfs_client); BUG_ON(!server->nfs_client->rpc_ops); BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); spin_lock(&nfs_client_lock); list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks); list_add_tail(&server->master_link, &nfs_volume_list); spin_unlock(&nfs_client_lock); server->mount_time = jiffies; return server; error: nfs_free_server(server); return ERR_PTR(error); }
0
[ "CWE-20" ]
linux-2.6
54af3bb543c071769141387a42deaaab5074da55
63,561,232,934,435,280,000,000,000,000,000,000,000
63
NFS: Fix an Oops in encode_lookup() It doesn't look as if the NFS file name limit is being initialised correctly in the struct nfs_server. Make sure that we limit whatever is being set in nfs_probe_fsinfo() and nfs_init_server(). Also ensure that readdirplus and nfs4_path_walk respect our file name limits. Signed-off-by: Trond Myklebust <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
struct key *request_key_and_link(struct key_type *type, const char *description, const void *callout_info, size_t callout_len, void *aux, struct key *dest_keyring, unsigned long flags) { struct keyring_search_context ctx = { .index_key.type = type, .index_key.description = description, .cred = current_cred(), .match_data.cmp = type->match, .match_data.raw_data = description, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, }; struct key *key; key_ref_t key_ref; int ret; kenter("%s,%s,%p,%zu,%p,%p,%lx", ctx.index_key.type->name, ctx.index_key.description, callout_info, callout_len, aux, dest_keyring, flags); if (type->match_preparse) { ret = type->match_preparse(&ctx.match_data); if (ret < 0) { key = ERR_PTR(ret); goto error; } } /* search all the process keyrings for a key */ key_ref = search_process_keyrings(&ctx); if (!IS_ERR(key_ref)) { key = key_ref_to_ptr(key_ref); if (dest_keyring) { construct_get_dest_keyring(&dest_keyring); ret = key_link(dest_keyring, key); key_put(dest_keyring); if (ret < 0) { key_put(key); key = ERR_PTR(ret); goto error_free; } } } else if (PTR_ERR(key_ref) != -EAGAIN) { key = ERR_CAST(key_ref); } else { /* the search failed, but the keyrings were searchable, so we * should consult userspace if we can */ key = ERR_PTR(-ENOKEY); if (!callout_info) goto error_free; key = construct_key_and_link(&ctx, callout_info, callout_len, aux, dest_keyring, flags); } error_free: if (type->match_free) type->match_free(&ctx.match_data); error: kleave(" = %p", key); return key; }
1
[ "CWE-476" ]
linux
c06cfb08b88dfbe13be44a69ae2fdc3a7c902d81
249,866,975,523,117,800,000,000,000,000,000,000,000
67
KEYS: Remove key_type::match in favour of overriding default by match_preparse A previous patch added a ->match_preparse() method to the key type. This is allowed to override the function called by the iteration algorithm. Therefore, we can just set a default that simply checks for an exact match of the key description with the original criterion data and allow match_preparse to override it as needed. The key_type::match op is then redundant and can be removed, as can the user_match() function. Signed-off-by: David Howells <[email protected]> Acked-by: Vivek Goyal <[email protected]>
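The default match behaviour the commit introduces amounts to an exact comparison of the key description with the original criterion data, overridable via match_preparse(). A minimal sketch with a stubbed type (not the kernel's actual structures):

#include <stdbool.h>
#include <string.h>

struct key_stub { const char *description; };  /* stand-in for struct key */

/* Default comparator: exact description match; a key type's
 * match_preparse() may install a different ->cmp in the match data. */
static bool exact_match(const struct key_stub *key, const void *criterion)
{
    return strcmp(key->description, (const char *)criterion) == 0;
}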
skiptowhite(char_u *p) { while (*p != ' ' && *p != '\t' && *p != NUL) ++p; return p; }
0
[ "CWE-125", "CWE-787" ]
vim
94f3192b03ed27474db80b4d3a409e107140738b
249,203,400,305,464,900,000,000,000,000,000,000,000
6
patch 8.2.3950: going beyond the end of the line with /\%V Problem: Going beyond the end of the line with /\%V. Solution: Check for valid column in getvcol().
static BOOL clear_decompress_bands_data(CLEAR_CONTEXT* clear, wStream* s, UINT32 bandsByteCount, UINT32 nWidth, UINT32 nHeight, BYTE* pDstData, UINT32 DstFormat, UINT32 nDstStep, UINT32 nXDst, UINT32 nYDst) { UINT32 i, y; UINT32 count; UINT32 suboffset; UINT32 nXDstRel; UINT32 nYDstRel; if (Stream_GetRemainingLength(s) < bandsByteCount) { WLog_ERR(TAG, "stream short %" PRIuz " [11 expected]", Stream_GetRemainingLength(s)); return FALSE; } suboffset = 0; while (suboffset < bandsByteCount) { BYTE r, g, b; UINT16 xStart; UINT16 xEnd; UINT16 yStart; UINT16 yEnd; UINT32 colorBkg; UINT16 vBarHeader; UINT16 vBarYOn; UINT16 vBarYOff; UINT32 vBarCount; UINT32 vBarPixelCount; UINT32 vBarShortPixelCount; if (Stream_GetRemainingLength(s) < 11) { WLog_ERR(TAG, "stream short %" PRIuz " [11 expected]", Stream_GetRemainingLength(s)); return FALSE; } Stream_Read_UINT16(s, xStart); Stream_Read_UINT16(s, xEnd); Stream_Read_UINT16(s, yStart); Stream_Read_UINT16(s, yEnd); Stream_Read_UINT8(s, b); Stream_Read_UINT8(s, g); Stream_Read_UINT8(s, r); suboffset += 11; colorBkg = FreeRDPGetColor(clear->format, r, g, b, 0xFF); if (xEnd < xStart) { WLog_ERR(TAG, "xEnd %" PRIu16 " < xStart %" PRIu16 "", xEnd, xStart); return FALSE; } if (yEnd < yStart) { WLog_ERR(TAG, "yEnd %" PRIu16 " < yStart %" PRIu16 "", yEnd, yStart); return FALSE; } vBarCount = (xEnd - xStart) + 1; for (i = 0; i < vBarCount; i++) { UINT32 vBarHeight; CLEAR_VBAR_ENTRY* vBarEntry = NULL; CLEAR_VBAR_ENTRY* vBarShortEntry; BOOL vBarUpdate = FALSE; const BYTE* pSrcPixel; if (Stream_GetRemainingLength(s) < 2) { WLog_ERR(TAG, "stream short %" PRIuz " [2 expected]", Stream_GetRemainingLength(s)); return FALSE; } Stream_Read_UINT16(s, vBarHeader); suboffset += 2; vBarHeight = (yEnd - yStart + 1); if (vBarHeight > 52) { WLog_ERR(TAG, "vBarHeight (%" PRIu32 ") > 52", vBarHeight); return FALSE; } if ((vBarHeader & 0xC000) == 0x4000) /* SHORT_VBAR_CACHE_HIT */ { const UINT16 vBarIndex = (vBarHeader & 0x3FFF); vBarShortEntry = &(clear->ShortVBarStorage[vBarIndex]); if (!vBarShortEntry) { WLog_ERR(TAG, "missing vBarShortEntry %" PRIu16 "", vBarIndex); return FALSE; } if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "stream short %" PRIuz " [1 expected]", Stream_GetRemainingLength(s)); return FALSE; } Stream_Read_UINT8(s, vBarYOn); suboffset += 1; vBarShortPixelCount = vBarShortEntry->count; vBarUpdate = TRUE; } else if ((vBarHeader & 0xC000) == 0x0000) /* SHORT_VBAR_CACHE_MISS */ { vBarYOn = (vBarHeader & 0xFF); vBarYOff = ((vBarHeader >> 8) & 0x3F); if (vBarYOff < vBarYOn) { WLog_ERR(TAG, "vBarYOff %" PRIu16 " < vBarYOn %" PRIu16 "", vBarYOff, vBarYOn); return FALSE; } vBarShortPixelCount = (vBarYOff - vBarYOn); if (vBarShortPixelCount > 52) { WLog_ERR(TAG, "vBarShortPixelCount %" PRIu32 " > 52", vBarShortPixelCount); return FALSE; } if (Stream_GetRemainingLength(s) < (vBarShortPixelCount * 3)) { WLog_ERR(TAG, "stream short %" PRIuz " [%" PRIu32 " expected]", Stream_GetRemainingLength(s), (vBarShortPixelCount * 3)); return FALSE; } if (clear->ShortVBarStorageCursor >= CLEARCODEC_VBAR_SHORT_SIZE) { WLog_ERR(TAG, "clear->ShortVBarStorageCursor %" PRIu32 " >= CLEARCODEC_VBAR_SHORT_SIZE (%" PRIu32 ")", clear->ShortVBarStorageCursor, CLEARCODEC_VBAR_SHORT_SIZE); return FALSE; } vBarShortEntry = &(clear->ShortVBarStorage[clear->ShortVBarStorageCursor]); vBarShortEntry->count = vBarShortPixelCount; if (!resize_vbar_entry(clear, vBarShortEntry)) return FALSE; for (y = 0; y < vBarShortPixelCount; y++) { BYTE r, g, b; BYTE* dstBuffer = &vBarShortEntry->pixels[y * GetBytesPerPixel(clear->format)]; UINT32 color; Stream_Read_UINT8(s, b); Stream_Read_UINT8(s, g); Stream_Read_UINT8(s, r); color = FreeRDPGetColor(clear->format, r, g, b, 0xFF); if (!WriteColor(dstBuffer, clear->format, color)) return FALSE; } suboffset += (vBarShortPixelCount * 3); clear->ShortVBarStorageCursor = (clear->ShortVBarStorageCursor + 1) % CLEARCODEC_VBAR_SHORT_SIZE; vBarUpdate = TRUE; } else if ((vBarHeader & 0x8000) == 0x8000) /* VBAR_CACHE_HIT */ { const UINT16 vBarIndex = (vBarHeader & 0x7FFF); vBarEntry = &(clear->VBarStorage[vBarIndex]); /* If the cache was reset we need to fill in some dummy data. */ if (vBarEntry->size == 0) { WLog_WARN(TAG, "Empty cache index %" PRIu16 ", filling dummy data", vBarIndex); vBarEntry->count = vBarHeight; if (!resize_vbar_entry(clear, vBarEntry)) return FALSE; } } else { WLog_ERR(TAG, "invalid vBarHeader 0x%04" PRIX16 "", vBarHeader); return FALSE; /* invalid vBarHeader */ } if (vBarUpdate) { UINT32 x; BYTE* pSrcPixel; BYTE* dstBuffer; if (clear->VBarStorageCursor >= CLEARCODEC_VBAR_SIZE) { WLog_ERR(TAG, "clear->VBarStorageCursor %" PRIu32 " >= CLEARCODEC_VBAR_SIZE %" PRIu32 "", clear->VBarStorageCursor, CLEARCODEC_VBAR_SIZE); return FALSE; } vBarEntry = &(clear->VBarStorage[clear->VBarStorageCursor]); vBarPixelCount = vBarHeight; vBarEntry->count = vBarPixelCount; if (!resize_vbar_entry(clear, vBarEntry)) return FALSE; dstBuffer = vBarEntry->pixels; /* if (y < vBarYOn), use colorBkg */ y = 0; count = vBarYOn; if ((y + count) > vBarPixelCount) count = (vBarPixelCount > y) ? (vBarPixelCount - y) : 0; while (count--) { WriteColor(dstBuffer, clear->format, colorBkg); dstBuffer += GetBytesPerPixel(clear->format); } /* * if ((y >= vBarYOn) && (y < (vBarYOn + vBarShortPixelCount))), * use vBarShortPixels at index (y - shortVBarYOn) */ y = vBarYOn; count = vBarShortPixelCount; if ((y + count) > vBarPixelCount) count = (vBarPixelCount > y) ? (vBarPixelCount - y) : 0; pSrcPixel = &vBarShortEntry->pixels[(y - vBarYOn) * GetBytesPerPixel(clear->format)]; for (x = 0; x < count; x++) { UINT32 color; color = ReadColor(&pSrcPixel[x * GetBytesPerPixel(clear->format)], clear->format); if (!WriteColor(dstBuffer, clear->format, color)) return FALSE; dstBuffer += GetBytesPerPixel(clear->format); } /* if (y >= (vBarYOn + vBarShortPixelCount)), use colorBkg */ y = vBarYOn + vBarShortPixelCount; count = (vBarPixelCount > y) ? (vBarPixelCount - y) : 0; while (count--) { if (!WriteColor(dstBuffer, clear->format, colorBkg)) return FALSE; dstBuffer += GetBytesPerPixel(clear->format); } vBarEntry->count = vBarPixelCount; clear->VBarStorageCursor = (clear->VBarStorageCursor + 1) % CLEARCODEC_VBAR_SIZE; } if (vBarEntry->count != vBarHeight) { WLog_ERR(TAG, "vBarEntry->count %" PRIu32 " != vBarHeight %" PRIu32 "", vBarEntry->count, vBarHeight); vBarEntry->count = vBarHeight; if (!resize_vbar_entry(clear, vBarEntry)) return FALSE; } nXDstRel = nXDst + xStart; nYDstRel = nYDst + yStart; pSrcPixel = vBarEntry->pixels; if (i < nWidth) { count = vBarEntry->count; if (count > nHeight) count = nHeight; for (y = 0; y < count; y++) { BYTE* pDstPixel8 = &pDstData[((nYDstRel + y) * nDstStep) + ((nXDstRel + i) * GetBytesPerPixel(DstFormat))]; UINT32 color = ReadColor(pSrcPixel, clear->format); color = FreeRDPConvertColor(color, clear->format, DstFormat, NULL); if (!WriteColor(pDstPixel8, DstFormat, color)) return FALSE; pSrcPixel += GetBytesPerPixel(clear->format); } } } } return TRUE; }
0
[ "CWE-125" ]
FreeRDP
363d7046dfec4003b91aecf7867e3b05905f3843
87,336,701,581,291,980,000,000,000,000,000,000,000
308
Fixed oob read in clear_decompress_subcode_rlex Fixed length checks before stream read. Thanks to hac425 CVE-2020-11040
static void oprintf(struct out *op, const char *fmt, ...) { va_list ap; char buf[256]; int d, l; va_start(ap, fmt); l = vsprintf(buf, fmt, ap); while((d = op->ptr - op->buf) + l >= op->len-1) { op->buf = (char *) realloc(op->buf, op->len += 100); op->ptr = op->buf + d; } for(d = 0 ; d < l ; d++) *op->ptr++ = buf[d]; }
0
[ "CWE-20", "CWE-476" ]
libming
6e76e8c71cb51c8ba0aa9737a636b9ac3029887f
237,497,496,296,382,230,000,000,000,000,000,000,000
16
SWFShape_setLeftFillStyle: prevent fill overflow
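oprintf() above formats into a fixed 256-byte stack buffer with vsprintf(), which has no bound. A bounded variant of the same idea, shown as a sketch (not libming code), would use vsnprintf():

#include <stdarg.h>
#include <stdio.h>

/* Format into a caller-supplied buffer without ever writing past it. */
static int bounded_format(char *buf, size_t cap, const char *fmt, ...)
{
    va_list ap;
    int l;
    va_start(ap, fmt);
    l = vsnprintf(buf, cap, fmt, ap);  /* truncates instead of overflowing */
    va_end(ap);
    return l;  /* a return value >= cap signals truncation */
}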
static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len, xfrm_address_t *saddr, xfrm_address_t *daddr, u16 *family) { int af, socklen; if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family)) return -EINVAL; af = pfkey_sockaddr_extract(sa, saddr); if (!af) return -EINVAL; socklen = pfkey_sockaddr_len(af); if (pfkey_sockaddr_extract((struct sockaddr *) (((u8 *)sa) + socklen), daddr) != af) return -EINVAL; *family = af; return 0; }
1
[]
linux
096f41d3a8fcbb8dde7f71379b1ca85fe213eded
226,262,108,183,392,660,000,000,000,000,000,000,000
21
af_key: Fix sadb_x_ipsecrequest parsing The parsing of sadb_x_ipsecrequest is broken in a number of ways. First of all we're not verifying sadb_x_ipsecrequest_len. This is needed when the structure carries addresses at the end. Worse we don't even look at the length when we parse those optional addresses. The migration code had similar parsing code that's better but it also has some deficiencies. The length is overcounted first of all as it includes the header itself. It also fails to check the length before dereferencing the sa_family field. This patch fixes those problems in parse_sockaddr_pair and then uses it in parse_ipsecrequest. Reported-by: Andrey Konovalov <[email protected]> Signed-off-by: Herbert Xu <[email protected]> Signed-off-by: Steffen Klassert <[email protected]>
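The core of the fix is ordering: verify the extension length before dereferencing any sockaddr field. A standalone sketch of that discipline (hypothetical helper, not the kernel code):

#include <stddef.h>
#include <sys/socket.h>

/* Read sa_family only after proving the buffer holds the header. */
static int sockaddr_family_checked(const void *buf, size_t len, int *family)
{
    const struct sockaddr *sa = buf;
    if (len < sizeof(struct sockaddr))
        return -1;  /* too short: sa_family would be an out-of-bounds read */
    *family = sa->sa_family;
    return 0;
}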
static int construct_options(MEM_ROOT *mem_root, struct st_plugin_int *tmp, my_option *options) { const char *plugin_name= tmp->plugin->name; const LEX_STRING plugin_dash = { C_STRING_WITH_LEN("plugin-") }; uint plugin_name_len= strlen(plugin_name); uint optnamelen; const int max_comment_len= 255; char *comment= (char *) alloc_root(mem_root, max_comment_len + 1); char *optname; int index= 0, UNINIT_VAR(offset); st_mysql_sys_var *opt, **plugin_option; st_bookmark *v; /** Used to circumvent the const attribute on my_option::name */ char *plugin_name_ptr, *plugin_name_with_prefix_ptr; DBUG_ENTER("construct_options"); plugin_name_ptr= (char*) alloc_root(mem_root, plugin_name_len + 1); strcpy(plugin_name_ptr, plugin_name); my_casedn_str(&my_charset_latin1, plugin_name_ptr); convert_underscore_to_dash(plugin_name_ptr, plugin_name_len); plugin_name_with_prefix_ptr= (char*) alloc_root(mem_root, plugin_name_len + plugin_dash.length + 1); strxmov(plugin_name_with_prefix_ptr, plugin_dash.str, plugin_name_ptr, NullS); if (!plugin_is_forced(tmp)) { /* support --skip-plugin-foo syntax */ options[0].name= plugin_name_ptr; options[1].name= plugin_name_with_prefix_ptr; options[0].id= options[1].id= 0; options[0].var_type= options[1].var_type= GET_ENUM; options[0].arg_type= options[1].arg_type= OPT_ARG; options[0].def_value= options[1].def_value= 1; /* ON */ options[0].typelib= options[1].typelib= &global_plugin_typelib; strxnmov(comment, max_comment_len, "Enable or disable ", plugin_name, " plugin. One of: ON, OFF, FORCE (don't start if the plugin" " fails to load), FORCE_PLUS_PERMANENT (like FORCE, but the" " plugin can not be uninstalled).", NullS); options[0].comment= comment; /* Allocate temporary space for the value of the tristate. This option will have a limited lifetime and is not used beyond server initialization. GET_ENUM value is an unsigned long integer. */ options[0].value= options[1].value= (uchar **)alloc_root(mem_root, sizeof(ulong)); *((ulong*) options[0].value)= (ulong) options[0].def_value; options+= 2; } /* Two passes as the 2nd pass will take pointer addresses for use by my_getopt and register_var() in the first pass uses realloc */ for (plugin_option= tmp->plugin->system_vars; plugin_option && *plugin_option; plugin_option++, index++) { opt= *plugin_option; if (!opt->name) { sql_print_error("Missing variable name in plugin '%s'.", plugin_name); DBUG_RETURN(-1); } if (!(opt->flags & PLUGIN_VAR_THDLOCAL)) continue; if (!(register_var(plugin_name_ptr, opt->name, opt->flags))) continue; switch (opt->flags & PLUGIN_VAR_TYPEMASK) { case PLUGIN_VAR_BOOL: ((thdvar_bool_t *) opt)->resolve= mysql_sys_var_char; break; case PLUGIN_VAR_INT: ((thdvar_int_t *) opt)->resolve= mysql_sys_var_int; break; case PLUGIN_VAR_LONG: ((thdvar_long_t *) opt)->resolve= mysql_sys_var_long; break; case PLUGIN_VAR_LONGLONG: ((thdvar_longlong_t *) opt)->resolve= mysql_sys_var_longlong; break; case PLUGIN_VAR_STR: ((thdvar_str_t *) opt)->resolve= mysql_sys_var_str; break; case PLUGIN_VAR_ENUM: ((thdvar_enum_t *) opt)->resolve= mysql_sys_var_ulong; break; case PLUGIN_VAR_SET: ((thdvar_set_t *) opt)->resolve= mysql_sys_var_ulonglong; break; case PLUGIN_VAR_DOUBLE: ((thdvar_double_t *) opt)->resolve= mysql_sys_var_double; break; default: sql_print_error("Unknown variable type code 0x%x in plugin '%s'.", opt->flags, plugin_name); DBUG_RETURN(-1); }; } for (plugin_option= tmp->plugin->system_vars; plugin_option && *plugin_option; plugin_option++, index++) { switch ((opt= *plugin_option)->flags & PLUGIN_VAR_TYPEMASK) { case PLUGIN_VAR_BOOL: if (!opt->check) opt->check= check_func_bool; if (!opt->update) opt->update= update_func_bool; break; case PLUGIN_VAR_INT: if (!opt->check) opt->check= check_func_int; if (!opt->update) opt->update= update_func_int; break; case PLUGIN_VAR_LONG: if (!opt->check) opt->check= check_func_long; if (!opt->update) opt->update= update_func_long; break; case PLUGIN_VAR_LONGLONG: if (!opt->check) opt->check= check_func_longlong; if (!opt->update) opt->update= update_func_longlong; break; case PLUGIN_VAR_STR: if (!opt->check) opt->check= check_func_str; if (!opt->update) { opt->update= update_func_str; if (!(opt->flags & (PLUGIN_VAR_MEMALLOC | PLUGIN_VAR_READONLY))) { opt->flags|= PLUGIN_VAR_READONLY; sql_print_warning("Server variable %s of plugin %s was forced " "to be read-only: string variable without " "update_func and PLUGIN_VAR_MEMALLOC flag", opt->name, plugin_name); } } break; case PLUGIN_VAR_ENUM: if (!opt->check) opt->check= check_func_enum; if (!opt->update) opt->update= update_func_long; break; case PLUGIN_VAR_SET: if (!opt->check) opt->check= check_func_set; if (!opt->update) opt->update= update_func_longlong; break; case PLUGIN_VAR_DOUBLE: if (!opt->check) opt->check= check_func_double; if (!opt->update) opt->update= update_func_double; break; default: sql_print_error("Unknown variable type code 0x%x in plugin '%s'.", opt->flags, plugin_name); DBUG_RETURN(-1); } if ((opt->flags & (PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_THDLOCAL)) == PLUGIN_VAR_NOCMDOPT) continue; if (!(opt->flags & PLUGIN_VAR_THDLOCAL)) { optnamelen= strlen(opt->name); optname= (char*) alloc_root(mem_root, plugin_name_len + optnamelen + 2); strxmov(optname, plugin_name_ptr, "-", opt->name, NullS); optnamelen= plugin_name_len + optnamelen + 1; } else { /* this should not fail because register_var should create entry */ if (!(v= find_bookmark(plugin_name_ptr, opt->name, opt->flags))) { sql_print_error("Thread local variable '%s' not allocated " "in plugin '%s'.", opt->name, plugin_name); DBUG_RETURN(-1); } *(int*)(opt + 1)= offset= v->offset; if (opt->flags & PLUGIN_VAR_NOCMDOPT) { char *val= global_system_variables.dynamic_variables_ptr + offset; if (((opt->flags & PLUGIN_VAR_TYPEMASK) == PLUGIN_VAR_STR) && (opt->flags & PLUGIN_VAR_MEMALLOC)) { char *def_val= *(char**)var_def_ptr(opt); *(char**)val= def_val ? my_strdup(def_val, MYF(0)) : NULL; } else memcpy(val, var_def_ptr(opt), var_storage_size(opt->flags)); continue; } optname= (char*) memdup_root(mem_root, v->key + 1, (optnamelen= v->name_len) + 1); } convert_underscore_to_dash(optname, optnamelen); options->name= optname; options->comment= opt->comment; options->app_type= (opt->flags & PLUGIN_VAR_NOSYSVAR) ? NULL : opt; options->id= 0; plugin_opt_set_limits(options, opt); if (opt->flags & PLUGIN_VAR_THDLOCAL) options->value= options->u_max_value= (uchar**) (global_system_variables.dynamic_variables_ptr + offset); else options->value= options->u_max_value= *(uchar***) (opt + 1); char *option_name_ptr; options[1]= options[0]; options[1].name= option_name_ptr= (char*) alloc_root(mem_root, plugin_dash.length + optnamelen + 1); options[1].comment= 0; /* Hidden from the help text */ strxmov(option_name_ptr, plugin_dash.str, optname, NullS); options+= 2; } DBUG_RETURN(0); }
0
[ "CWE-416" ]
server
c05fd700970ad45735caed3a6f9930d4ce19a3bd
274,583,830,769,818,520,000,000,000,000,000,000,000
248
MDEV-26323 use-after-poison issue of MariaDB server
static double mp_abs(_cimg_math_parser& mp) { return cimg::abs(_mp_arg(2)); }
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
219,297,154,860,003,060,000,000,000,000,000,000,000
3
Fix other issues in 'CImg<T>::load_bmp()'.
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename, ExceptionInfo *exception) { char ping_filename[MagickPathExtent]; Image *image, *images; ImageInfo *read_info; /* Ping image list from a file. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); (void) SetImageOption(image_info,"filename",filename); (void) CopyMagickString(image_info->filename,filename,MagickPathExtent); (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename, (int) image_info->scene,ping_filename,exception); if (LocaleCompare(ping_filename,image_info->filename) != 0) { ExceptionInfo *sans; ssize_t extent, scene; /* Images of the form image-%d.png[1-5]. */ read_info=CloneImageInfo(image_info); sans=AcquireExceptionInfo(); (void) SetImageInfo(read_info,0,sans); sans=DestroyExceptionInfo(sans); if (read_info->number_scenes == 0) { read_info=DestroyImageInfo(read_info); return(PingImage(image_info,exception)); } (void) CopyMagickString(ping_filename,read_info->filename,MagickPathExtent); images=NewImageList(); extent=(ssize_t) (read_info->scene+read_info->number_scenes); for (scene=(ssize_t) read_info->scene; scene < (ssize_t) extent; scene++) { (void) InterpretImageFilename(image_info,(Image *) NULL,ping_filename, (int) scene,read_info->filename,exception); image=PingImage(read_info,exception); if (image == (Image *) NULL) continue; AppendImageToList(&images,image); } read_info=DestroyImageInfo(read_info); return(images); } return(PingImage(image_info,exception)); }
1
[ "CWE-119" ]
ImageMagick
4e8c2ed53fcb54a34b3a6185b2584f26cf6874a3
23,046,549,141,861,670,000,000,000,000,000,000,000
64
https://github.com/ImageMagick/ImageMagick/issues/312
static void credit_entropy_bits(struct entropy_store *r, int nbits) { unsigned long flags; int entropy_count; if (!nbits) return; spin_lock_irqsave(&r->lock, flags); DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name); entropy_count = r->entropy_count; entropy_count += nbits; if (entropy_count < 0) { DEBUG_ENT("negative entropy/overflow\n"); entropy_count = 0; } else if (entropy_count > r->poolinfo->POOLBITS) entropy_count = r->poolinfo->POOLBITS; r->entropy_count = entropy_count; /* should we wake readers? */ if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) { wake_up_interruptible(&random_read_wait); kill_fasync(&fasync, SIGIO, POLL_IN); } spin_unlock_irqrestore(&r->lock, flags); }
0
[ "CWE-310" ]
linux-2.6
8a0a9bd4db63bc45e3017bedeafbd88d0eb84d02
89,481,756,772,189,570,000,000,000,000,000,000,000
27
random: make get_random_int() more random It's a really simple patch that basically just open-codes the current "secure_ip_id()" call, but when open-coding it we now use a _static_ hashing area, so that it gets updated every time. And to make sure somebody can't just start from the same original seed of all-zeroes, and then do the "half_md4_transform()" over and over until they get the same sequence as the kernel has, each iteration also mixes in the same old "current->pid + jiffies" we used - so we should now have a regular strong pseudo-number generator, but we also have one that doesn't have a single seed. Note: the "pid + jiffies" is just meant to be a tiny tiny bit of noise. It has no real meaning. It could be anything. I just picked the previous seed, it's just that now we keep the state in between calls and that will feed into the next result, and that should make all the difference. I made that hash be a per-cpu data just to avoid cache-line ping-pong: having multiple CPU's write to the same data would be fine for randomness, and add yet another layer of chaos to it, but since get_random_int() is supposed to be a fast interface I did it that way instead. I considered using "__raw_get_cpu_var()" to avoid any preemption overhead while still getting the hash be _mostly_ ping-pong free, but in the end good taste won out. Signed-off-by: Ingo Molnar <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
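A userspace sketch of the scheme: hash state persists between calls and a little per-call noise is folded in, so each result feeds into the next and the sequence cannot be re-derived from a single seed. The kernel mixes with half_md4_transform() over per-cpu state; the xorshift below is only a stand-in:

#include <stdint.h>

static uint32_t hash_state;  /* persists between calls, like the per-cpu hash */

static uint32_t get_random_int_sketch(uint32_t noise /* e.g. pid + jiffies */)
{
    uint32_t x = hash_state ^ noise;
    x ^= x << 13; x ^= x >> 17; x ^= x << 5;  /* stand-in mixer */
    hash_state = x;  /* state carries into the next call */
    return x;
}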
static OPJ_BOOL opj_j2k_write_mcc_record( opj_j2k_t *p_j2k, struct opj_simple_mcc_decorrelation_data * p_mcc_record, struct opj_stream_private *p_stream, struct opj_event_mgr * p_manager ) { OPJ_UINT32 i; OPJ_UINT32 l_mcc_size; OPJ_BYTE * l_current_data = 00; OPJ_UINT32 l_nb_bytes_for_comp; OPJ_UINT32 l_mask; OPJ_UINT32 l_tmcc; /* preconditions */ assert(p_j2k != 00); assert(p_manager != 00); assert(p_stream != 00); if (p_mcc_record->m_nb_comps > 255 ) { l_nb_bytes_for_comp = 2; l_mask = 0x8000; } else { l_nb_bytes_for_comp = 1; l_mask = 0; } l_mcc_size = p_mcc_record->m_nb_comps * 2 * l_nb_bytes_for_comp + 19; if (l_mcc_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) { OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_mcc_size); if (! new_header_tile_data) { opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data); p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL; p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0; opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write MCC marker\n"); return OPJ_FALSE; } p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data; p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_mcc_size; } l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data; opj_write_bytes(l_current_data,J2K_MS_MCC,2); /* MCC */ l_current_data += 2; opj_write_bytes(l_current_data,l_mcc_size-2,2); /* Lmcc */ l_current_data += 2; /* first marker */ opj_write_bytes(l_current_data,0,2); /* Zmcc */ l_current_data += 2; opj_write_bytes(l_current_data,p_mcc_record->m_index,1); /* Imcc -> no need for other values, take the first */ ++l_current_data; /* only one marker atm */ opj_write_bytes(l_current_data,0,2); /* Ymcc */ l_current_data+=2; opj_write_bytes(l_current_data,1,2); /* Qmcc -> number of collections -> 1 */ l_current_data+=2; opj_write_bytes(l_current_data,0x1,1); /* Xmcci type of component transformation -> array based decorrelation */ ++l_current_data; opj_write_bytes(l_current_data,p_mcc_record->m_nb_comps | l_mask,2); /* Nmcci number of input components involved and size for each component offset = 8 bits */ l_current_data+=2; for (i=0;i<p_mcc_record->m_nb_comps;++i) { opj_write_bytes(l_current_data,i,l_nb_bytes_for_comp); /* Cmccij Component offset*/ l_current_data+=l_nb_bytes_for_comp; } opj_write_bytes(l_current_data,p_mcc_record->m_nb_comps|l_mask,2); /* Mmcci number of output components involved and size for each component offset = 8 bits */ l_current_data+=2; for (i=0;i<p_mcc_record->m_nb_comps;++i) { opj_write_bytes(l_current_data,i,l_nb_bytes_for_comp); /* Wmccij Component offset*/ l_current_data+=l_nb_bytes_for_comp; } l_tmcc = ((!p_mcc_record->m_is_irreversible)&1)<<16; if (p_mcc_record->m_decorrelation_array) { l_tmcc |= p_mcc_record->m_decorrelation_array->m_index; } if (p_mcc_record->m_offset_array) { l_tmcc |= ((p_mcc_record->m_offset_array->m_index)<<8); } opj_write_bytes(l_current_data,l_tmcc,3); /* Tmcci : use MCT defined as number 1 and irreversible array based. */ l_current_data+=3; if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_mcc_size,p_manager) != l_mcc_size) { return OPJ_FALSE; } return OPJ_TRUE; }
0
[ "CWE-416" ]
openjpeg
940100c28ae28931722290794889cf84a92c5f6f
99,949,458,325,677,170,000,000,000,000,000,000,000
102
Fix potential use-after-free in opj_j2k_write_mco function Fixes #563
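The realloc handling visible above already follows the safe idiom; for contrast, a minimal sketch of the pitfall it avoids:

#include <stdlib.h>

/* Unsafe: p = realloc(p, n); overwrites p with NULL on failure and leaks
 * the original block. Safe: keep the old pointer until realloc succeeds. */
static int grow(unsigned char **p, size_t new_size)
{
    unsigned char *tmp = realloc(*p, new_size);
    if (!tmp) {
        free(*p);  /* release the old block, as the function above does */
        *p = NULL;
        return 0;
    }
    *p = tmp;
    return 1;
}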
find_start_of_next_microdesc(const char *s, const char *eos) { int started_with_annotations; s = eat_whitespace_eos(s, eos); if (!s) return NULL; #define CHECK_LENGTH() STMT_BEGIN \ if (s+32 > eos) \ return NULL; \ STMT_END #define NEXT_LINE() STMT_BEGIN \ s = memchr(s, '\n', eos-s); \ if (!s || s+1 >= eos) \ return NULL; \ s++; \ STMT_END CHECK_LENGTH(); started_with_annotations = (*s == '@'); if (started_with_annotations) { /* Start by advancing to the first non-annotation line. */ while (*s == '@') NEXT_LINE(); } CHECK_LENGTH(); /* Now we should be pointed at an onion-key line. If we are, then skip * it. */ if (!strcmpstart(s, "onion-key")) NEXT_LINE(); /* Okay, now we're pointed at the first line of the microdescriptor which is not an annotation or onion-key. The next line that _is_ an annotation or onion-key is the start of the next microdescriptor. */ while (s+32 < eos) { if (*s == '@' || !strcmpstart(s, "onion-key")) return s; NEXT_LINE(); } return NULL; #undef CHECK_LENGTH #undef NEXT_LINE }
0
[ "CWE-399" ]
tor
57e35ad3d91724882c345ac709666a551a977f0f
149,360,157,063,014,080,000,000,000,000,000,000,000
48
Avoid possible segfault when handling networkstatus vote with bad flavor Fix for 6530; fix on 0.2.2.6-alpha.
static __poll_t __io_arm_poll_handler(struct io_kiocb *req, struct io_poll_iocb *poll, struct io_poll_table *ipt, __poll_t mask, wait_queue_func_t wake_func) __acquires(&ctx->completion_lock) { struct io_ring_ctx *ctx = req->ctx; bool cancel = false; INIT_HLIST_NODE(&req->hash_node); io_init_poll_iocb(poll, mask, wake_func); poll->file = req->file; poll->wait.private = req; ipt->pt._key = mask; ipt->req = req; ipt->error = -EINVAL; mask = vfs_poll(req->file, &ipt->pt) & poll->events; spin_lock_irq(&ctx->completion_lock); if (likely(poll->head)) { spin_lock(&poll->head->lock); if (unlikely(list_empty(&poll->wait.entry))) { if (ipt->error) cancel = true; ipt->error = 0; mask = 0; } if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error) list_del_init(&poll->wait.entry); else if (cancel) WRITE_ONCE(poll->canceled, true); else if (!poll->done) /* actually waiting for an event */ io_poll_req_insert(req); spin_unlock(&poll->head->lock); } return mask; }
0
[ "CWE-787" ]
linux
d1f82808877bb10d3deee7cf3374a4eb3fb582db
140,973,401,967,068,050,000,000,000,000,000,000,000
36
io_uring: truncate lengths larger than MAX_RW_COUNT on provide buffers Read and write operations are capped to MAX_RW_COUNT. Some read ops rely on that limit, and that is not guaranteed by the IORING_OP_PROVIDE_BUFFERS. Truncate those lengths when doing io_add_buffers, so buffer addresses still use the uncapped length. Also, take the chance and change struct io_buffer len member to __u32, so it matches struct io_provide_buffer len member. This fixes CVE-2021-3491, also reported as ZDI-CAN-13546. Fixes: ddf0322db79c ("io_uring: add IORING_OP_PROVIDE_BUFFERS") Reported-by: Billy Jheng Bing-Jhong (@st424204) Signed-off-by: Thadeu Lima de Souza Cascardo <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
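Schematically, the fix is a clamp at buffer-registration time; in the kernel, MAX_RW_COUNT is INT_MAX & PAGE_MASK. A sketch (not the actual io_add_buffers() diff, and it assumes 4 KiB pages):

#include <limits.h>

#define PAGE_MASK_SKETCH    (~4095UL)
#define MAX_RW_COUNT_SKETCH (INT_MAX & PAGE_MASK_SKETCH)

static unsigned long clamp_provided_len(unsigned long user_len)
{
    /* The buffer address keeps the uncapped length; only the I/O length is capped. */
    return user_len > MAX_RW_COUNT_SKETCH ? MAX_RW_COUNT_SKETCH : user_len;
}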
struct vm_area_struct *vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy, struct vm_userfaultfd_ctx vm_userfaultfd_ctx) { pgoff_t pglen = (end - addr) >> PAGE_SHIFT; struct vm_area_struct *area, *next; int err; /* * We later require that vma->vm_flags == vm_flags, * so this tests vma->vm_flags & VM_SPECIAL, too. */ if (vm_flags & VM_SPECIAL) return NULL; if (prev) next = prev->vm_next; else next = mm->mmap; area = next; if (area && area->vm_end == end) /* cases 6, 7, 8 */ next = next->vm_next; /* verify some invariant that must be enforced by the caller */ VM_WARN_ON(prev && addr <= prev->vm_start); VM_WARN_ON(area && end > area->vm_end); VM_WARN_ON(addr >= end); /* * Can it merge with the predecessor? */ if (prev && prev->vm_end == addr && mpol_equal(vma_policy(prev), policy) && can_vma_merge_after(prev, vm_flags, anon_vma, file, pgoff, vm_userfaultfd_ctx)) { /* * OK, it can. Can we now merge in the successor as well? */ if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, vm_userfaultfd_ctx) && is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) { /* cases 1, 6 */ err = __vma_adjust(prev, prev->vm_start, next->vm_end, prev->vm_pgoff, NULL, prev); } else /* cases 2, 5, 7 */ err = __vma_adjust(prev, prev->vm_start, end, prev->vm_pgoff, NULL, prev); if (err) return NULL; khugepaged_enter_vma_merge(prev, vm_flags); return prev; } /* * Can this new request be merged in front of next? */ if (next && end == next->vm_start && mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen, vm_userfaultfd_ctx)) { if (prev && addr < prev->vm_end) /* case 4 */ err = __vma_adjust(prev, prev->vm_start, addr, prev->vm_pgoff, NULL, next); else { /* cases 3, 8 */ err = __vma_adjust(area, addr, next->vm_end, next->vm_pgoff - pglen, NULL, next); /* * In case 3 area is already equal to next and * this is a noop, but in case 8 "area" has * been removed and next was expanded over it. */ area = next; } if (err) return NULL; khugepaged_enter_vma_merge(area, vm_flags); return area; } return NULL; }
0
[ "CWE-119" ]
linux
1be7107fbe18eed3e319a6c3e83c78254b693acb
174,083,822,566,470,470,000,000,000,000,000,000,000
92
mm: larger stack guard gap, between vmas Stack guard page is a useful feature to reduce a risk of stack smashing into a different mapping. We have been using a single page gap which is sufficient to prevent having stack adjacent to a different mapping. But this seems to be insufficient in the light of the stack usage in userspace. E.g. glibc uses as large as 64kB alloca() in many commonly used functions. Others use constructs like gid_t buffer[NGROUPS_MAX] which is 256kB or stack strings with MAX_ARG_STRLEN. This will become especially dangerous for suid binaries and the default no limit for the stack size limit because those applications can be tricked to consume a large portion of the stack and a single glibc call could jump over the guard page. These attacks are not theoretical, unfortunately. Make those attacks less probable by increasing the stack guard gap to 1MB (on systems with 4k pages; but make it depend on the page size because systems with larger base pages might cap stack allocations in the PAGE_SIZE units) which should cover larger alloca() and VLA stack allocations. It is obviously not a full fix because the problem is somehow inherent, but it should reduce attack space a lot. One could argue that the gap size should be configurable from userspace, but that can be done later when somebody finds that the new 1MB is wrong for some special case applications. For now, add a kernel command line option (stack_guard_gap) to specify the stack gap size (in page units). Implementation wise, first delete all the old code for stack guard page: because although we could get away with accounting one extra page in a stack vma, accounting a larger gap can break userspace - case in point, a program run with "ulimit -S -v 20000" failed when the 1MB gap was counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK and strict non-overcommit mode. Instead of keeping gap inside the stack vma, maintain the stack guard gap as a gap between vmas: using vm_start_gap() in place of vm_start (or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few places which need to respect the gap - mainly arch_get_unmapped_area(), and the vma tree's subtree_gap support for that. Original-patch-by: Oleg Nesterov <[email protected]> Original-patch-by: Michal Hocko <[email protected]> Signed-off-by: Hugh Dickins <[email protected]> Acked-by: Michal Hocko <[email protected]> Tested-by: Helge Deller <[email protected]> # parisc Signed-off-by: Linus Torvalds <[email protected]>
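The gap-aware lookup the commit describes can be sketched as follows (simplified types and flag value; the real vm_start_gap() lives in the mm headers, and stack_guard_gap defaults to 256 pages, i.e. 1 MB with 4 KiB pages):

struct vma_sketch { unsigned long vm_start; unsigned long vm_flags; };
#define VM_GROWSDOWN_SKETCH 0x0100UL

static unsigned long stack_guard_gap = 256UL << 12;  /* 1 MB on 4 KiB pages */

static unsigned long vm_start_gap(const struct vma_sketch *vma)
{
    unsigned long start = vma->vm_start;
    if (vma->vm_flags & VM_GROWSDOWN_SKETCH) {
        start -= stack_guard_gap;
        if (start > vma->vm_start)  /* wrapped below zero: clamp */
            start = 0;
    }
    return start;
}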
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) { struct cm_timewait_info *timewait_info; struct cm_id_private *cm_id_priv; __be32 remote_id; remote_id = rej_msg->local_comm_id; if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) { spin_lock_irq(&cm.lock); timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari), remote_id); if (!timewait_info) { spin_unlock_irq(&cm.lock); return NULL; } cm_id_priv = idr_find(&cm.local_id_table, (__force int) (timewait_info->work.local_id ^ cm.random_id_operand)); if (cm_id_priv) { if (cm_id_priv->id.remote_id == remote_id) atomic_inc(&cm_id_priv->refcount); else cm_id_priv = NULL; } spin_unlock_irq(&cm.lock); } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ) cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0); else cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id); return cm_id_priv; }
0
[ "CWE-20" ]
linux
b2853fd6c2d0f383dbdf7427e263eb576a633867
11,682,584,982,264,021,000,000,000,000,000,000,000
33
IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler The code that resolves the passive side source MAC within the rdma_cm connection request handler was both redundant and buggy, so remove it. It was redundant since later, when an RC QP is modified to RTR state, the resolution will take place in the ib_core module. It was buggy because this callback also deals with UD SIDR exchange, for which we incorrectly looked at the REQ member of the CM event and dereferenced a random value. Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures") Signed-off-by: Moni Shoua <[email protected]> Signed-off-by: Or Gerlitz <[email protected]> Signed-off-by: Roland Dreier <[email protected]>
static inline uint8_t *csrhci_out_packet_event(struct csrhci_s *s, int evt, int len) { uint8_t *ret = csrhci_out_packetz(s, len + 1 + sizeof(struct hci_event_hdr)); *ret ++ = H4_EVT_PKT; ((struct hci_event_hdr *) ret)->evt = evt; ((struct hci_event_hdr *) ret)->plen = len; return ret + sizeof(struct hci_event_hdr); }
0
[ "CWE-416" ]
qemu
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
186,320,282,555,937,700,000,000,000,000,000,000,000
12
char: move front end handlers in CharBackend Since the handlers are associated with a CharBackend, rather than the CharDriverState, it is more appropriate to store in CharBackend. This avoids the handler copy dance in qemu_chr_fe_set_handlers() then mux_chr_update_read_handler(), by storing the CharBackend pointer directly. Also a mux CharDriver should go through mux->backends[focused], since chr->be will stay NULL. Before that, it was possible to call chr->handler by mistake with surprising results, for ex through qemu_chr_be_can_write(), which would result in calling the last set handler front end, not the one with focus. Signed-off-by: Marc-André Lureau <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
static void csrhci_in_packet(struct csrhci_s *s, uint8_t *pkt) { uint8_t *rpkt; int opc; switch (*pkt ++) { case H4_CMD_PKT: opc = le16_to_cpu(((struct hci_command_hdr *) pkt)->opcode); if (cmd_opcode_ogf(opc) == OGF_VENDOR_CMD) { csrhci_in_packet_vendor(s, cmd_opcode_ocf(opc), pkt + sizeof(struct hci_command_hdr), s->in_len - sizeof(struct hci_command_hdr) - 1); return; } /* TODO: if the command is OCF_READ_LOCAL_COMMANDS or the likes, * we need to send it to the HCI layer and then add our supported * commands to the returned mask (such as OGF_VENDOR_CMD). With * bt-hci.c we could just have hooks for this kind of commands but * we can't with bt-host.c. */ s->hci->cmd_send(s->hci, pkt, s->in_len - 1); break; case H4_EVT_PKT: goto bad_pkt; case H4_ACL_PKT: s->hci->acl_send(s->hci, pkt, s->in_len - 1); break; case H4_SCO_PKT: s->hci->sco_send(s->hci, pkt, s->in_len - 1); break; case H4_NEG_PKT: if (s->in_hdr != sizeof(csrhci_neg_packet) || memcmp(pkt - 1, csrhci_neg_packet, s->in_hdr)) { fprintf(stderr, "%s: got a bad NEG packet\n", __FUNCTION__); return; } pkt += 2; rpkt = csrhci_out_packet_csr(s, H4_NEG_PKT, 10); *rpkt ++ = 0x20; /* Operational settings negotiation Ok */ memcpy(rpkt, pkt, 7); rpkt += 7; *rpkt ++ = 0xff; *rpkt = 0xff; break; case H4_ALIVE_PKT: if (s->in_hdr != 4 || pkt[1] != 0x55 || pkt[2] != 0x00) { fprintf(stderr, "%s: got a bad ALIVE packet\n", __FUNCTION__); return; } rpkt = csrhci_out_packet_csr(s, H4_ALIVE_PKT, 2); *rpkt ++ = 0xcc; *rpkt = 0x00; break; default: bad_pkt: /* TODO: error out */ fprintf(stderr, "%s: got a bad packet\n", __FUNCTION__); break; } csrhci_fifo_wake(s); }
0
[ "CWE-416" ]
qemu
a4afa548fc6dd9842ed86639b4d37d4d1c4ad480
183,150,233,727,716,600,000,000,000,000,000,000,000
72
char: move front end handlers in CharBackend Since the handlers are associated with a CharBackend, rather than the CharDriverState, it is more appropriate to store in CharBackend. This avoids the handler copy dance in qemu_chr_fe_set_handlers() then mux_chr_update_read_handler(), by storing the CharBackend pointer directly. Also a mux CharDriver should go through mux->backends[focused], since chr->be will stay NULL. Before that, it was possible to call chr->handler by mistake with surprising results, for ex through qemu_chr_be_can_write(), which would result in calling the last set handler front end, not the one with focus. Signed-off-by: Marc-André Lureau <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
ipf_expiry_list_clean(struct hmap *frag_lists, struct ipf_list *ipf_list) /* OVS_REQUIRES(ipf_lock) */ { ipf_list_clean(frag_lists, ipf_list); }
0
[ "CWE-401" ]
ovs
803ed12e31b0377c37d7aa8c94b3b92f2081e349
213,952,372,373,197,800,000,000,000,000,000,000,000
6
ipf: release unhandled packets from the batch Since 640d4db788ed ("ipf: Fix a use-after-free error, ...") the ipf framework unconditionally allocates a new dp_packet to track individual fragments. This prevents a use-after-free. However, an additional issue was present - even when the packet buffer is cloned, if the ip fragment handling code keeps it, the original buffer is leaked during the refill loop. Even in the original processing code, the hardcoded dnsteal branches would always leak a packet buffer from the refill loop. This can be confirmed with valgrind: ==717566== 16,672 (4,480 direct, 12,192 indirect) bytes in 8 blocks are definitely lost in loss record 390 of 390 ==717566== at 0x484086F: malloc (vg_replace_malloc.c:380) ==717566== by 0x537BFD: xmalloc__ (util.c:137) ==717566== by 0x537BFD: xmalloc (util.c:172) ==717566== by 0x46DDD4: dp_packet_new (dp-packet.c:153) ==717566== by 0x46DDD4: dp_packet_new_with_headroom (dp-packet.c:163) ==717566== by 0x550AA6: netdev_linux_batch_rxq_recv_sock.constprop.0 (netdev-linux.c:1262) ==717566== by 0x5512AF: netdev_linux_rxq_recv (netdev-linux.c:1511) ==717566== by 0x4AB7E0: netdev_rxq_recv (netdev.c:727) ==717566== by 0x47F00D: dp_netdev_process_rxq_port (dpif-netdev.c:4699) ==717566== by 0x47FD13: dpif_netdev_run (dpif-netdev.c:5957) ==717566== by 0x4331D2: type_run (ofproto-dpif.c:370) ==717566== by 0x41DFD8: ofproto_type_run (ofproto.c:1768) ==717566== by 0x40A7FB: bridge_run__ (bridge.c:3245) ==717566== by 0x411269: bridge_run (bridge.c:3310) ==717566== by 0x406E6C: main (ovs-vswitchd.c:127) The fix is to delete the original packet when it isn't able to be reinserted into the packet batch. Subsequent valgrind runs show that the packets are not leaked from the batch any longer. Fixes: 640d4db788ed ("ipf: Fix a use-after-free error, and remove the 'do_not_steal' flag.") Fixes: 4ea96698f667 ("Userspace datapath: Add fragmentation handling.") Reported-by: Wan Junjie <[email protected]> Reported-at: https://github.com/openvswitch/ovs-issues/issues/226 Signed-off-by: Aaron Conole <[email protected]> Reviewed-by: David Marchand <[email protected]> Tested-by: Wan Junjie <[email protected]> Signed-off-by: Alin-Gabriel Serdean <[email protected]>
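The fix pattern from the message, schematically: when the fragment-handling code keeps the cloned buffer and the original cannot be returned to the batch, the original must be released (dp_packet_delete() is OVS's packet destructor; the surrounding logic here is a sketch, not the actual diff):

#include <stdbool.h>

struct dp_packet;                           /* opaque for this sketch */
void dp_packet_delete(struct dp_packet *);  /* OVS packet destructor */

static void finish_fragment(struct dp_packet *pkt, bool reinserted)
{
    if (!reinserted) {
        dp_packet_delete(pkt);  /* previously leaked from the refill loop */
    }
}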
static void quantsmooth_block(JCOEFPTR coef, UINT16 *quantval, JSAMPLE *image, JSAMPLE *image2, int stride, int flags, float **tables, int luma) { int k, n = DCTSIZE, x, y, need_refresh = 1; JSAMPLE ALIGN(32) buf[DCTSIZE2 + DCTSIZE * 6], *border = buf + n * n; #ifndef NO_SIMD int16_t ALIGN(32) temp[DCTSIZE2 * 4 + DCTSIZE * (4 - 2)]; #endif #ifdef USE_JSIMD JSAMPROW output_buf[DCTSIZE]; int output_col = 0; for (k = 0; k < n; k++) output_buf[k] = buf + k * n; #endif (void)x; if (image2) { float ALIGN(32) fbuf[DCTSIZE2]; #if 1 && defined(USE_NEON) for (y = 0; y < n; y++) { uint8x8_t h0, h1; uint16x8_t sumA, sumB, v0, v1; uint16x4_t h2, h3; float32x4_t v5, scale; uint32x4_t v4, sumAA1, sumAB1, sumAA2, sumAB2; #define M1(xx, yy) \ h0 = vld1_u8(&image2[(y + yy) * stride + xx]); \ h1 = vld1_u8(&image[(y + yy) * stride + xx]); \ sumA = vaddw_u8(sumA, h0); v0 = vmull_u8(h0, h0); \ sumB = vaddw_u8(sumB, h1); v1 = vmull_u8(h0, h1); \ sumAA1 = vaddw_u16(sumAA1, vget_low_u16(v0)); \ sumAB1 = vaddw_u16(sumAB1, vget_low_u16(v1)); \ sumAA2 = vaddw_u16(sumAA2, vget_high_u16(v0)); \ sumAB2 = vaddw_u16(sumAB2, vget_high_u16(v1)); #define M2 \ sumA = vaddq_u16(sumA, sumA); sumB = vaddq_u16(sumB, sumB); \ sumAA1 = vaddq_u32(sumAA1, sumAA1); sumAA2 = vaddq_u32(sumAA2, sumAA2); \ sumAB1 = vaddq_u32(sumAB1, sumAB1); sumAB2 = vaddq_u32(sumAB2, sumAB2); h0 = vld1_u8(&image2[y * stride]); h1 = vld1_u8(&image[y * stride]); sumA = vmovl_u8(h0); v0 = vmull_u8(h0, h0); sumB = vmovl_u8(h1); v1 = vmull_u8(h0, h1); sumAA1 = vmovl_u16(vget_low_u16(v0)); sumAB1 = vmovl_u16(vget_low_u16(v1)); sumAA2 = vmovl_u16(vget_high_u16(v0)); sumAB2 = vmovl_u16(vget_high_u16(v1)); M2 M1(0, -1) M1(-1, 0) M1(1, 0) M1(0, 1) M2 M1(-1, -1) M1(1, -1) M1(-1, 1) M1(1, 1) #undef M2 #undef M1 v0 = vmovl_u8(vld1_u8(&image2[y * stride])); #define M1(low, sumAA, sumAB, x) \ h2 = vget_##low##_u16(sumA); sumAA = vshlq_n_u32(sumAA, 4); \ h3 = vget_##low##_u16(sumB); sumAB = vshlq_n_u32(sumAB, 4); \ sumAA = vmlsl_u16(sumAA, h2, h2); sumAB = vmlsl_u16(sumAB, h2, h3); \ v4 = vtstq_u32(sumAA, sumAA); \ sumAB = vandq_u32(sumAB, v4); sumAA = vornq_u32(sumAA, v4); \ scale = vdivq_f32(vcvtq_f32_s32(vreinterpretq_s32_u32(sumAB)), \ vcvtq_f32_s32(vreinterpretq_s32_u32(sumAA))); \ scale = vmaxq_f32(scale, vdupq_n_f32(-16.0f)); \ scale = vminq_f32(scale, vdupq_n_f32(16.0f)); \ v4 = vshll_n_u16(vget_##low##_u16(v0), 4); \ v5 = vcvtq_n_f32_s32(vreinterpretq_s32_u32(vsubw_u16(v4, h2)), 4); \ v5 = vmlaq_f32(vcvtq_n_f32_u32(vmovl_u16(h3), 4), v5, scale); \ v5 = vmaxq_f32(v5, vdupq_n_f32(0)); \ v5 = vsubq_f32(v5, vdupq_n_f32(CENTERJSAMPLE)); \ v5 = vminq_f32(v5, vdupq_n_f32(CENTERJSAMPLE)); \ vst1q_f32(fbuf + y * n + x, v5); M1(low, sumAA1, sumAB1, 0) M1(high, sumAA2, sumAB2, 4) #undef M1 } #elif 1 && defined(USE_AVX2) for (y = 0; y < n; y++) { __m128i v0, v1; __m256i v2, v3, v4, sumA, sumB, sumAA, sumAB; __m256 v5, scale; #define M1(x0, y0, x1, y1) \ v0 = _mm_loadl_epi64((__m128i*)&image2[(y + y0) * stride + x0]); \ v1 = _mm_loadl_epi64((__m128i*)&image2[(y + y1) * stride + x1]); \ v2 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(v0, v1)); \ v0 = _mm_loadl_epi64((__m128i*)&image[(y + y0) * stride + x0]); \ v1 = _mm_loadl_epi64((__m128i*)&image[(y + y1) * stride + x1]); \ v3 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(v0, v1)); \ sumA = _mm256_add_epi16(sumA, v2); \ sumB = _mm256_add_epi16(sumB, v3); \ sumAA = _mm256_add_epi32(sumAA, _mm256_madd_epi16(v2, v2)); \ sumAB = _mm256_add_epi32(sumAB, _mm256_madd_epi16(v2, v3)); v0 = _mm_loadl_epi64((__m128i*)&image2[y * stride]); v1 = _mm_loadl_epi64((__m128i*)&image[y * stride]); sumA = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(v0, v0)); sumB = _mm256_cvtepu8_epi16(_mm_unpacklo_epi8(v1, v1)); sumAA = _mm256_madd_epi16(sumA, sumA); sumAB = _mm256_madd_epi16(sumA, sumB); M1(0, -1, -1, 0) M1(1, 0, 0, 1) sumA = _mm256_add_epi16(sumA, sumA); sumAA = _mm256_add_epi32(sumAA, sumAA); sumB = _mm256_add_epi16(sumB, sumB); sumAB = _mm256_add_epi32(sumAB, sumAB); M1(-1, -1, 1, -1) M1(-1, 1, 1, 1) #undef M1 v3 = _mm256_set1_epi16(1); v2 = _mm256_madd_epi16(sumA, v3); sumAA = _mm256_slli_epi32(sumAA, 4); v3 = _mm256_madd_epi16(sumB, v3); sumAB = _mm256_slli_epi32(sumAB, 4); sumAA = _mm256_sub_epi32(sumAA, _mm256_mullo_epi32(v2, v2)); sumAB = _mm256_sub_epi32(sumAB, _mm256_mullo_epi32(v2, v3)); v4 = _mm256_cmpeq_epi32(sumAA, _mm256_setzero_si256()); sumAB = _mm256_andnot_si256(v4, sumAB); scale = _mm256_cvtepi32_ps(_mm256_or_si256(sumAA, v4)); scale = _mm256_div_ps(_mm256_cvtepi32_ps(sumAB), scale); scale = _mm256_max_ps(scale, _mm256_set1_ps(-16.0f)); scale = _mm256_min_ps(scale, _mm256_set1_ps(16.0f)); v0 = _mm_loadl_epi64((__m128i*)&image2[y * stride]); v4 = _mm256_slli_epi32(_mm256_cvtepu8_epi32(v0), 4); v5 = _mm256_cvtepi32_ps(_mm256_sub_epi32(v4, v2)); // v5 = _mm256_add_ps(_mm256_mul_ps(v5, scale), _mm256_cvtepi32_ps(v3)); v5 = _mm256_fmadd_ps(v5, scale, _mm256_cvtepi32_ps(v3)); v5 = _mm256_mul_ps(v5, _mm256_set1_ps(1.0f / 16)); v5 = _mm256_max_ps(v5, _mm256_setzero_ps()); v5 = _mm256_sub_ps(v5, _mm256_set1_ps(CENTERJSAMPLE)); v5 = _mm256_min_ps(v5, _mm256_set1_ps(CENTERJSAMPLE)); _mm256_storeu_ps(fbuf + y * n, v5); } #elif 1 && defined(USE_SSE2) for (y = 0; y < n; y++) { __m128i v0, v1, v2, v3, v4, sumA, sumB, sumAA1, sumAB1, sumAA2, sumAB2; __m128 v5, scale; #define M1(x0, y0, x1, y1) \ v0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)&image2[(y + y0) * stride + x0])); \ v1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)&image2[(y + y1) * stride + x1])); \ v2 = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)&image[(y + y0) * stride + x0])); \ v3 = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)&image[(y + y1) * stride + x1])); \ sumA = _mm_add_epi16(_mm_add_epi16(sumA, v0), v1); \ sumB = _mm_add_epi16(_mm_add_epi16(sumB, v2), v3); \ v4 = _mm_unpacklo_epi16(v0, v1); sumAA1 = _mm_add_epi32(sumAA1, _mm_madd_epi16(v4, v4)); \ v1 = _mm_unpackhi_epi16(v0, v1); sumAA2 = _mm_add_epi32(sumAA2, _mm_madd_epi16(v1, v1)); \ sumAB1 = _mm_add_epi32(sumAB1, _mm_madd_epi16(v4, _mm_unpacklo_epi16(v2, v3))); \ sumAB2 = _mm_add_epi32(sumAB2, _mm_madd_epi16(v1, _mm_unpackhi_epi16(v2, v3))); v0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)&image2[y * stride])); v1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)&image[y * stride])); v2 = _mm_unpacklo_epi16(v0, v0); sumAA1 = _mm_madd_epi16(v2, v2); v3 = _mm_unpacklo_epi16(v1, v1); sumAB1 = _mm_madd_epi16(v2, v3); v2 = _mm_unpackhi_epi16(v0, v0); sumAA2 = _mm_madd_epi16(v2, v2); v3 = _mm_unpackhi_epi16(v1, v1); sumAB2 = _mm_madd_epi16(v2, v3); sumA = _mm_add_epi16(v0, v0); sumB = _mm_add_epi16(v1, v1); M1(0, -1, -1, 0) M1(1, 0, 0, 1) sumA = _mm_add_epi16(sumA, sumA); sumB = _mm_add_epi16(sumB, sumB); sumAA1 = _mm_add_epi32(sumAA1, sumAA1); sumAA2 = _mm_add_epi32(sumAA2, sumAA2); sumAB1 = _mm_add_epi32(sumAB1, sumAB1); sumAB2 = _mm_add_epi32(sumAB2, sumAB2); M1(-1, -1, 1, -1) M1(-1, 1, 1, 1) #undef M1 v0 = _mm_setzero_si128(); v1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)&image2[y * stride])); #define M1(lo, sumAA, sumAB, x) \ v2 = _mm_unpack##lo##_epi16(sumA, v0); sumAA = _mm_slli_epi32(sumAA, 4); \ v3 = _mm_unpack##lo##_epi16(sumB, v0); sumAB = _mm_slli_epi32(sumAB, 4); \ sumAA = _mm_sub_epi32(sumAA, _mm_mullo_epi32(v2, v2)); \ sumAB = _mm_sub_epi32(sumAB, _mm_mullo_epi32(v2, v3)); \ v4 = _mm_cmpeq_epi32(sumAA, v0); sumAB = _mm_andnot_si128(v4, sumAB); \ scale = _mm_cvtepi32_ps(_mm_or_si128(sumAA, v4)); \ scale = _mm_div_ps(_mm_cvtepi32_ps(sumAB), scale); \ scale = _mm_max_ps(scale, _mm_set1_ps(-16.0f)); \ scale = _mm_min_ps(scale, _mm_set1_ps(16.0f)); \ v4 = _mm_slli_epi32(_mm_unpack##lo##_epi16(v1, v0), 4); \ v5 = _mm_cvtepi32_ps(_mm_sub_epi32(v4, v2)); \ v5 = _mm_add_ps(_mm_mul_ps(v5, scale), _mm_cvtepi32_ps(v3)); \ v5 = _mm_mul_ps(v5, _mm_set1_ps(1.0f / 16)); \ v5 = _mm_max_ps(v5, _mm_setzero_ps()); \ v5 = _mm_sub_ps(v5, _mm_set1_ps(CENTERJSAMPLE)); \ v5 = _mm_min_ps(v5, _mm_set1_ps(CENTERJSAMPLE)); \ _mm_storeu_ps(fbuf + y * n + x, v5); M1(lo, sumAA1, sumAB1, 0) M1(hi, sumAA2, sumAB2, 4) #undef M1 } #else for (y = 0; y < n; y++) for (x = 0; x < n; x++) { float sumA = 0, sumB = 0, sumAA = 0, sumAB = 0; float divN = 1.0f / 16, scale, offset; float a; #define M1(xx, yy) { \ float a = image2[(y + yy) * stride + x + xx]; \ float b = image[(y + yy) * stride + x + xx]; \ sumA += a; sumAA += a * a; \ sumB += b; sumAB += a * b; } #define M2 sumA += sumA; sumB += sumB; \ sumAA += sumAA; sumAB += sumAB; M1(0, 0) M2 M1(0, -1) M1(-1, 0) M1(1, 0) M1(0, 1) M2 M1(-1, -1) M1(1, -1) M1(-1, 1) M1(1, 1) #undef M2 #undef M1 scale = sumAA - sumA * divN * sumA; if (scale != 0.0f) scale = (sumAB - sumA * divN * sumB) / scale; scale = scale < -16.0f ? -16.0f : scale; scale = scale > 16.0f ? 16.0f : scale; offset = (sumB - scale * sumA) * divN; a = image2[y * stride + x] * scale + offset; a = a < 0 ? 0 : a > MAXJSAMPLE + 1 ? MAXJSAMPLE + 1 : a; fbuf[y * n + x] = a - CENTERJSAMPLE; } #endif fdct_clamp(fbuf, coef, quantval); } if (flags & JPEGQS_LOW_QUALITY) { float ALIGN(32) fbuf[DCTSIZE2]; float range = 0, c0 = 2, c1 = c0 * sqrtf(0.5f); if (image2) goto end; { int sum = 0; for (x = 1; x < n * n; x++) { int a = coef[x]; a = a < 0 ? -a : a; range += quantval[x] * a; sum += a; } if (sum) range *= 4.0f / sum; if (range > CENTERJSAMPLE) range = CENTERJSAMPLE; range = roundf(range); } #if 1 && defined(USE_NEON) for (y = 0; y < n; y++) { int16x8_t v4, v5; uint16x8_t v6 = vdupq_n_u16((int)range); float32x2_t f4; uint8x8_t i0, i1; float32x4_t f0, f1, s0 = vdupq_n_f32(0), s1 = s0, s2 = s0, s3 = s0; f4 = vset_lane_f32(c1, vdup_n_f32(c0), 1); i0 = vld1_u8(&image[y * stride]); #define M1(i, x, y) \ i1 = vld1_u8(&image[(y) * stride + x]); \ v4 = vreinterpretq_s16_u16(vsubl_u8(i0, i1)); \ v5 = vreinterpretq_s16_u16(vqsubq_u16(v6, \ vreinterpretq_u16_s16(vabsq_s16(v4)))); \ M2(low, s0, s1, i) M2(high, s2, s3, i) #define M2(low, s0, s1, i) \ f0 = vcvtq_f32_s32(vmovl_s16(vget_##low##_s16(v5))); \ f0 = vmulq_f32(f0, f0); f1 = vmulq_lane_f32(f0, f4, i); \ f0 = vmulq_f32(f0, vcvtq_f32_s32(vmovl_s16(vget_##low##_s16(v4)))); \ s0 = vmlaq_f32(s0, f0, f1); s1 = vmlaq_f32(s1, f1, f1); M1(1, -1, y-1) M1(0, 0, y-1) M1(1, 1, y-1) M1(0, -1, y) M1(0, 1, y) M1(1, -1, y+1) M1(0, 0, y+1) M1(1, 1, y+1) #undef M1 #undef M2 v4 = vreinterpretq_s16_u16(vmovl_u8(i0)); #define M1(low, s0, s1, x) \ f1 = vbslq_f32(vceqq_f32(s1, vdupq_n_f32(0)), vdupq_n_f32(1.0f), s1); \ f0 = vdivq_f32(s0, f1); \ f1 = vcvtq_f32_s32(vmovl_s16(vget_##low##_s16(v4))); \ f0 = vsubq_f32(f1, f0); \ f0 = vsubq_f32(f0, vdupq_n_f32(CENTERJSAMPLE)); \ vst1q_f32(fbuf + y * n + x, f0); M1(low, s0, s1, 0) M1(high, s2, s3, 4) #undef M1 } #elif 1 && defined(USE_AVX512) for (y = 0; y < n; y += 2) { __m256i v0, v1, v4, v5, v6 = _mm256_set1_epi16((int)range); __m512 f0, f1, f4, f5, s0 = _mm512_setzero_ps(), s1 = s0; __mmask16 m0; f4 = _mm512_set1_ps(c0); f5 = _mm512_set1_ps(c1); #define M2(v0, pos) \ v0 = _mm256_cvtepu8_epi16(_mm_unpacklo_epi64( \ _mm_loadl_epi64((__m128i*)&image[pos]), \ _mm_loadl_epi64((__m128i*)&image[pos + stride]))); #define M1(f4, x, y) M2(v1, (y) * stride + x) \ v4 = _mm256_sub_epi16(v0, v1); v5 = _mm256_subs_epu16(v6, _mm256_abs_epi16(v4)); \ f0 = _mm512_cvtepi32_ps(_mm512_cvtepi16_epi32(v5)); \ f0 = _mm512_mul_ps(f0, f0); f1 = _mm512_mul_ps(f0, f4); \ f0 = _mm512_mul_ps(f0, _mm512_cvtepi32_ps(_mm512_cvtepi16_epi32(v4))); \ s0 = _mm512_fmadd_ps(f0, f1, s0); s1 = _mm512_fmadd_ps(f1, f1, s1); M2(v0, y * stride) M1(f5, -1, y-1) M1(f4, 0, y-1) M1(f5, 1, y-1) M1(f4, -1, y) M1(f4, 1, y) M1(f5, -1, y+1) M1(f4, 0, y+1) M1(f5, 1, y+1) #undef M1 #undef M2 m0 = _mm512_cmp_ps_mask(s1, _mm512_setzero_ps(), 0); s1 = _mm512_mask_blend_ps(m0, s1, _mm512_set1_ps(1.0f)); f0 = _mm512_div_ps(s0, s1); f1 = _mm512_cvtepi32_ps(_mm512_cvtepi16_epi32(v0)); f0 = _mm512_sub_ps(f1, f0); f0 = _mm512_sub_ps(f0, _mm512_set1_ps(CENTERJSAMPLE)); _mm512_storeu_ps(fbuf + y * n, f0); } #elif 1 && defined(USE_AVX2) for (y = 0; y < n; y++) { __m128i v0, v1, v4, v5, v6 = _mm_set1_epi16((int)range); __m256 f0, f1, f4, f5, s0 = _mm256_setzero_ps(), s1 = s0; f4 = _mm256_set1_ps(c0); f5 = _mm256_set1_ps(c1); v0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)&image[y * stride])); #define M1(f4, x, y) \ v1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)&image[(y) * stride + x])); \ v4 = _mm_sub_epi16(v0, v1); v5 = _mm_subs_epu16(v6, _mm_abs_epi16(v4)); \ f0 = _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(v5)); \ f0 = _mm256_mul_ps(f0, f0); f1 = _mm256_mul_ps(f0, f4); \ f0 = _mm256_mul_ps(f0, _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(v4))); \ s0 = _mm256_fmadd_ps(f0, f1, s0); s1 = _mm256_fmadd_ps(f1, f1, s1); M1(f5, -1, y-1) M1(f4, 0, y-1) M1(f5, 1, y-1) M1(f4, -1, y) M1(f4, 1, y) M1(f5, -1, y+1) M1(f4, 0, y+1) M1(f5, 1, y+1) #undef M1 f1 = _mm256_cmp_ps(s1, _mm256_setzero_ps(), 0); s1 = _mm256_blendv_ps(s1, _mm256_set1_ps(1.0f), f1); f0 = _mm256_div_ps(s0, s1); f1 = _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(v0)); f0 = _mm256_sub_ps(f1, f0); f0 = _mm256_sub_ps(f0, _mm256_set1_ps(CENTERJSAMPLE)); _mm256_storeu_ps(fbuf + y * n, f0); } #elif 1 && defined(USE_SSE2) for (y = 0; y < n; y++) { __m128i v0, v1, v3, v4, v5, v6 = _mm_set1_epi16((int)range), v7 = _mm_setzero_si128(); __m128 f0, f1, f4, f5, s0 = _mm_setzero_ps(), s1 = s0, s2 = s0, s3 = s0; f4 = _mm_set1_ps(c0); f5 = _mm_set1_ps(c1); v0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)&image[y * stride])); #define M1(f4, x, y) \ v1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)&image[(y) * stride + x])); \ v4 = _mm_sub_epi16(v0, v1); v3 = _mm_srai_epi16(v4, 15); \ v5 = _mm_subs_epu16(v6, _mm_abs_epi16(v4)); \ M2(lo, s0, s1, f4) M2(hi, s2, s3, f4) #define M2(lo, s0, s1, f4) \ f0 = _mm_cvtepi32_ps(_mm_unpack##lo##_epi16(v5, v7)); \ f0 = _mm_mul_ps(f0, f0); f1 = _mm_mul_ps(f0, f4); \ f0 = _mm_mul_ps(f0, _mm_cvtepi32_ps(_mm_unpack##lo##_epi16(v4, v3))); \ f0 = _mm_mul_ps(f0, f1); f1 = _mm_mul_ps(f1, f1); \ s0 = _mm_add_ps(s0, f0); s1 = _mm_add_ps(s1, f1); M1(f5, -1, y-1) M1(f4, 0, y-1) M1(f5, 1, y-1) M1(f4, -1, y) M1(f4, 1, y) M1(f5, -1, y+1) M1(f4, 0, y+1) M1(f5, 1, y+1) #undef M1 #undef M2 #define M1(lo, s0, s1, x) \ f1 = _mm_cmpeq_ps(s1, _mm_setzero_ps()); \ f1 = _mm_and_ps(f1, _mm_set1_ps(1.0f)); \ f0 = _mm_div_ps(s0, _mm_or_ps(s1, f1)); \ f1 = _mm_cvtepi32_ps(_mm_unpack##lo##_epi16(v0, v7)); \ f0 = _mm_sub_ps(f1, f0); \ f0 = _mm_sub_ps(f0, _mm_set1_ps(CENTERJSAMPLE)); \ _mm_storeu_ps(fbuf + y * n + x, f0); M1(lo, s0, s1, 0) M1(hi, s2, s3, 4) #undef M1 } #else for (y = 0; y < n; y++) for (x = 0; x < n; x++) { #define M1(i, x, y) t0 = a - image[(y) * stride + x]; \ t = range - fabsf(t0); t = t < 0 ? 0 : t; t *= t; aw = c##i * t; \ a0 += t0 * t * aw; an += aw * aw; int a = image[(y)*stride+(x)]; float a0 = 0, an = 0, aw, t, t0; M1(1, x-1, y-1) M1(0, x, y-1) M1(1, x+1, y-1) M1(0, x-1, y) M1(0, x+1, y) M1(1, x-1, y+1) M1(0, x, y+1) M1(1, x+1, y+1) #undef M1 if (an > 0.0f) a -= a0 / an; fbuf[y * n + x] = a - CENTERJSAMPLE; } #endif fdct_clamp(fbuf, coef, quantval); goto end; } #if 1 && defined(USE_NEON) #define VINITD uint8x8_t i0, i1, i2; #define VDIFF(i) vst1q_u16((uint16_t*)temp + (i) * n, vsubl_u8(i0, i1)); #define VLDPIX(j, p) i##j = vld1_u8(p); #define VRIGHT(a, b) i##a = vext_u8(i##b, i##b, 1); #define VCOPY(a, b) i##a = i##b; #define VINIT \ int16x8_t v0, v5; uint16x8_t v6 = vdupq_n_u16(range); \ float32x4_t f0, f1, s0 = vdupq_n_f32(0), s1 = s0, s2 = s0, s3 = s0; #define VCORE \ v0 = vld1q_s16(temp + y * n); \ v5 = vreinterpretq_s16_u16(vqsubq_u16(v6, \ vreinterpretq_u16_s16(vabsq_s16(v0)))); \ VCORE1(low, s0, s1, tab) VCORE1(high, s2, s3, tab + 4) #define VCORE1(low, s0, s1, tab) \ f0 = vcvtq_f32_s32(vmovl_s16(vget_##low##_s16(v5))); \ f0 = vmulq_f32(f0, f0); f1 = vmulq_f32(f0, vld1q_f32(tab + y * n)); \ f0 = vmulq_f32(f0, vcvtq_f32_s32(vmovl_s16(vget_##low##_s16(v0)))); \ s0 = vmlaq_f32(s0, f0, f1); s1 = vmlaq_f32(s1, f1, f1); #ifdef __aarch64__ #define VFIN \ a2 = vaddvq_f32(vaddq_f32(s0, s2)); \ a3 = vaddvq_f32(vaddq_f32(s1, s3)); #else #define VFIN { \ float32x4x2_t p0; float32x2_t v0; \ p0 = vzipq_f32(vaddq_f32(s0, s2), vaddq_f32(s1, s3)); \ f0 = vaddq_f32(p0.val[0], p0.val[1]); \ v0 = vadd_f32(vget_low_f32(f0), vget_high_f32(f0)); \ a2 = vget_lane_f32(v0, 0); a3 = vget_lane_f32(v0, 1); \ } #endif #elif 1 && defined(USE_AVX512) #define VINCR 2 #define VINIT \ __m256i v4, v5, v6 = _mm256_set1_epi16(range); \ __m512 f0, f1, f4, s0 = _mm512_setzero_ps(), s1 = s0; #define VCORE \ v4 = _mm256_loadu_si256((__m256i*)&temp[y * n]); \ f4 = _mm512_load_ps(tab + y * n); \ v5 = _mm256_subs_epu16(v6, _mm256_abs_epi16(v4)); \ f0 = _mm512_cvtepi32_ps(_mm512_cvtepi16_epi32(v5)); \ f0 = _mm512_mul_ps(f0, f0); f1 = _mm512_mul_ps(f0, f4); \ f0 = _mm512_mul_ps(f0, _mm512_cvtepi32_ps(_mm512_cvtepi16_epi32(v4))); \ s0 = _mm512_fmadd_ps(f0, f1, s0); s1 = _mm512_fmadd_ps(f1, f1, s1); // "reduce_add" is not faster here, because it's a macro, not a single instruction // a2 = _mm512_reduce_add_ps(s0); a3 = _mm512_reduce_add_ps(s1); #define VFIN { __m256 s2, s3, f2; \ f0 = _mm512_shuffle_f32x4(s0, s1, 0x44); \ f1 = _mm512_shuffle_f32x4(s0, s1, 0xee); \ f0 = _mm512_add_ps(f0, f1); s2 = _mm512_castps512_ps256(f0); \ s3 = _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(f0), 1)); \ f2 = _mm256_permute2f128_ps(s2, s3, 0x20); \ f2 = _mm256_add_ps(f2, _mm256_permute2f128_ps(s2, s3, 0x31)); \ f2 = _mm256_add_ps(f2, _mm256_shuffle_ps(f2, f2, 0xee)); \ f2 = _mm256_add_ps(f2, _mm256_shuffle_ps(f2, f2, 0x55)); \ a2 = _mm256_cvtss_f32(f2); \ a3 = _mm_cvtss_f32(_mm256_extractf128_ps(f2, 1)); } #elif 1 && defined(USE_AVX2) #define VINIT \ __m128i v4, v5, v6 = _mm_set1_epi16(range); \ __m256 f0, f1, f4, s0 = _mm256_setzero_ps(), s1 = s0; #define VCORE \ v4 = _mm_loadu_si128((__m128i*)&temp[y * n]); \ f4 = _mm256_load_ps(tab + y * n); \ v5 = _mm_subs_epu16(v6, _mm_abs_epi16(v4)); \ f0 = _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(v5)); \ f0 = _mm256_mul_ps(f0, f0); f1 = _mm256_mul_ps(f0, f4); \ f0 = _mm256_mul_ps(f0, _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(v4))); \ s0 = _mm256_fmadd_ps(f0, f1, s0); s1 = _mm256_fmadd_ps(f1, f1, s1); #define VFIN \ f0 = _mm256_permute2f128_ps(s0, s1, 0x20); \ f1 = _mm256_permute2f128_ps(s0, s1, 0x31); \ f0 = _mm256_add_ps(f0, f1); \ f0 = _mm256_add_ps(f0, _mm256_shuffle_ps(f0, f0, 0xee)); \ f0 = _mm256_add_ps(f0, _mm256_shuffle_ps(f0, f0, 0x55)); \ a2 = _mm256_cvtss_f32(f0); \ a3 = _mm_cvtss_f32(_mm256_extractf128_ps(f0, 1)); #elif 1 && defined(USE_SSE2) #define VINIT \ __m128i v3, v4, v5, v6 = _mm_set1_epi16(range), v7 = _mm_setzero_si128(); \ __m128 f0, f1, s0 = _mm_setzero_ps(), s1 = s0, s2 = s0, s3 = s0; #define VCORE \ v4 = _mm_loadu_si128((__m128i*)&temp[y * n]); \ v3 = _mm_srai_epi16(v4, 15); \ v5 = _mm_subs_epu16(v6, _mm_abs_epi16(v4)); \ VCORE1(lo, s0, s1, tab) VCORE1(hi, s2, s3, tab + 4) #define VCORE1(lo, s0, s1, tab) \ f0 = _mm_cvtepi32_ps(_mm_unpack##lo##_epi16(v5, v7)); \ f0 = _mm_mul_ps(f0, f0); \ f1 = _mm_mul_ps(f0, _mm_load_ps(tab + y * n)); \ f0 = _mm_mul_ps(f0, _mm_cvtepi32_ps(_mm_unpack##lo##_epi16(v4, v3))); \ f0 = _mm_mul_ps(f0, f1); f1 = _mm_mul_ps(f1, f1); \ s0 = _mm_add_ps(s0, f0); s1 = _mm_add_ps(s1, f1); #define VFIN \ f0 = _mm_add_ps(s0, s2); f1 = _mm_add_ps(s1, s3); \ f0 = _mm_add_ps(_mm_unpacklo_ps(f0, f1), _mm_unpackhi_ps(f0, f1)); \ f0 = _mm_add_ps(f0, _mm_shuffle_ps(f0, f0, 0xee)); \ a2 = _mm_cvtss_f32(f0); \ a3 = _mm_cvtss_f32(_mm_shuffle_ps(f0, f0, 0x55)); #elif !defined(NO_SIMD) // vector code simulation #define VINITD JSAMPLE *p0, *p1, *p2; #define VDIFF(i) for (x = 0; x < n; x++) temp[(i) * n + x] = p0[x] - p1[x]; #define VLDPIX(i, a) p##i = a; #define VRIGHT(a, b) p##a = p##b + 1; #define VCOPY(a, b) p##a = p##b; #define VINIT int j; float a0, a1, f0, sum[DCTSIZE * 2]; \ for (j = 0; j < n * 2; j++) sum[j] = 0; #define VCORE \ for (j = 0; j < n; j++) { \ a0 = temp[y * n + j]; a1 = tab[y * n + j]; \ f0 = (float)range - fabsf(a0); if (f0 < 0) f0 = 0; f0 *= f0; \ a0 *= f0; a1 *= f0; a0 *= a1; a1 *= a1; \ sum[j] += a0; sum[j + n] += a1; \ } #define VCORE1(sum) \ ((sum[0] + sum[4]) + (sum[1] + sum[5])) + \ ((sum[2] + sum[6]) + (sum[3] + sum[7])); #define VFIN a2 += VCORE1(sum) a3 += VCORE1((sum+8)) #endif for (y = 0; y < n; y++) { border[y + n * 2] = image[y - stride]; border[y + n * 3] = image[y + stride * n]; border[y + n * 4] = image[y * stride - 1]; border[y + n * 5] = image[y * stride + n]; } for (k = n * n - 1; k > 0; k--) { int i = jpegqs_natural_order[k]; float *tab = tables[i], a2 = 0, a3 = 0; int range = quantval[i] * 2; if (need_refresh && zigzag_refresh[i]) { idct_islow(coef, buf, n); need_refresh = 0; #ifdef VINIT for (y = 0; y < n; y++) { border[y] = buf[y * n]; border[y + n] = buf[y * n + n - 1]; } #ifndef VINITD // same for SSE2, AVX2, AVX512 #define VINITD __m128i v0, v1, v2; #define VDIFF(i) _mm_storeu_si128((__m128i*)&temp[(i) * n], _mm_sub_epi16(v0, v1)); #define VLDPIX(i, p) v##i = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)(p))); #define VRIGHT(a, b) v##a = _mm_bsrli_si128(v##b, 2); #define VCOPY(a, b) v##a = v##b; #endif { VINITD VLDPIX(0, buf) VLDPIX(1, border + n * 2) VDIFF(n) VRIGHT(1, 0) VDIFF(0) for (y = 1; y < n; y++) { VLDPIX(1, buf + y * n) VDIFF(y + n + 3) VCOPY(0, 1) VRIGHT(1, 0) VDIFF(y) } VLDPIX(1, border + n * 3) VDIFF(n + 1) VLDPIX(0, border) VLDPIX(1, border + n * 4) VDIFF(n + 2) VLDPIX(0, border + n) VLDPIX(1, border + n * 5) VDIFF(n + 3) if (flags & JPEGQS_DIAGONALS) { VLDPIX(0, buf) for (y = 0; y < n - 1; y++) { VLDPIX(2, buf + y * n + n) VRIGHT(1, 2) VDIFF(n * 2 + 4 + y * 2) VRIGHT(0, 0) VCOPY(1, 2) VDIFF(n * 2 + 4 + y * 2 + 1) VCOPY(0, 2) } } } #undef VINITD #undef VLDPIX #undef VRIGHT #undef VCOPY #undef VDIFF #endif } #ifdef VINIT #ifndef VINCR #define VINCR 1 #endif {
int y0 = i & (n - 1) ? 0 : n; int y1 = (i >= n ? n - 1 : 0) + n + 4; VINIT for (y = y0; y < y1; y += VINCR) { VCORE } if (flags & JPEGQS_DIAGONALS) { y0 = n * 2 + 4; y1 = y0 + (n - 1) * 2; for (y = y0; y < y1; y += VINCR) { VCORE } } VFIN } #undef VINCR #undef VINIT #undef VCORE #ifdef VCORE1 #undef VCORE1 #endif #undef VFIN #else { int p; float a0, a1, t; #define CORE t = (float)range - fabsf(a0); \ t = t < 0 ? 0 : t; t *= t; a0 *= t; a1 *= t; a2 += a0 * a1; a3 += a1 * a1; #define M1(a, b) \ for (y = 0; y < n - 1 + a; y++) \ for (x = 0; x < n - 1 + b; x++) { p = y * n + x; \ a0 = buf[p] - buf[(y + b) * n + x + a]; a1 = tab[p]; CORE } #define M2(z, i) for (z = 0; z < n; z++) { p = y * n + x; \ a0 = buf[p] - border[i * n + z]; a1 = *tab++; CORE } if (i & (n - 1)) M1(1, 0) tab += n * n; y = 0; M2(x, 2) y = n - 1; M2(x, 3) x = 0; M2(y, 4) x = n - 1; M2(y, 5) if (i > (n - 1)) M1(0, 1) if (flags & JPEGQS_DIAGONALS) { tab += n * n; for (y = 0; y < n - 1; y++, tab += n * 2) for (x = 0; x < n - 1; x++) { p = y * n + x; a0 = buf[p] - buf[p + n + 1]; a1 = tab[x]; CORE a0 = buf[p + 1] - buf[p + n]; a1 = tab[x + n]; CORE } } #undef M2 #undef M1 #undef CORE } #endif a2 = a2 / a3; range = roundf(a2); if (range) { int div = quantval[i], coef1 = coef[i], add; int dh, dl, d0 = (div - 1) >> 1, d1 = div >> 1; int a0 = (coef1 + (coef1 < 0 ? -d1 : d1)) / div * div; dh = a0 + (a0 < 0 ? d1 : d0); dl = a0 - (a0 > 0 ? d1 : d0); add = coef1 - range; if (add > dh) add = dh; if (add < dl) add = dl; coef[i] = add; need_refresh |= add ^ coef1; } } end: if (flags & JPEGQS_NO_REBALANCE) return; if (!luma && flags & JPEGQS_NO_REBALANCE_UV) return; #if 1 && defined(USE_NEON) if (sizeof(quantval[0]) == 2 && sizeof(quantval[0]) == sizeof(coef[0])) { JCOEF orig[DCTSIZE2]; int coef0 = coef[0]; int32_t m0, m1; int32x4_t s0 = vdupq_n_s32(0), s1 = s0; coef[0] = 0; for (k = 0; k < DCTSIZE2; k += 8) { int16x8_t v0, v1, v2, v3; float32x4_t f0, f3, f4, f5; int32x4_t v4; v1 = vld1q_s16((int16_t*)&quantval[k]); v0 = vld1q_s16((int16_t*)&coef[k]); v3 = vshrq_n_s16(v0, 15); v2 = veorq_s16(vaddq_s16(vshrq_n_s16(v1, 1), v3), v3); v2 = vaddq_s16(v0, v2); f3 = vdupq_n_f32(0.5f); f5 = vnegq_f32(f3); #define M1(low, f0) \ v4 = vmovl_s16(vget_##low##_s16(v2)); \ f0 = vbslq_f32(vreinterpretq_u32_s32(vshrq_n_s32(v4, 31)), f5, f3); \ f4 = vcvtq_f32_s32(vmovl_s16(vget_##low##_s16(v1))); \ f0 = vdivq_f32(vaddq_f32(vcvtq_f32_s32(v4), f0), f4); M1(low, f0) M1(high, f3) #undef M1 v2 = vcombine_s16(vmovn_s32(vcvtq_s32_f32(f0)), vmovn_s32(vcvtq_s32_f32(f3))); v2 = vmulq_s16(v2, v1); vst1q_s16((int16_t*)&orig[k], v2); #define M1(low) \ s0 = vmlal_s16(s0, vget_##low##_s16(v0), vget_##low##_s16(v2)); \ s1 = vmlal_s16(s1, vget_##low##_s16(v2), vget_##low##_s16(v2)); M1(low) M1(high) #undef M1 } { #ifdef __aarch64__ m0 = vaddvq_s32(s0); m1 = vaddvq_s32(s1); #else int32x4x2_t v0 = vzipq_s32(s0, s1); int32x2_t v1; s0 = vaddq_s32(v0.val[0], v0.val[1]); v1 = vadd_s32(vget_low_s32(s0), vget_high_s32(s0)); m0 = vget_lane_s32(v1, 0); m1 = vget_lane_s32(v1, 1); #endif } if (m1 > m0) { int mul = (((int64_t)m1 << 13) + (m0 >> 1)) / m0; int16x8_t v4 = vdupq_n_s16(mul); for (k = 0; k < DCTSIZE2; k += 8) { int16x8_t v0, v1, v2, v3; v1 = vld1q_s16((int16_t*)&quantval[k]); v2 = vld1q_s16((int16_t*)&coef[k]); v2 = vqrdmulhq_s16(vshlq_n_s16(v2, 2), v4); v0 = vld1q_s16((int16_t*)&orig[k]); v3 = vaddq_s16(v1, vreinterpretq_s16_u16(vcgeq_s16(v0, vdupq_n_s16(0)))); v2 = vminq_s16(v2, vaddq_s16(v0, vshrq_n_s16(v3, 1))); v3 = vaddq_s16(v1, vreinterpretq_s16_u16(vcleq_s16(v0, 
vdupq_n_s16(0)))); v2 = vmaxq_s16(v2, vsubq_s16(v0, vshrq_n_s16(v3, 1))); vst1q_s16((int16_t*)&coef[k], v2); } } coef[0] = coef0; } else #elif 1 && defined(USE_AVX2) if (sizeof(quantval[0]) == 2 && sizeof(quantval[0]) == sizeof(coef[0])) { JCOEF orig[DCTSIZE2]; int coef0 = coef[0]; int32_t m0, m1; __m128i s0 = _mm_setzero_si128(), s1 = s0; coef[0] = 0; for (k = 0; k < DCTSIZE2; k += 8) { __m128i v0, v1, v2, v3; __m256i v4; __m256 f0; v1 = _mm_loadu_si128((__m128i*)&quantval[k]); v0 = _mm_loadu_si128((__m128i*)&coef[k]); v2 = _mm_srli_epi16(v1, 1); v3 = _mm_srai_epi16(v0, 15); v2 = _mm_xor_si128(_mm_add_epi16(v2, v3), v3); v2 = _mm_add_epi16(v0, v2); f0 = _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(v2)); f0 = _mm256_div_ps(f0, _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(v1))); v4 = _mm256_cvttps_epi32(f0); v2 = _mm_packs_epi32(_mm256_castsi256_si128(v4), _mm256_extractf128_si256(v4, 1)); v2 = _mm_mullo_epi16(v2, v1); _mm_storeu_si128((__m128i*)&orig[k], v2); s0 = _mm_add_epi32(s0, _mm_madd_epi16(v0, v2)); s1 = _mm_add_epi32(s1, _mm_madd_epi16(v2, v2)); } s0 = _mm_hadd_epi32(s0, s1); s0 = _mm_hadd_epi32(s0, s0); m0 = _mm_cvtsi128_si32(s0); m1 = _mm_extract_epi32(s0, 1); if (m1 > m0) { int mul = (((int64_t)m1 << 13) + (m0 >> 1)) / m0; __m256i v4 = _mm256_set1_epi16(mul); for (k = 0; k < DCTSIZE2; k += 16) { __m256i v0, v1, v2, v3; v1 = _mm256_loadu_si256((__m256i*)&quantval[k]); v2 = _mm256_loadu_si256((__m256i*)&coef[k]); v2 = _mm256_mulhrs_epi16(_mm256_slli_epi16(v2, 2), v4); v0 = _mm256_loadu_si256((__m256i*)&orig[k]); v1 = _mm256_add_epi16(v1, _mm256_set1_epi16(-1)); v3 = _mm256_sub_epi16(v1, _mm256_srai_epi16(v0, 15)); v2 = _mm256_min_epi16(v2, _mm256_add_epi16(v0, _mm256_srai_epi16(v3, 1))); v3 = _mm256_sub_epi16(v1, _mm256_cmpgt_epi16(v0, _mm256_setzero_si256())); v2 = _mm256_max_epi16(v2, _mm256_sub_epi16(v0, _mm256_srai_epi16(v3, 1))); _mm256_storeu_si256((__m256i*)&coef[k], v2); } } coef[0] = coef0; } else #elif 1 && defined(USE_SSE2) if (sizeof(quantval[0]) == 2 && sizeof(quantval[0]) == sizeof(coef[0])) { JCOEF orig[DCTSIZE2]; int coef0 = coef[0]; int32_t m0, m1; __m128i s0 = _mm_setzero_si128(), s1 = s0; coef[0] = 0; for (k = 0; k < DCTSIZE2; k += 8) { __m128i v0, v1, v2, v3, v7; __m128 f0, f2, f4; v1 = _mm_loadu_si128((__m128i*)&quantval[k]); v0 = _mm_loadu_si128((__m128i*)&coef[k]); v2 = _mm_srli_epi16(v1, 1); v3 = _mm_srai_epi16(v0, 15); v2 = _mm_xor_si128(_mm_add_epi16(v2, v3), v3); v2 = _mm_add_epi16(v0, v2); v7 = _mm_setzero_si128(); v3 = _mm_srai_epi16(v2, 15); #define M1(lo, f0) \ f4 = _mm_cvtepi32_ps(_mm_unpack##lo##_epi16(v1, v7)); \ f0 = _mm_cvtepi32_ps(_mm_unpack##lo##_epi16(v2, v3)); \ f0 = _mm_div_ps(f0, f4); M1(lo, f0) M1(hi, f2) #undef M1 v2 = _mm_packs_epi32(_mm_cvttps_epi32(f0), _mm_cvttps_epi32(f2)); v2 = _mm_mullo_epi16(v2, v1); _mm_storeu_si128((__m128i*)&orig[k], v2); s0 = _mm_add_epi32(s0, _mm_madd_epi16(v0, v2)); s1 = _mm_add_epi32(s1, _mm_madd_epi16(v2, v2)); } #ifdef USE_SSE4 s0 = _mm_hadd_epi32(s0, s1); s0 = _mm_hadd_epi32(s0, s0); m0 = _mm_cvtsi128_si32(s0); m1 = _mm_extract_epi32(s0, 1); #else s0 = _mm_add_epi32(_mm_unpacklo_epi32(s0, s1), _mm_unpackhi_epi32(s0, s1)); s0 = _mm_add_epi32(s0, _mm_bsrli_si128(s0, 8)); m0 = _mm_cvtsi128_si32(s0); m1 = _mm_cvtsi128_si32(_mm_bsrli_si128(s0, 4)); #endif if (m1 > m0) { int mul = (((int64_t)m1 << 13) + (m0 >> 1)) / m0; __m128i v4 = _mm_set1_epi16(mul); for (k = 0; k < DCTSIZE2; k += 8) { __m128i v0, v1, v2, v3 = _mm_set1_epi16(-1); v1 = _mm_loadu_si128((__m128i*)&quantval[k]); v2 = 
_mm_loadu_si128((__m128i*)&coef[k]); #ifdef USE_SSE4 v2 = _mm_mulhrs_epi16(_mm_slli_epi16(v2, 2), v4); #else v2 = _mm_mulhi_epi16(_mm_slli_epi16(v2, 4), v4); v2 = _mm_srai_epi16(_mm_sub_epi16(v2, v3), 1); #endif v0 = _mm_loadu_si128((__m128i*)&orig[k]); v1 = _mm_add_epi16(v1, v3); v3 = _mm_sub_epi16(v1, _mm_srai_epi16(v0, 15)); v2 = _mm_min_epi16(v2, _mm_add_epi16(v0, _mm_srai_epi16(v3, 1))); v3 = _mm_sub_epi16(v1, _mm_cmpgt_epi16(v0, _mm_setzero_si128())); v2 = _mm_max_epi16(v2, _mm_sub_epi16(v0, _mm_srai_epi16(v3, 1))); _mm_storeu_si128((__m128i*)&coef[k], v2); } } coef[0] = coef0; } else #endif { JCOEF orig[DCTSIZE2]; int64_t m0 = 0, m1 = 0; for (k = 1; k < DCTSIZE2; k++) { int div = quantval[k], coef1 = coef[k], d1 = div >> 1; int a0 = (coef1 + (coef1 < 0 ? -d1 : d1)) / div * div; orig[k] = a0; m0 += coef1 * a0; m1 += a0 * a0; } if (m1 > m0) { int mul = ((m1 << 13) + (m0 >> 1)) / m0; for (k = 1; k < DCTSIZE2; k++) { int div = quantval[k], coef1 = coef[k], add; int dh, dl, d0 = (div - 1) >> 1, d1 = div >> 1; int a0 = orig[k]; dh = a0 + (a0 < 0 ? d1 : d0); dl = a0 - (a0 > 0 ? d1 : d0); add = (coef1 * mul + 0x1000) >> 13; if (add > dh) add = dh; if (add < dl) add = dl; coef[k] = add; } } } }
0
[]
jpeg-quantsmooth
3ab3838e610d361b71d937738edf156505c59c58
206,505,863,500,643,800,000,000,000,000,000,000,000
823
avoid divide-by-zero on damaged JPEG files. Also fixed misleading indentation warnings from GCC.
Status ComputeConv2DDimension(const Conv2DParameters& params,
                              const Tensor& input, const Tensor& filter,
                              Conv2DDimensions* dimensions) {
  // Check that 2D convolution input and filter have exactly 4 dimensions.
  TF_REQUIRES(input.dims() == 4,
              errors::InvalidArgument("input must be 4-dimensional",
                                      input.shape().DebugString()));
  TF_REQUIRES(filter.dims() == 4,
              errors::InvalidArgument("filter must be 4-dimensional: ",
                                      filter.shape().DebugString()));
  for (int i = 0; i < 3; i++) {
    TF_REQUIRES(
        FastBoundsCheck(filter.dim_size(i), std::numeric_limits<int>::max()),
        errors::InvalidArgument("filter too large"));
  }

  // The last dimension for input is in_depth. Check that it is the same as the
  // filter's in_depth or it is evenly divisible by filter's in_depth.
  const int64 in_depth_raw = GetTensorDim(input, params.data_format, 'C');
  const int64 patch_depth_raw = filter.dim_size(2);
  TF_REQUIRES(FastBoundsCheck(in_depth_raw, std::numeric_limits<int>::max()),
              errors::InvalidArgument("Input depth too large"));
  TF_REQUIRES(FastBoundsCheck(patch_depth_raw, std::numeric_limits<int>::max()),
              errors::InvalidArgument("Patch depth too large"));
  const int in_depth = static_cast<int>(in_depth_raw);
  const int patch_depth = static_cast<int>(patch_depth_raw);
  TF_REQUIRES(patch_depth > 0,
              errors::InvalidArgument(
                  "filter depth must be stricly positive, got ", patch_depth));
  TF_REQUIRES(in_depth % patch_depth == 0,
              errors::InvalidArgument(
                  "input depth must be evenly divisible by filter depth: ",
                  in_depth, " vs ", patch_depth));

  // The last dimension for filter is out_depth.
  const int out_depth = static_cast<int>(filter.dim_size(3));

  // The second dimension for input is rows/height.
  // The first dimension for filter is rows/height.
  const int64 input_rows_raw = GetTensorDim(input, params.data_format, 'H');
  TF_REQUIRES(FastBoundsCheck(input_rows_raw, std::numeric_limits<int>::max()),
              errors::InvalidArgument("Input rows too large"));
  const int input_rows = static_cast<int>(input_rows_raw);
  const int filter_rows = static_cast<int>(filter.dim_size(0));

  // The third dimension for input is columns/width.
  // The second dimension for filter is columns/width.
  const int64 input_cols_raw = GetTensorDim(input, params.data_format, 'W');
  TF_REQUIRES(FastBoundsCheck(input_cols_raw, std::numeric_limits<int>::max()),
              errors::InvalidArgument("Input cols too large"));
  const int input_cols = static_cast<int>(input_cols_raw);
  const int filter_cols = static_cast<int>(filter.dim_size(1));

  // The first dimension for input is batch.
  const int64 batch_raw = GetTensorDim(input, params.data_format, 'N');
  TF_REQUIRES(FastBoundsCheck(batch_raw, std::numeric_limits<int>::max()),
              errors::InvalidArgument("batch is too large"));
  const int batch = static_cast<int>(batch_raw);

  // Take the stride and dilation from the second and third dimensions only (we
  // do not support striding or dilation on the batch or depth dimension).
  const int stride_rows = GetTensorDim(params.strides, params.data_format, 'H');
  const int stride_cols = GetTensorDim(params.strides, params.data_format, 'W');
  const int dilation_rows =
      GetTensorDim(params.dilations, params.data_format, 'H');
  const int dilation_cols =
      GetTensorDim(params.dilations, params.data_format, 'W');

  int64 pad_rows_before, pad_rows_after, pad_cols_before, pad_cols_after;
  if (params.padding == Padding::EXPLICIT) {
    GetExplicitPaddingForDim(params.explicit_paddings, params.data_format, 'H',
                             &pad_rows_before, &pad_rows_after);
    GetExplicitPaddingForDim(params.explicit_paddings, params.data_format, 'W',
                             &pad_cols_before, &pad_cols_after);
  }

  // Compute windowed output sizes for rows and columns.
  int64 out_rows = 0, out_cols = 0;
  TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerboseV2(
      input_rows, filter_rows, dilation_rows, stride_rows, params.padding,
      &out_rows, &pad_rows_before, &pad_rows_after));
  TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerboseV2(
      input_cols, filter_cols, dilation_cols, stride_cols, params.padding,
      &out_cols, &pad_cols_before, &pad_cols_after));

  dimensions->batch = batch;
  dimensions->input_rows = input_rows;
  dimensions->input_cols = input_cols;
  dimensions->in_depth = in_depth;
  dimensions->filter_rows = filter_rows;
  dimensions->filter_cols = filter_cols;
  dimensions->patch_depth = patch_depth;
  dimensions->out_depth = out_depth;
  dimensions->stride_rows = stride_rows;
  dimensions->stride_cols = stride_cols;
  dimensions->dilation_rows = dilation_rows;
  dimensions->dilation_cols = dilation_cols;
  dimensions->out_rows = out_rows;
  dimensions->out_cols = out_cols;
  dimensions->pad_rows_before = pad_rows_before;
  dimensions->pad_rows_after = pad_rows_after;
  dimensions->pad_cols_before = pad_cols_before;
  dimensions->pad_cols_after = pad_cols_after;

  return Status::OK();
}
0
[ "CWE-369" ]
tensorflow
b12aa1d44352de21d1a6faaf04172d8c2508b42b
161,405,276,803,893,140,000,000,000,000,000,000,000
106
Fix one more FPE. PiperOrigin-RevId: 369346568 Change-Id: I840fd575962adc879713a4c9cc59e6da3331caa7
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
    loff_t loopsize;

    /* Compute loopsize in bytes */
    loopsize = i_size_read(file->f_mapping->host);
    if (offset > 0)
        loopsize -= offset;
    /* offset is beyond i_size, weird but possible */
    if (loopsize < 0)
        return 0;

    if (sizelimit > 0 && sizelimit < loopsize)
        loopsize = sizelimit;
    /*
     * Unfortunately, if we want to do I/O on the device,
     * the number of 512-byte sectors has to fit into a sector_t.
     */
    return loopsize >> 9;
}
0
[ "CWE-416", "CWE-362" ]
linux
ae6650163c66a7eff1acd6eb8b0f752dcfa8eba5
10,107,777,578,245,203,000,000,000,000,000,000,000
20
loop: fix concurrent lo_open/lo_release 范龙飞 reports that KASAN can report a use-after-free in __lock_acquire. The reason is due to insufficient serialization in lo_release(), which will continue to use the loop device even after it has decremented the lo_refcnt to zero. In the meantime, another process can come in, open the loop device again as it is being shut down. Confusion ensues. Reported-by: 范龙飞 <[email protected]> Signed-off-by: Linus Torvalds <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
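The race described in this message follows a general open/release shape. Below is a minimal user-space sketch of the serialization the fix restores — names (dev_sketch, dev_open, dev_release) are hypothetical and pthreads stand in for kernel locking; this is not the loop driver code.

#include <pthread.h>

struct dev_sketch {
    pthread_mutex_t lock;
    int refcnt;
    int shutting_down;
};

/* Open fails cleanly if a concurrent release has already begun teardown. */
int dev_open(struct dev_sketch *d)
{
    int ret = 0;
    pthread_mutex_lock(&d->lock);
    if (d->shutting_down)
        ret = -1;                 /* would be -ENXIO in the kernel */
    else
        d->refcnt++;
    pthread_mutex_unlock(&d->lock);
    return ret;
}

/* All teardown happens under the lock, so open cannot race past it,
 * and the object is never touched after the last reference is dropped. */
void dev_release(struct dev_sketch *d)
{
    pthread_mutex_lock(&d->lock);
    if (--d->refcnt == 0)
        d->shutting_down = 1;     /* real code would tear down state here */
    pthread_mutex_unlock(&d->lock);
}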
diff_fold_update(diff_T *dp, int skip_idx)
{
    int    i;
    win_T  *wp;

    FOR_ALL_WINDOWS(wp)
        for (i = 0; i < DB_COUNT; ++i)
            if (curtab->tp_diffbuf[i] == wp->w_buffer && i != skip_idx)
                foldUpdate(wp, dp->df_lnum[i],
                           dp->df_lnum[i] + dp->df_count[i]);
}
0
[ "CWE-787" ]
vim
c101abff4c6756db4f5e740fde289decb9452efa
324,795,390,470,704,650,000,000,000,000,000,000,000
11
patch 8.2.5164: invalid memory access after diff buffer manipulations Problem: Invalid memory access after diff buffer manipulations. Solution: Use zero offset when change removes all lines in a diff block.
textw_text_release(gs_text_enum_t *pte, client_name_t cname)
{
    textw_text_enum_t *const penum = (textw_text_enum_t *)pte;
    gx_device_txtwrite_t *const tdev = (gx_device_txtwrite_t *) pte->dev;

    /* Free the working buffer where the Unicode was assembled from the enumerated text */
    if (penum->TextBuffer)
        gs_free(tdev->memory, penum->TextBuffer, 1, penum->TextBufferIndex,
                "txtwrite free temporary text buffer");
    if (penum->Widths)
        gs_free(tdev->memory, penum->Widths, sizeof(float), pte->text.size,
                "txtwrite free temporary widths array");
    /* If this is copied away when we complete the text enumeration succesfully, then
     * we set the pointer to NULL, if we get here with it non-NULL , then there was
     * an error.
     */
    if (penum->text_state)
        gs_free(tdev->memory, penum->text_state, 1, sizeof(penum->text_state),
                "txtwrite free text state");
    gs_text_release(pte, cname);
}
0
[ "CWE-476" ]
ghostpdl
407c98a38c3a6ac1681144ed45cc2f4fc374c91f
24,869,455,362,180,790,000,000,000,000,000,000,000
19
txtwrite - guard against using GS_NO_GLYPH to retrieve Unicode values Bug 701822 "Segmentation fault at psi/iname.c:296 in names_index_ref" Avoid using a glyph with the value GS_NO_GLYPH to retrieve a glyph name or Unicode code point from the glyph ID, as this is not a valid ID.
static int nfs4_do_reclaim(struct nfs_client *clp,
                           const struct nfs4_state_recovery_ops *ops)
{
    struct rb_node *pos;
    int status = 0;

restart:
    spin_lock(&clp->cl_lock);
    for (pos = rb_first(&clp->cl_state_owners); pos != NULL;
         pos = rb_next(pos)) {
        struct nfs4_state_owner *sp =
            rb_entry(pos, struct nfs4_state_owner, so_client_node);
        if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
            continue;
        atomic_inc(&sp->so_count);
        spin_unlock(&clp->cl_lock);
        status = nfs4_reclaim_open_state(sp, ops);
        if (status < 0) {
            set_bit(ops->owner_flag_bit, &sp->so_flags);
            nfs4_put_state_owner(sp);
            nfs4_recovery_handle_error(clp, status);
            return status;
        }
        nfs4_put_state_owner(sp);
        goto restart;
    }
    spin_unlock(&clp->cl_lock);
    return status;
}
0
[ "CWE-703" ]
linux
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
127,303,402,935,524,220,000,000,000,000,000,000,000
26
NFSv4: Convert the open and close ops to use fmode Signed-off-by: Trond Myklebust <[email protected]>
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
    return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}
0
[ "CWE-415" ]
linux
4397f04575c44e1440ec2e49b6302785c95fd2f8
117,589,548,596,063,600,000,000,000,000,000,000,000
4
tracing: Fix possible double free on failure of allocating trace buffer Jing Xia and Chunyan Zhang reported that on failing to allocate part of the tracing buffer, memory is freed, but the pointers that point to them are not initialized back to NULL, and later paths may try to free the freed memory again. Jing and Chunyan fixed one of the locations that does this, but missed a spot. Link: http://lkml.kernel.org/r/[email protected] Cc: [email protected] Fixes: 737223fbca3b1 ("tracing: Consolidate buffer allocation code") Reported-by: Jing Xia <[email protected]> Reported-by: Chunyan Zhang <[email protected]> Signed-off-by: Steven Rostedt (VMware) <[email protected]>
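A minimal illustration, not the tracing code itself, of the pattern this fix enforces: after freeing on a failure path, the pointers must be reset so a later cleanup pass cannot free them again.

#include <stdlib.h>

struct buf_pair {
    void *a;
    void *b;
};

/* Free what was allocated and NULL the pointers, so a subsequent
 * cleanup of the same struct is a harmless no-op. */
static void free_pair(struct buf_pair *p)
{
    free(p->a);
    p->a = NULL;   /* without this, calling free_pair() twice double-frees */
    free(p->b);
    p->b = NULL;
}

int main(void)
{
    struct buf_pair p = { malloc(16), malloc(16) };
    free_pair(&p);
    free_pair(&p);   /* safe only because the pointers were reset */
    return 0;
}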
static int netbk_count_requests(struct xenvif *vif,
                                struct xen_netif_tx_request *first,
                                struct xen_netif_tx_request *txp,
                                int work_to_do)
{
    RING_IDX cons = vif->tx.req_cons;
    int frags = 0;

    if (!(first->flags & XEN_NETTXF_more_data))
        return 0;

    do {
        if (frags >= work_to_do) {
            netdev_err(vif->dev, "Need more frags\n");
            netbk_fatal_tx_err(vif);
            return -frags;
        }

        if (unlikely(frags >= MAX_SKB_FRAGS)) {
            netdev_err(vif->dev, "Too many frags\n");
            netbk_fatal_tx_err(vif);
            return -frags;
        }

        memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
               sizeof(*txp));
        if (txp->size > first->size) {
            netdev_err(vif->dev, "Frag is bigger than frame.\n");
            netbk_fatal_tx_err(vif);
            return -frags;
        }

        first->size -= txp->size;
        frags++;

        if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
            netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
                       txp->offset, txp->size);
            netbk_fatal_tx_err(vif);
            return -frags;
        }
    } while ((txp++)->flags & XEN_NETTXF_more_data);

    return frags;
}
0
[ "CWE-399" ]
linux
7d5145d8eb2b9791533ffe4dc003b129b9696c48
308,599,153,963,290,140,000,000,000,000,000,000,000
44
xen/netback: don't leak pages on failure in xen_netbk_tx_check_gop. Signed-off-by: Matthew Daley <[email protected]> Reviewed-by: Konrad Rzeszutek Wilk <[email protected]> Acked-by: Ian Campbell <[email protected]> Acked-by: Jan Beulich <[email protected]> Signed-off-by: David S. Miller <[email protected]>
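The leak class this commit addresses is the classic partial-setup one. A hedged sketch of the unwind pattern — malloc stands in for acquiring a grant-mapped page, and map_all is a hypothetical name, not the Xen function:

#include <stdlib.h>

/* When setup fails partway through, everything acquired so far must be
 * released before returning, otherwise the earlier pages leak. */
static int map_all(void **pages, int n)
{
    int i;
    for (i = 0; i < n; i++) {
        pages[i] = malloc(4096);      /* stands in for acquiring a page */
        if (!pages[i])
            goto unwind;
    }
    return 0;
unwind:
    while (i-- > 0) {                 /* release what was already mapped */
        free(pages[i]);
        pages[i] = NULL;
    }
    return -1;
}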
int dns_server_ifindex(const DnsServer *s) {
    assert(s);

    /* The link ifindex always takes precedence */
    if (s->link)
        return s->link->ifindex;

    if (s->ifindex > 0)
        return s->ifindex;

    return 0;
}
0
[ "CWE-416" ]
systemd
904dcaf9d4933499f8334859f52ea8497f2d24ff
149,373,525,069,830,300,000,000,000,000,000,000,000
12
resolved: take particular care when detaching DnsServer from its default stream DnsStream and DnsServer have a symbiotic relationship: one DnsStream is the current "default" stream of the server (and thus reffed by it), but each stream also refs the server it is connected to. This cyclic dependency can result in weird situations: when one is destroyed/unlinked/stopped it needs to unregister itself from the other, but doing this will trigger unregistration of the other. Hence, let's make sure we unregister the stream from the server before destroying it, to break this cycle. Most likely fixes: #10725
static void _invoke_callback(pmixp_coll_ring_ctx_t *coll_ctx)
{
    pmixp_coll_ring_cbdata_t *cbdata;
    char *data;
    size_t data_sz;
    pmixp_coll_t *coll = _ctx_get_coll(coll_ctx);

    if (!coll->cbfunc)
        return;

    data = get_buf_data(coll_ctx->ring_buf);
    data_sz = get_buf_offset(coll_ctx->ring_buf);
    cbdata = xmalloc(sizeof(pmixp_coll_ring_cbdata_t));

    cbdata->coll = coll;
    cbdata->coll_ctx = coll_ctx;
    cbdata->buf = coll_ctx->ring_buf;
    cbdata->seq = coll_ctx->seq;
    pmixp_lib_modex_invoke(coll->cbfunc, SLURM_SUCCESS, data, data_sz,
                           coll->cbdata, _libpmix_cb, (void *)cbdata);
    /*
     * Clear callback info as we are not allowed to use it second time
     */
    coll->cbfunc = NULL;
    coll->cbdata = NULL;
}
0
[ "CWE-120" ]
slurm
c3142dd87e06621ff148791c3d2f298b5c0b3a81
139,884,576,600,355,390,000,000,000,000,000,000,000
27
PMIx - fix potential buffer overflows from use of unpackmem(). CVE-2020-27745.
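A minimal sketch of a bounds-checked unpack in the spirit of this fix: the declared payload length is validated against both the remaining input and the destination capacity before any copy. The function name and signature are illustrative, not the actual PMIx/Slurm API.

#include <string.h>
#include <stdint.h>

static int unpack_mem(const uint8_t *in, size_t in_len,
                      uint8_t *out, size_t out_cap, size_t *used)
{
    uint32_t declared;

    if (in_len < sizeof(declared))
        return -1;                      /* no room for the length field */
    memcpy(&declared, in, sizeof(declared));
    if (declared > in_len - sizeof(declared) || declared > out_cap)
        return -1;                      /* truncated or oversized payload */
    memcpy(out, in + sizeof(declared), declared);
    *used = sizeof(declared) + declared;
    return 0;
}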
static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
{
    struct net_device *ndev = qdev->ndev;
    int retval = 0;

    netif_stop_queue(ndev);
    netif_carrier_off(ndev);

    clear_bit(QL_ADAPTER_UP, &qdev->flags);
    clear_bit(QL_LINK_MASTER, &qdev->flags);

    ql_disable_interrupts(qdev);

    free_irq(qdev->pdev->irq, ndev);

    if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
        netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
        clear_bit(QL_MSI_ENABLED, &qdev->flags);
        pci_disable_msi(qdev->pdev);
    }

    del_timer_sync(&qdev->adapter_timer);

    napi_disable(&qdev->napi);

    if (do_reset) {
        int soft_reset;
        unsigned long hw_flags;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        if (ql_wait_for_drvr_lock(qdev)) {
            soft_reset = ql_adapter_reset(qdev);
            if (soft_reset) {
                netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
                           qdev->index);
            }
            netdev_err(ndev,
                       "Releasing driver lock via chip reset\n");
        } else {
            netdev_err(ndev,
                       "Could not acquire driver lock to do reset!\n");
            retval = -1;
        }
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
    }
    ql_free_mem_resources(qdev);
    return retval;
}
0
[ "CWE-401" ]
linux
1acb8f2a7a9f10543868ddd737e37424d5c36cf4
195,331,966,282,106,180,000,000,000,000,000,000,000
48
net: qlogic: Fix memory leak in ql_alloc_large_buffers In ql_alloc_large_buffers, a new skb is allocated via netdev_alloc_skb. This skb should be released if pci_dma_mapping_error fails. Fixes: 0f8ab89e825f ("qla3xxx: Check return code from pci_map_single() in ql_release_to_lrg_buf_free_list(), ql_populate_free_queue(), ql_alloc_large_buffers(), and ql3xxx_send()") Signed-off-by: Navid Emamdoost <[email protected]> Signed-off-by: David S. Miller <[email protected]>
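A hedged sketch of the fix pattern described above: if the mapping step after an allocation fails, the freshly allocated buffer must be released before bailing out. All names here are hypothetical stand-ins, not the qla3xxx driver code.

#include <stdlib.h>

struct skb_sketch { void *data; };

static struct skb_sketch *alloc_skb_sketch(void)
{
    return malloc(sizeof(struct skb_sketch));
}

static void free_skb_sketch(struct skb_sketch *s) { free(s); }

static int map_for_dma(struct skb_sketch *s)
{
    (void)s;
    return -1;   /* pretend the DMA mapping failed */
}

static int setup_rx_buffer(struct skb_sketch **out)
{
    struct skb_sketch *skb = alloc_skb_sketch();
    if (!skb)
        return -1;
    if (map_for_dma(skb) != 0) {
        free_skb_sketch(skb);   /* the missing step that caused the leak */
        return -1;
    }
    *out = skb;
    return 0;
}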
MagickExport ExceptionInfo *CloneExceptionInfo(ExceptionInfo *exception)
{
  ExceptionInfo
    *clone_exception;

  clone_exception=(ExceptionInfo *) AcquireMagickMemory(sizeof(*exception));
  if (clone_exception == (ExceptionInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  InitializeExceptionInfo(clone_exception);
  InheritException(clone_exception,exception);
  clone_exception->relinquish=MagickTrue;
  return(clone_exception);
}
0
[ "CWE-120", "CWE-787" ]
ImageMagick
e45e48b881038487d0bc94d92a16c1537616cc0a
20,408,637,291,731,429,000,000,000,000,000,000,000
13
Suspend exception processing if too many exceptions
entering_window(win_T *win)
{
    // Only matters for a prompt window.
    if (!bt_prompt(win->w_buffer))
        return;

    // When switching to a prompt buffer that was in Insert mode, don't stop
    // Insert mode, it may have been set in leaving_window().
    if (win->w_buffer->b_prompt_insert != NUL)
        stop_insert_mode = FALSE;

    // When entering the prompt window restart Insert mode if we were in Insert
    // mode when we left it.
    restart_edit = win->w_buffer->b_prompt_insert;
}
0
[ "CWE-416" ]
vim
ec66c41d84e574baf8009dbc0bd088d2bc5b2421
156,364,539,151,514,690,000,000,000,000,000,000,000
15
patch 8.1.2136: using freed memory with autocmd from fuzzer Problem: using freed memory with autocmd from fuzzer. (Dhiraj Mishra, Dominique Pelle) Solution: Avoid using "wp" after autocommands. (closes #5041)
static av_always_inline void paint_raw(uint8_t *dst, int w, int h,
                                       GetByteContext *gb, int bpp,
                                       int be, int stride)
{
    int i, j, p;

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            p = vmnc_get_pixel(gb, bpp, be);
            switch (bpp) {
            case 1:
                dst[i] = p;
                break;
            case 2:
                ((uint16_t*)dst)[i] = p;
                break;
            case 4:
                ((uint32_t*)dst)[i] = p;
                break;
            }
        }
        dst += stride;
    }
}
0
[ "CWE-703" ]
FFmpeg
6ba02602aa7fc7d38db582e75b8b093fb3c1608d
1,918,198,033,236,253,000,000,000,000,000,000,000
23
avcodec/vmnc: Check that rectangles are within the picture Prevents out of array accesses with CODEC_FLAG_EMU_EDGE Signed-off-by: Michael Niedermayer <[email protected]>
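An overflow-safe rectangle check in the spirit of this fix, written as a standalone helper rather than FFmpeg's actual code: the update is rejected unless it lies fully inside the picture, and the comparison is arranged so x + w can never wrap.

#include <stdint.h>

static int rect_in_picture(uint32_t x, uint32_t y, uint32_t w, uint32_t h,
                           uint32_t pic_w, uint32_t pic_h)
{
    if (x > pic_w || y > pic_h)
        return 0;
    if (w > pic_w - x || h > pic_h - y)   /* avoids x + w overflow */
        return 0;
    return 1;
}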
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
    binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n",
                 __func__, line);
    spin_unlock(&proc->inner_lock);
}
0
[ "CWE-416" ]
linux
7bada55ab50697861eee6bb7d60b41e68a961a9c
326,170,400,138,977,700,000,000,000,000,000,000,000
6
binder: fix race that allows malicious free of live buffer Malicious code can attempt to free buffers using the BC_FREE_BUFFER ioctl to binder. There are protections against a user freeing a buffer while in use by the kernel, however there was a window where BC_FREE_BUFFER could be used to free a recently allocated buffer that was not completely initialized. This resulted in a use-after-free detected by KASAN with a malicious test program. This window is closed by setting the buffer's allow_user_free attribute to 0 when the buffer is allocated or when the user has previously freed it instead of waiting for the caller to set it. The problem was that when the struct buffer was recycled, allow_user_free was stale and set to 1 allowing a free to go through. Signed-off-by: Todd Kjos <[email protected]> Acked-by: Arve Hjønnevåg <[email protected]> Cc: stable <[email protected]> # 4.14 Signed-off-by: Greg Kroah-Hartman <[email protected]>
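A minimal sketch, with hypothetical names, of the window-closing pattern the commit describes: the ownership flag is cleared the moment a buffer is allocated or reclaimed, instead of trusting a stale value left by a previous owner of the recycled allocation. This is not the binder code.

#include <stdlib.h>

struct buffer_sketch {
    int allow_user_free;   /* 1 only while the user legitimately owns it */
    /* ... payload ... */
};

static struct buffer_sketch *buffer_alloc(void)
{
    struct buffer_sketch *b = malloc(sizeof(*b));
    if (b)
        b->allow_user_free = 0;   /* closed window: not freeable yet */
    return b;
}

static int buffer_user_free(struct buffer_sketch *b)
{
    if (!b->allow_user_free)
        return -1;                /* reject frees of in-flight buffers */
    b->allow_user_free = 0;
    free(b);
    return 0;
}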
file_is_read_only (struct rw *rw)
{
    /* Permissions are hard, and this is only used as an early check
     * before the copy.  Proceed with the copy and fail if it fails.
     */
    return false;
}
0
[ "CWE-252" ]
libnbd
8d444b41d09a700c7ee6f9182a649f3f2d325abb
67,114,405,887,146,770,000,000,000,000,000,000,000
7
copy: CVE-2022-0485: Fail nbdcopy if NBD read or write fails nbdcopy has a nasty bug when performing multi-threaded copies using asynchronous nbd calls - it was blindly treating the completion of an asynchronous command as successful, rather than checking the *error parameter. This can result in the silent creation of a corrupted image in two different ways: when a read fails, we blindly wrote garbage to the destination; when a write fails, we did not flag that the destination was not written. Since nbdcopy already calls exit() on a synchronous read or write failure to a file, doing the same for an asynchronous op to an NBD server is the simplest solution. A nicer solution, but more invasive to code and thus not done here, might be to allow up to N retries of the transaction (in case the read or write failure was transient), or even having a mode where as much data is copied as possible (portions of the copy that failed would be logged on stderr, and nbdcopy would still fail with a non-zero exit status, but this would copy more than just stopping at the first error, as can be done with rsync or ddrescue). Note that since we rely on auto-retiring and do NOT call nbd_aio_command_completed, our completion callbacks must always return 1 (if they do not exit() first), even when acting on *error, so as not leave the command allocated until nbd_close. As such, there is no sane way to return an error to a manual caller of the callback, and therefore we can drop dead code that calls perror() and exit() if the callback "failed". It is also worth documenting the contract on when we must manually call the callback during the asynch_zero callback, so that we do not leak or double-free the command; thankfully, all the existing code paths were correct. The added testsuite script demonstrates several scenarios, some of which fail without the rest of this patch in place, and others which showcase ways in which sparse images can bypass errors. Once backports are complete, a followup patch on the main branch will edit docs/libnbd-security.pod with the mailing list announcement of the stable branch commit ids and release versions that incorporate this fix. Reported-by: Nir Soffer <[email protected]> Fixes: bc896eec4d ("copy: Implement multi-conn, multiple threads, multiple requests in flight.", v1.5.6) Fixes: https://bugzilla.redhat.com/2046194 Message-Id: <[email protected]> Acked-by: Richard W.M. Jones <[email protected]> Acked-by: Nir Soffer <[email protected]> [eblake: fix error message per Nir, tweak requires lines in unit test per Rich] Reviewed-by: Laszlo Ersek <[email protected]>
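A sketch of the completion-callback contract the message spells out: the callback must inspect the error out-parameter instead of assuming success, and must still return 1 so the auto-retired command is freed. The signature mirrors libnbd's completion-callback style but is illustrative, not a verbatim excerpt.

#include <stdio.h>
#include <stdlib.h>

static int read_completed(void *user_data, int *error)
{
    if (*error) {
        fprintf(stderr, "read failed: %d\n", *error);
        exit(EXIT_FAILURE);   /* fail loudly rather than write garbage */
    }
    /* ... mark the destination range as containing valid data ... */
    (void)user_data;
    return 1;                 /* always retire the command */
}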
static double mp_sort(_cimg_math_parser& mp) {
  double *const ptrd = &_mp_arg(1) + 1;
  const double *const ptrs = &_mp_arg(2) + 1;
  const bool is_increasing = (bool)_mp_arg(4);
  const unsigned int
    siz = (unsigned int)mp.opcode[3],
    nb_elts = mp.opcode[5]==~0U?siz:(unsigned int)_mp_arg(5),
    siz_elt = (unsigned int)_mp_arg(6);
  const ulongT sn = siz_elt*nb_elts;
  if (sn>siz || siz_elt<1)
    throw CImgArgumentException("[" cimg_appname "_math_parser] CImg<%s>: Function 'sort()': "
                                "Arguments 'nb_elts=%g' and 'siz_elt=%g' are invalid "
                                "for sorting a vector of size %u.",
                                mp.imgin.pixel_type(),_mp_arg(5),_mp_arg(6),siz);
  CImg<doubleT>(ptrd,siz_elt,nb_elts,1,1,true) = CImg<doubleT>(ptrs,siz_elt,nb_elts,1,1,true).
    get_sort(is_increasing,siz_elt>1?'y':0);
  if (sn<siz)
    CImg<doubleT>(ptrd + sn,siz - sn,1,1,1,true) = CImg<doubleT>(ptrs + sn,siz - sn,1,1,1,true);
  return cimg::type<double>::nan();
}
0
[ "CWE-770" ]
cimg
619cb58dd90b4e03ac68286c70ed98acbefd1c90
130,993,761,580,095,480,000,000,000,000,000,000,000
19
CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file do not exceed file size.
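A standalone, overflow-safe version of the check this commit describes — the pixel payload implied by the header must fit in what the file actually contains. The helper name and parameters are illustrative, not CImg's code.

#include <stdint.h>

static int dims_fit_file(uint32_t w, uint32_t h, uint32_t bytes_per_pixel,
                         uint64_t file_size)
{
    uint64_t row;

    if (w == 0 || h == 0 || bytes_per_pixel == 0)
        return 0;
    row = (uint64_t)w * bytes_per_pixel;   /* fits in 64 bits */
    /* row <= file_size / h  is equivalent to  row * h <= file_size,
     * without risking 64-bit overflow in the multiplication. */
    return row <= file_size / h;
}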
RZ_API ut64 rz_dyldcache_get_slide(RzDyldCache *cache) {
    rz_return_val_if_fail(cache, 0);
    if (!cache->rebase_infos || !cache->rebase_infos->length) {
        return 0;
    }

    size_t i;
    for (i = 0; i < cache->rebase_infos->length; i++) {
        if (cache->rebase_infos->entries[i].info) {
            return cache->rebase_infos->entries[i].info->slide;
        }
    }

    return 0;
}
0
[ "CWE-787" ]
rizin
556ca2f9eef01ec0f4a76d1fbacfcf3a87a44810
115,080,141,797,487,360,000,000,000,000,000,000,000
15
Fix oob write in dyldcache When the individual n_slide_infos were too high, the sum would overflow and too few entries would be allocated.
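The bug class here — a wrapped sum used to size an allocation, so later writes run past the buffer — has a standard fix: check the accumulation before allocating. A hedged sketch with hypothetical names, not rizin's code:

#include <stdint.h>
#include <stdlib.h>

static void *alloc_entries(const uint32_t *counts, size_t n, size_t entry_size)
{
    uint64_t total = 0;

    for (size_t i = 0; i < n; i++) {
        if (counts[i] > UINT64_MAX - total)
            return NULL;               /* sum would wrap */
        total += counts[i];
    }
    if (total > SIZE_MAX || (entry_size && total > SIZE_MAX / entry_size))
        return NULL;                   /* total * entry_size would wrap */
    return calloc((size_t)total, entry_size);
}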
u32 gf_m4a_get_channel_cfg(u32 nb_chan)
{
    u32 i, count = sizeof(GF_M4ANumChannels) / sizeof(u32);
    for (i = 0; i < count; i++) {
        if (GF_M4ANumChannels[i] == nb_chan)
            return i + 1;
    }
    return 0;
}
0
[ "CWE-190", "CWE-787" ]
gpac
51cdb67ff7c5f1242ac58c5aa603ceaf1793b788
197,190,371,119,679,000,000,000,000,000,000,000,000
8
add safety in avc/hevc/vvc sps/pps/vps ID check - cf #1720 #1721 #1722
MagickExport MagickBooleanType CloseBlob(Image *image)
{
  BlobInfo
    *magick_restrict blob_info;

  int
    status;

  /*
    Close image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  blob_info=image->blob;
  if ((blob_info == (BlobInfo *) NULL) || (blob_info->type == UndefinedStream))
    return(MagickTrue);
  status=SyncBlob(image);
  switch (blob_info->type)
  {
    case UndefinedStream:
    case StandardStream:
      break;
    case FileStream:
    case PipeStream:
    {
      if (blob_info->synchronize != MagickFalse)
        status=fsync(fileno(blob_info->file_info.file));
      status=ferror(blob_info->file_info.file);
      break;
    }
    case ZipStream:
    {
#if defined(MAGICKCORE_ZLIB_DELEGATE)
      (void) gzerror(blob_info->file_info.gzfile,&status);
#endif
      break;
    }
    case BZipStream:
    {
#if defined(MAGICKCORE_BZLIB_DELEGATE)
      (void) BZ2_bzerror(blob_info->file_info.bzfile,&status);
#endif
      break;
    }
    case FifoStream:
      break;
    case BlobStream:
    {
      if (blob_info->file_info.file != (FILE *) NULL)
        {
          if (blob_info->synchronize != MagickFalse)
            status=fsync(fileno(blob_info->file_info.file));
          status=ferror(blob_info->file_info.file);
        }
      break;
    }
    case CustomStream:
      break;
  }
  blob_info->status=status < 0 ? MagickTrue : MagickFalse;
  blob_info->size=GetBlobSize(image);
  image->extent=blob_info->size;
  blob_info->eof=MagickFalse;
  blob_info->error=0;
  blob_info->mode=UndefinedBlobMode;
  if (blob_info->exempt != MagickFalse)
    {
      blob_info->type=UndefinedStream;
      return(blob_info->status);
    }
  switch (blob_info->type)
  {
    case UndefinedStream:
    case StandardStream:
      break;
    case FileStream:
    {
      if (fileno(blob_info->file_info.file) != -1)
        status=fclose(blob_info->file_info.file);
      break;
    }
    case PipeStream:
    {
#if defined(MAGICKCORE_HAVE_PCLOSE)
      status=pclose(blob_info->file_info.file);
#endif
      break;
    }
    case ZipStream:
    {
#if defined(MAGICKCORE_ZLIB_DELEGATE)
      status=gzclose(blob_info->file_info.gzfile);
#endif
      break;
    }
    case BZipStream:
    {
#if defined(MAGICKCORE_BZLIB_DELEGATE)
      BZ2_bzclose(blob_info->file_info.bzfile);
#endif
      break;
    }
    case FifoStream:
      break;
    case BlobStream:
    {
      if (blob_info->file_info.file != (FILE *) NULL)
        status=fclose(blob_info->file_info.file);
      break;
    }
    case CustomStream:
      break;
  }
  (void) DetachBlob(blob_info);
  blob_info->status=status < 0 ? MagickTrue : MagickFalse;
  return(blob_info->status);
}
0
[ "CWE-416", "CWE-399" ]
ImageMagick
c5d012a46ae22be9444326aa37969a3f75daa3ba
228,407,969,497,317,680,000,000,000,000,000,000,000
119
https://github.com/ImageMagick/ImageMagick6/issues/43
static int usbhid_restart_out_queue(struct usbhid_device *usbhid)
{
    struct hid_device *hid = usb_get_intfdata(usbhid->intf);
    int kicked;
    int r;

    if (!hid || test_bit(HID_RESET_PENDING, &usbhid->iofl) ||
            test_bit(HID_SUSPENDED, &usbhid->iofl))
        return 0;

    if ((kicked = (usbhid->outhead != usbhid->outtail))) {
        hid_dbg(hid, "Kicking head %d tail %d", usbhid->outhead, usbhid->outtail);

        /* Try to wake up from autosuspend... */
        r = usb_autopm_get_interface_async(usbhid->intf);
        if (r < 0)
            return r;

        /*
         * If still suspended, don't submit.  Submission will
         * occur if/when resume drains the queue.
         */
        if (test_bit(HID_SUSPENDED, &usbhid->iofl)) {
            usb_autopm_put_interface_no_suspend(usbhid->intf);
            return r;
        }

        /* Asynchronously flush queue. */
        set_bit(HID_OUT_RUNNING, &usbhid->iofl);
        if (hid_submit_out(hid)) {
            clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
            usb_autopm_put_interface_async(usbhid->intf);
        }
        wake_up(&usbhid->wait);
    }

    return kicked;
}
0
[ "CWE-125", "CWE-787" ]
linux
f043bfc98c193c284e2cd768fefabe18ac2fed9b
176,185,815,598,401,300,000,000,000,000,000,000,000
37
HID: usbhid: fix out-of-bounds bug The hid descriptor identifies the length and type of subordinate descriptors for a device. If the received hid descriptor is smaller than the size of the struct hid_descriptor, it is possible to cause out-of-bounds. In addition, if bNumDescriptors of the hid descriptor have an incorrect value, this can also cause out-of-bounds while approaching hdesc->desc[n]. So check the size of hid descriptor and bNumDescriptors. BUG: KASAN: slab-out-of-bounds in usbhid_parse+0x9b1/0xa20 Read of size 1 at addr ffff88006c5f8edf by task kworker/1:2/1261 CPU: 1 PID: 1261 Comm: kworker/1:2 Not tainted 4.14.0-rc1-42251-gebb2c2437d80 #169 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Workqueue: usb_hub_wq hub_event Call Trace: __dump_stack lib/dump_stack.c:16 dump_stack+0x292/0x395 lib/dump_stack.c:52 print_address_description+0x78/0x280 mm/kasan/report.c:252 kasan_report_error mm/kasan/report.c:351 kasan_report+0x22f/0x340 mm/kasan/report.c:409 __asan_report_load1_noabort+0x19/0x20 mm/kasan/report.c:427 usbhid_parse+0x9b1/0xa20 drivers/hid/usbhid/hid-core.c:1004 hid_add_device+0x16b/0xb30 drivers/hid/hid-core.c:2944 usbhid_probe+0xc28/0x1100 drivers/hid/usbhid/hid-core.c:1369 usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361 really_probe drivers/base/dd.c:413 driver_probe_device+0x610/0xa00 drivers/base/dd.c:557 __device_attach_driver+0x230/0x290 drivers/base/dd.c:653 bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463 __device_attach+0x26e/0x3d0 drivers/base/dd.c:710 device_initial_probe+0x1f/0x30 drivers/base/dd.c:757 bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523 device_add+0xd0b/0x1660 drivers/base/core.c:1835 usb_set_configuration+0x104e/0x1870 drivers/usb/core/message.c:1932 generic_probe+0x73/0xe0 drivers/usb/core/generic.c:174 usb_probe_device+0xaf/0xe0 drivers/usb/core/driver.c:266 really_probe drivers/base/dd.c:413 driver_probe_device+0x610/0xa00 drivers/base/dd.c:557 __device_attach_driver+0x230/0x290 drivers/base/dd.c:653 bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463 __device_attach+0x26e/0x3d0 drivers/base/dd.c:710 device_initial_probe+0x1f/0x30 drivers/base/dd.c:757 bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523 device_add+0xd0b/0x1660 drivers/base/core.c:1835 usb_new_device+0x7b8/0x1020 drivers/usb/core/hub.c:2457 hub_port_connect drivers/usb/core/hub.c:4903 hub_port_connect_change drivers/usb/core/hub.c:5009 port_event drivers/usb/core/hub.c:5115 hub_event+0x194d/0x3740 drivers/usb/core/hub.c:5195 process_one_work+0xc7f/0x1db0 kernel/workqueue.c:2119 worker_thread+0x221/0x1850 kernel/workqueue.c:2253 kthread+0x3a1/0x470 kernel/kthread.c:231 ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431 Cc: [email protected] Reported-by: Andrey Konovalov <[email protected]> Signed-off-by: Jaejoong Kim <[email protected]> Tested-by: Andrey Konovalov <[email protected]> Acked-by: Alan Stern <[email protected]> Signed-off-by: Jiri Kosina <[email protected]>
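A sketch of the two added checks the message describes: the received blob must be large enough for the fixed header, and large enough for every subordinate record that bNumDescriptors promises. The struct here is a simplified stand-in for the USB HID descriptor layout, not the kernel's definition.

#include <stddef.h>
#include <stdint.h>

struct hid_desc_hdr {
    uint8_t  bLength;
    uint8_t  bDescriptorType;
    uint16_t bcdHID;
    uint8_t  bCountryCode;
    uint8_t  bNumDescriptors;
    /* followed by bNumDescriptors 3-byte (type, length) records */
};

static int hid_desc_ok(const uint8_t *blob, size_t len)
{
    const struct hid_desc_hdr *h = (const void *)blob;

    if (len < sizeof(*h))
        return 0;   /* too small even for the fixed header */
    if (len < sizeof(*h) + (size_t)h->bNumDescriptors * 3)
        return 0;   /* would read past the end while walking records */
    return 1;
}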
int64_t Cluster::GetRelativeTimecode(int64_t abs_timecode) const {
  const int64_t cluster_timecode = this->Cluster::timecode();
  const int64_t rel_timecode =
      static_cast<int64_t>(abs_timecode) - cluster_timecode;

  if (rel_timecode < 0 || rel_timecode > kMaxBlockTimecode)
    return -1;

  return rel_timecode;
}
0
[ "CWE-20" ]
libvpx
f00890eecdf8365ea125ac16769a83aa6b68792d
218,393,743,200,336,800,000,000,000,000,000,000,000
10
update libwebm to libwebm-1.0.0.27-352-g6ab9fcf https://chromium.googlesource.com/webm/libwebm/+log/af81f26..6ab9fcf Change-Id: I9d56e1fbaba9b96404b4fbabefddc1a85b79c25d
static SECURITY_STATUS SEC_ENTRY kerberos_InitializeSecurityContextW(
    PCredHandle phCredential, PCtxtHandle phContext, SEC_WCHAR* pszTargetName,
    ULONG fContextReq, ULONG Reserved1, ULONG TargetDataRep, PSecBufferDesc pInput,
    ULONG Reserved2, PCtxtHandle phNewContext, PSecBufferDesc pOutput,
    ULONG* pfContextAttr, PTimeStamp ptsExpiry)
{
    SECURITY_STATUS status;
    char* target_name = NULL;

    if (pszTargetName)
        ConvertFromUnicode(CP_UTF8, 0, pszTargetName, -1, &target_name, 0, NULL, NULL);

    status = kerberos_InitializeSecurityContextA(phCredential, phContext, target_name,
                                                 fContextReq, Reserved1, TargetDataRep,
                                                 pInput, Reserved2, phNewContext,
                                                 pOutput, pfContextAttr, ptsExpiry);

    if (target_name)
        free(target_name);

    return status;
}
0
[]
FreeRDP
479e891545473f01c187daffdfa05fc752b54b72
315,570,565,896,676,900,000,000,000,000,000,000,000
20
check return values for SetCredentialsAttributes, throw warnings for unsupported attributes
static void nested_svm_init(struct vcpu_svm *svm)
{
    svm->vmcb->control.exit_code = SVM_EXIT_INIT;
    svm->vmcb->control.exit_info_1 = 0;
    svm->vmcb->control.exit_info_2 = 0;

    nested_svm_vmexit(svm);
}
0
[ "CWE-416" ]
linux
a58d9166a756a0f4a6618e4f593232593d6df134
272,907,512,767,528,800,000,000,000,000,000,000,000
8
KVM: SVM: load control fields from VMCB12 before checking them Avoid races between check and use of the nested VMCB controls. This for example ensures that the VMRUN intercept is always reflected to the nested hypervisor, instead of being processed by the host. Without this patch, it is possible to end up with svm->nested.hsave pointing to the MSR permission bitmap for nested guests. This bug is CVE-2021-29657. Reported-by: Felix Wilhelm <[email protected]> Cc: [email protected] Fixes: 2fcf4876ada ("KVM: nSVM: implement on demand allocation of the nested state") Signed-off-by: Paolo Bonzini <[email protected]>
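The race this commit closes is a check-versus-use (TOCTOU) one on guest-writable fields. A minimal sketch of the snapshot-then-validate discipline — the struct and names are hypothetical, and volatile stands in for memory a guest can modify concurrently:

#include <stddef.h>

struct ctrl_sketch {
    unsigned int intercepts;
    unsigned long msrpm_pa;
};

static int validate(const struct ctrl_sketch *c)
{
    return (c->intercepts & 1u) != 0;   /* e.g. VMRUN intercept required */
}

static int enter_nested(volatile struct ctrl_sketch *guest_mem,
                        struct ctrl_sketch *cached)
{
    struct ctrl_sketch snap;

    snap.intercepts = guest_mem->intercepts;   /* copy first */
    snap.msrpm_pa   = guest_mem->msrpm_pa;
    if (!validate(&snap))                      /* check the copy ... */
        return -1;
    *cached = snap;                            /* ... and use only the copy */
    return 0;
}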
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *len)
{
    /* FIXME use last free of some kind */
    return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
0
[ "CWE-476", "CWE-703" ]
linux
e4571b8c5e9ffa1e85c0c671995bd4dcc5c75091
189,640,390,335,846,430,000,000,000,000,000,000,000
6
btrfs: fix NULL pointer dereference when deleting device by invalid id [BUG] It's easy to trigger NULL pointer dereference, just by removing a non-existing device id: # mkfs.btrfs -f -m single -d single /dev/test/scratch1 \ /dev/test/scratch2 # mount /dev/test/scratch1 /mnt/btrfs # btrfs device remove 3 /mnt/btrfs Then we have the following kernel NULL pointer dereference: BUG: kernel NULL pointer dereference, address: 0000000000000000 #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page PGD 0 P4D 0 Oops: 0000 [#1] PREEMPT SMP NOPTI CPU: 9 PID: 649 Comm: btrfs Not tainted 5.14.0-rc3-custom+ #35 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 RIP: 0010:btrfs_rm_device+0x4de/0x6b0 [btrfs] btrfs_ioctl+0x18bb/0x3190 [btrfs] ? lock_is_held_type+0xa5/0x120 ? find_held_lock.constprop.0+0x2b/0x80 ? do_user_addr_fault+0x201/0x6a0 ? lock_release+0xd2/0x2d0 ? __x64_sys_ioctl+0x83/0xb0 __x64_sys_ioctl+0x83/0xb0 do_syscall_64+0x3b/0x90 entry_SYSCALL_64_after_hwframe+0x44/0xae [CAUSE] Commit a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly") moves the "missing" device path check into btrfs_rm_device(). But btrfs_rm_device() itself can have case where it only receives @devid, with NULL as @device_path. In that case, calling strcmp() on NULL will trigger the NULL pointer dereference. Before that commit, we handle the "missing" case inside btrfs_find_device_by_devspec(), which will not check @device_path at all if @devid is provided, thus no way to trigger the bug. [FIX] Before calling strcmp(), also make sure @device_path is not NULL. Fixes: a27a94c2b0c7 ("btrfs: Make btrfs_find_device_by_devspec return btrfs_device directly") CC: [email protected] # 5.4+ Reported-by: butt3rflyh4ck <[email protected]> Reviewed-by: Anand Jain <[email protected]> Signed-off-by: Qu Wenruo <[email protected]> Reviewed-by: David Sterba <[email protected]> Signed-off-by: David Sterba <[email protected]>
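A minimal illustration of the fix: when a lookup can be driven either by a numeric id or by a path, the path must be checked for NULL before it reaches strcmp(). The helper name is hypothetical; only the guard pattern reflects the commit.

#include <string.h>

static int is_missing_path(const char *device_path)
{
    /* short-circuits before strcmp() can dereference a NULL pointer */
    return device_path && strcmp(device_path, "missing") == 0;
}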
static void pcnet_update_irq(PCNetState *s)
{
    int isr = 0;
    s->csr[0] &= ~0x0080;

#if 1
    if (((s->csr[0] & ~s->csr[3]) & 0x5f00) ||
        (((s->csr[4]>>1) & ~s->csr[4]) & 0x0115) ||
        (((s->csr[5]>>1) & s->csr[5]) & 0x0048))
#else
    if ((!(s->csr[3] & 0x4000) && !!(s->csr[0] & 0x4000)) /* BABL */
        || (!(s->csr[3] & 0x1000) && !!(s->csr[0] & 0x1000)) /* MISS */
        || (!(s->csr[3] & 0x0100) && !!(s->csr[0] & 0x0100)) /* IDON */
        || (!(s->csr[3] & 0x0200) && !!(s->csr[0] & 0x0200)) /* TINT */
        || (!(s->csr[3] & 0x0400) && !!(s->csr[0] & 0x0400)) /* RINT */
        || (!(s->csr[3] & 0x0800) && !!(s->csr[0] & 0x0800)) /* MERR */
        || (!(s->csr[4] & 0x0001) && !!(s->csr[4] & 0x0002)) /* JAB */
        || (!(s->csr[4] & 0x0004) && !!(s->csr[4] & 0x0008)) /* TXSTRT */
        || (!(s->csr[4] & 0x0010) && !!(s->csr[4] & 0x0020)) /* RCVO */
        || (!(s->csr[4] & 0x0100) && !!(s->csr[4] & 0x0200)) /* MFCO */
        || (!!(s->csr[5] & 0x0040) && !!(s->csr[5] & 0x0080)) /* EXDINT */
        || (!!(s->csr[5] & 0x0008) && !!(s->csr[5] & 0x0010)) /* MPINT */)
#endif
    {
        isr = CSR_INEA(s);
        s->csr[0] |= 0x0080;
    }

    if (!!(s->csr[4] & 0x0080) && CSR_INEA(s)) { /* UINT */
        s->csr[4] &= ~0x0080;
        s->csr[4] |= 0x0040;
        s->csr[0] |= 0x0080;
        isr = 1;
        trace_pcnet_user_int(s);
    }

#if 1
    if (((s->csr[5]>>1) & s->csr[5]) & 0x0500)
#else
    if ((!!(s->csr[5] & 0x0400) && !!(s->csr[5] & 0x0800)) /* SINT */
        || (!!(s->csr[5] & 0x0100) && !!(s->csr[5] & 0x0200)) /* SLPINT */ )
#endif
    {
        isr = 1;
        s->csr[0] |= 0x0080;
    }

    if (isr != s->isr) {
        trace_pcnet_isr_change(s, isr, s->isr);
    }
    qemu_set_irq(s->irq, isr);
    s->isr = isr;
}
0
[]
qemu
837f21aacf5a714c23ddaadbbc5212f9b661e3f7
335,577,439,739,022,930,000,000,000,000,000,000,000
54
net: pcnet: add check to validate receive data size(CVE-2015-7504) In loopback mode, pcnet_receive routine appends CRC code to the receive buffer. If the data size given is same as the buffer size, the appended CRC code overwrites 4 bytes after s->buffer. Added a check to avoid that. Reported by: Qinghao Tang <[email protected]> Cc: [email protected] Reviewed-by: Michael S. Tsirkin <[email protected]> Signed-off-by: Prasad J Pandit <[email protected]> Signed-off-by: Jason Wang <[email protected]>
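The added check, sketched in isolation: appending the 4-byte CRC is only safe when the received frame leaves room for it in the buffer. A standalone helper with illustrative names, not the QEMU code.

#include <stddef.h>
#include <string.h>

#define CRC_LEN 4

static int append_crc(unsigned char *buf, size_t buf_size,
                      size_t frame_len, const unsigned char crc[CRC_LEN])
{
    if (buf_size < CRC_LEN || frame_len > buf_size - CRC_LEN)
        return -1;                    /* would write past the buffer */
    memcpy(buf + frame_len, crc, CRC_LEN);
    return 0;
}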
static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl,
                                       struct netlink_ext_ack *extack)
{
    struct sfq_sched_data *q = qdisc_priv(sch);

    if (cl)
        return NULL;
    return q->block;
}
0
[ "CWE-330" ]
linux
55667441c84fa5e0911a0aac44fb059c15ba6da2
234,873,644,251,025,740,000,000,000,000,000,000,000
9
net/flow_dissector: switch to siphash UDP IPv6 packets auto flowlabels are using a 32bit secret (static u32 hashrnd in net/core/flow_dissector.c) and apply jhash() over fields known by the receivers. Attackers can easily infer the 32bit secret and use this information to identify a device and/or user, since this 32bit secret is only set at boot time. Really, using jhash() to generate cookies sent on the wire is a serious security concern. Trying to change the rol32(hash, 16) in ip6_make_flowlabel() would be a dead end. Trying to periodically change the secret (like in sch_sfq.c) could change paths taken in the network for long lived flows. Let's switch to siphash, as we did in commit df453700e8d8 ("inet: switch IP ID generator to siphash") Using a cryptographically strong pseudo random function will solve this privacy issue and more generally remove other weak points in the stack. Packet schedulers using skb_get_hash_perturb() benefit from this change. Fixes: b56774163f99 ("ipv6: Enable auto flow labels by default") Fixes: 42240901f7c4 ("ipv6: Implement different admin modes for automatic flow labels") Fixes: 67800f9b1f4e ("ipv6: Call skb_get_hash_flowi6 to get skb->hash in ip6_make_flowlabel") Fixes: cb1ce2ef387b ("ipv6: Implement automatic flow label generation on transmit") Signed-off-by: Eric Dumazet <[email protected]> Reported-by: Jonathan Berger <[email protected]> Reported-by: Amit Klein <[email protected]> Reported-by: Benny Pinkas <[email protected]> Cc: Tom Herbert <[email protected]> Signed-off-by: David S. Miller <[email protected]>
static unsigned next_desc(struct vring_desc *desc)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT))
        return -1U;

    /* Check they're not leading us off end of descriptors. */
    next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    /* We will use the result as an index in an array, so most
     * architectures only need a compiler barrier here. */
    read_barrier_depends();

    return next;
}
0
[]
linux-2.6
bd97120fc3d1a11f3124c7c9ba1d91f51829eb85
231,118,965,444,733,900,000,000,000,000,000,000,000
17
vhost: fix length for cross region descriptor If a single descriptor crosses a region, the second chunk length should be decremented by the size translated so far; instead it includes the full descriptor length. Signed-off-by: Michael S. Tsirkin <[email protected]> Acked-by: Jason Wang <[email protected]> Signed-off-by: David S. Miller <[email protected]>
lookup_custom_attr (MonoImage *image, gpointer member)
{
    MonoCustomAttrInfo* res;

    res = mono_image_property_lookup (image, member, MONO_PROP_DYNAMIC_CATTR);
    if (!res)
        return NULL;

    res = g_memdup (res, MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * res->num_attrs);
    res->cached = 0;
    return res;
}
0
[ "CWE-399", "CWE-264" ]
mono
89d1455a80ef13cddee5d79ec00c06055da3085c
259,685,638,649,788,950,000,000,000,000,000,000,000
13
Don't use finalization to cleanup dynamic methods. * reflection.c: Use a reference queue to cleanup dynamic methods instead of finalization. * runtime.c: Shutdown the dynamic method queue before runtime cleanup begins. * DynamicMethod.cs: No longer finalizable. * icall-def.h: Remove unused dynamic method icall. Fixes #660422