Dataset schema (column name: type, value range):
func: string, length 0 to 484k
target: int64, 0 to 1
cwe: list, length 0 to 4
project: string, 799 distinct values
commit_id: string, length 40 (fixed)
hash: float64, 1,215,700,430,453,689,100,000,000 to 340,281,914,521,452,260,000,000,000,000
size: int64, 1 to 24k
message: string, length 0 to 13.3k
static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; int i; for (i = 0; i < 2000; i++) { if (readl(reg->iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); return true; } msleep(10); } /* max 20 seconds */ return false; }
0
[ "CWE-119", "CWE-787" ]
linux
7bc2b55a5c030685b399bb65b6baa9ccc3d1f167
135,782,019,884,832,200,000,000,000,000,000,000,000
19
scsi: arcmsr: Buffer overflow in arcmsr_iop_message_xfer() We need to put an upper bound on "user_len" so the memcpy() doesn't overflow. Cc: <[email protected]> Reported-by: Marco Grassi <[email protected]> Signed-off-by: Dan Carpenter <[email protected]> Reviewed-by: Tomas Henzl <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
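The commit message above describes the standard remedy of clamping an untrusted length before copying into a fixed-size buffer. The C sketch below only illustrates that pattern; it is not the arcmsr patch, and the buffer size and names are hypothetical.

```c
#include <stddef.h>
#include <string.h>

#define REPLY_BUF_SIZE 1032  /* hypothetical fixed-size destination buffer */

/* Clamp user_len before memcpy() so the copy can never run past reply_buf,
 * which is the kind of upper bound the commit message calls for. */
static size_t copy_user_reply(char *reply_buf, const char *user_data, size_t user_len)
{
    if (user_len > REPLY_BUF_SIZE)
        user_len = REPLY_BUF_SIZE;
    memcpy(reply_buf, user_data, user_len);
    return user_len;
}
```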
yaffsfs_fscheck(TSK_FS_INFO * /*fs*/, FILE * /*hFile*/) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("fscheck not implemented yet for YAFFS"); return 1; }
0
[ "CWE-125", "CWE-787" ]
sleuthkit
459ae818fc8dae717549810150de4d191ce158f1
242,239,134,015,677,000,000,000,000,000,000,000,000
7
Fix stack buffer overflow in yaffsfs_istat Prevent a stack buffer overflow in yaffsfs_istat by increasing the buffer size to the size required by tsk_fs_time_to_str.
size_t WireFormat::FieldDataOnlyByteSize(const FieldDescriptor* field, const Message& message) { const Reflection* message_reflection = message.GetReflection(); size_t data_size = 0; if (field->is_map()) { const MapFieldBase* map_field = message_reflection->GetMapData(message, field); if (map_field->IsMapValid()) { MapIterator iter(const_cast<Message*>(&message), field); MapIterator end(const_cast<Message*>(&message), field); const FieldDescriptor* key_field = field->message_type()->field(0); const FieldDescriptor* value_field = field->message_type()->field(1); for (map_field->MapBegin(&iter), map_field->MapEnd(&end); iter != end; ++iter) { size_t size = kMapEntryTagByteSize; size += MapKeyDataOnlyByteSize(key_field, iter.GetKey()); size += MapValueRefDataOnlyByteSize(value_field, iter.GetValueRef()); data_size += WireFormatLite::LengthDelimitedSize(size); } return data_size; } } size_t count = 0; if (field->is_repeated()) { count = internal::FromIntSize(message_reflection->FieldSize(message, field)); } else if (field->containing_type()->options().map_entry()) { // Map entry fields always need to be serialized. count = 1; } else if (message_reflection->HasField(message, field)) { count = 1; } switch (field->type()) { #define HANDLE_TYPE(TYPE, TYPE_METHOD, CPPTYPE_METHOD) \ case FieldDescriptor::TYPE_##TYPE: \ if (field->is_repeated()) { \ for (size_t j = 0; j < count; j++) { \ data_size += WireFormatLite::TYPE_METHOD##Size( \ message_reflection->GetRepeated##CPPTYPE_METHOD(message, field, \ j)); \ } \ } else { \ data_size += WireFormatLite::TYPE_METHOD##Size( \ message_reflection->Get##CPPTYPE_METHOD(message, field)); \ } \ break; #define HANDLE_FIXED_TYPE(TYPE, TYPE_METHOD) \ case FieldDescriptor::TYPE_##TYPE: \ data_size += count * WireFormatLite::k##TYPE_METHOD##Size; \ break; HANDLE_TYPE(INT32, Int32, Int32) HANDLE_TYPE(INT64, Int64, Int64) HANDLE_TYPE(SINT32, SInt32, Int32) HANDLE_TYPE(SINT64, SInt64, Int64) HANDLE_TYPE(UINT32, UInt32, UInt32) HANDLE_TYPE(UINT64, UInt64, UInt64) HANDLE_FIXED_TYPE(FIXED32, Fixed32) HANDLE_FIXED_TYPE(FIXED64, Fixed64) HANDLE_FIXED_TYPE(SFIXED32, SFixed32) HANDLE_FIXED_TYPE(SFIXED64, SFixed64) HANDLE_FIXED_TYPE(FLOAT, Float) HANDLE_FIXED_TYPE(DOUBLE, Double) HANDLE_FIXED_TYPE(BOOL, Bool) HANDLE_TYPE(GROUP, Group, Message) HANDLE_TYPE(MESSAGE, Message, Message) #undef HANDLE_TYPE #undef HANDLE_FIXED_TYPE case FieldDescriptor::TYPE_ENUM: { if (field->is_repeated()) { for (size_t j = 0; j < count; j++) { data_size += WireFormatLite::EnumSize( message_reflection->GetRepeatedEnum(message, field, j)->number()); } } else { data_size += WireFormatLite::EnumSize( message_reflection->GetEnum(message, field)->number()); } break; } // Handle strings separately so that we can get string references // instead of copying. case FieldDescriptor::TYPE_STRING: case FieldDescriptor::TYPE_BYTES: { for (size_t j = 0; j < count; j++) { std::string scratch; const std::string& value = field->is_repeated() ? message_reflection->GetRepeatedStringReference(message, field, j, &scratch) : message_reflection->GetStringReference(message, field, &scratch); data_size += WireFormatLite::StringSize(value); } break; } } return data_size; }
0
[ "CWE-703" ]
protobuf
d1635e1496f51e0d5653d856211e8821bc47adc4
221,716,415,612,352,300,000,000,000,000,000,000,000
110
Apply patch
static void tg3_timer_start(struct tg3 *tp) { tp->asf_counter = tp->asf_multiplier; tp->timer_counter = tp->timer_multiplier; tp->timer.expires = jiffies + tp->timer_offset; add_timer(&tp->timer); }
0
[ "CWE-476", "CWE-119" ]
linux
715230a44310a8cf66fbfb5a46f9a62a9b2de424
289,284,703,567,798,540,000,000,000,000,000,000,000
8
tg3: fix length overflow in VPD firmware parsing Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version when present") introduced VPD parsing that contained a potential length overflow. Limit the hardware's reported firmware string length (max 255 bytes) to stay inside the driver's firmware string length (32 bytes). On overflow, truncate the formatted firmware string instead of potentially overwriting portions of the tg3 struct. http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf Signed-off-by: Kees Cook <[email protected]> Reported-by: Oded Horovitz <[email protected]> Reported-by: Brad Spengler <[email protected]> Cc: [email protected] Cc: Matt Carlson <[email protected]> Signed-off-by: David S. Miller <[email protected]>
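The fix summarized above truncates a hardware-reported string (up to 255 bytes) so it fits the driver's 32-byte firmware string instead of overwriting adjacent memory. A minimal generic sketch of that truncation follows; it is not the tg3 code, and the names are made up.

```c
#include <stdio.h>

#define FW_VER_LEN 32  /* driver-side buffer size mentioned in the message */

/* snprintf() guarantees truncation plus NUL termination, so a 255-byte
 * VPD string can never spill past fw_ver[]. */
static void set_fw_version(char fw_ver[FW_VER_LEN], const char *vpd_str)
{
    snprintf(fw_ver, FW_VER_LEN, "%s", vpd_str);
}
```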
dump_args (struct obstack *obs, int argc, token_data **argv, const char *sep, bool quoted) { int i; size_t len = strlen (sep); for (i = 1; i < argc; i++) { if (i > 1) obstack_grow (obs, sep, len); if (quoted) obstack_grow (obs, lquote.string, lquote.length); obstack_grow (obs, TOKEN_DATA_TEXT (argv[i]), strlen (TOKEN_DATA_TEXT (argv[i]))); if (quoted) obstack_grow (obs, rquote.string, rquote.length); } }
0
[]
m4
5345bb49077bfda9fabd048e563f9e7077fe335d
137,376,063,114,673,300,000,000,000,000,000,000,000
18
Minor security fix: Quote output of mkstemp. * src/builtin.c (mkstemp_helper): Produce quoted output. * doc/m4.texinfo (Mkstemp): Update the documentation and tests. * NEWS: Document this change. Signed-off-by: Eric Blake <[email protected]> (cherry picked from commit bd9900d65eb9cd5add0f107e94b513fa267495ba)
int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { int ret; ASSERT_RTNL(); if (type == ieee80211_vif_type_p2p(&sdata->vif)) return 0; /* Setting ad-hoc mode on non-IBSS channel is not supported. */ if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS && type == NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; if (ieee80211_sdata_running(sdata)) { ret = ieee80211_runtime_change_iftype(sdata, type); if (ret) return ret; } else { /* Purge and reset type-dependent state. */ ieee80211_teardown_sdata(sdata->dev); ieee80211_setup_sdata(sdata, type); } /* reset some values that shouldn't be kept across type changes */ sdata->vif.bss_conf.basic_rates = ieee80211_mandatory_rates(sdata->local, sdata->local->hw.conf.channel->band); sdata->drop_unencrypted = 0; if (type == NL80211_IFTYPE_STATION) sdata->u.mgd.use_4addr = false; return 0; }
0
[ "CWE-703", "CWE-264" ]
linux
550fd08c2cebad61c548def135f67aba284c6162
195,318,322,371,440,570,000,000,000,000,000,000,000
35
net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared After the last patch, We are left in a state in which only drivers calling ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real hardware call ether_setup for their net_devices and don't hold any state in their skbs. There are a handful of drivers that violate this assumption of course, and need to be fixed up. This patch identifies those drivers, and marks them as not being able to support the safe transmission of skbs by clearning the IFF_TX_SKB_SHARING flag in priv_flags Signed-off-by: Neil Horman <[email protected]> CC: Karsten Keil <[email protected]> CC: "David S. Miller" <[email protected]> CC: Jay Vosburgh <[email protected]> CC: Andy Gospodarek <[email protected]> CC: Patrick McHardy <[email protected]> CC: Krzysztof Halasa <[email protected]> CC: "John W. Linville" <[email protected]> CC: Greg Kroah-Hartman <[email protected]> CC: Marcel Holtmann <[email protected]> CC: Johannes Berg <[email protected]> Signed-off-by: David S. Miller <[email protected]>
gdm_session_worker_handle_set_language_name (GdmDBusWorker *object, GDBusMethodInvocation *invocation, const char *language_name) { GdmSessionWorker *worker = GDM_SESSION_WORKER (object); g_debug ("GdmSessionWorker: language name set to %s", language_name); gdm_session_settings_set_language_name (worker->priv->user_settings, language_name); gdm_dbus_worker_complete_set_language_name (object, invocation); return TRUE; }
0
[ "CWE-362" ]
gdm
dcdbaaa04012541ad2813cf83559d91d52f208b9
250,105,187,356,661,600,000,000,000,000,000,000,000
11
session-worker: Don't switch back VTs until session is fully exited There's a race condition on shutdown where the session worker is switching VTs back to the initial VT at the same time as the session exit is being processed. This means that manager may try to start a login screen (because of the VT switch) when autologin is enabled when there shouldn't be a login screen. This commit makes sure both the PostSession script, and session-exited signal emission are complete before initiating the VT switch back to the initial VT. https://gitlab.gnome.org/GNOME/gdm/-/issues/660
static zval *to_zval_bool(encodeTypePtr type, xmlNodePtr data TSRMLS_DC) { zval *ret; MAKE_STD_ZVAL(ret); FIND_XML_NULL(data, ret); if (data && data->children) { if (data->children->type == XML_TEXT_NODE && data->children->next == NULL) { whiteSpace_collapse(data->children->content); if (stricmp((char*)data->children->content, "true") == 0 || stricmp((char*)data->children->content, "t") == 0 || strcmp((char*)data->children->content, "1") == 0) { ZVAL_BOOL(ret, 1); } else if (stricmp((char*)data->children->content, "false") == 0 || stricmp((char*)data->children->content, "f") == 0 || strcmp((char*)data->children->content, "0") == 0) { ZVAL_BOOL(ret, 0); } else { ZVAL_STRING(ret, (char*)data->children->content, 1); convert_to_boolean(ret); } } else { soap_error0(E_ERROR, "Encoding: Violation of encoding rules"); } } else { ZVAL_NULL(ret); } return ret; }
0
[ "CWE-19" ]
php-src
c8eaca013a3922e8383def6158ece2b63f6ec483
191,336,185,811,572,700,000,000,000,000,000,000,000
29
Added type checks
virtual enum precedence precedence() const { return DEFAULT_PRECEDENCE; }
0
[ "CWE-617" ]
server
2e7891080667c59ac80f788eef4d59d447595772
221,872,269,110,108,000,000,000,000,000,000,000,000
1
MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view This bug could manifest itself after pushing a where condition over a mergeable derived table / view / CTE DT into a grouping view / derived table / CTE V whose item list contained set functions with constant arguments such as MIN(2), SUM(1) etc. In such cases the field references used in the condition pushed into the view V that correspond set functions are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation of the virtual method const_item() for the class Item_direct_view_ref the wrapped set functions with constant arguments could be erroneously taken for constant items. This could lead to a wrong result set returned by the main select query in 10.2. In 10.4 where a possibility of pushing condition from HAVING into WHERE had been added this could cause a crash. Approved by Sergey Petrunya <[email protected]>
TEST_P(SslCertficateIntegrationTest, ServerRsaClientEcdsaOnly) { server_rsa_cert_ = true; server_ecdsa_cert_ = false; client_ecdsa_cert_ = true; initialize(); EXPECT_FALSE( makeRawHttpConnection(makeSslClientConnection(ecdsaOnlyClientOptions()), absl::nullopt) ->connected()); const std::string counter_name = listenerStatPrefix("ssl.connection_error"); Stats::CounterSharedPtr counter = test_server_->counter(counter_name); test_server_->waitForCounterGe(counter_name, 1); EXPECT_EQ(1U, counter->value()); counter->reset(); }
0
[ "CWE-400" ]
envoy
0e49a495826ea9e29134c1bd54fdeb31a034f40c
337,094,231,617,525,240,000,000,000,000,000,000,000
14
http/2: add stats and stream flush timeout (#139) This commit adds a new stream flush timeout to guard against a remote server that does not open window once an entire stream has been buffered for flushing. Additional stats have also been added to better understand the codecs view of active streams as well as amount of data buffered. Signed-off-by: Matt Klein <[email protected]>
DEFUN (clear_ip_bgp_external_ipv4_soft, clear_ip_bgp_external_ipv4_soft_cmd, "clear ip bgp external ipv4 (unicast|multicast) soft", CLEAR_STR IP_STR BGP_STR "Clear all external peers\n" "Address family\n" "Address Family modifier\n" "Address Family modifier\n" "Soft reconfig\n") { if (strncmp (argv[0], "m", 1) == 0) return bgp_clear_vty (vty, NULL, AFI_IP, SAFI_MULTICAST, clear_external, BGP_CLEAR_SOFT_BOTH, NULL); return bgp_clear_vty (vty, NULL, AFI_IP, SAFI_UNICAST, clear_external, BGP_CLEAR_SOFT_BOTH, NULL); }
0
[ "CWE-125" ]
frr
6d58272b4cf96f0daa846210dd2104877900f921
250,757,202,477,559,720,000,000,000,000,000,000,000
19
[bgpd] cleanup, compact and consolidate capability parsing code 2007-07-26 Paul Jakma <[email protected]> * (general) Clean up and compact capability parsing slightly. Consolidate validation of length and logging of generic TLV, and memcpy of capability data, thus removing such from cap specifc code (not always present or correct). * bgp_open.h: Add structures for the generic capability TLV header and for the data formats of the various specific capabilities we support. Hence remove the badly named, or else misdefined, struct capability. * bgp_open.c: (bgp_capability_vty_out) Use struct capability_mp_data. Do the length checks *before* memcpy()'ing based on that length (stored capability - should have been validated anyway on input, but..). (bgp_afi_safi_valid_indices) new function to validate (afi,safi) which is about to be used as index into arrays, consolidates several instances of same, at least one of which appeared to be incomplete.. (bgp_capability_mp) Much condensed. (bgp_capability_orf_entry) New, process one ORF entry (bgp_capability_orf) Condensed. Fixed to process all ORF entries. (bgp_capability_restart) Condensed, and fixed to use a cap-specific type, rather than abusing capability_mp. (struct message capcode_str) added to aid generic logging. (size_t cap_minsizes[]) added to aid generic validation of capability length field. (bgp_capability_parse) Generic logging and validation of TLV consolidated here. Code compacted as much as possible. * bgp_packet.c: (bgp_open_receive) Capability parsers now use streams, so no more need here to manually fudge the input stream getp. (bgp_capability_msg_parse) use struct capability_mp_data. Validate lengths /before/ memcpy. Use bgp_afi_safi_valid_indices. (bgp_capability_receive) Exported for use by test harness. * bgp_vty.c: (bgp_show_summary) fix conversion warning (bgp_show_peer) ditto * bgp_debug.h: Fix storage 'extern' after type 'const'. * lib/log.c: (mes_lookup) warning about code not being in same-number array slot should be debug, not warning. E.g. BGP has several discontigious number spaces, allocating from different parts of a space is not uncommon (e.g. IANA assigned versus vendor-assigned code points in some number space).
static size_t curl_read(char *data, size_t size, size_t nmemb, void *ctx) { php_curl *ch = (php_curl *) ctx; php_curl_read *t = ch->handlers->read; int length = 0; switch (t->method) { case PHP_CURL_DIRECT: if (t->fp) { length = fread(data, size, nmemb, t->fp); } break; case PHP_CURL_USER: { zval **argv[3]; zval *handle = NULL; zval *zfd = NULL; zval *zlength = NULL; zval *retval_ptr; int error; zend_fcall_info fci; TSRMLS_FETCH_FROM_CTX(ch->thread_ctx); MAKE_STD_ZVAL(handle); MAKE_STD_ZVAL(zfd); MAKE_STD_ZVAL(zlength); ZVAL_RESOURCE(handle, ch->id); zend_list_addref(ch->id); ZVAL_RESOURCE(zfd, t->fd); zend_list_addref(t->fd); ZVAL_LONG(zlength, (int) size * nmemb); argv[0] = &handle; argv[1] = &zfd; argv[2] = &zlength; fci.size = sizeof(fci); fci.function_table = EG(function_table); fci.function_name = t->func_name; fci.object_ptr = NULL; fci.retval_ptr_ptr = &retval_ptr; fci.param_count = 3; fci.params = argv; fci.no_separation = 0; fci.symbol_table = NULL; ch->in_callback = 1; error = zend_call_function(&fci, &t->fci_cache TSRMLS_CC); ch->in_callback = 0; if (error == FAILURE) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Cannot call the CURLOPT_READFUNCTION"); #if LIBCURL_VERSION_NUM >= 0x070c01 /* 7.12.1 */ length = CURL_READFUNC_ABORT; #endif } else if (retval_ptr) { if (Z_TYPE_P(retval_ptr) == IS_STRING) { length = MIN((int) (size * nmemb), Z_STRLEN_P(retval_ptr)); memcpy(data, Z_STRVAL_P(retval_ptr), length); } zval_ptr_dtor(&retval_ptr); } zval_ptr_dtor(argv[0]); zval_ptr_dtor(argv[1]); zval_ptr_dtor(argv[2]); break; } } return length; }
1
[]
php-src
0ea75af9be8a40836951fc89f723dd5390b8b46f
227,533,401,729,489,300,000,000,000,000,000,000,000
71
Fixed bug #69316 (Use-after-free in php_curl related to CURLOPT_FILE/_INFILE/_WRITEHEADER)
static int php_curl_ssl_mutex_create(void **m) { if (*((MUTEX_T *) m) = tsrm_mutex_alloc()) { return SUCCESS; } else { return FAILURE; } }
0
[]
php-src
124fb22a13fafa3648e4e15b4f207c7096d8155e
98,406,212,503,383,000,000,000,000,000,000,000,000
8
Fixed bug #68739 #68740 #68741
parse_auth_header(struct mg_connection *conn, struct auth_s *auth) { if (!auth || !conn) { return 0; } const char *auth_header = NULL; if (((auth_header = mg_get_header(conn, "Authorization")) == NULL) || strncasecmp(auth_header, "Basic ", 6) != 0) { return 0; } /* Parse authorization header */ const char* src = auth_header + 6; size_t len = apr_base64_decode_len((const char*)src); auth->pworkbuf = auth->workbuf; if (len > sizeof(auth->workbuf)) { auth->pworkbuf = calloc(0, len); auth->workbuf_len = len; } len = apr_base64_decode(auth->pworkbuf, src); if (len == 0) { return 0; } char *passwd = NULL, *saveptr = NULL; char *user = strtok_r(auth->pworkbuf, ":", &saveptr); if (user) { passwd = strtok_r(NULL, ":", &saveptr); } auth->pszUser = user; auth->pszPasswd = passwd; return 1; }
0
[ "CWE-787" ]
rsyslog
89955b0bcb1ff105e1374aad7e0e993faa6a038f
91,326,865,639,579,020,000,000,000,000,000,000,000
36
net bugfix: potential buffer overrun
ConnStateData::startDechunkingRequest() { Must(bodyPipe != NULL); debugs(33, 5, HERE << "start dechunking" << bodyPipe->status()); assert(!bodyParser); bodyParser = new Http1::TeChunkedParser; }
0
[ "CWE-444" ]
squid
fd68382860633aca92065e6c343cfd1b12b126e7
68,119,335,934,654,070,000,000,000,000,000,000,000
7
Improve Transfer-Encoding handling (#702) Reject messages containing Transfer-Encoding header with coding other than chunked or identity. Squid does not support other codings. For simplicity and security sake, also reject messages where Transfer-Encoding contains unnecessary complex values that are technically equivalent to "chunked" or "identity" (e.g., ",,chunked" or "identity, chunked"). RFC 7230 formally deprecated and removed identity coding, but it is still used by some agents.
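The policy described above is to accept only the "chunked" and "identity" codings and reject anything more elaborate. A toy C check in that spirit is sketched below; it ignores case folding and header parsing details and is not Squid's implementation.

```c
#include <stdbool.h>
#include <string.h>

/* Accept only the two codings named in the commit message; composite or
 * padded values such as ",,chunked" or "identity, chunked" are rejected. */
static bool transfer_encoding_acceptable(const char *value)
{
    return strcmp(value, "chunked") == 0 || strcmp(value, "identity") == 0;
}
```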
static PHP_FUNCTION(session_set_save_handler) { zval ***args = NULL; int i, num_args, argc = ZEND_NUM_ARGS(); char *name; if (PS(session_status) != php_session_none) { RETURN_FALSE; } if (argc > 0 && argc <= 2) { zval *obj = NULL, *callback = NULL; zend_uint func_name_len; char *func_name; HashPosition pos; zend_function *default_mptr, *current_mptr; ulong func_index; php_shutdown_function_entry shutdown_function_entry; zend_bool register_shutdown = 1; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O|b", &obj, php_session_iface_entry, &register_shutdown) == FAILURE) { RETURN_FALSE; } /* Find implemented methods - SessionHandlerInterface */ zend_hash_internal_pointer_reset_ex(&php_session_iface_entry->function_table, &pos); i = 0; while (zend_hash_get_current_data_ex(&php_session_iface_entry->function_table, (void **) &default_mptr, &pos) == SUCCESS) { zend_hash_get_current_key_ex(&php_session_iface_entry->function_table, &func_name, &func_name_len, &func_index, 0, &pos); if (zend_hash_find(&Z_OBJCE_P(obj)->function_table, func_name, func_name_len, (void **)&current_mptr) == SUCCESS) { if (PS(mod_user_names).names[i] != NULL) { zval_ptr_dtor(&PS(mod_user_names).names[i]); } MAKE_STD_ZVAL(callback); array_init_size(callback, 2); Z_ADDREF_P(obj); add_next_index_zval(callback, obj); add_next_index_stringl(callback, func_name, func_name_len - 1, 1); PS(mod_user_names).names[i] = callback; } else { php_error_docref(NULL TSRMLS_CC, E_ERROR, "Session handler's function table is corrupt"); RETURN_FALSE; } zend_hash_move_forward_ex(&php_session_iface_entry->function_table, &pos); ++i; } /* Find implemented methods - SessionIdInterface (optional) */ zend_hash_internal_pointer_reset_ex(&php_session_id_iface_entry->function_table, &pos); while (zend_hash_get_current_data_ex(&php_session_id_iface_entry->function_table, (void **) &default_mptr, &pos) == SUCCESS) { zend_hash_get_current_key_ex(&php_session_id_iface_entry->function_table, &func_name, &func_name_len, &func_index, 0, &pos); if (zend_hash_find(&Z_OBJCE_P(obj)->function_table, func_name, func_name_len, (void **)&current_mptr) == SUCCESS) { if (PS(mod_user_names).names[i] != NULL) { zval_ptr_dtor(&PS(mod_user_names).names[i]); } MAKE_STD_ZVAL(callback); array_init_size(callback, 2); Z_ADDREF_P(obj); add_next_index_zval(callback, obj); add_next_index_stringl(callback, func_name, func_name_len - 1, 1); PS(mod_user_names).names[i] = callback; } zend_hash_move_forward_ex(&php_session_id_iface_entry->function_table, &pos); ++i; } if (register_shutdown) { /* create shutdown function */ shutdown_function_entry.arg_count = 1; shutdown_function_entry.arguments = (zval **) safe_emalloc(sizeof(zval *), 1, 0); MAKE_STD_ZVAL(callback); ZVAL_STRING(callback, "session_register_shutdown", 1); shutdown_function_entry.arguments[0] = callback; /* add shutdown function, removing the old one if it exists */ if (!register_user_shutdown_function("session_shutdown", sizeof("session_shutdown"), &shutdown_function_entry TSRMLS_CC)) { zval_ptr_dtor(&callback); efree(shutdown_function_entry.arguments); php_error_docref(NULL TSRMLS_CC, E_WARNING, "Unable to register session shutdown function"); RETURN_FALSE; } } else { /* remove shutdown function */ remove_user_shutdown_function("session_shutdown", sizeof("session_shutdown") TSRMLS_CC); } if (PS(mod) && PS(session_status) == php_session_none && PS(mod) != &ps_mod_user) { zend_alter_ini_entry("session.save_handler", sizeof("session.save_handler"), "user", sizeof("user")-1, PHP_INI_USER, PHP_INI_STAGE_RUNTIME); } RETURN_TRUE; } if (argc != 6 && argc != 7) { WRONG_PARAM_COUNT; } if (zend_parse_parameters(argc TSRMLS_CC, "+", &args, &num_args) == FAILURE) { return; } /* remove shutdown function */ remove_user_shutdown_function("session_shutdown", sizeof("session_shutdown") TSRMLS_CC); /* at this point argc can only be 6 or 7 */ for (i = 0; i < argc; i++) { if (!zend_is_callable(*args[i], 0, &name TSRMLS_CC)) { efree(args); php_error_docref(NULL TSRMLS_CC, E_WARNING, "Argument %d is not a valid callback", i+1); efree(name); RETURN_FALSE; } efree(name); } if (PS(mod) && PS(mod) != &ps_mod_user) { zend_alter_ini_entry("session.save_handler", sizeof("session.save_handler"), "user", sizeof("user")-1, PHP_INI_USER, PHP_INI_STAGE_RUNTIME); } for (i = 0; i < argc; i++) { if (PS(mod_user_names).names[i] != NULL) { zval_ptr_dtor(&PS(mod_user_names).names[i]); } Z_ADDREF_PP(args[i]); PS(mod_user_names).names[i] = *args[i]; } efree(args); RETURN_TRUE; }
0
[ "CWE-264" ]
php-src
25e8fcc88fa20dc9d4c47184471003f436927cde
158,008,099,966,738,180,000,000,000,000,000,000,000
137
Strict session
bool AuthorizationManager::shouldValidateAuthSchemaOnStartup() { return _startupAuthSchemaValidation; }
0
[ "CWE-613" ]
mongo
db19e7ce84cfd702a4ba9983ee2ea5019f470f82
229,703,132,192,766,540,000,000,000,000,000,000,000
3
SERVER-38984 Validate unique User ID on UserCache hit (cherry picked from commit e55d6e2292e5dbe2f97153251d8193d1cc89f5d7)
llsec_key_alloc(const struct ieee802154_llsec_key *template) { const int authsizes[3] = { 4, 8, 16 }; struct mac802154_llsec_key *key; int i; key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) return NULL; kref_init(&key->ref); key->key = *template; BUILD_BUG_ON(ARRAY_SIZE(authsizes) != ARRAY_SIZE(key->tfm)); for (i = 0; i < ARRAY_SIZE(key->tfm); i++) { key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(key->tfm[i])) goto err_tfm; if (crypto_aead_setkey(key->tfm[i], template->key, IEEE802154_LLSEC_KEY_SIZE)) goto err_tfm; if (crypto_aead_setauthsize(key->tfm[i], authsizes[i])) goto err_tfm; } key->tfm0 = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0); if (IS_ERR(key->tfm0)) goto err_tfm; if (crypto_sync_skcipher_setkey(key->tfm0, template->key, IEEE802154_LLSEC_KEY_SIZE)) goto err_tfm0; return key; err_tfm0: crypto_free_sync_skcipher(key->tfm0); err_tfm: for (i = 0; i < ARRAY_SIZE(key->tfm); i++) if (!IS_ERR_OR_NULL(key->tfm[i])) crypto_free_aead(key->tfm[i]); kfree_sensitive(key); return NULL; }
0
[ "CWE-416" ]
linux
1165affd484889d4986cf3b724318935a0b120d8
269,434,376,195,721,160,000,000,000,000,000,000,000
47
net: mac802154: Fix general protection fault syzbot found general protection fault in crypto_destroy_tfm()[1]. It was caused by wrong clean up loop in llsec_key_alloc(). If one of the tfm array members is in IS_ERR() range it will cause general protection fault in clean up function [1]. Call Trace: crypto_free_aead include/crypto/aead.h:191 [inline] [1] llsec_key_alloc net/mac802154/llsec.c:156 [inline] mac802154_llsec_key_add+0x9e0/0xcc0 net/mac802154/llsec.c:249 ieee802154_add_llsec_key+0x56/0x80 net/mac802154/cfg.c:338 rdev_add_llsec_key net/ieee802154/rdev-ops.h:260 [inline] nl802154_add_llsec_key+0x3d3/0x560 net/ieee802154/nl802154.c:1584 genl_family_rcv_msg_doit+0x228/0x320 net/netlink/genetlink.c:739 genl_family_rcv_msg net/netlink/genetlink.c:783 [inline] genl_rcv_msg+0x328/0x580 net/netlink/genetlink.c:800 netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2502 genl_rcv+0x24/0x40 net/netlink/genetlink.c:811 netlink_unicast_kernel net/netlink/af_netlink.c:1312 [inline] netlink_unicast+0x533/0x7d0 net/netlink/af_netlink.c:1338 netlink_sendmsg+0x856/0xd90 net/netlink/af_netlink.c:1927 sock_sendmsg_nosec net/socket.c:654 [inline] sock_sendmsg+0xcf/0x120 net/socket.c:674 ____sys_sendmsg+0x6e8/0x810 net/socket.c:2350 ___sys_sendmsg+0xf3/0x170 net/socket.c:2404 __sys_sendmsg+0xe5/0x1b0 net/socket.c:2433 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xae Signed-off-by: Pavel Skripkin <[email protected]> Reported-by: [email protected] Acked-by: Alexander Aring <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Stefan Schmidt <[email protected]>
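The crash analyzed above came from an error path that freed array slots still holding IS_ERR() markers instead of real objects; the corrected function earlier in this row guards the cleanup with IS_ERR_OR_NULL(). The sketch below shows the same cleanup discipline in plain C with a hypothetical resource API, where NULL plays the role of the error marker.

```c
#include <stdlib.h>

struct resource { int id; };

static struct resource *resource_alloc(int id)
{
    struct resource *r = malloc(sizeof(*r));
    if (r)
        r->id = id;
    return r;
}

static void resource_free(struct resource *r)
{
    free(r);
}

#define N_RES 3

/* Allocate all-or-nothing; on failure, free only slots that really hold an
 * object. Zero-initializing first makes the guard in the error loop safe. */
static int alloc_all(struct resource *res[N_RES])
{
    int i;

    for (i = 0; i < N_RES; i++)
        res[i] = NULL;
    for (i = 0; i < N_RES; i++) {
        res[i] = resource_alloc(i);
        if (!res[i])
            goto err;
    }
    return 0;
err:
    for (i = 0; i < N_RES; i++)
        if (res[i])          /* mirrors the IS_ERR_OR_NULL() check in the fix */
            resource_free(res[i]);
    return -1;
}
```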
fetch_fp_info(Oid func_id, struct fp_info * fip) { HeapTuple func_htp; Form_pg_proc pp; Assert(OidIsValid(func_id)); Assert(fip != NULL); /* * Since the validity of this structure is determined by whether the * funcid is OK, we clear the funcid here. It must not be set to the * correct value until we are about to return with a good struct fp_info, * since we can be interrupted (i.e., with an ereport(ERROR, ...)) at any * time. [No longer really an issue since we don't save the struct * fp_info across transactions anymore, but keep it anyway.] */ MemSet(fip, 0, sizeof(struct fp_info)); fip->funcid = InvalidOid; fmgr_info(func_id, &fip->flinfo); func_htp = SearchSysCache1(PROCOID, ObjectIdGetDatum(func_id)); if (!HeapTupleIsValid(func_htp)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), errmsg("function with OID %u does not exist", func_id))); pp = (Form_pg_proc) GETSTRUCT(func_htp); /* watch out for catalog entries with more than FUNC_MAX_ARGS args */ if (pp->pronargs > FUNC_MAX_ARGS) elog(ERROR, "function %s has more than %d arguments", NameStr(pp->proname), FUNC_MAX_ARGS); fip->namespace = pp->pronamespace; fip->rettype = pp->prorettype; memcpy(fip->argtypes, pp->proargtypes.values, pp->pronargs * sizeof(Oid)); strlcpy(fip->fname, NameStr(pp->proname), NAMEDATALEN); ReleaseSysCache(func_htp); /* * This must be last! */ fip->funcid = func_id; }
0
[ "CWE-89" ]
postgres
2b3a8b20c2da9f39ffecae25ab7c66974fbc0d3b
188,720,838,185,138,500,000,000,000,000,000,000,000
45
Be more careful to not lose sync in the FE/BE protocol. If any error occurred while we were in the middle of reading a protocol message from the client, we could lose sync, and incorrectly try to interpret a part of another message as a new protocol message. That will usually lead to an "invalid frontend message" error that terminates the connection. However, this is a security issue because an attacker might be able to deliberately cause an error, inject a Query message in what's supposed to be just user data, and have the server execute it. We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other operations that could ereport(ERROR) in the middle of processing a message, but a query cancel interrupt or statement timeout could nevertheless cause it to happen. Also, the V2 fastpath and COPY handling were not so careful. It's very difficult to recover in the V2 COPY protocol, so we will just terminate the connection on error. In practice, that's what happened previously anyway, as we lost protocol sync. To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set whenever we're in the middle of reading a message. When it's set, we cannot safely ERROR out and continue running, because we might've read only part of a message. PqCommReadingMsg acts somewhat similarly to critical sections in that if an error occurs while it's set, the error handler will force the connection to be terminated, as if the error was FATAL. It's not implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted to PANIC in critical sections, because we want to be able to use PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes advantage of that to prevent an OOM error from terminating the connection. To prevent unnecessary connection terminations, add a holdoff mechanism similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel interrupts, but still allow die interrupts. The rules on which interrupts are processed when are now a bit more complicated, so refactor ProcessInterrupts() and the calls to it in signal handlers so that the signal handlers always call it if ImmediateInterruptOK is set, and ProcessInterrupts() can decide to not do anything if the other conditions are not met. Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund. Backpatch to all supported versions. Security: CVE-2015-0244
static void *ocfs2_acl_to_xattr(const struct posix_acl *acl, size_t *size) { struct ocfs2_acl_entry *entry = NULL; char *ocfs2_acl; size_t n; *size = acl->a_count * sizeof(struct posix_acl_entry); ocfs2_acl = kmalloc(*size, GFP_NOFS); if (!ocfs2_acl) return ERR_PTR(-ENOMEM); entry = (struct ocfs2_acl_entry *)ocfs2_acl; for (n = 0; n < acl->a_count; n++, entry++) { entry->e_tag = cpu_to_le16(acl->a_entries[n].e_tag); entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm); switch(acl->a_entries[n].e_tag) { case ACL_USER: entry->e_id = cpu_to_le32( from_kuid(&init_user_ns, acl->a_entries[n].e_uid)); break; case ACL_GROUP: entry->e_id = cpu_to_le32( from_kgid(&init_user_ns, acl->a_entries[n].e_gid)); break; default: entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID); break; } } return ocfs2_acl; }
0
[ "CWE-862", "CWE-285" ]
linux
073931017b49d9458aa351605b43a7e34598caef
178,668,802,860,168,830,000,000,000,000,000,000,000
34
posix_acl: Clear SGID bit when setting file permissions When file permissions are modified via chmod(2) and the user is not in the owning group or capable of CAP_FSETID, the setgid bit is cleared in inode_change_ok(). Setting a POSIX ACL via setxattr(2) sets the file permissions as well as the new ACL, but doesn't clear the setgid bit in a similar way; this allows to bypass the check in chmod(2). Fix that. References: CVE-2016-7097 Reviewed-by: Christoph Hellwig <[email protected]> Reviewed-by: Jeff Layton <[email protected]> Signed-off-by: Jan Kara <[email protected]> Signed-off-by: Andreas Gruenbacher <[email protected]>
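The rule stated above is that a permission change made by a caller who is neither in the owning group nor CAP_FSETID-capable must drop the setgid bit. A small user-space flavored sketch of that rule follows; the two boolean parameters stand in for the kernel's group and capability checks and are assumptions, not the actual patch.

```c
#include <stdbool.h>
#include <sys/stat.h>

/* Strip S_ISGID from a requested mode when the caller lacks the rights
 * described in the commit message. */
static mode_t sanitize_mode(mode_t mode, bool in_owning_group, bool has_fsetid_cap)
{
    if (!in_owning_group && !has_fsetid_cap)
        mode &= ~(mode_t)S_ISGID;
    return mode;
}
```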
int main(int argc, char **argv) { swaylock_log_init(LOG_ERROR); initialize_pw_backend(argc, argv); srand(time(NULL)); enum line_mode line_mode = LM_LINE; state.failed_attempts = 0; state.args = (struct swaylock_args){ .mode = BACKGROUND_MODE_FILL, .font = strdup("sans-serif"), .font_size = 0, .radius = 50, .thickness = 10, .indicator_x_position = 0, .indicator_y_position = 0, .override_indicator_x_position = false, .override_indicator_y_position = false, .ignore_empty = false, .show_indicator = true, .show_caps_lock_indicator = false, .show_caps_lock_text = true, .show_keyboard_layout = false, .hide_keyboard_layout = false, .show_failed_attempts = false, .indicator_idle_visible = false }; wl_list_init(&state.images); set_default_colors(&state.args.colors); char *config_path = NULL; int result = parse_options(argc, argv, NULL, NULL, &config_path); if (result != 0) { free(config_path); return result; } if (!config_path) { config_path = get_config_path(); } if (config_path) { swaylock_log(LOG_DEBUG, "Found config at %s", config_path); int config_status = load_config(config_path, &state, &line_mode); free(config_path); if (config_status != 0) { free(state.args.font); return config_status; } } if (argc > 1) { swaylock_log(LOG_DEBUG, "Parsing CLI Args"); int result = parse_options(argc, argv, &state, &line_mode, NULL); if (result != 0) { free(state.args.font); return result; } } if (line_mode == LM_INSIDE) { state.args.colors.line = state.args.colors.inside; } else if (line_mode == LM_RING) { state.args.colors.line = state.args.colors.ring; } #ifdef __linux__ // Most non-linux platforms require root to mlock() if (mlock(state.password.buffer, sizeof(state.password.buffer)) != 0) { swaylock_log(LOG_ERROR, "Unable to mlock() password memory."); return EXIT_FAILURE; } #endif wl_list_init(&state.surfaces); state.xkb.context = xkb_context_new(XKB_CONTEXT_NO_FLAGS); state.display = wl_display_connect(NULL); if (!state.display) { free(state.args.font); swaylock_log(LOG_ERROR, "Unable to connect to the compositor. " "If your compositor is running, check or set the " "WAYLAND_DISPLAY environment variable."); return EXIT_FAILURE; } struct wl_registry *registry = wl_display_get_registry(state.display); wl_registry_add_listener(registry, &registry_listener, &state); wl_display_roundtrip(state.display); if (!state.compositor || !state.shm) { swaylock_log(LOG_ERROR, "Missing wl_compositor or wl_shm"); return 1; } if (state.ext_session_lock_manager_v1) { swaylock_log(LOG_DEBUG, "Using ext-session-lock-v1"); state.ext_session_lock_v1 = ext_session_lock_manager_v1_lock(state.ext_session_lock_manager_v1); ext_session_lock_v1_add_listener(state.ext_session_lock_v1, &ext_session_lock_v1_listener, &state); } else if (state.layer_shell && state.input_inhibit_manager) { swaylock_log(LOG_DEBUG, "Using wlr-layer-shell + wlr-input-inhibitor"); zwlr_input_inhibit_manager_v1_get_inhibitor(state.input_inhibit_manager); } else { swaylock_log(LOG_ERROR, "Missing ext-session-lock-v1, wlr-layer-shell " "and wlr-input-inhibitor"); return 1; } if (wl_display_roundtrip(state.display) == -1) { free(state.args.font); if (state.input_inhibit_manager) { swaylock_log(LOG_ERROR, "Exiting - failed to inhibit input:" " is another lockscreen already running?"); return 2; } return 1; } if (state.zxdg_output_manager) { struct swaylock_surface *surface; wl_list_for_each(surface, &state.surfaces, link) { surface->xdg_output = zxdg_output_manager_v1_get_xdg_output( state.zxdg_output_manager, surface->output); zxdg_output_v1_add_listener( surface->xdg_output, &_xdg_output_listener, surface); } wl_display_roundtrip(state.display); } else { swaylock_log(LOG_INFO, "Compositor does not support zxdg output " "manager, images assigned to named outputs will not work"); } struct swaylock_surface *surface; wl_list_for_each(surface, &state.surfaces, link) { create_surface(surface); } if (state.args.daemonize) { wl_display_roundtrip(state.display); daemonize(); } state.eventloop = loop_create(); loop_add_fd(state.eventloop, wl_display_get_fd(state.display), POLLIN, display_in, NULL); loop_add_fd(state.eventloop, get_comm_reply_fd(), POLLIN, comm_in, NULL); state.run_display = true; while (state.run_display) { errno = 0; if (wl_display_flush(state.display) == -1 && errno != EAGAIN) { break; } loop_poll(state.eventloop); } if (state.ext_session_lock_v1) { ext_session_lock_v1_unlock_and_destroy(state.ext_session_lock_v1); wl_display_flush(state.display); } free(state.args.font); return 0; }
0
[ "CWE-703" ]
swaylock
1d1c75b6316d21933069a9d201f966d84099f6ca
132,123,284,634,053,700,000,000,000,000,000,000,000
163
Add support for ext-session-lock-v1 This is a new protocol to lock the session [1]. It should be more reliable than layer-shell + input-inhibitor. [1]: https://gitlab.freedesktop.org/wayland/wayland-protocols/-/merge_requests/131
void Temp2CHAD(cmsMAT3* Chad, cmsFloat64Number Temp) { cmsCIEXYZ White; cmsCIExyY ChromaticityOfWhite; cmsWhitePointFromTemp(&ChromaticityOfWhite, Temp); cmsxyY2XYZ(&White, &ChromaticityOfWhite); _cmsAdaptationMatrix(Chad, NULL, &White, cmsD50_XYZ()); }
0
[]
Little-CMS
41d222df1bc6188131a8f46c32eab0a4d4cdf1b6
180,097,188,285,291,750,000,000,000,000,000,000,000
9
Memory squeezing fix: lcms2 cmsPipeline construction When creating a new pipeline, lcms would often try to allocate a stage and pass it to cmsPipelineInsertStage without checking whether the allocation succeeded. cmsPipelineInsertStage would then assert (or crash) if it had not. The fix here is to change cmsPipelineInsertStage to check and return an error value. All calling code is then checked to test this return value and cope.
static int readlink_stat(const char *path, STRUCT_STAT *stp, char *linkbuf) { #ifdef SUPPORT_LINKS if (link_stat(path, stp, copy_dirlinks) < 0) return -1; if (S_ISLNK(stp->st_mode)) { int llen = do_readlink(path, linkbuf, MAXPATHLEN - 1); if (llen < 0) return -1; linkbuf[llen] = '\0'; if (copy_unsafe_links && unsafe_symlink(linkbuf, path)) { if (INFO_GTE(SYMSAFE, 1)) { rprintf(FINFO,"copying unsafe symlink \"%s\" -> \"%s\"\n", path, linkbuf); } return x_stat(path, stp, NULL); } if (munge_symlinks && am_sender && llen > SYMLINK_PREFIX_LEN && strncmp(linkbuf, SYMLINK_PREFIX, SYMLINK_PREFIX_LEN) == 0) { memmove(linkbuf, linkbuf + SYMLINK_PREFIX_LEN, llen - SYMLINK_PREFIX_LEN + 1); } } return 0; #else return x_stat(path, stp, NULL); #endif }
0
[ "CWE-59" ]
rsync
962f8b90045ab331fc04c9e65f80f1a53e68243b
40,456,385,672,220,050,000,000,000,000,000,000,000
28
Complain if an inc-recursive path is not right for its dir. This ensures that a malicious sender can't use a just-sent symlink as a trasnfer path.
void DequantizeSlice(const Device& d, OpKernelContext* ctx, const ConstVec& input, float min_range, float max_range, Vec output) { // TODO(pauldonnelly): Factor out the similar calculations in quantize, // dequantize and quantize_and_dequantize ops. const float half_range = !std::is_signed<T>::value ? 0.0f : (static_cast<float>(std::numeric_limits<T>::max()) - std::numeric_limits<T>::min() + 1) / 2.0f; if (mode_ == QUANTIZE_MODE_MIN_COMBINED) { const float scale_factor = (max_range - min_range) / (static_cast<float>(std::numeric_limits<T>::max()) - std::numeric_limits<T>::min()); output.device(d) = ((input.template cast<float>() + half_range) * scale_factor) + min_range; } else if (mode_ == QUANTIZE_MODE_SCALED) { const int min_output_value = std::numeric_limits<T>::min() + (narrow_range_ ? 1 : 0); const float scale_factor = std::numeric_limits<T>::min() == 0 ? (max_range / std::numeric_limits<T>::max()) : std::max(min_range / min_output_value, max_range / std::numeric_limits<T>::max()); output.device(d) = input.template cast<float>() * scale_factor; } }
0
[ "CWE-20", "CWE-125" ]
tensorflow
5899741d0421391ca878da47907b1452f06aaf1b
5,218,915,848,930,661,000,000,000,000,000,000,000
32
Fix heap OOB read in dequantize op. Also fixes SEGV in same op PiperOrigin-RevId: 372437896 Change-Id: I135e94d360c2a1ce374c10f7e0fed1af603dbc02
int ha_myisam::delete_table(const char *name) { return mi_delete_table(name); }
0
[ "CWE-362" ]
mysql-server
4e5473862e6852b0f3802b0cd0c6fa10b5253291
63,250,298,704,983,115,000,000,000,000,000,000,000
4
Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE During REPAIR TABLE of a MyISAM table, a temporary data file (.TMD) is created. When repair finishes, this file is renamed to the original .MYD file. The problem was that during this rename, we copied the stats from the old file to the new file with chmod/chown. If a user managed to replace the temporary file before chmod/chown was executed, it was possible to get an arbitrary file with the privileges of the mysql user. This patch fixes the problem by not copying stats from the old file to the new file. This is not needed as the new file was created with the correct stats. This fix only changes server behavior - external utilities such as myisamchk still does chmod/chown. No test case provided since the problem involves synchronization with file system operations.
static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param) { int ret = 0; struct sta_info *psta = NULL; struct adapter *padapter = rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct sta_priv *pstapriv = &padapter->stapriv; DBG_88E("%s, sta_addr: %pM\n", __func__, (param->sta_addr)); if (!check_fwstate(pmlmepriv, _FW_LINKED | WIFI_AP_STATE)) return -EINVAL; if (is_broadcast_ether_addr(param->sta_addr)) return -EINVAL; psta = rtw_get_stainfo(pstapriv, param->sta_addr); if (psta) { if (psta->wpa_ie[0] == WLAN_EID_RSN || psta->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC) { int wpa_ie_len; int copy_len; wpa_ie_len = psta->wpa_ie[1]; copy_len = min_t(int, wpa_ie_len + 2, sizeof(psta->wpa_ie)); param->u.wpa_ie.len = copy_len; memcpy(param->u.wpa_ie.reserved, psta->wpa_ie, copy_len); } else { DBG_88E("sta's wpa_ie is NONE\n"); } } else { ret = -1; } return ret; }
0
[ "CWE-787" ]
linux
74b6b20df8cfe90ada777d621b54c32e69e27cd7
100,538,111,657,894,530,000,000,000,000,000,000,000
36
staging: rtl8188eu: prevent ->ssid overflow in rtw_wx_set_scan() This code has a check to prevent read overflow but it needs another check to prevent writing beyond the end of the ->ssid[] array. Fixes: a2c60d42d97c ("staging: r8188eu: Add files for new driver - part 16") Signed-off-by: Dan Carpenter <[email protected]> Cc: stable <[email protected]> Link: https://lore.kernel.org/r/YEHymwsnHewzoam7@mwanda Signed-off-by: Greg Kroah-Hartman <[email protected]>
static int _server_handle_qTfV(libgdbr_t *g, int (*cmd_cb) (void*, const char*, char*, size_t), void *core_ptr) { // TODO if (send_ack (g) < 0) { return -1; } return send_msg (g, ""); }
0
[ "CWE-703", "CWE-787" ]
radare2
796dd28aaa6b9fa76d99c42c4d5ff8b257cc2191
182,149,404,888,827,230,000,000,000,000,000,000,000
7
Fix ext2 buffer overflow in r2_sbu_grub_memmove
evbuffer_deferred_callback(struct event_callback *cb, void *arg) { struct bufferevent *parent = NULL; struct evbuffer *buffer = arg; /* XXXX It would be better to run these callbacks without holding the * lock */ EVBUFFER_LOCK(buffer); parent = buffer->parent; evbuffer_run_callbacks(buffer, 1); evbuffer_decref_and_unlock_(buffer); if (parent) bufferevent_decref_(parent); }
0
[ "CWE-189" ]
libevent
841ecbd96105c84ac2e7c9594aeadbcc6fb38bc4
227,224,176,700,445,460,000,000,000,000,000,000,000
14
Fix CVE-2014-6272 in Libevent 2.1 For this fix, we need to make sure that passing too-large inputs to the evbuffer functions can't make us do bad things with the heap. Also, lower the maximum chunk size to the lower of off_t, size_t maximum. This is necessary since otherwise we could get into an infinite loop if we make a chunk that 'misalign' cannot index into.
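The fix described above is about refusing sizes large enough to make later allocator arithmetic wrap around. Below is a small, generic sketch of an overflow-checked allocation in that spirit; the limit constant is illustrative and this is not libevent's code.

```c
#include <stddef.h>
#include <stdlib.h>

/* Keep individual chunks far below SIZE_MAX so "payload + overhead"
 * can never overflow before it reaches malloc(). */
#define MAX_CHUNK_SIZE ((size_t)1 << 30)   /* illustrative cap */

static void *alloc_chunk(size_t payload, size_t overhead)
{
    if (payload > MAX_CHUNK_SIZE || overhead > MAX_CHUNK_SIZE)
        return NULL;                       /* reject instead of wrapping */
    return malloc(payload + overhead);
}
```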
static ssize_t pm_qos_no_power_off_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { int ret; if (kstrtoint(buf, 0, &ret)) return -EINVAL; if (ret != 0 && ret != 1) return -EINVAL; ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret); return ret < 0 ? ret : n; }
0
[ "CWE-787" ]
linux
aa838896d87af561a33ecefea1caa4c15a68bc47
300,305,856,912,986,550,000,000,000,000,000,000,000
15
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions Convert the various sprintf fmaily calls in sysfs device show functions to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety. Done with: $ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 . And cocci script: $ cat sysfs_emit_dev.cocci @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - sprintf(buf, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - strcpy(buf, chr); + sysfs_emit(buf, chr); ...> } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - sprintf(buf, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... - len += scnprintf(buf + len, PAGE_SIZE - len, + len += sysfs_emit_at(buf, len, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { ... - strcpy(buf, chr); - return strlen(buf); + return sysfs_emit(buf, chr); } Signed-off-by: Joe Perches <[email protected]> Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com Signed-off-by: Greg Kroah-Hartman <[email protected]>
numLinesInBuffer (Compressor * compressor) { return compressor? compressor->numScanLines(): 1; }
0
[ "CWE-125" ]
openexr
e79d2296496a50826a15c667bf92bdc5a05518b4
232,365,722,979,620,730,000,000,000,000,000,000,000
4
fix memory leaks and invalid memory accesses Signed-off-by: Peter Hillman <[email protected]>
static struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast) { struct sk_buff *skb; void *data; if (size <= NLMSG_GOODSIZE || broadcast) return alloc_skb(size, GFP_KERNEL); size = SKB_DATA_ALIGN(size) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); data = vmalloc(size); if (data == NULL) return NULL; skb = build_skb(data, size); if (skb == NULL) vfree(data); else { skb->head_frag = 0; skb->destructor = netlink_skb_destructor; } return skb; }
0
[ "CWE-20", "CWE-269" ]
linux
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
155,812,317,339,730,730,000,000,000,000,000,000,000
26
net: rework recvmsg handler msg_name and msg_namelen logic This patch now always passes msg->msg_namelen as 0. recvmsg handlers must set msg_namelen to the proper size <= sizeof(struct sockaddr_storage) to return msg_name to the user. This prevents numerous uninitialized memory leaks we had in the recvmsg handlers and makes it harder for new code to accidentally leak uninitialized memory. Optimize for the case recvfrom is called with NULL as address. We don't need to copy the address at all, so set it to NULL before invoking the recvmsg handler. We can do so, because all the recvmsg handlers must cope with the case a plain read() is called on them. read() also sets msg_name to NULL. Also document these changes in include/linux/net.h as suggested by David Miller. Changes since RFC: Set msg->msg_name = NULL if user specified a NULL in msg_name but had a non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't affect sendto as it would bail out earlier while trying to copy-in the address. It also more naturally reflects the logic by the callers of verify_iovec. With this change in place I could remove " if (!uaddr || msg_sys->msg_namelen == 0) msg->msg_name = NULL ". This change does not alter the user visible error logic as we ignore msg_namelen as long as msg_name is NULL. Also remove two unnecessary curly brackets in ___sys_recvmsg and change comments to netdev style. Cc: David Miller <[email protected]> Suggested-by: Eric Dumazet <[email protected]> Signed-off-by: Hannes Frederic Sowa <[email protected]> Signed-off-by: David S. Miller <[email protected]>
global_notify_fifo(vector_t *strvec) { notify_fifo(strvec, "", &global_data->notify_fifo); }
0
[ "CWE-200" ]
keepalived
c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067
250,825,400,076,588,060,000,000,000,000,000,000,000
4
Add command line and configuration option to set umask Issue #1048 identified that files created by keepalived are created with mode 0666. This commit changes the default to 0644, and also allows the umask to be specified in the configuration or as a command line option. Signed-off-by: Quentin Armitage <[email protected]>
static int pool_can_expand_by(struct mempool *pool) { size_t available_space = 0; if (pool == NULL) { return 0; } if (pool->mem_get_free_space_func) { available_space = pool->mem_get_free_space_func(); } else { const size_t allocated_space = pool->total_bufs_allocated_by_pool * pool->mempool_item_size; if (pool->max_memory_threshold > allocated_space) { available_space = pool->max_memory_threshold - allocated_space; } } // We can expand by at least (available_space / pool->mempool_item_size) // buffer count return available_space / pool->mempool_item_size; }
0
[ "CWE-703" ]
cortx-s3server
dd6bfbc4b84f14f898598922ca9efd5aaa645c5d
155,765,425,777,455,280,000,000,000,000,000,000,000
22
avoid the unrelesed lock after the method returns (#1041) Update s3_memory_pool.c. Avoid the unreleased lock pool->lock after the method returns. Co-authored-by: nileshgovande <[email protected]>
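The change summarized above is about never returning while pool->lock is still held. A minimal pthread-based sketch of that discipline follows; it is not the s3server code, and the structure and names are hypothetical.

```c
#include <pthread.h>
#include <stddef.h>

struct pool {
    pthread_mutex_t lock;
    size_t free_items;
};

/* The only early return happens before the lock is taken, and every path
 * that acquires the lock releases it before returning. */
static size_t pool_free_items(struct pool *p)
{
    size_t n;

    if (p == NULL)
        return 0;               /* nothing locked yet */
    pthread_mutex_lock(&p->lock);
    n = p->free_items;
    pthread_mutex_unlock(&p->lock);
    return n;
}
```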
static void __device_links_queue_sync_state(struct device *dev, struct list_head *list) { struct device_link *link; if (!dev_has_sync_state(dev)) return; if (dev->state_synced) return; list_for_each_entry(link, &dev->links.consumers, s_node) { if (!(link->flags & DL_FLAG_MANAGED)) continue; if (link->status != DL_STATE_ACTIVE) return; } /* * Set the flag here to avoid adding the same device to a list more * than once. This can happen if new consumers get added to the device * and probed before the list is flushed. */ dev->state_synced = true; if (WARN_ON(!list_empty(&dev->links.defer_hook))) return; get_device(dev); list_add_tail(&dev->links.defer_hook, list); }
0
[ "CWE-787" ]
linux
aa838896d87af561a33ecefea1caa4c15a68bc47
59,183,834,508,779,470,000,000,000,000,000,000,000
30
drivers core: Use sysfs_emit and sysfs_emit_at for show(device *...) functions Convert the various sprintf fmaily calls in sysfs device show functions to sysfs_emit and sysfs_emit_at for PAGE_SIZE buffer safety. Done with: $ spatch -sp-file sysfs_emit_dev.cocci --in-place --max-width=80 . And cocci script: $ cat sysfs_emit_dev.cocci @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - sprintf(buf, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... return - strcpy(buf, chr); + sysfs_emit(buf, chr); ...> } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - sprintf(buf, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - snprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... len = - scnprintf(buf, PAGE_SIZE, + sysfs_emit(buf, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; identifier len; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { <... - len += scnprintf(buf + len, PAGE_SIZE - len, + len += sysfs_emit_at(buf, len, ...); ...> return len; } @@ identifier d_show; identifier dev, attr, buf; expression chr; @@ ssize_t d_show(struct device *dev, struct device_attribute *attr, char *buf) { ... - strcpy(buf, chr); - return strlen(buf); + return sysfs_emit(buf, chr); } Signed-off-by: Joe Perches <[email protected]> Link: https://lore.kernel.org/r/3d033c33056d88bbe34d4ddb62afd05ee166ab9a.1600285923.git.joe@perches.com Signed-off-by: Greg Kroah-Hartman <[email protected]>
static void get_contype_from_attrs(map<string, bufferlist>& attrs, string& content_type) { map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_CONTENT_TYPE); if (iter != attrs.end()) { content_type = rgw_bl_str(iter->second); } }
0
[ "CWE-617" ]
ceph
f44a8ae8aa27ecef69528db9aec220f12492810e
80,707,342,663,950,650,000,000,000,000,000,000,000
8
rgw: RGWSwiftWebsiteHandler::is_web_dir checks empty subdir_name checking for empty name avoids later assertion in RGWObjectCtx::set_atomic Fixes: CVE-2021-3531 Reviewed-by: Casey Bodley <[email protected]> Signed-off-by: Casey Bodley <[email protected]> (cherry picked from commit 7196a469b4470f3c8628489df9a41ec8b00a5610)
int RGWPostObj_ObjStore::verify_params() { /* check that we have enough memory to store the object note that this test isn't exact and may fail unintentionally for large requests is */ if (!s->length) { return -ERR_LENGTH_REQUIRED; } off_t len = atoll(s->length); if (len > (off_t)(s->cct->_conf->rgw_max_put_size)) { return -ERR_TOO_LARGE; } supplied_md5_b64 = s->info.env->get("HTTP_CONTENT_MD5"); return 0; }
0
[ "CWE-770" ]
ceph
ab29bed2fc9f961fe895de1086a8208e21ddaddc
111,588,217,964,383,320,000,000,000,000,000,000,000
17
rgw: fix issues with 'enforce bounds' patch The patch to enforce bounds on max-keys/max-uploads/max-parts had a few issues that would prevent us from compiling it. Instead of changing the code provided by the submitter, we're addressing them in a separate commit to maintain the DCO. Signed-off-by: Joao Eduardo Luis <[email protected]> Signed-off-by: Abhishek Lekshmanan <[email protected]> (cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a) mimic specific fixes: As the largeish change from master g_conf() isn't in mimic yet, use the g_conf global structure, also make rgw_op use the value from req_info ceph context as we do for all the requests
futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) { struct futex_hash_bucket *hb; struct futex_q *this, *next; union futex_key key = FUTEX_KEY_INIT; int ret; if (!bitset) return -EINVAL; ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ); if (unlikely(ret != 0)) goto out; hb = hash_futex(&key); /* Make sure we really have tasks to wakeup */ if (!hb_waiters_pending(hb)) goto out_put_key; spin_lock(&hb->lock); plist_for_each_entry_safe(this, next, &hb->chain, list) { if (match_futex (&this->key, &key)) { if (this->pi_state || this->rt_waiter) { ret = -EINVAL; break; } /* Check if one of the bits is set in both bitsets */ if (!(this->bitset & bitset)) continue; wake_futex(this); if (++ret >= nr_wake) break; } } spin_unlock(&hb->lock); out_put_key: put_futex_key(&key); out: return ret; }
0
[ "CWE-264", "CWE-269" ]
linux
e9c243a5a6de0be8e584c604d353412584b592f8
7,294,689,688,030,056,000,000,000,000,000,000,000
45
futex-prevent-requeue-pi-on-same-futex.patch futex: Forbid uaddr == uaddr2 in futex_requeue(..., requeue_pi=1) If uaddr == uaddr2, then we have broken the rule of only requeueing from a non-pi futex to a pi futex with this call. If we attempt this, then dangling pointers may be left for rt_waiter resulting in an exploitable condition. This change brings futex_requeue() in line with futex_wait_requeue_pi() which performs the same check as per commit 6f7b0a2a5c0f ("futex: Forbid uaddr == uaddr2 in futex_wait_requeue_pi()") [ tglx: Compare the resulting keys as well, as uaddrs might be different depending on the mapping ] Fixes CVE-2014-3153. Reported-by: Pinkie Pie Signed-off-by: Will Drewry <[email protected]> Signed-off-by: Kees Cook <[email protected]> Cc: [email protected] Signed-off-by: Thomas Gleixner <[email protected]> Reviewed-by: Darren Hart <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
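A hedged C sketch of the guard this message describes: refuse a PI requeue from a futex onto itself, both by user address and by the resolved keys (different uaddrs may alias the same mapping). The helper assumes futex.c internals (`union futex_key`, `match_futex()`) and is illustrative, not the verbatim upstream patch.

```c
/* Returns 0 if the requeue may proceed, -EINVAL on a same-futex PI requeue. */
static int futex_requeue_guard(u32 __user *uaddr1, u32 __user *uaddr2,
			       union futex_key *key1, union futex_key *key2,
			       int requeue_pi)
{
	if (requeue_pi) {
		/* Requeueing from a non-PI futex onto itself is never valid. */
		if (uaddr1 == uaddr2)
			return -EINVAL;
		/* Compare the resolved keys too, as per the commit message. */
		if (match_futex(key1, key2))
			return -EINVAL;
	}
	return 0;
}
```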
static void show_parameter(int snum, struct parm_struct *parm) { int i; void *ptr = parm->ptr; char *utf8_s1, *utf8_s2; size_t converted_size; TALLOC_CTX *ctx = talloc_stackframe(); if (parm->p_class == P_LOCAL && snum >= 0) { ptr = lp_local_ptr_by_snum(snum, ptr); } printf("<tr><td>%s</td><td>", get_parm_translated(ctx, stripspaceupper(parm->label), _("Help"), parm->label)); switch (parm->type) { case P_CHAR: printf("<input type=text size=2 name=\"parm_%s\" value=\"%c\">", make_parm_name(parm->label), *(char *)ptr); printf("<input type=button value=\"%s\" onClick=\"swatform.parm_%s.value=\'%c\'\">", _("Set Default"), make_parm_name(parm->label),(char)(parm->def.cvalue)); break; case P_LIST: printf("<input type=text size=40 name=\"parm_%s\" value=\"", make_parm_name(parm->label)); if ((char ***)ptr && *(char ***)ptr && **(char ***)ptr) { char **list = *(char ***)ptr; for (;*list;list++) { /* enclose in HTML encoded quotes if the string contains a space */ if ( strchr_m(*list, ' ') ) { push_utf8_talloc(talloc_tos(), &utf8_s1, *list, &converted_size); push_utf8_talloc(talloc_tos(), &utf8_s2, ((*(list+1))?", ":""), &converted_size); printf("&quot;%s&quot;%s", utf8_s1, utf8_s2); } else { push_utf8_talloc(talloc_tos(), &utf8_s1, *list, &converted_size); push_utf8_talloc(talloc_tos(), &utf8_s2, ((*(list+1))?", ":""), &converted_size); printf("%s%s", utf8_s1, utf8_s2); } TALLOC_FREE(utf8_s1); TALLOC_FREE(utf8_s2); } } printf("\">"); printf("<input type=button value=\"%s\" onClick=\"swatform.parm_%s.value=\'", _("Set Default"), make_parm_name(parm->label)); if (parm->def.lvalue) { char **list = (char **)(parm->def.lvalue); for (; *list; list++) { /* enclose in HTML encoded quotes if the string contains a space */ if ( strchr_m(*list, ' ') ) printf("&quot;%s&quot;%s", *list, ((*(list+1))?", ":"")); else printf("%s%s", *list, ((*(list+1))?", ":"")); } } printf("\'\">"); break; case P_STRING: case P_USTRING: push_utf8_talloc(talloc_tos(), &utf8_s1, *(char **)ptr, &converted_size); printf("<input type=text size=40 name=\"parm_%s\" value=\"%s\">", make_parm_name(parm->label), fix_quotes(ctx, utf8_s1)); TALLOC_FREE(utf8_s1); printf("<input type=button value=\"%s\" onClick=\"swatform.parm_%s.value=\'%s\'\">", _("Set Default"), make_parm_name(parm->label),fix_backslash((char *)(parm->def.svalue))); break; case P_BOOL: printf("<select name=\"parm_%s\">",make_parm_name(parm->label)); printf("<option %s>Yes", (*(bool *)ptr)?"selected":""); printf("<option %s>No", (*(bool *)ptr)?"":"selected"); printf("</select>"); printf("<input type=button value=\"%s\" onClick=\"swatform.parm_%s.selectedIndex=\'%d\'\">", _("Set Default"), make_parm_name(parm->label),(bool)(parm->def.bvalue)?0:1); break; case P_BOOLREV: printf("<select name=\"parm_%s\">",make_parm_name(parm->label)); printf("<option %s>Yes", (*(bool *)ptr)?"":"selected"); printf("<option %s>No", (*(bool *)ptr)?"selected":""); printf("</select>"); printf("<input type=button value=\"%s\" onClick=\"swatform.parm_%s.selectedIndex=\'%d\'\">", _("Set Default"), make_parm_name(parm->label),(bool)(parm->def.bvalue)?1:0); break; case P_INTEGER: printf("<input type=text size=8 name=\"parm_%s\" value=\"%d\">", make_parm_name(parm->label), *(int *)ptr); printf("<input type=button value=\"%s\" onClick=\"swatform.parm_%s.value=\'%d\'\">", _("Set Default"), make_parm_name(parm->label),(int)(parm->def.ivalue)); break; case P_OCTAL: { char *o; o = octal_string(*(int *)ptr); printf("<input type=text size=8 name=\"parm_%s\" value=%s>", make_parm_name(parm->label), o); 
TALLOC_FREE(o); o = octal_string((int)(parm->def.ivalue)); printf("<input type=button value=\"%s\" " "onClick=\"swatform.parm_%s.value=\'%s\'\">", _("Set Default"), make_parm_name(parm->label), o); TALLOC_FREE(o); break; } case P_ENUM: printf("<select name=\"parm_%s\">",make_parm_name(parm->label)); for (i=0;parm->enum_list[i].name;i++) { if (i == 0 || parm->enum_list[i].value != parm->enum_list[i-1].value) { printf("<option %s>%s",(*(int *)ptr)==parm->enum_list[i].value?"selected":"",parm->enum_list[i].name); } } printf("</select>"); printf("<input type=button value=\"%s\" onClick=\"swatform.parm_%s.selectedIndex=\'%d\'\">", _("Set Default"), make_parm_name(parm->label),enum_index((int)(parm->def.ivalue),parm->enum_list)); break; case P_SEP: break; } printf("</td></tr>\n"); TALLOC_FREE(ctx); }
0
[]
samba
71225948a249f079120282740fcc39fd6faa880e
126,696,294,548,376,740,000,000,000,000,000,000,000
123
swat: Use X-Frame-Options header to avoid clickjacking Jann Horn reported a potential clickjacking vulnerability in SWAT where the SWAT page could be embedded into an attacker's page using a frame or iframe and then used to trick the user to change Samba settings. Avoid this by telling the browser to refuse the frame embedding via the X-Frame-Options: DENY header. Signed-off-by: Kai Blin <[email protected]> Fix bug #9576 - CVE-2013-0213: Clickjacking issue in SWAT.
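A hedged C sketch of the mitigation: emit the `X-Frame-Options: DENY` header with the rest of the CGI response headers, before the blank line that terminates them. The helper name and surrounding output are illustrative, not SWAT's actual code.

```c
#include <stdio.h>

/* Print CGI response headers; the X-Frame-Options line is the
 * anti-clickjacking fix: browsers refuse to embed the page in a frame. */
static void print_header(void)
{
	printf("Content-Type: text/html\r\n");
	printf("X-Frame-Options: DENY\r\n");
	printf("\r\n");	/* end of headers */
}
```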
void extcap_dlts() { u_int dlts_number = DLT_EN10MB; printf("dlt {number=%u}{name=%s}{display=%s}\n", dlts_number, "ndpi", "nDPI Interface"); exit(0); }
0
[ "CWE-125" ]
nDPI
b7e666e465f138ae48ab81976726e67deed12701
306,024,440,350,973,400,000,000,000,000,000,000,000
5
Added fix to avoid potential heap buffer overflow in H.323 dissector Modified HTTP report information to make it closer to the HTTP field names
RGWListBucketMultiparts() { max_uploads = 0; is_truncated = false; default_max = 0; }
0
[ "CWE-770" ]
ceph
ab29bed2fc9f961fe895de1086a8208e21ddaddc
95,685,286,180,521,430,000,000,000,000,000,000,000
5
rgw: fix issues with 'enforce bounds' patch The patch to enforce bounds on max-keys/max-uploads/max-parts had a few issues that would prevent us from compiling it. Instead of changing the code provided by the submitter, we're addressing them in a separate commit to maintain the DCO. Signed-off-by: Joao Eduardo Luis <[email protected]> Signed-off-by: Abhishek Lekshmanan <[email protected]> (cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a) mimic specific fixes: As the largeish change from master g_conf() isn't in mimic yet, use the g_conf global structure, also make rgw_op use the value from req_info ceph context as we do for all the requests
ModuleExport void UnregisterUndefinedImage(void) { }
0
[ "CWE-200", "CWE-362" ]
ImageMagick
01faddbe2711a4156180c4a92837e2f23683cc68
96,708,663,326,456,770,000,000,000,000,000,000,000
3
Use the correct rights.
static int tls1_alpn_handle_client_hello(SSL *s, PACKET *pkt, int *al) { PACKET protocol_list, save_protocol_list, protocol; *al = SSL_AD_DECODE_ERROR; if (!PACKET_as_length_prefixed_2(pkt, &protocol_list) || PACKET_remaining(&protocol_list) < 2) { return 0; } save_protocol_list = protocol_list; do { /* Protocol names can't be empty. */ if (!PACKET_get_length_prefixed_1(&protocol_list, &protocol) || PACKET_remaining(&protocol) == 0) { return 0; } } while (PACKET_remaining(&protocol_list) != 0); if (!PACKET_memdup(&save_protocol_list, &s->s3->alpn_proposed, &s->s3->alpn_proposed_len)) { *al = TLS1_AD_INTERNAL_ERROR; return 0; } return 1; }
0
[ "CWE-20" ]
openssl
4ad93618d26a3ea23d36ad5498ff4f59eff3a4d2
224,654,170,749,654,900,000,000,000,000,000,000,000
28
Don't change the state of the ETM flags until CCS processing Changing the ciphersuite during a renegotiation can result in a crash leading to a DoS attack. ETM has not been implemented in 1.1.0 for DTLS so this is TLS only. The problem is caused by changing the flag indicating whether to use ETM or not immediately on negotiation of ETM, rather than at CCS. Therefore, during a renegotiation, if the ETM state is changing (usually due to a change of ciphersuite), then an error/crash will occur. Due to the fact that there are separate CCS messages for read and write we actually now need two flags to determine whether to use ETM or not. CVE-2017-3733 Reviewed-by: Richard Levitte <[email protected]>
static double mp_self_map_vector_v(_cimg_math_parser& mp) { // Vector += vector unsigned int ptrd = (unsigned int)mp.opcode[1] + 1, siz = (unsigned int)mp.opcode[2], ptrs = (unsigned int)mp.opcode[4] + 1; mp_func op = (mp_func)mp.opcode[3]; CImg<ulongT> l_opcode(1,4); l_opcode.swap(mp.opcode); ulongT &target = mp.opcode[1], &argument = mp.opcode[2]; while (siz-->0) { target = ptrd++; argument = ptrs++; (*op)(mp); } l_opcode.swap(mp.opcode); return cimg::type<double>::nan();
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
295,452,241,023,526,800,000,000,000,000,000,000,000
13
Fix other issues in 'CImg<T>::load_bmp()'.
static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment) { struct fastrpc_dma_buf_attachment *a; struct fastrpc_buf *buffer = dmabuf->priv; int ret; a = kzalloc(sizeof(*a), GFP_KERNEL); if (!a) return -ENOMEM; ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt, FASTRPC_PHYS(buffer->phys), buffer->size); if (ret < 0) { dev_err(buffer->dev, "failed to get scatterlist from DMA API\n"); return -EINVAL; } a->dev = attachment->dev; INIT_LIST_HEAD(&a->node); attachment->priv = a; mutex_lock(&buffer->lock); list_add(&a->node, &buffer->attachments); mutex_unlock(&buffer->lock); return 0; }
1
[ "CWE-400", "CWE-401" ]
linux
fc739a058d99c9297ef6bfd923b809d85855b9a9
144,310,532,328,839,810,000,000,000,000,000,000,000
28
misc: fastrpc: prevent memory leak in fastrpc_dma_buf_attach In fastrpc_dma_buf_attach if dma_get_sgtable fails the allocated memory for a should be released. Signed-off-by: Navid Emamdoost <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Greg Kroah-Hartman <[email protected]>
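Since the record above stores the leaky version (target = 1), here is a hedged sketch of the attach callback with the missing error-path cleanup added; it mirrors the function in the record but is not necessarily the verbatim upstream patch.

```c
static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a;
	struct fastrpc_buf *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
			      FASTRPC_PHYS(buffer->phys), buffer->size);
	if (ret < 0) {
		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
		kfree(a);	/* the leak the commit message refers to */
		return -EINVAL;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->node);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->node, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}
```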
::testing::AssertionResult operator()(const char *, const T &value) const { ::testing::AssertionResult result = check_write<char>(value, "char"); return result ? check_write<wchar_t>(value, "wchar_t") : result; }
0
[ "CWE-134", "CWE-119", "CWE-787" ]
fmt
8cf30aa2be256eba07bb1cefb998c52326e846e7
40,198,368,437,229,983,000,000,000,000,000,000,000
4
Fix segfault on complex pointer formatting (#642)
static int randstring(char *target, unsigned int min, unsigned int max) { int p = 0; int len = min+rand()%(max-min+1); int minval, maxval; switch(rand() % 3) { case 0: minval = 0; maxval = 255; break; case 1: minval = 48; maxval = 122; break; case 2: minval = 48; maxval = 52; break; default: assert(NULL); } while(p < len) target[p++] = minval+rand()%(maxval-minval+1); return len; }
0
[ "CWE-190" ]
redis
f6a40570fa63d5afdd596c78083d754081d80ae3
138,102,221,595,365,540,000,000,000,000,000,000,000
25
Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628) - fix possible heap corruption in ziplist and listpack resulting from trying to allocate more than the maximum size of 4GB. - prevent ziplist (hash and zset) from reaching a size above 1GB; it will be converted to HT encoding instead, since that's not a useful size. - prevent listpack (stream) from reaching a size above 1GB. - XADD will start a new listpack if the new record may cause the previous listpack to grow over 1GB. - XADD will respond with an error if a single stream record is over 1GB. - List type (ziplist in quicklist) was truncating strings that were over 4GB; now it'll respond with an error.
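A hedged, self-contained C sketch of the kind of bound check the message describes: refuse to grow a ziplist/listpack-style buffer past a hard safety limit so its internal 32-bit length fields cannot overflow. The constant and helper names are illustrative, not necessarily Redis's.

```c
#include <stddef.h>
#include <stdint.h>

#define SIZE_SAFETY_LIMIT (1024ULL * 1024ULL * 1024ULL)	/* 1 GB */

/* Returns non-zero if appending add_len bytes keeps the encoding within
 * the safety limit; callers fall back to another encoding otherwise. */
static int safe_to_add(size_t current_len, size_t add_len)
{
	uint64_t total = (uint64_t)current_len + (uint64_t)add_len;

	return total <= SIZE_SAFETY_LIMIT;
}
```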
OVS_REQUIRES(ct->ct_lock) { hmap_remove(&ct->zone_limits, &zl->node); free(zl); }
0
[ "CWE-400" ]
ovs
79349cbab0b2a755140eedb91833ad2760520a83
242,921,071,591,816,570,000,000,000,000,000,000,000
5
flow: Support extra padding length. Although not required, padding can be optionally added until the packet length is MTU bytes. A packet with extra padding currently fails sanity checks. Vulnerability: CVE-2020-35498 Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.") Reported-by: Joakim Hindersson <[email protected]> Acked-by: Ilya Maximets <[email protected]> Signed-off-by: Flavio Leitner <[email protected]> Signed-off-by: Ilya Maximets <[email protected]>
GF_Err ssix_box_read(GF_Box *s, GF_BitStream *bs) { u32 i,j; GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox*)s; ISOM_DECREASE_SIZE(ptr, 4) ptr->subsegment_count = gf_bs_read_u32(bs); //each subseg has at least one range_count (4 bytes), abort if not enough bytes (broken box) if (ptr->size < ptr->subsegment_count*4) return GF_ISOM_INVALID_FILE; GF_SAFE_ALLOC_N(ptr->subsegments, ptr->subsegment_count, GF_SubsegmentInfo); if (!ptr->subsegments) return GF_OUT_OF_MEM; for (i = 0; i < ptr->subsegment_count; i++) { GF_SubsegmentInfo *subseg = &ptr->subsegments[i]; ISOM_DECREASE_SIZE(ptr, 4) subseg->range_count = gf_bs_read_u32(bs); //each range is 4 bytes, abort if not enough bytes if (ptr->size < subseg->range_count*4) return GF_ISOM_INVALID_FILE; subseg->ranges = (GF_SubsegmentRangeInfo*) gf_malloc(sizeof(GF_SubsegmentRangeInfo) * subseg->range_count); if (!subseg->ranges) return GF_OUT_OF_MEM; for (j = 0; j < subseg->range_count; j++) { ISOM_DECREASE_SIZE(ptr, 4) subseg->ranges[j].level = gf_bs_read_u8(bs); subseg->ranges[j].range_size = gf_bs_read_u24(bs); } } return GF_OK;
0
[ "CWE-787" ]
gpac
388ecce75d05e11fc8496aa4857b91245007d26e
334,845,234,980,822,900,000,000,000,000,000,000,000
31
fixed #1587
static int __dccp_feat_activate(struct sock *sk, const int idx, const bool is_local, dccp_feat_val const *fval) { bool rx; u64 val; if (idx < 0 || idx >= DCCP_FEAT_SUPPORTED_MAX) return -1; if (dccp_feat_table[idx].activation_hdlr == NULL) return 0; if (fval == NULL) { val = dccp_feat_table[idx].default_value; } else if (dccp_feat_table[idx].reconciliation == FEAT_SP) { if (fval->sp.vec == NULL) { /* * This can happen when an empty Confirm is sent * for an SP (i.e. known) feature. In this case * we would be using the default anyway. */ DCCP_CRIT("Feature #%d undefined: using default", idx); val = dccp_feat_table[idx].default_value; } else { val = fval->sp.vec[0]; } } else { val = fval->nn; } /* Location is RX if this is a local-RX or remote-TX feature */ rx = (is_local == (dccp_feat_table[idx].rxtx == FEAT_AT_RX)); dccp_debug(" -> activating %s %s, %sval=%llu\n", rx ? "RX" : "TX", dccp_feat_fname(dccp_feat_table[idx].feat_num), fval ? "" : "default ", (unsigned long long)val); return dccp_feat_table[idx].activation_hdlr(sk, val, rx); }
0
[ "CWE-401" ]
linux
1d3ff0950e2b40dc861b1739029649d03f591820
170,757,646,472,769,750,000,000,000,000,000,000,000
38
dccp: Fix memleak in __feat_register_sp If dccp_feat_push_change fails, we forget to free the memory which is allocated by kmemdup in dccp_feat_clone_sp_val. Reported-by: Hulk Robot <[email protected]> Fixes: e8ef967a54f4 ("dccp: Registration routines for changing feature values") Reviewed-by: Mukesh Ojha <[email protected]> Signed-off-by: YueHaibing <[email protected]> Signed-off-by: David S. Miller <[email protected]>
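A hedged C sketch of the fix described above: when queueing the feature change fails, release the vector duplicated by kmemdup(). It follows the shape of `__feat_register_sp()` but is illustrative, not the verbatim patch.

```c
static int feat_register_sp_sketch(struct list_head *fn, u8 feat, u8 is_local,
				   u8 mandatory, const u8 *sp_val, u8 sp_len)
{
	dccp_feat_val fval;

	/* dccp_feat_clone_sp_val() kmemdup()s sp_val into fval.sp.vec */
	if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len))
		return -ENOMEM;

	if (dccp_feat_push_change(fn, feat, is_local, mandatory, &fval)) {
		kfree(fval.sp.vec);	/* the leak fixed by this commit */
		return -ENOMEM;
	}
	return 0;
}
```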
z_check_file_permissions(gs_memory_t *mem, const char *fname, const int len, const char *permission) { i_ctx_t *i_ctx_p = get_minst_from_memory(mem)->i_ctx_p; gs_parsed_file_name_t pname; const char *permitgroup = permission[0] == 'r' ? "PermitFileReading" : "PermitFileWriting"; int code = gs_parse_file_name(&pname, fname, len, imemory); if (code < 0) return code; if (pname.iodev && i_ctx_p->LockFilePermissions && strcmp(pname.iodev->dname, "%pipe%") == 0) return gs_error_invalidfileaccess; code = check_file_permissions(i_ctx_p, fname, len, permitgroup); return code; }
1
[ "CWE-200" ]
ghostpdl
b60d50b7567369ad856cebe1efb6cd7dd2284219
165,434,629,871,372,930,000,000,000,000,000,000,000
15
Bug 697193: status operator honour SAFER option
int main(int argc, char **argv) { BIO *bio_err; bio_err = BIO_new_fp(stderr, BIO_NOCLOSE); CRYPTO_malloc_debug_init(); CRYPTO_dbg_set_options(V_CRYPTO_MDEBUG_ALL); CRYPTO_mem_ctrl(CRYPTO_MEM_CHECK_ON); ERR_load_crypto_strings(); /* "Negative" test, expect a mismatch */ if(run_srp("alice", "password1", "password2") == 0) { fprintf(stderr, "Mismatched SRP run failed\n"); return 1; } /* "Positive" test, should pass */ if(run_srp("alice", "password", "password") != 0) { fprintf(stderr, "Plain SRP run failed\n"); return 1; } CRYPTO_cleanup_all_ex_data(); ERR_remove_thread_state(NULL); ERR_free_strings(); CRYPTO_mem_leaks(bio_err); return 0; }
0
[]
openssl
edc032b5e3f3ebb1006a9c89e0ae00504f47966f
334,602,765,464,397,700,000,000,000,000,000,000,000
32
Add SRP support.
uint32_t mobi_get_orth_entry_length(const MOBIIndexEntry *entry) { uint32_t entry_textlen; MOBI_RET ret = mobi_get_indxentry_tagvalue(&entry_textlen, entry, INDX_TAG_ORTH_LENGTH); if (ret != MOBI_SUCCESS) { return MOBI_NOTSET; } return entry_textlen; }
0
[ "CWE-125", "CWE-787" ]
libmobi
eafc415bc6067e72577f70d6dd5acbf057ce6e6f
57,179,491,527,465,200,000,000,000,000,000,000,000
10
Fix wrong boundary checks in inflections parser resulting in stack buffer over-read with corrupt input
inline void perf_swevent_put_recursion_context(int rctx) { struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); put_recursion_context(swhash->recursion, rctx); }
0
[ "CWE-284", "CWE-264" ]
linux
f63a8daa5812afef4f06c962351687e1ff9ccb2b
33,818,836,664,796,826,000,000,000,000,000,000,000
6
perf: Fix event->ctx locking There have been a few reported issues wrt. the lack of locking around changing event->ctx. This patch tries to address those. It avoids the whole rwsem thing; and while it appears to work, please give it some thought in review. What I did fail at is sensible runtime checks on the use of event->ctx, the RCU use makes it very hard. Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Cc: Paul E. McKenney <[email protected]> Cc: Jiri Olsa <[email protected]> Cc: Arnaldo Carvalho de Melo <[email protected]> Cc: Linus Torvalds <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) { int connected; connected = unix_dgram_peer_wake_connect(sk, other); /* If other is SOCK_DEAD, we want to make sure we signal * POLLOUT, such that a subsequent write() can get a * -ECONNREFUSED. Otherwise, if we haven't queued any skbs * to other and its full, we will hang waiting for POLLOUT. */ if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD)) return 1; if (connected) unix_dgram_peer_wake_disconnect(sk, other); return 0; }
0
[ "CWE-362" ]
linux
cbcf01128d0a92e131bd09f1688fe032480b65ca
263,061,593,178,650,180,000,000,000,000,000,000,000
19
af_unix: fix garbage collect vs MSG_PEEK unix_gc() assumes that candidate sockets can never gain an external reference (i.e. be installed into an fd) while the unix_gc_lock is held. Except for MSG_PEEK this is guaranteed by modifying inflight count under the unix_gc_lock. MSG_PEEK does not touch any variable protected by unix_gc_lock (file count is not), yet it needs to be serialized with garbage collection. Do this by locking/unlocking unix_gc_lock: 1) increment file count 2) lock/unlock barrier to make sure incremented file count is visible to garbage collection 3) install file into fd This is a lock barrier (unlike smp_mb()) that ensures that garbage collection is run completely before or completely after the barrier. Cc: <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]> Signed-off-by: Miklos Szeredi <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
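A hedged C sketch of the three-step protocol in the message: take the extra file reference first, use `unix_gc_lock` purely as a lock/unlock barrier against a concurrent `unix_gc()`, and only then install the file into an fd. The helper is illustrative, not the verbatim patch.

```c
static void unix_peek_fd_sketch(struct file *fp)
{
	/* 1) increment the file count before the collector can run */
	get_file(fp);

	/* 2) lock/unlock barrier: any unix_gc() either finished before this
	 *    or will observe the incremented count afterwards */
	spin_lock(&unix_gc_lock);
	spin_unlock(&unix_gc_lock);

	/* 3) only now is it safe to install fp into a file descriptor */
}
```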
int gg_send_packet(struct gg_session *sess, int type, ...) { struct gg_header *h; char *tmp; unsigned int tmp_length; void *payload; unsigned int payload_length; va_list ap; int res; gg_debug_session(sess, GG_DEBUG_FUNCTION, "** gg_send_packet(%p, 0x%.2x, ...);\n", sess, type); tmp_length = sizeof(struct gg_header); if (!(tmp = malloc(tmp_length))) { gg_debug_session(sess, GG_DEBUG_ERROR, "// gg_send_packet() not enough memory for packet header\n"); return -1; } va_start(ap, type); payload = va_arg(ap, void *); while (payload) { char *tmp2; payload_length = va_arg(ap, unsigned int); if (!(tmp2 = realloc(tmp, tmp_length + payload_length))) { gg_debug_session(sess, GG_DEBUG_ERROR, "// gg_send_packet() not enough memory for payload\n"); free(tmp); va_end(ap); return -1; } tmp = tmp2; memcpy(tmp + tmp_length, payload, payload_length); tmp_length += payload_length; payload = va_arg(ap, void *); } va_end(ap); h = (struct gg_header*) tmp; h->type = gg_fix32(type); h->length = gg_fix32(tmp_length - sizeof(struct gg_header)); gg_debug_session(sess, GG_DEBUG_MISC, "// gg_send_packet(type=0x%.2x, length=%d)\n", gg_fix32(h->type), gg_fix32(h->length)); gg_debug_dump(sess, GG_DEBUG_DUMP, tmp, tmp_length); res = gg_write(sess, tmp, tmp_length); free(tmp); if (res == -1) { gg_debug_session(sess, GG_DEBUG_ERROR, "// gg_send_packet() write() failed. res = %d, errno = %d (%s)\n", res, errno, strerror(errno)); return -1; } if (sess->async) gg_debug_session(sess, GG_DEBUG_NET, "// gg_send_packet() partial write(), %d sent, %d left, %d total left\n", res, tmp_length - res, sess->send_left); if (sess->send_buf) sess->check |= GG_CHECK_WRITE; return 0; }
0
[ "CWE-310" ]
libgadu
23644f1fb8219031b3cac93289a588b05f90226b
73,213,885,835,469,380,000,000,000,000,000,000,000
69
Fix for limiting the description length.
int cmp_item_time::cmp(Item *arg) { const bool rc= value != arg->val_time_packed(current_thd); return (m_null_value || arg->null_value) ? UNKNOWN : rc; }
0
[ "CWE-617" ]
server
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
74,698,051,251,667,560,000,000,000,000,000,000,000
5
MDEV-26402: A SEGV in Item_field::used_tables/update_depend_map_for_order... When doing condition pushdown from HAVING into WHERE, Item_equal::create_pushable_equalities() calls item->set_extraction_flag(IMMUTABLE_FL) for constant items. Then, Item::cleanup_excluding_immutables_processor() checks for this flag to see if it should call item->cleanup() or leave the item as-is. The failure happens when a constant item has a non-constant one inside it, like: (tbl.col=0 AND impossible_cond) item->walk(cleanup_excluding_immutables_processor) works in a bottom-up way so it 1. will call Item_func_eq(tbl.col=0)->cleanup() 2. will not call Item_cond_and->cleanup (as the AND is constant) This creates an item tree where a fixed Item has an un-fixed Item inside it which eventually causes an assertion failure. Fixed by introducing this rule: instead of just calling item->set_extraction_flag(IMMUTABLE_FL); we call Item::walk() to set the flag for all sub-items of the item.
static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, struct net_device *upper_dev) { __netdev_adjacent_dev_unlink(dev, upper_dev); __netdev_adjacent_dev_unlink_lists(dev, upper_dev, &dev->adj_list.upper, &upper_dev->adj_list.lower);
0
[ "CWE-400", "CWE-703" ]
linux
fac8e0f579695a3ecbc4d3cac369139d7f819971
323,546,161,257,309,900,000,000,000,000,000,000,000
8
tunnels: Don't apply GRO to multiple layers of encapsulation. When drivers express support for TSO of encapsulated packets, they only mean that they can do it for one layer of encapsulation. Supporting additional levels would mean updating, at a minimum, more IP length fields and they are unaware of this. No encapsulation device expresses support for handling offloaded encapsulated packets, so we won't generate these types of frames in the transmit path. However, GRO doesn't have a check for multiple levels of encapsulation and will attempt to build them. UDP tunnel GRO actually does prevent this situation but it only handles multiple UDP tunnels stacked on top of each other. This generalizes that solution to prevent any kind of tunnel stacking that would cause problems. Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack") Signed-off-by: Jesse Gross <[email protected]> Signed-off-by: David S. Miller <[email protected]>
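A hedged C sketch of the "single level of encapsulation" guard the message describes: a per-skb GRO flag set by the first tunnel layer makes any further encapsulated layer bail out of GRO. The `encap_mark` field name follows my reading of the upstream fix; treat the helper as illustrative.

```c
/* Called from a tunnel's gro_receive path before building an inner frame. */
static int gro_check_single_encap(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->encap_mark)
		return -EINVAL;		/* already inside a tunnel: skip GRO */

	NAPI_GRO_CB(skb)->encap_mark = 1;
	return 0;
}
```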
message_add_overwrite_header (SoupMessage *msg, gboolean overwrite) { soup_message_headers_append (msg->request_headers, "Overwrite", overwrite ? "T" : "F"); }
0
[]
gvfs
f81ff2108ab3b6e370f20dcadd8708d23f499184
181,381,400,065,573,200,000,000,000,000,000,000,000
7
dav: don't unescape the uri twice path_equal tries to unescape the path before comparing. Unfortunately this function is also used for already unescaped paths. Therefore unescaping can fail. This commit reverts the changes which were done in commit 50af53d and unescapes just the URIs which aren't unescaped yet. https://bugzilla.gnome.org/show_bug.cgi?id=743298
static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_removeres *res) { struct xdr_stream xdr; struct compound_hdr hdr; int status; xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); if ((status = decode_compound_hdr(&xdr, &hdr)) != 0) goto out; if ((status = decode_putfh(&xdr)) != 0) goto out; if ((status = decode_remove(&xdr, &res->cinfo)) != 0) goto out; decode_getfattr(&xdr, &res->dir_attr, res->server); out: return status; }
0
[ "CWE-703" ]
linux
dc0b027dfadfcb8a5504f7d8052754bf8d501ab9
308,178,286,492,292,840,000,000,000,000,000,000,000
17
NFSv4: Convert the open and close ops to use fmode Signed-off-by: Trond Myklebust <[email protected]>
Document getSpec() { return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 2) << DOC_ARRAY(1)) << "expected" << DOC("$setIsSubset" << false << "$setEquals" << false << "$setIntersection" << DOC_ARRAY(1) << "$setUnion" << DOC_ARRAY(1 << 2) << "$setDifference" << DOC_ARRAY(2))); }
0
[ "CWE-835" ]
mongo
0a076417d1d7fba3632b73349a1fd29a83e68816
264,668,036,773,332,100,000,000,000,000,000,000,000
10
SERVER-38070 fix infinite loop in agg expression
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs, bool base_only) { union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only); role.base.ad_disabled = (shadow_accessed_mask == 0); role.base.level = kvm_mmu_get_tdp_level(vcpu); role.base.direct = true; role.base.has_4_byte_gpte = false; return role; }
0
[ "CWE-476" ]
linux
9f46c187e2e680ecd9de7983e4d081c3391acc76
280,113,999,294,457,500,000,000,000,000,000,000,000
12
KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID With shadow paging enabled, the INVPCID instruction results in a call to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the invlpg callback is not set and the result is a NULL pointer dereference. Fix it trivially by checking for mmu->invlpg before every call. There are other possibilities: - check for CR0.PG, because KVM (like all Intel processors after P5) flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a nop with paging disabled - check for EFER.LMA, because KVM syncs and flushes when switching MMU contexts outside of 64-bit mode All of these are tricky, go for the simple solution. This is CVE-2022-1789. Reported-by: Yongkang Jia <[email protected]> Cc: [email protected] Signed-off-by: Paolo Bonzini <[email protected]>
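A hedged C sketch of the trivial fix named in the message: guard the `->invlpg()` call with a NULL check, since with CR0.PG=0 the callback is never installed. Simplified from what `kvm_mmu_invpcid_gva()` would do; not the verbatim patch.

```c
static void mmu_invlpg_checked(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       gva_t gva, hpa_t root_hpa)
{
	/* With paging disabled the callback is never set; skip instead of
	 * dereferencing a NULL function pointer. */
	if (mmu->invlpg)
		mmu->invlpg(vcpu, gva, root_hpa);
}
```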
static void incoming_probe(AvahiServer *s, AvahiRecord *record, AvahiInterface *i) { AvahiEntry *e, *n; int ours = 0, won = 0, lost = 0; assert(s); assert(record); assert(i); /* Handle incoming probes and check if they conflict our own probes */ for (e = avahi_hashmap_lookup(s->entries_by_key, record->key); e; e = n) { int cmp; n = e->by_key_next; if (e->dead) continue; if ((cmp = avahi_record_lexicographical_compare(e->record, record)) == 0) { ours = 1; break; } else { if (avahi_entry_is_probing(s, e, i)) { if (cmp > 0) won = 1; else /* cmp < 0 */ lost = 1; } } } if (!ours) { char *t = avahi_record_to_string(record); if (won) avahi_log_debug("Received conflicting probe [%s]. Local host won.", t); else if (lost) { avahi_log_debug("Received conflicting probe [%s]. Local host lost. Withdrawing.", t); withdraw_rrset(s, record->key); } avahi_free(t); } }
0
[ "CWE-399" ]
avahi
3093047f1aa36bed8a37fa79004bf0ee287929f4
274,563,230,254,037,700,000,000,000,000,000,000,000
44
Don't get confused by UDP packets with a source port that is zero This is a fix for rhbz 475394. Problem identified by Hugo Dias.
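A hedged, self-contained C sketch of the check implied by the message: drop incoming datagrams whose UDP source port is 0 instead of trying to reply to them. The helper name is illustrative.

```c
#include <stdint.h>

/* Returns 1 if the datagram may be processed, 0 if it should be dropped. */
static int dgram_source_port_is_valid(uint16_t src_port)
{
	if (src_port == 0)
		return 0;	/* no legal reply destination: ignore it */
	return 1;
}
```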
GF_BitStream *gf_bs_new_cbk(GF_Err (*on_block_out)(void *cbk, u8 *data, u32 block_size), void *usr_data, u32 block_size) { return gf_bs_new_cbk_buffer(on_block_out, usr_data, NULL, block_size); }
0
[ "CWE-617", "CWE-703" ]
gpac
9ea93a2ec8f555ceed1ee27294cf94822f14f10f
52,733,131,115,282,970,000,000,000,000,000,000,000
5
fixed #2165
void minf_del(GF_Box *s) { GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s; if (ptr == NULL) return; //if we have a Handler not self-contained, delete it (the self-contained belongs to the movie) if (ptr->dataHandler) { gf_isom_datamap_close(ptr); } if (ptr->InfoHeader) gf_isom_box_del((GF_Box *)ptr->InfoHeader); if (ptr->dataInformation) gf_isom_box_del((GF_Box *)ptr->dataInformation); if (ptr->sampleTable) gf_isom_box_del((GF_Box *)ptr->sampleTable); gf_free(ptr); }
0
[ "CWE-400", "CWE-401" ]
gpac
d2371b4b204f0a3c0af51ad4e9b491144dd1225c
256,674,550,784,849,060,000,000,000,000,000,000,000
14
prevent dref memleak on invalid input (#1183)
static bool checkreturn decode_extension(pb_istream_t *stream, uint32_t tag, pb_wire_type_t wire_type, pb_extension_t *extension) { size_t pos = stream->bytes_left; while (extension != NULL && pos == stream->bytes_left) { bool status; if (extension->type->decode) status = extension->type->decode(stream, extension, tag, wire_type); else status = default_extension_decoder(stream, extension, tag, wire_type); if (!status) return false; extension = extension->next; } return true; }
0
[ "CWE-763" ]
nanopb
e2f0ccf939d9f82931d085acb6df8e9a182a4261
225,116,914,994,178,660,000,000,000,000,000,000,000
21
Fix invalid free() with oneof (#647) Nanopb would call free() or realloc() on an invalid (attacker controlled) pointer value when all the following conditions are true: - PB_ENABLE_MALLOC is defined at the compile time - Message definition contains an oneof field, and the oneof contains at least one pointer type field and at least one non-pointer type field. - Data being decoded first contains a non-pointer value for the oneof field, and later contains an overwriting pointer value. Depending on message layout, the bug may not be exploitable in all cases, but it is known to be exploitable at least with string and bytes fields. Actual security impact will also depend on the heap implementation used.
bool lftp_ssl_openssl::want_in() { return SSL_want_read(ssl); }
0
[ "CWE-310" ]
lftp
6357bed2583171b7515af6bb6585cf56d2117e3f
77,576,719,367,272,140,000,000,000,000,000,000,000
4
use hostmatch function from latest curl (addresses CVE-2014-0139)
static void key_search_validate(struct extent_buffer *b, struct btrfs_key *key, int level) { #ifdef CONFIG_BTRFS_ASSERT struct btrfs_disk_key disk_key; btrfs_cpu_key_to_disk(&disk_key, key); if (level == 0) ASSERT(!memcmp_extent_buffer(b, &disk_key, offsetof(struct btrfs_leaf, items[0].key), sizeof(disk_key))); else ASSERT(!memcmp_extent_buffer(b, &disk_key, offsetof(struct btrfs_node, ptrs[0].key), sizeof(disk_key))); #endif }
0
[ "CWE-416", "CWE-362" ]
linux
5f5bc6b1e2d5a6f827bc860ef2dc5b6f365d1339
287,496,713,614,702,560,000,000,000,000,000,000,000
19
Btrfs: make xattr replace operations atomic Replacing a xattr consists of doing a lookup for its existing value, deleting the current value from the respective leaf, releasing the search path and then finally inserting the new value. This leaves a time window where readers (getxattr, listxattrs) won't see any value for the xattr. Xattrs are used to store ACLs, so this has security implications. This change also fixes 2 other existing issues which were: *) Deleting the old xattr value without verifying first if the new xattr will fit in the existing leaf item (in case multiple xattrs are packed in the same item due to name hash collision); *) Returning -EEXIST when the flag XATTR_CREATE is given and the xattr doesn't exist but we have an existing item that packs multiple xattrs with the same name hash as the input xattr. In this case we should return ENOSPC. A test case for xfstests follows soon. Thanks to Alexandre Oliva for reporting the non-atomicity of the xattr replace implementation. Reported-by: Alexandre Oliva <[email protected]> Signed-off-by: Filipe Manana <[email protected]> Signed-off-by: Chris Mason <[email protected]>
parse_vlan(const void **datap, size_t *sizep, union flow_vlan_hdr *vlan_hdrs) { const ovs_be16 *eth_type; data_pull(datap, sizep, ETH_ADDR_LEN * 2); eth_type = *datap; size_t n; for (n = 0; eth_type_vlan(*eth_type) && n < flow_vlan_limit; n++) { if (OVS_UNLIKELY(*sizep < sizeof(ovs_be32) + sizeof(ovs_be16))) { break; } memset(vlan_hdrs + n, 0, sizeof(union flow_vlan_hdr)); const ovs_16aligned_be32 *qp = data_pull(datap, sizep, sizeof *qp); vlan_hdrs[n].qtag = get_16aligned_be32(qp); vlan_hdrs[n].tci |= htons(VLAN_CFI); eth_type = *datap; } return n; }
0
[ "CWE-400" ]
ovs
79349cbab0b2a755140eedb91833ad2760520a83
318,933,919,578,963,540,000,000,000,000,000,000,000
22
flow: Support extra padding length. Although not required, padding can be optionally added until the packet length is MTU bytes. A packet with extra padding currently fails sanity checks. Vulnerability: CVE-2020-35498 Fixes: fa8d9001a624 ("miniflow_extract: Properly handle small IP packets.") Reported-by: Joakim Hindersson <[email protected]> Acked-by: Ilya Maximets <[email protected]> Signed-off-by: Flavio Leitner <[email protected]> Signed-off-by: Ilya Maximets <[email protected]>
GF_Err moov_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); }
0
[ "CWE-476", "CWE-787" ]
gpac
b8f8b202d4fc23eb0ab4ce71ae96536ca6f5d3f8
225,040,920,280,406,540,000,000,000,000,000,000,000
4
fixed #1757
f_swapinfo(typval_T *argvars, typval_T *rettv) { if (rettv_dict_alloc(rettv) == OK) get_b0_dict(tv_get_string(argvars), rettv->vval.v_dict); }
0
[ "CWE-78" ]
vim
8c62a08faf89663e5633dc5036cd8695c80f1075
247,802,271,589,428,150,000,000,000,000,000,000,000
5
patch 8.1.0881: can execute shell commands in rvim through interfaces Problem: Can execute shell commands in rvim through interfaces. Solution: Disable using interfaces in restricted mode. Allow for writing file with writefile(), histadd() and a few others.
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_bd_addr *rp = (void *) skb->data; BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); if (rp->status) return; if (test_bit(HCI_INIT, &hdev->flags)) bacpy(&hdev->bdaddr, &rp->bdaddr); if (hci_dev_test_flag(hdev, HCI_SETUP)) bacpy(&hdev->setup_addr, &rp->bdaddr); }
0
[ "CWE-290" ]
linux
3ca44c16b0dcc764b641ee4ac226909f5c421aa3
255,988,954,814,843,000,000,000,000,000,000,000,000
15
Bluetooth: Consolidate encryption handling in hci_encrypt_cfm This makes hci_encrypt_cfm call hci_connect_cfm in case the connection state is BT_CONFIG so callers don't have to check the state. Signed-off-by: Luiz Augusto von Dentz <[email protected]> Signed-off-by: Marcel Holtmann <[email protected]>
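A hedged C sketch of the consolidated helper the message describes: if encryption changes while the link is still in BT_CONFIG, complete connection setup via `hci_connect_cfm()` right there, so callers no longer check the state themselves. Simplified and illustrative, not the verbatim header change.

```c
static inline void hci_encrypt_cfm_sketch(struct hci_conn *conn, __u8 status)
{
	if (conn->state == BT_CONFIG) {
		if (!status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);
		return;
	}

	/* otherwise notify the usual encryption-change callbacks here */
}
```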
*/ static void xmlXPathCompLiteral(xmlXPathParserContextPtr ctxt) { const xmlChar *q; xmlChar *ret = NULL; if (CUR == '"') { NEXT; q = CUR_PTR; while ((IS_CHAR_CH(CUR)) && (CUR != '"')) NEXT; if (!IS_CHAR_CH(CUR)) { XP_ERROR(XPATH_UNFINISHED_LITERAL_ERROR); } else { ret = xmlStrndup(q, CUR_PTR - q); NEXT; } } else if (CUR == '\'') { NEXT; q = CUR_PTR; while ((IS_CHAR_CH(CUR)) && (CUR != '\'')) NEXT; if (!IS_CHAR_CH(CUR)) { XP_ERROR(XPATH_UNFINISHED_LITERAL_ERROR); } else { ret = xmlStrndup(q, CUR_PTR - q); NEXT; } } else { XP_ERROR(XPATH_START_LITERAL_ERROR); } if (ret == NULL) return; PUSH_LONG_EXPR(XPATH_OP_VALUE, XPATH_STRING, 0, 0, xmlXPathCacheNewString(ctxt->context, ret), NULL);
0
[ "CWE-119" ]
libxml2
91d19754d46acd4a639a8b9e31f50f31c78f8c9c
40,778,933,176,898,385,000,000,000,000,000,000,000
34
Fix the semantics of XPath axes for namespace/attribute context nodes The processing of namespace and attribute nodes was not compliant with the XPath-1.0 specification
connection_dir_process_inbuf(dir_connection_t *conn) { tor_assert(conn); tor_assert(conn->_base.type == CONN_TYPE_DIR); /* Directory clients write, then read data until they receive EOF; * directory servers read data until they get an HTTP command, then * write their response (when it's finished flushing, they mark for * close). */ /* If we're on the dirserver side, look for a command. */ if (conn->_base.state == DIR_CONN_STATE_SERVER_COMMAND_WAIT) { if (directory_handle_command(conn) < 0) { connection_mark_for_close(TO_CONN(conn)); return -1; } return 0; } if (buf_datalen(conn->_base.inbuf) > MAX_DIRECTORY_OBJECT_SIZE) { log_warn(LD_HTTP, "Too much data received from directory connection: " "denial of service attempt, or you need to upgrade?"); connection_mark_for_close(TO_CONN(conn)); return -1; } if (!conn->_base.inbuf_reached_eof) log_debug(LD_HTTP,"Got data, not eof. Leaving on inbuf."); return 0; }
0
[]
tor
973c18bf0e84d14d8006a9ae97fde7f7fb97e404
12,513,859,402,908,774,000,000,000,000,000,000,000
31
Fix assertion failure in tor_timegm. Fixes bug 6811.
z2grestoreall(i_ctx_t *i_ctx_p) { for (;;) { int code = restore_page_device(i_ctx_p, igs, gs_gstate_saved(igs)); if (code < 0) return code; if (code == 0) { bool done = !gs_gstate_saved(gs_gstate_saved(igs)); gs_grestore(igs); if (done) break; } else return push_callout(i_ctx_p, "%grestoreallpagedevice"); } return 0; }
0
[]
ghostpdl
643b24dbd002fb9c131313253c307cf3951b3d47
252,844,493,665,497,900,000,000,000,000,000,000,000
16
Bug 699718(2): Improve/augment stack size checking Improve the robustness of the previous solution (previously it could trigger an error when there *was* stack capacity available). Remove redundant check: we don't need to check if the *current* stack size is sufficient, before checking the maximum permitted stack size. Also check the exec stack, as execstackoverflow can also cause the Postscript call out to fail. Lastly, in the event of failure, put the LockSafetyParams flag back in the existing device (this is only necessary because we don't enforce JOBSERVER mode). Note: the Postscript callout (%grestorepagedevice) never pushes any dictionaries on the dict stack - if that changes, we should check that stack, too.
static int io_fadvise(struct io_kiocb *req, bool force_nonblock) { struct io_fadvise *fa = &req->fadvise; int ret; if (force_nonblock) { switch (fa->advice) { case POSIX_FADV_NORMAL: case POSIX_FADV_RANDOM: case POSIX_FADV_SEQUENTIAL: break; default: return -EAGAIN; } } ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice); if (ret < 0) req_set_fail_links(req); io_req_complete(req, ret); return 0; }
0
[]
linux
0f2122045b946241a9e549c2a76cea54fa58a7ff
33,431,111,258,213,020,000,000,000,000,000,000,000
22
io_uring: don't rely on weak ->files references Grab actual references to the files_struct. To avoid circular reference issues due to this, we add a per-task note that keeps track of what io_uring contexts a task has used. When the task execs or exits its assigned files, we cancel requests based on this tracking. With that, we can grab proper references to the files table, and no longer need to rely on stashing away ring_fd and ring_file to check if the ring_fd may have been closed. Cc: [email protected] # v5.5+ Reviewed-by: Pavel Begunkov <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
static const char *am_get_idp(request_rec *r) { LassoServer *server; const char *idp_provider_id; server = am_get_lasso_server(r); if (server == NULL) return NULL; /* * If we have a single IdP, return that one. */ if (g_hash_table_size(server->providers) == 1) return am_first_idp(r); /* * If IdP discovery handed us an IdP, try to use it. */ idp_provider_id = am_extract_query_parameter(r->pool, r->args, "IdP"); if (idp_provider_id != NULL) { int rc; rc = am_urldecode((char *)idp_provider_id); if (rc != OK) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, "Could not urldecode IdP discovery value."); idp_provider_id = NULL; } else { if (g_hash_table_lookup(server->providers, idp_provider_id) == NULL) idp_provider_id = NULL; } /* * If we do not know about it, fall back to default. */ if (idp_provider_id == NULL) { ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, "IdP discovery returned unknown or inexistant IdP"); idp_provider_id = am_first_idp(r); } return idp_provider_id; } /* * No IdP answered, use default * Perhaps we should redirect to an error page instead. */ return am_first_idp(r); }
0
[]
mod_auth_mellon
6bdda9170a8f1757dabc5b109958657417728018
196,713,826,236,277,240,000,000,000,000,000,000,000
50
Fix segmentation fault when receiving badly formed logout message. If the logout message is badly formed, we won't get the entityID in `logout->parent.remote_providerID`. If we call `apr_hash_get()` with a null pointer, it will cause a segmentation fault. Add a check to validate that the entityID is correctly set.
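A hedged C sketch of the validation named above: reject the logout message before any lookup when it did not carry a remote entityID, instead of handing a NULL key to `apr_hash_get()`. The wrapper is illustrative; the log call mirrors the style used elsewhere in the module.

```c
/* Returns 1 if the logout message carries a usable entityID, 0 otherwise. */
static int logout_entity_id_ok(request_rec *r, LassoLogout *logout)
{
	if (logout->parent.remote_providerID == NULL) {
		ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
			      "Badly formed logout message: no remote entityID.");
		return 0;
	}
	return 1;
}
```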
static void array_move(json_array_t *array, size_t dest, size_t src, size_t count) { memmove(&array->table[dest], &array->table[src], count * sizeof(json_t *)); }
0
[ "CWE-310" ]
jansson
8f80c2d83808150724d31793e6ade92749b1faa4
151,998,563,155,535,030,000,000,000,000,000,000,000
5
CVE-2013-6401: Change hash function, randomize hashes Thanks to Florian Weimer and Eric Sesterhenn for reporting, reviewing and testing.
display_epoch_time(gchar *buf, int buflen, const time_t sec, gint32 frac, const to_str_time_res_t units) { double elapsed_secs; elapsed_secs = difftime(sec,(time_t)0); /* This code copied from display_signed_time; keep it in case anyone is looking at captures from before 1970 (???). If the fractional part of the time stamp is negative, print its absolute value and, if the seconds part isn't (the seconds part should be zero in that case), stick a "-" in front of the entire time stamp. */ if (frac < 0) { frac = -frac; if (elapsed_secs >= 0) { if (buflen < 1) { return; } buf[0] = '-'; buf++; buflen--; } } switch (units) { case TO_STR_TIME_RES_T_SECS: g_snprintf(buf, buflen, "%0.0f", elapsed_secs); break; case TO_STR_TIME_RES_T_DSECS: g_snprintf(buf, buflen, "%0.0f.%01d", elapsed_secs, frac); break; case TO_STR_TIME_RES_T_CSECS: g_snprintf(buf, buflen, "%0.0f.%02d", elapsed_secs, frac); break; case TO_STR_TIME_RES_T_MSECS: g_snprintf(buf, buflen, "%0.0f.%03d", elapsed_secs, frac); break; case TO_STR_TIME_RES_T_USECS: g_snprintf(buf, buflen, "%0.0f.%06d", elapsed_secs, frac); break; case TO_STR_TIME_RES_T_NSECS: g_snprintf(buf, buflen, "%0.0f.%09d", elapsed_secs, frac); break; } }
0
[ "CWE-125" ]
wireshark
d5f2657825e63e4126ebd7d13a59f3c6e8a9e4e1
225,988,952,967,224,240,000,000,000,000,000,000,000
51
epan: Limit our bits in decode_bits_in_field. Limit the number of bits we process in decode_bits_in_field, otherwise we'll overrun our buffer. Fixes #16958.
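A hedged, self-contained C sketch of the bound the message describes: clamp how many bits a `decode_bits_in_field`-style routine will walk so its fixed-size output buffer cannot be overrun. The limit value and helper name are illustrative.

```c
#define MAX_DECODED_BITS 64	/* sized to the output buffer, illustrative */

/* Cap the caller-supplied bit count before formatting bit strings. */
static unsigned clamp_bit_count(unsigned requested_bits)
{
	return requested_bits > MAX_DECODED_BITS ? MAX_DECODED_BITS
						 : requested_bits;
}
```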
static double mp_vector_map_sv(_cimg_math_parser& mp) { // Operator(scalar,vector) unsigned int siz = (unsigned int)mp.opcode[2], ptrs = (unsigned int)mp.opcode[5] + 1; double *ptrd = &_mp_arg(1) + 1; mp_func op = (mp_func)mp.opcode[3]; CImg<ulongT> l_opcode(4); l_opcode[2] = mp.opcode[4]; // Scalar argument1 l_opcode.swap(mp.opcode); ulongT &argument2 = mp.opcode[3]; while (siz-->0) { argument2 = ptrs++; *(ptrd++) = (*op)(mp); } l_opcode.swap(mp.opcode); return cimg::type<double>::nan();
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
117,682,735,867,303,200,000,000,000,000,000,000,000
14
Fix other issues in 'CImg<T>::load_bmp()'.
static int xen_irq_info_virq_setup(unsigned cpu, unsigned irq, evtchn_port_t evtchn, unsigned virq) { struct irq_info *info = info_for_irq(irq); info->u.virq = virq; per_cpu(virq_to_irq, cpu)[virq] = irq; return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0); }
0
[ "CWE-400", "CWE-703" ]
linux
e99502f76271d6bc4e374fe368c50c67a1fd3070
42,666,548,010,312,700,000,000,000,000,000,000,000
13
xen/events: defer eoi in case of excessive number of events In case rogue guests are sending events at high frequency it might happen that xen_evtchn_do_upcall() won't stop processing events in dom0. As this is done in irq handling a crash might be the result. In order to avoid that, delay further inter-domain events after some time in xen_evtchn_do_upcall() by forcing eoi processing into a worker on the same cpu, thus inhibiting new events coming in. The time after which eoi processing is to be delayed is configurable via a new module parameter "event_loop_timeout" which specifies the maximum event loop time in jiffies (default: 2, the value was chosen after some tests showing that a value of 2 was the lowest with an only slight drop of dom0 network throughput while multiple guests performed an event storm). How long eoi processing will be delayed can be specified via another parameter "event_eoi_delay" (again in jiffies, default 10, again the value was chosen after testing with different delay values). This is part of XSA-332. Cc: [email protected] Reported-by: Julien Grall <[email protected]> Signed-off-by: Juergen Gross <[email protected]> Reviewed-by: Stefano Stabellini <[email protected]> Reviewed-by: Wei Liu <[email protected]>
static void add_softcursor(struct vc_data *vc) { int i = scr_readw((u16 *) vc->vc_pos); u32 type = vc->vc_cursor_type; if (! (type & 0x10)) return; if (softcursor_original != -1) return; softcursor_original = i; i |= ((type >> 8) & 0xff00 ); i ^= ((type) & 0xff00 ); if ((type & 0x20) && ((softcursor_original & 0x7000) == (i & 0x7000))) i ^= 0x7000; if ((type & 0x40) && ((i & 0x700) == ((i & 0x7000) >> 4))) i ^= 0x0700; scr_writew(i, (u16 *) vc->vc_pos); if (con_should_update(vc)) vc->vc_sw->con_putc(vc, i, vc->vc_y, vc->vc_x); }
0
[ "CWE-416", "CWE-362" ]
linux
ca4463bf8438b403596edd0ec961ca0d4fbe0220
16,873,524,303,530,494,000,000,000,000,000,000,000
16
vt: vt_ioctl: fix VT_DISALLOCATE freeing in-use virtual console The VT_DISALLOCATE ioctl can free a virtual console while tty_release() is still running, causing a use-after-free in con_shutdown(). This occurs because VT_DISALLOCATE considers a virtual console's 'struct vc_data' to be unused as soon as the corresponding tty's refcount hits 0. But actually it may be still being closed. Fix this by making vc_data be reference-counted via the embedded 'struct tty_port'. A newly allocated virtual console has refcount 1. Opening it for the first time increments the refcount to 2. Closing it for the last time decrements the refcount (in tty_operations::cleanup() so that it happens late enough), as does VT_DISALLOCATE. Reproducer: #include <fcntl.h> #include <linux/vt.h> #include <sys/ioctl.h> #include <unistd.h> int main() { if (fork()) { for (;;) close(open("/dev/tty5", O_RDWR)); } else { int fd = open("/dev/tty10", O_RDWR); for (;;) ioctl(fd, VT_DISALLOCATE, 5); } } KASAN report: BUG: KASAN: use-after-free in con_shutdown+0x76/0x80 drivers/tty/vt/vt.c:3278 Write of size 8 at addr ffff88806a4ec108 by task syz_vt/129 CPU: 0 PID: 129 Comm: syz_vt Not tainted 5.6.0-rc2 #11 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ?-20191223_100556-anatol 04/01/2014 Call Trace: [...] con_shutdown+0x76/0x80 drivers/tty/vt/vt.c:3278 release_tty+0xa8/0x410 drivers/tty/tty_io.c:1514 tty_release_struct+0x34/0x50 drivers/tty/tty_io.c:1629 tty_release+0x984/0xed0 drivers/tty/tty_io.c:1789 [...] Allocated by task 129: [...] kzalloc include/linux/slab.h:669 [inline] vc_allocate drivers/tty/vt/vt.c:1085 [inline] vc_allocate+0x1ac/0x680 drivers/tty/vt/vt.c:1066 con_install+0x4d/0x3f0 drivers/tty/vt/vt.c:3229 tty_driver_install_tty drivers/tty/tty_io.c:1228 [inline] tty_init_dev+0x94/0x350 drivers/tty/tty_io.c:1341 tty_open_by_driver drivers/tty/tty_io.c:1987 [inline] tty_open+0x3ca/0xb30 drivers/tty/tty_io.c:2035 [...] Freed by task 130: [...] kfree+0xbf/0x1e0 mm/slab.c:3757 vt_disallocate drivers/tty/vt/vt_ioctl.c:300 [inline] vt_ioctl+0x16dc/0x1e30 drivers/tty/vt/vt_ioctl.c:818 tty_ioctl+0x9db/0x11b0 drivers/tty/tty_io.c:2660 [...] Fixes: 4001d7b7fc27 ("vt: push down the tty lock so we can see what is left to tackle") Cc: <[email protected]> # v3.4+ Reported-by: [email protected] Acked-by: Jiri Slaby <[email protected]> Signed-off-by: Eric Biggers <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Greg Kroah-Hartman <[email protected]>
static int zp_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); if (ret) { hid_err(hdev, "hw start failed\n"); goto err; } zpff_init(hdev); return 0; err: return ret; }
0
[ "CWE-787" ]
linux
d9d4b1e46d9543a82c23f6df03f4ad697dab361b
199,248,549,767,136,860,000,000,000,000,000,000,000
22
HID: Fix assumption that devices have inputs The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff driver. The problem is caused by the driver's assumption that the device must have an input report. While this will be true for all normal HID input devices, a suitably malicious device can violate the assumption. The same assumption is present in over a dozen other HID drivers. This patch fixes them by checking that the list of hid_inputs for the hid_device is nonempty before allowing it to be used. Reported-and-tested-by: [email protected] Signed-off-by: Alan Stern <[email protected]> CC: <[email protected]> Signed-off-by: Benjamin Tissoires <[email protected]>
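A hedged C sketch of the probe-time guard described above: verify the device actually exposes an input before dereferencing the first entry of `hdev->inputs`. It mirrors the pattern applied to the affected drivers but is illustrative rather than any one driver's exact patch.

```c
static int hid_probe_requires_input(struct hid_device *hdev)
{
	struct hid_input *hidinput;

	if (list_empty(&hdev->inputs)) {
		hid_err(hdev, "no inputs found\n");
		return -ENODEV;	/* malicious/odd device: bail out safely */
	}

	hidinput = list_first_entry(&hdev->inputs, struct hid_input, list);
	/* ... safe to use hidinput->input from here on ... */
	return 0;
}
```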
void PSOutputDev::startPage(int pageNum, GfxState *state) { int x1, y1, x2, y2, width, height; int imgWidth, imgHeight, imgWidth2, imgHeight2; GBool landscape; if (mode == psModePS || mode == psModePSOrigPageSizes) { GooString pageLabel; const GBool gotLabel = m_catalog->indexToLabel(pageNum -1, &pageLabel); if (gotLabel) { // See bug13338 for why we try to avoid parentheses... GBool needParens; GooString *filteredString = filterPSLabel(&pageLabel, &needParens); if (needParens) { writePSFmt("%%Page: ({0:t}) {1:d}\n", filteredString, seqPage); } else { writePSFmt("%%Page: {0:t} {1:d}\n", filteredString, seqPage); } delete filteredString; } else { writePSFmt("%%Page: {0:d} {1:d}\n", pageNum, seqPage); } if (mode != psModePSOrigPageSizes) writePS("%%BeginPageSetup\n"); } // underlays if (underlayCbk) { (*underlayCbk)(this, underlayCbkData); } if (overlayCbk) { saveState(NULL); } switch (mode) { case psModePSOrigPageSizes: x1 = (int)floor(state->getX1()); y1 = (int)floor(state->getY1()); x2 = (int)ceil(state->getX2()); y2 = (int)ceil(state->getY2()); width = x2 - x1; height = y2 - y1; if (width > height) { landscape = gTrue; } else { landscape = gFalse; } writePSFmt("%%PageBoundingBox: {0:d} {1:d} {2:d} {3:d}\n", x1, y1, x2 - x1, y2 - y1); writePS("%%BeginPageSetup\n"); writePSFmt("%%PageOrientation: {0:s}\n", landscape ? "Landscape" : "Portrait"); if ((width != prevWidth) || (height != prevHeight)) { // Set page size only when it actually changes, as otherwise Duplex // printing does not work writePSFmt("<</PageSize [{0:d} {1:d}]>> setpagedevice\n", width, height); prevWidth = width; prevHeight = height; } writePS("pdfStartPage\n"); writePSFmt("{0:d} {1:d} {2:d} {3:d} re W\n", x1, y1, x2 - x1, y2 - y1); writePS("%%EndPageSetup\n"); ++seqPage; break; case psModePS: // rotate, translate, and scale page imgWidth = imgURX - imgLLX; imgHeight = imgURY - imgLLY; x1 = (int)floor(state->getX1()); y1 = (int)floor(state->getY1()); x2 = (int)ceil(state->getX2()); y2 = (int)ceil(state->getY2()); width = x2 - x1; height = y2 - y1; tx = ty = 0; // rotation and portrait/landscape mode if (rotate0 >= 0) { rotate = (360 - rotate0) % 360; landscape = gFalse; } else { rotate = (360 - state->getRotate()) % 360; if (rotate == 0 || rotate == 180) { if (width > height && width > imgWidth) { rotate += 90; landscape = gTrue; } else { landscape = gFalse; } } else { // rotate == 90 || rotate == 270 if (height > width && height > imgWidth) { rotate = 270 - rotate; landscape = gTrue; } else { landscape = gFalse; } } } writePSFmt("%%PageOrientation: {0:s}\n", landscape ? 
"Landscape" : "Portrait"); writePS("pdfStartPage\n"); if (rotate == 0) { imgWidth2 = imgWidth; imgHeight2 = imgHeight; } else if (rotate == 90) { writePS("90 rotate\n"); ty = -imgWidth; imgWidth2 = imgHeight; imgHeight2 = imgWidth; } else if (rotate == 180) { writePS("180 rotate\n"); imgWidth2 = imgWidth; imgHeight2 = imgHeight; tx = -imgWidth; ty = -imgHeight; } else { // rotate == 270 writePS("270 rotate\n"); tx = -imgHeight; imgWidth2 = imgHeight; imgHeight2 = imgWidth; } // shrink or expand if (xScale0 > 0 && yScale0 > 0) { xScale = xScale0; yScale = yScale0; } else if ((globalParams->getPSShrinkLarger() && (width > imgWidth2 || height > imgHeight2)) || (globalParams->getPSExpandSmaller() && (width < imgWidth2 && height < imgHeight2))) { xScale = (double)imgWidth2 / (double)width; yScale = (double)imgHeight2 / (double)height; if (yScale < xScale) { xScale = yScale; } else { yScale = xScale; } } else { xScale = yScale = 1; } // deal with odd bounding boxes or clipping if (clipLLX0 < clipURX0 && clipLLY0 < clipURY0) { tx -= xScale * clipLLX0; ty -= yScale * clipLLY0; } else { tx -= xScale * x1; ty -= yScale * y1; } // center if (tx0 >= 0 && ty0 >= 0) { tx += rotate == 0 ? tx0 : ty0; ty += rotate == 0 ? ty0 : -tx0; } else if (globalParams->getPSCenter()) { if (clipLLX0 < clipURX0 && clipLLY0 < clipURY0) { tx += (imgWidth2 - xScale * (clipURX0 - clipLLX0)) / 2; ty += (imgHeight2 - yScale * (clipURY0 - clipLLY0)) / 2; } else { tx += (imgWidth2 - xScale * width) / 2; ty += (imgHeight2 - yScale * height) / 2; } } tx += rotate == 0 ? imgLLX : imgLLY; ty += rotate == 0 ? imgLLY : -imgLLX; if (tx != 0 || ty != 0) { writePSFmt("{0:.6g} {1:.6g} translate\n", tx, ty); } if (xScale != 1 || yScale != 1) { writePSFmt("{0:.6f} {1:.6f} scale\n", xScale, yScale); } if (clipLLX0 < clipURX0 && clipLLY0 < clipURY0) { writePSFmt("{0:.6g} {1:.6g} {2:.6g} {3:.6g} re W\n", clipLLX0, clipLLY0, clipURX0 - clipLLX0, clipURY0 - clipLLY0); } else { writePSFmt("{0:d} {1:d} {2:d} {3:d} re W\n", x1, y1, x2 - x1, y2 - y1); } writePS("%%EndPageSetup\n"); ++seqPage; break; case psModeEPS: writePS("pdfStartPage\n"); tx = ty = 0; rotate = (360 - state->getRotate()) % 360; if (rotate == 0) { } else if (rotate == 90) { writePS("90 rotate\n"); tx = -epsX1; ty = -epsY2; } else if (rotate == 180) { writePS("180 rotate\n"); tx = -(epsX1 + epsX2); ty = -(epsY1 + epsY2); } else { // rotate == 270 writePS("270 rotate\n"); tx = -epsX2; ty = -epsY1; } if (tx != 0 || ty != 0) { writePSFmt("{0:.6g} {1:.6g} translate\n", tx, ty); } xScale = yScale = 1; break; case psModeForm: writePS("/PaintProc {\n"); writePS("begin xpdf begin\n"); writePS("pdfStartPage\n"); tx = ty = 0; xScale = yScale = 1; rotate = 0; break; } }
0
[]
poppler
abf167af8b15e5f3b510275ce619e6fdb42edd40
102,867,622,997,733,500,000,000,000,000,000,000,000
213
Implement tiling/patterns in SplashOutputDev Fixes bug 13518
static int reencrypt_make_backup_segments(struct crypt_device *cd, struct luks2_hdr *hdr, int keyslot_new, const char *cipher, uint64_t data_offset, const struct crypt_params_reencrypt *params) { int r, segment, moved_segment = -1, digest_old = -1, digest_new = -1; json_object *jobj_segment_new = NULL, *jobj_segment_old = NULL, *jobj_segment_bcp = NULL; uint32_t sector_size = params->luks2 ? params->luks2->sector_size : SECTOR_SIZE; uint64_t segment_offset, tmp, data_shift = params->data_shift << SECTOR_SHIFT; if (params->mode != CRYPT_REENCRYPT_DECRYPT) { digest_new = LUKS2_digest_by_keyslot(hdr, keyslot_new); if (digest_new < 0) return -EINVAL; } if (params->mode != CRYPT_REENCRYPT_ENCRYPT) { digest_old = LUKS2_digest_by_segment(hdr, CRYPT_DEFAULT_SEGMENT); if (digest_old < 0) return -EINVAL; } segment = LUKS2_segment_first_unused_id(hdr); if (segment < 0) return -EINVAL; if (params->mode == CRYPT_REENCRYPT_ENCRYPT && (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT)) { json_object_copy(LUKS2_get_segment_jobj(hdr, 0), &jobj_segment_bcp); r = LUKS2_segment_set_flag(jobj_segment_bcp, "backup-moved-segment"); if (r) goto err; moved_segment = segment++; json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), moved_segment, jobj_segment_bcp); } /* FIXME: Add detection for case (digest old == digest new && old segment == new segment) */ if (digest_old >= 0) json_object_copy(LUKS2_get_segment_jobj(hdr, CRYPT_DEFAULT_SEGMENT), &jobj_segment_old); else if (params->mode == CRYPT_REENCRYPT_ENCRYPT) { r = LUKS2_get_data_size(hdr, &tmp, NULL); if (r) goto err; if (params->flags & CRYPT_REENCRYPT_MOVE_FIRST_SEGMENT) jobj_segment_old = json_segment_create_linear(0, tmp ? &tmp : NULL, 0); else jobj_segment_old = json_segment_create_linear(data_offset, tmp ? &tmp : NULL, 0); } if (!jobj_segment_old) { r = -EINVAL; goto err; } r = LUKS2_segment_set_flag(jobj_segment_old, "backup-previous"); if (r) goto err; json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_old); jobj_segment_old = NULL; if (digest_old >= 0) LUKS2_digest_segment_assign(cd, hdr, segment, digest_old, 1, 0); segment++; if (digest_new >= 0) { segment_offset = data_offset; if (params->mode != CRYPT_REENCRYPT_ENCRYPT && modify_offset(&segment_offset, data_shift, params->direction)) { r = -EINVAL; goto err; } jobj_segment_new = json_segment_create_crypt(segment_offset, crypt_get_iv_offset(cd), NULL, cipher, sector_size, 0); } else if (params->mode == CRYPT_REENCRYPT_DECRYPT) { segment_offset = data_offset; if (modify_offset(&segment_offset, data_shift, params->direction)) { r = -EINVAL; goto err; } jobj_segment_new = json_segment_create_linear(segment_offset, NULL, 0); } if (!jobj_segment_new) { r = -EINVAL; goto err; } r = LUKS2_segment_set_flag(jobj_segment_new, "backup-final"); if (r) goto err; json_object_object_add_by_uint(LUKS2_get_segments_jobj(hdr), segment, jobj_segment_new); jobj_segment_new = NULL; if (digest_new >= 0) LUKS2_digest_segment_assign(cd, hdr, segment, digest_new, 1, 0); /* FIXME: also check occupied space by keyslot in shrunk area */ if (params->direction == CRYPT_REENCRYPT_FORWARD && data_shift && crypt_metadata_device(cd) == crypt_data_device(cd) && LUKS2_set_keyslots_size(cd, hdr, json_segment_get_offset(reencrypt_segment_new(hdr), 0))) { log_err(cd, _("Failed to set new keyslots area size.")); r = -EINVAL; goto err; } return 0; err: json_object_put(jobj_segment_new); json_object_put(jobj_segment_old); return r; }
0
[ "CWE-345" ]
cryptsetup
0113ac2d889c5322659ad0596d4cfc6da53e356c
2,280,049,036,320,590,700,000,000,000,000,000,000
113
Fix CVE-2021-4122 - LUKS2 reencryption crash recovery attack Fix possible attacks against data confidentiality through LUKS2 online reencryption extension crash recovery. An attacker can modify on-disk metadata to simulate decryption in progress with crashed (unfinished) reencryption step and persistently decrypt part of the LUKS device. This attack requires repeated physical access to the LUKS device but no knowledge of user passphrases. The decryption step is performed after a valid user activates the device with a correct passphrase and modified metadata. There are no visible warnings for the user that such recovery happened (except using the luksDump command). The attack can also be reversed afterward (simulating crashed encryption from a plaintext) with possible modification of revealed plaintext. The problem was caused by reusing a mechanism designed for actual reencryption operation without reassessing the security impact for new encryption and decryption operations. While the reencryption requires calculating and verifying both key digests, no digest was needed to initiate decryption recovery if the destination is plaintext (no encryption key). Also, some metadata (like encryption cipher) is not protected, and an attacker could change it. Note that LUKS2 protects visible metadata only when a random change occurs. It does not protect against intentional modification but such modification must not cause a violation of data confidentiality. The fix introduces additional digest protection of reencryption metadata. The digest is calculated from known keys and critical reencryption metadata. Now an attacker cannot create correct metadata digest without knowledge of a passphrase for used keyslots. For more details, see LUKS2 On-Disk Format Specification version 1.1.0.
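The mitigation described above hinges on binding the recovery metadata to key material. Below is a minimal sketch of that idea, assuming a generic HMAC helper; hmac_sha256 is only declared as a stand-in for a real crypto-library call, and struct reenc_meta is an illustrative shape, not the LUKS2 on-disk layout. Crash recovery is honored only when a keyed digest over the critical reencryption fields verifies, so metadata forged without a keyslot passphrase is rejected.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Stand-in prototype for an HMAC routine provided by a crypto library. */
int hmac_sha256(const uint8_t *key, size_t key_len,
                const uint8_t *msg, size_t msg_len, uint8_t out[32]);

struct reenc_meta {                 /* illustrative fields, not the real format */
    uint8_t  mode;                  /* encrypt / decrypt / reencrypt */
    uint8_t  direction;             /* forward / backward */
    uint64_t data_shift;
    char     cipher[64];
    uint8_t  stored_digest[32];     /* written when reencryption is initialized */
};

/* Allow crash recovery only if a digest recomputed from the volume key
 * matches the stored one; without the key the metadata cannot be forged. */
static int reenc_recovery_allowed(const struct reenc_meta *m,
                                  const uint8_t *vk, size_t vk_len)
{
    uint8_t calc[32];
    /* A real implementation would serialize the fields deterministically
     * instead of hashing raw struct bytes (padding is unspecified). */
    if (hmac_sha256(vk, vk_len, (const uint8_t *)m,
                    offsetof(struct reenc_meta, stored_digest), calc) != 0)
        return 0;
    return memcmp(calc, m->stored_digest, sizeof(calc)) == 0;
}

A production check would also use a constant-time comparison rather than memcmp.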
bool Item_field::rename_fields_processor(void *arg) { Item::func_processor_rename *rename= (Item::func_processor_rename*) arg; List_iterator<Create_field> def_it(rename->fields); Create_field *def; while ((def=def_it++)) { if (def->change && (!db_name || !db_name[0] || !my_strcasecmp(table_alias_charset, db_name, rename->db_name.str)) && (!table_name || !table_name[0] || !my_strcasecmp(table_alias_charset, table_name, rename->table_name.str)) && !my_strcasecmp(system_charset_info, field_name, def->change)) { field_name= def->field_name; break; } } return 0; }
0
[ "CWE-89" ]
server
b5e16a6e0381b28b598da80b414168ce9a5016e5
177,365,960,666,254,180,000,000,000,000,000,000,000
21
MDEV-26061 MariaDB server crash at Field::set_default. * Item_default_value::fix_fields creates a copy of its argument's field. * Field::default_value is changed when its expression is prepared in unpack_vcol_info_from_frm(). This means we must unpack any vcol expression that includes DEFAULT(x) strictly after unpacking x->default_value. To avoid building and solving this dependency graph on every table open, we update Item_default_value::field->default_value after all vcols are unpacked and fixed.
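A hedged sketch of the ordering constraint the message describes, using invented types rather than the MariaDB TABLE_SHARE structures: expressions that may reference DEFAULT(x) are only evaluated in a second pass, after every column's default expression has been unpacked, which removes the need for a per-open dependency graph.

#include <stddef.h>

struct column {
    int default_expr;          /* raw default expression (reduced to an int here) */
    int default_value;         /* produced by pass 1 */
    int vcol_uses_default;     /* virtual column defined as DEFAULT(col) + 1 */
    int vcol_value;            /* produced by pass 2 */
};

/* Two-pass unpacking: defaults first, then the vcols that may read them. */
static void unpack_all(struct column *cols, size_t n)
{
    for (size_t i = 0; i < n; i++)          /* pass 1: unpack/evaluate defaults */
        cols[i].default_value = cols[i].default_expr;

    for (size_t i = 0; i < n; i++)          /* pass 2: defaults are ready now */
        if (cols[i].vcol_uses_default)
            cols[i].vcol_value = cols[i].default_value + 1;
}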
regset_search_body_position_lead(OnigRegSet* set, const UChar* str, const UChar* end, const UChar* start, const UChar* range, /* match start range */ const UChar* orig_range, /* data range */ OnigOptionType option, MatchArg* msas, int* rmatch_pos) { int r, n, i; UChar *s, *prev; UChar *low, *high, *low_prev; UChar* sch_range; regex_t* reg; OnigEncoding enc; SearchRange* sr; n = set->n; enc = set->enc; s = (UChar* )start; if (s > str) prev = onigenc_get_prev_char_head(enc, str, s); else prev = (UChar* )NULL; sr = (SearchRange* )xmalloc(sizeof(*sr) * n); CHECK_NULL_RETURN_MEMERR(sr); for (i = 0; i < n; i++) { reg = set->rs[i].reg; sr[i].state = SRS_DEAD; if (reg->optimize != OPTIMIZE_NONE) { if (reg->dist_max != INFINITE_LEN) { if (end - range > reg->dist_max) sch_range = (UChar* )range + reg->dist_max; else sch_range = (UChar* )end; if (forward_search(reg, str, end, s, sch_range, &low, &high, &low_prev)) { sr[i].state = SRS_LOW_HIGH; sr[i].low = low; sr[i].high = high; sr[i].low_prev = low_prev; sr[i].sch_range = sch_range; } } else { sch_range = (UChar* )end; if (forward_search(reg, str, end, s, sch_range, &low, &high, (UChar** )NULL)) { goto total_active; } } } else { total_active: sr[i].state = SRS_ALL_RANGE; sr[i].low = s; sr[i].high = (UChar* )range; sr[i].low_prev = prev; } } #define ACTIVATE_ALL_LOW_HIGH_SEARCH_THRESHOLD_LEN 500 if (set->all_low_high != 0 && range - start > ACTIVATE_ALL_LOW_HIGH_SEARCH_THRESHOLD_LEN) { do { int try_count = 0; for (i = 0; i < n; i++) { if (sr[i].state == SRS_DEAD) continue; if (s < sr[i].low) continue; if (s >= sr[i].high) { if (forward_search(set->rs[i].reg, str, end, s, sr[i].sch_range, &low, &high, &low_prev) != 0) { sr[i].low = low; sr[i].high = high; sr[i].low_prev = low_prev; if (s < low) continue; } else { sr[i].state = SRS_DEAD; continue; } } reg = set->rs[i].reg; REGSET_MATCH_AND_RETURN_CHECK(orig_range); try_count++; } /* for (i) */ if (s >= range) break; if (try_count == 0) { low = (UChar* )range; for (i = 0; i < n; i++) { if (sr[i].state == SRS_LOW_HIGH && low > sr[i].low) { low = sr[i].low; low_prev = sr[i].low_prev; } } if (low == range) break; s = low; prev = low_prev; } else { prev = s; s += enclen(enc, s); } } while (1); } else { int prev_is_newline = 1; do { for (i = 0; i < n; i++) { if (sr[i].state == SRS_DEAD) continue; if (sr[i].state == SRS_LOW_HIGH) { if (s < sr[i].low) continue; if (s >= sr[i].high) { if (forward_search(set->rs[i].reg, str, end, s, sr[i].sch_range, &low, &high, &low_prev) != 0) { sr[i].low = low; sr[i].high = high; /* sr[i].low_prev = low_prev; */ if (s < low) continue; } else { sr[i].state = SRS_DEAD; continue; } } } reg = set->rs[i].reg; if ((reg->anchor & ANCR_ANYCHAR_INF) == 0 || prev_is_newline != 0) { REGSET_MATCH_AND_RETURN_CHECK(orig_range); } } if (s >= range) break; if (set->anychar_inf != 0) prev_is_newline = ONIGENC_IS_MBC_NEWLINE(set->enc, s, end); prev = s; s += enclen(enc, s); } while (1); } xfree(sr); return ONIG_MISMATCH; finish: xfree(sr); return r; match: xfree(sr); *rmatch_pos = (int )(s - str); return i; }
0
[ "CWE-125" ]
oniguruma
778a43dd56925ed58bbe26e3a7bb8202d72c3f3f
23,821,324,843,431,746,000,000,000,000,000,000,000
162
fix #164: Integer overflow related to reg->dmax in search_in_range()
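A short sketch of the overflow-safe clamping used for this kind of bug; the names are illustrative, but the shape mirrors the end - range > reg->dist_max comparison visible in the function above. Compare the remaining length against the distance instead of adding the distance to a pointer, so an oversized dist_max cannot wrap the arithmetic.

#include <stddef.h>

/* Clamp a forward-search window without overflowing pointer arithmetic;
 * assumes range <= end. */
static const unsigned char *clamp_search_range(const unsigned char *range,
                                               const unsigned char *end,
                                               size_t dist_max)
{
    /* end - range is well defined; range + dist_max could wrap for huge values. */
    if ((size_t)(end - range) > dist_max)
        return range + dist_max;
    return end;
}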
free_ldap_seqof_key_data(ldap_seqof_key_data *keysets, krb5_int16 n_keysets) { int i; if (keysets == NULL) return; for (i = 0; i < n_keysets; i++) k5_free_key_data(keysets[i].n_key_data, keysets[i].key_data); free(keysets); }
0
[ "CWE-703" ]
krb5
08c642c09c38a9c6454ab43a9b53b2a89b9eef99
81,573,998,750,165,290,000,000,000,000,000,000,000
11
Fix LDAP null deref on empty arg [CVE-2016-3119]. In the LDAP KDB module's process_db_args(), strtok_r() may return NULL if there is an empty string in the db_args array. Check for this case and avoid dereferencing a null pointer. CVE-2016-3119: In MIT krb5 1.6 and later, an authenticated attacker with permission to modify a principal entry can cause kadmind to dereference a null pointer by supplying an empty DB argument to the modify_principal command, if kadmind is configured to use the LDAP KDB module. CVSSv2 Vector: AV:N/AC:H/Au:S/C:N/I:N/A:C/E:H/RL:OF/RC:ND ticket: 8383 (new) target_version: 1.14-next target_version: 1.13-next tags: pullup
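A self-contained illustration of the null check the message describes; the argument names are made up and this is not the actual process_db_args() code. strtok_r() returns NULL for an empty input, so the result must be tested before it is dereferenced.

#include <stdio.h>
#include <string.h>

/* Parse a "key=value" DB argument; tolerate an empty string instead of
 * dereferencing the NULL that strtok_r() returns for it. */
static int parse_db_arg(char *arg)
{
    char *save = NULL;
    char *key = strtok_r(arg, "=", &save);

    if (key == NULL)            /* empty argument: reject, do not crash */
        return -1;

    printf("key: %s, value: %s\n", key, (save && *save) ? save : "(none)");
    return 0;
}

int main(void)
{
    char ok[]    = "tktpolicy=default";
    char empty[] = "";          /* the shape of the CVE-2016-3119 trigger */
    parse_db_arg(ok);
    return parse_db_arg(empty) == -1 ? 0 : 1;
}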
create_code(struct archive_read *a, struct huffman_code *code, unsigned char *lengths, int numsymbols, char maxlength) { int i, j, codebits = 0, symbolsleft = numsymbols; code->numentries = 0; code->numallocatedentries = 0; if (new_node(code) < 0) { archive_set_error(&a->archive, ENOMEM, "Unable to allocate memory for node data."); return (ARCHIVE_FATAL); } code->numentries = 1; code->minlength = INT_MAX; code->maxlength = INT_MIN; codebits = 0; for(i = 1; i <= maxlength; i++) { for(j = 0; j < numsymbols; j++) { if (lengths[j] != i) continue; if (add_value(a, code, j, codebits, i) != ARCHIVE_OK) return (ARCHIVE_FATAL); codebits++; if (--symbolsleft <= 0) { break; break; } } codebits <<= 1; } return (ARCHIVE_OK); }
0
[ "CWE-119", "CWE-787" ]
libarchive
05caadc7eedbef471ac9610809ba683f0c698700
191,908,602,618,180,500,000,000,000,000,000,000,000
30
Issue 719: Fix for TALOS-CAN-154. A RAR file with an invalid zero dictionary size was not being rejected, leading to a zero-sized allocation for the dictionary storage, which was then overwritten during the dictionary initialization. Thanks to the Open Source and Threat Intelligence project at Cisco for reporting this.
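A hedged sketch of the validation idea, with illustrative names and bounds rather than the libarchive internals: reject a zero (or absurd) declared dictionary size before allocating, so the later initialization can never write into a zero-sized buffer.

#include <stdlib.h>
#include <string.h>

#define DICT_MIN (1u << 16)     /* illustrative lower bound */
#define DICT_MAX (1u << 22)     /* illustrative upper bound */

/* Allocate the decompression dictionary only for a sane declared size. */
static unsigned char *alloc_dictionary(unsigned int declared_size)
{
    if (declared_size < DICT_MIN || declared_size > DICT_MAX)
        return NULL;            /* invalid (e.g. zero) size: refuse the archive */

    unsigned char *dict = malloc(declared_size);
    if (dict != NULL)
        memset(dict, 0, declared_size);   /* safe: size was checked above */
    return dict;
}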
void CLASS leaf_hdr_load_raw() { ushort *pixel=0; unsigned tile=0, r, c, row, col; if (!filters) { pixel = (ushort *) calloc (raw_width, sizeof *pixel); merror (pixel, "leaf_hdr_load_raw()"); } #ifdef LIBRAW_LIBRARY_BUILD try { #endif FORC(tiff_samples) for (r=0; r < raw_height; r++) { #ifdef LIBRAW_LIBRARY_BUILD checkCancel(); #endif if (r % tile_length == 0) { fseek (ifp, data_offset + 4*tile++, SEEK_SET); fseek (ifp, get4(), SEEK_SET); } if (filters && c != shot_select) continue; if (filters) pixel = raw_image + r*raw_width; read_shorts (pixel, raw_width); if (!filters && (row = r - top_margin) < height) for (col=0; col < width; col++) image[row*width+col][c] = pixel[col+left_margin]; } #ifdef LIBRAW_LIBRARY_BUILD } catch (...) { if(!filters) free(pixel); throw; } #endif if (!filters) { maximum = 0xffff; raw_color = 1; free (pixel); } }
1
[ "CWE-369", "CWE-704" ]
LibRaw
9f26ce37f5be86ea11bfc6831366558650b1f6ff
64,223,732,535,727,770,000,000,000,000,000,000,000
40
SA81000: LibRaw 0.18.8
static void auth_passdb_init(struct auth_passdb *passdb) { passdb_init(passdb->passdb); i_assert(passdb->passdb->default_pass_scheme != NULL || passdb->cache_key == NULL); }
0
[ "CWE-284" ]
core
7bad6a24160e34bce8f10e73dbbf9e5fbbcd1904
247,763,943,667,031,200,000,000,000,000,000,000,000
7
auth: Fix handling passdbs with identical driver/args but different mechanisms/username_filter. The passdb was wrongly deduplicated in this situation, causing the wrong mechanisms or username_filter setting to be used. This would be a rather unlikely configuration, though. Fixed by moving mechanisms and username_filter from struct passdb_module to struct auth_passdb, which is where they should have been in the first place.
int snd_usb_lock_shutdown(struct snd_usb_audio *chip) { int err; atomic_inc(&chip->usage_count); if (atomic_read(&chip->shutdown)) { err = -EIO; goto error; } err = snd_usb_autoresume(chip); if (err < 0) goto error; return 0; error: if (atomic_dec_and_test(&chip->usage_count)) wake_up(&chip->shutdown_wait); return err; }
0
[ "CWE-284", "CWE-125" ]
linux
bfc81a8bc18e3c4ba0cbaa7666ff76be2f998991
79,771,414,752,410,470,000,000,000,000,000,000,000
19
ALSA: usb-audio: Check out-of-bounds access by corrupted buffer descriptor When a USB-audio device receives a maliciously adjusted or corrupted buffer descriptor, the USB-audio driver may access an out-of-bounce value at its parser. This was detected by syzkaller, something like: BUG: KASAN: slab-out-of-bounds in usb_audio_probe+0x27b2/0x2ab0 Read of size 1 at addr ffff88006b83a9e8 by task kworker/0:1/24 CPU: 0 PID: 24 Comm: kworker/0:1 Not tainted 4.14.0-rc1-42251-gebb2c2437d80 #224 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Workqueue: usb_hub_wq hub_event Call Trace: __dump_stack lib/dump_stack.c:16 dump_stack+0x292/0x395 lib/dump_stack.c:52 print_address_description+0x78/0x280 mm/kasan/report.c:252 kasan_report_error mm/kasan/report.c:351 kasan_report+0x22f/0x340 mm/kasan/report.c:409 __asan_report_load1_noabort+0x19/0x20 mm/kasan/report.c:427 snd_usb_create_streams sound/usb/card.c:248 usb_audio_probe+0x27b2/0x2ab0 sound/usb/card.c:605 usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361 really_probe drivers/base/dd.c:413 driver_probe_device+0x610/0xa00 drivers/base/dd.c:557 __device_attach_driver+0x230/0x290 drivers/base/dd.c:653 bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463 __device_attach+0x26e/0x3d0 drivers/base/dd.c:710 device_initial_probe+0x1f/0x30 drivers/base/dd.c:757 bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523 device_add+0xd0b/0x1660 drivers/base/core.c:1835 usb_set_configuration+0x104e/0x1870 drivers/usb/core/message.c:1932 generic_probe+0x73/0xe0 drivers/usb/core/generic.c:174 usb_probe_device+0xaf/0xe0 drivers/usb/core/driver.c:266 really_probe drivers/base/dd.c:413 driver_probe_device+0x610/0xa00 drivers/base/dd.c:557 __device_attach_driver+0x230/0x290 drivers/base/dd.c:653 bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463 __device_attach+0x26e/0x3d0 drivers/base/dd.c:710 device_initial_probe+0x1f/0x30 drivers/base/dd.c:757 bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523 device_add+0xd0b/0x1660 drivers/base/core.c:1835 usb_new_device+0x7b8/0x1020 drivers/usb/core/hub.c:2457 hub_port_connect drivers/usb/core/hub.c:4903 hub_port_connect_change drivers/usb/core/hub.c:5009 port_event drivers/usb/core/hub.c:5115 hub_event+0x194d/0x3740 drivers/usb/core/hub.c:5195 process_one_work+0xc7f/0x1db0 kernel/workqueue.c:2119 worker_thread+0x221/0x1850 kernel/workqueue.c:2253 kthread+0x3a1/0x470 kernel/kthread.c:231 ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431 This patch adds the checks of out-of-bounce accesses at appropriate places and bails out when it goes out of the given buffer. Reported-by: Andrey Konovalov <[email protected]> Tested-by: Andrey Konovalov <[email protected]> Cc: <[email protected]> Signed-off-by: Takashi Iwai <[email protected]>
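A small sketch of the bounds-check pattern the commit message describes; the structure and field names are invented and do not match the USB-audio parser. The point is to never trust a descriptor's declared length and to verify that every field read stays inside the buffer actually received.

#include <stddef.h>
#include <stdint.h>

struct fake_desc {              /* stand-in for a class-specific descriptor */
    uint8_t bLength;
    uint8_t bDescriptorType;
    uint8_t bNrChannels;        /* the field the parser wants to read */
};

/* Return the channel count, or -1 if the descriptor is truncated or corrupt. */
static int read_channels(const uint8_t *buf, size_t buflen)
{
    const struct fake_desc *d = (const struct fake_desc *)buf;

    if (buflen < sizeof(*d))                        /* buffer too short */
        return -1;
    if (d->bLength < sizeof(*d) || d->bLength > buflen)
        return -1;                                  /* declared length lies */
    return d->bNrChannels;
}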
dns_zone_getjournalsize(dns_zone_t *zone) { REQUIRE(DNS_ZONE_VALID(zone)); return (zone->journalsize); }
0
[ "CWE-327" ]
bind9
f09352d20a9d360e50683cd1d2fc52ccedcd77a0
321,390,497,177,831,360,000,000,000,000,000,000,000
5
Update keyfetch_done compute_tag check. If in keyfetch_done the compute_tag fails (because, for example, the algorithm is not supported), don't crash, but instead ignore the key.
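An illustrative, self-contained version of the "ignore the key instead of crashing" pattern; compute_key_tag and the result codes are placeholders, not the BIND 9 API. When the tag cannot be computed (for example, an unsupported algorithm), the key is skipped rather than asserted on.

#include <stddef.h>
#include <stdio.h>

enum result { R_SUCCESS = 0, R_NOTIMPLEMENTED = 1 };

/* Placeholder: pretend odd algorithm numbers are unsupported. */
static enum result compute_key_tag(int algorithm, unsigned int *tag_out)
{
    if (algorithm % 2)
        return R_NOTIMPLEMENTED;
    *tag_out = (unsigned int)(algorithm * 257);
    return R_SUCCESS;
}

int main(void)
{
    int algorithms[] = { 2, 3, 4, 5 };
    for (size_t i = 0; i < sizeof(algorithms) / sizeof(algorithms[0]); i++) {
        unsigned int tag;
        if (compute_key_tag(algorithms[i], &tag) != R_SUCCESS) {
            /* Unsupported or malformed key: log and skip, do not crash. */
            fprintf(stderr, "ignoring key with algorithm %d\n", algorithms[i]);
            continue;
        }
        printf("algorithm %d -> tag %u\n", algorithms[i], tag);
    }
    return 0;
}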
static int sas_recover_I_T(struct domain_device *dev) { int res = TMF_RESP_FUNC_FAILED; struct sas_internal *i = to_sas_internal(dev->port->ha->core.shost->transportt); SAS_DPRINTK("I_T nexus reset for dev %016llx\n", SAS_ADDR(dev->sas_addr)); if (i->dft->lldd_I_T_nexus_reset) res = i->dft->lldd_I_T_nexus_reset(dev); return res; }
0
[]
linux
318aaf34f1179b39fa9c30fa0f3288b645beee39
281,529,313,678,885,400,000,000,000,000,000,000,000
14
scsi: libsas: defer ata device eh commands to libata When ata device doing EH, some commands still attached with tasks are not passed to libata when abort failed or recover failed, so libata did not handle these commands. After these commands done, sas task is freed, but ata qc is not freed. This will cause ata qc leak and trigger a warning like below: WARNING: CPU: 0 PID: 28512 at drivers/ata/libata-eh.c:4037 ata_eh_finish+0xb4/0xcc CPU: 0 PID: 28512 Comm: kworker/u32:2 Tainted: G W OE 4.14.0#1 ...... Call trace: [<ffff0000088b7bd0>] ata_eh_finish+0xb4/0xcc [<ffff0000088b8420>] ata_do_eh+0xc4/0xd8 [<ffff0000088b8478>] ata_std_error_handler+0x44/0x8c [<ffff0000088b8068>] ata_scsi_port_error_handler+0x480/0x694 [<ffff000008875fc4>] async_sas_ata_eh+0x4c/0x80 [<ffff0000080f6be8>] async_run_entry_fn+0x4c/0x170 [<ffff0000080ebd70>] process_one_work+0x144/0x390 [<ffff0000080ec100>] worker_thread+0x144/0x418 [<ffff0000080f2c98>] kthread+0x10c/0x138 [<ffff0000080855dc>] ret_from_fork+0x10/0x18 If ata qc leaked too many, ata tag allocation will fail and io blocked for ever. As suggested by Dan Williams, defer ata device commands to libata and merge sas_eh_finish_cmd() with sas_eh_defer_cmd(). libata will handle ata qcs correctly after this. Signed-off-by: Jason Yan <[email protected]> CC: Xiaofei Tan <[email protected]> CC: John Garry <[email protected]> CC: Dan Williams <[email protected]> Reviewed-by: Dan Williams <[email protected]> Signed-off-by: Martin K. Petersen <[email protected]>
template<typename tc1, typename tc2, typename t> CImg<T>& _draw_text(const int x0, const int y0, const char *const text, const tc1 *const foreground_color, const tc2 *const background_color, const float opacity, const CImgList<t>& font, const bool is_native_font) { if (!text) return *this; if (!font) throw CImgArgumentException(_cimg_instance "draw_text(): Empty specified font.", cimg_instance); const unsigned int text_length = (unsigned int)std::strlen(text); const bool _is_empty = is_empty(); if (_is_empty) { // If needed, pre-compute necessary size of the image int x = 0, y = 0, w = 0; unsigned char c = 0; for (unsigned int i = 0; i<text_length; ++i) { c = (unsigned char)text[i]; switch (c) { case '\n' : y+=font[0]._height; if (x>w) w = x; x = 0; break; case '\t' : x+=4*font[' ']._width; break; default : if (c<font._width) x+=font[c]._width; } } if (x!=0 || c=='\n') { if (x>w) w=x; y+=font[0]._height; } assign(x0 + w,y0 + y,1,is_native_font?1:font[0]._spectrum,(T)0); } int x = x0, y = y0; for (unsigned int i = 0; i<text_length; ++i) { const unsigned char c = (unsigned char)text[i]; switch (c) { case '\n' : y+=font[0]._height; x = x0; break; case '\t' : x+=4*font[' ']._width; break; default : if (c<font._width) { CImg<T> letter = font[c]; if (letter) { if (is_native_font && _spectrum>letter._spectrum) letter.resize(-100,-100,1,_spectrum,0,2); const unsigned int cmin = std::min(_spectrum,letter._spectrum); if (foreground_color) for (unsigned int c = 0; c<cmin; ++c) if (foreground_color[c]!=1) letter.get_shared_channel(c)*=foreground_color[c]; if (c + 256<font.width()) { // Letter has mask. if (background_color) for (unsigned int c = 0; c<cmin; ++c) draw_rectangle(x,y,0,c,x + letter._width - 1,y + letter._height - 1,0,c, background_color[c],opacity); draw_image(x,y,letter,font[c + 256],opacity,255.0f); } else draw_image(x,y,letter,opacity); // Letter has no mask. x+=letter._width; } } } } return *this;
0
[ "CWE-125" ]
CImg
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
300,557,687,005,963,300,000,000,000,000,000,000,000
60
Fix other issues in 'CImg<T>::load_bmp()'.
Item_string(const char *name_par, const char *str, uint length, CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE, uint repertoire= MY_REPERTOIRE_UNICODE30) : m_cs_specified(FALSE) { str_value.set_or_copy_aligned(str, length, cs); collation.set(cs, dv, repertoire); max_length= str_value.numchars()*cs->mbmaxlen; set_name(name_par, 0, cs); decimals=NOT_FIXED_DEC; // it is constant => can be used without fix_fields (and frequently used) fixed= 1; }
0
[]
mysql-server
f7316aa0c9a3909fc7498e7b95d5d3af044a7e21
205,849,913,366,367,560,000,000,000,000,000,000,000
13
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL)). Backport of the Bug#19143243 fix. A NAME_CONST item can return the NULL_ITEM type in case of incorrect arguments. NULL_ITEM has special processing in the Item_func_in function. In Item_func_in::fix_length_and_dec an array of possible comparators is created. Since the NAME_CONST function has the NULL_ITEM type, the corresponding array element is empty. Then NAME_CONST is wrapped in an ITEM_CACHE. The ITEM_CACHE cannot return the proper type (NULL_ITEM) in Item_func_in::val_int(), so the NULL_ITEM ends up being compared with an empty comparator. The fix is to disable the caching of the Item_name_const item.
static int tipc_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct tipc_sock *tsk = tipc_sk(sk); struct sk_buff *skb; u32 dnode = tsk_peer_node(tsk); u32 dport = tsk_peer_port(tsk); u32 onode = tipc_own_addr(net); u32 oport = tsk->portid; int res; if (how != SHUT_RDWR) return -EINVAL; lock_sock(sk); switch (sock->state) { case SS_CONNECTING: case SS_CONNECTED: restart: dnode = tsk_peer_node(tsk); /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */ skb = __skb_dequeue(&sk->sk_receive_queue); if (skb) { if (TIPC_SKB_CB(skb)->handle != NULL) { kfree_skb(skb); goto restart; } tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN); } else { skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, onode, dport, oport, TIPC_CONN_SHUTDOWN); tipc_node_xmit_skb(net, skb, dnode, tsk->portid); } tsk->connected = 0; sock->state = SS_DISCONNECTING; tipc_node_remove_conn(net, dnode, tsk->portid); /* fall through */ case SS_DISCONNECTING: /* Discard any unreceived messages */ __skb_queue_purge(&sk->sk_receive_queue); /* Wake up anyone sleeping in poll */ sk->sk_state_change(sk); res = 0; break; default: res = -ENOTCONN; } release_sock(sk); return res; }
0
[ "CWE-703" ]
linux
45e093ae2830cd1264677d47ff9a95a71f5d9f9c
234,223,302,361,771,950,000,000,000,000,000,000,000
61
tipc: check nl sock before parsing nested attributes. Make sure the socket for which the user is listing publications exists before parsing the socket netlink attributes. Prior to this patch, a call without any socket caused a NULL pointer dereference in tipc_nl_publ_dump(). Tested-and-reported-by: Baozeng Ding <[email protected]> Signed-off-by: Richard Alpe <[email protected]> Acked-by: Jon Maloy <[email protected]> Signed-off-by: David S. Miller <[email protected]>
BasicArrayWriter(Char *array, std::size_t size) : BasicWriter<Char>(buffer_), buffer_(array, size) {}
0
[ "CWE-134", "CWE-119", "CWE-787" ]
fmt
8cf30aa2be256eba07bb1cefb998c52326e846e7
108,315,919,303,561,820,000,000,000,000,000,000,000
2
Fix segfault on complex pointer formatting (#642)
static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data, struct sk_buff *skb, int portid) { struct net *net; bool found = false; int res = -ENOENT; rcu_read_lock(); for_each_net_rcu(net) { if (data->netgroup == hwsim_net_get_netgroup(net)) { res = genlmsg_unicast(net, skb, portid); found = true; break; } } rcu_read_unlock(); if (!found) nlmsg_free(skb); return res; }
0
[ "CWE-703", "CWE-772" ]
linux
0ddcff49b672239dda94d70d0fcf50317a9f4b51
7,423,375,211,744,365,000,000,000,000,000,000,000
22
mac80211_hwsim: fix possible memory leak in hwsim_new_radio_nl(). 'hwname' is malloced in hwsim_new_radio_nl() and should be freed before leaving from the error handling cases; otherwise it will cause a memory leak. Fixes: ff4dd73dd2b4 ("mac80211_hwsim: check HWSIM_ATTR_RADIO_NAME length") Signed-off-by: Wei Yongjun <[email protected]> Reviewed-by: Ben Hutchings <[email protected]> Signed-off-by: Johannes Berg <[email protected]>
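A compact sketch of the "free on every error path" pattern this kind of fix applies; the names are invented and this is not the hwsim code. Until ownership of the duplicated name has been handed off, each failure exit releases it, typically through a single cleanup label.

#include <stdlib.h>
#include <string.h>

struct radio { char *name; };

/* Create a radio from an optional name; never leak the duplicated string. */
static struct radio *new_radio(const char *name)
{
    char *hwname = NULL;
    struct radio *r;

    if (name != NULL) {
        hwname = strdup(name);
        if (hwname == NULL)
            return NULL;
    }

    r = calloc(1, sizeof(*r));
    if (r == NULL)
        goto err;                /* error path: hwname must not leak */

    r->name = hwname;            /* ownership transferred on success */
    return r;

err:
    free(hwname);                /* free(NULL) is a no-op, so this is safe */
    return NULL;
}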
int __init efi_memblock_x86_reserve_range(void) { struct efi_info *e = &boot_params.efi_info; struct efi_memory_map_data data; phys_addr_t pmap; int rv; if (efi_enabled(EFI_PARAVIRT)) return 0; #ifdef CONFIG_X86_32 /* Can't handle data above 4GB at this time */ if (e->efi_memmap_hi) { pr_err("Memory map is above 4GB, disabling EFI.\n"); return -EINVAL; } pmap = e->efi_memmap; #else pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32)); #endif data.phys_map = pmap; data.size = e->efi_memmap_size; data.desc_size = e->efi_memdesc_size; data.desc_version = e->efi_memdesc_version; rv = efi_memmap_init_early(&data); if (rv) return rv; if (add_efi_memmap) do_add_efi_memmap(); WARN(efi.memmap.desc_version != 1, "Unexpected EFI_MEMORY_DESCRIPTOR version %ld", efi.memmap.desc_version); memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size); return 0; }
0
[ "CWE-388" ]
tip
4e78921ba4dd0aca1cc89168f45039add4183f8e
24,334,576,957,843,847,000,000,000,000,000,000,000
40
efi/x86/Add missing error handling to old_memmap 1:1 mapping code The old_memmap flow in efi_call_phys_prolog() performs numerous memory allocations, and either does not check for failure at all, or it does but fails to propagate it back to the caller, which may end up calling into the firmware with an incomplete 1:1 mapping. So let's fix this by returning NULL from efi_call_phys_prolog() on memory allocation failures only, and by handling this condition in the caller. Also, clean up any half baked sets of page tables that we may have created before returning with a NULL return value. Note that any failure at this level will trigger a panic() two levels up, so none of this makes a huge difference, but it is a nice cleanup nonetheless. [ardb: update commit log, add efi_call_phys_epilog() call on error path] Signed-off-by: Gen Zhang <[email protected]> Signed-off-by: Ard Biesheuvel <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Rob Bradford <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: [email protected] Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
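A self-contained sketch of "propagate the allocation failure and tear down the partial result", as the message describes; this is a toy table of per-level pages, not the EFI page-table code. On the first failed allocation everything built so far is freed and NULL is returned, so the caller can back out instead of continuing with an incomplete mapping.

#include <stdlib.h>

#define N_LEVELS 4

/* Build a small chain of per-level tables; clean up on partial failure. */
static void **build_tables(size_t entries_per_level)
{
    void **levels = calloc(N_LEVELS, sizeof(*levels));
    if (levels == NULL)
        return NULL;

    for (int i = 0; i < N_LEVELS; i++) {
        levels[i] = calloc(entries_per_level, sizeof(void *));
        if (levels[i] == NULL) {
            for (int j = 0; j < i; j++)   /* tear down the half-built set */
                free(levels[j]);
            free(levels);
            return NULL;                  /* caller must handle the failure */
        }
    }
    return levels;
}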
networkstatus_parse_vote_from_string(const char *s, const char **eos_out, networkstatus_type_t ns_type) { smartlist_t *tokens = smartlist_create(); smartlist_t *rs_tokens = NULL, *footer_tokens = NULL; networkstatus_voter_info_t *voter = NULL; networkstatus_t *ns = NULL; digests_t ns_digests; const char *cert, *end_of_header, *end_of_footer, *s_dup = s; directory_token_t *tok; int ok; struct in_addr in; int i, inorder, n_signatures = 0; memarea_t *area = NULL, *rs_area = NULL; consensus_flavor_t flav = FLAV_NS; tor_assert(s); if (eos_out) *eos_out = NULL; if (router_get_networkstatus_v3_hashes(s, &ns_digests)) { log_warn(LD_DIR, "Unable to compute digest of network-status"); goto err; } area = memarea_new(); end_of_header = find_start_of_next_routerstatus(s); if (tokenize_string(area, s, end_of_header, tokens, (ns_type == NS_TYPE_CONSENSUS) ? networkstatus_consensus_token_table : networkstatus_token_table, 0)) { log_warn(LD_DIR, "Error tokenizing network-status vote header"); goto err; } ns = tor_malloc_zero(sizeof(networkstatus_t)); memcpy(&ns->digests, &ns_digests, sizeof(ns_digests)); tok = find_by_keyword(tokens, K_NETWORK_STATUS_VERSION); tor_assert(tok); if (tok->n_args > 1) { int flavor = networkstatus_parse_flavor_name(tok->args[1]); if (flavor < 0) { log_warn(LD_DIR, "Can't parse document with unknown flavor %s", escaped(tok->args[1])); goto err; } ns->flavor = flav = flavor; } if (flav != FLAV_NS && ns_type != NS_TYPE_CONSENSUS) { log_warn(LD_DIR, "Flavor found on non-consensus networkstatus."); goto err; } if (ns_type != NS_TYPE_CONSENSUS) { const char *end_of_cert = NULL; if (!(cert = strstr(s, "\ndir-key-certificate-version"))) goto err; ++cert; ns->cert = authority_cert_parse_from_string(cert, &end_of_cert); if (!ns->cert || !end_of_cert || end_of_cert > end_of_header) goto err; } tok = find_by_keyword(tokens, K_VOTE_STATUS); tor_assert(tok->n_args); if (!strcmp(tok->args[0], "vote")) { ns->type = NS_TYPE_VOTE; } else if (!strcmp(tok->args[0], "consensus")) { ns->type = NS_TYPE_CONSENSUS; } else if (!strcmp(tok->args[0], "opinion")) { ns->type = NS_TYPE_OPINION; } else { log_warn(LD_DIR, "Unrecognized vote status %s in network-status", escaped(tok->args[0])); goto err; } if (ns_type != ns->type) { log_warn(LD_DIR, "Got the wrong kind of v3 networkstatus."); goto err; } if (ns->type == NS_TYPE_VOTE || ns->type == NS_TYPE_OPINION) { tok = find_by_keyword(tokens, K_PUBLISHED); if (parse_iso_time(tok->args[0], &ns->published)) goto err; ns->supported_methods = smartlist_create(); tok = find_opt_by_keyword(tokens, K_CONSENSUS_METHODS); if (tok) { for (i=0; i < tok->n_args; ++i) smartlist_add(ns->supported_methods, tor_strdup(tok->args[i])); } else { smartlist_add(ns->supported_methods, tor_strdup("1")); } } else { tok = find_opt_by_keyword(tokens, K_CONSENSUS_METHOD); if (tok) { ns->consensus_method = (int)tor_parse_long(tok->args[0], 10, 1, INT_MAX, &ok, NULL); if (!ok) goto err; } else { ns->consensus_method = 1; } } tok = find_by_keyword(tokens, K_VALID_AFTER); if (parse_iso_time(tok->args[0], &ns->valid_after)) goto err; tok = find_by_keyword(tokens, K_FRESH_UNTIL); if (parse_iso_time(tok->args[0], &ns->fresh_until)) goto err; tok = find_by_keyword(tokens, K_VALID_UNTIL); if (parse_iso_time(tok->args[0], &ns->valid_until)) goto err; tok = find_by_keyword(tokens, K_VOTING_DELAY); tor_assert(tok->n_args >= 2); ns->vote_seconds = (int) tor_parse_long(tok->args[0], 10, 0, INT_MAX, &ok, NULL); if (!ok) goto err; ns->dist_seconds = (int) tor_parse_long(tok->args[1], 10, 0, INT_MAX, 
&ok, NULL); if (!ok) goto err; if (ns->valid_after + MIN_VOTE_INTERVAL > ns->fresh_until) { log_warn(LD_DIR, "Vote/consensus freshness interval is too short"); goto err; } if (ns->valid_after + MIN_VOTE_INTERVAL*2 > ns->valid_until) { log_warn(LD_DIR, "Vote/consensus liveness interval is too short"); goto err; } if (ns->vote_seconds < MIN_VOTE_SECONDS) { log_warn(LD_DIR, "Vote seconds is too short"); goto err; } if (ns->dist_seconds < MIN_DIST_SECONDS) { log_warn(LD_DIR, "Dist seconds is too short"); goto err; } if ((tok = find_opt_by_keyword(tokens, K_CLIENT_VERSIONS))) { ns->client_versions = tor_strdup(tok->args[0]); } if ((tok = find_opt_by_keyword(tokens, K_SERVER_VERSIONS))) { ns->server_versions = tor_strdup(tok->args[0]); } tok = find_by_keyword(tokens, K_KNOWN_FLAGS); ns->known_flags = smartlist_create(); inorder = 1; for (i = 0; i < tok->n_args; ++i) { smartlist_add(ns->known_flags, tor_strdup(tok->args[i])); if (i>0 && strcmp(tok->args[i-1], tok->args[i])>= 0) { log_warn(LD_DIR, "%s >= %s", tok->args[i-1], tok->args[i]); inorder = 0; } } if (!inorder) { log_warn(LD_DIR, "known-flags not in order"); goto err; } tok = find_opt_by_keyword(tokens, K_PARAMS); if (tok) { inorder = 1; ns->net_params = smartlist_create(); for (i = 0; i < tok->n_args; ++i) { int ok=0; char *eq = strchr(tok->args[i], '='); if (!eq) { log_warn(LD_DIR, "Bad element '%s' in params", escaped(tok->args[i])); goto err; } tor_parse_long(eq+1, 10, INT32_MIN, INT32_MAX, &ok, NULL); if (!ok) { log_warn(LD_DIR, "Bad element '%s' in params", escaped(tok->args[i])); goto err; } if (i > 0 && strcmp(tok->args[i-1], tok->args[i]) >= 0) { log_warn(LD_DIR, "%s >= %s", tok->args[i-1], tok->args[i]); inorder = 0; } smartlist_add(ns->net_params, tor_strdup(tok->args[i])); } if (!inorder) { log_warn(LD_DIR, "params not in order"); goto err; } } ns->voters = smartlist_create(); SMARTLIST_FOREACH_BEGIN(tokens, directory_token_t *, _tok) { tok = _tok; if (tok->tp == K_DIR_SOURCE) { tor_assert(tok->n_args >= 6); if (voter) smartlist_add(ns->voters, voter); voter = tor_malloc_zero(sizeof(networkstatus_voter_info_t)); voter->sigs = smartlist_create(); if (ns->type != NS_TYPE_CONSENSUS) memcpy(voter->vote_digest, ns_digests.d[DIGEST_SHA1], DIGEST_LEN); voter->nickname = tor_strdup(tok->args[0]); if (strlen(tok->args[1]) != HEX_DIGEST_LEN || base16_decode(voter->identity_digest, sizeof(voter->identity_digest), tok->args[1], HEX_DIGEST_LEN) < 0) { log_warn(LD_DIR, "Error decoding identity digest %s in " "network-status vote.", escaped(tok->args[1])); goto err; } if (ns->type != NS_TYPE_CONSENSUS && tor_memneq(ns->cert->cache_info.identity_digest, voter->identity_digest, DIGEST_LEN)) { log_warn(LD_DIR,"Mismatch between identities in certificate and vote"); goto err; } voter->address = tor_strdup(tok->args[2]); if (!tor_inet_aton(tok->args[3], &in)) { log_warn(LD_DIR, "Error decoding IP address %s in network-status.", escaped(tok->args[3])); goto err; } voter->addr = ntohl(in.s_addr); voter->dir_port = (uint16_t) tor_parse_long(tok->args[4], 10, 0, 65535, &ok, NULL); if (!ok) goto err; voter->or_port = (uint16_t) tor_parse_long(tok->args[5], 10, 0, 65535, &ok, NULL); if (!ok) goto err; } else if (tok->tp == K_CONTACT) { if (!voter || voter->contact) { log_warn(LD_DIR, "contact element is out of place."); goto err; } voter->contact = tor_strdup(tok->args[0]); } else if (tok->tp == K_VOTE_DIGEST) { tor_assert(ns->type == NS_TYPE_CONSENSUS); tor_assert(tok->n_args >= 1); if (!voter || ! 
tor_digest_is_zero(voter->vote_digest)) { log_warn(LD_DIR, "vote-digest element is out of place."); goto err; } if (strlen(tok->args[0]) != HEX_DIGEST_LEN || base16_decode(voter->vote_digest, sizeof(voter->vote_digest), tok->args[0], HEX_DIGEST_LEN) < 0) { log_warn(LD_DIR, "Error decoding vote digest %s in " "network-status consensus.", escaped(tok->args[0])); goto err; } } } SMARTLIST_FOREACH_END(_tok); if (voter) { smartlist_add(ns->voters, voter); voter = NULL; } if (smartlist_len(ns->voters) == 0) { log_warn(LD_DIR, "Missing dir-source elements in a vote networkstatus."); goto err; } else if (ns->type != NS_TYPE_CONSENSUS && smartlist_len(ns->voters) != 1) { log_warn(LD_DIR, "Too many dir-source elements in a vote networkstatus."); goto err; } if (ns->type != NS_TYPE_CONSENSUS && (tok = find_opt_by_keyword(tokens, K_LEGACY_DIR_KEY))) { int bad = 1; if (strlen(tok->args[0]) == HEX_DIGEST_LEN) { networkstatus_voter_info_t *voter = smartlist_get(ns->voters, 0); if (base16_decode(voter->legacy_id_digest, DIGEST_LEN, tok->args[0], HEX_DIGEST_LEN)<0) bad = 1; else bad = 0; } if (bad) { log_warn(LD_DIR, "Invalid legacy key digest %s on vote.", escaped(tok->args[0])); } } /* Parse routerstatus lines. */ rs_tokens = smartlist_create(); rs_area = memarea_new(); s = end_of_header; ns->routerstatus_list = smartlist_create(); while (!strcmpstart(s, "r ")) { if (ns->type != NS_TYPE_CONSENSUS) { vote_routerstatus_t *rs = tor_malloc_zero(sizeof(vote_routerstatus_t)); if (routerstatus_parse_entry_from_string(rs_area, &s, rs_tokens, ns, rs, 0, 0)) smartlist_add(ns->routerstatus_list, rs); else { tor_free(rs->version); tor_free(rs); } } else { routerstatus_t *rs; if ((rs = routerstatus_parse_entry_from_string(rs_area, &s, rs_tokens, NULL, NULL, ns->consensus_method, flav))) smartlist_add(ns->routerstatus_list, rs); } } for (i = 1; i < smartlist_len(ns->routerstatus_list); ++i) { routerstatus_t *rs1, *rs2; if (ns->type != NS_TYPE_CONSENSUS) { vote_routerstatus_t *a = smartlist_get(ns->routerstatus_list, i-1); vote_routerstatus_t *b = smartlist_get(ns->routerstatus_list, i); rs1 = &a->status; rs2 = &b->status; } else { rs1 = smartlist_get(ns->routerstatus_list, i-1); rs2 = smartlist_get(ns->routerstatus_list, i); } if (fast_memcmp(rs1->identity_digest, rs2->identity_digest, DIGEST_LEN) >= 0) { log_warn(LD_DIR, "Vote networkstatus entries not sorted by identity " "digest"); goto err; } } /* Parse footer; check signature. 
*/ footer_tokens = smartlist_create(); if ((end_of_footer = strstr(s, "\nnetwork-status-version "))) ++end_of_footer; else end_of_footer = s + strlen(s); if (tokenize_string(area,s, end_of_footer, footer_tokens, networkstatus_vote_footer_token_table, 0)) { log_warn(LD_DIR, "Error tokenizing network-status vote footer."); goto err; } { int found_sig = 0; SMARTLIST_FOREACH_BEGIN(footer_tokens, directory_token_t *, _tok) { tok = _tok; if (tok->tp == K_DIRECTORY_SIGNATURE) found_sig = 1; else if (found_sig) { log_warn(LD_DIR, "Extraneous token after first directory-signature"); goto err; } } SMARTLIST_FOREACH_END(_tok); } if ((tok = find_opt_by_keyword(footer_tokens, K_DIRECTORY_FOOTER))) { if (tok != smartlist_get(footer_tokens, 0)) { log_warn(LD_DIR, "Misplaced directory-footer token"); goto err; } } tok = find_opt_by_keyword(footer_tokens, K_BW_WEIGHTS); if (tok) { ns->weight_params = smartlist_create(); for (i = 0; i < tok->n_args; ++i) { int ok=0; char *eq = strchr(tok->args[i], '='); if (!eq) { log_warn(LD_DIR, "Bad element '%s' in weight params", escaped(tok->args[i])); goto err; } tor_parse_long(eq+1, 10, INT32_MIN, INT32_MAX, &ok, NULL); if (!ok) { log_warn(LD_DIR, "Bad element '%s' in params", escaped(tok->args[i])); goto err; } smartlist_add(ns->weight_params, tor_strdup(tok->args[i])); } } SMARTLIST_FOREACH_BEGIN(footer_tokens, directory_token_t *, _tok) { char declared_identity[DIGEST_LEN]; networkstatus_voter_info_t *v; document_signature_t *sig; const char *id_hexdigest = NULL; const char *sk_hexdigest = NULL; digest_algorithm_t alg = DIGEST_SHA1; tok = _tok; if (tok->tp != K_DIRECTORY_SIGNATURE) continue; tor_assert(tok->n_args >= 2); if (tok->n_args == 2) { id_hexdigest = tok->args[0]; sk_hexdigest = tok->args[1]; } else { const char *algname = tok->args[0]; int a; id_hexdigest = tok->args[1]; sk_hexdigest = tok->args[2]; a = crypto_digest_algorithm_parse_name(algname); if (a<0) { log_warn(LD_DIR, "Unknown digest algorithm %s; skipping", escaped(algname)); continue; } alg = a; } if (!tok->object_type || strcmp(tok->object_type, "SIGNATURE") || tok->object_size < 128 || tok->object_size > 512) { log_warn(LD_DIR, "Bad object type or length on directory-signature"); goto err; } if (strlen(id_hexdigest) != HEX_DIGEST_LEN || base16_decode(declared_identity, sizeof(declared_identity), id_hexdigest, HEX_DIGEST_LEN) < 0) { log_warn(LD_DIR, "Error decoding declared identity %s in " "network-status vote.", escaped(id_hexdigest)); goto err; } if (!(v = networkstatus_get_voter_by_id(ns, declared_identity))) { log_warn(LD_DIR, "ID on signature on network-status vote does not match " "any declared directory source."); goto err; } sig = tor_malloc_zero(sizeof(document_signature_t)); memcpy(sig->identity_digest, v->identity_digest, DIGEST_LEN); sig->alg = alg; if (strlen(sk_hexdigest) != HEX_DIGEST_LEN || base16_decode(sig->signing_key_digest, sizeof(sig->signing_key_digest), sk_hexdigest, HEX_DIGEST_LEN) < 0) { log_warn(LD_DIR, "Error decoding declared signing key digest %s in " "network-status vote.", escaped(sk_hexdigest)); tor_free(sig); goto err; } if (ns->type != NS_TYPE_CONSENSUS) { if (tor_memneq(declared_identity, ns->cert->cache_info.identity_digest, DIGEST_LEN)) { log_warn(LD_DIR, "Digest mismatch between declared and actual on " "network-status vote."); tor_free(sig); goto err; } } if (voter_get_sig_by_algorithm(v, sig->alg)) { /* We already parsed a vote with this algorithm from this voter. Use the first one. 
*/ log_fn(LOG_PROTOCOL_WARN, LD_DIR, "We received a networkstatus " "that contains two votes from the same voter with the same " "algorithm. Ignoring the second vote."); tor_free(sig); continue; } if (ns->type != NS_TYPE_CONSENSUS) { if (check_signature_token(ns_digests.d[DIGEST_SHA1], DIGEST_LEN, tok, ns->cert->signing_key, 0, "network-status vote")) { tor_free(sig); goto err; } sig->good_signature = 1; } else { if (tok->object_size >= INT_MAX || tok->object_size >= SIZE_T_CEILING) { tor_free(sig); goto err; } sig->signature = tor_memdup(tok->object_body, tok->object_size); sig->signature_len = (int) tok->object_size; } smartlist_add(v->sigs, sig); ++n_signatures; } SMARTLIST_FOREACH_END(_tok); if (! n_signatures) { log_warn(LD_DIR, "No signatures on networkstatus vote."); goto err; } else if (ns->type == NS_TYPE_VOTE && n_signatures != 1) { log_warn(LD_DIR, "Received more than one signature on a " "network-status vote."); goto err; } if (eos_out) *eos_out = end_of_footer; goto done; err: dump_desc(s_dup, "v3 networkstatus"); networkstatus_vote_free(ns); ns = NULL; done: if (tokens) { SMARTLIST_FOREACH(tokens, directory_token_t *, t, token_clear(t)); smartlist_free(tokens); } if (voter) { if (voter->sigs) { SMARTLIST_FOREACH(voter->sigs, document_signature_t *, sig, document_signature_free(sig)); smartlist_free(voter->sigs); } tor_free(voter->nickname); tor_free(voter->address); tor_free(voter->contact); tor_free(voter); } if (rs_tokens) { SMARTLIST_FOREACH(rs_tokens, directory_token_t *, t, token_clear(t)); smartlist_free(rs_tokens); } if (footer_tokens) { SMARTLIST_FOREACH(footer_tokens, directory_token_t *, t, token_clear(t)); smartlist_free(footer_tokens); } if (area) { DUMP_AREA(area, "v3 networkstatus"); memarea_drop_all(area); } if (rs_area) memarea_drop_all(rs_area); return ns; }
0
[ "CWE-399" ]
tor
57e35ad3d91724882c345ac709666a551a977f0f
182,402,160,074,870,680,000,000,000,000,000,000,000
536
Avoid possible segfault when handling networkstatus vote with bad flavor. Fix for 6530; fix on 0.2.2.6-alpha.
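A small sketch of the validation that prevents this class of crash; the parser below is illustrative, not the tor networkstatus_parse_flavor_name() implementation referenced in the function above. An unrecognized flavor name maps to a negative value and the document is rejected before anything indexes on the flavor.

#include <string.h>

enum flavor { FLAV_NS = 0, FLAV_MICRODESC = 1 };

/* Return the flavor index, or -1 for anything unknown. */
static int parse_flavor_name(const char *name)
{
    if (name == NULL)
        return -1;
    if (strcmp(name, "ns") == 0)
        return FLAV_NS;
    if (strcmp(name, "microdesc") == 0)
        return FLAV_MICRODESC;
    return -1;                   /* caller must treat this as a parse error */
}

/* Caller-side check, mirroring the flavor < 0 test in the parser above. */
static int accept_flavor(const char *name, int *flavor_out)
{
    int f = parse_flavor_name(name);
    if (f < 0)
        return -1;               /* reject the document instead of crashing */
    *flavor_out = f;
    return 0;
}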