func<br>stringlengths 0–484k | target<br>int64 0–1 | cwe<br>listlengths 0–4 | project<br>stringclasses 799 values | commit_id<br>stringlengths 40–40 | hash<br>float64 1,215,700,430,453,689,100,000,000 – 340,281,914,521,452,260,000,000,000,000 | size<br>int64 1–24k | message<br>stringlengths 0–13.3k |
---|---|---|---|---|---|---|---|
void *EC_EX_DATA_get_data(const EC_EXTRA_DATA *ex_data,
void *(*dup_func) (void *),
void (*free_func) (void *),
void (*clear_free_func) (void *))
{
const EC_EXTRA_DATA *d;
for (d = ex_data; d != NULL; d = d->next) {
if (d->dup_func == dup_func && d->free_func == free_func
&& d->clear_free_func == clear_free_func)
return d->data;
}
return NULL;
} | 0 | [] | openssl | 21c856b75d81eff61aa63b4f036bb64a85bf6d46 | 338,540,826,415,579,860,000,000,000,000,000,000,000 | 15 | [crypto/ec] for ECC parameters with NULL or zero cofactor, compute it
The cofactor argument to EC_GROUP_set_generator is optional, and SCA
mitigations for ECC currently use it. So the library currently falls
back to very old SCA-vulnerable code if the cofactor is not present.
This PR allows EC_GROUP_set_generator to compute the cofactor for all
curves of cryptographic interest, steering scalar multiplication to more
SCA-robust code.
This issue affects persisted private keys in explicit parameter form,
where the (optional) cofactor field is zero or absent.
It also affects curves not built-in to the library, but constructed
programmatically with explicit parameters, then calling
EC_GROUP_set_generator with a nonsensical value (NULL, zero).
The very old scalar multiplication code is known to be vulnerable to
local uarch attacks, outside of the OpenSSL threat model. New results
suggest the code path is also vulnerable to traditional wall clock
timing attacks.
CVE-2019-1547
Reviewed-by: Nicola Tuveri <[email protected]>
Reviewed-by: Matt Caswell <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/9799) |
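As a side note on the arithmetic such a fix can rely on, here is a toy sketch in plain C. It uses 64-bit integers in place of OpenSSL BIGNUMs, and the helper name and curve numbers are invented for illustration, not taken from the library: by Hasse's theorem the group size is at most (sqrt(p)+1)^2, so whenever the subgroup order n exceeds 4*sqrt(p), as it does for curves of cryptographic interest, the cofactor is uniquely determined.

```c
#include <stdint.h>
#include <stdio.h>

/* Integer square root by linear search -- adequate for toy inputs only. */
static uint64_t isqrt(uint64_t x)
{
    uint64_t r = 0;
    while ((r + 1) * (r + 1) <= x)
        r++;
    return r;
}

/* Hypothetical helper mirroring the idea: h = round((sqrt(p) + 1)^2 / n). */
static uint64_t cofactor_from_order(uint64_t p, uint64_t n)
{
    uint64_t s = isqrt(p) + 1;
    return (s * s + n / 2) / n;          /* rounded integer division */
}

int main(void)
{
    /* toy field: p = 1019, subgroup order n = 256, group size 1024 -> h = 4 */
    printf("h = %llu\n", (unsigned long long)cofactor_from_order(1019, 256));
    return 0;
}
```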
bool check_vcol_func_processor(void *arg)
{
return mark_unsupported_function("name_const()", arg, VCOL_IMPOSSIBLE);
} | 0 | [
"CWE-617"
] | server | 2e7891080667c59ac80f788eef4d59d447595772 | 261,515,243,352,143,100,000,000,000,000,000,000,000 | 4 | MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond to set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set returned by the
main select query in 10.2. In 10.4 where a possibility of pushing condition
from HAVING into WHERE had been added this could cause a crash.
Approved by Sergey Petrunya <[email protected]> |
gx_dc_pattern2_color_has_bbox(const gx_device_color * pdevc)
{
gs_pattern2_instance_t *pinst = (gs_pattern2_instance_t *)pdevc->ccolor.pattern;
const gs_shading_t *psh = pinst->templat.Shading;
return psh->params.have_BBox;
} | 0 | [
"CWE-704"
] | ghostpdl | 693baf02152119af6e6afd30bb8ec76d14f84bbf | 187,220,490,693,118,970,000,000,000,000,000,000,000 | 7 | PS interpreter - check the Implementation of a Pattern before use
Bug #700141 "Type confusion in setpattern"
As the bug thread says, we were not checking that the Implementation
of a pattern dictionary was a structure type, leading to a crash when
we tried to treat it as one.
Here we make the st_pattern1_instance and st_pattern2_instance
structures public definitions and in zsetcolor we check the object
stored under the Implementation key in the supplied dictionary to see if
it's a t_struct or t_astruct type and, if it is, that it's a
st_pattern1_instance or st_pattern2_instance structure.
If either check fails we throw a typecheck error.
We need to make the st_pattern1_instance and st_pattern2_instance
definitions public as they are defined in the graphics library and we
need to check in the interpreter. |
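For illustration, a minimal sketch of this kind of tag check, with invented toy type names standing in for the interpreter's actual object model:

```c
#include <stdio.h>

/* Toy type tags; the real interpreter distinguishes many more kinds. */
typedef enum { t_integer, t_string, t_struct, t_astruct } obj_type;
typedef struct { obj_type type; void *payload; } ref;

/* Return 0 if impl may be treated as a structure, -1 for a typecheck error. */
static int check_implementation(const ref *impl)
{
    if (impl->type != t_struct && impl->type != t_astruct)
        return -1;          /* raise typecheck, as the fix does */
    /* the real fix additionally verifies the structure descriptor here */
    return 0;
}

int main(void)
{
    ref bogus = { t_integer, NULL };    /* attacker-supplied non-struct */
    printf("%s\n", check_implementation(&bogus) ? "typecheck" : "ok");
    return 0;
}
```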
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
((reg & 0x0000ffff) | (2 << 28)));
return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
} | 0 | [
"CWE-476"
] | linux | 8188a18ee2e48c9a7461139838048363bfce3fef | 97,814,264,651,882,080,000,000,000,000,000,000,000 | 6 | iwlwifi: pcie: fix rb_allocator workqueue allocation
We don't handle failures in the rb_allocator workqueue allocation
correctly. To fix that, move the code earlier so the cleanup is
easier and we don't have to undo all the interrupt allocations in
this case.
Signed-off-by: Johannes Berg <[email protected]>
Signed-off-by: Luca Coelho <[email protected]> |
Warn_filter(bool want_warning_edom, bool want_note_truncated_spaces) :
m_want_warning_edom(want_warning_edom),
m_want_note_truncated_spaces(want_note_truncated_spaces)
{ } | 0 | [
"CWE-416",
"CWE-703"
] | server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 100,981,805,390,400,770,000,000,000,000,000,000,000 | 4 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When the table is reopened from cache, vcol_info contains a stale
expression. We refresh the expression via TABLE::vcol_fix_exprs(), but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
static int ram_save_complete(QEMUFile *f, void *opaque)
{
qemu_mutex_lock_ramlist();
migration_bitmap_sync();
ram_control_before_iterate(f, RAM_CONTROL_FINISH);
/* try transferring iterative blocks of memory */
/* flush all remaining blocks regardless of rate limiting */
while (true) {
int bytes_sent;
bytes_sent = ram_find_and_save_block(f, true);
/* no more blocks to send */
if (bytes_sent == 0) {
break;
}
bytes_transferred += bytes_sent;
}
ram_control_after_iterate(f, RAM_CONTROL_FINISH);
migration_end();
qemu_mutex_unlock_ramlist();
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
return 0;
} | 0 | [
"CWE-20"
] | qemu | 0be839a2701369f669532ea5884c15bead1c6e08 | 138,456,551,317,890,690,000,000,000,000,000,000,000 | 29 | migration: fix parameter validation on ram load
During migration, the values read from migration stream during ram load
are not validated. Especially offset in host_from_stream_offset() and
also the length of the writes in the callers of said function.
To fix this, we need to make sure that the [offset, offset + length]
range fits into one of the allocated memory regions.
Validating addr < len should be sufficient since data seems to always be
managed in TARGET_PAGE_SIZE chunks.
Fixes: CVE-2014-7840
Note: follow-up patches add extra checks on each block->host access.
Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Paolo Bonzini <[email protected]>
Reviewed-by: Dr. David Alan Gilbert <[email protected]>
Signed-off-by: Amit Shah <[email protected]> |
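A minimal sketch of the range validation the message describes, with invented struct and constant names rather than QEMU's actual RAMBlock machinery:

```c
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 4096

struct ram_block { uint8_t *host; uint64_t length; };

/* Trust the stream offset only if a full page starting there fits. */
static uint8_t *host_from_offset(const struct ram_block *b, uint64_t offset)
{
    if (offset >= b->length || b->length - offset < TARGET_PAGE_SIZE)
        return NULL;                     /* out-of-range value from the wire */
    return b->host + offset;
}

int main(void)
{
    static uint8_t backing[8 * TARGET_PAGE_SIZE];
    struct ram_block blk = { backing, sizeof backing };
    /* an offset at the very end of the block must be rejected */
    printf("%p\n", (void *)host_from_offset(&blk, sizeof backing));
    return 0;
}
```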
static OPJ_BOOL opj_j2k_write_sod( opj_j2k_t *p_j2k,
opj_tcd_t * p_tile_coder,
OPJ_BYTE * p_data,
OPJ_UINT32 * p_data_written,
OPJ_UINT32 p_total_data_size,
const opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
opj_codestream_info_t *l_cstr_info = 00;
OPJ_UINT32 l_remaining_data;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
opj_write_bytes(p_data,J2K_MS_SOD,2); /* SOD */
p_data += 2;
/* make room for the EOF marker */
l_remaining_data = p_total_data_size - 4;
/* update tile coder */
p_tile_coder->tp_num = p_j2k->m_specific_param.m_encoder.m_current_poc_tile_part_number ;
p_tile_coder->cur_tp_num = p_j2k->m_specific_param.m_encoder.m_current_tile_part_number;
/* INDEX >> */
/* TODO mergeV2: check this part which use cstr_info */
/*l_cstr_info = p_j2k->cstr_info;
if (l_cstr_info) {
if (!p_j2k->m_specific_param.m_encoder.m_current_tile_part_number ) {
//TODO cstr_info->tile[p_j2k->m_current_tile_number].end_header = p_stream_tell(p_stream) + p_j2k->pos_correction - 1;
l_cstr_info->tile[p_j2k->m_current_tile_number].tileno = p_j2k->m_current_tile_number;
}
else {*/
/*
TODO
if
(cstr_info->tile[p_j2k->m_current_tile_number].packet[cstr_info->packno - 1].end_pos < p_stream_tell(p_stream))
{
cstr_info->tile[p_j2k->m_current_tile_number].packet[cstr_info->packno].start_pos = p_stream_tell(p_stream);
}*/
/*}*/
/* UniPG>> */
#ifdef USE_JPWL
/* update markers struct */
/*OPJ_BOOL res = j2k_add_marker(p_j2k->cstr_info, J2K_MS_SOD, p_j2k->sod_start, 2);
*/
assert( 0 && "TODO" );
#endif /* USE_JPWL */
/* <<UniPG */
/*}*/
/* << INDEX */
if (p_j2k->m_specific_param.m_encoder.m_current_tile_part_number == 0) {
p_tile_coder->tcd_image->tiles->packno = 0;
if (l_cstr_info) {
l_cstr_info->packno = 0;
}
}
*p_data_written = 0;
if (! opj_tcd_encode_tile(p_tile_coder, p_j2k->m_current_tile_number, p_data, p_data_written, l_remaining_data , l_cstr_info)) {
opj_event_msg(p_manager, EVT_ERROR, "Cannot encode tile\n");
return OPJ_FALSE;
}
*p_data_written += 2;
return OPJ_TRUE;
} | 0 | [
"CWE-416"
] | openjpeg | 940100c28ae28931722290794889cf84a92c5f6f | 317,019,145,660,880,860,000,000,000,000,000,000,000 | 73 | Fix potential use-after-free in opj_j2k_write_mco function
Fixes #563 |
int avahi_send_dns_packet_ipv6(
int fd,
AvahiIfIndex interface,
AvahiDnsPacket *p,
const AvahiIPv6Address *src_address,
const AvahiIPv6Address *dst_address,
uint16_t dst_port) {
struct sockaddr_in6 sa;
struct msghdr msg;
struct iovec io;
struct cmsghdr *cmsg;
size_t cmsg_data[(CMSG_SPACE(sizeof(struct in6_pktinfo))/sizeof(size_t)) + 1];
assert(fd >= 0);
assert(p);
assert(avahi_dns_packet_check_valid(p) >= 0);
assert(!dst_address || dst_port > 0);
if (!dst_address)
mdns_mcast_group_ipv6(&sa);
else
ipv6_address_to_sockaddr(&sa, dst_address, dst_port);
memset(&io, 0, sizeof(io));
io.iov_base = AVAHI_DNS_PACKET_DATA(p);
io.iov_len = p->size;
memset(&msg, 0, sizeof(msg));
msg.msg_name = &sa;
msg.msg_namelen = sizeof(sa);
msg.msg_iov = &io;
msg.msg_iovlen = 1;
msg.msg_flags = 0;
if (interface > 0 || src_address) {
struct in6_pktinfo *pkti;
memset(cmsg_data, 0, sizeof(cmsg_data));
msg.msg_control = cmsg_data;
msg.msg_controllen = CMSG_LEN(sizeof(struct in6_pktinfo));
cmsg = CMSG_FIRSTHDR(&msg);
cmsg->cmsg_len = msg.msg_controllen;
cmsg->cmsg_level = IPPROTO_IPV6;
cmsg->cmsg_type = IPV6_PKTINFO;
pkti = (struct in6_pktinfo*) CMSG_DATA(cmsg);
if (interface > 0)
pkti->ipi6_ifindex = interface;
if (src_address)
memcpy(&pkti->ipi6_addr, src_address->address, sizeof(src_address->address));
} else {
msg.msg_control = NULL;
msg.msg_controllen = 0;
}
return sendmsg_loop(fd, &msg, 0);
} | 0 | [
"CWE-399"
] | avahi | 46109dfec75534fe270c0ab902576f685d5ab3a6 | 98,515,646,333,800,550,000,000,000,000,000,000,000 | 61 | socket: Still read corrupt packets from the sockets
Else, we end up with an infinite loop with 100% CPU.
http://www.avahi.org/ticket/325
https://bugzilla.redhat.com/show_bug.cgi?id=667187 |
static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
struct skcipher_instance *skcipher =
container_of(inst, struct skcipher_instance, s.base);
skcipher->free(skcipher);
} | 0 | [
"CWE-476",
"CWE-703"
] | linux | 9933e113c2e87a9f46a40fde8dafbf801dca1ab9 | 60,086,562,996,004,650,000,000,000,000,000,000,000 | 7 | crypto: skcipher - Add missing API setkey checks
The API setkey checks for key sizes and alignment went AWOL during the
skcipher conversion. This patch restores them.
Cc: <[email protected]>
Fixes: 4e6c3df4d729 ("crypto: skcipher - Add low-level skcipher...")
Reported-by: Baozeng <[email protected]>
Signed-off-by: Herbert Xu <[email protected]> |
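For context, a hedged sketch of what such a setkey guard looks like, using invented types rather than the kernel's crypto API structures:

```c
#include <errno.h>
#include <stdio.h>

/* Toy stand-in for the algorithm descriptor carrying key-size limits. */
struct alg { unsigned int min_keysize, max_keysize; };

static int setkey_checked(const struct alg *a, unsigned int keylen)
{
    if (keylen < a->min_keysize || keylen > a->max_keysize)
        return -EINVAL;      /* the check that had gone missing */
    return 0;                /* safe to hand the key to the cipher */
}

int main(void)
{
    struct alg aes = { 16, 32 };             /* AES keys are 16-32 bytes */
    printf("%d\n", setkey_checked(&aes, 5)); /* -EINVAL: key too short */
    return 0;
}
```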
TABLE_LIST* st_select_lex_node::get_table_list() { return 0; } | 0 | [
"CWE-476"
] | server | 3a52569499e2f0c4d1f25db1e81617a9d9755400 | 306,750,667,700,936,930,000,000,000,000,000,000,000 | 1 | MDEV-25636: Bug report: abortion in sql/sql_parse.cc:6294
The assertion failure was caused by this query
select /*id=1*/ from t1
where
col= ( select /*id=2*/ from ... where corr_cond1
union
select /*id=4*/ from ... where corr_cond2)
Here,
- select with id=2 was correlated due to corr_cond1.
- select with id=4 was initially correlated due to corr_cond2, but then
the optimizer optimized away the correlation, making the select with id=4
uncorrelated.
However, since select with id=2 remained correlated, the execution had to
re-compute the whole UNION. When it tried to execute select with id=4, it
hit an assertion (join buffer already free'd).
This is because select with id=4 has freed its execution structures after
it has been executed once. The select is uncorrelated, so it did not expect
it would need to be executed for the second time.
Fixed this by adding this logic in
st_select_lex::optimize_unflattened_subqueries():
If a member of a UNION is correlated, mark all its members as
correlated, so that they are prepared to be executed multiple times. |
PS_SERIALIZER_ENCODE_FUNC(wddx)
{
wddx_packet *packet;
PS_ENCODE_VARS;
packet = php_wddx_constructor();
php_wddx_packet_start(packet, NULL, 0);
php_wddx_add_chunk_static(packet, WDDX_STRUCT_S);
PS_ENCODE_LOOP(
php_wddx_serialize_var(packet, *struc, key, key_length TSRMLS_CC);
);
php_wddx_add_chunk_static(packet, WDDX_STRUCT_E);
php_wddx_packet_end(packet);
*newstr = php_wddx_gather(packet);
php_wddx_destructor(packet);
if (newlen) {
*newlen = strlen(*newstr);
}
return SUCCESS;
} | 1 | [] | php-src | 1785d2b805f64eaaacf98c14c9e13107bf085ab1 | 115,507,282,911,148,130,000,000,000,000,000,000,000 | 25 | Fixed bug #70741: Session WDDX Packet Deserialization Type Confusion Vulnerability |
char* PE_(r_bin_pe_get_os)(struct PE_(r_bin_pe_obj_t)* bin) {
char* os;
if (!bin || !bin->nt_headers) {
return NULL;
}
switch (bin->nt_headers->optional_header.Subsystem) {
case PE_IMAGE_SUBSYSTEM_NATIVE:
os = strdup ("native");
break;
case PE_IMAGE_SUBSYSTEM_WINDOWS_GUI:
case PE_IMAGE_SUBSYSTEM_WINDOWS_CUI:
case PE_IMAGE_SUBSYSTEM_WINDOWS_CE_GUI:
os = strdup ("windows");
break;
case PE_IMAGE_SUBSYSTEM_POSIX_CUI:
os = strdup ("posix");
break;
case PE_IMAGE_SUBSYSTEM_EFI_APPLICATION:
case PE_IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER:
case PE_IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER:
case PE_IMAGE_SUBSYSTEM_EFI_ROM:
os = strdup ("efi");
break;
case PE_IMAGE_SUBSYSTEM_XBOX:
os = strdup ("xbox");
break;
default:
// XXX: this is unknown
os = strdup ("windows");
}
return os;
} | 0 | [
"CWE-125"
] | radare2 | 4e1cf0d3e6f6fe2552a269def0af1cd2403e266c | 5,956,336,386,394,548,000,000,000,000,000,000,000 | 32 | Fix crash in pe |
static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
struct genl_info *info,
struct cfg80211_crypto_settings *settings,
int cipher_limit)
{
memset(settings, 0, sizeof(*settings));
settings->control_port = info->attrs[NL80211_ATTR_CONTROL_PORT];
if (info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]) {
u16 proto;
proto = nla_get_u16(
info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]);
settings->control_port_ethertype = cpu_to_be16(proto);
if (!(rdev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
proto != ETH_P_PAE)
return -EINVAL;
if (info->attrs[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT])
settings->control_port_no_encrypt = true;
} else
settings->control_port_ethertype = cpu_to_be16(ETH_P_PAE);
if (info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]) {
void *data;
int len, i;
data = nla_data(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]);
len = nla_len(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]);
settings->n_ciphers_pairwise = len / sizeof(u32);
if (len % sizeof(u32))
return -EINVAL;
if (settings->n_ciphers_pairwise > cipher_limit)
return -EINVAL;
memcpy(settings->ciphers_pairwise, data, len);
for (i = 0; i < settings->n_ciphers_pairwise; i++)
if (!nl80211_valid_cipher_suite(
settings->ciphers_pairwise[i]))
return -EINVAL;
}
if (info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]) {
settings->cipher_group =
nla_get_u32(info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]);
if (!nl80211_valid_cipher_suite(settings->cipher_group))
return -EINVAL;
}
if (info->attrs[NL80211_ATTR_WPA_VERSIONS]) {
settings->wpa_versions =
nla_get_u32(info->attrs[NL80211_ATTR_WPA_VERSIONS]);
if (!nl80211_valid_wpa_versions(settings->wpa_versions))
return -EINVAL;
}
if (info->attrs[NL80211_ATTR_AKM_SUITES]) {
void *data;
int len, i;
data = nla_data(info->attrs[NL80211_ATTR_AKM_SUITES]);
len = nla_len(info->attrs[NL80211_ATTR_AKM_SUITES]);
settings->n_akm_suites = len / sizeof(u32);
if (len % sizeof(u32))
return -EINVAL;
memcpy(settings->akm_suites, data, len);
for (i = 0; i < settings->n_ciphers_pairwise; i++)
if (!nl80211_valid_akm_suite(settings->akm_suites[i]))
return -EINVAL;
}
return 0; | 0 | [
"CWE-362",
"CWE-119"
] | linux | 208c72f4fe44fe09577e7975ba0e7fa0278f3d03 | 301,505,109,190,795,960,000,000,000,000,000,000,000 | 78 | nl80211: fix check for valid SSID size in scan operations
In both trigger_scan and sched_scan operations, we were checking for
the SSID length before assigning the value correctly. Since the
memory was just kzalloc'ed, the check was always failing and SSIDs with
over 32 characters were allowed to go through.
This was causing a buffer overflow when copying the actual SSID to the
proper place.
This bug has been there since 2.6.29-rc4.
Cc: [email protected]
Signed-off-by: Luciano Coelho <[email protected]>
Signed-off-by: John W. Linville <[email protected]> |
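A small self-contained sketch of the check-before-assign ordering the fix restores; the names here are illustrative, not the nl80211 code itself:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IEEE80211_MAX_SSID_LEN 32

struct ssid { uint8_t len; uint8_t data[IEEE80211_MAX_SSID_LEN]; };

/* Validate the incoming attribute length before any copy or assignment. */
static int copy_ssid(struct ssid *dst, const uint8_t *attr, size_t attr_len)
{
    if (attr_len > IEEE80211_MAX_SSID_LEN)
        return -1;                        /* reject over-long SSIDs */
    memcpy(dst->data, attr, attr_len);
    dst->len = (uint8_t)attr_len;         /* assign only after the check */
    return 0;
}

int main(void)
{
    struct ssid s = { 0, { 0 } };
    uint8_t evil[40] = { 0 };             /* 40 > 32: must be refused */
    printf("%d\n", copy_ssid(&s, evil, sizeof evil));
    return 0;
}
```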
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
u64 frequency = event->attr.sample_freq;
u64 sec = NSEC_PER_SEC;
u64 divisor, dividend;
int count_fls, nsec_fls, frequency_fls, sec_fls;
count_fls = fls64(count);
nsec_fls = fls64(nsec);
frequency_fls = fls64(frequency);
sec_fls = 30;
/*
* We got @count in @nsec, with a target of sample_freq HZ
* the target period becomes:
*
* @count * 10^9
* period = -------------------
* @nsec * sample_freq
*
*/
/*
* Reduce accuracy by one bit such that @a and @b converge
* to a similar magnitude.
*/
#define REDUCE_FLS(a, b) \
do { \
if (a##_fls > b##_fls) { \
a >>= 1; \
a##_fls--; \
} else { \
b >>= 1; \
b##_fls--; \
} \
} while (0)
/*
* Reduce accuracy until either term fits in a u64, then proceed with
* the other, so that finally we can do a u64/u64 division.
*/
while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
REDUCE_FLS(nsec, frequency);
REDUCE_FLS(sec, count);
}
if (count_fls + sec_fls > 64) {
divisor = nsec * frequency;
while (count_fls + sec_fls > 64) {
REDUCE_FLS(count, sec);
divisor >>= 1;
}
dividend = count * sec;
} else {
dividend = count * sec;
while (nsec_fls + frequency_fls > 64) {
REDUCE_FLS(nsec, frequency);
dividend >>= 1;
}
divisor = nsec * frequency;
}
if (!divisor)
return dividend;
return div64_u64(dividend, divisor);
} | 0 | [
"CWE-703",
"CWE-189"
] | linux | 8176cced706b5e5d15887584150764894e94e02f | 120,521,351,415,936,970,000,000,000,000,000,000,000 | 72 | perf: Treat attr.config as u64 in perf_swevent_init()
Trinity discovered that we fail to check all 64 bits of
attr.config passed by user space, resulting in out-of-bounds
access of the perf_swevent_enabled array in
sw_perf_event_destroy().
Introduced in commit b0a873ebb ("perf: Register PMU
implementations").
Signed-off-by: Tommi Rantala <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
Cc: Paul Mackerras <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
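A minimal sketch of the truncation hazard and of the fix's idea, comparing the full 64 bits before indexing; the bound and array below are invented stand-ins:

```c
#include <stdint.h>
#include <stdio.h>

#define SW_EVENT_MAX 12                  /* illustrative table bound */
static int sw_event_enabled[SW_EVENT_MAX];

static int swevent_init(uint64_t config)
{
    if (config >= SW_EVENT_MAX)          /* full u64 compare: no truncation */
        return -1;
    sw_event_enabled[(int)config]++;     /* now a safe index */
    return 0;
}

int main(void)
{
    /* the low 32 bits are zero, so a 32-bit check would wrongly accept it */
    printf("%d\n", swevent_init(1ULL << 32));
    return 0;
}
```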
static const char *get_mode(const char *str, unsigned int *modep)
{
unsigned char c;
unsigned int mode = 0;
if (*str == ' ')
return NULL;
while ((c = *str++) != ' ') {
if (c < '0' || c > '7')
return NULL;
mode = (mode << 3) + (c - '0');
}
*modep = mode;
return str;
} | 0 | [
"CWE-20"
] | git | e1d911dd4c7b76a5a8cec0f5c8de15981e34da83 | 269,328,412,674,105,320,000,000,000,000,000,000,000 | 16 | mingw: disallow backslash characters in tree objects' file names
The backslash character is not a valid part of a file name on Windows.
Hence it is dangerous to allow writing files that were unpacked from
tree objects, when the stored file name contains a backslash character:
it will be misinterpreted as a directory separator.
This not only causes ambiguity when a tree contains a blob `a\b` and a
tree `a` that contains a blob `b`, but it also can be used as part of an
attack vector to side-step the careful protections against writing into
the `.git/` directory during a clone of a maliciously-crafted
repository.
Let's prevent that, addressing CVE-2019-1354.
Note: we guard against backslash characters in tree objects' file names
_only_ on Windows (because on other platforms, even on those where NTFS
volumes can be mounted, the backslash character is _not_ a directory
separator), and _only_ when `core.protectNTFS = true` (because users
might need to generate tree objects for other platforms, of course
without touching the worktree, e.g. using `git update-index
--cacheinfo`).
Signed-off-by: Johannes Schindelin <[email protected]> |
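A hedged sketch of the per-character rejection described above, with an invented helper name rather than Git's actual verify_path / core.protectNTFS plumbing:

```c
#include <stdbool.h>
#include <stdio.h>

/* Reject separators inside a single tree entry name; under Windows-like
 * semantics (protect_ntfs), the backslash is a separator too. */
static bool tree_entry_name_ok(const char *name, bool protect_ntfs)
{
    for (; *name; name++) {
        if (*name == '/')
            return false;
        if (protect_ntfs && *name == '\\')
            return false;
    }
    return true;
}

int main(void)
{
    printf("%d\n", tree_entry_name_ok("a\\b", true));   /* 0: rejected */
    return 0;
}
```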
int extract_or_test_files(__G) /* return PK-type error code */
__GDEF
{
unsigned i, j;
zoff_t cd_bufstart;
uch *cd_inptr;
int cd_incnt;
ulg filnum=0L, blknum=0L;
int reached_end;
#ifndef SFX
int no_endsig_found;
#endif
int error, error_in_archive=PK_COOL;
int *fn_matched=NULL, *xn_matched=NULL;
zucn_t members_processed;
ulg num_skipped=0L, num_bad_pwd=0L;
zoff_t old_extra_bytes = 0L;
#ifdef SET_DIR_ATTRIB
unsigned num_dirs=0;
direntry *dirlist=(direntry *)NULL, **sorted_dirlist=(direntry **)NULL;
#endif
/*
* First, two general initializations are applied. These have been moved
* here from process_zipfiles() because they are only needed for accessing
* and/or extracting the data content of the zip archive.
*/
/* a) initialize the CRC table pointer (once) */
if (CRC_32_TAB == NULL) {
if ((CRC_32_TAB = get_crc_table()) == NULL) {
return PK_MEM;
}
}
#if (!defined(SFX) || defined(SFX_EXDIR))
/* b) check out if specified extraction root directory exists */
if (uO.exdir != (char *)NULL && G.extract_flag) {
G.create_dirs = !uO.fflag;
if ((error = checkdir(__G__ uO.exdir, ROOT)) > MPN_INF_SKIP) {
/* out of memory, or file in way */
return (error == MPN_NOMEM ? PK_MEM : PK_ERR);
}
}
#endif /* !SFX || SFX_EXDIR */
/* One more: initialize cover structure for bomb detection. Start with
spans that cover any extra bytes at the start, the central directory,
the end of central directory record (including the Zip64 end of central
directory locator, if present), and the Zip64 end of central directory
record, if present. */
if (G.cover == NULL) {
G.cover = malloc(sizeof(cover_t));
if (G.cover == NULL) {
Info(slide, 0x401, ((char *)slide,
LoadFarString(NotEnoughMemCover)));
return PK_MEM;
}
((cover_t *)G.cover)->span = NULL;
((cover_t *)G.cover)->max = 0;
}
((cover_t *)G.cover)->num = 0;
if (cover_add((cover_t *)G.cover,
G.extra_bytes + G.ecrec.offset_start_central_directory,
G.extra_bytes + G.ecrec.offset_start_central_directory +
G.ecrec.size_central_directory) != 0) {
Info(slide, 0x401, ((char *)slide,
LoadFarString(NotEnoughMemCover)));
return PK_MEM;
}
if ((G.extra_bytes != 0 &&
cover_add((cover_t *)G.cover, 0, G.extra_bytes) != 0) ||
(G.ecrec.have_ecr64 &&
cover_add((cover_t *)G.cover, G.ecrec.ec64_start,
G.ecrec.ec64_end) != 0) ||
cover_add((cover_t *)G.cover, G.ecrec.ec_start,
G.ecrec.ec_end) != 0) {
Info(slide, 0x401, ((char *)slide,
LoadFarString(OverlappedComponents)));
return PK_BOMB;
}
/*---------------------------------------------------------------------------
The basic idea of this function is as follows. Since the central di-
rectory lies at the end of the zipfile and the member files lie at the
beginning or middle or wherever, it is not very desirable to simply
read a central directory entry, jump to the member and extract it, and
then jump back to the central directory. In the case of a large zipfile
this would lead to a whole lot of disk-grinding, especially if each mem-
ber file is small. Instead, we read from the central directory the per-
tinent information for a block of files, then go extract/test the whole
block. Thus this routine contains two small(er) loops within a very
large outer loop: the first of the small ones reads a block of files
from the central directory; the second extracts or tests each file; and
the outer one loops over blocks. There's some file-pointer positioning
stuff in between, but that's about it. Btw, it's because of this jump-
ing around that we can afford to be lenient if an error occurs in one of
the member files: we should still be able to go find the other members,
since we know the offset of each from the beginning of the zipfile.
---------------------------------------------------------------------------*/
G.pInfo = G.info;
#if CRYPT
G.newzip = TRUE;
#endif
#ifndef SFX
G.reported_backslash = FALSE;
#endif
/* malloc space for check on unmatched filespecs (OK if one or both NULL) */
if (G.filespecs > 0 &&
(fn_matched=(int *)malloc(G.filespecs*sizeof(int))) != (int *)NULL)
for (i = 0; i < G.filespecs; ++i)
fn_matched[i] = FALSE;
if (G.xfilespecs > 0 &&
(xn_matched=(int *)malloc(G.xfilespecs*sizeof(int))) != (int *)NULL)
for (i = 0; i < G.xfilespecs; ++i)
xn_matched[i] = FALSE;
/*---------------------------------------------------------------------------
Begin main loop over blocks of member files. We know the entire central
directory is on this disk: we would not have any of this information un-
less the end-of-central-directory record was on this disk, and we would
not have gotten to this routine unless this is also the disk on which
the central directory starts. In practice, this had better be the ONLY
disk in the archive, but we'll add multi-disk support soon.
---------------------------------------------------------------------------*/
members_processed = 0;
#ifndef SFX
no_endsig_found = FALSE;
#endif
reached_end = FALSE;
while (!reached_end) {
j = 0;
#ifdef AMIGA
memzero(G.filenotes, DIR_BLKSIZ * sizeof(char *));
#endif
/*
* Loop through files in central directory, storing offsets, file
* attributes, case-conversion and text-conversion flags until block
* size is reached.
*/
while ((j < DIR_BLKSIZ)) {
G.pInfo = &G.info[j];
if (readbuf(__G__ G.sig, 4) == 0) {
error_in_archive = PK_EOF;
reached_end = TRUE; /* ...so no more left to do */
break;
}
if (memcmp(G.sig, central_hdr_sig, 4)) { /* is it a new entry? */
/* no new central directory entry
* -> is the number of processed entries compatible with the
* number of entries as stored in the end_central record?
*/
if ((members_processed
& (G.ecrec.have_ecr64 ? MASK_ZUCN64 : MASK_ZUCN16))
== G.ecrec.total_entries_central_dir) {
#ifndef SFX
/* yes, so look if we ARE back at the end_central record
*/
no_endsig_found =
( (memcmp(G.sig,
(G.ecrec.have_ecr64 ?
end_central64_sig : end_central_sig),
4) != 0)
&& (!G.ecrec.is_zip64_archive)
&& (memcmp(G.sig, end_central_sig, 4) != 0)
);
#endif /* !SFX */
} else {
/* no; we have found an error in the central directory
* -> report it and stop searching for more Zip entries
*/
Info(slide, 0x401, ((char *)slide,
LoadFarString(CentSigMsg), j + blknum*DIR_BLKSIZ + 1));
Info(slide, 0x401, ((char *)slide,
LoadFarString(ReportMsg)));
error_in_archive = PK_BADERR;
}
reached_end = TRUE; /* ...so no more left to do */
break;
}
/* process_cdir_file_hdr() sets pInfo->hostnum, pInfo->lcflag */
if ((error = process_cdir_file_hdr(__G)) != PK_COOL) {
error_in_archive = error; /* only PK_EOF defined */
reached_end = TRUE; /* ...so no more left to do */
break;
}
if ((error = do_string(__G__ G.crec.filename_length, DS_FN)) !=
PK_COOL)
{
if (error > error_in_archive)
error_in_archive = error;
if (error > PK_WARN) { /* fatal: no more left to do */
Info(slide, 0x401, ((char *)slide,
LoadFarString(FilNamMsg),
FnFilter1(G.filename), "central"));
reached_end = TRUE;
break;
}
}
if ((error = do_string(__G__ G.crec.extra_field_length,
EXTRA_FIELD)) != 0)
{
if (error > error_in_archive)
error_in_archive = error;
if (error > PK_WARN) { /* fatal */
Info(slide, 0x401, ((char *)slide,
LoadFarString(ExtFieldMsg),
FnFilter1(G.filename), "central"));
reached_end = TRUE;
break;
}
}
#ifdef AMIGA
G.filenote_slot = j;
if ((error = do_string(__G__ G.crec.file_comment_length,
uO.N_flag ? FILENOTE : SKIP)) != PK_COOL)
#else
if ((error = do_string(__G__ G.crec.file_comment_length, SKIP))
!= PK_COOL)
#endif
{
if (error > error_in_archive)
error_in_archive = error;
if (error > PK_WARN) { /* fatal */
Info(slide, 0x421, ((char *)slide,
LoadFarString(BadFileCommLength),
FnFilter1(G.filename)));
reached_end = TRUE;
break;
}
}
if (G.process_all_files) {
if (store_info(__G))
++j; /* file is OK; info[] stored; continue with next */
else
++num_skipped;
} else {
int do_this_file;
if (G.filespecs == 0)
do_this_file = TRUE;
else { /* check if this entry matches an `include' argument */
do_this_file = FALSE;
for (i = 0; i < G.filespecs; i++)
if (match(G.filename, G.pfnames[i], uO.C_flag WISEP)) {
do_this_file = TRUE; /* ^-- ignore case or not? */
if (fn_matched)
fn_matched[i] = TRUE;
break; /* found match, so stop looping */
}
}
if (do_this_file) { /* check if this is an excluded file */
for (i = 0; i < G.xfilespecs; i++)
if (match(G.filename, G.pxnames[i], uO.C_flag WISEP)) {
do_this_file = FALSE; /* ^-- ignore case or not? */
if (xn_matched)
xn_matched[i] = TRUE;
break;
}
}
if (do_this_file) {
if (store_info(__G))
++j; /* file is OK */
else
++num_skipped; /* unsupp. compression or encryption */
}
} /* end if (process_all_files) */
members_processed++;
} /* end while-loop (adding files to current block) */
/* save position in central directory so can come back later */
cd_bufstart = G.cur_zipfile_bufstart;
cd_inptr = G.inptr;
cd_incnt = G.incnt;
/*-----------------------------------------------------------------------
Second loop: process files in current block, extracting or testing
each one.
-----------------------------------------------------------------------*/
error = extract_or_test_entrylist(__G__ j,
&filnum, &num_bad_pwd, &old_extra_bytes,
#ifdef SET_DIR_ATTRIB
&num_dirs, &dirlist,
#endif
error_in_archive);
if (error != PK_COOL) {
if (error > error_in_archive)
error_in_archive = error;
/* ...and keep going (unless disk full or user break) */
if (G.disk_full > 1 || error_in_archive == IZ_CTRLC ||
error == PK_BOMB) {
/* clear reached_end to signal premature stop ... */
reached_end = FALSE;
/* ... and cancel scanning the central directory */
break;
}
}
/*
* Jump back to where we were in the central directory, then go and do
* the next batch of files.
*/
#ifdef USE_STRM_INPUT
zfseeko(G.zipfd, cd_bufstart, SEEK_SET);
G.cur_zipfile_bufstart = zftello(G.zipfd);
#else /* !USE_STRM_INPUT */
G.cur_zipfile_bufstart =
zlseek(G.zipfd, cd_bufstart, SEEK_SET);
#endif /* ?USE_STRM_INPUT */
read(G.zipfd, (char *)G.inbuf, INBUFSIZ); /* been here before... */
G.inptr = cd_inptr;
G.incnt = cd_incnt;
++blknum;
#ifdef TEST
printf("\ncd_bufstart = %ld (%.8lXh)\n", cd_bufstart, cd_bufstart);
printf("cur_zipfile_bufstart = %ld (%.8lXh)\n", cur_zipfile_bufstart,
cur_zipfile_bufstart);
printf("inptr-inbuf = %d\n", G.inptr-G.inbuf);
printf("incnt = %d\n\n", G.incnt);
#endif
} /* end while-loop (blocks of files in central directory) */
/*---------------------------------------------------------------------------
Process the list of deferred symlink extractions and finish up
the symbolic links.
---------------------------------------------------------------------------*/
#ifdef SYMLINKS
if (G.slink_last != NULL) {
if (QCOND2)
Info(slide, 0, ((char *)slide, LoadFarString(SymLnkDeferred)));
while (G.slink_head != NULL) {
set_deferred_symlink(__G__ G.slink_head);
/* remove the processed entry from the chain and free its memory */
G.slink_last = G.slink_head;
G.slink_head = G.slink_last->next;
free(G.slink_last);
}
G.slink_last = NULL;
}
#endif /* SYMLINKS */
/*---------------------------------------------------------------------------
Go back through saved list of directories, sort and set times/perms/UIDs
and GIDs from the deepest level on up.
---------------------------------------------------------------------------*/
#ifdef SET_DIR_ATTRIB
if (num_dirs > 0) {
sorted_dirlist = (direntry **)malloc(num_dirs*sizeof(direntry *));
if (sorted_dirlist == (direntry **)NULL) {
Info(slide, 0x401, ((char *)slide,
LoadFarString(DirlistSortNoMem)));
while (dirlist != (direntry *)NULL) {
direntry *d = dirlist;
dirlist = dirlist->next;
free(d);
}
} else {
ulg ndirs_fail = 0;
if (num_dirs == 1)
sorted_dirlist[0] = dirlist;
else {
for (i = 0; i < num_dirs; ++i) {
sorted_dirlist[i] = dirlist;
dirlist = dirlist->next;
}
qsort((char *)sorted_dirlist, num_dirs, sizeof(direntry *),
dircomp);
}
Trace((stderr, "setting directory times/perms/attributes\n"));
for (i = 0; i < num_dirs; ++i) {
direntry *d = sorted_dirlist[i];
Trace((stderr, "dir = %s\n", d->fn));
if ((error = set_direc_attribs(__G__ d)) != PK_OK) {
ndirs_fail++;
Info(slide, 0x201, ((char *)slide,
LoadFarString(DirlistSetAttrFailed), d->fn));
if (!error_in_archive)
error_in_archive = error;
}
free(d);
}
free(sorted_dirlist);
if (!uO.tflag && QCOND2) {
if (ndirs_fail > 0)
Info(slide, 0, ((char *)slide,
LoadFarString(DirlistFailAttrSum), ndirs_fail));
}
}
}
#endif /* SET_DIR_ATTRIB */
/*---------------------------------------------------------------------------
Check for unmatched filespecs on command line and print warning if any
found. Free allocated memory. (But suppress check when central dir
scan was interrupted prematurely.)
---------------------------------------------------------------------------*/
if (fn_matched) {
if (reached_end) for (i = 0; i < G.filespecs; ++i)
if (!fn_matched[i]) {
#ifdef DLL
if (!G.redirect_data && !G.redirect_text)
Info(slide, 0x401, ((char *)slide,
LoadFarString(FilenameNotMatched), G.pfnames[i]));
else
setFileNotFound(__G);
#else
Info(slide, 1, ((char *)slide,
LoadFarString(FilenameNotMatched), G.pfnames[i]));
#endif
if (error_in_archive <= PK_WARN)
error_in_archive = PK_FIND; /* some files not found */
}
free((zvoid *)fn_matched);
}
if (xn_matched) {
if (reached_end) for (i = 0; i < G.xfilespecs; ++i)
if (!xn_matched[i])
Info(slide, 0x401, ((char *)slide,
LoadFarString(ExclFilenameNotMatched), G.pxnames[i]));
free((zvoid *)xn_matched);
}
/*---------------------------------------------------------------------------
Now, all locally allocated memory has been released. When the central
directory processing has been interrupted prematurely, it is safe to
return immediately. All completeness checks and summary messages are
skipped in this case.
---------------------------------------------------------------------------*/
if (!reached_end)
return error_in_archive;
/*---------------------------------------------------------------------------
Double-check that we're back at the end-of-central-directory record, and
print quick summary of results, if we were just testing the archive. We
send the summary to stdout so that people doing the testing in the back-
ground and redirecting to a file can just do a "tail" on the output file.
---------------------------------------------------------------------------*/
#ifndef SFX
if (no_endsig_found) { /* just to make sure */
Info(slide, 0x401, ((char *)slide, LoadFarString(EndSigMsg)));
Info(slide, 0x401, ((char *)slide, LoadFarString(ReportMsg)));
if (!error_in_archive) /* don't overwrite stronger error */
error_in_archive = PK_WARN;
}
#endif /* !SFX */
if (uO.tflag) {
ulg num = filnum - num_bad_pwd;
if (uO.qflag < 2) { /* GRR 930710: was (uO.qflag == 1) */
if (error_in_archive)
Info(slide, 0, ((char *)slide, LoadFarString(ErrorInArchive),
(error_in_archive == PK_WARN)? "warning-" : "", G.zipfn));
else if (num == 0L)
Info(slide, 0, ((char *)slide, LoadFarString(ZeroFilesTested),
G.zipfn));
else if (G.process_all_files && (num_skipped+num_bad_pwd == 0L))
Info(slide, 0, ((char *)slide, LoadFarString(NoErrInCompData),
G.zipfn));
else
Info(slide, 0, ((char *)slide, LoadFarString(NoErrInTestedFiles)
, G.zipfn, num, (num==1L)? "":"s"));
if (num_skipped > 0L)
Info(slide, 0, ((char *)slide, LoadFarString(FilesSkipped),
num_skipped, (num_skipped==1L)? "":"s"));
#if CRYPT
if (num_bad_pwd > 0L)
Info(slide, 0, ((char *)slide, LoadFarString(FilesSkipBadPasswd)
, num_bad_pwd, (num_bad_pwd==1L)? "":"s"));
#endif /* CRYPT */
}
}
/* give warning if files not tested or extracted (first condition can still
* happen if zipfile is empty and no files specified on command line) */
if ((filnum == 0) && error_in_archive <= PK_WARN) {
if (num_skipped > 0L)
error_in_archive = IZ_UNSUP; /* unsupport. compression/encryption */
else
error_in_archive = PK_FIND; /* no files found at all */
}
#if CRYPT
else if ((filnum == num_bad_pwd) && error_in_archive <= PK_WARN)
error_in_archive = IZ_BADPWD; /* bad passwd => all files skipped */
#endif
else if ((num_skipped > 0L) && error_in_archive <= PK_WARN)
error_in_archive = IZ_UNSUP; /* was PK_WARN; Jean-loup complained */
#if CRYPT
else if ((num_bad_pwd > 0L) && !error_in_archive)
error_in_archive = PK_WARN;
#endif
return error_in_archive;
} /* end function extract_or_test_files() */ | 0 | [
"CWE-400"
] | unzip | 6d351831be705cc26d897db44f878a978f4138fc | 137,888,859,200,102,200,000,000,000,000,000,000,000 | 517 | Do not raise a zip bomb alert for a misplaced central directory.
There is a zip-like file in the Firefox distribution, omni.ja,
which is a zip container with the central directory placed at the
start of the file instead of after the local entries as required
by the zip standard. This commit marks the actual location of the
central directory, as well as the end of central directory records,
as disallowed locations. This now permits such containers to not
raise a zip bomb alert, where in fact there are no overlaps. |
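A simplified sketch of the "cover" bookkeeping idea: record the byte spans already claimed by archive components and flag any new span that intersects one of them. The linear scan is for illustration; the real cover structure keeps its spans sorted:

```c
#include <stdbool.h>
#include <stdio.h>

struct span { long beg, end; };          /* half-open range [beg, end) */

static bool overlaps_any(const struct span *set, int n, long beg, long end)
{
    for (int i = 0; i < n; i++)
        if (beg < set[i].end && set[i].beg < end)
            return true;                 /* two half-open ranges intersect */
    return false;
}

int main(void)
{
    struct span claimed[] = { { 100, 200 } };   /* e.g. the central directory */
    /* a local entry claiming bytes 150..160 would be an overlapped component */
    printf("%d\n", overlaps_any(claimed, 1, 150, 160));
    return 0;
}
```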
int dtls1_heartbeat(SSL *s)
{
unsigned char *buf, *p;
int ret = -1;
unsigned int payload = 18; /* Sequence number + random bytes */
unsigned int padding = 16; /* Use minimum padding */
/* Only send if peer supports and accepts HB requests... */
if (!(s->tlsext_heartbeat & SSL_TLSEXT_HB_ENABLED) ||
s->tlsext_heartbeat & SSL_TLSEXT_HB_DONT_SEND_REQUESTS) {
SSLerr(SSL_F_DTLS1_HEARTBEAT, SSL_R_TLS_HEARTBEAT_PEER_DOESNT_ACCEPT);
return -1;
}
/* ...and there is none in flight yet... */
if (s->tlsext_hb_pending) {
SSLerr(SSL_F_DTLS1_HEARTBEAT, SSL_R_TLS_HEARTBEAT_PENDING);
return -1;
}
/* ...and no handshake in progress. */
if (SSL_in_init(s) || s->in_handshake) {
SSLerr(SSL_F_DTLS1_HEARTBEAT, SSL_R_UNEXPECTED_MESSAGE);
return -1;
}
/*
* Check if padding is too long, payload and padding must not exceed 2^14
* - 3 = 16381 bytes in total.
*/
OPENSSL_assert(payload + padding <= 16381);
/*-
* Create HeartBeat message, we just use a sequence number
* as payload to distinguish different messages and add
* some random stuff.
* - Message Type, 1 byte
* - Payload Length, 2 bytes (unsigned int)
* - Payload, the sequence number (2 bytes uint)
* - Payload, random bytes (16 bytes uint)
* - Padding
*/
buf = OPENSSL_malloc(1 + 2 + payload + padding);
p = buf;
/* Message Type */
*p++ = TLS1_HB_REQUEST;
/* Payload length (18 bytes here) */
s2n(payload, p);
/* Sequence number */
s2n(s->tlsext_hb_seq, p);
/* 16 random bytes */
if (RAND_bytes(p, 16) <= 0)
goto err;
p += 16;
/* Random padding */
if (RAND_bytes(p, padding) <= 0)
goto err;
ret = dtls1_write_bytes(s, TLS1_RT_HEARTBEAT, buf, 3 + payload + padding);
if (ret >= 0) {
if (s->msg_callback)
s->msg_callback(1, s->version, TLS1_RT_HEARTBEAT,
buf, 3 + payload + padding,
s, s->msg_callback_arg);
dtls1_start_timer(s);
s->tlsext_hb_pending = 1;
}
err:
OPENSSL_free(buf);
return ret;
} | 0 | [
"CWE-399"
] | openssl | 00a4c1421407b6ac796688871b0a49a179c694d9 | 320,999,506,375,014,450,000,000,000,000,000,000,000 | 74 | Fix DTLS buffered message DoS attack
DTLS can handle out of order record delivery. Additionally since
handshake messages can be bigger than will fit into a single packet, the
messages can be fragmented across multiple records (as with normal TLS).
That means that the messages can arrive mixed up, and we have to
reassemble them. We keep a queue of buffered messages that are "from the
future", i.e. messages we're not ready to deal with yet but have arrived
early. The messages held there may not be full yet - they could be one
or more fragments that are still in the process of being reassembled.
The code assumes that we will eventually complete the reassembly and
when that occurs the complete message is removed from the queue at the
point that we need to use it.
However, DTLS is also tolerant of packet loss. To get around that DTLS
messages can be retransmitted. If we receive a full (non-fragmented)
message from the peer after previously having received a fragment of
that message, then we ignore the message in the queue and just use the
non-fragmented version. At that point the queued message will never get
removed.
Additionally the peer could send "future" messages that we never get to
in order to complete the handshake. Each message has a sequence number
(starting from 0). We will accept a message fragment for the current
message sequence number, or for any sequence up to 10 into the future.
However if the Finished message has a sequence number of 2, anything
greater than that in the queue is just left there.
So, in those two ways we can end up with "orphaned" data in the queue
that will never get removed - except when the connection is closed. At
that point all the queues are flushed.
An attacker could seek to exploit this by filling up the queues with
lots of large messages that are never going to be used in order to
attempt a DoS by memory exhaustion.
I will assume that we are only concerned with servers here. It does not
seem reasonable to be concerned about a memory exhaustion attack on a
client. They are unlikely to process enough connections for this to be
an issue.
A "long" handshake with many messages might be 5 messages long (in the
incoming direction), e.g. ClientHello, Certificate, ClientKeyExchange,
CertificateVerify, Finished. So this would be message sequence numbers 0
to 4. Additionally we can buffer up to 10 messages in the future.
Therefore the maximum number of messages that an attacker could send
that could get orphaned would typically be 15.
The maximum size that a DTLS message is allowed to be is defined by
max_cert_list, which by default is 100k. Therefore the maximum amount of
"orphaned" memory per connection is 1500k.
Message sequence numbers get reset after the Finished message, so
renegotiation will not extend the maximum number of messages that can be
orphaned per connection.
As noted above, the queues do get cleared when the connection is closed.
Therefore in order to mount an effective attack, an attacker would have
to open many simultaneous connections.
Issue reported by Quan Luo.
CVE-2016-2179
Reviewed-by: Richard Levitte <[email protected]> |
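The sketch below shows one possible mitigation shape, a hard byte budget on the out-of-order queue. This is an assumption made for illustration only, not the actual OpenSSL patch:

```c
#include <stddef.h>
#include <stdio.h>

#define QUEUE_BUDGET (100 * 1024)        /* illustrative cap, not OpenSSL's */

struct frag_queue { size_t bytes_used; };

/* Refuse to park a fragment once the memory budget would be exceeded. */
static int queue_fragment(struct frag_queue *q, size_t frag_len)
{
    if (frag_len > QUEUE_BUDGET - q->bytes_used)
        return -1;                       /* drop rather than buffer */
    q->bytes_used += frag_len;
    return 0;
}

int main(void)
{
    struct frag_queue q = { 0 };
    printf("%d\n", queue_fragment(&q, 200 * 1024));   /* -1: over budget */
    return 0;
}
```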
BN_ULONG bn_add_words (BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int n)
{ BN_ULONG ret;
size_t i=0;
if (n <= 0) return 0;
asm volatile (
" subq %0,%0 \n" /* clear carry */
" jmp 1f \n"
".p2align 4 \n"
"1: movq (%4,%2,8),%0 \n"
" adcq (%5,%2,8),%0 \n"
" movq %0,(%3,%2,8) \n"
" lea 1(%2),%2 \n"
" loop 1b \n"
" sbbq %0,%0 \n"
: "=&r"(ret),"+c"(n),"+r"(i)
: "r"(rp),"r"(ap),"r"(bp)
: "cc", "memory"
);
return ret&1;
} | 0 | [
"CWE-310"
] | openssl | a7a44ba55cb4f884c6bc9ceac90072dea38e66d0 | 213,338,567,365,672,900,000,000,000,000,000,000,000 | 23 | Fix for CVE-2014-3570 (with minor bn_asm.c revamp).
Reviewed-by: Emilia Kasper <[email protected]> |
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
struct sk_buff *skb = napi->skb;
const struct ethhdr *eth;
unsigned int hlen = sizeof(*eth);
napi->skb = NULL;
skb_reset_mac_header(skb);
skb_gro_reset_offset(skb);
if (unlikely(skb_gro_header_hard(skb, hlen))) {
eth = skb_gro_header_slow(skb, hlen, 0);
if (unlikely(!eth)) {
net_warn_ratelimited("%s: dropping impossible skb from %s\n",
__func__, napi->dev->name);
napi_reuse_skb(napi, skb);
return NULL;
}
} else {
eth = (const struct ethhdr *)skb->data;
gro_pull_from_frag0(skb, hlen);
NAPI_GRO_CB(skb)->frag0 += hlen;
NAPI_GRO_CB(skb)->frag0_len -= hlen;
}
__skb_pull(skb, hlen);
/*
* This works because the only protocols we care about don't require
* special handling.
* We'll fix it up properly in napi_frags_finish()
*/
skb->protocol = eth->h_proto;
return skb;
} | 0 | [
"CWE-416"
] | linux | a4270d6795b0580287453ea55974d948393e66ef | 255,626,330,209,853,740,000,000,000,000,000,000,000 | 36 | net-gro: fix use-after-free read in napi_gro_frags()
If a network driver provides to napi_gro_frags() an
skb with a page fragment of exactly 14 bytes, the call
to gro_pull_from_frag0() will 'consume' the fragment
by calling skb_frag_unref(skb, 0), and the page might
be freed and reused.
Reading eth->h_proto at the end of napi_frags_skb() might
read mangled data, or crash under specific debugging features.
BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline]
BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
Read of size 2 at addr ffff88809366840c by task syz-executor599/8957
CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0x172/0x1f0 lib/dump_stack.c:113
print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188
__kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317
kasan_report+0x12/0x20 mm/kasan/common.c:614
__asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142
napi_frags_skb net/core/dev.c:5833 [inline]
napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991
tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037
call_write_iter include/linux/fs.h:1872 [inline]
do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693
do_iter_write fs/read_write.c:970 [inline]
do_iter_write+0x184/0x610 fs/read_write.c:951
vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015
do_writev+0x15b/0x330 fs/read_write.c:1058
Fixes: a50e233c50db ("net-gro: restore frag0 optimization")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: syzbot <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
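As a tiny self-contained illustration of the general copy-before-release discipline that avoids this class of bug (a toy struct, not the kernel's skb machinery):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct eth_toy { uint8_t dst[6], src[6]; uint16_t h_proto; };

int main(void)
{
    struct eth_toy *frag = calloc(1, sizeof *frag);
    if (!frag)
        return 1;
    frag->h_proto = 0x0800;              /* pretend this came off the wire */

    uint16_t proto = frag->h_proto;      /* copy the field out first... */
    free(frag);                          /* ...then release the buffer */
    printf("proto=0x%04x\n", proto);     /* no read through a freed pointer */
    return 0;
}
```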
void change_console(struct vc_data *new_vc)
{
struct vc_data *vc;
if (!new_vc || new_vc->vc_num == fg_console || vt_dont_switch)
return;
/*
* If this vt is in process mode, then we need to handshake with
* that process before switching. Essentially, we store where that
* vt wants to switch to and wait for it to tell us when it's done
* (via VT_RELDISP ioctl).
*
* We also check to see if the controlling process still exists.
* If it doesn't, we reset this vt to auto mode and continue.
* This is a cheap way to track process control. The worst thing
* that can happen is: we send a signal to a process, it dies, and
* the switch gets "lost" waiting for a response; hopefully, the
* user will try again, we'll detect the process is gone (unless
* the user waits just the right amount of time :-) and revert the
* vt to auto control.
*/
vc = vc_cons[fg_console].d;
if (vc->vt_mode.mode == VT_PROCESS) {
/*
* Send the signal as privileged - kill_pid() will
* tell us if the process has gone or something else
* is awry.
*
* We need to set vt_newvt *before* sending the signal or we
* have a race.
*/
vc->vt_newvt = new_vc->vc_num;
if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
/*
* It worked. Mark the vt to switch to and
* return. The process needs to send us a
* VT_RELDISP ioctl to complete the switch.
*/
return;
}
/*
* The controlling process has died, so we revert back to
* normal operation. In this case, we'll also change back
* to KD_TEXT mode. I'm not sure if this is strictly correct
* but it saves the agony when the X server dies and the screen
* remains blanked due to KD_GRAPHICS! It would be nice to do
* this outside of VT_PROCESS but there is no single process
* to account for and tracking tty count may be undesirable.
*/
reset_vc(vc);
/*
* Fall through to normal (VT_AUTO) handling of the switch...
*/
}
/*
* Ignore all switches in KD_GRAPHICS+VT_AUTO mode
*/
if (vc->vc_mode == KD_GRAPHICS)
return;
complete_change_console(new_vc);
} | 0 | [
"CWE-662"
] | linux | 90bfdeef83f1d6c696039b6a917190dcbbad3220 | 90,807,549,859,261,940,000,000,000,000,000,000,000 | 66 | tty: make FONTX ioctl use the tty pointer they were actually passed
Some of the font tty ioctl's always used the current foreground VC for
their operations. Don't do that then.
This fixes a data race on fg_console.
Side note: both Michael Ellerman and Jiri Slaby point out that all these
ioctls are deprecated, and should probably have been removed long ago,
and everything seems to be using the KDFONTOP ioctl instead.
In fact, Michael points out that it looks like busybox's loadfont
program seems to have switched over to using KDFONTOP exactly _because_
of this bug (ahem.. 12 years ago ;-).
Reported-by: Minh Yuan <[email protected]>
Acked-by: Michael Ellerman <[email protected]>
Acked-by: Jiri Slaby <[email protected]>
Cc: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
AnyP::Uri::hostOrIp() const
{
static char ip[MAX_IPSTRLEN];
if (hostIsNumeric())
return SBuf(hostIP().toStr(ip, sizeof(ip)));
else
return SBuf(host());
} | 0 | [
"CWE-20"
] | squid | dfd818595b54942cb1adc45f6aed95c9b706e3a8 | 251,819,291,525,922,550,000,000,000,000,000,000,000 | 8 | Merge pull request from GHSA-jvf6-h9gj-pmj6
* Add slash prefix to path-rootless or path-noscheme URLs
* Update src/anyp/Uri.cc
Co-authored-by: Alex Rousskov <[email protected]>
* restore file trailer GH auto-removes
* Remove redundant path-empty check
* Removed stale comment left behind by b2ab59a
Many things imply a leading `/` in a URI. Their enumeration is likely to
(and did) become stale, misleading the reader.
* fixup: Remind that the `src` iterator may be at its end
We are dereferencing `src` without comparing it to `\0`.
To many readers that (incorrectly) implies that we are not done iterating yet.
Also fixed branch-added comment indentation.
Co-authored-by: Alex Rousskov <[email protected]> |
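A minimal sketch of the normalization in the first bullet, using an invented helper rather than Squid's AnyP::Uri code: a path-rootless or path-noscheme path simply gains a leading slash.

```c
#include <stdio.h>

/* Ensure the request path is path-absolute before further parsing. */
static void normalize_path(char *out, size_t outsz, const char *path)
{
    if (path[0] != '/')
        snprintf(out, outsz, "/%s", path);   /* prepend the missing slash */
    else
        snprintf(out, outsz, "%s", path);
}

int main(void)
{
    char buf[64];
    normalize_path(buf, sizeof buf, "a/b");  /* path-rootless input */
    printf("%s\n", buf);                     /* prints "/a/b" */
    return 0;
}
```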
sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_ecnehdr_t *ecne;
struct sctp_chunk *chunk = arg;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
ecne = (sctp_ecnehdr_t *) chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_ecnehdr_t));
/* If this is a newer ECNE than the last CWR packet we sent out */
sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE,
SCTP_U32(ntohl(ecne->lowest_tsn)));
return SCTP_DISPOSITION_CONSUME;
} | 0 | [
"CWE-20"
] | linux-2.6 | ba0166708ef4da7eeb61dd92bbba4d5a749d6561 | 251,868,108,725,517,370,000,000,000,000,000,000,000 | 25 | sctp: Fix kernel panic while process protocol violation parameter
Since call to function sctp_sf_abort_violation() need paramter 'arg' with
'struct sctp_chunk' type, it will read the chunk type and chunk length from
the chunk_hdr member of chunk. But call to sctp_sf_violation_paramlen()
always with 'struct sctp_paramhdr' type's parameter, it will be passed to
sctp_sf_abort_violation(). This may cause kernel panic.
sctp_sf_violation_paramlen()
|-- sctp_sf_abort_violation()
|-- sctp_make_abort_violation()
This patch fixed this problem. This patch also fix two place which called
sctp_sf_violation_paramlen() with wrong paramter type.
Signed-off-by: Wei Yongjun <[email protected]>
Signed-off-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
struct bpf_reg_state *dst_reg,
struct bpf_reg_state src_reg)
{
struct bpf_reg_state *regs = cur_regs(env);
u8 opcode = BPF_OP(insn->code);
bool src_known, dst_known;
s64 smin_val, smax_val;
u64 umin_val, umax_val;
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
umin_val = src_reg.umin_value;
umax_val = src_reg.umax_value;
src_known = tnum_is_const(src_reg.var_off);
dst_known = tnum_is_const(dst_reg->var_off);
if (!src_known &&
opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
__mark_reg_unknown(dst_reg);
return 0;
}
switch (opcode) {
case BPF_ADD:
if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
signed_add_overflows(dst_reg->smax_value, smax_val)) {
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value += smin_val;
dst_reg->smax_value += smax_val;
}
if (dst_reg->umin_value + umin_val < umin_val ||
dst_reg->umax_value + umax_val < umax_val) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value += umin_val;
dst_reg->umax_value += umax_val;
}
dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
break;
case BPF_SUB:
if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
signed_sub_overflows(dst_reg->smax_value, smin_val)) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value -= smax_val;
dst_reg->smax_value -= smin_val;
}
if (dst_reg->umin_value < umax_val) {
/* Overflow possible, we know nothing */
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
/* Cannot overflow (as long as bounds are consistent) */
dst_reg->umin_value -= umax_val;
dst_reg->umax_value -= umin_val;
}
dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
break;
case BPF_MUL:
dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
if (smin_val < 0 || dst_reg->smin_value < 0) {
/* Ain't nobody got time to multiply that sign */
__mark_reg_unbounded(dst_reg);
__update_reg_bounds(dst_reg);
break;
}
/* Both values are positive, so we can work with unsigned and
* copy the result to signed (unless it exceeds S64_MAX).
*/
if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
/* Potential overflow, we know nothing */
__mark_reg_unbounded(dst_reg);
/* (except what we can learn from the var_off) */
__update_reg_bounds(dst_reg);
break;
}
dst_reg->umin_value *= umin_val;
dst_reg->umax_value *= umax_val;
if (dst_reg->umax_value > S64_MAX) {
/* Overflow possible, we know nothing */
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
break;
case BPF_AND:
if (src_known && dst_known) {
__mark_reg_known(dst_reg, dst_reg->var_off.value &
src_reg.var_off.value);
break;
}
/* We get our minimum from the var_off, since that's inherently
* bitwise. Our maximum is the minimum of the operands' maxima.
*/
dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
dst_reg->umin_value = dst_reg->var_off.value;
dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
if (dst_reg->smin_value < 0 || smin_val < 0) {
/* Lose signed bounds when ANDing negative numbers,
* ain't nobody got time for that.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
/* ANDing two positives gives a positive, so safe to
* cast result into s64.
*/
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_OR:
if (src_known && dst_known) {
__mark_reg_known(dst_reg, dst_reg->var_off.value |
src_reg.var_off.value);
break;
}
/* We get our maximum from the var_off, and our minimum is the
* maximum of the operands' minima
*/
dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
dst_reg->umax_value = dst_reg->var_off.value |
dst_reg->var_off.mask;
if (dst_reg->smin_value < 0 || smin_val < 0) {
/* Lose signed bounds when ORing negative numbers,
* ain't nobody got time for that.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
} else {
/* ORing two positives gives a positive, so safe to
* cast result into s64.
*/
dst_reg->smin_value = dst_reg->umin_value;
dst_reg->smax_value = dst_reg->umax_value;
}
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_LSH:
if (umax_val >= insn_bitness) {
/* Shifts greater than 31 or 63 are undefined.
* This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* We lose all sign bit information (except what we can pick
* up from var_off)
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
/* If we might shift our top bit out, then we know nothing */
if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
dst_reg->umin_value = 0;
dst_reg->umax_value = U64_MAX;
} else {
dst_reg->umin_value <<= umin_val;
dst_reg->umax_value <<= umax_val;
}
if (src_known)
dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
else
dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val);
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
case BPF_RSH:
if (umax_val >= insn_bitness) {
/* Shifts greater than 31 or 63 are undefined.
* This includes shifts by a negative number.
*/
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
/* BPF_RSH is an unsigned shift. If the value in dst_reg might
* be negative, then either:
* 1) src_reg might be zero, so the sign bit of the result is
* unknown, so we lose our signed bounds
* 2) it's known negative, thus the unsigned bounds capture the
* signed bounds
* 3) the signed bounds cross zero, so they tell us nothing
* about the result
* If the value in dst_reg is known nonnegative, then again the
	 * unsigned bounds capture the signed bounds.
* Thus, in all cases it suffices to blow away our signed bounds
* and rely on inferring new ones from the unsigned bounds and
* var_off of the result.
*/
dst_reg->smin_value = S64_MIN;
dst_reg->smax_value = S64_MAX;
if (src_known)
dst_reg->var_off = tnum_rshift(dst_reg->var_off,
umin_val);
else
dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val);
dst_reg->umin_value >>= umax_val;
dst_reg->umax_value >>= umin_val;
/* We may learn something more from the var_off */
__update_reg_bounds(dst_reg);
break;
default:
mark_reg_unknown(env, regs, insn->dst_reg);
break;
}
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->32 */
coerce_reg_to_size(dst_reg, 4);
coerce_reg_to_size(&src_reg, 4);
}
__reg_deduce_bounds(dst_reg);
__reg_bound_offset(dst_reg);
return 0;
} | 0 | [
"CWE-190"
] | linux | bb7f0f989ca7de1153bd128a40a71709e339fa03 | 130,962,292,423,363,260,000,000,000,000,000,000,000 | 229 | bpf: fix integer overflows
There were various issues related to the limited size of integers used in
the verifier:
- `off + size` overflow in __check_map_access()
- `off + reg->off` overflow in check_mem_access()
- `off + reg->var_off.value` overflow or 32-bit truncation of
`reg->var_off.value` in check_mem_access()
- 32-bit truncation in check_stack_boundary()
Make sure that any integer math cannot overflow by not allowing
pointer math with large values.
Also reduce the scope of "scalar op scalar" tracking.
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]> |
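For reference, the signed_add_overflows()/signed_sub_overflows() predicates used in the function above can be written without invoking undefined signed overflow by doing the arithmetic in unsigned space. A minimal user-space sketch (with s64/u64 mapped onto the stdint types; the kernel's own helpers are equivalent):

#include <stdbool.h>
#include <stdint.h>

typedef int64_t s64;
typedef uint64_t u64;

/* True if a + b wraps outside the signed 64-bit range. */
static bool signed_add_overflows(s64 a, s64 b)
{
	s64 res = (s64)((u64)a + (u64)b);   /* unsigned add: well defined */

	if (b < 0)
		return res > a;   /* adding a negative must not increase a */
	return res < a;           /* adding a positive must not decrease a */
}

/* True if a - b wraps outside the signed 64-bit range. */
static bool signed_sub_overflows(s64 a, s64 b)
{
	s64 res = (s64)((u64)a - (u64)b);

	if (b < 0)
		return res < a;   /* subtracting a negative must not decrease a */
	return res > a;
}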
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
} | 0 | [] | linux | 7bced397510ab569d31de4c70b39e13355046387 | 2,641,315,179,206,444,000,000,000,000,000,000,000 | 5 | net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <[email protected]>
Cc: Vinod Koul <[email protected]>
Cc: David Whipple <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: <[email protected]>
Reported-by: Roman Gushchin <[email protected]>
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Dan Williams <[email protected]> |
create_proxy_socket (char *template)
{
g_autofree char *proxy_socket = g_build_filename (g_get_user_runtime_dir (), template, NULL);
int fd;
fd = g_mkstemp (proxy_socket);
if (fd == -1)
return NULL;
close (fd);
return g_steal_pointer (&proxy_socket);
} | 0 | [
"CWE-20"
] | flatpak | 902fb713990a8f968ea4350c7c2a27ff46f1a6c4 | 172,225,728,561,482,640,000,000,000,000,000,000,000 | 13 | Use seccomp to filter out TIOCSTI ioctl
This would otherwise let the sandbox add input to the controlling tty. |
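To illustrate the technique (a sketch only, not Flatpak's actual filter code): with libseccomp, a process can install a filter that leaves everything else alone but makes ioctl(fd, TIOCSTI, ...) fail with EPERM:

#include <errno.h>
#include <seccomp.h>        /* link with -lseccomp */
#include <sys/ioctl.h>      /* TIOCSTI */

static int install_tiocsti_filter(void)
{
	/* Default-allow policy; we only blacklist the dangerous ioctl. */
	scmp_filter_ctx ctx = seccomp_init(SCMP_ACT_ALLOW);
	int ret = -1;

	if (ctx == NULL)
		return -1;

	/* Deny ioctl() whenever argument 1 (the request) equals TIOCSTI. */
	if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(ioctl), 1,
	                     SCMP_A1(SCMP_CMP_EQ, (scmp_datum_t)TIOCSTI)) == 0)
		ret = seccomp_load(ctx);

	seccomp_release(ctx);
	return ret;
}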
ossl_asn1_default_tag(VALUE obj)
{
VALUE tmp_class, tag;
tmp_class = CLASS_OF(obj);
while (!NIL_P(tmp_class)) {
tag = rb_hash_lookup(class_tag_map, tmp_class);
if (tag != Qnil)
return NUM2INT(tag);
tmp_class = rb_class_superclass(tmp_class);
}
ossl_raise(eASN1Error, "universal tag for %"PRIsVALUE" not found",
rb_obj_class(obj));
} | 0 | [
"CWE-119"
] | openssl | 1648afef33c1d97fb203c82291b8a61269e85d3b | 69,940,503,738,903,060,000,000,000,000,000,000,000 | 14 | asn1: fix out-of-bounds read in decoding constructed objects
OpenSSL::ASN1.{decode,decode_all,traverse} have an out-of-bounds read
bug. int_ossl_asn1_decode0_cons() does not give the correct available
length to ossl_asn1_decode() when decoding the inner components of a
constructed object. This can cause an out-of-bounds read if a crafted
input is given.
Reference: https://hackerone.com/reports/170316 |
static inline pte_t pte_mksoft_dirty(pte_t pte)
{
return pte;
} | 0 | [] | linux | ee53664bda169f519ce3c6a22d378f0b946c8178 | 303,277,900,105,299,700,000,000,000,000,000,000,000 | 4 | mm: Fix NULL pointer dereference in madvise(MADV_WILLNEED) support
Sasha Levin found a NULL pointer dereference that is due to a missing
page table lock, which in turn is due to the pmd entry in question being
a transparent huge-table entry.
The code - introduced in commit 1998cc048901 ("mm: make
madvise(MADV_WILLNEED) support swap file prefetch") - correctly checks
for this situation using pmd_none_or_trans_huge_or_clear_bad(), but it
turns out that that function doesn't work correctly.
pmd_none_or_trans_huge_or_clear_bad() expected that pmd_bad() would
trigger if the transparent hugepage bit was set, but it doesn't do that
if pmd_numa() is also set. Note that the NUMA bit only gets set on real
NUMA machines, so people trying to reproduce this on most normal
development systems would never actually trigger this.
Fix it by removing the very subtle (and subtly incorrect) expectation,
and instead just checking pmd_trans_huge() explicitly.
Reported-by: Sasha Levin <[email protected]>
Acked-by: Andrea Arcangeli <[email protected]>
[ Additionally remove the now stale test for pmd_trans_huge() inside the
pmd_bad() case - Linus ]
Signed-off-by: Linus Torvalds <[email protected]> |
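A simplified sketch of the repaired helper, testing pmd_trans_huge() explicitly rather than relying on pmd_bad() (the real kernel version additionally uses pmd_read_atomic() and a barrier, omitted here):

static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd;

	/* A none or transparent-huge pmd must not be walked as a
	 * page-table page; report it to the caller instead. */
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}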
void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *buf,
size_t len, gfp_t gfp)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
NL80211_CMD_AUTHENTICATE, gfp, -1, NULL, 0);
} | 0 | [
"CWE-120"
] | linux | f88eb7c0d002a67ef31aeb7850b42ff69abc46dc | 157,027,169,378,828,770,000,000,000,000,000,000,000 | 7 | nl80211: validate beacon head
We currently don't validate the beacon head, i.e. the header,
fixed part and elements that are to go in front of the TIM
element. This means that the variable elements there can be
malformed, e.g. have a length exceeding the buffer size, but
most downstream code from this assumes that this has already
been checked.
Add the necessary checks to the netlink policy.
Cc: [email protected]
Fixes: ed1b6cc7f80f ("cfg80211/nl80211: add beacon settings")
Link: https://lore.kernel.org/r/1569009255-I7ac7fbe9436e9d8733439eab8acbbd35e55c74ef@changeid
Signed-off-by: Johannes Berg <[email protected]> |
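The validation in question is the classic TLV walk over information elements: each element declares its own length, and every declared length must fit inside the remaining buffer. A hedged, self-contained sketch of such a check (not the netlink-policy code itself):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Each IE is: 1-byte ID, 1-byte length, then `length` bytes of payload. */
static bool ies_are_well_formed(const uint8_t *data, size_t len)
{
	while (len >= 2) {
		size_t elen = data[1];

		if (2 + elen > len)
			return false;      /* element overruns the buffer */
		data += 2 + elen;
		len  -= 2 + elen;
	}
	return len == 0;                   /* no trailing partial element */
}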
explicit NaturalCompare(const bool caseSensitive = true)
: m_caseSensitive(caseSensitive)
{
#ifdef QBT_USES_QT5
#if defined(Q_OS_WIN)
// Without ICU library, QCollator uses the native API on Windows 7+. But that API
// sorts older versions of μTorrent differently than the newer ones because the
// 'μ' character is encoded differently and the native API can't cope with that.
// So default to using our custom natural sorting algorithm instead.
// See #5238 and #5240
// Without ICU library, QCollator doesn't support `setNumericMode(true)` on OS older than Win7
// if (QSysInfo::windowsVersion() < QSysInfo::WV_WINDOWS7)
return;
#endif
m_collator.setNumericMode(true);
m_collator.setCaseSensitivity(caseSensitive ? Qt::CaseSensitive : Qt::CaseInsensitive);
#endif
} | 0 | [
"CWE-20",
"CWE-79"
] | qBittorrent | 6ca3e4f094da0a0017cb2d483ec1db6176bb0b16 | 254,125,687,261,395,550,000,000,000,000,000,000,000 | 18 | Add Utils::String::toHtmlEscaped |
static struct audit_watch *audit_init_watch(char *path)
{
struct audit_watch *watch;
watch = kzalloc(sizeof(*watch), GFP_KERNEL);
if (unlikely(!watch))
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&watch->rules);
atomic_set(&watch->count, 1);
watch->path = path;
watch->dev = (dev_t)-1;
watch->ino = (unsigned long)-1;
return watch;
} | 0 | [
"CWE-362"
] | linux-2.6 | 8f7b0ba1c853919b85b54774775f567f30006107 | 313,867,961,973,011,480,000,000,000,000,000,000,000 | 16 | Fix inotify watch removal/umount races
Inotify watch removals suck violently.
To kick the watch out we need (in this order) inode->inotify_mutex and
ih->mutex. That's fine if we have a hold on inode; however, for all
other cases we need to make damn sure we don't race with umount. We can
*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
happily sail past it and we'll end with reference to inode potentially
outliving its superblock.
Ideally we just want to grab an active reference to superblock if we
can; that will make sure we won't go into inotify_umount_inodes() until
we are done. Cleanup is just deactivate_super().
However, that leaves a messy case - what if we *are* racing with
umount() and active references to superblock can't be acquired anymore?
We can bump ->s_count, grab ->s_umount, which will almost certainly wait
until the superblock is shut down and the watch in question is pining
for fjords. That's fine, but there is a problem - we might have hit the
window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
the moment when superblock is past the point of no return and is heading
for shutdown) and the moment when deactivate_super() acquires
->s_umount.
We could just do drop_super() yield() and retry, but that's rather
antisocial and this stuff is luser-triggerable. OTOH, having grabbed
->s_umount and having found that we'd got there first (i.e. that
->s_root is non-NULL) we know that we won't race with
inotify_umount_inodes().
So we could grab a reference to watch and do the rest as above, just
with drop_super() instead of deactivate_super(), right? Wrong. We had
to drop ih->mutex before we could grab ->s_umount. So the watch
could've been gone already.
That still can be dealt with - we need to save watch->wd, do idr_find()
and compare its result with our pointer. If they match, we either have
the damn thing still alive or we'd lost not one but two races at once,
the watch had been killed and a new one got created with the same ->wd
at the same address. That couldn't have happened in inotify_destroy(),
but inotify_rm_wd() could run into that. Still, "new one got created"
is not a problem - we have every right to kill it or leave it alone,
whatever's more convenient.
So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
"grab it and kill it" check. If it's been our original watch, we are
fine, if it's a newcomer - nevermind, just pretend that we'd won the
race and kill the fscker anyway; we are safe since we know that its
superblock won't be going away.
And yes, this is far beyond mere "not very pretty"; so's the entire
concept of inotify to start with.
Signed-off-by: Al Viro <[email protected]>
Acked-by: Greg KH <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static void register_subpage(struct uc_struct *uc, FlatView *fv, MemoryRegionSection *section)
{
AddressSpaceDispatch *d = flatview_to_dispatch(fv);
subpage_t *subpage;
hwaddr base = section->offset_within_address_space
& TARGET_PAGE_MASK;
MemoryRegionSection *existing = phys_page_find(d, base);
MemoryRegionSection subsection = {
.offset_within_address_space = base,
.size = int128_make64(TARGET_PAGE_SIZE),
};
hwaddr start, end;
assert(existing->mr->subpage || existing->mr == &(section->mr->uc->io_mem_unassigned));
if (!(existing->mr->subpage)) {
subpage = subpage_init(uc, fv, base);
subsection.fv = fv;
subsection.mr = &subpage->iomem;
phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
phys_section_add(uc, &d->map, &subsection));
} else {
subpage = container_of(existing->mr, subpage_t, iomem);
}
start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
end = start + int128_get64(section->size) - 1;
subpage_register(uc, subpage, start, end,
phys_section_add(uc, &d->map, section));
} | 0 | [
"CWE-476"
] | unicorn | 3d3deac5e6d38602b689c4fef5dac004f07a2e63 | 77,365,933,942,794,780,000,000,000,000,000,000,000 | 29 | Fix crash when mapping a big memory and calling uc_close |
static inline struct sock *icmpv6_sk(struct net *net)
{
return net->ipv6.icmp_sk[smp_processor_id()];
} | 0 | [
"CWE-20",
"CWE-200"
] | linux | 79dc7e3f1cd323be4c81aa1a94faa1b3ed987fb2 | 243,305,282,377,506,000,000,000,000,000,000,000,000 | 4 | net: handle no dst on skb in icmp6_send
Andrey reported the following while fuzzing the kernel with syzkaller:
kasan: CONFIG_KASAN_INLINE enabled
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN
Modules linked in:
CPU: 0 PID: 3859 Comm: a.out Not tainted 4.9.0-rc6+ #429
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
task: ffff8800666d4200 task.stack: ffff880067348000
RIP: 0010:[<ffffffff833617ec>] [<ffffffff833617ec>]
icmp6_send+0x5fc/0x1e30 net/ipv6/icmp.c:451
RSP: 0018:ffff88006734f2c0 EFLAGS: 00010206
RAX: ffff8800666d4200 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: dffffc0000000000 RDI: 0000000000000018
RBP: ffff88006734f630 R08: ffff880064138418 R09: 0000000000000003
R10: dffffc0000000000 R11: 0000000000000005 R12: 0000000000000000
R13: ffffffff84e7e200 R14: ffff880064138484 R15: ffff8800641383c0
FS: 00007fb3887a07c0(0000) GS:ffff88006cc00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000020000000 CR3: 000000006b040000 CR4: 00000000000006f0
Stack:
ffff8800666d4200 ffff8800666d49f8 ffff8800666d4200 ffffffff84c02460
ffff8800666d4a1a 1ffff1000ccdaa2f ffff88006734f498 0000000000000046
ffff88006734f440 ffffffff832f4269 ffff880064ba7456 0000000000000000
Call Trace:
[<ffffffff83364ddc>] icmpv6_param_prob+0x2c/0x40 net/ipv6/icmp.c:557
[< inline >] ip6_tlvopt_unknown net/ipv6/exthdrs.c:88
[<ffffffff83394405>] ip6_parse_tlv+0x555/0x670 net/ipv6/exthdrs.c:157
[<ffffffff8339a759>] ipv6_parse_hopopts+0x199/0x460 net/ipv6/exthdrs.c:663
[<ffffffff832ee773>] ipv6_rcv+0xfa3/0x1dc0 net/ipv6/ip6_input.c:191
...
icmp6_send / icmpv6_send is invoked for both rx and tx paths. In both
cases the dst->dev should be preferred for determining the L3 domain
if the dst has been set on the skb. Fall back to skb->dev if it has
not. This covers the case reported here where icmp6_send is invoked on
Rx before the route lookup.
Fixes: 5d41ce29e ("net: icmp6_send should use dst dev to determine L3 domain")
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David Ahern <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
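The fix reduces to a small device-selection rule; schematically (a simplification of the patch, not the literal upstream diff):

/* Prefer the route's device when a dst is attached (tx path);
 * fall back to the receiving device (rx path, before route lookup). */
static const struct net_device *icmp6_l3_dev(const struct sk_buff *skb)
{
	if (skb_dst(skb))
		return skb_dst(skb)->dev;
	return skb->dev;
}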
static data_type op(data_type el1, data_type el2) {
return el1 < el2 ? el1 : el2;
} | 0 | [
"CWE-125"
] | tensorflow | 953f28dca13c92839ba389c055587cfe6c723578 | 42,306,755,844,433,670,000,000,000,000,000,000,000 | 3 | Prevent a null pointer exception in TFLite
PiperOrigin-RevId: 370800206
Change-Id: Idd437ebce4ff224120d8eefc1c14c062173b71d6 |
void rtnl_af_register(struct rtnl_af_ops *ops)
{
rtnl_lock();
list_add_tail_rcu(&ops->list, &rtnl_af_ops);
rtnl_unlock();
} | 0 | [
"CWE-476"
] | linux | f428fe4a04cc339166c8bbd489789760de3a0cee | 28,213,358,325,478,250,000,000,000,000,000,000,000 | 6 | rtnetlink: give a user socket to get_target_net()
This function is used from two places: rtnl_dump_ifinfo and
rtnl_getlink. In rtnl_getlink(), we pass a request skb to
get_target_net(), but in rtnl_dump_ifinfo we pass a response skb.
The problem here is that NETLINK_CB() isn't initialized for the response
skb. In both cases we can obtain the user socket and pass it to
get_target_net() instead of the skb.
This bug was found by syzkaller with this call-trace:
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN
Modules linked in:
CPU: 1 PID: 3149 Comm: syzkaller140561 Not tainted 4.15.0-rc4-mm1+ #47
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS
Google 01/01/2011
RIP: 0010:__netlink_ns_capable+0x8b/0x120 net/netlink/af_netlink.c:868
RSP: 0018:ffff8801c880f348 EFLAGS: 00010206
RAX: dffffc0000000000 RBX: 0000000000000000 RCX: ffffffff8443f900
RDX: 000000000000007b RSI: ffffffff86510f40 RDI: 00000000000003d8
RBP: ffff8801c880f360 R08: 0000000000000000 R09: 1ffff10039101e4f
R10: 0000000000000000 R11: 0000000000000001 R12: ffffffff86510f40
R13: 000000000000000c R14: 0000000000000004 R15: 0000000000000011
FS: 0000000001a1a880(0000) GS:ffff8801db300000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000020151000 CR3: 00000001c9511005 CR4: 00000000001606e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
netlink_ns_capable+0x26/0x30 net/netlink/af_netlink.c:886
get_target_net+0x9d/0x120 net/core/rtnetlink.c:1765
rtnl_dump_ifinfo+0x2e5/0xee0 net/core/rtnetlink.c:1806
netlink_dump+0x48c/0xce0 net/netlink/af_netlink.c:2222
__netlink_dump_start+0x4f0/0x6d0 net/netlink/af_netlink.c:2319
netlink_dump_start include/linux/netlink.h:214 [inline]
rtnetlink_rcv_msg+0x7f0/0xb10 net/core/rtnetlink.c:4485
netlink_rcv_skb+0x21e/0x460 net/netlink/af_netlink.c:2441
rtnetlink_rcv+0x1c/0x20 net/core/rtnetlink.c:4540
netlink_unicast_kernel net/netlink/af_netlink.c:1308 [inline]
netlink_unicast+0x4be/0x6a0 net/netlink/af_netlink.c:1334
netlink_sendmsg+0xa4a/0xe60 net/netlink/af_netlink.c:1897
Cc: Jiri Benc <[email protected]>
Fixes: 79e1ad148c84 ("rtnetlink: use netnsid to query interface")
Signed-off-by: Andrei Vagin <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
TEST(IndexBoundsBuilderTest, TranslateGtPositiveInfinity) {
auto testIndex = buildSimpleIndexEntry();
BSONObj obj = fromjson("{a: {$gt: Infinity}}");
auto expr = parseMatchExpression(obj);
BSONElement elt = obj.firstElement();
OrderedIntervalList oil;
IndexBoundsBuilder::BoundsTightness tightness;
IndexBoundsBuilder::translate(expr.get(), elt, testIndex, &oil, &tightness);
ASSERT_EQUALS(oil.name, "a");
ASSERT_EQUALS(oil.intervals.size(), 0U);
ASSERT_EQUALS(tightness, IndexBoundsBuilder::EXACT);
} | 0 | [
"CWE-754"
] | mongo | f8f55e1825ee5c7bdb3208fc7c5b54321d172732 | 274,100,535,942,119,800,000,000,000,000,000,000,000 | 12 | SERVER-44377 generate correct plan for indexed inequalities to null |
void InitFromNumeric(const ::Numeric::Numeric& numeric)
{
InitCommand(numeric.GetNumeric());
for (std::vector<std::string>::const_iterator i = numeric.GetParams().begin(); i != numeric.GetParams().end(); ++i)
PushParamRef(*i);
} | 0 | [
"CWE-200",
"CWE-732"
] | inspircd | 4350a11c663b0d75f8119743bffb7736d87abd4d | 252,917,300,415,494,480,000,000,000,000,000,000,000 | 6 | Fix sending malformed pong messages in some cases. |
SSL_CTX *SSL_CTX_new(const SSL_METHOD *meth)
{
SSL_CTX *ret=NULL;
if (meth == NULL)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_NULL_SSL_METHOD_PASSED);
return(NULL);
}
if (SSL_get_ex_data_X509_STORE_CTX_idx() < 0)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_X509_VERIFICATION_SETUP_PROBLEMS);
goto err;
}
ret=(SSL_CTX *)OPENSSL_malloc(sizeof(SSL_CTX));
if (ret == NULL)
goto err;
memset(ret,0,sizeof(SSL_CTX));
ret->method=meth;
ret->cert_store=NULL;
ret->session_cache_mode=SSL_SESS_CACHE_SERVER;
ret->session_cache_size=SSL_SESSION_CACHE_MAX_SIZE_DEFAULT;
ret->session_cache_head=NULL;
ret->session_cache_tail=NULL;
/* We take the system default */
ret->session_timeout=meth->get_timeout();
ret->new_session_cb=0;
ret->remove_session_cb=0;
ret->get_session_cb=0;
ret->generate_session_id=0;
memset((char *)&ret->stats,0,sizeof(ret->stats));
ret->references=1;
ret->quiet_shutdown=0;
/* ret->cipher=NULL;*/
/* ret->s2->challenge=NULL;
ret->master_key=NULL;
ret->key_arg=NULL;
ret->s2->conn_id=NULL; */
ret->info_callback=NULL;
ret->app_verify_callback=0;
ret->app_verify_arg=NULL;
ret->max_cert_list=SSL_MAX_CERT_LIST_DEFAULT;
ret->read_ahead=0;
ret->msg_callback=0;
ret->msg_callback_arg=NULL;
ret->verify_mode=SSL_VERIFY_NONE;
#if 0
ret->verify_depth=-1; /* Don't impose a limit (but x509_lu.c does) */
#endif
ret->sid_ctx_length=0;
ret->default_verify_callback=NULL;
if ((ret->cert=ssl_cert_new()) == NULL)
goto err;
ret->default_passwd_callback=0;
ret->default_passwd_callback_userdata=NULL;
ret->client_cert_cb=0;
ret->app_gen_cookie_cb=0;
ret->app_verify_cookie_cb=0;
ret->sessions=lh_SSL_SESSION_new();
if (ret->sessions == NULL) goto err;
ret->cert_store=X509_STORE_new();
if (ret->cert_store == NULL) goto err;
ssl_create_cipher_list(ret->method,
&ret->cipher_list,&ret->cipher_list_by_id,
meth->version == SSL2_VERSION ? "SSLv2" : SSL_DEFAULT_CIPHER_LIST);
if (ret->cipher_list == NULL
|| sk_SSL_CIPHER_num(ret->cipher_list) <= 0)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_LIBRARY_HAS_NO_CIPHERS);
goto err2;
}
ret->param = X509_VERIFY_PARAM_new();
if (!ret->param)
goto err;
if ((ret->rsa_md5=EVP_get_digestbyname("ssl2-md5")) == NULL)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_UNABLE_TO_LOAD_SSL2_MD5_ROUTINES);
goto err2;
}
if ((ret->md5=EVP_get_digestbyname("ssl3-md5")) == NULL)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_UNABLE_TO_LOAD_SSL3_MD5_ROUTINES);
goto err2;
}
if ((ret->sha1=EVP_get_digestbyname("ssl3-sha1")) == NULL)
{
SSLerr(SSL_F_SSL_CTX_NEW,SSL_R_UNABLE_TO_LOAD_SSL3_SHA1_ROUTINES);
goto err2;
}
if ((ret->client_CA=sk_X509_NAME_new_null()) == NULL)
goto err;
CRYPTO_new_ex_data(CRYPTO_EX_INDEX_SSL_CTX, ret, &ret->ex_data);
ret->extra_certs=NULL;
ret->comp_methods=SSL_COMP_get_compression_methods();
ret->max_send_fragment = SSL3_RT_MAX_PLAIN_LENGTH;
#ifndef OPENSSL_NO_TLSEXT
ret->tlsext_servername_callback = 0;
ret->tlsext_servername_arg = NULL;
/* Setup RFC4507 ticket keys */
if ((RAND_pseudo_bytes(ret->tlsext_tick_key_name, 16) <= 0)
|| (RAND_bytes(ret->tlsext_tick_hmac_key, 16) <= 0)
|| (RAND_bytes(ret->tlsext_tick_aes_key, 16) <= 0))
ret->options |= SSL_OP_NO_TICKET;
ret->tlsext_status_cb = 0;
ret->tlsext_status_arg = NULL;
# ifndef OPENSSL_NO_NPN
ret->next_protos_advertised_cb = 0;
ret->next_proto_select_cb = 0;
# endif
#endif
#ifndef OPENSSL_NO_PSK
ret->psk_identity_hint=NULL;
ret->psk_client_callback=NULL;
ret->psk_server_callback=NULL;
#endif
#ifndef OPENSSL_NO_BUF_FREELISTS
ret->freelist_max_len = SSL_MAX_BUF_FREELIST_LEN_DEFAULT;
ret->rbuf_freelist = OPENSSL_malloc(sizeof(SSL3_BUF_FREELIST));
if (!ret->rbuf_freelist)
goto err;
ret->rbuf_freelist->chunklen = 0;
ret->rbuf_freelist->len = 0;
ret->rbuf_freelist->head = NULL;
ret->wbuf_freelist = OPENSSL_malloc(sizeof(SSL3_BUF_FREELIST));
if (!ret->wbuf_freelist)
{
OPENSSL_free(ret->rbuf_freelist);
goto err;
}
ret->wbuf_freelist->chunklen = 0;
ret->wbuf_freelist->len = 0;
ret->wbuf_freelist->head = NULL;
#endif
#ifndef OPENSSL_NO_ENGINE
ret->client_cert_engine = NULL;
#ifdef OPENSSL_SSL_CLIENT_ENGINE_AUTO
#define eng_strx(x) #x
#define eng_str(x) eng_strx(x)
/* Use specific client engine automatically... ignore errors */
{
ENGINE *eng;
eng = ENGINE_by_id(eng_str(OPENSSL_SSL_CLIENT_ENGINE_AUTO));
if (!eng)
{
ERR_clear_error();
ENGINE_load_builtin_engines();
eng = ENGINE_by_id(eng_str(OPENSSL_SSL_CLIENT_ENGINE_AUTO));
}
if (!eng || !SSL_CTX_set_client_cert_engine(ret, eng))
ERR_clear_error();
}
#endif
#endif
/* Default is to connect to non-RI servers. When RI is more widely
* deployed might change this.
*/
ret->options |= SSL_OP_LEGACY_SERVER_CONNECT;
return(ret);
err:
SSLerr(SSL_F_SSL_CTX_NEW,ERR_R_MALLOC_FAILURE);
err2:
if (ret != NULL) SSL_CTX_free(ret);
return(NULL);
} | 0 | [] | openssl | ee2ffc279417f15fef3b1073c7dc81a908991516 | 182,542,117,519,272,580,000,000,000,000,000,000,000 | 189 | Add Next Protocol Negotiation. |
void rand_drbg_cleanup_int(void)
{
if (master_drbg != NULL) {
RAND_DRBG_free(master_drbg);
master_drbg = NULL;
CRYPTO_THREAD_cleanup_local(&private_drbg);
CRYPTO_THREAD_cleanup_local(&public_drbg);
}
} | 0 | [
"CWE-330"
] | openssl | 1b0fe00e2704b5e20334a16d3c9099d1ba2ef1be | 265,841,479,152,424,580,000,000,000,000,000,000,000 | 10 | drbg: ensure fork-safety without using a pthread_atfork handler
When the new OpenSSL CSPRNG was introduced in version 1.1.1,
it was announced in the release notes that it would be fork-safe,
which the old CSPRNG hadn't been.
The fork-safety was implemented using a fork count, which was
incremented by a pthread_atfork handler. Initially, this handler
was enabled by default. Unfortunately, the default behaviour
had to be changed for other reasons in commit b5319bdbd095, so
the new OpenSSL CSPRNG failed to keep its promise.
This commit restores the fork-safety using a different approach.
It replaces the fork count by a fork id, which coincides with
the process id on UNIX-like operating systems and is zero on other
operating systems. It is used to detect when an automatic reseed
after a fork is necessary.
To prevent a future regression, it also adds a test to verify that
the child reseeds after fork.
CVE-2019-1549
Reviewed-by: Paul Dale <[email protected]>
Reviewed-by: Matt Caswell <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/9802) |
uint64 MakeUID(unsigned int* seed) {
uint64 uid = 0;
#ifdef __MINGW32__
srand(*seed);
#endif
for (int i = 0; i < 7; ++i) { // avoid problems with 8-byte values
uid <<= 8;
// TODO(fgalligan): Move random number generation to platform specific code.
#ifdef _MSC_VER
(void)seed;
const int32 nn = rand();
#elif __ANDROID__
(void)seed;
int32 temp_num = 1;
int fd = open("/dev/urandom", O_RDONLY);
if (fd != -1) {
read(fd, &temp_num, sizeof(temp_num));
close(fd);
}
const int32 nn = temp_num;
#elif defined __MINGW32__
const int32 nn = rand();
#else
const int32 nn = rand_r(seed);
#endif
const int32 n = 0xFF & (nn >> 4); // throw away low-order bits
uid |= n;
}
return uid;
} | 0 | [
"CWE-20"
] | libvpx | 34d54b04e98dd0bac32e9aab0fbda0bf501bc742 | 269,992,910,938,987,750,000,000,000,000,000,000,000 | 35 | update libwebm to libwebm-1.0.0.27-358-gdbf1d10
changelog:
https://chromium.googlesource.com/webm/libwebm/+log/libwebm-1.0.0.27-351-g9f23fbc..libwebm-1.0.0.27-358-gdbf1d10
Change-Id: I28a6b3ae02a53fb1f2029eee11e9449afb94c8e3 |
int Lex_input_stream::find_keyword(Lex_ident_cli_st *kwd,
uint len, bool function)
{
const char *tok= m_tok_start;
SYMBOL *symbol= get_hash_symbol(tok, len, function);
if (symbol)
{
kwd->set_keyword(tok, len);
DBUG_ASSERT(tok >= get_buf());
DBUG_ASSERT(tok < get_end_of_query());
if (m_thd->variables.sql_mode & MODE_ORACLE)
{
switch (symbol->tok) {
case BEGIN_MARIADB_SYM: return BEGIN_ORACLE_SYM;
case BLOB_MARIADB_SYM: return BLOB_ORACLE_SYM;
case BODY_MARIADB_SYM: return BODY_ORACLE_SYM;
case CLOB_MARIADB_SYM: return CLOB_ORACLE_SYM;
case CONTINUE_MARIADB_SYM: return CONTINUE_ORACLE_SYM;
case DECLARE_MARIADB_SYM: return DECLARE_ORACLE_SYM;
case DECODE_MARIADB_SYM: return DECODE_ORACLE_SYM;
case ELSEIF_MARIADB_SYM: return ELSEIF_ORACLE_SYM;
case ELSIF_MARIADB_SYM: return ELSIF_ORACLE_SYM;
case EXCEPTION_MARIADB_SYM: return EXCEPTION_ORACLE_SYM;
case EXIT_MARIADB_SYM: return EXIT_ORACLE_SYM;
case GOTO_MARIADB_SYM: return GOTO_ORACLE_SYM;
case NUMBER_MARIADB_SYM: return NUMBER_ORACLE_SYM;
case OTHERS_MARIADB_SYM: return OTHERS_ORACLE_SYM;
case PACKAGE_MARIADB_SYM: return PACKAGE_ORACLE_SYM;
case RAISE_MARIADB_SYM: return RAISE_ORACLE_SYM;
case RAW_MARIADB_SYM: return RAW_ORACLE_SYM;
case RETURN_MARIADB_SYM: return RETURN_ORACLE_SYM;
case ROWTYPE_MARIADB_SYM: return ROWTYPE_ORACLE_SYM;
case VARCHAR2_MARIADB_SYM: return VARCHAR2_ORACLE_SYM;
}
}
if ((symbol->tok == NOT_SYM) &&
(m_thd->variables.sql_mode & MODE_HIGH_NOT_PRECEDENCE))
return NOT2_SYM;
if ((symbol->tok == OR2_SYM) &&
(m_thd->variables.sql_mode & MODE_PIPES_AS_CONCAT))
{
return (m_thd->variables.sql_mode & MODE_ORACLE) ?
ORACLE_CONCAT_SYM : MYSQL_CONCAT_SYM;
}
return symbol->tok;
}
return 0;
} | 0 | [
"CWE-703"
] | server | 39feab3cd31b5414aa9b428eaba915c251ac34a2 | 135,455,078,514,014,840,000,000,000,000,000,000,000 | 52 | MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
IF an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]> |
void __fastcall Edit(TCustomScpExplorerForm * ScpExplorer, TStrings * FileList)
{
ScpExplorer->StandaloneEdit(FileList->Strings[0]);
Abort();
}
| 0 | [
"CWE-787"
] | winscp | faa96e8144e6925a380f94a97aa382c9427f688d | 219,919,134,113,274,300,000,000,000,000,000,000,000 | 5 | Bug 1943: Prevent loading session settings that can lead to remote code execution from handled URLs
https://winscp.net/tracker/1943
(cherry picked from commit ec584f5189a856cd79509f754722a6898045c5e0)
Source commit: 0f4be408b3f01132b00682da72d925d6c4ee649b |
int hns_ppe_get_sset_count(int stringset)
{
if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
return ETH_PPE_STATIC_NUM;
return 0;
} | 0 | [
"CWE-119",
"CWE-703"
] | linux | 412b65d15a7f8a93794653968308fc100f2aa87c | 318,131,185,558,873,300,000,000,000,000,000,000,000 | 6 | net: hns: fix ethtool_get_strings overflow in hns driver
hns_get_sset_count() returns HNS_NET_STATS_CNT and the data space allocated
is not enough for ethtool_get_strings(), which will cause random memory
corruption.
When SLAB and DEBUG_SLAB are both enabled, memory corruptions like
the following can be observed without this patch:
[ 43.115200] Slab corruption (Not tainted): Acpi-ParseExt start=ffff801fb0b69030, len=80
[ 43.115206] Redzone: 0x9f911029d006462/0x5f78745f31657070.
[ 43.115208] Last user: [<5f7272655f746b70>](0x5f7272655f746b70)
[ 43.115214] 010: 70 70 65 31 5f 74 78 5f 70 6b 74 00 6b 6b 6b 6b ppe1_tx_pkt.kkkk
[ 43.115217] 030: 70 70 65 31 5f 74 78 5f 70 6b 74 5f 6f 6b 00 6b ppe1_tx_pkt_ok.k
[ 43.115218] Next obj: start=ffff801fb0b69098, len=80
[ 43.115220] Redzone: 0x706d655f6f666966/0x9f911029d74e35b.
[ 43.115229] Last user: [<ffff0000084b11b0>](acpi_os_release_object+0x28/0x38)
[ 43.115231] 000: 74 79 00 6b 6b 6b 6b 6b 70 70 65 31 5f 74 78 5f ty.kkkkkppe1_tx_
[ 43.115232] 010: 70 6b 74 5f 65 72 72 5f 63 73 75 6d 5f 66 61 69 pkt_err_csum_fai
Signed-off-by: Timmy Li <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int xmlInitializeDict(void) {
if (xmlDictInitialized)
return(1);
if ((xmlDictMutex = xmlNewRMutex()) == NULL)
return(0);
#ifdef DICT_RANDOMIZATION
srand(time(NULL));
#endif
xmlDictInitialized = 1;
return(1);
} | 0 | [
"CWE-399"
] | libxml2 | 8973d58b7498fa5100a876815476b81fd1a2412a | 83,481,768,655,310,430,000,000,000,000,000,000,000 | 13 | Add hash randomization to hash and dict structures
Following http://www.ocert.org/advisories/ocert-2011-003.html
it seems that having hash randomization might be a good idea
when using XML with untrusted data
* configure.in: lookup for rand, srand and time
* dict.c: add randomization to dictionaries hash tables
* hash.c: add randomization to normal hash tables |
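The core of the mitigation is mixing a per-process random salt into the hash so colliding keys cannot be precomputed offline. A minimal sketch of a salted string hash (hypothetical helper names; not the actual libxml2 dict code):

#include <stdlib.h>
#include <time.h>

static unsigned long dict_seed;

static void dict_seed_init(void)
{
	srand((unsigned) time(NULL));    /* as in the patch; a real CSPRNG would be stronger */
	dict_seed = (unsigned long) rand();
}

static unsigned long salted_hash(const char *name)
{
	unsigned long value = dict_seed; /* start from the random salt */

	while (*name)
		value = value * 31 + (unsigned char) *name++;
	return value;
}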
httpHeaderInitModule(void)
{
/* check that we have enough space for masks */
assert(8 * sizeof(HttpHeaderMask) >= Http::HdrType::enumEnd_);
// masks are needed for stats page still
for (auto h : WholeEnum<Http::HdrType>()) {
if (Http::HeaderLookupTable.lookup(h).request)
CBIT_SET(RequestHeadersMask,h);
if (Http::HeaderLookupTable.lookup(h).reply)
CBIT_SET(ReplyHeadersMask,h);
}
/* header stats initialized by class constructor */
assert(HttpHeaderStatCount == hoReply + 1);
/* init dependent modules */
httpHdrCcInitModule();
httpHdrScInitModule();
httpHeaderRegisterWithCacheManager();
} | 0 | [
"CWE-444"
] | squid | 9c8e2a71aa1d3c159a319d9365c346c48dc783a5 | 207,875,408,682,432,860,000,000,000,000,000,000,000 | 22 | Enforce token characters for field-name (#700)
RFC 7230 defines field-name as a token. Request splitting and cache
poisoning attacks have used non-token characters to fool broken HTTP
agents behind or in front of Squid for years. This change should
significantly reduce that abuse.
If we discover exceptional situations that need special treatment, the
relaxed parser can allow them on a case-by-case basis (while being extra
careful about framing-related header fields), just like we already
tolerate some header whitespace (e.g., between the response header
field-name and colon). |
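RFC 7230's field-name is one or more tchar characters; everything else (including whitespace and CR/LF, the raw material of request splitting) must be rejected. A self-contained validator in that spirit (a sketch, not Squid's parser):

#include <ctype.h>
#include <stdbool.h>
#include <string.h>

/* tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
 *         "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA          (RFC 7230) */
static bool is_tchar(unsigned char c)
{
	if (c == '\0')
		return false;  /* strchr() would otherwise match the terminator */
	return isalnum(c) || strchr("!#$%&'*+-.^_`|~", c) != NULL;
}

static bool is_valid_field_name(const char *name, size_t len)
{
	size_t i;

	if (len == 0)
		return false;
	for (i = 0; i < len; i++)
		if (!is_tchar((unsigned char) name[i]))
			return false;
	return true;
}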
static void intern_alloc(mlsize_t whsize, mlsize_t num_objects)
{
mlsize_t wosize;
if (whsize == 0) {
intern_obj_table = NULL;
intern_extra_block = NULL;
intern_block = 0;
return;
}
wosize = Wosize_whsize(whsize);
if (wosize > Max_wosize) {
/* Round desired size up to next page */
asize_t request =
((Bsize_wsize(whsize) + Page_size - 1) >> Page_log) << Page_log;
intern_extra_block = caml_alloc_for_heap(request);
if (intern_extra_block == NULL) caml_raise_out_of_memory();
intern_color = caml_allocation_color(intern_extra_block);
intern_dest = (header_t *) intern_extra_block;
} else {
/* this is a specialised version of caml_alloc from alloc.c */
if (wosize == 0){
intern_block = Atom (String_tag);
}else if (wosize <= Max_young_wosize){
intern_block = caml_alloc_small (wosize, String_tag);
}else{
intern_block = caml_alloc_shr (wosize, String_tag);
/* do not do the urgent_gc check here because it might darken
intern_block into gray and break the Assert 3 lines down */
}
intern_header = Hd_val(intern_block);
intern_color = Color_hd(intern_header);
Assert (intern_color == Caml_white || intern_color == Caml_black);
intern_dest = (header_t *) Hp_val(intern_block);
intern_extra_block = NULL;
}
obj_counter = 0;
if (num_objects > 0)
intern_obj_table = (value *) caml_stat_alloc(num_objects * sizeof(value));
else
intern_obj_table = NULL;
} | 0 | [
"CWE-200"
] | ocaml | 659615c7b100a89eafe6253e7a5b9d84d0e8df74 | 209,873,146,799,854,740,000,000,000,000,000,000,000 | 42 | fix PR#7003 and a few other bugs caused by misuse of Int_val
git-svn-id: http://caml.inria.fr/svn/ocaml/trunk@16525 f963ae5c-01c2-4b8c-9fe0-0dff7051ff02 |
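The misuse in question: Int_val() unboxes an OCaml integer, but assigning the result to a C int silently truncates on LP64 platforms, where OCaml's native int carries 63 bits. The safe pattern (a sketch of the class of fix, not the literal patch) stores the value in an intnat:

#include <caml/mlvalues.h>

static intnat get_length(value v)
{
	/* int len = Int_val(v);   -- truncates above 2^31 on 64-bit */
	return Long_val(v);        /* intnat keeps the full unboxed width */
}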
xdaemon(bool nochdir, bool noclose, bool exitflag)
{
pid_t pid;
int ret;
if (log_file_name)
flush_log_file();
	/* In case fork fails. */
pid = fork();
if (pid < 0) {
log_message(LOG_INFO, "xdaemon: fork error");
return -1;
}
	/* If this is the parent process. */
if (pid != 0) {
if (!exitflag)
exit(0);
else
return pid;
}
/* Become session leader and get pid. */
pid = setsid();
	if (pid == -1) {
log_message(LOG_INFO, "xdaemon: setsid error");
return -1;
}
/* Change directory to root. */
if (!nochdir) {
ret = chdir("/");
if (ret < 0) {
log_message(LOG_INFO, "xdaemon: chdir error");
}
}
/* File descriptor close. */
if (!noclose)
set_std_fd(true);
return 0;
} | 1 | [
"CWE-200"
] | keepalived | 26c8d6374db33bcfcdcd758b1282f12ceef4b94f | 57,528,622,643,361,950,000,000,000,000,000,000,000 | 44 | Disable fopen_safe() append mode by default
If a non-privileged user creates /tmp/keepalived.log and has it open
for read (e.g. tail -f), then even though keepalived will change the
owner to root and remove all read/write permissions from non owners,
the application which already has the file open will be able to read
the added log entries.
Accordingly, opening a file in append mode is disabled by default, and
only enabled if --enable-smtp-alert-debug or --enable-log-file (which
are debugging options and unset by default) are enabled.
This should further alleviate security concerns related to CVE-2018-19046.
Signed-off-by: Quentin Armitage <[email protected]> |
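The hazard being closed: a file pre-created by an attacker stays readable through descriptors they already hold, no matter how the daemon later chowns it. A common mitigation (a generic sketch, not keepalived's fopen_safe()) is to refuse to adopt an existing file at all:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

static FILE *open_log_file(const char *path)
{
	/* O_EXCL: fail if the file already exists, so nobody can hand us
	 * a file they still have open; O_NOFOLLOW rejects symlink tricks. */
	int fd = open(path, O_CREAT | O_EXCL | O_WRONLY | O_NOFOLLOW,
	              S_IRUSR | S_IWUSR);

	if (fd == -1)
		return NULL;
	return fdopen(fd, "w");
}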
static const char *build_version_platform_to_string(ut32 platform) {
switch (platform) {
case 1:
return "macOS";
case 2:
return "iOS";
case 3:
return "tvOS";
case 4:
return "watchOS";
case 5:
return "bridgeOS";
case 6:
return "iOSmac";
case 7:
return "iOS Simulator";
case 8:
return "tvOS Simulator";
case 9:
return "watchOS Simulator";
default:
return "unknown";
}
} | 0 | [
"CWE-125",
"CWE-787"
] | radare2 | 0052500c1ed5bf8263b26b9fd7773dbdc6f170c4 | 100,115,891,963,313,430,000,000,000,000,000,000,000 | 24 | Fix heap OOB read in macho.iterate_chained_fixups ##crash
* Reported by peacock-doris via huntr.dev
* Reproducer 'tests_65305'
mrmacete:
* Return early if segs_count is 0
* Initialize segs_count also for reconstructed fixups
Co-authored-by: pancake <[email protected]>
Co-authored-by: Francesco Tamagni <[email protected]> |
static int alg_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
int err = -EBUSY;
lock_sock(sk);
if (ask->refcnt)
goto unlock;
type = ask->type;
err = -ENOPROTOOPT;
if (level != SOL_ALG || !type)
goto unlock;
switch (optname) {
case ALG_SET_KEY:
if (sock->state == SS_CONNECTED)
goto unlock;
if (!type->setkey)
goto unlock;
err = alg_setkey(sk, optval, optlen);
break;
case ALG_SET_AEAD_AUTHSIZE:
if (sock->state == SS_CONNECTED)
goto unlock;
if (!type->setauthsize)
goto unlock;
err = type->setauthsize(ask->private, optlen);
}
unlock:
release_sock(sk);
return err;
} | 0 | [
"CWE-416"
] | linux | 9060cb719e61b685ec0102574e10337fa5f445ea | 210,978,281,883,541,400,000,000,000,000,000,000,000 | 40 | net: crypto set sk to NULL when af_alg_release.
KASAN has found use-after-free in sockfs_setattr.
The existing commit 6d8c50dcb029 ("socket: close race condition between sock_close()
and sockfs_setattr()") fixed a similar issue, but it seems to have missed
that the crypto module forgets to set the sk to NULL after af_alg_release.
KASAN report details as below:
BUG: KASAN: use-after-free in sockfs_setattr+0x120/0x150
Write of size 4 at addr ffff88837b956128 by task syz-executor0/4186
CPU: 2 PID: 4186 Comm: syz-executor0 Not tainted xxx + #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
1.10.2-1ubuntu1 04/01/2014
Call Trace:
dump_stack+0xca/0x13e
print_address_description+0x79/0x330
? vprintk_func+0x5e/0xf0
kasan_report+0x18a/0x2e0
? sockfs_setattr+0x120/0x150
sockfs_setattr+0x120/0x150
? sock_register+0x2d0/0x2d0
notify_change+0x90c/0xd40
? chown_common+0x2ef/0x510
chown_common+0x2ef/0x510
? chmod_common+0x3b0/0x3b0
? __lock_is_held+0xbc/0x160
? __sb_start_write+0x13d/0x2b0
? __mnt_want_write+0x19a/0x250
do_fchownat+0x15c/0x190
? __ia32_sys_chmod+0x80/0x80
? trace_hardirqs_on_thunk+0x1a/0x1c
__x64_sys_fchownat+0xbf/0x160
? lockdep_hardirqs_on+0x39a/0x5e0
do_syscall_64+0xc8/0x580
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462589
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89
f7 48 89 d6 48 89
ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3
48 c7 c1 bc ff ff
ff f7 d8 64 89 01 48
RSP: 002b:00007fb4b2c83c58 EFLAGS: 00000246 ORIG_RAX: 0000000000000104
RAX: ffffffffffffffda RBX: 000000000072bfa0 RCX: 0000000000462589
RDX: 0000000000000000 RSI: 00000000200000c0 RDI: 0000000000000007
RBP: 0000000000000005 R08: 0000000000001000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007fb4b2c846bc
R13: 00000000004bc733 R14: 00000000006f5138 R15: 00000000ffffffff
Allocated by task 4185:
kasan_kmalloc+0xa0/0xd0
__kmalloc+0x14a/0x350
sk_prot_alloc+0xf6/0x290
sk_alloc+0x3d/0xc00
af_alg_accept+0x9e/0x670
hash_accept+0x4a3/0x650
__sys_accept4+0x306/0x5c0
__x64_sys_accept4+0x98/0x100
do_syscall_64+0xc8/0x580
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Freed by task 4184:
__kasan_slab_free+0x12e/0x180
kfree+0xeb/0x2f0
__sk_destruct+0x4e6/0x6a0
sk_destruct+0x48/0x70
__sk_free+0xa9/0x270
sk_free+0x2a/0x30
af_alg_release+0x5c/0x70
__sock_release+0xd3/0x280
sock_close+0x1a/0x20
__fput+0x27f/0x7f0
task_work_run+0x136/0x1b0
exit_to_usermode_loop+0x1a7/0x1d0
do_syscall_64+0x461/0x580
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Syzkaller reproducer:
r0 = perf_event_open(&(0x7f0000000000)={0x0, 0x70, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @perf_config_ext}, 0x0, 0x0,
0xffffffffffffffff, 0x0)
r1 = socket$alg(0x26, 0x5, 0x0)
getrusage(0x0, 0x0)
bind(r1, &(0x7f00000001c0)=@alg={0x26, 'hash\x00', 0x0, 0x0,
'sha256-ssse3\x00'}, 0x80)
r2 = accept(r1, 0x0, 0x0)
r3 = accept4$unix(r2, 0x0, 0x0, 0x0)
r4 = dup3(r3, r0, 0x0)
fchownat(r4, &(0x7f00000000c0)='\x00', 0x0, 0x0, 0x1000)
Fixes: 6d8c50dcb029 ("socket: close race condition between sock_close() and sockfs_setattr()")
Signed-off-by: Mao Wenan <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
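The shape of the fix (simplified from the actual af_alg_release() change): detach and drop sk inside the release path, so a concurrent sockfs_setattr() sees NULL instead of a dangling pointer:

static int af_alg_release(struct socket *sock)
{
	if (sock->sk) {
		sock_put(sock->sk);   /* drop our reference */
		sock->sk = NULL;      /* sockfs_setattr() checks this */
	}
	return 0;
}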
static int get_page_ext(struct page *pg,
unsigned int *pgroup, unsigned int *pidx)
{
union page_ext ext = { .mapping = pg->mapping };
struct xen_netbk *netbk;
unsigned int group, idx;
group = ext.e.group - 1;
if (group < 0 || group >= xen_netbk_group_nr)
return 0;
netbk = &xen_netbk[group];
idx = ext.e.idx;
if ((idx < 0) || (idx >= MAX_PENDING_REQS))
return 0;
if (netbk->mmap_pages[idx] != pg)
return 0;
*pgroup = group;
*pidx = idx;
return 1;
} | 0 | [
"CWE-399"
] | linux | 7d5145d8eb2b9791533ffe4dc003b129b9696c48 | 267,284,805,124,093,260,000,000,000,000,000,000,000 | 27 | xen/netback: don't leak pages on failure in xen_netbk_tx_check_gop.
Signed-off-by: Matthew Daley <[email protected]>
Reviewed-by: Konrad Rzeszutek Wilk <[email protected]>
Acked-by: Ian Campbell <[email protected]>
Acked-by: Jan Beulich <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static rsRetVal qAddFixedArray(qqueue_t *pThis, void* in)
{
DEFiRet;
ASSERT(pThis != NULL);
pThis->tVars.farray.pBuf[pThis->tVars.farray.tail] = in;
pThis->tVars.farray.tail++;
if (pThis->tVars.farray.tail == pThis->iMaxQueueSize)
pThis->tVars.farray.tail = 0;
RETiRet;
} | 0 | [
"CWE-772"
] | rsyslog | dfa88369d4ca4290db56b843f9eabdae1bfe0fd5 | 197,241,734,038,808,200,000,000,000,000,000,000,000 | 12 | bugfix: memory leak when $RepeatedMsgReduction on was used
bug tracker: http://bugzilla.adiscon.com/show_bug.cgi?id=225 |
const CImg<T>& save_cimg(const char *const filename, const bool is_compressed=false) const {
CImgList<T>(*this,true).save_cimg(filename,is_compressed);
return *this; | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 268,502,633,829,490,660,000,000,000,000,000,000,000 | 4 | Fix other issues in 'CImg<T>::load_bmp()'. |
TfLiteRegistration* Register_MAX_POOL_GENERIC_OPT() {
static TfLiteRegistration r = {pooling::Init, pooling::Free,
pooling::GenericPrepare<pooling::kMax>,
pooling::MaxEval<pooling::kGenericOptimized>};
return &r;
} | 0 | [
"CWE-703",
"CWE-835"
] | tensorflow | dfa22b348b70bb89d6d6ec0ff53973bacb4f4695 | 300,911,182,407,001,700,000,000,000,000,000,000,000 | 6 | Prevent a division by 0 in average ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3 |
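The guard belongs in the kernel's Prepare step, before any output-size arithmetic: zero strides would otherwise end up as divisors. A hedged sketch using TFLite's C types (header paths and helper name are assumptions, not the literal patch):

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"

static TfLiteStatus validate_pool_params(TfLiteContext *context,
                                         const TfLitePoolParams *params)
{
	/* Reject zero strides before they are used as divisors when
	 * computing the output dimensions. */
	TF_LITE_ENSURE(context, params->stride_height > 0);
	TF_LITE_ENSURE(context, params->stride_width > 0);
	return kTfLiteOk;
}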
static void fits_delete_hdulist (FITS_HDU_LIST *hl)
{FITS_HDU_LIST *next;
while (hl != NULL)
{
fits_delete_recordlist (hl->header_record_list);
next = hl->next_hdu;
hl->next_hdu = NULL;
free ((char *)hl);
hl = next;
}
} | 0 | [
"CWE-476"
] | gimp | ace45631595e8781a1420842582d67160097163c | 131,922,365,623,145,930,000,000,000,000,000,000,000 | 13 | Bug 676804 - file handling DoS for fit file format
Apply patch from [email protected] which fixes a buffer overflow on
broken/malicious fits files. |
void jas_matrix_setall(jas_matrix_t *matrix, jas_seqent_t val)
{
int i;
int j;
jas_seqent_t *rowstart;
int rowstep;
jas_seqent_t *data;
rowstep = jas_matrix_rowstep(matrix);
for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i,
rowstart += rowstep) {
for (j = matrix->numcols_, data = rowstart; j > 0; --j,
++data) {
*data = val;
}
}
} | 1 | [
"CWE-20"
] | jasper | c87ad330a8b8d6e5eb0065675601fdfae08ebaab | 63,575,735,078,061,300,000,000,000,000,000,000,000 | 17 | Added fix for CVE-2016-2089. |
static int emulator_read_write_onepage(unsigned long addr, void *val,
unsigned int bytes,
struct x86_exception *exception,
struct kvm_vcpu *vcpu,
const struct read_write_emulator_ops *ops)
{
gpa_t gpa;
int handled, ret;
bool write = ops->write;
struct kvm_mmio_fragment *frag;
ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
if (ret < 0)
return X86EMUL_PROPAGATE_FAULT;
/* For APIC access vmexit */
if (ret)
goto mmio;
if (ops->read_write_emulate(vcpu, gpa, val, bytes))
return X86EMUL_CONTINUE;
mmio:
/*
* Is this MMIO handled locally?
*/
handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
if (handled == bytes)
return X86EMUL_CONTINUE;
gpa += handled;
bytes -= handled;
val += handled;
WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
frag->gpa = gpa;
frag->data = val;
frag->len = bytes;
return X86EMUL_CONTINUE;
} | 0 | [] | linux | 6d1068b3a98519247d8ba4ec85cd40ac136dbdf9 | 204,012,653,548,878,240,000,000,000,000,000,000,000 | 42 | KVM: x86: invalid opcode oops on SET_SREGS with OSXSAVE bit set (CVE-2012-4461)
On hosts without the XSAVE support unprivileged local user can trigger
oops similar to the one below by setting X86_CR4_OSXSAVE bit in guest
cr4 register using KVM_SET_SREGS ioctl and later issuing KVM_RUN
ioctl.
invalid opcode: 0000 [#2] SMP
Modules linked in: tun ip6table_filter ip6_tables ebtable_nat ebtables
...
Pid: 24935, comm: zoog_kvm_monito Tainted: G D 3.2.0-3-686-pae
EIP: 0060:[<f8b9550c>] EFLAGS: 00210246 CPU: 0
EIP is at kvm_arch_vcpu_ioctl_run+0x92a/0xd13 [kvm]
EAX: 00000001 EBX: 000f387e ECX: 00000000 EDX: 00000000
ESI: 00000000 EDI: 00000000 EBP: ef5a0060 ESP: d7c63e70
DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
Process zoog_kvm_monito (pid: 24935, ti=d7c62000 task=ed84a0c0
task.ti=d7c62000)
Stack:
00000001 f70a1200 f8b940a9 ef5a0060 00000000 00200202 f8769009 00000000
ef5a0060 000f387e eda5c020 8722f9c8 00015bae 00000000 ed84a0c0 ed84a0c0
c12bf02d 0000ae80 ef7f8740 fffffffb f359b740 ef5a0060 f8b85dc1 0000ae80
Call Trace:
[<f8b940a9>] ? kvm_arch_vcpu_ioctl_set_sregs+0x2fe/0x308 [kvm]
...
[<c12bfb44>] ? syscall_call+0x7/0xb
Code: 89 e8 e8 14 ee ff ff ba 00 00 04 00 89 e8 e8 98 48 ff ff 85 c0 74
1e 83 7d 48 00 75 18 8b 85 08 07 00 00 31 c9 8b 95 0c 07 00 00 <0f> 01
d1 c7 45 48 01 00 00 00 c7 45 1c 01 00 00 00 0f ae f0 89
EIP: [<f8b9550c>] kvm_arch_vcpu_ioctl_run+0x92a/0xd13 [kvm] SS:ESP
0068:d7c63e70
QEMU first retrieves the supported features via KVM_GET_SUPPORTED_CPUID
and then sets them later. So guest's X86_FEATURE_XSAVE should be masked
out on hosts without X86_FEATURE_XSAVE, making kvm_set_cr4 with
X86_CR4_OSXSAVE fail. Userspaces that allow specifying guest cpuid with
X86_FEATURE_XSAVE even on hosts that do not support it, might be
susceptible to this attack from inside the guest as well.
Allow setting X86_CR4_OSXSAVE bit only if host has XSAVE support.
Signed-off-by: Petr Matousek <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
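The fix is a guard in CR4 validation: refuse X86_CR4_OSXSAVE unless the guest CPUID (which userspace is expected to derive from KVM_GET_SUPPORTED_CPUID) advertises XSAVE. Schematically (helper name as in that era's KVM; treat it as an assumption here):

static int cr4_osxsave_allowed(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	/* Refuse OSXSAVE when the guest CPUID does not advertise XSAVE. */
	if ((cr4 & X86_CR4_OSXSAVE) && !guest_cpuid_has_xsave(vcpu))
		return 0;
	return 1;
}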
static bool cma_protocol_roce(const struct rdma_cm_id *id)
{
struct ib_device *device = id->device;
const u32 port_num = id->port_num ?: rdma_start_port(device);
return rdma_protocol_roce(device, port_num);
} | 0 | [
"CWE-416"
] | linux | bc0bdc5afaa740d782fbf936aaeebd65e5c2921d | 24,863,401,611,367,520,000,000,000,000,000,000,000 | 7 | RDMA/cma: Do not change route.addr.src_addr.ss_family
If the state is not idle then rdma_bind_addr() will immediately fail and
no change to global state should happen.
For instance if the state is already RDMA_CM_LISTEN then this will corrupt
the src_addr and would cause the test in cma_cancel_operation():
if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
To view a mangled src_addr, eg with a IPv6 loopback address but an IPv4
family, failing the test.
This would manifest as this trace from syzkaller:
BUG: KASAN: use-after-free in __list_add_valid+0x93/0xa0 lib/list_debug.c:26
Read of size 8 at addr ffff8881546491e0 by task syz-executor.1/32204
CPU: 1 PID: 32204 Comm: syz-executor.1 Not tainted 5.12.0-rc8-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:79 [inline]
dump_stack+0x141/0x1d7 lib/dump_stack.c:120
print_address_description.constprop.0.cold+0x5b/0x2f8 mm/kasan/report.c:232
__kasan_report mm/kasan/report.c:399 [inline]
kasan_report.cold+0x7c/0xd8 mm/kasan/report.c:416
__list_add_valid+0x93/0xa0 lib/list_debug.c:26
__list_add include/linux/list.h:67 [inline]
list_add_tail include/linux/list.h:100 [inline]
cma_listen_on_all drivers/infiniband/core/cma.c:2557 [inline]
rdma_listen+0x787/0xe00 drivers/infiniband/core/cma.c:3751
ucma_listen+0x16a/0x210 drivers/infiniband/core/ucma.c:1102
ucma_write+0x259/0x350 drivers/infiniband/core/ucma.c:1732
vfs_write+0x28e/0xa30 fs/read_write.c:603
ksys_write+0x1ee/0x250 fs/read_write.c:658
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
Which is indicating that an rdma_id_private was destroyed without doing
cma_cancel_listens().
Instead of trying to re-use the src_addr memory to indirectly create an
any address build one explicitly on the stack and bind to that as any
other normal flow would do.
Link: https://lore.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 732d41c545bb ("RDMA/cma: Make the locking for automatic state transition more clear")
Reported-by: [email protected]
Tested-by: Hao Sun <[email protected]>
Reviewed-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
flatpak_deploy_data_get_runtime (GVariant *deploy_data)
{
return flatpak_deploy_data_get_string (deploy_data, "runtime");
} | 0 | [
"CWE-668"
] | flatpak | cd2142888fc4c199723a0dfca1f15ea8788a5483 | 321,830,495,758,055,250,000,000,000,000,000,000,000 | 4 | Don't expose /proc when running apply_extra
As shown by CVE-2019-5736, it is sometimes possible for the sandbox
app to access outside files using /proc/self/exe. This is not
typically an issue for flatpak as the sandbox runs as the user which
has no permissions to e.g. modify the host files.
However, when installing apps using extra-data into the system repo
we *do* actually run a sandbox as root. So, in this case we disable mounting
/proc in the sandbox, which will neuter attacks like this. |
static void start_check_enables(struct smi_info *smi_info)
{
unsigned char msg[2];
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
start_new_msg(smi_info, msg, 2);
smi_info->si_state = SI_CHECKING_ENABLES;
} | 0 | [
"CWE-416"
] | linux | 401e7e88d4ef80188ffa07095ac00456f901b8c4 | 86,458,536,904,452,640,000,000,000,000,000,000,000 | 10 | ipmi_si: fix use-after-free of resource->name
When we excute the following commands, we got oops
rmmod ipmi_si
cat /proc/ioports
[ 1623.482380] Unable to handle kernel paging request at virtual address ffff00000901d478
[ 1623.482382] Mem abort info:
[ 1623.482383] ESR = 0x96000007
[ 1623.482385] Exception class = DABT (current EL), IL = 32 bits
[ 1623.482386] SET = 0, FnV = 0
[ 1623.482387] EA = 0, S1PTW = 0
[ 1623.482388] Data abort info:
[ 1623.482389] ISV = 0, ISS = 0x00000007
[ 1623.482390] CM = 0, WnR = 0
[ 1623.482393] swapper pgtable: 4k pages, 48-bit VAs, pgdp = 00000000d7d94a66
[ 1623.482395] [ffff00000901d478] pgd=000000dffbfff003, pud=000000dffbffe003, pmd=0000003f5d06e003, pte=0000000000000000
[ 1623.482399] Internal error: Oops: 96000007 [#1] SMP
[ 1623.487407] Modules linked in: ipmi_si(E) nls_utf8 isofs rpcrdma ib_iser ib_srpt target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_umad rdma_cm ib_cm dm_mirror dm_region_hash dm_log iw_cm dm_mod aes_ce_blk crypto_simd cryptd aes_ce_cipher ses ghash_ce sha2_ce enclosure sha256_arm64 sg sha1_ce hisi_sas_v2_hw hibmc_drm sbsa_gwdt hisi_sas_main ip_tables mlx5_ib ib_uverbs marvell ib_core mlx5_core ixgbe mdio hns_dsaf ipmi_devintf hns_enet_drv ipmi_msghandler hns_mdio [last unloaded: ipmi_si]
[ 1623.532410] CPU: 30 PID: 11438 Comm: cat Kdump: loaded Tainted: G E 5.0.0-rc3+ #168
[ 1623.541498] Hardware name: Huawei TaiShan 2280 /BC11SPCD, BIOS 1.37 11/21/2017
[ 1623.548822] pstate: a0000005 (NzCv daif -PAN -UAO)
[ 1623.553684] pc : string+0x28/0x98
[ 1623.557040] lr : vsnprintf+0x368/0x5e8
[ 1623.560837] sp : ffff000013213a80
[ 1623.564191] x29: ffff000013213a80 x28: ffff00001138abb5
[ 1623.569577] x27: ffff000013213c18 x26: ffff805f67d06049
[ 1623.574963] x25: 0000000000000000 x24: ffff00001138abb5
[ 1623.580349] x23: 0000000000000fb7 x22: ffff0000117ed000
[ 1623.585734] x21: ffff000011188fd8 x20: ffff805f67d07000
[ 1623.591119] x19: ffff805f67d06061 x18: ffffffffffffffff
[ 1623.596505] x17: 0000000000000200 x16: 0000000000000000
[ 1623.601890] x15: ffff0000117ed748 x14: ffff805f67d07000
[ 1623.607276] x13: ffff805f67d0605e x12: 0000000000000000
[ 1623.612661] x11: 0000000000000000 x10: 0000000000000000
[ 1623.618046] x9 : 0000000000000000 x8 : 000000000000000f
[ 1623.623432] x7 : ffff805f67d06061 x6 : fffffffffffffffe
[ 1623.628817] x5 : 0000000000000012 x4 : ffff00000901d478
[ 1623.634203] x3 : ffff0a00ffffff04 x2 : ffff805f67d07000
[ 1623.639588] x1 : ffff805f67d07000 x0 : ffffffffffffffff
[ 1623.644974] Process cat (pid: 11438, stack limit = 0x000000008d4cbc10)
[ 1623.651592] Call trace:
[ 1623.654068] string+0x28/0x98
[ 1623.657071] vsnprintf+0x368/0x5e8
[ 1623.660517] seq_vprintf+0x70/0x98
[ 1623.668009] seq_printf+0x7c/0xa0
[ 1623.675530] r_show+0xc8/0xf8
[ 1623.682558] seq_read+0x330/0x440
[ 1623.689877] proc_reg_read+0x78/0xd0
[ 1623.697346] __vfs_read+0x60/0x1a0
[ 1623.704564] vfs_read+0x94/0x150
[ 1623.711339] ksys_read+0x6c/0xd8
[ 1623.717939] __arm64_sys_read+0x24/0x30
[ 1623.725077] el0_svc_common+0x120/0x148
[ 1623.732035] el0_svc_handler+0x30/0x40
[ 1623.738757] el0_svc+0x8/0xc
[ 1623.744520] Code: d1000406 aa0103e2 54000149 b4000080 (39400085)
[ 1623.753441] ---[ end trace f91b6a4937de9835 ]---
[ 1623.760871] Kernel panic - not syncing: Fatal exception
[ 1623.768935] SMP: stopping secondary CPUs
[ 1623.775718] Kernel Offset: disabled
[ 1623.781998] CPU features: 0x002,21006008
[ 1623.788777] Memory Limit: none
[ 1623.798329] Starting crashdump kernel...
[ 1623.805202] Bye!
If io_setup() succeeds in try_smi_init() but try_smi_init() goes to
out_err before calling ipmi_register_smi(), then ipmi_unregister_smi()
will not be called when the module is removed. As a result, the resource
allocated in io_setup() cannot be freed, yet the name (DEVICE_NAME) of
that resource is freed on module removal. This causes a use-after-free
when reading /proc/ioports.
Fix this by calling io_cleanup() when try_smi_init() goes to out_err,
and don't call io_cleanup() unless io_setup() has returned successfully,
to avoid warning prints.
Fixes: 93c303d2045b ("ipmi_si: Clean up shutdown a bit")
Cc: [email protected]
Reported-by: NuoHan Qiao <[email protected]>
Suggested-by: Corey Minyard <[email protected]>
Signed-off-by: Yang Yingliang <[email protected]>
Signed-off-by: Corey Minyard <[email protected]> |
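The general shape of the fix is the usual C error-unwinding discipline: once io_setup() has succeeded, every later failure path must run io_cleanup() before returning, and never before. A schematic sketch (register_everything() is a hypothetical stand-in for the driver's later init work):

static int try_smi_init_sketch(struct smi_info *smi)
{
	int rv;

	rv = io_setup(smi);
	if (rv)
		return rv;              /* nothing to unwind yet */

	rv = register_everything(smi);  /* hypothetical later step */
	if (rv)
		goto out_err;

	return 0;

out_err:
	io_cleanup(smi);                /* undo io_setup() on failure */
	return rv;
}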
int dtls1_read_failed(SSL *s, int code)
{
if ( code > 0)
{
fprintf( stderr, "invalid state reached %s:%d", __FILE__, __LINE__);
return 1;
}
if (!dtls1_is_timer_expired(s))
{
/* not a timeout, none of our business,
let higher layers handle this. in fact it's probably an error */
return code;
}
if ( ! SSL_in_init(s)) /* done, no need to send a retransmit */
{
BIO_set_flags(SSL_get_rbio(s), BIO_FLAGS_READ);
return code;
}
#if 0 /* for now, each alert contains only one record number */
item = pqueue_peek(state->rcvd_records);
if ( item )
{
/* send an alert immediately for all the missing records */
}
else
#endif
#if 0 /* no more alert sending, just retransmit the last set of messages */
if ( state->timeout.read_timeouts >= DTLS1_TMO_READ_COUNT)
ssl3_send_alert(s,SSL3_AL_WARNING,
DTLS1_AD_MISSING_HANDSHAKE_MESSAGE);
#endif
return dtls1_handle_timeout(s);
} | 1 | [] | openssl | 4817504d069b4c5082161b02a22116ad75f822b1 | 165,134,939,975,652,210,000,000,000,000,000,000,000 | 38 | PR: 2658
Submitted by: Robin Seggelmann <[email protected]>
Reviewed by: steve
Support for TLS/DTLS heartbeats. |
static int __net_init xfrm_user_net_init(struct net *net)
{
struct sock *nlsk;
struct netlink_kernel_cfg cfg = {
.groups = XFRMNLGRP_MAX,
.input = xfrm_netlink_rcv,
};
nlsk = netlink_kernel_create(net, NETLINK_XFRM, THIS_MODULE, &cfg);
if (nlsk == NULL)
return -ENOMEM;
net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
rcu_assign_pointer(net->xfrm.nlsk, nlsk);
return 0;
} | 0 | [
"CWE-200"
] | linux | 1f86840f897717f86d523a13e99a447e6a5d2fa5 | 240,317,985,153,897,050,000,000,000,000,000,000,000 | 15 | xfrm_user: fix info leak in copy_to_user_tmpl()
The memory used for the template copy is a local stack variable. As
struct xfrm_user_tmpl contains multiple holes added by the compiler for
alignment, not initializing the memory will lead to leaking stack bytes
to userland. Add an explicit memset(0) to avoid the info leak.
Initial version of the patch by Brad Spengler.
Cc: Brad Spengler <[email protected]>
Signed-off-by: Mathias Krause <[email protected]>
Acked-by: Steffen Klassert <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
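A minimal userspace sketch of the pattern behind the fix (the struct layout is illustrative, not the real xfrm_user_tmpl): zero the whole stack copy before filling it, so compiler-inserted padding holes cannot carry leftover stack bytes to whoever reads the copy:

#include <string.h>

struct tmpl_demo {		/* the compiler may pad between these fields */
	unsigned char family;
	unsigned int  reqid;
	unsigned char mode;
};

static void fill_tmpl_demo(struct tmpl_demo *out)
{
	struct tmpl_demo t;

	memset(&t, 0, sizeof(t));	/* the fix: clear padding before use */
	t.family = 2;
	t.reqid  = 1;
	t.mode   = 0;
	*out = t;			/* now safe to copy out verbatim */
}

memset is used rather than an initializer because initializers are not guaranteed to clear padding bytes.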
aspath_cmp (const void *arg1, const void *arg2)
{
const struct assegment *seg1 = ((const struct aspath *)arg1)->segments;
const struct assegment *seg2 = ((const struct aspath *)arg2)->segments;
while (seg1 || seg2)
{
int i;
if ((!seg1 && seg2) || (seg1 && !seg2))
return 0;
if (seg1->type != seg2->type)
return 0;
if (seg1->length != seg2->length)
return 0;
for (i = 0; i < seg1->length; i++)
if (seg1->as[i] != seg2->as[i])
return 0;
seg1 = seg1->next;
seg2 = seg2->next;
}
return 1;
} | 0 | [
"CWE-20"
] | quagga | 7a42b78be9a4108d98833069a88e6fddb9285008 | 261,000,408,058,536,200,000,000,000,000,000,000,000 | 22 | bgpd: Fix AS_PATH size calculation for long paths
If you have an AS_PATH with more entries than
can be written into a single segment (AS_SEGMENT_MAX),
it needs to be broken up. The code that noticed
that the AS_PATH needs to be broken up was not
correctly calculating the size of the resulting
message. This patch addresses this issue. |
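A hedged sketch of the size arithmetic involved (the constants and byte widths are illustrative, not Quagga's exact wire encoding): a path with more ASNs than fit in one segment needs one extra segment header per chunk, and forgetting those headers undercounts the message size:

#include <stddef.h>

#define SEG_MAX	255	/* max ASNs per segment (illustrative) */
#define SEG_HDR	2	/* per-segment header bytes (illustrative) */

static size_t aspath_wire_size_demo(size_t n_as, size_t as_bytes)
{
	/* one header per ceil(n_as / SEG_MAX) segments, plus the ASNs */
	size_t segments = (n_as + SEG_MAX - 1) / SEG_MAX;

	return segments * SEG_HDR + n_as * as_bytes;
}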
static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
struct hci_conn *hcon;
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
if (ev->status)
return;
hci_dev_lock(hdev);
hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
if (hcon) {
hcon->state = BT_CLOSED;
hci_conn_del(hcon);
}
hci_dev_unlock(hdev);
} | 0 | [
"CWE-290"
] | linux | 3ca44c16b0dcc764b641ee4ac226909f5c421aa3 | 116,613,181,870,041,200,000,000,000,000,000,000,000 | 21 | Bluetooth: Consolidate encryption handling in hci_encrypt_cfm
This makes hci_encrypt_cfm calls hci_connect_cfm in case the connection
state is BT_CONFIG so callers don't have to check the state.
Signed-off-by: Luiz Augusto von Dentz <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]> |
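A schematic rendering of the consolidation (types and helpers are stand-ins rather than the real hci_* API): the encrypt-confirm helper dispatches on connection state itself, so callers no longer repeat the BT_CONFIG check:

enum conn_state_demo { BT_CONFIG_DEMO, BT_CONNECTED_DEMO };

struct conn_demo { enum conn_state_demo state; };

static void connect_cfm_demo(struct conn_demo *c, int status)
{
	(void)c; (void)status;		/* notify "connection complete" */
}

static void auth_cfm_demo(struct conn_demo *c, int status)
{
	(void)c; (void)status;		/* notify "auth/encrypt complete" */
}

static void encrypt_cfm_demo(struct conn_demo *c, int status)
{
	if (c->state == BT_CONFIG_DEMO)
		connect_cfm_demo(c, status);	/* still configuring */
	else
		auth_cfm_demo(c, status);	/* already established */
}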
static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
return cfs_bandwidth_used() && cfs_rq->throttled;
} | 0 | [
"CWE-400",
"CWE-703",
"CWE-835"
] | linux | c40f7d74c741a907cfaeb73a7697081881c497d0 | 92,060,806,627,996,980,000,000,000,000,000,000,000 | 4 | sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
is a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <[email protected]>
Analyzed-by: Vincent Guittot <[email protected]>
Reported-by: Zhipeng Xie <[email protected]>
Reported-by: Sargun Dhillon <[email protected]>
Reported-by: Xie XiuQi <[email protected]>
Tested-by: Zhipeng Xie <[email protected]>
Tested-by: Sargun Dhillon <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Acked-by: Vincent Guittot <[email protected]>
Cc: <[email protected]> # v4.13+
Cc: Bin Li <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
void ConnectionManagerImpl::chargeTracingStats(const Tracing::Reason& tracing_reason,
ConnectionManagerTracingStats& tracing_stats) {
switch (tracing_reason) {
case Tracing::Reason::ClientForced:
tracing_stats.client_enabled_.inc();
break;
case Tracing::Reason::NotTraceableRequestId:
tracing_stats.not_traceable_.inc();
break;
case Tracing::Reason::Sampling:
tracing_stats.random_sampling_.inc();
break;
case Tracing::Reason::ServiceForced:
tracing_stats.service_forced_.inc();
break;
default:
throw std::invalid_argument(
fmt::format("invalid tracing reason, value: {}", static_cast<int32_t>(tracing_reason)));
}
} | 0 | [
"CWE-400",
"CWE-703"
] | envoy | afc39bea36fd436e54262f150c009e8d72db5014 | 35,659,344,290,739,964,000,000,000,000,000,000,000 | 20 | Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]> |
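A C rendering of the caching design the message describes (Envoy's HeaderMap is C++; the names here are illustrative): the size is cached alongside a validity flag, any mutable access clears the flag, and a refresh pass re-sums the key and value lengths:

#include <stddef.h>
#include <string.h>

struct header_demo { const char *key; const char *value; };

struct header_map_demo {
	struct header_demo *entries;
	size_t              count;
	size_t              cached_size;
	int                 cached_valid;	/* the "optional" flag */
};

/* any non-const access to an entry must invalidate the cache */
static struct header_demo *map_entry_mut(struct header_map_demo *m, size_t i)
{
	m->cached_valid = 0;
	return &m->entries[i];
}

/* recompute by summing key and value sizes, as refreshByteSize() does */
static size_t map_refresh_byte_size(struct header_map_demo *m)
{
	size_t i, total = 0;

	for (i = 0; i < m->count; i++)
		total += strlen(m->entries[i].key) + strlen(m->entries[i].value);
	m->cached_size = total;
	m->cached_valid = 1;
	return total;
}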
/* {{{ day of week helpers */
char *php_date_full_day_name(timelib_sll y, timelib_sll m, timelib_sll d)
{
timelib_sll day_of_week = timelib_day_of_week(y, m, d);
if (day_of_week < 0) {
return "Unknown";
} | 0 | [] | php-src | bb057498f7457e8b2eba98332a3bad434de4cf12 | 240,394,867,352,502,980,000,000,000,000,000,000,000 | 8 | Fix #70277: new DateTimeZone($foo) is ignoring text after null byte
The DateTimeZone constructors are not binary safe. They parse the timezone
as a string but discard the length when calling timezone_initialize(). This
patch adds a tz_len parameter and a corresponding check to timezone_initialize(). |
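A small sketch of what a binary-safe check can look like (tz_name_ok is a hypothetical helper, not PHP's actual code): take an explicit length and reject embedded NUL bytes instead of silently truncating at them:

#include <stddef.h>
#include <string.h>

static int tz_name_ok(const char *tz, size_t tz_len)
{
	/* an embedded '\0' means everything after it would be ignored */
	return tz_len > 0 && memchr(tz, '\0', tz_len) == NULL;
}

When tz is itself NUL-terminated, strlen(tz) == tz_len is an equivalent test.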
pk_transaction_download_packages (PkTransaction *transaction,
GVariant *params,
GDBusMethodInvocation *context)
{
gboolean ret;
gint retval;
guint length;
gboolean store_in_cache;
gchar **package_ids = NULL;
_cleanup_error_free_ GError *error = NULL;
_cleanup_free_ gchar *directory = NULL;
_cleanup_free_ gchar *package_ids_temp = NULL;
g_return_if_fail (PK_IS_TRANSACTION (transaction));
g_return_if_fail (transaction->priv->tid != NULL);
g_variant_get (params, "(b^a&s)",
&store_in_cache,
&package_ids);
package_ids_temp = pk_package_ids_to_string (package_ids);
g_debug ("DownloadPackages method called: %s", package_ids_temp);
/* not implemented yet */
if (!pk_backend_is_implemented (transaction->priv->backend,
PK_ROLE_ENUM_DOWNLOAD_PACKAGES)) {
g_set_error (&error,
PK_TRANSACTION_ERROR,
PK_TRANSACTION_ERROR_NOT_SUPPORTED,
"DownloadPackages not supported by backend");
pk_transaction_set_state (transaction, PK_TRANSACTION_STATE_ERROR);
goto out;
}
/* check for length sanity */
length = g_strv_length (package_ids);
if (length > PK_TRANSACTION_MAX_PACKAGES_TO_PROCESS) {
g_set_error (&error,
PK_TRANSACTION_ERROR,
PK_TRANSACTION_ERROR_NUMBER_OF_PACKAGES_INVALID,
"Too many packages to process (%i/%i)",
length, PK_TRANSACTION_MAX_PACKAGES_TO_PROCESS);
pk_transaction_set_state (transaction, PK_TRANSACTION_STATE_ERROR);
goto out;
}
/* check package_ids */
ret = pk_package_ids_check (package_ids);
if (!ret) {
g_set_error (&error,
PK_TRANSACTION_ERROR,
PK_TRANSACTION_ERROR_PACKAGE_ID_INVALID,
"The package id's '%s' are not valid", package_ids_temp);
goto out;
}
/* create cache directory */
if (!store_in_cache) {
directory = g_build_filename (LOCALSTATEDIR, "cache", "PackageKit",
"downloads", transaction->priv->tid, NULL);
/* rwxrwxr-x */
retval = g_mkdir (directory, 0775);
if (retval != 0) {
g_set_error (&error,
PK_TRANSACTION_ERROR,
PK_TRANSACTION_ERROR_DENIED,
"cannot create %s", directory);
goto out;
}
}
/* save so we can run later */
transaction->priv->cached_package_ids = g_strdupv (package_ids);
transaction->priv->cached_directory = g_strdup (directory);
pk_transaction_set_role (transaction, PK_ROLE_ENUM_DOWNLOAD_PACKAGES);
pk_transaction_set_state (transaction, PK_TRANSACTION_STATE_READY);
out:
pk_transaction_dbus_return (context, error);
} | 0 | [
"CWE-287"
] | PackageKit | f176976e24e8c17b80eff222572275517c16bdad | 177,960,873,302,303,700,000,000,000,000,000,000,000 | 79 | Reinstallation and downgrade require authorization
Added new policy actions:
* org.freedesktop.packagekit.package-reinstall
* org.freedesktop.packagekit.package-downgrade
The first does not depend on or require any other action to be
authorized, except for org.freedesktop.packagekit.package-install-untrusted
in the case of reinstalling an untrusted package. The same applies to the
second one, plus it implies the org.freedesktop.packagekit.package-install
action (if the user is authorized to downgrade, he's authorized to install
as well).
Now the authorization can spawn up to 3 asynchronous calls to polkit for a
single package, because each transaction flag (allow-downgrade,
allow-reinstall) the client supplies needs to be checked separately. |
void jas_matrix_bindsub(jas_matrix_t *mat0, jas_matrix_t *mat1, int r0,
int c0, int r1, int c1)
{
int i;
if (mat0->data_) {
if (!(mat0->flags_ & JAS_MATRIX_REF)) {
jas_free(mat0->data_);
}
mat0->data_ = 0;
mat0->datasize_ = 0;
}
if (mat0->rows_) {
jas_free(mat0->rows_);
mat0->rows_ = 0;
}
mat0->flags_ |= JAS_MATRIX_REF;
mat0->numrows_ = r1 - r0 + 1;
mat0->numcols_ = c1 - c0 + 1;
mat0->maxrows_ = mat0->numrows_;
if (!(mat0->rows_ = jas_alloc2(mat0->maxrows_, sizeof(jas_seqent_t *)))) {
/*
There is no way to indicate failure to the caller.
So, we have no choice but to abort.
Ideally, this function should have a non-void return type.
In practice, a non-void return type probably would not help
much anyways as the caller would just have to terminate anyways.
*/
abort();
}
for (i = 0; i < mat0->numrows_; ++i) {
mat0->rows_[i] = mat1->rows_[r0 + i] + c0;
}
mat0->xstart_ = mat1->xstart_ + c0;
mat0->ystart_ = mat1->ystart_ + r0;
mat0->xend_ = mat0->xstart_ + mat0->numcols_;
mat0->yend_ = mat0->ystart_ + mat0->numrows_;
} | 1 | [
"CWE-20",
"CWE-190"
] | jasper | d42b2388f7f8e0332c846675133acea151fc557a | 202,728,080,875,064,170,000,000,000,000,000,000,000 | 40 | The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log messages were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now. |
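The "safe integer add/multiply functions" item plausibly takes a shape like the following (names are illustrative, not JasPer's actual helpers): the operation reports failure instead of silently wrapping:

#include <stddef.h>
#include <stdint.h>

/* return nonzero on success, 0 if the operation would wrap */
static int safe_add_demo(size_t a, size_t b, size_t *r)
{
	if (a > SIZE_MAX - b)
		return 0;
	*r = a + b;
	return 1;
}

static int safe_mul_demo(size_t a, size_t b, size_t *r)
{
	if (b != 0 && a > SIZE_MAX / b)
		return 0;
	*r = a * b;
	return 1;
}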
irc_server_set_index_current_address (struct t_irc_server *server, int index)
{
if (server->current_address)
{
free (server->current_address);
server->current_address = NULL;
}
server->current_port = 0;
if (index < server->addresses_count)
{
server->index_current_address = index;
if (server->current_address)
free (server->current_address);
server->current_address = strdup (server->addresses_array[index]);
server->current_port = server->ports_array[index];
}
} | 0 | [
"CWE-20"
] | weechat | c265cad1c95b84abfd4e8d861f25926ef13b5d91 | 47,229,473,834,664,910,000,000,000,000,000,000,000 | 18 | Fix verification of SSL certificates by calling gnutls verify callback (patch #7459) |
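One workable shape for such a callback, sketched from public GnuTLS APIs (the surrounding wiring is assumed: the callback would be installed with gnutls_certificate_set_verify_function() and the expected hostname stashed with gnutls_session_set_ptr()): verify the chain, then check the leaf certificate against the hostname:

#include <gnutls/gnutls.h>
#include <gnutls/x509.h>

/* returning nonzero from the verify callback aborts the handshake */
static int verify_cert_demo(gnutls_session_t session)
{
	unsigned int status, ncerts;
	const gnutls_datum_t *certs;
	gnutls_x509_crt_t crt;
	const char *host = gnutls_session_get_ptr(session);	/* set by caller */

	if (host == NULL)
		return -1;
	if (gnutls_certificate_verify_peers2(session, &status) < 0 || status != 0)
		return -1;				/* chain did not verify */

	certs = gnutls_certificate_get_peers(session, &ncerts);
	if (certs == NULL || ncerts == 0)
		return -1;

	if (gnutls_x509_crt_init(&crt) < 0)
		return -1;
	if (gnutls_x509_crt_import(crt, &certs[0], GNUTLS_X509_FMT_DER) < 0 ||
	    !gnutls_x509_crt_check_hostname(crt, host)) {
		gnutls_x509_crt_deinit(crt);
		return -1;				/* hostname mismatch */
	}
	gnutls_x509_crt_deinit(crt);
	return 0;
}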
static inline void read_io_word(Encoder *encoder)
{
if (G_UNLIKELY(encoder->io_now == encoder->io_end)) {
more_io_words(encoder);
}
spice_extra_assert(encoder->io_now < encoder->io_end);
encoder->io_next_word = GUINT32_FROM_LE(*(encoder->io_now));
encoder->io_now++;
} | 0 | [] | spice-common | 762e0abae36033ccde658fd52d3235887b60862d | 10,871,236,406,079,025,000,000,000,000,000,000,000 | 10 | quic: Check we have some data to start decoding quic image
All paths already pass some data to quic_decode_begin but for the
test check it, it's not that expensive test.
Checking for not 0 is enough, all other words will potentially be
read calling more_io_words but we need one to avoid a potential
initial buffer overflow or deferencing an invalid pointer.
Signed-off-by: Frediano Ziglio <[email protected]>
Acked-by: Uri Lublin <[email protected]> |
PHP_MSHUTDOWN_FUNCTION(basic) /* {{{ */
{
#ifdef HAVE_SYSLOG_H
PHP_MSHUTDOWN(syslog)(SHUTDOWN_FUNC_ARGS_PASSTHRU);
#endif
#ifdef ZTS
ts_free_id(basic_globals_id);
#ifdef PHP_WIN32
ts_free_id(php_win32_core_globals_id);
#endif
#else
basic_globals_dtor(&basic_globals TSRMLS_CC);
#ifdef PHP_WIN32
php_win32_core_globals_dtor(&the_php_win32_core_globals TSRMLS_CC);
#endif
#endif
php_unregister_url_stream_wrapper("php" TSRMLS_CC);
php_unregister_url_stream_wrapper("http" TSRMLS_CC);
php_unregister_url_stream_wrapper("ftp" TSRMLS_CC);
BASIC_MSHUTDOWN_SUBMODULE(browscap)
BASIC_MSHUTDOWN_SUBMODULE(array)
BASIC_MSHUTDOWN_SUBMODULE(assert)
BASIC_MSHUTDOWN_SUBMODULE(url_scanner_ex)
BASIC_MSHUTDOWN_SUBMODULE(file)
BASIC_MSHUTDOWN_SUBMODULE(standard_filters)
#if defined(HAVE_LOCALECONV) && defined(ZTS)
BASIC_MSHUTDOWN_SUBMODULE(localeconv)
#endif
#if HAVE_CRYPT
BASIC_MSHUTDOWN_SUBMODULE(crypt)
#endif
zend_hash_destroy(&basic_submodules);
return SUCCESS;
} | 0 | [
"CWE-601"
] | php-src | 98b9dfaec95e6f910f125ed172cdbd25abd006ec | 218,387,845,015,317,230,000,000,000,000,000,000,000 | 37 | Fix for HTTP_PROXY issue.
The following changes are made:
- _SERVER/_ENV only has HTTP_PROXY if the local environment has it,
and only one from the environment.
- getenv('HTTP_PROXY') only returns one from the local environment
- getenv has an optional second parameter, telling it to only consider
the local environment |
bool ShouldUseSafeDialogs() const { return safe_dialogs_; } | 0 | [] | electron | e9fa834757f41c0b9fe44a4dffe3d7d437f52d34 | 40,219,558,879,070,360,000,000,000,000,000,000,000 | 1 | fix: ensure ElectronBrowser mojo service is only bound to appropriate render frames (#33344)
* fix: ensure ElectronBrowser mojo service is only bound to authorized render frames
Notes: no-notes
* refactor: extract electron API IPC to its own mojo interface
* fix: just check main frame not primary main frame
Co-authored-by: Samuel Attard <[email protected]> |
ZEND_VM_HANDLER(160, ZEND_YIELD, CONST|TMP|VAR|CV|UNUSED, CONST|TMP|VAR|CV|UNUSED, SRC)
{
USE_OPLINE
zend_generator *generator = zend_get_running_generator(EXECUTE_DATA_C);
SAVE_OPLINE();
if (UNEXPECTED(generator->flags & ZEND_GENERATOR_FORCED_CLOSE)) {
ZEND_VM_DISPATCH_TO_HELPER(zend_yield_in_closed_generator_helper);
}
/* Destroy the previously yielded value */
zval_ptr_dtor(&generator->value);
/* Destroy the previously yielded key */
zval_ptr_dtor(&generator->key);
/* Set the new yielded value */
if (OP1_TYPE != IS_UNUSED) {
zend_free_op free_op1;
if (UNEXPECTED(EX(func)->op_array.fn_flags & ZEND_ACC_RETURN_REFERENCE)) {
/* Constants and temporary variables aren't yieldable by reference,
* but we still allow them with a notice. */
if (OP1_TYPE & (IS_CONST|IS_TMP_VAR)) {
zval *value;
zend_error(E_NOTICE, "Only variable references should be yielded by reference");
value = GET_OP1_ZVAL_PTR(BP_VAR_R);
ZVAL_COPY_VALUE(&generator->value, value);
if (OP1_TYPE == IS_CONST) {
if (UNEXPECTED(Z_OPT_REFCOUNTED(generator->value))) {
Z_ADDREF(generator->value);
}
}
} else {
zval *value_ptr = GET_OP1_ZVAL_PTR_PTR(BP_VAR_W);
/* If a function call result is yielded and the function did
* not return by reference we throw a notice. */
do {
if (OP1_TYPE == IS_VAR) {
ZEND_ASSERT(value_ptr != &EG(uninitialized_zval));
if (opline->extended_value == ZEND_RETURNS_FUNCTION
&& !Z_ISREF_P(value_ptr)) {
zend_error(E_NOTICE, "Only variable references should be yielded by reference");
ZVAL_COPY(&generator->value, value_ptr);
break;
}
}
if (Z_ISREF_P(value_ptr)) {
Z_ADDREF_P(value_ptr);
} else {
ZVAL_MAKE_REF_EX(value_ptr, 2);
}
ZVAL_REF(&generator->value, Z_REF_P(value_ptr));
} while (0);
FREE_OP1_VAR_PTR();
}
} else {
zval *value = GET_OP1_ZVAL_PTR(BP_VAR_R);
/* Consts, temporary variables and references need copying */
if (OP1_TYPE == IS_CONST) {
ZVAL_COPY_VALUE(&generator->value, value);
if (UNEXPECTED(Z_OPT_REFCOUNTED(generator->value))) {
Z_ADDREF(generator->value);
}
} else if (OP1_TYPE == IS_TMP_VAR) {
ZVAL_COPY_VALUE(&generator->value, value);
} else if ((OP1_TYPE & (IS_VAR|IS_CV)) && Z_ISREF_P(value)) {
ZVAL_COPY(&generator->value, Z_REFVAL_P(value));
FREE_OP1_IF_VAR();
} else {
ZVAL_COPY_VALUE(&generator->value, value);
if (OP1_TYPE == IS_CV) {
if (Z_OPT_REFCOUNTED_P(value)) Z_ADDREF_P(value);
}
}
}
} else {
/* If no value was specified yield null */
ZVAL_NULL(&generator->value);
}
/* Set the new yielded key */
if (OP2_TYPE != IS_UNUSED) {
zend_free_op free_op2;
zval *key = GET_OP2_ZVAL_PTR(BP_VAR_R);
/* Consts, temporary variables and references need copying */
if (OP2_TYPE == IS_CONST) {
ZVAL_COPY_VALUE(&generator->key, key);
if (UNEXPECTED(Z_OPT_REFCOUNTED(generator->key))) {
Z_ADDREF(generator->key);
}
} else if (OP2_TYPE == IS_TMP_VAR) {
ZVAL_COPY_VALUE(&generator->key, key);
} else if ((OP2_TYPE & (IS_VAR|IS_CV)) && Z_ISREF_P(key)) {
ZVAL_COPY(&generator->key, Z_REFVAL_P(key));
FREE_OP2_IF_VAR();
} else {
ZVAL_COPY_VALUE(&generator->key, key);
if (OP2_TYPE == IS_CV) {
if (Z_OPT_REFCOUNTED_P(key)) Z_ADDREF_P(key);
}
}
if (Z_TYPE(generator->key) == IS_LONG
&& Z_LVAL(generator->key) > generator->largest_used_integer_key
) {
generator->largest_used_integer_key = Z_LVAL(generator->key);
}
} else {
/* If no key was specified we use auto-increment keys */
generator->largest_used_integer_key++;
ZVAL_LONG(&generator->key, generator->largest_used_integer_key);
}
if (RETURN_VALUE_USED(opline)) {
/* If the return value of yield is used set the send
* target and initialize it to NULL */
generator->send_target = EX_VAR(opline->result.var);
ZVAL_NULL(generator->send_target);
} else {
generator->send_target = NULL;
}
/* We increment to the next op, so we are at the correct position when the
* generator is resumed. */
ZEND_VM_INC_OPCODE();
/* The GOTO VM uses a local opline variable. We need to set the opline
* variable in execute_data so we don't resume at an old position. */
SAVE_OPLINE();
ZEND_VM_RETURN();
} | 0 | [
"CWE-787"
] | php-src | f1ce8d5f5839cb2069ea37ff424fb96b8cd6932d | 198,105,183,157,399,360,000,000,000,000,000,000,000 | 140 | Fix #73122: Integer Overflow when concatenating strings
We must avoid integer overflows in memory allocations, so we introduce
an additional check in the VM, and bail out in the rare case of an
overflow. Since the recent fix for bug #74960 still doesn't catch all
possible overflows, we fix that right away. |
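A userspace sketch of the kind of check being added (concat_checked is a hypothetical helper, not PHP's VM code): refuse a concatenation whose combined length would overflow before it ever reaches the allocator:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static char *concat_checked(const char *a, size_t alen,
                            const char *b, size_t blen)
{
	char *r;

	if (alen >= SIZE_MAX - blen)	/* alen + blen + 1 would wrap */
		return NULL;
	r = malloc(alen + blen + 1);
	if (r == NULL)
		return NULL;
	memcpy(r, a, alen);
	memcpy(r + alen, b, blen);
	r[alen + blen] = '\0';
	return r;
}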
static void maybe_unmark_and_push(struct sock *x)
{
struct unix_sock *u = unix_sk(x);
if (u->gc_tree != GC_ORPHAN)
return;
sock_hold(x);
u->gc_tree = gc_current;
gc_current = x;
} | 1 | [] | linux-2.6 | 1fd05ba5a2f2aa8e7b9b52ef55df850e2e7d54c9 | 75,883,221,676,963,210,000,000,000,000,000,000,000 | 10 | [AF_UNIX]: Rewrite garbage collector, fixes race.
Throw out the old mark & sweep garbage collector and put in a
refcounting cycle detecting one.
The old one had a race with recvmsg, that resulted in false positives
and hence data loss. The old algorithm operated on all unix sockets
in the system, so any additional locking would have meant performance
problems for all users of these.
The new algorithm instead only operates on "in flight" sockets, which
are very rare, and the additional locking for these doesn't negatively
impact the vast majority of users.
In fact it's probable, that there weren't *any* heavy senders of
sockets over sockets, otherwise the above race would have been
discovered long ago.
The patch works OK with the app that exposed the race with the old
code. The garbage collection has also been verified to work in a few
simple cases.
Signed-off-by: Miklos Szeredi <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int coolkey_apdu_io(sc_card_t *card, int cla, int ins, int p1, int p2,
const u8 * sendbuf, size_t sendbuflen, u8 ** recvbuf, size_t * recvbuflen,
const u8 *nonce, size_t nonce_len)
{
int r;
sc_apdu_t apdu;
u8 rbufinitbuf[COOLKEY_MAX_SIZE];
u8 rsendbuf[COOLKEY_MAX_SIZE];
u8 *rbuf;
size_t rbuflen;
int cse = 0;
SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE);
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
"%02x %02x %02x %"SC_FORMAT_LEN_SIZE_T"u : %"SC_FORMAT_LEN_SIZE_T"u %"SC_FORMAT_LEN_SIZE_T"u\n",
ins, p1, p2, sendbuflen, card->max_send_size,
card->max_recv_size);
rbuf = rbufinitbuf;
rbuflen = sizeof(rbufinitbuf);
/* if caller provided a buffer and length */
if (recvbuf && *recvbuf && recvbuflen && *recvbuflen) {
rbuf = *recvbuf;
rbuflen = *recvbuflen;
}
if (sendbuf || nonce) {
if (recvbuf) {
cse = SC_APDU_CASE_4_SHORT;
} else {
cse = SC_APDU_CASE_3_SHORT;
}
} else {
if (recvbuf) {
cse = SC_APDU_CASE_2_SHORT;
} else {
cse = SC_APDU_CASE_1;
}
}
/* append the nonce if we have it. Coolkey just blindly puts this at the end
* of the APDU (while adjusting lc). This converts case 1 to case 3. coolkey
* also always drops le in case 4 (which happens when proto = T0). nonces are
* never used on case 2 commands, so we can simply append the nonce to the data
* and we should be fine */
if (nonce) {
u8 *buf = rsendbuf;
if (sendbuf) {
sendbuflen = MIN(sendbuflen,sizeof(rsendbuf)-nonce_len);
memcpy(rsendbuf, sendbuf, sendbuflen);
buf += sendbuflen;
}
memcpy(buf, nonce, nonce_len);
sendbuflen += nonce_len;
sendbuf =rsendbuf;
}
sc_format_apdu(card, &apdu, cse, ins, p1, p2);
apdu.lc = sendbuflen;
apdu.datalen = sendbuflen;
apdu.data = sendbuf;
/* coolkey uses non-standard classes */
apdu.cla = cla;
if (recvbuf) {
apdu.resp = rbuf;
apdu.le = (rbuflen > 255) ? 255 : rbuflen;
apdu.resplen = rbuflen;
} else {
apdu.resp = rbuf;
apdu.le = 0;
apdu.resplen = 0;
}
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
"calling sc_transmit_apdu flags=%lx le=%"SC_FORMAT_LEN_SIZE_T"u, resplen=%"SC_FORMAT_LEN_SIZE_T"u, resp=%p",
apdu.flags, apdu.le, apdu.resplen, apdu.resp);
/* with new adpu.c and chaining, this actually reads the whole object */
r = sc_transmit_apdu(card, &apdu);
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
"result r=%d apdu.resplen=%"SC_FORMAT_LEN_SIZE_T"u sw1=%02x sw2=%02x",
r, apdu.resplen, apdu.sw1, apdu.sw2);
if (r < 0) {
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,"Transmit failed");
goto err;
}
r = sc_check_sw(card, apdu.sw1, apdu.sw2);
if (r < 0) {
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,"Transmit failed");
goto err;
}
if (recvbuflen) {
if (recvbuf && *recvbuf == NULL) {
*recvbuf = malloc(apdu.resplen);
if (*recvbuf == NULL) {
r = SC_ERROR_OUT_OF_MEMORY;
goto err;
}
memcpy(*recvbuf, rbuf, apdu.resplen);
}
*recvbuflen = apdu.resplen;
r = *recvbuflen;
}
err:
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_NORMAL, r);
} | 0 | [
"CWE-125"
] | OpenSC | 8fe377e93b4b56060e5bbfb6f3142ceaeca744fa | 45,796,469,931,864,100,000,000,000,000,000,000,000 | 117 | fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes. |
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
{
struct net_device *dev = skb->dev;
int fhoff, nhoff, ret;
struct frag_hdr *fhdr;
struct frag_queue *fq;
struct ipv6hdr *hdr;
u8 prevhdr;
/* Jumbo payload inhibits frag. header */
if (ipv6_hdr(skb)->payload_len == 0) {
pr_debug("payload len = 0\n");
return -EINVAL;
}
if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
return -EINVAL;
if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
return -ENOMEM;
skb_set_transport_header(skb, fhoff);
hdr = ipv6_hdr(skb);
fhdr = (struct frag_hdr *)skb_transport_header(skb);
fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
if (fq == NULL) {
pr_debug("Can't find and can't create new queue\n");
return -ENOMEM;
}
spin_lock_bh(&fq->q.lock);
if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) {
ret = -EINVAL;
goto out_unlock;
}
/* after queue has assumed skb ownership, only 0 or -EINPROGRESS
* must be returned.
*/
ret = -EINPROGRESS;
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq->q.meat == fq->q.len &&
nf_ct_frag6_reasm(fq, skb, dev))
ret = 0;
out_unlock:
spin_unlock_bh(&fq->q.lock);
inet_frag_put(&fq->q, &nf_frags);
return ret;
} | 1 | [
"CWE-787"
] | linux | 9b57da0630c9fd36ed7a20fc0f98dc82cc0777fa | 263,959,933,329,077,140,000,000,000,000,000,000,000 | 53 | netfilter: ipv6: nf_defrag: drop mangled skb on ream error
Dmitry Vyukov reported a GPF in the network stack that Andrey traced down
to a negative nh offset in nf_ct_frag6_queue().
The problem is that all network headers before the fragment header are pulled.
Normal ipv6 reassembly will drop the skb when errors occur further down
the line.
netfilter doesn't do this, and instead passed the original fragment
along. That was also fine back when netfilter ipv6 defrag worked with
cloned fragments, as the original, pristine fragment was passed on.
So we either have to undo the pull op, or discard such fragments.
Since they're malformed after all (e.g. overlapping fragment) it seems
preferrable to just drop them.
Same for temporary errors -- it doesn't make sense to accept (and
perhaps forward!) only some fragments of same datagram.
Fixes: 029f7f3b8701cc7ac ("netfilter: ipv6: nf_defrag: avoid/free clone operations")
Reported-by: Dmitry Vyukov <[email protected]>
Debugged-by: Andrey Konovalov <[email protected]>
Diagnosed-by: Eric Dumazet <[email protected]>
Signed-off-by: Florian Westphal <[email protected]>
Acked-by: Eric Dumazet <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
int tipc_netlink_start(void)
{
int res;
res = genl_register_family_with_ops(&tipc_genl_family, tipc_genl_ops);
if (res) {
pr_err("Failed to register netlink interface\n");
return res;
}
return 0;
} | 0 | [
"CWE-264"
] | net | 90f62cf30a78721641e08737bda787552428061e | 247,800,908,602,174,000,000,000,000,000,000,000,000 | 11 | net: Use netlink_ns_capable to verify the permisions of netlink messages
It is possible by passing a netlink socket to a more privileged
executable and then to fool that executable into writing to the socket
data that happens to be valid netlink message to do something that
privileged executable did not intend to do.
To keep this from happening replace bare capable and ns_capable calls
with netlink_capable, netlink_net_calls and netlink_ns_capable calls.
Which act the same as the previous calls except they verify that the
opener of the socket had the desired permissions as well.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
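A kernel-context fragment showing the substitution the message describes (it only compiles in-tree, and demo_doit is a hypothetical handler): the netlink_*_capable helpers verify the capabilities of whoever opened the socket, not merely the current caller:

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>

static int demo_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	(void)nlh;
	/* previously: if (!capable(CAP_NET_ADMIN)) return -EPERM; */
	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;	/* opener lacked the capability too */
	/* ... handle the request ... */
	return 0;
}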
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
{
return _mlx5_ib_post_send(ibqp, wr, bad_wr, false);
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | 0625b4ba1a5d4703c7fb01c497bd6c156908af00 | 3,795,285,938,345,956,000,000,000,000,000,000,000 | 5 | IB/mlx5: Fix leaking stack memory to userspace
mlx5_ib_create_qp_resp was never initialized and only the first 4 bytes
were written.
Fixes: 41d902cb7c32 ("RDMA/mlx5: Fix definition of mlx5_ib_create_qp_resp")
Cc: <[email protected]>
Acked-by: Leon Romanovsky <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]> |
write_xattr_metadata (const char *name, const char *value, FILE *fp)
{
(void)name;
(void)value;
(void)fp;
return 0;
} | 0 | [
"CWE-200"
] | wget | 3cdfb594cf75f11cdbb9702ac5e856c332ccacfa | 257,128,896,943,985,230,000,000,000,000,000,000,000 | 8 | Don't save user/pw with --xattr
Also the Referer info is reduced to scheme+host+port.
* src/ftp.c (getftp): Change params of set_file_metadata()
* src/http.c (gethttp): Change params of set_file_metadata()
* src/xattr.c (set_file_metadata): Remove user/password from origin URL,
reduce Referer value to scheme/host/port.
* src/xattr.h: Change prototype of set_file_metadata() |
size_t mg_mqtt_next_unsub(struct mg_mqtt_message *msg, struct mg_str *topic,
size_t pos) {
return mg_mqtt_next_topic(msg, topic, NULL, pos);
} | 0 | [
"CWE-552"
] | mongoose | c65c8fdaaa257e0487ab0aaae9e8f6b439335945 | 69,114,333,650,507,190,000,000,000,000,000,000,000 | 4 | Protect against the directory traversal in mg_upload() |
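A sketch of the kind of guard such a fix needs (path_is_safe is a hypothetical helper, not mongoose's actual code): reject any path containing a ".." component before it is joined with the destination directory:

#include <string.h>

static int path_is_safe(const char *name)
{
	const char *p = name;

	while ((p = strstr(p, "..")) != NULL) {
		int at_start = (p == name) || (p[-1] == '/') || (p[-1] == '\\');
		int at_end   = (p[2] == '\0') || (p[2] == '/') || (p[2] == '\\');

		if (at_start && at_end)
			return 0;	/* "..", "../x", "a/../b", ... */
		p += 2;			/* ".." inside a name like "a..b" is fine */
	}
	return 1;
}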
static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
{
int res = TMF_RESP_FUNC_FAILED;
struct scsi_lun lun;
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
int_to_scsilun(cmd->device->lun, &lun);
SAS_DPRINTK("eh: device %llx LUN %llx has the task\n",
SAS_ADDR(dev->sas_addr),
cmd->device->lun);
if (i->dft->lldd_abort_task_set)
res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
if (res == TMF_RESP_FUNC_FAILED) {
if (i->dft->lldd_clear_task_set)
res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
}
if (res == TMF_RESP_FUNC_FAILED) {
if (i->dft->lldd_lu_reset)
res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
}
return res;
} | 0 | [] | linux | 318aaf34f1179b39fa9c30fa0f3288b645beee39 | 312,185,602,512,795,700,000,000,000,000,000,000,000 | 28 | scsi: libsas: defer ata device eh commands to libata
When ata device doing EH, some commands still attached with tasks are
not passed to libata when abort failed or recover failed, so libata did
not handle these commands. After these commands done, sas task is freed,
but ata qc is not freed. This will cause ata qc leak and trigger a
warning like below:
WARNING: CPU: 0 PID: 28512 at drivers/ata/libata-eh.c:4037
ata_eh_finish+0xb4/0xcc
CPU: 0 PID: 28512 Comm: kworker/u32:2 Tainted: G W OE 4.14.0#1
......
Call trace:
[<ffff0000088b7bd0>] ata_eh_finish+0xb4/0xcc
[<ffff0000088b8420>] ata_do_eh+0xc4/0xd8
[<ffff0000088b8478>] ata_std_error_handler+0x44/0x8c
[<ffff0000088b8068>] ata_scsi_port_error_handler+0x480/0x694
[<ffff000008875fc4>] async_sas_ata_eh+0x4c/0x80
[<ffff0000080f6be8>] async_run_entry_fn+0x4c/0x170
[<ffff0000080ebd70>] process_one_work+0x144/0x390
[<ffff0000080ec100>] worker_thread+0x144/0x418
[<ffff0000080f2c98>] kthread+0x10c/0x138
[<ffff0000080855dc>] ret_from_fork+0x10/0x18
If too many ata qcs are leaked, ata tag allocation will fail and I/O will
block forever.
As suggested by Dan Williams, defer ata device commands to libata and
merge sas_eh_finish_cmd() with sas_eh_defer_cmd(). libata will handle
ata qcs correctly after this.
Signed-off-by: Jason Yan <[email protected]>
CC: Xiaofei Tan <[email protected]>
CC: John Garry <[email protected]>
CC: Dan Williams <[email protected]>
Reviewed-by: Dan Williams <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]> |
EXPORTED void appendentryatt(struct entryattlist **l, const char *entry,
struct attvaluelist *attvalues)
{
struct entryattlist **tail = l;
while (*tail) tail = &(*tail)->next;
*tail = (struct entryattlist *)xmalloc(sizeof(struct entryattlist));
(*tail)->entry = xstrdup(entry);
(*tail)->attvalues = attvalues;
(*tail)->next = NULL;
} | 0 | [
"CWE-732"
] | cyrus-imapd | 621f9e41465b521399f691c241181300fab55995 | 41,876,000,599,932,875,000,000,000,000,000,000,000 | 12 | annotate: don't allow everyone to write shared server entries |
static void write_int(bytearray_t * bplist, uint64_t val)
{
uint64_t size = get_needed_bytes(val);
uint8_t *buff = NULL;
//do not write 3bytes int node
if (size == 3)
size++;
#ifdef __BIG_ENDIAN__
val = val << ((sizeof(uint64_t) - size) * 8);
#endif
buff = (uint8_t *) malloc(sizeof(uint8_t) + size);
buff[0] = BPLIST_UINT | Log2(size);
memcpy(buff + 1, &val, size);
byte_convert(buff + 1, size);
byte_array_append(bplist, buff, sizeof(uint8_t) + size);
free(buff);
} | 0 | [
"CWE-770"
] | libplist | 26061aac4ec75e7a4469a9aab9a424716223e5c4 | 271,115,801,798,356,360,000,000,000,000,000,000,000 | 19 | bplist: Check for invalid offset_size in bplist trailer |
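A sketch of the validation in question (exact field names and ranges are illustrative): a binary plist trailer's size fields must be between 1 and 8 bytes, otherwise later offset arithmetic reads outside the buffer:

#include <stdint.h>

static int trailer_sizes_ok(uint8_t offset_size, uint8_t ref_size)
{
	/* each offset/ref is read as a 1..8 byte big-endian integer */
	return offset_size >= 1 && offset_size <= 8 &&
	       ref_size    >= 1 && ref_size    <= 8;
}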
static int write_metadata_block (WavpackContext *wpc)
{
char *block_buff, *block_ptr;
WavpackHeader *wphdr;
if (wpc->metacount) {
int metacount = wpc->metacount, block_size = sizeof (WavpackHeader);
WavpackMetadata *wpmdp = wpc->metadata;
while (metacount--) {
block_size += wpmdp->byte_length + (wpmdp->byte_length & 1);
block_size += (wpmdp->byte_length > 510) ? 4 : 2;
wpmdp++;
}
// allocate 6 extra bytes for 4-byte checksum (which we add last)
wphdr = (WavpackHeader *) (block_buff = malloc (block_size + 6));
CLEAR (*wphdr);
memcpy (wphdr->ckID, "wvpk", 4);
SET_TOTAL_SAMPLES (*wphdr, wpc->total_samples);
wphdr->version = wpc->stream_version;
wphdr->ckSize = block_size - 8;
wphdr->block_samples = 0;
block_ptr = (char *)(wphdr + 1);
wpmdp = wpc->metadata;
while (wpc->metacount) {
block_ptr = write_metadata (wpmdp, block_ptr);
wpc->metabytes -= wpmdp->byte_length;
free_metadata (wpmdp++);
wpc->metacount--;
}
free (wpc->metadata);
wpc->metadata = NULL;
// add a 4-byte checksum here (increases block size by 6)
block_add_checksum ((unsigned char *) block_buff, (unsigned char *) block_buff + (block_size += 6), 4);
WavpackNativeToLittleEndian ((WavpackHeader *) block_buff, WavpackHeaderFormat);
if (!wpc->blockout (wpc->wv_out, block_buff, block_size)) {
free (block_buff);
strcpy (wpc->error_message, "can't write WavPack data, disk probably full!");
return FALSE;
}
free (block_buff);
}
return TRUE;
} | 0 | [
"CWE-703",
"CWE-835"
] | WavPack | 070ef6f138956d9ea9612e69586152339dbefe51 | 111,060,944,460,070,750,000,000,000,000,000,000,000 | 53 | issue #53: error out on zero sample rate |
static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
int ifindex, struct netlink_ext_ack *extack)
{
if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
return 0;
/* Do we search for filter, attached to class? */
if (TC_H_MIN(parent)) {
const struct Qdisc_class_ops *cops = q->ops->cl_ops;
*cl = cops->find(q, parent);
if (*cl == 0) {
NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
return -ENOENT;
}
}
return 0;
} | 0 | [
"CWE-416"
] | linux | 04c2a47ffb13c29778e2a14e414ad4cb5a5db4b5 | 133,229,892,526,502,400,000,000,000,000,000,000,000 | 19 | net: sched: fix use-after-free in tc_new_tfilter()
Whenever tc_new_tfilter() jumps back to the replay: label,
we need to make sure the @q and @chain local variables are cleared again,
or we risk a use-after-free as in [1].
For consistency, apply the same fix in tc_ctl_chain()
BUG: KASAN: use-after-free in mini_qdisc_pair_swap+0x1b9/0x1f0 net/sched/sch_generic.c:1581
Write of size 8 at addr ffff8880985c4b08 by task syz-executor.4/1945
CPU: 0 PID: 1945 Comm: syz-executor.4 Not tainted 5.17.0-rc1-syzkaller-00495-gff58831fa02d #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
<TASK>
__dump_stack lib/dump_stack.c:88 [inline]
dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106
print_address_description.constprop.0.cold+0x8d/0x336 mm/kasan/report.c:255
__kasan_report mm/kasan/report.c:442 [inline]
kasan_report.cold+0x83/0xdf mm/kasan/report.c:459
mini_qdisc_pair_swap+0x1b9/0x1f0 net/sched/sch_generic.c:1581
tcf_chain_head_change_item net/sched/cls_api.c:372 [inline]
tcf_chain0_head_change.isra.0+0xb9/0x120 net/sched/cls_api.c:386
tcf_chain_tp_insert net/sched/cls_api.c:1657 [inline]
tcf_chain_tp_insert_unique net/sched/cls_api.c:1707 [inline]
tc_new_tfilter+0x1e67/0x2350 net/sched/cls_api.c:2086
rtnetlink_rcv_msg+0x80d/0xb80 net/core/rtnetlink.c:5583
netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2494
netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline]
netlink_unicast+0x539/0x7e0 net/netlink/af_netlink.c:1343
netlink_sendmsg+0x904/0xe00 net/netlink/af_netlink.c:1919
sock_sendmsg_nosec net/socket.c:705 [inline]
sock_sendmsg+0xcf/0x120 net/socket.c:725
____sys_sendmsg+0x331/0x810 net/socket.c:2413
___sys_sendmsg+0xf3/0x170 net/socket.c:2467
__sys_sendmmsg+0x195/0x470 net/socket.c:2553
__do_sys_sendmmsg net/socket.c:2582 [inline]
__se_sys_sendmmsg net/socket.c:2579 [inline]
__x64_sys_sendmmsg+0x99/0x100 net/socket.c:2579
do_syscall_x64 arch/x86/entry/common.c:50 [inline]
do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7f2647172059
Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007f2645aa5168 EFLAGS: 00000246 ORIG_RAX: 0000000000000133
RAX: ffffffffffffffda RBX: 00007f2647285100 RCX: 00007f2647172059
RDX: 040000000000009f RSI: 00000000200002c0 RDI: 0000000000000006
RBP: 00007f26471cc08d R08: 0000000000000000 R09: 0000000000000000
R10: 9e00000000000000 R11: 0000000000000246 R12: 0000000000000000
R13: 00007fffb3f7f02f R14: 00007f2645aa5300 R15: 0000000000022000
</TASK>
Allocated by task 1944:
kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38
kasan_set_track mm/kasan/common.c:45 [inline]
set_alloc_info mm/kasan/common.c:436 [inline]
____kasan_kmalloc mm/kasan/common.c:515 [inline]
____kasan_kmalloc mm/kasan/common.c:474 [inline]
__kasan_kmalloc+0xa9/0xd0 mm/kasan/common.c:524
kmalloc_node include/linux/slab.h:604 [inline]
kzalloc_node include/linux/slab.h:726 [inline]
qdisc_alloc+0xac/0xa10 net/sched/sch_generic.c:941
qdisc_create.constprop.0+0xce/0x10f0 net/sched/sch_api.c:1211
tc_modify_qdisc+0x4c5/0x1980 net/sched/sch_api.c:1660
rtnetlink_rcv_msg+0x413/0xb80 net/core/rtnetlink.c:5592
netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2494
netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline]
netlink_unicast+0x539/0x7e0 net/netlink/af_netlink.c:1343
netlink_sendmsg+0x904/0xe00 net/netlink/af_netlink.c:1919
sock_sendmsg_nosec net/socket.c:705 [inline]
sock_sendmsg+0xcf/0x120 net/socket.c:725
____sys_sendmsg+0x331/0x810 net/socket.c:2413
___sys_sendmsg+0xf3/0x170 net/socket.c:2467
__sys_sendmmsg+0x195/0x470 net/socket.c:2553
__do_sys_sendmmsg net/socket.c:2582 [inline]
__se_sys_sendmmsg net/socket.c:2579 [inline]
__x64_sys_sendmmsg+0x99/0x100 net/socket.c:2579
do_syscall_x64 arch/x86/entry/common.c:50 [inline]
do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
entry_SYSCALL_64_after_hwframe+0x44/0xae
Freed by task 3609:
kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38
kasan_set_track+0x21/0x30 mm/kasan/common.c:45
kasan_set_free_info+0x20/0x30 mm/kasan/generic.c:370
____kasan_slab_free mm/kasan/common.c:366 [inline]
____kasan_slab_free+0x130/0x160 mm/kasan/common.c:328
kasan_slab_free include/linux/kasan.h:236 [inline]
slab_free_hook mm/slub.c:1728 [inline]
slab_free_freelist_hook+0x8b/0x1c0 mm/slub.c:1754
slab_free mm/slub.c:3509 [inline]
kfree+0xcb/0x280 mm/slub.c:4562
rcu_do_batch kernel/rcu/tree.c:2527 [inline]
rcu_core+0x7b8/0x1540 kernel/rcu/tree.c:2778
__do_softirq+0x29b/0x9c2 kernel/softirq.c:558
Last potentially related work creation:
kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38
__kasan_record_aux_stack+0xbe/0xd0 mm/kasan/generic.c:348
__call_rcu kernel/rcu/tree.c:3026 [inline]
call_rcu+0xb1/0x740 kernel/rcu/tree.c:3106
qdisc_put_unlocked+0x6f/0x90 net/sched/sch_generic.c:1109
tcf_block_release+0x86/0x90 net/sched/cls_api.c:1238
tc_new_tfilter+0xc0d/0x2350 net/sched/cls_api.c:2148
rtnetlink_rcv_msg+0x80d/0xb80 net/core/rtnetlink.c:5583
netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2494
netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline]
netlink_unicast+0x539/0x7e0 net/netlink/af_netlink.c:1343
netlink_sendmsg+0x904/0xe00 net/netlink/af_netlink.c:1919
sock_sendmsg_nosec net/socket.c:705 [inline]
sock_sendmsg+0xcf/0x120 net/socket.c:725
____sys_sendmsg+0x331/0x810 net/socket.c:2413
___sys_sendmsg+0xf3/0x170 net/socket.c:2467
__sys_sendmmsg+0x195/0x470 net/socket.c:2553
__do_sys_sendmmsg net/socket.c:2582 [inline]
__se_sys_sendmmsg net/socket.c:2579 [inline]
__x64_sys_sendmmsg+0x99/0x100 net/socket.c:2579
do_syscall_x64 arch/x86/entry/common.c:50 [inline]
do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
entry_SYSCALL_64_after_hwframe+0x44/0xae
The buggy address belongs to the object at ffff8880985c4800
which belongs to the cache kmalloc-1k of size 1024
The buggy address is located 776 bytes inside of
1024-byte region [ffff8880985c4800, ffff8880985c4c00)
The buggy address belongs to the page:
page:ffffea0002617000 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x985c0
head:ffffea0002617000 order:3 compound_mapcount:0 compound_pincount:0
flags: 0xfff00000010200(slab|head|node=0|zone=1|lastcpupid=0x7ff)
raw: 00fff00000010200 0000000000000000 dead000000000122 ffff888010c41dc0
raw: 0000000000000000 0000000000100010 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
page_owner tracks the page as allocated
page last allocated via order 3, migratetype Unmovable, gfp_mask 0x1d20c0(__GFP_IO|__GFP_FS|__GFP_NOWARN|__GFP_NORETRY|__GFP_COMP|__GFP_NOMEMALLOC|__GFP_HARDWALL), pid 1941, ts 1038999441284, free_ts 1033444432829
prep_new_page mm/page_alloc.c:2434 [inline]
get_page_from_freelist+0xa72/0x2f50 mm/page_alloc.c:4165
__alloc_pages+0x1b2/0x500 mm/page_alloc.c:5389
alloc_pages+0x1aa/0x310 mm/mempolicy.c:2271
alloc_slab_page mm/slub.c:1799 [inline]
allocate_slab mm/slub.c:1944 [inline]
new_slab+0x28a/0x3b0 mm/slub.c:2004
___slab_alloc+0x87c/0xe90 mm/slub.c:3018
__slab_alloc.constprop.0+0x4d/0xa0 mm/slub.c:3105
slab_alloc_node mm/slub.c:3196 [inline]
slab_alloc mm/slub.c:3238 [inline]
__kmalloc+0x2fb/0x340 mm/slub.c:4420
kmalloc include/linux/slab.h:586 [inline]
kzalloc include/linux/slab.h:715 [inline]
__register_sysctl_table+0x112/0x1090 fs/proc/proc_sysctl.c:1335
neigh_sysctl_register+0x2c8/0x5e0 net/core/neighbour.c:3787
devinet_sysctl_register+0xb1/0x230 net/ipv4/devinet.c:2618
inetdev_init+0x286/0x580 net/ipv4/devinet.c:278
inetdev_event+0xa8a/0x15d0 net/ipv4/devinet.c:1532
notifier_call_chain+0xb5/0x200 kernel/notifier.c:84
call_netdevice_notifiers_info+0xb5/0x130 net/core/dev.c:1919
call_netdevice_notifiers_extack net/core/dev.c:1931 [inline]
call_netdevice_notifiers net/core/dev.c:1945 [inline]
register_netdevice+0x1073/0x1500 net/core/dev.c:9698
veth_newlink+0x59c/0xa90 drivers/net/veth.c:1722
page last free stack trace:
reset_page_owner include/linux/page_owner.h:24 [inline]
free_pages_prepare mm/page_alloc.c:1352 [inline]
free_pcp_prepare+0x374/0x870 mm/page_alloc.c:1404
free_unref_page_prepare mm/page_alloc.c:3325 [inline]
free_unref_page+0x19/0x690 mm/page_alloc.c:3404
release_pages+0x748/0x1220 mm/swap.c:956
tlb_batch_pages_flush mm/mmu_gather.c:50 [inline]
tlb_flush_mmu_free mm/mmu_gather.c:243 [inline]
tlb_flush_mmu+0xe9/0x6b0 mm/mmu_gather.c:250
zap_pte_range mm/memory.c:1441 [inline]
zap_pmd_range mm/memory.c:1490 [inline]
zap_pud_range mm/memory.c:1519 [inline]
zap_p4d_range mm/memory.c:1540 [inline]
unmap_page_range+0x1d1d/0x2a30 mm/memory.c:1561
unmap_single_vma+0x198/0x310 mm/memory.c:1606
unmap_vmas+0x16b/0x2f0 mm/memory.c:1638
exit_mmap+0x201/0x670 mm/mmap.c:3178
__mmput+0x122/0x4b0 kernel/fork.c:1114
mmput+0x56/0x60 kernel/fork.c:1135
exit_mm kernel/exit.c:507 [inline]
do_exit+0xa3c/0x2a30 kernel/exit.c:793
do_group_exit+0xd2/0x2f0 kernel/exit.c:935
__do_sys_exit_group kernel/exit.c:946 [inline]
__se_sys_exit_group kernel/exit.c:944 [inline]
__x64_sys_exit_group+0x3a/0x50 kernel/exit.c:944
do_syscall_x64 arch/x86/entry/common.c:50 [inline]
do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
entry_SYSCALL_64_after_hwframe+0x44/0xae
Memory state around the buggy address:
ffff8880985c4a00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8880985c4a80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>ffff8880985c4b00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff8880985c4b80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8880985c4c00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
Fixes: 470502de5bdb ("net: sched: unlock rules update API")
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Vlad Buslov <[email protected]>
Cc: Jiri Pirko <[email protected]>
Cc: Cong Wang <[email protected]>
Reported-by: syzbot <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]> |
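A self-contained sketch of the fix's shape (names and helpers are hypothetical): every jump back to the replay label re-clears the loop-carried pointers, so a pointer released on the previous pass can never be dereferenced again:

#include <errno.h>
#include <stddef.h>

static int attempts;

static int try_once(void **q, void **chain)
{
	*q = &attempts;			/* pretend we acquired resources */
	*chain = &attempts;
	return attempts++ < 1 ? -EAGAIN : 0;
}

static void release(void *q, void *chain)
{
	(void)q; (void)chain;		/* pretend we dropped them */
}

static int new_filter_demo(void)
{
	void *q, *chain;
	int err;

replay:
	q = NULL;			/* the fix: re-clear per-pass state */
	chain = NULL;
	err = try_once(&q, &chain);
	if (err == -EAGAIN) {
		release(q, chain);	/* frees what this pass acquired */
		goto replay;		/* must start from a clean slate */
	}
	return err;
}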
hybiReturnData(char *dst, int len, ws_ctx_t *wsctx, int *nWritten)
{
int nextState = WS_HYBI_STATE_ERR;
/* if we have something already decoded copy and return */
if (wsctx->readlen > 0) {
/* simply return what we have */
if (wsctx->readlen > len) {
ws_dbg("copy to %d bytes to dst buffer; readPos=%p, readLen=%d\n", len, wsctx->readPos, wsctx->readlen);
memcpy(dst, wsctx->readPos, len);
*nWritten = len;
wsctx->readlen -= len;
wsctx->readPos += len;
nextState = WS_HYBI_STATE_DATA_AVAILABLE;
} else {
ws_dbg("copy to %d bytes to dst buffer; readPos=%p, readLen=%d\n", wsctx->readlen, wsctx->readPos, wsctx->readlen);
memcpy(dst, wsctx->readPos, wsctx->readlen);
*nWritten = wsctx->readlen;
wsctx->readlen = 0;
wsctx->readPos = NULL;
if (hybiRemaining(wsctx) == 0) {
nextState = WS_HYBI_STATE_FRAME_COMPLETE;
} else {
nextState = WS_HYBI_STATE_DATA_NEEDED;
}
}
ws_dbg("after copy: readPos=%p, readLen=%d\n", wsctx->readPos, wsctx->readlen);
} else {
/* it may happen that we read some bytes but could not decode them,
* in that case, set errno to EAGAIN and return -1 */
nextState = wsctx->hybiDecodeState;
errno = EAGAIN;
*nWritten = -1;
}
return nextState;
} | 0 | [
"CWE-787"
] | libvncserver | 0cf1400c61850065de590d403f6d49e32882fd76 | 64,829,752,053,214,105,000,000,000,000,000,000,000 | 36 | fix crash because of unaligned accesses in hybiReadAndDecode() |
static void qtm_update_model(struct qtm_model *model) {
struct qtm_modelsym tmp;
int i, j;
if (--model->shiftsleft) {
for (i = model->entries - 1; i >= 0; i--) {
/* -1, not -2; the 0 entry saves this */
model->syms[i].cumfreq >>= 1;
if (model->syms[i].cumfreq <= model->syms[i+1].cumfreq) {
model->syms[i].cumfreq = model->syms[i+1].cumfreq + 1;
}
}
}
else {
model->shiftsleft = 50;
for (i = 0; i < model->entries; i++) {
/* no -1, want to include the 0 entry */
/* this converts cumfreqs into frequencies, then shifts right */
model->syms[i].cumfreq -= model->syms[i+1].cumfreq;
model->syms[i].cumfreq++; /* avoid losing things entirely */
model->syms[i].cumfreq >>= 1;
}
/* now sort by frequencies, decreasing order -- this must be an
* inplace selection sort, or a sort with the same (in)stability
* characteristics */
for (i = 0; i < model->entries - 1; i++) {
for (j = i + 1; j < model->entries; j++) {
if (model->syms[i].cumfreq < model->syms[j].cumfreq) {
tmp = model->syms[i];
model->syms[i] = model->syms[j];
model->syms[j] = tmp;
}
}
}
/* then convert frequencies back to cumfreq */
for (i = model->entries - 1; i >= 0; i--) {
model->syms[i].cumfreq += model->syms[i+1].cumfreq;
}
}
} | 0 | [] | clamav-devel | 158c35e81a25ea5fda55a2a7f62ea9fec2e883d9 | 327,001,534,910,619,350,000,000,000,000,000,000,000 | 42 | libclamav/mspack.c: improve unpacking of malformed cabinets (bb#1826) |
UsbHubInit (
IN USB_INTERFACE *HubIf
)
{
EFI_USB_HUB_DESCRIPTOR HubDesc;
USB_ENDPOINT_DESC *EpDesc;
USB_INTERFACE_SETTING *Setting;
EFI_USB_IO_PROTOCOL *UsbIo;
USB_DEVICE *HubDev;
EFI_STATUS Status;
UINT8 Index;
UINT8 NumEndpoints;
UINT16 Depth;
//
// Locate the interrupt endpoint for port change map
//
HubIf->IsHub = FALSE;
Setting = HubIf->IfSetting;
HubDev = HubIf->Device;
EpDesc = NULL;
NumEndpoints = Setting->Desc.NumEndpoints;
for (Index = 0; Index < NumEndpoints; Index++) {
ASSERT ((Setting->Endpoints != NULL) && (Setting->Endpoints[Index] != NULL));
EpDesc = Setting->Endpoints[Index];
if (USB_BIT_IS_SET (EpDesc->Desc.EndpointAddress, USB_ENDPOINT_DIR_IN) &&
(USB_ENDPOINT_TYPE (&EpDesc->Desc) == USB_ENDPOINT_INTERRUPT)) {
break;
}
}
if (Index == NumEndpoints) {
DEBUG (( EFI_D_ERROR, "UsbHubInit: no interrupt endpoint found for hub %d\n", HubDev->Address));
return EFI_DEVICE_ERROR;
}
Status = UsbHubReadDesc (HubDev, &HubDesc);
if (EFI_ERROR (Status)) {
DEBUG (( EFI_D_ERROR, "UsbHubInit: failed to read HUB descriptor %r\n", Status));
return Status;
}
HubIf->NumOfPort = HubDesc.NumPorts;
DEBUG (( EFI_D_INFO, "UsbHubInit: hub %d has %d ports\n", HubDev->Address,HubIf->NumOfPort));
//
// OK, set IsHub to TRUE. Now usb bus can handle this device
  // as a working HUB. If failed earlier, bus driver will not
// recognize it as a hub. Other parts of the bus should be able
// to work.
//
HubIf->IsHub = TRUE;
HubIf->HubApi = &mUsbHubApi;
HubIf->HubEp = EpDesc;
if (HubIf->Device->Speed == EFI_USB_SPEED_SUPER) {
Depth = (UINT16)(HubIf->Device->Tier - 1);
DEBUG ((EFI_D_INFO, "UsbHubInit: Set Hub Depth as 0x%x\n", Depth));
UsbHubCtrlSetHubDepth (HubIf->Device, Depth);
for (Index = 0; Index < HubDesc.NumPorts; Index++) {
UsbHubCtrlSetPortFeature (HubIf->Device, Index, USB_HUB_PORT_REMOTE_WAKE_MASK);
}
} else {
//
// Feed power to all the hub ports. It should be ok
// for both gang/individual powered hubs.
//
for (Index = 0; Index < HubDesc.NumPorts; Index++) {
UsbHubCtrlSetPortFeature (HubIf->Device, Index, (EFI_USB_PORT_FEATURE) USB_HUB_PORT_POWER);
}
//
// Update for the usb hub has no power on delay requirement
//
if (HubDesc.PwrOn2PwrGood > 0) {
gBS->Stall (HubDesc.PwrOn2PwrGood * USB_SET_PORT_POWER_STALL);
}
UsbHubAckHubStatus (HubIf->Device);
}
//
// Create an event to enumerate the hub's port. On
//
Status = gBS->CreateEvent (
EVT_NOTIFY_SIGNAL,
TPL_CALLBACK,
UsbHubEnumeration,
HubIf,
&HubIf->HubNotify
);
if (EFI_ERROR (Status)) {
DEBUG (( EFI_D_ERROR, "UsbHubInit: failed to create signal for hub %d - %r\n",
HubDev->Address, Status));
return Status;
}
//
// Create AsyncInterrupt to query hub port change endpoint
// periodically. If the hub ports are changed, hub will return
// changed port map from the interrupt endpoint. The port map
// must be able to hold (HubIf->NumOfPort + 1) bits (one bit for
// host change status).
//
UsbIo = &HubIf->UsbIo;
Status = UsbIo->UsbAsyncInterruptTransfer (
UsbIo,
EpDesc->Desc.EndpointAddress,
TRUE,
USB_HUB_POLL_INTERVAL,
HubIf->NumOfPort / 8 + 1,
UsbOnHubInterrupt,
HubIf
);
if (EFI_ERROR (Status)) {
DEBUG (( EFI_D_ERROR, "UsbHubInit: failed to queue interrupt transfer for hub %d - %r\n",
HubDev->Address, Status));
gBS->CloseEvent (HubIf->HubNotify);
HubIf->HubNotify = NULL;
return Status;
}
DEBUG (( EFI_D_INFO, "UsbHubInit: hub %d initialized\n", HubDev->Address));
return Status;
}
| 1 | [
"CWE-787"
] | edk2 | acebdf14c985c5c9f50b37ece0b15ada87767359 | 50,492,416,165,356,680,000,000,000,000,000,000,000 | 135 | MdeModulePkg UsbBusDxe: Fix wrong buffer length used to read hub desc
REF: https://bugzilla.tianocore.org/show_bug.cgi?id=973
The HUB descriptor has a variable length, but the code uses a stack
buffer (HubDesc in UsbHubInit) of fixed length
sizeof(EFI_USB_HUB_DESCRIPTOR) to hold the HUB descriptor data.
It uses a hard-coded length value (32, which is greater than
sizeof(EFI_USB_HUB_DESCRIPTOR)) for the SuperSpeed path, so there will
be a stack overflow when IOMMU is enabled, because the Unmap operation
will copy the data from the device buffer to the host buffer.
And it uses HubDesc->Length for the non-SuperSpeed path, so there will
be a stack overflow when HubDesc->Length is greater than
sizeof(EFI_USB_HUB_DESCRIPTOR).
The patch updates the code to use a big enough buffer to hold the
descriptor data.
The definition EFI_USB_SUPER_SPEED_HUB_DESCRIPTOR is wrong (the HubDelay
field should be of UINT16 type) and no code is using it, so the patch
removes it.
Cc: Jiewen Yao <[email protected]>
Cc: Ruiyu Ni <[email protected]>
Cc: Bret Barkelew <[email protected]>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Star Zeng <[email protected]>
Reviewed-by: Bret Barkelew <[email protected]> |
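A sketch of the buffer-sizing idea (read_desc and its framing are illustrative, not the edk2 code): since a descriptor's bLength is a single byte, a 256-byte buffer always suffices for the variable-length data, and the length is validated before copying:

#include <stdint.h>
#include <string.h>

static int read_desc(const uint8_t *wire, size_t wire_len,
                     uint8_t buf[256], size_t *out_len)
{
	size_t len;

	if (wire_len < 1)
		return -1;
	len = wire[0];			/* bLength: total descriptor size */
	if (len < 2 || len > wire_len)
		return -1;		/* malformed or truncated */
	memcpy(buf, wire, len);		/* len <= 255, so buf cannot overflow */
	*out_len = len;
	return 0;
}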
_gnutls_x509_verify_algorithm (gnutls_mac_algorithm_t * hash,
const gnutls_datum_t * signature,
const gnutls_x509_crt_t issuer)
{
bigint_t issuer_params[MAX_PUBLIC_PARAMS_SIZE];
opaque digest[MAX_HASH_SIZE];
gnutls_datum_t decrypted;
int issuer_params_size;
int digest_size;
int ret, i;
switch (gnutls_x509_crt_get_pk_algorithm (issuer, NULL))
{
case GNUTLS_PK_DSA:
if (hash)
*hash = GNUTLS_MAC_SHA1;
return 0;
case GNUTLS_PK_RSA:
issuer_params_size = MAX_PUBLIC_PARAMS_SIZE;
ret =
_gnutls_x509_crt_get_mpis (issuer, issuer_params,
&issuer_params_size);
if (ret < 0)
{
gnutls_assert ();
return ret;
}
ret =
_gnutls_pkcs1_rsa_decrypt (&decrypted, signature,
issuer_params, issuer_params_size, 1);
/* release allocated mpis */
for (i = 0; i < issuer_params_size; i++)
{
_gnutls_mpi_release (&issuer_params[i]);
}
if (ret < 0)
{
gnutls_assert ();
return ret;
}
digest_size = sizeof (digest);
if ((ret =
decode_ber_digest_info (&decrypted, hash, digest,
&digest_size)) != 0)
{
gnutls_assert ();
_gnutls_free_datum (&decrypted);
return ret;
}
_gnutls_free_datum (&decrypted);
if (digest_size != _gnutls_hash_get_algo_len (*hash))
{
gnutls_assert ();
return GNUTLS_E_ASN1_GENERIC_ERROR;
}
return 0;
default:
gnutls_assert ();
return GNUTLS_E_INTERNAL_ERROR;
}
} | 0 | [
"CWE-17"
] | gnutls | 897cbce62c0263a498088ac3e465aa5f05f8719c | 138,438,741,925,122,920,000,000,000,000,000,000,000 | 69 | Extended time verification to trusted certificate list as well. Introduced
the flag GNUTLS_VERIFY_DISABLE_TRUSTED_TIME_CHECKS that will prevent the
trusted certificate list verification. |
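A sketch of the extended time check using public GnuTLS APIs (crt_time_ok is a hypothetical helper): a trusted certificate that is not yet active or already expired is rejected unless the caller passed the disable flag:

#include <gnutls/x509.h>
#include <time.h>

static int crt_time_ok(gnutls_x509_crt_t crt, unsigned int flags)
{
	time_t now = time(NULL);

	if (flags & GNUTLS_VERIFY_DISABLE_TRUSTED_TIME_CHECKS)
		return 1;		/* caller opted out of the check */
	if (gnutls_x509_crt_get_activation_time(crt) > now)
		return 0;		/* not yet valid */
	if (gnutls_x509_crt_get_expiration_time(crt) < now)
		return 0;		/* expired (or time unreadable) */
	return 1;
}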
static void sig_chat_protocol_deinit(CHAT_PROTOCOL_REC *proto)
{
disconnect_servers(servers, proto->id);
disconnect_servers(lookup_servers, proto->id);
} | 0 | [
"CWE-20"
] | irssi-proxy | 85bbc05b21678e80423815d2ef1dfe26208491ab | 311,364,834,915,762,500,000,000,000,000,000,000,000 | 5 | Check if an SSL certificate matches the hostname of the server we are connecting to
git-svn-id: http://svn.irssi.org/repos/irssi/trunk@5104 dbcabf3a-b0e7-0310-adc4-f8d773084564 |
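
A hedged sketch of the hostname check the commit adds, written against a
modern OpenSSL (X509_check_host() exists only since OpenSSL 1.0.2, so this is
not the code irssi used at the time).

#include <openssl/ssl.h>
#include <openssl/x509v3.h>

/* Returns 1 when the peer certificate matches `hostname`, 0 otherwise. */
static int cert_matches_host(SSL *ssl, const char *hostname)
{
	X509 *cert = SSL_get_peer_certificate(ssl);
	int ok;

	if (cert == NULL)
		return 0; /* no certificate presented at all */
	ok = X509_check_host(cert, hostname, 0, 0, NULL) == 1;
	X509_free(cert);
	return ok;
}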
DLLIMPORT cfg_print_filter_func_t cfg_set_print_filter_func(cfg_t *cfg, cfg_print_filter_func_t pff)
{
cfg_print_filter_func_t old;
if (!cfg) {
errno = EINVAL;
return NULL;
}
old = cfg->pff;
cfg->pff = pff;
return old;
} | 0 | [] | libconfuse | d73777c2c3566fb2647727bb56d9a2295b81669b | 228,291,492,818,843,200,000,000,000,000,000,000,000 | 14 | Fix #163: unterminated username used with getpwnam()
Signed-off-by: Joachim Wiberg <[email protected]> |
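
A minimal sketch of the class of fix: copy a possibly-unterminated token into
a bounded, NUL-terminated buffer before handing it to getpwnam(). Illustrative
only; the function name and the MAX_NAME bound are assumptions, not
libconfuse's code.

#include <pwd.h>
#include <string.h>

#define MAX_NAME 256 /* assumed bound; a real system limit should be used */

struct passwd *lookup_user(const char *name, size_t name_len)
{
	char buf[MAX_NAME];

	if (name == NULL || name_len >= sizeof(buf))
		return NULL; /* reject missing or over-long names */
	memcpy(buf, name, name_len);
	buf[name_len] = '\0'; /* guarantee termination for getpwnam() */
	return getpwnam(buf);
}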
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
struct kvm_vcpu *vcpu = filp->private_data;
debugfs_remove_recursive(vcpu->debugfs_dentry);
kvm_put_kvm(vcpu->kvm);
return 0;
} | 0 | [
"CWE-416"
] | linux | 0774a964ef561b7170d8d1b1bfe6f88002b6d219 | 46,885,687,326,495,000,000,000,000,000,000,000,000 | 8 | KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Acked-by: Christian Borntraeger <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
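
A userspace sketch of the bug class fixed above: a cached "last used" index
into a shrinkable array must be re-validated when entries are deleted, or the
stale index reads out of bounds. All types here are illustrative, not KVM's
memslot structures.

struct slots {
	int used;     /* number of valid entries */
	int lru;      /* cached index of the last hit */
	int data[64];
};

int lookup(struct slots *s, int want)
{
	if (s->used == 0)
		return -1;  /* explicitly handle "no used slots" */
	if (s->lru >= s->used)
		s->lru = 0; /* reset a cached index made stale by deletion */
	if (s->data[s->lru] == want)
		return s->lru;
	for (int i = 0; i < s->used; i++)
		if (s->data[i] == want)
			return s->lru = i;
	return -1;
}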
static uint build_bitmap_for_nested_joins(List<TABLE_LIST> *join_list,
uint first_unused)
{
List_iterator<TABLE_LIST> li(*join_list);
TABLE_LIST *table;
DBUG_ENTER("build_bitmap_for_nested_joins");
while ((table= li++))
{
NESTED_JOIN *nested_join;
if ((nested_join= table->nested_join))
{
/*
It is guaranteed by simplify_joins() function that a nested join
that has only one child represents a single table VIEW (and the child
is an underlying table). We don't assign bits to such nested join
structures because
1. it is redundant (a "sequence" of one table cannot be interleaved
with anything)
2. we could run out bits in nested_join_map otherwise.
*/
if (nested_join->n_tables != 1)
{
/* Don't assign bits to sj-nests */
if (table->on_expr)
nested_join->nj_map= (nested_join_map) 1 << first_unused++;
first_unused= build_bitmap_for_nested_joins(&nested_join->join_list,
first_unused);
}
}
}
DBUG_RETURN(first_unused);
} | 0 | [] | server | ff77a09bda884fe6bf3917eb29b9d3a2f53f919b | 311,138,576,246,249,200,000,000,000,000,000,000,000 | 32 | MDEV-22464 Server crash on UPDATE with nested subquery
Uninitialized ref_pointer_array[] because setup_fields() got an empty
fields list. mysql_multi_update() for some reason does that by
substituting the fields list with the empty total_list for the
mysql_select() call (this looks like a wrong merge, since total_list is
not used anywhere else and is always empty). The fix would be to restore
the original fields list, but that fails the update_use_source.test
case:
--error ER_BAD_FIELD_ERROR
update v1 set t1c1=2 order by 1;
Actually, not failing the above seems to be OK.
The other fix would be to keep resolve_in_select_list false (and that
keeps outer context from being resolved in
Item_ref::fix_fields()). This fix is more consistent with how SELECT
behaves:
--error ER_SUBQUERY_NO_1_ROW
select a from t1 where a= (select 2 from t1 having (a = 3));
So this patch implements this fix. |
static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
int order = huge_page_order(h);
gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
return __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
} | 0 | [
"CWE-703"
] | linux | 5af10dfd0afc559bb4b0f7e3e8227a1578333995 | 55,120,697,790,308,225,000,000,000,000,000,000,000 | 10 | userfaultfd: hugetlbfs: remove superfluous page unlock in VM_SHARED case
huge_add_to_page_cache->add_to_page_cache implicitly unlocks the page
before returning in case of errors.
The error was -EEXIST, returned by running UFFDIO_COPY on a non-hole
offset of a VM_SHARED hugetlbfs mapping. A userland bug triggered it, and
the kernel must cope by returning -EEXIST from ioctl(UFFDIO_COPY) as
expected.
page dumped because: VM_BUG_ON_PAGE(!PageLocked(page))
kernel BUG at mm/filemap.c:964!
invalid opcode: 0000 [#1] SMP
CPU: 1 PID: 22582 Comm: qemu-system-x86 Not tainted 4.11.11-300.fc26.x86_64 #1
RIP: unlock_page+0x4a/0x50
Call Trace:
hugetlb_mcopy_atomic_pte+0xc0/0x320
mcopy_atomic+0x96f/0xbe0
userfaultfd_ioctl+0x218/0xe90
do_vfs_ioctl+0xa5/0x600
SyS_ioctl+0x79/0x90
entry_SYSCALL_64_fastpath+0x1a/0xa9
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Andrea Arcangeli <[email protected]>
Tested-by: Maxime Coquelin <[email protected]>
Reviewed-by: Mike Kravetz <[email protected]>
Cc: "Dr. David Alan Gilbert" <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Alexey Perevalov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
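
A userspace pthread analogue of the double-unlock bug fixed above: when a
helper releases the lock on its own error path, the caller must not unlock
again. The API is illustrative, not the hugetlbfs code.

#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

/* On failure this helper drops the lock itself, like
 * huge_add_to_page_cache() implicitly unlocking the page on error. */
static int insert_locked(int key)
{
	if (key < 0) {
		pthread_mutex_unlock(&m); /* error path releases the lock */
		return -1;
	}
	return 0;
}

int insert(int key)
{
	pthread_mutex_lock(&m);
	if (insert_locked(key) != 0)
		return -1; /* lock already released; unlocking here would be the bug */
	pthread_mutex_unlock(&m);
	return 0;
}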
static void addrconf_sit_config(struct net_device *dev)
{
struct inet6_dev *idev;
ASSERT_RTNL();
/*
* Configure the tunnel with one of our IPv4
* addresses... we should configure all of
* our v4 addrs in the tunnel
*/
if ((idev = ipv6_find_idev(dev)) == NULL) {
printk(KERN_DEBUG "init sit: add_dev failed\n");
return;
}
sit_add_v4_addrs(idev);
if (dev->flags&IFF_POINTOPOINT) {
addrconf_add_mroute(dev);
addrconf_add_lroute(dev);
} else
sit_route_add(dev);
} | 0 | [
"CWE-200"
] | linux-2.6 | 8a47077a0b5aa2649751c46e7a27884e6686ccbf | 19,543,124,320,650,119,000,000,000,000,000,000,000 | 25 | [NETLINK]: Missing padding fields in dumped structures
Plug holes with padding fields and initialize them to zero.
Signed-off-by: Patrick McHardy <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
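
A minimal sketch of the CWE-200 pattern the commit plugs: a struct copied out
to user space can leak stack bytes through compiler-inserted padding unless it
is zeroed first. The struct layout is illustrative, not the actual netlink
message.

#include <stdint.h>
#include <string.h>

struct ifa_msg {
	uint8_t  family;  /* typically followed by 3 padding bytes */
	uint32_t flags;
};

void fill_msg(struct ifa_msg *m, uint8_t family, uint32_t flags)
{
	memset(m, 0, sizeof(*m)); /* zero the padding holes before copy-out */
	m->family = family;
	m->flags  = flags;
}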
netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
unsigned int len = skb->len;
int ret;
const struct macvlan_dev *vlan = netdev_priv(dev);
ret = macvlan_queue_xmit(skb, dev);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
struct macvlan_pcpu_stats *pcpu_stats;
pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
pcpu_stats->tx_packets++;
pcpu_stats->tx_bytes += len;
u64_stats_update_end(&pcpu_stats->syncp);
} else {
this_cpu_inc(vlan->pcpu_stats->tx_dropped);
}
return ret;
} | 0 | [
"CWE-703",
"CWE-264"
] | linux | 550fd08c2cebad61c548def135f67aba284c6162 | 314,075,554,165,256,430,000,000,000,000,000,000,000 | 21 | net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, we are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs). There are a handful of drivers that violate this assumption, of
course, and they need to be fixed up. This patch identifies those drivers and
marks them as not being able to support the safe transmission of skbs by
clearing the IFF_TX_SKB_SHARING flag in priv_flags.
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
scanner_detect_eval_call (parser_context_t *context_p, /**< context */
scanner_context_t *scanner_context_p) /**< scanner context */
{
if (context_p->token.keyword_type == LEXER_KEYW_EVAL
&& lexer_check_next_character (context_p, LIT_CHAR_LEFT_PAREN))
{
#if JERRY_ESNEXT
const uint16_t flags = (uint16_t) (SCANNER_LITERAL_POOL_CAN_EVAL | SCANNER_LITERAL_POOL_HAS_SUPER_REFERENCE);
#else /* !JERRY_ESNEXT */
const uint16_t flags = SCANNER_LITERAL_POOL_CAN_EVAL;
#endif /* JERRY_ESNEXT */
scanner_context_p->active_literal_pool_p->status_flags |= flags;
}
} /* scanner_detect_eval_call */ | 0 | [
"CWE-416"
] | jerryscript | 3bcd48f72d4af01d1304b754ef19fe1a02c96049 | 251,988,352,444,990,720,000,000,000,000,000,000,000 | 15 | Improve parse_identifier (#4691)
ASCII string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected] |
FstringParser_ConcatFstring(FstringParser *state, const char **str,
const char *end, int raw, int recurse_lvl,
struct compiling *c, const node *n)
{
FstringParser_check_invariants(state);
state->fmode = 1;
/* Parse the f-string. */
while (1) {
PyObject *literal = NULL;
expr_ty expression = NULL;
/* If there's a zero length literal in front of the
expression, literal will be NULL. If we're at the end of
the f-string, expression will be NULL (unless result == 1,
see below). */
int result = fstring_find_literal_and_expr(str, end, raw, recurse_lvl,
&literal, &expression,
c, n);
if (result < 0)
return -1;
/* Add the literal, if any. */
if (!literal) {
/* Do nothing. Just leave last_str alone (and possibly
NULL). */
} else if (!state->last_str) {
/* Note that the literal can be zero length, if the
input string is "\\\n" or "\\\r", among others. */
state->last_str = literal;
literal = NULL;
} else {
/* We have a literal, concatenate it. */
assert(PyUnicode_GET_LENGTH(literal) != 0);
if (FstringParser_ConcatAndDel(state, literal) < 0)
return -1;
literal = NULL;
}
/* We've dealt with the literal now. It can't be leaked on further
errors. */
assert(literal == NULL);
/* See if we should just loop around to get the next literal
and expression, while ignoring the expression this
time. This is used for un-doubling braces, as an
optimization. */
if (result == 1)
continue;
if (!expression)
/* We're done with this f-string. */
break;
/* We know we have an expression. Convert any existing string
to a Constant node. */
if (!state->last_str) {
/* Do nothing. No previous literal. */
} else {
/* Convert the existing last_str literal to a Constant node. */
expr_ty str = make_str_node_and_del(&state->last_str, c, n);
if (!str || ExprList_Append(&state->expr_list, str) < 0)
return -1;
}
if (ExprList_Append(&state->expr_list, expression) < 0)
return -1;
}
/* If recurse_lvl is zero, then we must be at the end of the
string. Otherwise, we must be at a right brace. */
if (recurse_lvl == 0 && *str < end-1) {
ast_error(c, n, "f-string: unexpected end of string");
return -1;
}
if (recurse_lvl != 0 && **str != '}') {
ast_error(c, n, "f-string: expecting '}'");
return -1;
}
FstringParser_check_invariants(state);
return 0;
} | 0 | [
"CWE-125"
] | cpython | a4d78362397fc3bced6ea80fbc7b5f4827aec55e | 119,946,009,551,837,080,000,000,000,000,000,000,000 | 84 | bpo-36495: Fix two out-of-bounds array reads (GH-12641)
Research and fix by @bradlarsen. |
PizCompressor::uncompress (const char *inPtr,
int inSize,
int minY,
const char *&outPtr)
{
return uncompress (inPtr,
inSize,
Box2i (V2i (_minX, minY),
V2i (_maxX, minY + numScanLines() - 1)),
outPtr);
} | 0 | [
"CWE-125"
] | openexr | e79d2296496a50826a15c667bf92bdc5a05518b4 | 123,434,969,885,527,780,000,000,000,000,000,000,000 | 11 | fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]> |
static int ipv4_from_asc(unsigned char *v4, const char *in)
{
int a0, a1, a2, a3;
if (sscanf(in, "%d.%d.%d.%d", &a0, &a1, &a2, &a3) != 4)
return 0;
if ((a0 < 0) || (a0 > 255) || (a1 < 0) || (a1 > 255)
|| (a2 < 0) || (a2 > 255) || (a3 < 0) || (a3 > 255))
return 0;
v4[0] = a0;
v4[1] = a1;
v4[2] = a2;
v4[3] = a3;
return 1;
} | 0 | [
"CWE-125"
] | openssl | bb4d2ed4091408404e18b3326e3df67848ef63d0 | 277,170,269,917,990,170,000,000,000,000,000,000,000 | 14 | Fix append_ia5 function to not assume NUL terminated strings
ASN.1 strings may not be NUL terminated. Don't assume they are.
CVE-2021-3712
Reviewed-by: Viktor Dukhovni <[email protected]>
Reviewed-by: Paul Dale <[email protected]> |
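
A hedged sketch of the fix idea: treat the ASN.1 string as a length-delimited
buffer and build an explicitly terminated copy instead of assuming the data
ends in a NUL. This is an illustrative helper, not the actual append_ia5()
patch, and it relies on accessors available since OpenSSL 1.1.0.

#include <openssl/asn1.h>
#include <openssl/crypto.h>
#include <string.h>

/* Caller must OPENSSL_free() the result. */
static char *ia5_to_cstring(const ASN1_STRING *str)
{
    int len = ASN1_STRING_length(str);
    const unsigned char *data = ASN1_STRING_get0_data(str);
    char *out;

    if (len < 0 || data == NULL)
        return NULL;
    out = OPENSSL_malloc((size_t)len + 1);
    if (out == NULL)
        return NULL;
    memcpy(out, data, (size_t)len);
    out[len] = '\0'; /* explicit termination, never assumed */
    return out;
}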
xmlMallocLoc(size_t size, const char * file, int line)
{
MEMHDR *p;
void *ret;
if (!xmlMemInitialized) xmlInitMemory();
#ifdef DEBUG_MEMORY
xmlGenericError(xmlGenericErrorContext,
"Malloc(%d)\n",size);
#endif
TEST_POINT
if (size > (MAX_SIZE_T - RESERVE_SIZE)) {
xmlGenericError(xmlGenericErrorContext,
"xmlMallocLoc : Unsigned overflow\n");
xmlMemoryDump();
return(NULL);
}
p = (MEMHDR *) malloc(RESERVE_SIZE+size);
if (!p) {
xmlGenericError(xmlGenericErrorContext,
"xmlMallocLoc : Out of free space\n");
xmlMemoryDump();
return(NULL);
}
p->mh_tag = MEMTAG;
p->mh_size = size;
p->mh_type = MALLOC_TYPE;
p->mh_file = file;
p->mh_line = line;
xmlMutexLock(xmlMemMutex);
p->mh_number = ++block;
debugMemSize += size;
debugMemBlocks++;
if (debugMemSize > debugMaxMemSize) debugMaxMemSize = debugMemSize;
#ifdef MEM_LIST
debugmem_list_add(p);
#endif
xmlMutexUnlock(xmlMemMutex);
#ifdef DEBUG_MEMORY
xmlGenericError(xmlGenericErrorContext,
"Malloc(%d) Ok\n",size);
#endif
if (xmlMemStopAtBlock == p->mh_number) xmlMallocBreakpoint();
ret = HDR_2_CLIENT(p);
if (xmlMemTraceBlockAt == ret) {
xmlGenericError(xmlGenericErrorContext,
"%p : Malloc(%lu) Ok\n", xmlMemTraceBlockAt,
(long unsigned)size);
xmlMallocBreakpoint();
}
TEST_POINT
return(ret);
} | 0 | [
"CWE-787"
] | libxml2 | 897dffbae322b46b83f99a607d527058a72c51ed | 186,468,362,608,309,900,000,000,000,000,000,000,000 | 63 | Check for integer overflow in memory debug code
Fixes bug 783026.
Thanks to Pranjal Jumde for the report. |
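
A minimal sketch of the overflow guard shown in xmlMallocLoc() above: reject
any request where adding the bookkeeping reserve to the caller's size would
wrap around SIZE_MAX before it reaches malloc(). The HDR_SIZE constant is an
assumed stand-in for RESERVE_SIZE.

#include <stdint.h>
#include <stdlib.h>

#define HDR_SIZE 32 /* assumed header reserve, like RESERVE_SIZE */

void *debug_malloc(size_t size)
{
    if (size > SIZE_MAX - HDR_SIZE)
        return NULL; /* size + HDR_SIZE would overflow */
    return malloc(size + HDR_SIZE);
}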