| func (string, length 0-484k) | target (int64, 0-1) | cwe (list, length 0-4) | project (string, 799 classes) | commit_id (string, length 40) | hash (float64) | size (int64, 1-24k) | message (string, length 0-13.3k) |
|---|---|---|---|---|---|---|---|
ZEND_VM_HOT_TYPE_SPEC_HANDLER(ZEND_FETCH_DIM_R, (!(op2_info & (MAY_BE_UNDEF|MAY_BE_NULL|MAY_BE_STRING|MAY_BE_ARRAY|MAY_BE_OBJECT|MAY_BE_RESOURCE|MAY_BE_REF))), ZEND_FETCH_DIM_R_INDEX, CONST|TMPVAR|CV, CONST|TMPVARCV, SPEC(NO_CONST_CONST))
{
USE_OPLINE
zend_free_op free_op1, free_op2;
zval *container, *dim, *value;
zend_long offset;
HashTable *ht;
container = GET_OP1_ZVAL_PTR_UNDEF(BP_VAR_R);
dim = GET_OP2_ZVAL_PTR_UNDEF(BP_VAR_R);
if (EXPECTED(Z_TYPE_P(container) == IS_ARRAY)) {
ZEND_VM_C_LABEL(fetch_dim_r_index_array):
if (EXPECTED(Z_TYPE_P(dim) == IS_LONG)) {
offset = Z_LVAL_P(dim);
} else {
offset = zval_get_long(dim);
}
ht = Z_ARRVAL_P(container);
ZEND_HASH_INDEX_FIND(ht, offset, value, ZEND_VM_C_LABEL(fetch_dim_r_index_undef));
ZVAL_COPY_DEREF(EX_VAR(opline->result.var), value);
if (OP1_TYPE & (IS_TMP_VAR|IS_VAR)) {
SAVE_OPLINE();
FREE_OP1();
ZEND_VM_NEXT_OPCODE_CHECK_EXCEPTION();
} else {
ZEND_VM_NEXT_OPCODE();
}
} else if (OP1_TYPE != IS_CONST && EXPECTED(Z_TYPE_P(container) == IS_REFERENCE)) {
container = Z_REFVAL_P(container);
if (EXPECTED(Z_TYPE_P(container) == IS_ARRAY)) {
ZEND_VM_C_GOTO(fetch_dim_r_index_array);
} else {
ZEND_VM_C_GOTO(fetch_dim_r_index_slow);
}
} else {
ZEND_VM_C_LABEL(fetch_dim_r_index_slow):
SAVE_OPLINE();
if (OP2_TYPE == IS_CONST && Z_EXTRA_P(dim) == ZEND_EXTRA_VALUE) {
dim++;
}
zend_fetch_dimension_address_read_R_slow(container, dim OPLINE_CC EXECUTE_DATA_CC);
FREE_OP1();
ZEND_VM_NEXT_OPCODE_CHECK_EXCEPTION();
}
ZEND_VM_C_LABEL(fetch_dim_r_index_undef):
ZVAL_NULL(EX_VAR(opline->result.var));
SAVE_OPLINE();
zend_undefined_offset(offset);
FREE_OP1();
ZEND_VM_NEXT_OPCODE_CHECK_EXCEPTION();
} | 0 | [
"CWE-787"
]
| php-src | f1ce8d5f5839cb2069ea37ff424fb96b8cd6932d | 279,226,911,185,230,380,000,000,000,000,000,000,000 | 52 | Fix #73122: Integer Overflow when concatenating strings
We must avoid integer overflows in memory allocations, so we introduce
an additional check in the VM, and bail out in the rare case of an
overflow. Since the recent fix for bug #74960 still doesn't catch all
possible overflows, we fix that right away. |
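The commit message above describes bailing out when a memory-allocation size would overflow rather than allocating a too-small buffer. The sketch below is a minimal, hypothetical illustration of that pattern in plain C; it is not the php-src patch, and `concat_checked` is an invented helper.

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: refuse to concatenate when len1 + len2 + 1 would
 * wrap around SIZE_MAX, instead of allocating a truncated buffer. */
static char *concat_checked(const char *a, size_t len1,
                            const char *b, size_t len2)
{
    if (len1 > SIZE_MAX - len2 || len1 + len2 > SIZE_MAX - 1)
        return NULL;                     /* would overflow: bail out */
    char *out = malloc(len1 + len2 + 1);
    if (out == NULL)
        return NULL;
    memcpy(out, a, len1);
    memcpy(out + len1, b, len2);
    out[len1 + len2] = '\0';
    return out;
}
```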
close_lb(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED
LINE *line = PG_GETARG_LINE_P(0);
BOX *box = PG_GETARG_BOX_P(1);
#endif
/* think about this one for a while */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("function \"close_lb\" not implemented")));
PG_RETURN_NULL();
} | 0 | [
"CWE-703",
"CWE-189"
]
| postgres | 31400a673325147e1205326008e32135a78b4d8a | 142,125,185,426,255,750,000,000,000,000,000,000,000 | 14 | Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064 |
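The PostgreSQL fix above concerns allocation sizes that wrap when computed from large inputs. A minimal sketch of such a guard, assuming a generic `count * elem_size` allocation (this is illustrative, not the actual path_in() code):

```c
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical helper: reject a count that would make count * elem_size
 * wrap around before the allocator ever sees it. */
static void *alloc_array_checked(size_t count, size_t elem_size)
{
    if (elem_size != 0 && count > SIZE_MAX / elem_size)
        return NULL;                 /* allocation size would wrap */
    return malloc(count * elem_size);
}
```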
static int __devinit airo_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pent)
{
struct net_device *dev;
if (pci_enable_device(pdev))
return -ENODEV;
pci_set_master(pdev);
if (pdev->device == 0x5000 || pdev->device == 0xa504)
dev = _init_airo_card(pdev->irq, pdev->resource[0].start, 0, pdev, &pdev->dev);
else
dev = _init_airo_card(pdev->irq, pdev->resource[2].start, 0, pdev, &pdev->dev);
if (!dev) {
pci_disable_device(pdev);
return -ENODEV;
}
pci_set_drvdata(pdev, dev);
return 0;
} | 0 | [
"CWE-703",
"CWE-264"
]
| linux | 550fd08c2cebad61c548def135f67aba284c6162 | 280,544,295,079,812,200,000,000,000,000,000,000,000 | 21 | net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, we are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs). There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearing the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
int Field_temporal_with_date::store_decimal(const my_decimal *d)
{
ulonglong nr;
ulong sec_part;
int error;
MYSQL_TIME ltime;
longlong tmp;
THD *thd= get_thd();
ErrConvDecimal str(d);
if (my_decimal2seconds(d, &nr, &sec_part))
{
tmp= -1;
error= 2;
}
else
tmp= number_to_datetime(nr, sec_part, <ime, sql_mode_for_dates(thd),
&error);
return store_TIME_with_warning(<ime, &str, error, tmp != -1);
} | 0 | [
"CWE-416",
"CWE-703"
]
| server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 104,137,209,528,352,570,000,000,000,000,000,000,000 | 21 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
njs_function_instance_length(njs_vm_t *vm, njs_object_prop_t *prop,
njs_value_t *value, njs_value_t *setval, njs_value_t *retval)
{
njs_object_t *proto;
njs_function_t *function;
proto = njs_object(value);
do {
if (njs_fast_path(proto->type == NJS_FUNCTION)) {
break;
}
proto = proto->__proto__;
} while (proto != NULL);
if (njs_slow_path(proto == NULL)) {
njs_internal_error(vm, "no function in proto chain");
return NJS_ERROR;
}
function = (njs_function_t *) proto;
njs_set_number(retval, function->args_count);
return NJS_OK;
} | 0 | [
"CWE-416"
]
| njs | 6a07c2156a07ef307b6dcf3c2ca8571a5f1af7a6 | 165,071,525,453,414,280,000,000,000,000,000,000,000 | 27 | Fixed recursive async function calls.
Previously, PromiseCapability record was stored (function->context)
directly in function object during a function invocation. This is
not correct, because PromiseCapability record should be linked to
current execution context. As a result, function->context is
overwritten with consecutive recursive calls which results in
use-after-free.
This closes #451 issue on Github. |
static void hso_free_tiomget(struct hso_serial *serial)
{
struct hso_tiocmget *tiocmget;
if (!serial)
return;
tiocmget = serial->tiocmget;
if (tiocmget) {
usb_free_urb(tiocmget->urb);
tiocmget->urb = NULL;
serial->tiocmget = NULL;
kfree(tiocmget);
}
} | 0 | [
"CWE-125"
]
| linux | 5146f95df782b0ac61abde36567e718692725c89 | 170,128,595,347,890,040,000,000,000,000,000,000,000 | 13 | USB: hso: Fix OOB memory access in hso_probe/hso_get_config_data
The function hso_probe reads if_num from the USB device (as an u8) and uses
it without a length check to index an array, resulting in an OOB memory read
in hso_probe or hso_get_config_data.
Add a length check for both locations and update hso_probe to bail on
error.
This issue has been assigned CVE-2018-19985.
Reported-by: Hui Peng <[email protected]>
Reported-by: Mathias Payer <[email protected]>
Signed-off-by: Hui Peng <[email protected]>
Signed-off-by: Mathias Payer <[email protected]>
Reviewed-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
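The hso fix adds a bounds check on a device-supplied index before it is used to address a fixed-size table. A stubbed sketch of that check follows; the helper name and table shape are assumptions, not the driver's actual code.

```c
#include <stddef.h>

/* Hypothetical: validate an interface number read from the device
 * before indexing a fixed-size table, instead of trusting it. */
static int lookup_port(const int *port_table, size_t table_len,
                       unsigned char if_num)
{
    if (if_num >= table_len)
        return -1;          /* out of range: bail out, no OOB read */
    return port_table[if_num];
}
```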
MagickExport void *FileToBlob(const char *filename,const size_t extent,
size_t *length,ExceptionInfo *exception)
{
int
file;
MagickBooleanType
status;
MagickOffsetType
offset;
register size_t
i;
ssize_t
count;
struct stat
attributes;
unsigned char
*blob;
void
*map;
assert(filename != (const char *) NULL);
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",filename);
assert(exception != (ExceptionInfo *) NULL);
*length=0;
status=IsRightsAuthorized(PathPolicyDomain,ReadPolicyRights,filename);
if (status == MagickFalse)
{
errno=EPERM;
(void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
"NotAuthorized","`%s'",filename);
return(NULL);
}
file=fileno(stdin);
if (LocaleCompare(filename,"-") != 0)
{
status=GetPathAttributes(filename,&attributes);
if ((status == MagickFalse) || (S_ISDIR(attributes.st_mode) != 0))
{
ThrowFileException(exception,BlobError,"UnableToReadBlob",filename);
return(NULL);
}
file=open_utf8(filename,O_RDONLY | O_BINARY,0);
}
if (file == -1)
{
ThrowFileException(exception,BlobError,"UnableToOpenFile",filename);
return(NULL);
}
offset=(MagickOffsetType) lseek(file,0,SEEK_END);
count=0;
if ((file == fileno(stdin)) || (offset < 0) ||
(offset != (MagickOffsetType) ((ssize_t) offset)))
{
size_t
quantum;
struct stat
file_stats;
/*
Stream is not seekable.
*/
offset=(MagickOffsetType) lseek(file,0,SEEK_SET);
quantum=(size_t) MagickMaxBufferExtent;
if ((fstat(file,&file_stats) == 0) && (file_stats.st_size > 0))
quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
blob=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*blob));
for (i=0; blob != (unsigned char *) NULL; i+=count)
{
count=read(file,blob+i,quantum);
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
if (~((size_t) i) < (quantum+1))
{
blob=(unsigned char *) RelinquishMagickMemory(blob);
break;
}
blob=(unsigned char *) ResizeQuantumMemory(blob,i+quantum+1,
sizeof(*blob));
if ((size_t) (i+count) >= extent)
break;
}
if (LocaleCompare(filename,"-") != 0)
file=close(file);
if (blob == (unsigned char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",filename);
return(NULL);
}
if (file == -1)
{
blob=(unsigned char *) RelinquishMagickMemory(blob);
ThrowFileException(exception,BlobError,"UnableToReadBlob",filename);
return(NULL);
}
*length=(size_t) MagickMin(i+count,extent);
blob[*length]='\0';
return(blob);
}
*length=(size_t) MagickMin(offset,(MagickOffsetType)
MagickMin(extent,(size_t) SSIZE_MAX));
blob=(unsigned char *) NULL;
if (~(*length) >= (MagickPathExtent-1))
blob=(unsigned char *) AcquireQuantumMemory(*length+MagickPathExtent,
sizeof(*blob));
if (blob == (unsigned char *) NULL)
{
file=close(file);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",filename);
return(NULL);
}
map=MapBlob(file,ReadMode,0,*length);
if (map != (unsigned char *) NULL)
{
(void) memcpy(blob,map,*length);
(void) UnmapBlob(map,*length);
}
else
{
(void) lseek(file,0,SEEK_SET);
for (i=0; i < *length; i+=count)
{
count=read(file,blob+i,(size_t) MagickMin(*length-i,(size_t)
SSIZE_MAX));
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
if (i < *length)
{
file=close(file)-1;
blob=(unsigned char *) RelinquishMagickMemory(blob);
ThrowFileException(exception,BlobError,"UnableToReadBlob",filename);
return(NULL);
}
}
blob[*length]='\0';
if (LocaleCompare(filename,"-") != 0)
file=close(file);
if (file == -1)
{
blob=(unsigned char *) RelinquishMagickMemory(blob);
ThrowFileException(exception,BlobError,"UnableToReadBlob",filename);
}
return(blob);
} | 0 | [
"CWE-416",
"CWE-399"
]
| ImageMagick | c5d012a46ae22be9444326aa37969a3f75daa3ba | 155,656,929,466,982,450,000,000,000,000,000,000,000 | 162 | https://github.com/ImageMagick/ImageMagick6/issues/43 |
bool is_outer_table(TABLE_LIST *table, SELECT_LEX *select)
{
DBUG_ASSERT(table->select_lex != select);
TABLE_LIST *tl;
if (table->belong_to_view &&
table->belong_to_view->select_lex == select)
return FALSE;
for (tl= select->master_unit()->derived;
tl && tl->is_merged_derived();
select= tl->select_lex, tl= select->master_unit()->derived)
{
if (tl->select_lex == table->select_lex)
return FALSE;
}
return TRUE;
} | 0 | []
| server | b000e169562697aa072600695d4f0c0412f94f4f | 337,113,837,319,431,800,000,000,000,000,000,000,000 | 18 | Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM is attempted compared with an empty comparator.
The fix is to disable the caching of Item_name_const item. |
iperf_stats_callback(struct iperf_test *test)
{
struct iperf_stream *sp;
struct iperf_stream_result *rp = NULL;
struct iperf_interval_results *irp, temp;
temp.omitted = test->omitting;
SLIST_FOREACH(sp, &test->streams, streams) {
rp = sp->result;
temp.bytes_transferred = test->sender ? rp->bytes_sent_this_interval : rp->bytes_received_this_interval;
irp = TAILQ_LAST(&rp->interval_results, irlisthead);
/* result->end_time contains timestamp of previous interval */
if ( irp != NULL ) /* not the 1st interval */
memcpy(&temp.interval_start_time, &rp->end_time, sizeof(struct timeval));
else /* or use timestamp from beginning */
memcpy(&temp.interval_start_time, &rp->start_time, sizeof(struct timeval));
/* now save time of end of this interval */
gettimeofday(&rp->end_time, NULL);
memcpy(&temp.interval_end_time, &rp->end_time, sizeof(struct timeval));
temp.interval_duration = timeval_diff(&temp.interval_start_time, &temp.interval_end_time);
//temp.interval_duration = timeval_diff(&temp.interval_start_time, &temp.interval_end_time);
if (test->protocol->id == Ptcp) {
if ( has_tcpinfo()) {
save_tcpinfo(sp, &temp);
if (test->sender && test->sender_has_retransmits) {
long total_retrans = get_total_retransmits(&temp);
temp.interval_retrans = total_retrans - rp->stream_prev_total_retrans;
rp->stream_retrans += temp.interval_retrans;
rp->stream_prev_total_retrans = total_retrans;
temp.snd_cwnd = get_snd_cwnd(&temp);
}
}
} else {
if (irp == NULL) {
temp.interval_packet_count = sp->packet_count;
temp.interval_outoforder_packets = sp->outoforder_packets;
temp.interval_cnt_error = sp->cnt_error;
} else {
temp.interval_packet_count = sp->packet_count - irp->packet_count;
temp.interval_outoforder_packets = sp->outoforder_packets - irp->outoforder_packets;
temp.interval_cnt_error = sp->cnt_error - irp->cnt_error;
}
temp.packet_count = sp->packet_count;
temp.jitter = sp->jitter;
temp.outoforder_packets = sp->outoforder_packets;
temp.cnt_error = sp->cnt_error;
}
add_to_interval_list(rp, &temp);
rp->bytes_sent_this_interval = rp->bytes_received_this_interval = 0;
}
} | 0 | [
"CWE-120",
"CWE-119",
"CWE-787"
]
| iperf | 91f2fa59e8ed80dfbf400add0164ee0e508e412a | 209,979,619,579,759,160,000,000,000,000,000,000,000 | 54 | Fix a buffer overflow / heap corruption issue that could occur if a
malformed JSON string was passed on the control channel. This issue,
present in the cJSON library, was already fixed upstream, so was
addressed here in iperf3 by importing a newer version of cJSON (plus
local ESnet modifications).
Discovered and reported by Dave McDaniel, Cisco Talos.
Based on a patch by @dopheide-esnet, with input from @DaveGamble.
Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001,
CVE-2016-4303
(cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40)
Signed-off-by: Bruce A. Mah <[email protected]> |
bool ThreadCommand::classof(const LoadCommand* cmd) {
// This must be sync with BinaryParser.tcc
const LOAD_COMMAND_TYPES type = cmd->command();
return type == LOAD_COMMAND_TYPES::LC_THREAD ||
type == LOAD_COMMAND_TYPES::LC_UNIXTHREAD;
} | 0 | [
"CWE-787"
]
| LIEF | 0033b6312fd311b2e45e379c04a83d77c1e58578 | 99,483,532,418,231,670,000,000,000,000,000,000,000 | 6 | Resolve #767 |
TEST_F(ExprMatchTest, GtWithLHSFieldPathMatchesCorrectly) {
createMatcher(fromjson("{$expr: {$gt: ['$x', 3]}}"));
ASSERT_TRUE(matches(BSON("x" << 10)));
ASSERT_FALSE(matches(BSON("x" << 1)));
ASSERT_FALSE(matches(BSON("x" << 3)));
} | 0 | []
| mongo | ee97c0699fd55b498310996ee002328e533681a3 | 272,631,643,808,582,720,000,000,000,000,000,000,000 | 8 | SERVER-36993 Fix crash due to incorrect $or pushdown for indexed $expr. |
*/
PHP_FUNCTION(date_modify)
{
zval *object;
char *modify;
int modify_len;
if (zend_parse_method_parameters(ZEND_NUM_ARGS() TSRMLS_CC, getThis(), "Os", &object, date_ce_date, &modify, &modify_len) == FAILURE) {
RETURN_FALSE;
}
if (php_date_modify(object, modify, modify_len TSRMLS_CC)) {
RETURN_ZVAL(object, 1, 0);
}
RETURN_FALSE; | 0 | []
| php-src | c377f1a715476934133f3254d1e0d4bf3743e2d2 | 107,389,952,638,263,830,000,000,000,000,000,000,000 | 16 | Fix bug #68942 (Use after free vulnerability in unserialize() with DateTimeZone) |
brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
struct net_device *ndev,
const struct brcmf_event_msg *e, void *data)
{
struct wiphy *wiphy = cfg_to_wiphy(cfg);
static int generation;
u32 event = e->event_code;
u32 reason = e->reason;
struct station_info *sinfo;
brcmf_dbg(CONN, "event %s (%u), reason %d\n",
brcmf_fweh_event_name(event), event, reason);
if (event == BRCMF_E_LINK && reason == BRCMF_E_REASON_LINK_BSSCFG_DIS &&
ndev != cfg_to_ndev(cfg)) {
brcmf_dbg(CONN, "AP mode link down\n");
complete(&cfg->vif_disabled);
return 0;
}
if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) &&
(reason == BRCMF_E_STATUS_SUCCESS)) {
if (!data) {
bphy_err(wiphy, "No IEs present in ASSOC/REASSOC_IND\n");
return -EINVAL;
}
sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
if (!sinfo)
return -ENOMEM;
sinfo->assoc_req_ies = data;
sinfo->assoc_req_ies_len = e->datalen;
generation++;
sinfo->generation = generation;
cfg80211_new_sta(ndev, e->addr, sinfo, GFP_KERNEL);
kfree(sinfo);
} else if ((event == BRCMF_E_DISASSOC_IND) ||
(event == BRCMF_E_DEAUTH_IND) ||
(event == BRCMF_E_DEAUTH)) {
cfg80211_del_sta(ndev, e->addr, GFP_KERNEL);
}
return 0;
} | 0 | [
"CWE-787"
]
| linux | 1b5e2423164b3670e8bc9174e4762d297990deff | 177,293,861,205,345,160,000,000,000,000,000,000,000 | 44 | brcmfmac: assure SSID length from firmware is limited
The SSID length as received from firmware should not exceed
IEEE80211_MAX_SSID_LEN as that would result in heap overflow.
Reviewed-by: Hante Meuleman <[email protected]>
Reviewed-by: Pieter-Paul Giesberts <[email protected]>
Reviewed-by: Franky Lin <[email protected]>
Signed-off-by: Arend van Spriel <[email protected]>
Signed-off-by: Kalle Valo <[email protected]> |
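For the brcmfmac issue, the essential change is limiting a firmware-reported SSID length before copying it. A self-contained sketch of the clamp (the struct and helper are illustrative, not the driver's types):

```c
#include <string.h>

#define MAX_SSID_LEN 32   /* IEEE80211_MAX_SSID_LEN */

struct ssid_buf {
    unsigned char ssid[MAX_SSID_LEN];
    unsigned char ssid_len;
};

/* Clamp the untrusted length so a bogus firmware value cannot
 * overflow the destination buffer. */
static void copy_ssid(struct ssid_buf *dst, const unsigned char *src,
                      unsigned int reported_len)
{
    unsigned int len = reported_len;

    if (len > MAX_SSID_LEN)
        len = MAX_SSID_LEN;
    memcpy(dst->ssid, src, len);
    dst->ssid_len = (unsigned char)len;
}
```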
CLEAR_MARKS_PROC(aos_clear_marks)
{ aos_state_t *const pptr = vptr;
r_clear_attrs(&pptr->blocks, l_mark);
} | 0 | []
| ghostpdl | 04b37bbce174eed24edec7ad5b920eb93db4d47d | 194,736,761,960,034,230,000,000,000,000,000,000,000 | 5 | Bug 697799: have .rsdparams check its parameters
The Ghostscript internal operator .rsdparams wasn't checking the number or
type of the operands it was being passed. Do so. |
void mp_encode_lua_number(lua_State *L, mp_buf *buf) {
lua_Number n = lua_tonumber(L,-1);
if (IS_INT64_EQUIVALENT(n)) {
mp_encode_lua_integer(L, buf);
} else {
mp_encode_double(L,buf,(double)n);
}
} | 0 | [
"CWE-119",
"CWE-787"
]
| redis | 52a00201fca331217c3b4b8b634f6a0f57d6b7d3 | 187,663,891,783,661,120,000,000,000,000,000,000,000 | 9 | Security: fix Lua cmsgpack library stack overflow.
During an auditing effort, the Apple Vulnerability Research team discovered
a critical Redis security issue affecting the Lua scripting part of Redis.
-- Description of the problem
Several years ago I merged a pull request including many small changes at
the Lua MsgPack library (that originally I authored myself). The Pull
Request entered Redis in commit 90b6337c1, in 2014.
Unfortunately one of the changes included a variadic Lua function that
lacked the check for the available Lua C stack. As a result, calling the
"pack" MsgPack library function with a large number of arguments, results
into pushing into the Lua C stack a number of new values proportional to
the number of arguments the function was called with. The pushed values,
moreover, are controlled by untrusted user input.
This in turn causes stack smashing which we believe to be exploitable,
while not very deterministic, but it is likely that an exploit could be
created targeting specific versions of Redis executables. However at its
minimum the issue results in a DoS, crashing the Redis server.
-- Versions affected
Versions greater or equal to Redis 2.8.18 are affected.
-- Reproducing
Reproduce with this (based on the original reproduction script by
Apple security team):
https://gist.github.com/antirez/82445fcbea6d9b19f97014cc6cc79f8a
-- Verification of the fix
The fix was tested in the following way:
1) I checked that the problem is no longer observable running the trigger.
2) The Lua code was analyzed to understand the stack semantics, and that
actually enough stack is allocated in all the cases of mp_pack() calls.
3) The mp_pack() function was modified in order to show exactly what items
in the stack were being set, to make sure that there is no silent overflow
even after the fix.
-- Credits
Thank you to the Apple team and to the other persons that helped me
checking the patch and coordinating this communication. |
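The Redis/cmsgpack advisory describes a variadic Lua C function that pushed one value per argument without checking the Lua C stack. A minimal sketch of the missing guard using the standard Lua C API (`pack_args` is an invented function, not the cmsgpack code):

```c
#include <lua.h>
#include <lauxlib.h>

/* Grow (and bound) the Lua C stack before pushing values whose count is
 * controlled by the caller, so untrusted argument counts cannot smash it. */
static int pack_args(lua_State *L)
{
    int nargs = lua_gettop(L);

    if (nargs == 0 || !lua_checkstack(L, nargs))
        return luaL_error(L, "too many arguments or C stack overflow");
    /* ...it is now safe to push up to nargs additional values... */
    return 0;
}
```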
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
if (resource >= RLIM_NLIMITS)
return -EINVAL;
else {
struct rlimit value;
task_lock(current->group_leader);
value = current->signal->rlim[resource];
task_unlock(current->group_leader);
return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
}
} | 0 | [
"CWE-20"
]
| linux-2.6 | 9926e4c74300c4b31dee007298c6475d33369df0 | 302,681,378,235,367,640,000,000,000,000,000,000,000 | 12 | CPU time limit patch / setrlimit(RLIMIT_CPU, 0) cheat fix
As discovered here today, the change in Kernel 2.6.17 intended to inhibit
users from setting RLIMIT_CPU to 0 (as that is equivalent to unlimited) by
"cheating" and setting it to 1 in such a case, does not make a difference,
as the check is done in the wrong place (too late), and only applies to the
profiling code.
On all systems I checked running kernels above 2.6.17, no matter what the
hard and soft CPU time limits were before, a user could escape them by
issuing in the shell (sh/bash/zsh) "ulimit -t 0", and then the user's
process was not ever killed.
Attached is a trivial patch to fix that. Simply moving the check to a
slightly earlier location (specifically, before the line that actually
assigns the limit - *old_rlim = new_rlim), does the trick.
Do note that at least the zsh (but not ash, dash, or bash) shell has the
problem of "caching" the limits set by the ulimit command, so when running
zsh the fix will not immediately be evident - after entering "ulimit -t 0",
"ulimit -a" will show "-t: cpu time (seconds) 0", even though the actual
limit as returned by getrlimit(...) will be 1. It can be verified by
opening a subshell (which will not have the values of the parent shell in
cache) and checking in it, or just by running a CPU intensive command like
"echo '65536^1048576' | bc" and verifying that it dumps core after one
second.
Regardless of whether that is a misfeature in the shell, perhaps it would
be better to return -EINVAL from setrlimit in such a case instead of
cheating and setting to 1, as that does not really reflect the actual state
of the process anymore. I do not however know what the ground for that
decision was in the original 2.6.17 change, and whether there would be any
"backward" compatibility issues, so I preferred not to touch that right
now.
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
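The point of the setrlimit patch is ordering: adjust the special RLIMIT_CPU value before the new limit is stored, not afterwards in the profiling path. A stubbed sketch of that ordering (types and helper are illustrative, not the kernel code):

```c
struct rlimit_stub {
    unsigned long rlim_cur;
    unsigned long rlim_max;
};

/* Apply the "0 seconds means 1 second" adjustment before committing the
 * value, so a zero soft limit can never be stored as-is. */
static void set_cpu_limit(struct rlimit_stub *old_rlim,
                          struct rlimit_stub new_rlim)
{
    if (new_rlim.rlim_cur == 0)
        new_rlim.rlim_cur = 1;   /* zero would behave like "unlimited" */
    *old_rlim = new_rlim;        /* only now overwrite the stored limit */
}
```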
virDomainGraphicsDefParseXML(virDomainXMLOptionPtr xmlopt,
xmlNodePtr node,
xmlXPathContextPtr ctxt,
unsigned int flags)
{
virDomainGraphicsDefPtr def;
int typeVal;
g_autofree char *type = NULL;
if (!(def = virDomainGraphicsDefNew(xmlopt)))
return NULL;
type = virXMLPropString(node, "type");
if (!type) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("missing graphics device type"));
goto error;
}
if ((typeVal = virDomainGraphicsTypeFromString(type)) < 0) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("unknown graphics device type '%s'"), type);
goto error;
}
def->type = typeVal;
switch (def->type) {
case VIR_DOMAIN_GRAPHICS_TYPE_VNC:
if (virDomainGraphicsDefParseXMLVNC(def, node, ctxt, flags) < 0)
goto error;
break;
case VIR_DOMAIN_GRAPHICS_TYPE_SDL:
if (virDomainGraphicsDefParseXMLSDL(def, node, ctxt) < 0)
goto error;
break;
case VIR_DOMAIN_GRAPHICS_TYPE_RDP:
if (virDomainGraphicsDefParseXMLRDP(def, node, ctxt, flags) < 0)
goto error;
break;
case VIR_DOMAIN_GRAPHICS_TYPE_DESKTOP:
if (virDomainGraphicsDefParseXMLDesktop(def, node) < 0)
goto error;
break;
case VIR_DOMAIN_GRAPHICS_TYPE_SPICE:
if (virDomainGraphicsDefParseXMLSpice(def, node, ctxt, flags) < 0)
goto error;
break;
case VIR_DOMAIN_GRAPHICS_TYPE_EGL_HEADLESS:
if (virDomainGraphicsDefParseXMLEGLHeadless(def, node, ctxt) < 0)
goto error;
break;
case VIR_DOMAIN_GRAPHICS_TYPE_LAST:
break;
}
return def;
error:
virDomainGraphicsDefFree(def);
def = NULL;
return NULL;
} | 0 | [
"CWE-212"
]
| libvirt | a5b064bf4b17a9884d7d361733737fb614ad8979 | 239,130,690,212,277,600,000,000,000,000,000,000,000 | 62 | conf: Don't format http cookies unless VIR_DOMAIN_DEF_FORMAT_SECURE is used
Starting with 3b076391befc3fe72deb0c244ac6c2b4c100b410
(v6.1.0-122-g3b076391be) we support http cookies. Since they may contain
somewhat sensitive information we should not format them into the XML
unless VIR_DOMAIN_DEF_FORMAT_SECURE is asserted.
Reported-by: Han Han <[email protected]>
Signed-off-by: Peter Krempa <[email protected]>
Reviewed-by: Erik Skultety <[email protected]> |
static bool red_stream_write_u32_le(RedStream *s, uint32_t n)
{
n = GUINT32_TO_LE(n);
return red_stream_write_all(s, &n, sizeof(uint32_t));
} | 0 | []
| spice | 95a0cfac8a1c8eff50f05e65df945da3bb501fc9 | 197,565,083,366,231,800,000,000,000,000,000,000,000 | 5 | With OpenSSL 1.0.2 and earlier: disable client-side renegotiation.
Fixed issue #49
Fixes BZ#1904459
Signed-off-by: Julien Ropé <[email protected]>
Reported-by: BlackKD
Acked-by: Frediano Ziglio <[email protected]> |
static unsigned long get_trampoline_vaddr(void)
{
struct xol_area *area;
unsigned long trampoline_vaddr = -1;
area = current->mm->uprobes_state.xol_area;
smp_read_barrier_depends();
if (area)
trampoline_vaddr = area->vaddr;
return trampoline_vaddr;
} | 0 | [
"CWE-416"
]
| linux | 355627f518978b5167256d27492fe0b343aaf2f2 | 149,026,700,729,089,100,000,000,000,000,000,000,000 | 12 | mm, uprobes: fix multiple free of ->uprobes_state.xol_area
Commit 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for
write killable") made it possible to kill a forking task while it is
waiting to acquire its ->mmap_sem for write, in dup_mmap().
However, it was overlooked that this introduced an new error path before
the new mm_struct's ->uprobes_state.xol_area has been set to NULL after
being copied from the old mm_struct by the memcpy in dup_mm(). For a
task that has previously hit a uprobe tracepoint, this resulted in the
'struct xol_area' being freed multiple times if the task was killed at
just the right time while forking.
Fix it by setting ->uprobes_state.xol_area to NULL in mm_init() rather
than in uprobe_dup_mmap().
With CONFIG_UPROBE_EVENTS=y, the bug can be reproduced by the same C
program given by commit 2b7e8665b4ff ("fork: fix incorrect fput of
->exe_file causing use-after-free"), provided that a uprobe tracepoint
has been set on the fork_thread() function. For example:
$ gcc reproducer.c -o reproducer -lpthread
$ nm reproducer | grep fork_thread
0000000000400719 t fork_thread
$ echo "p $PWD/reproducer:0x719" > /sys/kernel/debug/tracing/uprobe_events
$ echo 1 > /sys/kernel/debug/tracing/events/uprobes/enable
$ ./reproducer
Here is the use-after-free reported by KASAN:
BUG: KASAN: use-after-free in uprobe_clear_state+0x1c4/0x200
Read of size 8 at addr ffff8800320a8b88 by task reproducer/198
CPU: 1 PID: 198 Comm: reproducer Not tainted 4.13.0-rc7-00015-g36fde05f3fb5 #255
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-20170228_101828-anatol 04/01/2014
Call Trace:
dump_stack+0xdb/0x185
print_address_description+0x7e/0x290
kasan_report+0x23b/0x350
__asan_report_load8_noabort+0x19/0x20
uprobe_clear_state+0x1c4/0x200
mmput+0xd6/0x360
do_exit+0x740/0x1670
do_group_exit+0x13f/0x380
get_signal+0x597/0x17d0
do_signal+0x99/0x1df0
exit_to_usermode_loop+0x166/0x1e0
syscall_return_slowpath+0x258/0x2c0
entry_SYSCALL_64_fastpath+0xbc/0xbe
...
Allocated by task 199:
save_stack_trace+0x1b/0x20
kasan_kmalloc+0xfc/0x180
kmem_cache_alloc_trace+0xf3/0x330
__create_xol_area+0x10f/0x780
uprobe_notify_resume+0x1674/0x2210
exit_to_usermode_loop+0x150/0x1e0
prepare_exit_to_usermode+0x14b/0x180
retint_user+0x8/0x20
Freed by task 199:
save_stack_trace+0x1b/0x20
kasan_slab_free+0xa8/0x1a0
kfree+0xba/0x210
uprobe_clear_state+0x151/0x200
mmput+0xd6/0x360
copy_process.part.8+0x605f/0x65d0
_do_fork+0x1a5/0xbd0
SyS_clone+0x19/0x20
do_syscall_64+0x22f/0x660
return_from_SYSCALL_64+0x0/0x7a
Note: without KASAN, you may instead see a "Bad page state" message, or
simply a general protection fault.
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for write killable")
Signed-off-by: Eric Biggers <[email protected]>
Reported-by: Oleg Nesterov <[email protected]>
Acked-by: Oleg Nesterov <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: <[email protected]> [4.7+]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
void f2fs_destroy_extent_tree(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_tree *et = F2FS_I(inode)->extent_tree;
unsigned int node_cnt = 0;
if (!et)
return;
if (inode->i_nlink && !is_bad_inode(inode) &&
atomic_read(&et->node_cnt)) {
mutex_lock(&sbi->extent_tree_lock);
list_add_tail(&et->list, &sbi->zombie_list);
atomic_inc(&sbi->total_zombie_tree);
mutex_unlock(&sbi->extent_tree_lock);
return;
}
/* free all extent info belong to this extent tree */
node_cnt = f2fs_destroy_extent_node(inode);
/* delete extent tree entry in radix tree */
mutex_lock(&sbi->extent_tree_lock);
f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
kmem_cache_free(extent_tree_slab, et);
atomic_dec(&sbi->total_ext_tree);
mutex_unlock(&sbi->extent_tree_lock);
F2FS_I(inode)->extent_tree = NULL;
trace_f2fs_destroy_extent_tree(inode, node_cnt);
} | 0 | [
"CWE-119",
"CWE-787"
]
| linux | dad48e73127ba10279ea33e6dbc8d3905c4d31c0 | 153,607,176,159,188,560,000,000,000,000,000,000,000 | 33 | f2fs: fix a bug caused by NULL extent tree
Thread A:                              Thread B:
 -f2fs_remount
  -sbi->mount_opt.opt = 0;
    <---                               -f2fs_iget
                                        -do_read_inode
                                         -f2fs_init_extent_tree
                                         -F2FS_I(inode)->extent_tree is NULL
  -default_options && parse_options
  -remount return
    <---                               -f2fs_map_blocks
                                        -f2fs_lookup_extent_tree
                                         -f2fs_bug_on(sbi, !et);
The same problem with f2fs_new_inode.
Signed-off-by: Yunlei He <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]> |
static void schedule_async_open(struct timeval request_time,
struct smb_request *req)
{
struct deferred_open_record state;
struct timeval timeout;
timeout = timeval_set(20, 0);
ZERO_STRUCT(state);
state.delayed_for_oplocks = false;
state.async_open = true;
if (!request_timed_out(request_time, timeout)) {
defer_open(NULL, request_time, timeout, req, &state);
}
} | 0 | []
| samba | 60f922bf1bd8816eacbb32c24793ad1f97a1d9f2 | 103,765,046,869,100,110,000,000,000,000,000,000,000 | 16 | Fix bug #10229 - No access check verification on stream files.
https://bugzilla.samba.org/show_bug.cgi?id=10229
We need to check if the requested access mask
could be used to open the underlying file (if
it existed), as we're passing in zero for the
access mask to the base filename.
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Stefan Metzmacher <[email protected]>
Reviewed-by: David Disseldorp <[email protected]> |
MagickExport int EOFBlob(const Image *image)
{
BlobInfo
*magick_restrict blob_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->blob != (BlobInfo *) NULL);
assert(image->blob->type != UndefinedStream);
blob_info=image->blob;
switch (blob_info->type)
{
case UndefinedStream:
case StandardStream:
break;
case FileStream:
case PipeStream:
{
blob_info->eof=feof(blob_info->file_info.file) != 0 ? MagickTrue :
MagickFalse;
break;
}
case ZipStream:
{
#if defined(MAGICKCORE_ZLIB_DELEGATE)
blob_info->eof=gzeof(blob_info->file_info.gzfile) != 0 ? MagickTrue :
MagickFalse;
#endif
break;
}
case BZipStream:
{
#if defined(MAGICKCORE_BZLIB_DELEGATE)
int
status;
status=0;
(void) BZ2_bzerror(blob_info->file_info.bzfile,&status);
blob_info->eof=status == BZ_UNEXPECTED_EOF ? MagickTrue : MagickFalse;
#endif
break;
}
case FifoStream:
{
blob_info->eof=MagickFalse;
break;
}
case BlobStream:
break;
case CustomStream:
break;
}
return((int) blob_info->eof);
} | 0 | [
"CWE-416",
"CWE-399"
]
| ImageMagick | c5d012a46ae22be9444326aa37969a3f75daa3ba | 73,394,598,787,734,800,000,000,000,000,000,000,000 | 56 | https://github.com/ImageMagick/ImageMagick6/issues/43 |
static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
struct bpf_verifier_state *state = env->cur_state;
struct bpf_reg_state *regs = state->regs;
int off, i, slot, spi;
if (regs[regno].type != PTR_TO_STACK) {
/* Allow zero-byte read from NULL, regardless of pointer type */
if (zero_size_allowed && access_size == 0 &&
register_is_null(regs[regno]))
return 0;
verbose(env, "R%d type=%s expected=%s\n", regno,
reg_type_str[regs[regno].type],
reg_type_str[PTR_TO_STACK]);
return -EACCES;
}
/* Only allow fixed-offset stack reads */
if (!tnum_is_const(regs[regno].var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
verbose(env, "invalid variable stack read R%d var_off=%s\n",
regno, tn_buf);
return -EACCES;
}
off = regs[regno].off + regs[regno].var_off.value;
if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
regno, off, access_size);
return -EACCES;
}
if (env->prog->aux->stack_depth < -off)
env->prog->aux->stack_depth = -off;
if (meta && meta->raw_mode) {
meta->access_size = access_size;
meta->regno = regno;
return 0;
}
for (i = 0; i < access_size; i++) {
slot = -(off + i) - 1;
spi = slot / BPF_REG_SIZE;
if (state->allocated_stack <= slot ||
state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
STACK_MISC) {
verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
off, i, access_size);
return -EACCES;
}
}
return 0;
} | 0 | [
"CWE-190"
]
| linux | bb7f0f989ca7de1153bd128a40a71709e339fa03 | 4,440,700,468,841,552,500,000,000,000,000,000,000 | 59 | bpf: fix integer overflows
There were various issues related to the limited size of integers used in
the verifier:
- `off + size` overflow in __check_map_access()
- `off + reg->off` overflow in check_mem_access()
- `off + reg->var_off.value` overflow or 32-bit truncation of
`reg->var_off.value` in check_mem_access()
- 32-bit truncation in check_stack_boundary()
Make sure that any integer math cannot overflow by not allowing
pointer math with large values.
Also reduce the scope of "scalar op scalar" tracking.
Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]> |
static int cx24116_firmware_ondemand(struct dvb_frontend *fe)
{
struct cx24116_state *state = fe->demodulator_priv;
const struct firmware *fw;
int ret = 0;
dprintk("%s()\n", __func__);
if (cx24116_readreg(state, 0x20) > 0) {
if (state->skip_fw_load)
return 0;
/* Load firmware */
/* request the firmware, this will block until loaded */
printk(KERN_INFO "%s: Waiting for firmware upload (%s)...\n",
__func__, CX24116_DEFAULT_FIRMWARE);
ret = request_firmware(&fw, CX24116_DEFAULT_FIRMWARE,
state->i2c->dev.parent);
printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n",
__func__);
if (ret) {
printk(KERN_ERR "%s: No firmware uploaded "
"(timeout or file not found?)\n", __func__);
return ret;
}
/* Make sure we don't recurse back through here
* during loading */
state->skip_fw_load = 1;
ret = cx24116_load_firmware(fe, fw);
if (ret)
printk(KERN_ERR "%s: Writing firmware to device failed\n",
__func__);
release_firmware(fw);
printk(KERN_INFO "%s: Firmware upload %s\n", __func__,
ret == 0 ? "complete" : "failed");
/* Ensure firmware is always loaded if required */
state->skip_fw_load = 0;
}
return ret;
} | 0 | [
"CWE-476",
"CWE-119",
"CWE-125"
]
| linux | 1fa2337a315a2448c5434f41e00d56b01a22283c | 310,982,214,199,791,900,000,000,000,000,000,000,000 | 47 | [media] cx24116: fix a buffer overflow when checking userspace params
The maximum size for a DiSEqC command is 6, according to the
userspace API. However, the code allows writing many more values:
drivers/media/dvb-frontends/cx24116.c:983 cx24116_send_diseqc_msg() error: buffer overflow 'd->msg' 6 <= 23
Cc: [email protected]
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
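The cx24116 fix is a length check on user-supplied DiSEqC parameters before copying them into a 6-byte message buffer. A self-contained sketch of that check (struct and return value are illustrative, not the DVB API):

```c
#include <string.h>

#define DISEQC_MSG_MAX 6   /* maximum DiSEqC command length per the API */

struct diseqc_msg {
    unsigned char msg[DISEQC_MSG_MAX];
    unsigned char msg_len;
};

/* Reject oversized commands instead of copying past the buffer. */
static int copy_diseqc(struct diseqc_msg *dst,
                       const unsigned char *src, unsigned int src_len)
{
    if (src_len > DISEQC_MSG_MAX)
        return -1;            /* invalid: refuse instead of overflowing */
    memcpy(dst->msg, src, src_len);
    dst->msg_len = (unsigned char)src_len;
    return 0;
}
```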
static void buzz_set_leds(struct sony_sc *sc)
{
struct hid_device *hdev = sc->hdev;
struct list_head *report_list =
&hdev->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next,
struct hid_report, list);
s32 *value = report->field[0]->value;
BUILD_BUG_ON(MAX_LEDS < 4);
value[0] = 0x00;
value[1] = sc->led_state[0] ? 0xff : 0x00;
value[2] = sc->led_state[1] ? 0xff : 0x00;
value[3] = sc->led_state[2] ? 0xff : 0x00;
value[4] = sc->led_state[3] ? 0xff : 0x00;
value[5] = 0x00;
value[6] = 0x00;
hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
} | 0 | [
"CWE-787"
]
| linux | d9d4b1e46d9543a82c23f6df03f4ad697dab361b | 15,857,824,433,490,087,000,000,000,000,000,000,000 | 20 | HID: Fix assumption that devices have inputs
The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff
driver. The problem is caused by the driver's assumption that the
device must have an input report. While this will be true for all
normal HID input devices, a suitably malicious device can violate the
assumption.
The same assumption is present in over a dozen other HID drivers.
This patch fixes them by checking that the list of hid_inputs for the
hid_device is nonempty before allowing it to be used.
Reported-and-tested-by: [email protected]
Signed-off-by: Alan Stern <[email protected]>
CC: <[email protected]>
Signed-off-by: Benjamin Tissoires <[email protected]> |
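The HID fix checks that a device actually exposes an input before dereferencing the first entry. A sketch with stubbed types (not the kernel's hid structures or list API):

```c
#include <stddef.h>

struct hid_input_stub { int dummy; };

struct hid_device_stub {
    struct hid_input_stub *inputs;   /* may be empty for malicious devices */
    size_t n_inputs;
};

/* Do not assume an input exists: return NULL when the device has none. */
static struct hid_input_stub *first_input(struct hid_device_stub *hdev)
{
    if (hdev == NULL || hdev->n_inputs == 0)
        return NULL;
    return &hdev->inputs[0];
}
```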
static struct dentry *__lookup_hash(const struct qstr *name,
struct dentry *base, unsigned int flags)
{
struct dentry *dentry = lookup_dcache(name, base, flags);
struct dentry *old;
struct inode *dir = base->d_inode;
if (dentry)
return dentry;
/* Don't create child dentry for a dead directory. */
if (unlikely(IS_DEADDIR(dir)))
return ERR_PTR(-ENOENT);
dentry = d_alloc(base, name);
if (unlikely(!dentry))
return ERR_PTR(-ENOMEM);
old = dir->i_op->lookup(dir, dentry, flags);
if (unlikely(old)) {
dput(dentry);
dentry = old;
}
return dentry;
} | 0 | [
"CWE-416",
"CWE-284"
]
| linux | d0cb50185ae942b03c4327be322055d622dc79f6 | 163,830,558,680,490,970,000,000,000,000,000,000,000 | 25 | do_last(): fetch directory ->i_mode and ->i_uid before it's too late
may_create_in_sticky() call is done when we already have dropped the
reference to dir.
Fixes: 30aba6656f61e (namei: allow restricted O_CREAT of FIFOs and regular files)
Signed-off-by: Al Viro <[email protected]> |
static const char *
my_os_charset_to_mysql_charset(const char *csname)
{
const MY_CSET_OS_NAME *csp;
for (csp= charsets; csp->os_name; csp++)
{
if (!my_strcasecmp(&my_charset_latin1, csp->os_name, csname))
{
switch (csp->param)
{
case my_cs_exact:
return csp->my_name;
case my_cs_approx:
/*
Maybe we should print a warning eventually:
character set correspondence is not exact.
*/
return csp->my_name;
default:
my_printf_error(ER_UNKNOWN_ERROR,
"OS character set '%s'"
" is not supported by MySQL client",
MYF(0), csp->my_name);
goto def;
}
}
}
my_printf_error(ER_UNKNOWN_ERROR,
"Unknown OS character set '%s'.",
MYF(0), csname);
def:
csname= MYSQL_DEFAULT_CHARSET_NAME;
my_printf_error(ER_UNKNOWN_ERROR,
"Switching to the default character set '%s'.",
MYF(0), csname);
return csname; | 0 | [
"CWE-284",
"CWE-295"
]
| mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 298,180,249,005,823,700,000,000,000,000,000,000,000 | 40 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to eunecrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
void snd_card_disconnect_sync(struct snd_card *card)
{
int err;
err = snd_card_disconnect(card);
if (err < 0) {
dev_err(card->dev,
"snd_card_disconnect error (%d), skipping sync\n",
err);
return;
}
spin_lock_irq(&card->files_lock);
wait_event_lock_irq(card->remove_sleep,
list_empty(&card->files_list),
card->files_lock);
spin_unlock_irq(&card->files_lock);
} | 0 | [
"CWE-416"
]
| linux | 2a3f7221acddfe1caa9ff09b3a8158c39b2fdeac | 102,136,263,838,419,810,000,000,000,000,000,000,000 | 18 | ALSA: core: Fix card races between register and disconnect
There is a small race window in the card disconnection code that
allows the registration of another card with the very same card id.
This leads to a warning in procfs creation as caught by syzkaller.
The problem is that we delete snd_cards and snd_cards_lock entries at
the very beginning of the disconnection procedure. This makes the
slot available to be assigned for another card object while the
disconnection procedure is being processed. Then it becomes possible
to issue a procfs registration with the existing file name although we
check the conflict beforehand.
The fix is simply to move the snd_cards and snd_cards_lock clearances
at the end of the disconnection procedure. The references to these
entries are merely either from the global proc files like
/proc/asound/cards or from the card registration / disconnection, so
it should be fine to shift at the very end.
Reported-by: [email protected]
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
int rfc822_write_address (char *buf, size_t buflen, ADDRESS *addr, int display)
{
char *pbuf = buf;
size_t len = mutt_strlen (buf);
buflen--; /* save room for the terminal nul */
if (len > 0)
{
if (len > buflen)
return pbuf - buf; /* safety check for bogus arguments */
pbuf += len;
buflen -= len;
if (!buflen)
goto done;
*pbuf++ = ',';
buflen--;
if (!buflen)
goto done;
*pbuf++ = ' ';
buflen--;
}
for (; addr && buflen > 0; addr = addr->next)
{
/* use buflen+1 here because we already saved space for the trailing
nul char, and the subroutine can make use of it */
rfc822_write_address_single (pbuf, buflen + 1, addr, display);
/* this should be safe since we always have at least 1 char passed into
the above call, which means `pbuf' should always be nul terminated */
len = mutt_strlen (pbuf);
pbuf += len;
buflen -= len;
/* if there is another address, and its not a group mailbox name or
group terminator, add a comma to separate the addresses */
if (addr->next && addr->next->mailbox && !addr->group)
{
if (!buflen)
goto done;
*pbuf++ = ',';
buflen--;
if (!buflen)
goto done;
*pbuf++ = ' ';
buflen--;
}
}
done:
*pbuf = 0;
return pbuf - buf;
} | 0 | [
"CWE-401"
]
| mutt | 4a2becbdb4422aaffe3ce314991b9d670b7adf17 | 102,590,033,296,082,580,000,000,000,000,000,000,000 | 54 | Fix memory leak parsing group addresses without a display name.
When there was a group address terminator with no previous
addresses (including the group display-name), an address would be
allocated but not attached to the address list.
Change this to only allocate when last exists.
It would be more correct to not allocate at all unless we are inside a
group list, but I will address that in a separate commit to master. |
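The Mutt fix allocates the group-terminator node only when there is a previous address to attach it to. A minimal sketch of that conditional allocation (struct and helper are illustrative, not Mutt's ADDRESS API):

```c
#include <stdlib.h>

struct addr_stub {
    char *mailbox;
    struct addr_stub *next;
};

/* Only allocate the terminator when `last` exists, so nothing can be
 * allocated without being linked into the list (and later freed). */
static struct addr_stub *append_group_terminator(struct addr_stub *last)
{
    struct addr_stub *term;

    if (last == NULL)
        return NULL;             /* no list to attach to: allocate nothing */
    term = calloc(1, sizeof(*term));
    if (term != NULL)
        last->next = term;
    return term;
}
```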
static void release_all_pte_pages(pte_t *pte)
{
release_pte_pages(pte, pte + HPAGE_PMD_NR);
} | 0 | [
"CWE-399"
]
| linux | 78f11a255749d09025f54d4e2df4fbcb031530e2 | 68,132,576,904,041,060,000,000,000,000,000,000,000 | 4 | mm: thp: fix /dev/zero MAP_PRIVATE and vm_flags cleanups
The huge_memory.c THP page fault was allowed to run if vm_ops was null
(which would succeed for /dev/zero MAP_PRIVATE, as the f_op->mmap wouldn't
setup a special vma->vm_ops and it would fallback to regular anonymous
memory) but other THP logics weren't fully activated for vmas with vm_file
not NULL (/dev/zero has a not NULL vma->vm_file).
So this removes the vm_file checks so that /dev/zero also can safely use
THP (the other albeit safer approach to fix this bug would have been to
prevent the THP initial page fault to run if vm_file was set).
After removing the vm_file checks, this also makes huge_memory.c stricter
in khugepaged for the DEBUG_VM=y case. It doesn't replace the vm_file
check with a is_pfn_mapping check (but it keeps checking for VM_PFNMAP
under VM_BUG_ON) because for a is_cow_mapping() mapping VM_PFNMAP should
only be allowed to exist before the first page fault, and in turn when
vma->anon_vma is null (so preventing khugepaged registration). So I tend
to think the previous comment saying if vm_file was set, VM_PFNMAP might
have been set and we could still be registered in khugepaged (despite
anon_vma was not NULL to be registered in khugepaged) was too paranoid.
The is_linear_pfn_mapping check is also I think superfluous (as described
by comment) but under DEBUG_VM it is safe to stay.
Addresses https://bugzilla.kernel.org/show_bug.cgi?id=33682
Signed-off-by: Andrea Arcangeli <[email protected]>
Reported-by: Caspar Zhang <[email protected]>
Acked-by: Mel Gorman <[email protected]>
Acked-by: Rik van Riel <[email protected]>
Cc: <[email protected]> [2.6.38.x]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
int main(int argc, char **argv)
{
char self_name[FN_REFLEN];
MY_INIT(argv[0]);
#if __WIN__
if (GetModuleFileName(NULL, self_name, FN_REFLEN) == 0)
#endif
{
strncpy(self_name, argv[0], FN_REFLEN);
}
if (init_dynamic_string(&ds_args, "", 512, 256) ||
init_dynamic_string(&conn_args, "", 512, 256))
die("Out of memory");
my_getopt_use_args_separator= TRUE;
if (load_defaults("my", load_default_groups, &argc, &argv))
die(NULL);
my_getopt_use_args_separator= FALSE;
defaults_argv= argv; /* Must be freed by 'free_defaults' */
if (handle_options(&argc, &argv, my_long_options, get_one_option))
die(NULL);
if (debug_info_flag)
my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO;
if (debug_check_flag)
my_end_arg= MY_CHECK_ERROR;
if (tty_password)
{
opt_password= get_tty_password(NullS);
/* add password to defaults file */
dynstr_append_os_quoted(&ds_args, "--password=", opt_password, NullS);
dynstr_append(&ds_args, " ");
}
/* add user to defaults file */
dynstr_append_os_quoted(&ds_args, "--user=", opt_user, NullS);
dynstr_append(&ds_args, " ");
/* Find mysql */
find_tool(mysql_path, IF_WIN("mysql.exe", "mysql"), self_name);
if (!opt_systables_only)
{
/* Find mysqlcheck */
find_tool(mysqlcheck_path, IF_WIN("mysqlcheck.exe", "mysqlcheck"), self_name);
}
else
{
printf("The --upgrade-system-tables option was used, databases won't be touched.\n");
}
/*
Read the mysql_upgrade_info file to check if mysql_upgrade
already has been run for this installation of MySQL
*/
if (!opt_force && upgrade_already_done())
{
printf("This installation of MySQL is already upgraded to %s, "
"use --force if you still need to run mysql_upgrade\n",
MYSQL_SERVER_VERSION);
die(NULL);
}
if (opt_version_check && check_version_match())
die("Upgrade failed");
/*
Run "mysqlcheck" and "mysql_fix_privilege_tables.sql"
First run mysqlcheck on the system database.
Then do the upgrade.
And then run mysqlcheck on all tables.
*/
if (!opt_systables_only)
{
if (run_mysqlcheck_mysql_db_fixnames())
{
die("Error during call to mysql_check for fixing the db/tables names on "
"mysql db");
}
if (run_mysqlcheck_mysql_db_upgrade())
{
die("Error during call to mysql_check for upgrading the tables names on "
"mysql db");
}
}
if (run_sql_fix_privilege_tables())
{
/* Specific error msg (if present) would be printed in the function call
* above */
die("Upgrade failed");
}
if (!opt_systables_only)
{
if (run_mysqlcheck_fixnames())
{
die("Error during call to mysql_check for fixing the db/tables names on "
"all db(s) except mysql");
}
if (run_mysqlcheck_upgrade())
{
die("Error during call to mysql_check for upgrading the tables names on "
"all db(s) except mysql");
}
}
verbose("OK");
/* Create a file indicating upgrade has been performed */
create_mysql_upgrade_info_file();
free_used_memory();
my_end(my_end_arg);
exit(0);
} | 0 | [
"CWE-319"
]
| mysql-server | 0002e1380d5f8c113b6bce91f2cf3f75136fd7c7 | 65,001,621,767,038,640,000,000,000,000,000,000,000 | 116 | BUG#25575605: SETTING --SSL-MODE=REQUIRED SENDS CREDENTIALS BEFORE VERIFYING SSL CONNECTION
MYSQL_OPT_SSL_MODE option introduced.
It is set in case of --ssl-mode=REQUIRED and permits only SSL connection.
(cherry picked from commit f91b941842d240b8a62645e507f5554e8be76aec) |
void Curl_http_method(struct Curl_easy *data, struct connectdata *conn,
const char **method, Curl_HttpReq *reqp)
{
Curl_HttpReq httpreq = data->state.httpreq;
const char *request;
if((conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_FTP)) &&
data->set.upload)
httpreq = HTTPREQ_PUT;
/* Now set the 'request' pointer to the proper request string */
if(data->set.str[STRING_CUSTOMREQUEST])
request = data->set.str[STRING_CUSTOMREQUEST];
else {
if(data->set.opt_no_body)
request = "HEAD";
else {
DEBUGASSERT((httpreq >= HTTPREQ_GET) && (httpreq <= HTTPREQ_HEAD));
switch(httpreq) {
case HTTPREQ_POST:
case HTTPREQ_POST_FORM:
case HTTPREQ_POST_MIME:
request = "POST";
break;
case HTTPREQ_PUT:
request = "PUT";
break;
default: /* this should never happen */
case HTTPREQ_GET:
request = "GET";
break;
case HTTPREQ_HEAD:
request = "HEAD";
break;
}
}
}
*method = request;
*reqp = httpreq;
} | 0 | []
| curl | 48d7064a49148f03942380967da739dcde1cdc24 | 116,559,111,797,850,840,000,000,000,000,000,000,000 | 39 | cookie: apply limits
- Send no more than 150 cookies per request
- Cap the max length used for a cookie: header to 8K
- Cap the max number of received Set-Cookie: headers to 50
Bug: https://curl.se/docs/CVE-2022-32205.html
CVE-2022-32205
Reported-by: Harry Sintonen
Closes #9048 |
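The curl advisory lists three caps (150 cookies per request, an 8K cookie header, 50 received Set-Cookie headers). The sketch below shows how the first two caps could bound header construction; the constants are taken from the advisory text, and the helper is invented, not libcurl code:

```c
#include <stddef.h>
#include <string.h>

#define MAX_COOKIES_PER_REQUEST 150
#define MAX_COOKIE_HEADER_LEN   8192

/* Append "name=value" strings, separated by "; ", stopping once either
 * the count cap or the length cap would be exceeded. */
static size_t build_cookie_header(char *out, size_t outlen,
                                  const char *const *cookies, size_t ncookies)
{
    size_t used = 0, added = 0, i;

    if (outlen == 0)
        return 0;
    out[0] = '\0';
    for (i = 0; i < ncookies && added < MAX_COOKIES_PER_REQUEST; i++) {
        size_t len = strlen(cookies[i]);
        size_t need = len + (added ? 2 : 0);      /* "; " separator */

        if (used + need + 1 > outlen || used + need > MAX_COOKIE_HEADER_LEN)
            break;                                /* a cap would be exceeded */
        if (added) {
            memcpy(out + used, "; ", 2);
            used += 2;
        }
        memcpy(out + used, cookies[i], len);
        used += len;
        out[used] = '\0';
        added++;
    }
    return used;
}
```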
ssh_connect_direct(const char *host, struct addrinfo *aitop,
struct sockaddr_storage *hostaddr, u_short port, int family,
int connection_attempts, int *timeout_ms, int want_keepalive, int needpriv)
{
int on = 1;
int sock = -1, attempt;
char ntop[NI_MAXHOST], strport[NI_MAXSERV];
struct addrinfo *ai;
debug2("ssh_connect: needpriv %d", needpriv);
for (attempt = 0; attempt < connection_attempts; attempt++) {
if (attempt > 0) {
/* Sleep a moment before retrying. */
sleep(1);
debug("Trying again...");
}
/*
* Loop through addresses for this host, and try each one in
* sequence until the connection succeeds.
*/
for (ai = aitop; ai; ai = ai->ai_next) {
if (ai->ai_family != AF_INET &&
ai->ai_family != AF_INET6)
continue;
if (getnameinfo(ai->ai_addr, ai->ai_addrlen,
ntop, sizeof(ntop), strport, sizeof(strport),
NI_NUMERICHOST|NI_NUMERICSERV) != 0) {
error("ssh_connect: getnameinfo failed");
continue;
}
debug("Connecting to %.200s [%.100s] port %s.",
host, ntop, strport);
/* Create a socket for connecting. */
sock = ssh_create_socket(needpriv, ai);
if (sock < 0)
/* Any error is already output */
continue;
if (timeout_connect(sock, ai->ai_addr, ai->ai_addrlen,
timeout_ms) >= 0) {
/* Successful connection. */
memcpy(hostaddr, ai->ai_addr, ai->ai_addrlen);
break;
} else {
debug("connect to address %s port %s: %s",
ntop, strport, strerror(errno));
close(sock);
sock = -1;
}
}
if (sock != -1)
break; /* Successful connection. */
}
/* Return failure if we didn't get a successful connection. */
if (sock == -1) {
error("ssh: connect to host %s port %s: %s",
host, strport, strerror(errno));
return (-1);
}
debug("Connection established.");
/* Set SO_KEEPALIVE if requested. */
if (want_keepalive &&
setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (void *)&on,
sizeof(on)) < 0)
error("setsockopt SO_KEEPALIVE: %.100s", strerror(errno));
/* Set the connection. */
packet_set_connection(sock, sock);
return 0;
} | 0 | [
"CWE-20"
]
| openssh-portable | 7d6a9fb660c808882d064e152d6070ffc3844c3f | 301,124,422,328,006,660,000,000,000,000,000,000,000 | 76 | - [email protected] 2014/04/01 03:34:10
[sshconnect.c]
When using VerifyHostKeyDNS with a DNSSEC resolver, down-convert any
certificate keys to plain keys and attempt SSHFP resolution.
Prevents a server from skipping SSHFP lookup and forcing a new-hostkey
dialog by offering only certificate keys.
Reported by mcv21 AT cam.ac.uk |
TfLiteStatus MultiplyAndCheckOverflow(size_t a, size_t b, size_t* product) {
// Multiplying a * b where a and b are size_t cannot result in overflow in a
// size_t accumulator if both numbers have no non-zero bits in their upper
// half.
constexpr size_t size_t_bits = 8 * sizeof(size_t);
constexpr size_t overflow_upper_half_bit_position = size_t_bits / 2;
*product = a * b;
  // If neither integer has non-zero bits past 32 bits, the product can't overflow.
  // Otherwise check using slow division.
if (TFLITE_EXPECT_FALSE((a | b) >> overflow_upper_half_bit_position != 0)) {
if (a != 0 && *product / a != b) return kTfLiteError;
}
return kTfLiteOk;
} | 0 | [
"CWE-20",
"CWE-787"
]
| tensorflow | d58c96946b2880991d63d1dacacb32f0a4dfa453 | 178,231,244,800,399,920,000,000,000,000,000,000,000 | 14 | [tflite] Ensure inputs and outputs don't overlap.
If a model uses the same tensor for both an input and an output then this can result in data loss and memory corruption. This should not happen.
PiperOrigin-RevId: 332522916
Change-Id: If0905b142415a9dfceaf2d181872f2a8fb88f48a |
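As a usage note for the overflow guard shown in the function above: the same check-after-multiply idiom gates an allocation in plain C roughly like this (alloc_matrix is an illustrative name, not a TensorFlow API).
#include <stddef.h>
#include <stdlib.h>

/* Returns 0 on success, -1 if a * b overflowed size_t. */
static int mul_check_overflow(size_t a, size_t b, size_t *product)
{
    *product = a * b;
    if (a != 0 && *product / a != b)
        return -1;                /* the multiplication wrapped around */
    return 0;
}

/* Refuse to allocate when rows * row_bytes does not fit in size_t. */
static void *alloc_matrix(size_t rows, size_t row_bytes)
{
    size_t total;
    if (mul_check_overflow(rows, row_bytes, &total) != 0)
        return NULL;
    return malloc(total);
}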
static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
void *data, int offset, unsigned long len)
{
int r;
unsigned long addr;
addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
if (kvm_is_error_hva(addr))
return -EFAULT;
pagefault_disable();
r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
pagefault_enable();
if (r)
return -EFAULT;
return 0;
} | 0 | [
"CWE-416"
]
| linux | 0774a964ef561b7170d8d1b1bfe6f88002b6d219 | 254,133,066,017,794,240,000,000,000,000,000,000,000 | 16 | KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Acked-by: Christian Borntraeger <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
void sas_device_set_phy(struct domain_device *dev, struct sas_port *port)
{
struct sas_ha_struct *ha;
struct sas_phy *new_phy;
if (!dev)
return;
ha = dev->port->ha;
new_phy = sas_port_get_phy(port);
/* pin and record last seen phy */
spin_lock_irq(&ha->phy_port_lock);
if (new_phy) {
sas_port_put_phy(dev->phy);
dev->phy = new_phy;
}
spin_unlock_irq(&ha->phy_port_lock);
} | 0 | [
"CWE-284"
]
| linux | 0558f33c06bb910e2879e355192227a8e8f0219d | 282,507,551,689,220,220,000,000,000,000,000,000,000 | 19 | scsi: libsas: direct call probe and destruct
In commit 87c8331fcf72 ("[SCSI] libsas: prevent domain rediscovery
competing with ata error handling") introduced disco mutex to prevent
rediscovery competing with ata error handling and put the whole
revalidation in the mutex. But the rphy add/remove needs to wait for the
error handling, which also grabs the disco mutex. This may lead to a
deadlock. So the probe and destruct events were introduced to do the rphy
add/remove asynchronously and out of the lock.
The asynchronously processed workers make the whole discovery process
non-atomic, so other events may interrupt the process. For example,
if a loss-of-signal event is inserted before the probe event, the
sas_deform_port() is called and the port will be deleted.
And sas_port_delete() may run before the destruct event, but the
port-x:x is the top parent of end device or expander. This leads to
a kernel WARNING such as:
[ 82.042979] sysfs group 'power' not found for kobject 'phy-1:0:22'
[ 82.042983] ------------[ cut here ]------------
[ 82.042986] WARNING: CPU: 54 PID: 1714 at fs/sysfs/group.c:237
sysfs_remove_group+0x94/0xa0
[ 82.043059] Call trace:
[ 82.043082] [<ffff0000082e7624>] sysfs_remove_group+0x94/0xa0
[ 82.043085] [<ffff00000864e320>] dpm_sysfs_remove+0x60/0x70
[ 82.043086] [<ffff00000863ee10>] device_del+0x138/0x308
[ 82.043089] [<ffff00000869a2d0>] sas_phy_delete+0x38/0x60
[ 82.043091] [<ffff00000869a86c>] do_sas_phy_delete+0x6c/0x80
[ 82.043093] [<ffff00000863dc20>] device_for_each_child+0x58/0xa0
[ 82.043095] [<ffff000008696f80>] sas_remove_children+0x40/0x50
[ 82.043100] [<ffff00000869d1bc>] sas_destruct_devices+0x64/0xa0
[ 82.043102] [<ffff0000080e93bc>] process_one_work+0x1fc/0x4b0
[ 82.043104] [<ffff0000080e96c0>] worker_thread+0x50/0x490
[ 82.043105] [<ffff0000080f0364>] kthread+0xfc/0x128
[ 82.043107] [<ffff0000080836c0>] ret_from_fork+0x10/0x50
Make probe and destruct a direct call in the disco and revalidate function,
but put them outside the lock. The whole discovery or revalidate won't
be interrupted by other events. And the DISCE_PROBE and DISCE_DESTRUCT
event are deleted as a result of the direct call.
Introduce a new list to destruct the sas_port and put the port delete after
the destruct. This makes sure the right order of destroying the sysfs
kobject and fix the warning above.
In sas_ex_revalidate_domain() have a loop to find all broadcasted
device, and sometimes we have a chance to find the same expander twice.
Because the sas_port will be deleted at the end of the whole revalidate
process, sas_port with the same name cannot be added before this.
Otherwise the sysfs will complain of creating duplicate filename. Since
the LLDD will send broadcast for every device change, we can only
process one expander's revalidation.
[mkp: kbuild test robot warning]
Signed-off-by: Jason Yan <[email protected]>
CC: John Garry <[email protected]>
CC: Johannes Thumshirn <[email protected]>
CC: Ewan Milne <[email protected]>
CC: Christoph Hellwig <[email protected]>
CC: Tomas Henzl <[email protected]>
CC: Dan Williams <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]> |
ex_unmap(exarg_T *eap)
{
do_exmap(eap, FALSE);
} | 0 | [
"CWE-78"
]
| vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 127,350,526,083,224,290,000,000,000,000,000,000,000 | 4 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
GF_Err gf_isom_remove_edit(GF_ISOFile *movie, u32 trackNumber, u32 seg_index)
{
GF_Err e;
GF_TrackBox *trak;
GF_EdtsEntry *ent, *next_ent;
trak = gf_isom_get_track_from_file(movie, trackNumber);
if (!trak || !seg_index) return GF_BAD_PARAM;
e = CanAccessMovie(movie, GF_ISOM_OPEN_WRITE);
if (e) return e;
if (!trak->editBox || !trak->editBox->editList) return GF_OK;
if (gf_list_count(trak->editBox->editList->entryList)<=1) return gf_isom_remove_edits(movie, trackNumber);
ent = (GF_EdtsEntry*) gf_list_get(trak->editBox->editList->entryList, seg_index-1);
gf_list_rem(trak->editBox->editList->entryList, seg_index-1);
next_ent = (GF_EdtsEntry *)gf_list_get(trak->editBox->editList->entryList, seg_index-1);
if (next_ent) next_ent->segmentDuration += ent->segmentDuration;
gf_free(ent);
return SetTrackDuration(trak);
} | 0 | [
"CWE-476"
]
| gpac | ebfa346eff05049718f7b80041093b4c5581c24e | 236,372,395,827,109,460,000,000,000,000,000,000,000 | 21 | fixed #1706 |
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
zoneref->zone_idx = zone_idx(zone);
} | 0 | []
| linux | 400e22499dd92613821374c8c6c88c7225359980 | 199,068,525,048,904,620,000,000,000,000,000,000,000 | 5 | mm: don't warn about allocations which stall for too long
Commit 63f53dea0c98 ("mm: warn about allocations which stall for too
long") was a great step for reducing possibility of silent hang up
problem caused by memory allocation stalls. But this commit reverts it,
for it is possible to trigger OOM lockup and/or soft lockups when many
threads concurrently called warn_alloc() (in order to warn about memory
allocation stalls) due to current implementation of printk(), and it is
difficult to obtain useful information due to limitation of synchronous
warning approach.
Current printk() implementation flushes all pending logs using the
context of a thread which called console_unlock(). printk() should be
able to flush all pending logs eventually unless somebody continues
appending to printk() buffer.
Since warn_alloc() started appending to printk() buffer while waiting
for oom_kill_process() to make forward progress when oom_kill_process()
is processing pending logs, it became possible for warn_alloc() to force
oom_kill_process() loop inside printk(). As a result, warn_alloc()
significantly increased possibility of preventing oom_kill_process()
from making forward progress.
---------- Pseudo code start ----------
Before warn_alloc() was introduced:
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
}
goto retry;
After warn_alloc() was introduced:
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
} else if (waited_for_10seconds()) {
atomic_inc(&printk_pending_logs);
}
goto retry;
---------- Pseudo code end ----------
Although waited_for_10seconds() becomes true once per 10 seconds,
unbounded number of threads can call waited_for_10seconds() at the same
time. Also, since threads doing waited_for_10seconds() keep doing
almost busy loop, the thread doing print_one_log() can use little CPU
resource. Therefore, this situation can be simplified like
---------- Pseudo code start ----------
retry:
if (mutex_trylock(&oom_lock)) {
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_lock)
} else {
atomic_inc(&printk_pending_logs);
}
goto retry;
---------- Pseudo code end ----------
when printk() is called faster than print_one_log() can process a log.
One of possible mitigation would be to introduce a new lock in order to
make sure that no other series of printk() (either oom_kill_process() or
warn_alloc()) can append to printk() buffer when one series of printk()
(either oom_kill_process() or warn_alloc()) is already in progress.
Such serialization will also help obtaining kernel messages in readable
form.
---------- Pseudo code start ----------
retry:
if (mutex_trylock(&oom_lock)) {
mutex_lock(&oom_printk_lock);
while (atomic_read(&printk_pending_logs) > 0) {
atomic_dec(&printk_pending_logs);
print_one_log();
}
// Send SIGKILL here.
mutex_unlock(&oom_printk_lock);
mutex_unlock(&oom_lock)
} else {
if (mutex_trylock(&oom_printk_lock)) {
atomic_inc(&printk_pending_logs);
mutex_unlock(&oom_printk_lock);
}
}
goto retry;
---------- Pseudo code end ----------
But this commit does not go that direction, for we don't want to
introduce a new lock dependency, and we unlikely be able to obtain
useful information even if we serialized oom_kill_process() and
warn_alloc().
Synchronous approach is prone to unexpected results (e.g. too late [1],
too frequent [2], overlooked [3]). As far as I know, warn_alloc() never
helped with providing information other than "something is going wrong".
I want to consider asynchronous approach which can obtain information
during stalls with possibly relevant threads (e.g. the owner of
oom_lock and kswapd-like threads) and serve as a trigger for actions
(e.g. turn on/off tracepoints, ask libvirt daemon to take a memory dump
of stalling KVM guest for diagnostic purpose).
This commit temporarily loses ability to report e.g. OOM lockup due to
unable to invoke the OOM killer due to !__GFP_FS allocation request.
But asynchronous approach will be able to detect such situation and emit
warning. Thus, let's remove warn_alloc().
[1] https://bugzilla.kernel.org/show_bug.cgi?id=192981
[2] http://lkml.kernel.org/r/CAM_iQpWuPVGc2ky8M-9yukECtS+zKjiDasNymX7rMcBjBFyM_A@mail.gmail.com
[3] commit db73ee0d46379922 ("mm, vmscan: do not loop on too_many_isolated for ever"))
Link: http://lkml.kernel.org/r/1509017339-4802-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp
Signed-off-by: Tetsuo Handa <[email protected]>
Reported-by: Cong Wang <[email protected]>
Reported-by: yuwang.yuwang <[email protected]>
Reported-by: Johannes Weiner <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Sergey Senozhatsky <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Steven Rostedt <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
gst_h264_parser_parse_pps (GstH264NalParser * nalparser,
GstH264NalUnit * nalu, GstH264PPS * pps)
{
GstH264ParserResult res = gst_h264_parse_pps (nalparser, nalu, pps);
if (res == GST_H264_PARSER_OK) {
GST_DEBUG ("adding picture parameter set with id: %d to array", pps->id);
if (!gst_h264_pps_copy (&nalparser->pps[pps->id], pps))
return GST_H264_PARSER_ERROR;
nalparser->last_pps = &nalparser->pps[pps->id];
}
return res;
} | 0 | [
"CWE-787"
]
| gst-plugins-bad | 11353b3f6e2f047cc37483d21e6a37ae558896bc | 327,629,450,899,585,170,000,000,000,000,000,000,000 | 15 | codecparsers: h264parser: guard against ref_pic_markings overflow
Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/merge_requests/1703> |
int Field::store(const char *to, size_t length, CHARSET_INFO *cs,
enum_check_fields check_level)
{
Check_level_instant_set check_level_save(get_thd(), check_level);
return store(to, length, cs);
} | 0 | [
"CWE-416",
"CWE-703"
]
| server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 148,752,044,500,408,020,000,000,000,000,000,000,000 | 6 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
static JSValue js_sys_url_cat(JSContext *ctx, JSValueConst this_val, int argc, JSValueConst *argv)
{
const char *parent;
const char *src;
char *url;
JSValue res;
if (argc<2) return GF_JS_EXCEPTION(ctx);
parent = JS_ToCString(ctx, argv[0]);
src = JS_ToCString(ctx, argv[1]);
url = gf_url_concatenate(parent, src);
if (url) {
res = JS_NewString(ctx, url);
gf_free(url);
} else {
res = JS_NewString(ctx, src);
}
JS_FreeCString(ctx, parent);
JS_FreeCString(ctx, src);
return res;
} | 0 | [
"CWE-787"
]
| gpac | ea1eca00fd92fa17f0e25ac25652622924a9a6a0 | 131,585,747,350,238,550,000,000,000,000,000,000,000 | 20 | fixed #2138 |
const char *ipx_frame_name(__be16 frame)
{
char* rc = "None";
switch (ntohs(frame)) {
case ETH_P_IPX: rc = "EtherII"; break;
case ETH_P_802_2: rc = "802.2"; break;
case ETH_P_SNAP: rc = "SNAP"; break;
case ETH_P_802_3: rc = "802.3"; break;
}
return rc;
} | 0 | [
"CWE-416"
]
| linux | ee0d8d8482345ff97a75a7d747efc309f13b0d80 | 158,764,794,981,465,140,000,000,000,000,000,000,000 | 13 | ipx: call ipxitf_put() in ioctl error path
We should call ipxitf_put() if the copy_to_user() fails.
Reported-by: 李强 <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
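The fix is a reference-counting discipline rather than new logic: drop the reference on the failure path too. A userspace analogue of the pattern (the struct and helpers here are invented for illustration, not the kernel's ipx code):
#include <errno.h>
#include <string.h>

struct iface {
    int refcount;
    char name[32];
};

static void iface_put(struct iface *i) { i->refcount--; }

/* Copy the result out, but release the reference on every path,
 * including the failure path -- the missing put on failure was the bug. */
static int iface_query(struct iface *i, char *out, size_t outlen)
{
    int rc = 0;

    if (outlen < sizeof(i->name))
        rc = -EFAULT;                 /* analogous to copy_to_user() failing */
    else
        memcpy(out, i->name, sizeof(i->name));

    iface_put(i);                      /* drop the reference unconditionally */
    return rc;
}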
static double OptimalTau(const ssize_t *histogram,const double max_tau,
const double min_tau,const double delta_tau,const double smooth_threshold,
short *extrema)
{
IntervalTree
**list,
*node,
*root;
MagickBooleanType
peak;
double
average_tau,
*derivative,
*second_derivative,
tau,
value;
register ssize_t
i,
x;
size_t
count,
number_crossings;
ssize_t
index,
j,
k,
number_nodes;
ZeroCrossing
*zero_crossing;
/*
Allocate interval tree.
*/
list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
sizeof(*list));
if (list == (IntervalTree **) NULL)
return(0.0);
/*
Allocate zero crossing list.
*/
count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
sizeof(*zero_crossing));
if (zero_crossing == (ZeroCrossing *) NULL)
{
list=(IntervalTree **) RelinquishMagickMemory(list);
return(0.0);
}
for (i=0; i < (ssize_t) count; i++)
zero_crossing[i].tau=(-1.0);
/*
Initialize zero crossing list.
*/
derivative=(double *) AcquireCriticalMemory(256*sizeof(*derivative));
second_derivative=(double *) AcquireCriticalMemory(256*
sizeof(*second_derivative));
i=0;
for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
{
zero_crossing[i].tau=tau;
ScaleSpace(histogram,tau,zero_crossing[i].histogram);
DerivativeHistogram(zero_crossing[i].histogram,derivative);
DerivativeHistogram(derivative,second_derivative);
ZeroCrossHistogram(second_derivative,smooth_threshold,
zero_crossing[i].crossings);
i++;
}
/*
Add an entry for the original histogram.
*/
zero_crossing[i].tau=0.0;
for (j=0; j <= 255; j++)
zero_crossing[i].histogram[j]=(double) histogram[j];
DerivativeHistogram(zero_crossing[i].histogram,derivative);
DerivativeHistogram(derivative,second_derivative);
ZeroCrossHistogram(second_derivative,smooth_threshold,
zero_crossing[i].crossings);
number_crossings=(size_t) i;
derivative=(double *) RelinquishMagickMemory(derivative);
second_derivative=(double *) RelinquishMagickMemory(second_derivative);
/*
Ensure the scale-space fingerprints form lines in scale-space, not loops.
*/
ConsolidateCrossings(zero_crossing,number_crossings);
/*
Force endpoints to be included in the interval.
*/
for (i=0; i <= (ssize_t) number_crossings; i++)
{
for (j=0; j < 255; j++)
if (zero_crossing[i].crossings[j] != 0)
break;
zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
for (j=255; j > 0; j--)
if (zero_crossing[i].crossings[j] != 0)
break;
zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
}
/*
Initialize interval tree.
*/
root=InitializeIntervalTree(zero_crossing,number_crossings);
if (root == (IntervalTree *) NULL)
{
zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
list=(IntervalTree **) RelinquishMagickMemory(list);
return(0.0);
}
/*
Find active nodes: stability is greater (or equal) to the mean stability of
its children.
*/
number_nodes=0;
ActiveNodes(list,&number_nodes,root->child);
/*
Initialize extrema.
*/
for (i=0; i <= 255; i++)
extrema[i]=0;
for (i=0; i < number_nodes; i++)
{
/*
Find this tau in zero crossings list.
*/
k=0;
node=list[i];
for (j=0; j <= (ssize_t) number_crossings; j++)
if (zero_crossing[j].tau == node->tau)
k=j;
/*
Find the value of the peak.
*/
peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
MagickFalse;
index=node->left;
value=zero_crossing[k].histogram[index];
for (x=node->left; x <= node->right; x++)
{
if (peak != MagickFalse)
{
if (zero_crossing[k].histogram[x] > value)
{
value=zero_crossing[k].histogram[x];
index=x;
}
}
else
if (zero_crossing[k].histogram[x] < value)
{
value=zero_crossing[k].histogram[x];
index=x;
}
}
for (x=node->left; x <= node->right; x++)
{
if (index == 0)
index=256;
if (peak != MagickFalse)
extrema[x]=(short) index;
else
extrema[x]=(short) (-index);
}
}
/*
Determine the average tau.
*/
average_tau=0.0;
for (i=0; i < number_nodes; i++)
average_tau+=list[i]->tau;
average_tau*=PerceptibleReciprocal((double) number_nodes);
/*
Relinquish resources.
*/
FreeNodes(root);
zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
list=(IntervalTree **) RelinquishMagickMemory(list);
return(average_tau);
} | 0 | [
"CWE-369"
]
| ImageMagick | a4c89f2a61069ad7637bc7749cc1a839de442526 | 73,541,710,271,091,600,000,000,000,000,000,000,000 | 184 | https://github.com/ImageMagick/ImageMagick/issues/1730 |
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
block_t blk_addr, struct bio *bio)
{
struct block_device *bdev = sbi->sb->s_bdev;
int i;
if (f2fs_is_multi_device(sbi)) {
for (i = 0; i < sbi->s_ndevs; i++) {
if (FDEV(i).start_blk <= blk_addr &&
FDEV(i).end_blk >= blk_addr) {
blk_addr -= FDEV(i).start_blk;
bdev = FDEV(i).bdev;
break;
}
}
}
if (bio) {
bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
}
return bdev;
} | 0 | [
"CWE-476"
]
| linux | 4969c06a0d83c9c3dc50b8efcdc8eeedfce896f6 | 302,288,351,260,026,820,000,000,000,000,000,000,000 | 22 | f2fs: support swap file w/ DIO
Signed-off-by: Jaegeuk Kim <[email protected]> |
void Downstream::inspect_http1_request() {
if (req_.method == HTTP_CONNECT) {
req_.upgrade_request = true;
} else if (req_.http_minor > 0) {
auto upgrade = req_.fs.header(http2::HD_UPGRADE);
if (upgrade) {
const auto &val = upgrade->value;
// TODO Perform more strict checking for upgrade headers
if (util::streq_l(NGHTTP2_CLEARTEXT_PROTO_VERSION_ID, val.c_str(),
val.size())) {
req_.http2_upgrade_seen = true;
} else {
req_.upgrade_request = true;
// TODO Should we check Sec-WebSocket-Key, and
// Sec-WebSocket-Version as well?
if (util::strieq_l("websocket", val)) {
req_.connect_proto = ConnectProto::WEBSOCKET;
}
}
}
}
auto transfer_encoding = req_.fs.header(http2::HD_TRANSFER_ENCODING);
if (transfer_encoding) {
req_.fs.content_length = -1;
if (util::iends_with_l(transfer_encoding->value, "chunked")) {
chunked_request_ = true;
}
}
} | 1 | []
| nghttp2 | 319d5ab1c6d916b6b8a0d85b2ae3f01b3ad04f2c | 58,365,931,505,958,020,000,000,000,000,000,000,000 | 30 | nghttpx: Fix request stall
Fix request stall if backend connection is reused and buffer is full. |
static void ctxload_finalize(GF_Filter *filter)
{
CTXLoadPriv *priv = gf_filter_get_udta(filter);
if (priv->ctx) gf_sm_del(priv->ctx);
if (priv->files_to_delete) gf_list_del(priv->files_to_delete);
} | 0 | [
"CWE-276"
]
| gpac | 96699aabae042f8f55cf8a85fa5758e3db752bae | 80,425,923,785,756,390,000,000,000,000,000,000,000 | 7 | fixed #2061 |
my_decimal *Item_sum_avg::val_decimal(my_decimal *val)
{
my_decimal cnt;
const my_decimal *sum_dec;
DBUG_ASSERT(fixed == 1);
if (aggr)
aggr->endup();
if (!count)
{
null_value=1;
return NULL;
}
/*
For non-DECIMAL result_type() the division will be done in
Item_sum_avg::val_real().
*/
if (Item_sum_avg::result_type() != DECIMAL_RESULT)
return val_decimal_from_real(val);
sum_dec= dec_buffs + curr_dec_buff;
int2my_decimal(E_DEC_FATAL_ERROR, count, 0, &cnt);
my_decimal_div(E_DEC_FATAL_ERROR, val, sum_dec, &cnt, prec_increment);
return val;
} | 0 | [
"CWE-120"
]
| server | eca207c46293bc72dd8d0d5622153fab4d3fccf1 | 298,716,981,722,125,950,000,000,000,000,000,000,000 | 25 | MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour, so it is problematic to fix. |
ikev2_auth_print(netdissect_options *ndo, u_char tpay,
const struct isakmp_gen *ext,
u_int item_len _U_, const u_char *ep,
uint32_t phase _U_, uint32_t doi _U_,
uint32_t proto _U_, int depth _U_)
{
struct ikev2_auth a;
const char *v2_auth[]={ "invalid", "rsasig",
"shared-secret", "dsssig" };
const u_char *authdata = (const u_char*)ext + sizeof(a);
unsigned int len;
ND_TCHECK(*ext);
UNALIGNED_MEMCPY(&a, ext, sizeof(a));
ikev2_pay_print(ndo, NPSTR(tpay), a.h.critical);
len = ntohs(a.h.len);
/*
* Our caller has ensured that the length is >= 4.
*/
ND_PRINT((ndo," len=%u method=%s", len-4,
STR_OR_ID(a.auth_method, v2_auth)));
if (len > 4) {
if (ndo->ndo_vflag > 1) {
ND_PRINT((ndo, " authdata=("));
if (!rawprint(ndo, (const uint8_t *)authdata, len - sizeof(a)))
goto trunc;
ND_PRINT((ndo, ") "));
} else if (ndo->ndo_vflag) {
if (!ike_show_somedata(ndo, authdata, ep))
goto trunc;
}
}
return (const u_char *)ext + len;
trunc:
ND_PRINT((ndo," [|%s]", NPSTR(tpay)));
return NULL;
} | 1 | [
"CWE-125",
"CWE-787"
]
| tcpdump | 8dca25d26c7ca2caf6138267f6f17111212c156e | 249,175,220,691,638,030,000,000,000,000,000,000,000 | 39 | CVE-2017-13690/IKEv2: Fix some bounds checks.
Use a pointer of the correct type in ND_TCHECK(), or use ND_TCHECK2()
and provide the correct length.
While we're at it, remove the blank line between some checks and the
UNALIGNED_MEMCPY()s they protect.
Also, note the places where we print the entire payload.
This fixes a buffer over-read discovered by Bhargava Shastry,
SecT/TU Berlin.
Add a test using the capture file supplied by the reporter(s). |
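The underlying rule for ND_TCHECK2-style guards is to compare the full length about to be read against the end of the captured buffer before dereferencing; a generic sketch of that bounds check:
#include <stddef.h>
#include <string.h>

/* Copy len bytes only if [p, p+len) lies entirely inside [start, end). */
static int checked_copy(void *dst, const unsigned char *p, size_t len,
                        const unsigned char *start, const unsigned char *end)
{
    if (p < start || p > end || len > (size_t)(end - p))
        return -1;                 /* would read past the captured data */
    memcpy(dst, p, len);
    return 0;
}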
void str_to_file2(const char *fname, char *str, int size, my_bool append)
{
int fd;
char buff[FN_REFLEN];
int flags= O_WRONLY | O_CREAT;
if (!test_if_hard_path(fname))
{
strxmov(buff, opt_basedir, fname, NullS);
fname= buff;
}
fn_format(buff, fname, "", "", MY_UNPACK_FILENAME);
if (!append)
flags|= O_TRUNC;
if ((fd= my_open(buff, flags,
MYF(MY_WME | MY_FFNF))) < 0)
die("Could not open '%s' for writing, errno: %d", buff, errno);
if (append && my_seek(fd, 0, SEEK_END, MYF(0)) == MY_FILEPOS_ERROR)
die("Could not find end of file '%s', errno: %d", buff, errno);
if (my_write(fd, (uchar*)str, size, MYF(MY_WME|MY_FNABP)))
die("write failed, errno: %d", errno);
my_close(fd, MYF(0));
} | 0 | [
"CWE-284",
"CWE-295"
]
| mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 76,206,270,570,934,275,000,000,000,000,000,000,000 | 23 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
GC_API void GC_CALL GC_set_force_unmap_on_gcollect(int value)
{
GC_force_unmap_on_gcollect = (GC_bool)value;
} | 0 | [
"CWE-119"
]
| bdwgc | 7292c02fac2066d39dd1bcc37d1a7054fd1e32ee | 58,322,731,880,576,420,000,000,000,000,000,000,000 | 4 | Fix malloc routines to prevent size value wrap-around
See issue #135 on Github.
* allchblk.c (GC_allochblk, GC_allochblk_nth): Use
OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS.
* malloc.c (GC_alloc_large): Likewise.
* alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed
from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent
overflow when computing GC_heapsize+bytes > GC_max_heapsize.
* dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page,
GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc,
GC_debug_generic_malloc_inner,
GC_debug_generic_malloc_inner_ignore_off_page,
GC_debug_malloc_stubborn, GC_debug_malloc_atomic,
GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable):
Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb
value.
* fnlz_mlc.c (GC_finalized_malloc): Likewise.
* gcj_mlc.c (GC_debug_gcj_malloc): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Likewise.
* include/private/gcconfig.h (GET_MEM): Likewise.
* mallocx.c (GC_malloc_many, GC_memalign): Likewise.
* os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise.
* typd_mlc.c (GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page,
GC_calloc_explicitly_typed): Likewise.
* headers.c (GC_scratch_alloc): Change type of bytes_to_get from word
to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed).
* include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already
defined).
* include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from
malloc.c file.
* include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before
include gcconfig.h).
* include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type
to size_t.
* os_dep.c (GC_page_size): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument.
* include/private/gcconfig.h (GET_MEM): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE,
ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb".
* include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro.
* include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem,
GC_unix_get_mem): Change argument type from word to int.
* os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem,
GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise.
* malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only
if no value wrap around is guaranteed.
* malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case
(because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value
wrap around).
* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
* misc.c (GC_init_size_map): Change "i" local variable type from int
to size_t.
* os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise.
* misc.c (GC_envfile_init): Cast len to size_t when passed to
ROUNDUP_PAGESIZE_IF_MMAP.
* os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and
GETPAGESIZE() to size_t (when setting GC_page_size).
* os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection):
Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking
(the argument is of word type).
* os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with
~GC_page_size+1 (because GC_page_size is unsigned); remove redundant
cast to size_t.
* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size
to SBRK_ARG_T.
* os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable
to size_t.
* typd_mlc.c: Do not include limits.h.
* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in
gc_priv.h now). |
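The SIZET_SAT_ADD idea is saturating rather than wrapping arithmetic for allocation sizes; a stand-alone sketch of such a macro (the exact bdwgc definition may differ):
#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>

/* Saturating add: the result never wraps past SIZE_MAX, so "lb + extra"
 * style computations cannot silently become a tiny allocation request. */
#define SAT_ADD_SIZET(a, b) \
    ((size_t)(a) >= SIZE_MAX - (size_t)(b) ? SIZE_MAX : (size_t)(a) + (size_t)(b))

/* Example: an oversized request saturates and then fails cleanly in malloc. */
static void *alloc_with_header(size_t payload, size_t header_bytes)
{
    return malloc(SAT_ADD_SIZET(payload, header_bytes));
}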
static int unit_export_log_extra_fields(Unit *u, const ExecContext *c) {
_cleanup_close_ int fd = -1;
struct iovec *iovec;
const char *p;
char *pattern;
le64_t *sizes;
ssize_t n;
size_t i;
int r;
if (u->exported_log_extra_fields)
return 0;
if (c->n_log_extra_fields <= 0)
return 0;
sizes = newa(le64_t, c->n_log_extra_fields);
iovec = newa(struct iovec, c->n_log_extra_fields * 2);
for (i = 0; i < c->n_log_extra_fields; i++) {
sizes[i] = htole64(c->log_extra_fields[i].iov_len);
iovec[i*2] = IOVEC_MAKE(sizes + i, sizeof(le64_t));
iovec[i*2+1] = c->log_extra_fields[i];
}
p = strjoina("/run/systemd/units/log-extra-fields:", u->id);
pattern = strjoina(p, ".XXXXXX");
fd = mkostemp_safe(pattern);
if (fd < 0)
return log_unit_debug_errno(u, fd, "Failed to create extra fields file %s: %m", p);
n = writev(fd, iovec, c->n_log_extra_fields*2);
if (n < 0) {
r = log_unit_debug_errno(u, errno, "Failed to write extra fields: %m");
goto fail;
}
(void) fchmod(fd, 0644);
if (rename(pattern, p) < 0) {
r = log_unit_debug_errno(u, errno, "Failed to rename extra fields file: %m");
goto fail;
}
u->exported_log_extra_fields = true;
return 0;
fail:
(void) unlink(pattern);
return r;
} | 0 | [
"CWE-269"
]
| systemd | bf65b7e0c9fc215897b676ab9a7c9d1c688143ba | 237,373,792,367,992,250,000,000,000,000,000,000,000 | 53 | core: imply NNP and SUID/SGID restriction for DynamicUser=yes service
Let's be safe, rather than sorry. This way DynamicUser=yes services can
neither take benefit of, nor create SUID/SGID binaries.
Given that DynamicUser= is a recent addition only we should be able to
get away with turning this on, even though this is strictly speaking a
binary compatibility breakage. |
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
int i;
trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
kvm_hv_notify_acked_sint(vcpu, i);
} | 0 | [
"CWE-476"
]
| linux | 919f4ebc598701670e80e31573a58f1f2d2bf918 | 168,583,593,922,302,470,000,000,000,000,000,000,000 | 11 | KVM: x86: hyper-v: Fix Hyper-V context null-ptr-deref
Reported by syzkaller:
KASAN: null-ptr-deref in range [0x0000000000000140-0x0000000000000147]
CPU: 1 PID: 8370 Comm: syz-executor859 Not tainted 5.11.0-syzkaller #0
RIP: 0010:synic_get arch/x86/kvm/hyperv.c:165 [inline]
RIP: 0010:kvm_hv_set_sint_gsi arch/x86/kvm/hyperv.c:475 [inline]
RIP: 0010:kvm_hv_irq_routing_update+0x230/0x460 arch/x86/kvm/hyperv.c:498
Call Trace:
kvm_set_irq_routing+0x69b/0x940 arch/x86/kvm/../../../virt/kvm/irqchip.c:223
kvm_vm_ioctl+0x12d0/0x2800 arch/x86/kvm/../../../virt/kvm/kvm_main.c:3959
vfs_ioctl fs/ioctl.c:48 [inline]
__do_sys_ioctl fs/ioctl.c:753 [inline]
__se_sys_ioctl fs/ioctl.c:739 [inline]
__x64_sys_ioctl+0x193/0x200 fs/ioctl.c:739
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
Hyper-V context is lazily allocated until Hyper-V specific MSRs are accessed
or SynIC is enabled. However, the syzkaller testcase sets irq routing table
directly w/o enabling SynIC. This results in null-ptr-deref when accessing
SynIC Hyper-V context. This patch fixes it.
syzkaller source: https://syzkaller.appspot.com/x/repro.c?x=163342ccd00000
Reported-by: [email protected]
Fixes: 8f014550dfb1 ("KVM: x86: hyper-v: Make Hyper-V emulation enablement conditional")
Signed-off-by: Wanpeng Li <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
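The defensive shape of the fix is "check the lazily allocated context before dereferencing it"; a compact sketch of that guard with invented names (not the KVM structures):
#include <stddef.h>
#include <errno.h>

struct hv_ctx { int synic_enabled; };
struct vcpu   { struct hv_ctx *hv; };   /* allocated lazily, may still be NULL */

/* Fail the request instead of dereferencing a context that was never set up. */
static int set_sint_route(struct vcpu *v, int sint)
{
    struct hv_ctx *hv = v->hv;
    if (hv == NULL)
        return -EINVAL;                 /* SynIC/MSRs never touched: no context */
    (void)sint;                          /* route setup would go here */
    return 0;
}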
static void set_rpa_resolution_complete(uint8_t status, uint16_t len,
const void *param, void *user_data)
{
if (status != 0)
error("Set RPA Resolution failed with status 0x%02x (%s)",
status, mgmt_errstr(status));
else
DBG("RPA Resolution successfully set");
} | 0 | [
"CWE-862",
"CWE-863"
]
| bluez | b497b5942a8beb8f89ca1c359c54ad67ec843055 | 64,710,258,845,152,940,000,000,000,000,000,000,000 | 9 | adapter: Fix storing discoverable setting
The discoverable setting shall only be stored when changed via the Discoverable
property, and not when a discovery client sets it, as that is considered
temporary just for the lifetime of the discovery. |
int smb_vfs_call_sys_acl_delete_def_file(struct vfs_handle_struct *handle,
const char *path)
{
VFS_FIND(sys_acl_delete_def_file);
return handle->fns->sys_acl_delete_def_file_fn(handle, path);
} | 0 | [
"CWE-264"
]
| samba | 4278ef25f64d5fdbf432ff1534e275416ec9561e | 296,711,518,050,723,340,000,000,000,000,000,000,000 | 6 | CVE-2015-5252: s3: smbd: Fix symlink verification (file access outside the share).
Ensure matching component ends in '/' or '\0'.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11395
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Volker Lendecke <[email protected]> |
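The one-line rule in the message prevents "/share/foo" from matching "/share/foobar"; a sketch of the prefix check with the required terminator test:
#include <string.h>
#include <stdbool.h>

/* True only if 'path' equals 'base' or lies underneath it. */
static bool path_is_under(const char *base, const char *path)
{
    size_t blen = strlen(base);

    if (strncmp(path, base, blen) != 0)
        return false;
    /* The matching component must end in '/' or '\0'. */
    return path[blen] == '/' || path[blen] == '\0';
}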
cdf_dump_header(const cdf_header_t *h)
{
size_t i;
#define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b)
#define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \
h->h_ ## b, 1 << h->h_ ## b)
DUMP("%d", revision);
DUMP("%d", version);
DUMP("%#x", byte_order);
DUMP2("%d", sec_size_p2);
DUMP2("%d", short_sec_size_p2);
DUMP("%d", num_sectors_in_sat);
DUMP("%d", secid_first_directory);
DUMP("%d", min_size_standard_stream);
DUMP("%d", secid_first_sector_in_short_sat);
DUMP("%d", num_sectors_in_short_sat);
DUMP("%d", secid_first_sector_in_master_sat);
DUMP("%d", num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++) {
if (h->h_master_sat[i] == CDF_SECID_FREE)
break;
(void)fprintf(stderr, "%35.35s[%.3" SIZE_T_FORMAT "u] = %d\n",
"master_sat", i, h->h_master_sat[i]);
}
} | 0 | [
"CWE-787"
]
| file | 46a8443f76cec4b41ec736eca396984c74664f84 | 257,613,716,104,927,940,000,000,000,000,000,000,000 | 26 | Limit the number of elements in a vector (found by oss-fuzz) |
void WriteVirtIODeviceByte(ULONG_PTR ulRegister, u8 bValue)
{
DPrintf(6, ("[%s]R[%x]=%x\n", __FUNCTION__, (ULONG)ulRegister, bValue) );
NdisRawWritePortUchar(ulRegister, bValue);
} | 0 | [
"CWE-20"
]
| kvm-guest-drivers-windows | 723416fa4210b7464b28eab89cc76252e6193ac1 | 185,421,506,597,077,180,000,000,000,000,000,000,000 | 6 | NetKVM: BZ#1169718: Checking the length only on read
Signed-off-by: Joseph Hindin <[email protected]> |
static bool notify_append_le16(struct nfy_mult_data *data, uint16_t value)
{
if (data->offset + sizeof(value) > data->len)
return false;
put_le16(value, data->pdu + data->offset);
data->offset += sizeof(value);
return true;
} | 0 | [
"CWE-476",
"CWE-119",
"CWE-787"
]
| bluez | 591c546c536b42bef696d027f64aa22434f8c3f0 | 153,023,523,810,231,330,000,000,000,000,000,000,000 | 10 | shared/gatt-server: Fix heap overflow when appending prepare writes
The code shall check if the prepare writes would append more than the
allowed maximum attribute length.
Fixes https://github.com/bluez/bluez/security/advisories/GHSA-479m-xcq5-9g2q |
static inline int rcu_use_vmalloc(int size)
{
/* Too big for a single page? */
if (HDRLEN_KMALLOC + size > PAGE_SIZE)
return 1;
return 0;
} | 0 | [
"CWE-703",
"CWE-189"
]
| linux | 6062a8dc0517bce23e3c2f7d2fea5e22411269a3 | 273,334,731,274,306,580,000,000,000,000,000,000,000 | 7 | ipc,sem: fine grained locking for semtimedop
Introduce finer grained locking for semtimedop, to handle the common case
of a program wanting to manipulate one semaphore from an array with
multiple semaphores.
If the call is a semop manipulating just one semaphore in an array with
multiple semaphores, only take the lock for that semaphore itself.
If the call needs to manipulate multiple semaphores, or another caller is
in a transaction that manipulates multiple semaphores, the sem_array lock
is taken, as well as all the locks for the individual semaphores.
On a 24 CPU system, performance numbers with the semop-multi
test with N threads and N semaphores, look like this:
vanilla Davidlohr's Davidlohr's + Davidlohr's +
threads patches rwlock patches v3 patches
10 610652 726325 1783589 2142206
20 341570 365699 1520453 1977878
30 288102 307037 1498167 2037995
40 290714 305955 1612665 2256484
50 288620 312890 1733453 2650292
60 289987 306043 1649360 2388008
70 291298 306347 1723167 2717486
80 290948 305662 1729545 2763582
90 290996 306680 1736021 2757524
100 292243 306700 1773700 3059159
[[email protected]: do not call sem_lock when bogus sma]
[[email protected]: make refcounter atomic]
Signed-off-by: Rik van Riel <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Acked-by: Davidlohr Bueso <[email protected]>
Cc: Chegu Vinod <[email protected]>
Cc: Jason Low <[email protected]>
Reviewed-by: Michel Lespinasse <[email protected]>
Cc: Peter Hurley <[email protected]>
Cc: Stanislav Kinsbursky <[email protected]>
Tested-by: Emmanuel Benisty <[email protected]>
Tested-by: Sedat Dilek <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static void sntp_cb(struct mg_connection *c, int ev, void *evd, void *fnd) {
if (ev == MG_EV_SNTP_TIME) {
*(int64_t *) fnd = *(int64_t *) evd;
}
(void) c;
} | 0 | [
"CWE-552"
]
| mongoose | c65c8fdaaa257e0487ab0aaae9e8f6b439335945 | 13,735,100,177,361,483,000,000,000,000,000,000,000 | 6 | Protect against the directory traversal in mg_upload() |
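The commit message gives no detail, but the usual defense for upload paths is rejecting any ".." component before using the name; a generic sketch (not mongoose's actual implementation):
#include <string.h>
#include <stdbool.h>

/* Reject any path containing a ".." component (directory traversal). */
static bool upload_path_is_safe(const char *path)
{
    const char *p = path;

    while (*p != '\0') {
        size_t seg = strcspn(p, "/\\");        /* length of the next component */
        if (seg == 2 && p[0] == '.' && p[1] == '.')
            return false;
        p += seg;
        if (*p != '\0')
            p++;                               /* skip the separator */
    }
    return true;
}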
static inline bool is_no_device(u32 intr_info)
{
return is_exception_n(intr_info, NM_VECTOR);
} | 0 | [
"CWE-284"
]
| linux | 727ba748e110b4de50d142edca9d6a9b7e6111d8 | 255,118,056,326,747,500,000,000,000,000,000,000,000 | 4 | kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside a L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: [email protected]
Signed-off-by: Felix Wilhelm <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static ZIPARCHIVE_METHOD(getExternalAttributesIndex)
{
struct zip *intern;
zval *self = getThis(), *z_opsys, *z_attr;
zend_long index, flags=0;
zip_uint8_t opsys;
zip_uint32_t attr;
struct zip_stat sb;
if (!self) {
RETURN_FALSE;
}
ZIP_FROM_OBJECT(intern, self);
if (zend_parse_parameters(ZEND_NUM_ARGS(), "lz/z/|l",
&index, &z_opsys, &z_attr, &flags) == FAILURE) {
return;
}
PHP_ZIP_STAT_INDEX(intern, index, 0, sb);
if (zip_file_get_external_attributes(intern, (zip_uint64_t)index,
(zip_flags_t)flags, &opsys, &attr) < 0) {
RETURN_FALSE;
}
zval_dtor(z_opsys);
ZVAL_LONG(z_opsys, opsys);
zval_dtor(z_attr);
ZVAL_LONG(z_attr, attr);
RETURN_TRUE;
} | 0 | [
"CWE-190"
]
| php-src | 3b8d4de300854b3517c7acb239b84f7726c1353c | 24,071,615,986,789,373,000,000,000,000,000,000,000 | 31 | Fix bug #71923 - integer overflow in ZipArchive::getFrom* |
static void detach_task(struct task_struct *p, struct lb_env *env)
{
lockdep_assert_held(&env->src_rq->lock);
p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
set_task_cpu(p, env->dst_cpu);
} | 0 | [
"CWE-400",
"CWE-703",
"CWE-835"
]
| linux | c40f7d74c741a907cfaeb73a7697081881c497d0 | 48,264,770,314,329,450,000,000,000,000,000,000,000 | 8 | sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
is a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <[email protected]>
Analyzed-by: Vincent Guittot <[email protected]>
Reported-by: Zhipeng Xie <[email protected]>
Reported-by: Sargun Dhillon <[email protected]>
Reported-by: Xie XiuQi <[email protected]>
Tested-by: Zhipeng Xie <[email protected]>
Tested-by: Sargun Dhillon <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Acked-by: Vincent Guittot <[email protected]>
Cc: <[email protected]> # v4.13+
Cc: Bin Li <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]> |
xmlSchemaParseIncludeOrRedefineAttrs(xmlSchemaParserCtxtPtr pctxt,
xmlSchemaPtr schema,
xmlNodePtr node,
xmlChar **schemaLocation,
int type)
{
xmlAttrPtr attr;
if ((pctxt == NULL) || (schema == NULL) || (node == NULL) ||
(schemaLocation == NULL))
return (-1);
*schemaLocation = NULL;
/*
* Check for illegal attributes.
* Applies for both <include> and <redefine>.
*/
attr = node->properties;
while (attr != NULL) {
if (attr->ns == NULL) {
if ((!xmlStrEqual(attr->name, BAD_CAST "id")) &&
(!xmlStrEqual(attr->name, BAD_CAST "schemaLocation"))) {
xmlSchemaPIllegalAttrErr(pctxt,
XML_SCHEMAP_S4S_ATTR_NOT_ALLOWED, NULL, attr);
}
} else if (xmlStrEqual(attr->ns->href, xmlSchemaNs)) {
xmlSchemaPIllegalAttrErr(pctxt,
XML_SCHEMAP_S4S_ATTR_NOT_ALLOWED, NULL, attr);
}
attr = attr->next;
}
xmlSchemaPValAttrID(pctxt, node, BAD_CAST "id");
/*
* Preliminary step, extract the URI-Reference and make an URI
* from the base.
*/
/*
* Attribute "schemaLocation" is mandatory.
*/
attr = xmlSchemaGetPropNode(node, "schemaLocation");
if (attr != NULL) {
xmlChar *base = NULL;
xmlChar *uri = NULL;
if (xmlSchemaPValAttrNode(pctxt, NULL, attr,
xmlSchemaGetBuiltInType(XML_SCHEMAS_ANYURI),
(const xmlChar **) schemaLocation) != 0)
goto exit_error;
base = xmlNodeGetBase(node->doc, node);
if (base == NULL) {
uri = xmlBuildURI(*schemaLocation, node->doc->URL);
} else {
uri = xmlBuildURI(*schemaLocation, base);
xmlFree(base);
}
if (uri == NULL) {
PERROR_INT("xmlSchemaParseIncludeOrRedefine",
"could not build an URI from the schemaLocation")
goto exit_failure;
}
(*schemaLocation) = (xmlChar *) xmlDictLookup(pctxt->dict, uri, -1);
xmlFree(uri);
} else {
xmlSchemaPMissingAttrErr(pctxt,
XML_SCHEMAP_S4S_ATTR_MISSING,
NULL, node, "schemaLocation", NULL);
goto exit_error;
}
/*
* Report self-inclusion and self-redefinition.
*/
if (xmlStrEqual(*schemaLocation, pctxt->URL)) {
if (type == XML_SCHEMA_SCHEMA_REDEFINE) {
xmlSchemaPCustomErr(pctxt,
XML_SCHEMAP_SRC_REDEFINE,
NULL, node,
"The schema document '%s' cannot redefine itself.",
*schemaLocation);
} else {
xmlSchemaPCustomErr(pctxt,
XML_SCHEMAP_SRC_INCLUDE,
NULL, node,
"The schema document '%s' cannot include itself.",
*schemaLocation);
}
goto exit_error;
}
return(0);
exit_error:
return(pctxt->err);
exit_failure:
return(-1);
} | 0 | [
"CWE-134"
]
| libxml2 | 4472c3a5a5b516aaf59b89be602fbce52756c3e9 | 242,409,516,049,178,660,000,000,000,000,000,000,000 | 94 | Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports. |
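LIBXML_ATTR_FORMAT maps (on GCC/Clang) to the standard printf-format function attribute, so the compiler can warn about mismatched or variable format strings; a generic sketch of the same decoration outside libxml2:
#include <stdarg.h>
#include <stdio.h>

#if defined(__GNUC__)
# define ATTR_FORMAT(fmt_idx, arg_idx) __attribute__((format(printf, fmt_idx, arg_idx)))
#else
# define ATTR_FORMAT(fmt_idx, arg_idx)
#endif

/* Argument 2 is the format string, variadic args start at 3: -Wformat applies. */
static void log_error(int level, const char *fmt, ...) ATTR_FORMAT(2, 3);

static void log_error(int level, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "[%d] ", level);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}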
void Run(OpKernelContext* c, CollectiveParams* col_params,
DoneCallback done) {
CollectiveExecutor* col_exec = c->collective_executor();
OP_REQUIRES_ASYNC(
c, col_exec,
errors::Internal(
"Failed to get CollectiveExecutor from OpKernelContext for Op ",
name_),
done);
// Resolve the collective params.
// Schedule the `CompleteParamsAsync` call on a work queue that can handle
// blocking work because it's not guaranteed that this call cannot block.
c->collective_executor()->RunClosure([c, done = std::move(done), col_params,
col_exec]() {
VLOG(1) << "Collective CompleteParams for " << col_params->name
<< " device " << c->device()->name() << " group "
<< col_params->group.group_key << " instance "
<< col_params->instance.instance_key;
col_exec->CompleteParamsAsync(
c->device()->attributes(), col_params, c->cancellation_manager(),
[c, done = std::move(done), col_params, col_exec](const Status& s) {
if (s.ok()) {
auto actual_done = [c, col_params,
done = std::move(done)](const Status& s) {
VLOG(1) << "Collective ExecuteAsync done for "
<< col_params->name << " device " << c->device()->name()
<< " group " << col_params->group.group_key
<< " instance " << col_params->instance.instance_key
<< " status " << s;
if (!s.ok()) {
c->SetStatus(s);
}
done();
};
VLOG(1) << "Collective ExecuteAsync start for "
<< col_params->name << " device " << c->device()->name()
<< " group " << col_params->group.group_key
<< " instance " << col_params->instance.instance_key;
col_exec->ExecuteAsync(
c, col_params,
CollectiveKey(c, col_params->group.group_key,
col_params->instance.instance_key),
actual_done);
} else {
c->SetStatus(s);
done();
}
});
});
} | 0 | [
"CWE-416"
]
| tensorflow | ca38dab9d3ee66c5de06f11af9a4b1200da5ef75 | 81,768,683,026,637,690,000,000,000,000,000,000,000 | 50 | Fix undefined behavior in CollectiveReduceV2 and others
We should not call done after it's moved.
PiperOrigin-RevId: 400838185
Change-Id: Ifc979740054b8f8c6f4d50acc89472fe60c4fdb1 |
int sys_posix_fallocate(int fd, off_t offset, off_t len)
{
#if defined(HAVE_POSIX_FALLOCATE) && !defined(HAVE_BROKEN_POSIX_FALLOCATE)
return posix_fallocate(fd, offset, len);
#elif defined(F_RESVSP64)
/* this handles XFS on IRIX */
struct flock64 fl;
off_t new_len = offset + len;
int ret;
struct stat64 sbuf;
/* unlikely to get a too large file on a 64bit system but ... */
if (new_len < 0)
return EFBIG;
fl.l_whence = SEEK_SET;
fl.l_start = offset;
fl.l_len = len;
ret=fcntl(fd, F_RESVSP64, &fl);
if (ret != 0)
return errno;
/* Make sure the file gets enlarged after we allocated space: */
fstat64(fd, &sbuf);
if (new_len > sbuf.st_size)
ftruncate64(fd, new_len);
return 0;
#else
return ENOSYS;
#endif
} | 0 | [
"CWE-20"
]
| samba | d77a74237e660dd2ce9f1e14b02635f8a2569653 | 329,425,343,698,518,440,000,000,000,000,000,000,000 | 33 | s3: nmbd: Fix bug 10633 - nmbd denial of service
The Linux kernel has a bug in that it can give spurious
wakeups on a non-blocking UDP socket for a non-deliverable packet.
When nmbd was changed to use non-blocking sockets it
became vulnerable to a spurious wakeup from poll/epoll.
Fix sys_recvfile() to return on EWOULDBLOCK/EAGAIN.
CVE-2014-0244
https://bugzilla.samba.org/show_bug.cgi?id=10633
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Andreas Schneider <[email protected]> |
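The fix amounts to treating EWOULDBLOCK/EAGAIN on the non-blocking socket as "nothing to read yet" instead of an error to loop on; a minimal sketch:
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>

/* Returns bytes read, 0 for a spurious wakeup, or -1 on a real error. */
static ssize_t read_packet_nonblocking(int fd, void *buf, size_t len)
{
    ssize_t n = recv(fd, buf, len, 0);

    if (n == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))
        return 0;        /* no deliverable packet despite the poll wakeup */
    return n;
}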
get_default_executable_text_file_action (void)
{
int preferences_value;
preferences_value = eel_preferences_get_enum
(NAUTILUS_PREFERENCES_EXECUTABLE_TEXT_ACTIVATION);
switch (preferences_value) {
case NAUTILUS_EXECUTABLE_TEXT_LAUNCH:
return ACTIVATION_ACTION_LAUNCH;
case NAUTILUS_EXECUTABLE_TEXT_DISPLAY:
return ACTIVATION_ACTION_OPEN_IN_APPLICATION;
case NAUTILUS_EXECUTABLE_TEXT_ASK:
default:
return ACTIVATION_ACTION_ASK;
}
} | 0 | []
| nautilus | 7632a3e13874a2c5e8988428ca913620a25df983 | 50,260,050,833,249,940,000,000,000,000,000,000,000 | 16 | Check for trusted desktop file launchers.
2009-02-24 Alexander Larsson <[email protected]>
* libnautilus-private/nautilus-directory-async.c:
Check for trusted desktop file launchers.
* libnautilus-private/nautilus-file-private.h:
* libnautilus-private/nautilus-file.c:
* libnautilus-private/nautilus-file.h:
Add nautilus_file_is_trusted_link.
Allow unsetting of custom display name.
* libnautilus-private/nautilus-mime-actions.c:
Display dialog when trying to launch a non-trusted desktop file.
svn path=/trunk/; revision=15003 |
bool isPngType(BasicIo& iIo, bool advance)
{
if (iIo.error() || iIo.eof()) {
throw Error(kerInputDataReadFailed);
}
const int32_t len = 8;
byte buf[len];
iIo.read(buf, len);
if (iIo.error() || iIo.eof()) {
return false;
}
int rc = memcmp(buf, pngSignature, 8);
if (!advance || rc != 0) {
iIo.seek(-len, BasicIo::cur);
}
return rc == 0;
} | 0 | [
"CWE-190"
]
| exiv2 | 491c3ebe3b3faa6d8f75fb28146186792c2439da | 303,938,128,593,327,640,000,000,000,000,000,000,000 | 18 | Avoid negative integer overflow when `iccOffset > chunkLength`.
This fixes #790.
(cherry picked from commit 6fa2e31206127bd8bcac0269311f3775a8d6ea21) |
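The underflow arises when an unsigned difference such as chunkLength - iccOffset is computed without first checking the ordering; a sketch of the guarded form (the helper itself is illustrative):
#include <stddef.h>

/* Bytes available after the offset, or 0 if the offset points past the
 * chunk -- avoids the wrap-around to a huge unsigned value. */
static size_t icc_bytes_remaining(size_t chunkLength, size_t iccOffset)
{
    if (iccOffset > chunkLength)
        return 0;
    return chunkLength - iccOffset;
}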
BGD_DECLARE(void) gdImageXbmCtx(gdImagePtr image, char* file_name, int fg, gdIOCtx * out)
{
int x, y, c, b, sx, sy, p;
char *name, *f;
size_t i, l;
name = file_name;
if ((f = strrchr(name, '/')) != NULL) name = f+1;
if ((f = strrchr(name, '\\')) != NULL) name = f+1;
name = strdup(name);
if ((f = strrchr(name, '.')) != NULL && !strcasecmp(f, ".XBM")) *f = '\0';
if ((l = strlen(name)) == 0) {
free(name);
name = strdup("image");
} else {
for (i=0; i<l; i++) {
/* only in C-locale isalnum() would work */
if (!isupper(name[i]) && !islower(name[i]) && !isdigit(name[i])) {
name[i] = '_';
}
}
}
gdCtxPrintf(out, "#define %s_width %d\n", name, gdImageSX(image));
gdCtxPrintf(out, "#define %s_height %d\n", name, gdImageSY(image));
gdCtxPrintf(out, "static unsigned char %s_bits[] = {\n ", name);
free(name);
b = 1;
p = 0;
c = 0;
sx = gdImageSX(image);
sy = gdImageSY(image);
for (y = 0; y < sy; y++) {
for (x = 0; x < sx; x++) {
if (gdImageGetPixel(image, x, y) == fg) {
c |= b;
}
if ((b == 128) || (x == sx && y == sy)) {
b = 1;
if (p) {
gdCtxPrintf(out, ", ");
if (!(p%12)) {
gdCtxPrintf(out, "\n ");
p = 12;
}
}
p++;
gdCtxPrintf(out, "0x%02X", c);
c = 0;
} else {
b <<= 1;
}
}
}
gdCtxPrintf(out, "};\n");
} | 1 | [
"CWE-119",
"CWE-787"
]
| libgd | 4dc1a2d7931017d3625f2d7cff70a17ce58b53b4 | 81,687,968,932,291,470,000,000,000,000,000,000,000 | 58 | xbm: avoid stack overflow (read) with large names #211
We use the name passed in to printf into a local stack buffer which is
limited to 4000 bytes. So given a large enough value, lots of stack
data is leaked. Rewrite the code to do simple memory copies with most
of the strings to avoid that issue, and only use stack buffer for small
numbers of constant size.
This closes #211. |
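The remedy described above is to stop formatting an attacker-sized name into a fixed stack buffer and instead stream the variable-length pieces directly; a sketch of that approach (the helper is illustrative, not libgd's code):
#include <stdio.h>

/* Emit "#define <name><suffix> <value>" without ever placing the untrusted
 * name into a fixed-size stack buffer: only constant-size parts go through
 * printf-style formatting. */
static void emit_define(FILE *out, const char *name, const char *suffix, int value)
{
    fputs("#define ", out);
    fputs(name, out);                /* arbitrary length, streamed directly */
    fputs(suffix, out);
    fprintf(out, " %d\n", value);
}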
static const char* lua_ap_get_server_name(request_rec* r)
{
const char *name;
name = ap_get_server_name(r);
return name ? name : "localhost";
} | 0 | [
"CWE-20"
]
| httpd | 78eb3b9235515652ed141353d98c239237030410 | 336,405,114,744,677,170,000,000,000,000,000,000,000 | 6 | *) SECURITY: CVE-2015-0228 (cve.mitre.org)
mod_lua: A maliciously crafted websockets PING after a script
calls r:wsupgrade() can cause a child process crash.
[Edward Lu <Chaosed0 gmail.com>]
Discovered by Guido Vranken <guidovranken gmail.com>
Submitted by: Edward Lu
Committed by: covener
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1657261 13f79535-47bb-0310-9956-ffa450edef68 |
template<typename tp, typename tf, typename tc, typename to>
CImg<T>& draw_object3d(LibBoard::Board& board,
const float x0, const float y0, const float z0,
const CImg<tp>& vertices, const CImgList<tf>& primitives,
const CImgList<tc>& colors,
const unsigned int render_type=4,
const bool is_double_sided=false, const float focale=700,
const float lightx=0, const float lighty=0, const float lightz=-5e8,
const float specular_lightness=0.2f, const float specular_shininess=0.1f,
const float g_opacity=1) {
return draw_object3d(x0,y0,z0,vertices,primitives,colors,CImg<floatT>::const_empty(),
render_type,is_double_sided,focale,lightx,lighty,lightz,
specular_lightness,specular_shininess,g_opacity,CImg<floatT>::empty()); | 0 | [
"CWE-119",
"CWE-787"
]
| CImg | ac8003393569aba51048c9d67e1491559877b1d1 | 270,983,233,920,883,100,000,000,000,000,000,000,000 | 13 | . |
hstore_contained(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(DirectFunctionCall2(hstore_contains,
PG_GETARG_DATUM(1),
PG_GETARG_DATUM(0)
));
} | 0 | [
"CWE-703",
"CWE-189"
]
| postgres | 31400a673325147e1205326008e32135a78b4d8a | 73,102,553,226,876,805,000,000,000,000,000,000,000 | 7 | Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064 |
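The overrun pattern described here is an allocation size that wraps to a small positive value before the allocation happens. A minimal sketch of the pre-allocation overflow check, not PostgreSQL's actual palloc machinery:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate count elements of size bytes each, refusing requests whose
 * total would overflow size_t and wrap to a small allocation. */
static void *alloc_array(size_t count, size_t size)
{
    if (size != 0 && count > SIZE_MAX / size) {
        fprintf(stderr, "allocation of %zu * %zu would overflow\n", count, size);
        return NULL;
    }
    return malloc(count * size);
}

int main(void)
{
    void *ok  = alloc_array(1024, 16);              /* fine */
    void *bad = alloc_array(SIZE_MAX / 2, 16);      /* refused, not wrapped */

    printf("ok=%p bad=%p\n", ok, bad);
    free(ok);
    return 0;
}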
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
if (!kvm_pause_in_guest(vcpu->kvm))
shrink_ple_window(vcpu);
} | 0 | [
"CWE-862"
]
| kvm | 0f923e07124df069ba68d8bb12324398f4b6b709 | 243,732,318,993,160,230,000,000,000,000,000,000,000 | 5 | KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)
* Invert the mask of bits that we pick from L2 in
nested_vmcb02_prepare_control
* Invert and explicitly use VIRQ related bits bitmask in svm_clear_vintr
This fixes a security issue that allowed a malicious L1 to run L2 with
AVIC enabled, which allowed the L2 to exploit the uninitialized and enabled
AVIC to read/write the host physical memory at some offsets.
Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
Signed-off-by: Maxim Levitsky <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
void __iomem *mbox = tp->regs + off;
writel(val, mbox);
if (tg3_flag(tp, TXD_MBOX_HWBUG))
writel(val, mbox);
if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
tg3_flag(tp, FLUSH_POSTED_WRITES))
readl(mbox);
} | 0 | [
"CWE-476",
"CWE-119"
]
| linux | 715230a44310a8cf66fbfb5a46f9a62a9b2de424 | 273,642,258,056,342,400,000,000,000,000,000,000,000 | 10 | tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Oded Horovitz <[email protected]>
Reported-by: Brad Spengler <[email protected]>
Cc: [email protected]
Cc: Matt Carlson <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
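The fix is a length clamp: the device may report up to 255 bytes, the driver buffer holds 32, and anything longer is truncated rather than copied blindly. An illustrative sketch (the buffer size is taken from the message, everything else is hypothetical):

#include <stdio.h>
#include <string.h>

#define FW_VER_LEN 32   /* driver-side buffer size from the commit message */

/* Copy a device-reported version string (up to 255 bytes of VPD data)
 * into the fixed driver buffer, truncating instead of overflowing. */
static void copy_fw_version(char *dst, const char *vpd, size_t vpd_len)
{
    size_t n = vpd_len < FW_VER_LEN - 1 ? vpd_len : FW_VER_LEN - 1;

    memcpy(dst, vpd, n);
    dst[n] = '\0';
}

int main(void)
{
    char fw[FW_VER_LEN];
    const char reported[] =
        "a deliberately long firmware identification string from the device";

    copy_fw_version(fw, reported, sizeof reported - 1);
    printf("stored: \"%s\"\n", fw);
    return 0;
}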
static size_t kat_nonce(RAND_DRBG *drbg, unsigned char **pout,
int entropy, size_t min_len, size_t max_len)
{
TEST_CTX *t = (TEST_CTX *)RAND_DRBG_get_ex_data(drbg, app_data_index);
t->noncecnt++;
*pout = (unsigned char *)t->nonce;
return t->noncelen;
} | 0 | [
"CWE-330"
]
| openssl | 1b0fe00e2704b5e20334a16d3c9099d1ba2ef1be | 262,940,219,054,899,800,000,000,000,000,000,000,000 | 9 | drbg: ensure fork-safety without using a pthread_atfork handler
When the new OpenSSL CSPRNG was introduced in version 1.1.1,
it was announced in the release notes that it would be fork-safe,
which the old CSPRNG hadn't been.
The fork-safety was implemented using a fork count, which was
incremented by a pthread_atfork handler. Initially, this handler
was enabled by default. Unfortunately, the default behaviour
had to be changed for other reasons in commit b5319bdbd095, so
the new OpenSSL CSPRNG failed to keep its promise.
This commit restores the fork-safety using a different approach.
It replaces the fork count by a fork id, which coincides with
the process id on UNIX-like operating systems and is zero on other
operating systems. It is used to detect when an automatic reseed
after a fork is necessary.
To prevent a future regression, it also adds a test to verify that
the child reseeds after fork.
CVE-2019-1549
Reviewed-by: Paul Dale <[email protected]>
Reviewed-by: Matt Caswell <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/9802) |
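The mechanism described here, storing a fork id that coincides with the process id and reseeding whenever it no longer matches getpid(), can be sketched without any OpenSSL internals. A POSIX-only illustration with made-up function names:

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Process id recorded at the last (re)seed.  When it no longer matches
 * getpid() we are running in a forked child and must reseed before
 * producing any output -- no pthread_atfork handler required. */
static pid_t drbg_fork_id;

static void drbg_reseed(void)
{
    drbg_fork_id = getpid();
    printf("pid %ld: reseeding\n", (long)drbg_fork_id);
}

static void drbg_generate(void)
{
    if (drbg_fork_id != getpid())   /* fork detected */
        drbg_reseed();
    printf("pid %ld: generating output\n", (long)getpid());
}

int main(void)
{
    drbg_reseed();
    drbg_generate();                /* parent: no reseed needed */

    if (fork() == 0) {
        drbg_generate();            /* child: pid differs, reseeds first */
        _exit(0);
    }
    wait(NULL);
    return 0;
}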
static s32 gf_media_hevc_read_pps_bs(GF_BitStream *bs, HEVCState *hevc)
{
u32 i;
s32 pps_id = -1;
HEVC_PPS *pps;
//NAL header already read
pps_id = bs_get_ue(bs);
if ((pps_id<0) || (pps_id>=64)) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] wrong PPS ID %d in PPS\n", pps_id));
return -1;
}
pps = &hevc->pps[pps_id];
if (!pps->state) {
pps->id = pps_id;
pps->state = 1;
}
pps->sps_id = bs_get_ue(bs);
if (pps->sps_id>16) {
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] wrong SPS ID %d in PPS\n", pps->sps_id));
return -1;
}
hevc->sps_active_idx = pps->sps_id; /*set active sps*/
pps->dependent_slice_segments_enabled_flag = gf_bs_read_int(bs, 1);
pps->output_flag_present_flag = gf_bs_read_int(bs, 1);
pps->num_extra_slice_header_bits = gf_bs_read_int(bs, 3);
/*sign_data_hiding_flag = */gf_bs_read_int(bs, 1);
pps->cabac_init_present_flag = gf_bs_read_int(bs, 1);
pps->num_ref_idx_l0_default_active = 1 + bs_get_ue(bs);
pps->num_ref_idx_l1_default_active = 1 + bs_get_ue(bs);
/*pic_init_qp_minus26 = */bs_get_se(bs);
/*constrained_intra_pred_flag = */gf_bs_read_int(bs, 1);
/*transform_skip_enabled_flag = */gf_bs_read_int(bs, 1);
if (/*cu_qp_delta_enabled_flag = */gf_bs_read_int(bs, 1) )
/*diff_cu_qp_delta_depth = */bs_get_ue(bs);
/*pic_cb_qp_offset = */bs_get_se(bs);
/*pic_cr_qp_offset = */bs_get_se(bs);
pps->slice_chroma_qp_offsets_present_flag = gf_bs_read_int(bs, 1);
pps->weighted_pred_flag = gf_bs_read_int(bs, 1);
pps->weighted_bipred_flag = gf_bs_read_int(bs, 1);
/*transquant_bypass_enable_flag = */gf_bs_read_int(bs, 1);
pps->tiles_enabled_flag = gf_bs_read_int(bs, 1);
pps->entropy_coding_sync_enabled_flag = gf_bs_read_int(bs, 1);
if (pps->tiles_enabled_flag) {
pps->num_tile_columns = 1 + bs_get_ue(bs);
pps->num_tile_rows = 1 + bs_get_ue(bs);
pps->uniform_spacing_flag = gf_bs_read_int(bs, 1);
if (!pps->uniform_spacing_flag ) {
for (i=0; i<pps->num_tile_columns-1; i++) {
pps->column_width[i] = 1 + bs_get_ue(bs);
}
for (i=0; i<pps->num_tile_rows-1; i++) {
pps->row_height[i] = 1+bs_get_ue(bs);
}
}
pps->loop_filter_across_tiles_enabled_flag = gf_bs_read_int(bs, 1);
}
pps->loop_filter_across_slices_enabled_flag = gf_bs_read_int(bs, 1);
if( /*deblocking_filter_control_present_flag = */gf_bs_read_int(bs, 1) ) {
pps->deblocking_filter_override_enabled_flag = gf_bs_read_int(bs, 1);
if (! /*pic_disable_deblocking_filter_flag= */gf_bs_read_int(bs, 1) ) {
/*beta_offset_div2 = */bs_get_se(bs);
/*tc_offset_div2 = */bs_get_se(bs);
}
}
if (/*pic_scaling_list_data_present_flag = */gf_bs_read_int(bs, 1) ) {
hevc_scaling_list_data(bs);
}
pps->lists_modification_present_flag = gf_bs_read_int(bs, 1);
/*log2_parallel_merge_level_minus2 = */bs_get_ue(bs);
pps->slice_segment_header_extension_present_flag = gf_bs_read_int(bs, 1);
if ( /*pps_extension_flag= */gf_bs_read_int(bs, 1) ) {
while (gf_bs_available(bs) ) {
/*pps_extension_data_flag */ gf_bs_read_int(bs, 1);
}
}
return pps_id;
} | 0 | [
"CWE-119",
"CWE-787"
]
| gpac | 90dc7f853d31b0a4e9441cba97feccf36d8b69a4 | 301,125,735,870,067,540,000,000,000,000,000,000,000 | 82 | fix some exploitable overflows (#994, #997) |
dns_zonemgr_unreachabledel(dns_zonemgr_t *zmgr, isc_sockaddr_t *remote,
isc_sockaddr_t *local)
{
unsigned int i;
isc_rwlocktype_t locktype;
isc_result_t result;
char master[ISC_SOCKADDR_FORMATSIZE];
char source[ISC_SOCKADDR_FORMATSIZE];
isc_sockaddr_format(remote, master, sizeof(master));
isc_sockaddr_format(local, source, sizeof(source));
REQUIRE(DNS_ZONEMGR_VALID(zmgr));
locktype = isc_rwlocktype_read;
RWLOCK(&zmgr->urlock, locktype);
for (i = 0; i < UNREACH_CHACHE_SIZE; i++) {
if (isc_sockaddr_equal(&zmgr->unreachable[i].remote, remote) &&
isc_sockaddr_equal(&zmgr->unreachable[i].local, local)) {
if (zmgr->unreachable[i].expire == 0)
break;
result = isc_rwlock_tryupgrade(&zmgr->urlock);
if (result == ISC_R_SUCCESS) {
locktype = isc_rwlocktype_write;
zmgr->unreachable[i].expire = 0;
isc_log_write(dns_lctx, DNS_LOGCATEGORY_GENERAL,
DNS_LOGMODULE_ZONE, ISC_LOG_INFO,
"master %s (source %s) deleted "
"from unreachable cache",
master, source);
}
break;
}
}
RWUNLOCK(&zmgr->urlock, locktype);
} | 0 | [
"CWE-327"
]
| bind9 | f09352d20a9d360e50683cd1d2fc52ccedcd77a0 | 10,060,886,933,922,106,000,000,000,000,000,000,000 | 37 | Update keyfetch_done compute_tag check
If in keyfetch_done the compute_tag fails (because for example the
algorithm is not supported), don't crash, but instead ignore the
key. |
ofputil_queue_stats_to_ofp14(const struct ofputil_queue_stats *oqs,
struct ofp14_queue_stats *qs14)
{
qs14->length = htons(sizeof *qs14);
memset(qs14->pad, 0, sizeof qs14->pad);
ofputil_queue_stats_to_ofp13(oqs, &qs14->qs);
} | 0 | [
"CWE-772"
]
| ovs | 77ad4225d125030420d897c873e4734ac708c66b | 15,542,498,163,839,173,000,000,000,000,000,000,000 | 7 | ofp-util: Fix memory leaks on error cases in ofputil_decode_group_mod().
Found by libFuzzer.
Reported-by: Bhargava Shastry <[email protected]>
Signed-off-by: Ben Pfaff <[email protected]>
Acked-by: Justin Pettit <[email protected]> |
finish_object (P11KitIter *iter)
{
iter->object = 0;
} | 0 | [
"CWE-190"
]
| p11-kit | 5307a1d21a50cacd06f471a873a018d23ba4b963 | 119,339,537,834,926,290,000,000,000,000,000,000,000 | 4 | Check for arithmetic overflows before allocating |
parse_command_modifiers(
exarg_T *eap,
char **errormsg,
cmdmod_T *cmod,
int skip_only)
{
char_u *cmd_start = NULL;
char_u *p;
int starts_with_colon = FALSE;
int vim9script = in_vim9script();
int has_visual_range = FALSE;
CLEAR_POINTER(cmod);
cmod->cmod_flags = sticky_cmdmod_flags;
if (STRNCMP(eap->cmd, "'<,'>", 5) == 0)
{
// The automatically inserted Visual area range is skipped, so that
// typing ":cmdmod cmd" in Visual mode works without having to move the
// range to after the modififiers.
eap->cmd += 5;
cmd_start = eap->cmd;
has_visual_range = TRUE;
}
// Repeat until no more command modifiers are found.
for (;;)
{
while (*eap->cmd == ' ' || *eap->cmd == '\t' || *eap->cmd == ':')
{
if (*eap->cmd == ':')
starts_with_colon = TRUE;
++eap->cmd;
}
// in ex mode, an empty line works like :+
if (*eap->cmd == NUL && exmode_active
&& (getline_equal(eap->getline, eap->cookie, getexmodeline)
|| getline_equal(eap->getline, eap->cookie, getexline))
&& curwin->w_cursor.lnum < curbuf->b_ml.ml_line_count)
{
eap->cmd = (char_u *)"+";
if (!skip_only)
ex_pressedreturn = TRUE;
}
// ignore comment and empty lines
if (comment_start(eap->cmd, starts_with_colon))
{
// a comment ends at a NL
if (eap->nextcmd == NULL)
{
eap->nextcmd = vim_strchr(eap->cmd, '\n');
if (eap->nextcmd != NULL)
++eap->nextcmd;
}
if (vim9script && has_cmdmod(cmod, FALSE))
*errormsg = _(e_command_modifier_without_command);
return FAIL;
}
if (*eap->cmd == NUL)
{
if (!skip_only)
{
ex_pressedreturn = TRUE;
if (vim9script && has_cmdmod(cmod, FALSE))
*errormsg = _(e_command_modifier_without_command);
}
return FAIL;
}
p = skip_range(eap->cmd, TRUE, NULL);
// In Vim9 script a variable can shadow a command modifier:
// verbose = 123
// verbose += 123
// silent! verbose = func()
// verbose.member = 2
// verbose[expr] = 2
// But not:
// verbose [a, b] = list
if (vim9script)
{
char_u *s, *n;
for (s = eap->cmd; ASCII_ISALPHA(*s); ++s)
;
n = skipwhite(s);
if (*n == '.' || *n == '=' || (*n != NUL && n[1] == '=')
|| *s == '[')
break;
}
switch (*p)
{
// When adding an entry, also modify cmd_exists().
case 'a': if (!checkforcmd_noparen(&eap->cmd, "aboveleft", 3))
break;
cmod->cmod_split |= WSP_ABOVE;
continue;
case 'b': if (checkforcmd_noparen(&eap->cmd, "belowright", 3))
{
cmod->cmod_split |= WSP_BELOW;
continue;
}
if (checkforcmd_opt(&eap->cmd, "browse", 3, TRUE))
{
#ifdef FEAT_BROWSE_CMD
cmod->cmod_flags |= CMOD_BROWSE;
#endif
continue;
}
if (!checkforcmd_noparen(&eap->cmd, "botright", 2))
break;
cmod->cmod_split |= WSP_BOT;
continue;
case 'c': if (!checkforcmd_opt(&eap->cmd, "confirm", 4, TRUE))
break;
#if defined(FEAT_GUI_DIALOG) || defined(FEAT_CON_DIALOG)
cmod->cmod_flags |= CMOD_CONFIRM;
#endif
continue;
case 'k': if (checkforcmd_noparen(&eap->cmd, "keepmarks", 3))
{
cmod->cmod_flags |= CMOD_KEEPMARKS;
continue;
}
if (checkforcmd_noparen(&eap->cmd, "keepalt", 5))
{
cmod->cmod_flags |= CMOD_KEEPALT;
continue;
}
if (checkforcmd_noparen(&eap->cmd, "keeppatterns", 5))
{
cmod->cmod_flags |= CMOD_KEEPPATTERNS;
continue;
}
if (!checkforcmd_noparen(&eap->cmd, "keepjumps", 5))
break;
cmod->cmod_flags |= CMOD_KEEPJUMPS;
continue;
case 'f': // only accept ":filter {pat} cmd"
{
char_u *reg_pat;
char_u *nulp = NULL;
int c = 0;
if (!checkforcmd_noparen(&p, "filter", 4)
|| *p == NUL
|| (ends_excmd(*p)
#ifdef FEAT_EVAL
// in ":filter #pat# cmd" # does not
// start a comment
&& (!vim9script || VIM_ISWHITE(p[1]))
#endif
))
break;
if (*p == '!')
{
cmod->cmod_filter_force = TRUE;
p = skipwhite(p + 1);
if (*p == NUL || ends_excmd(*p))
break;
}
#ifdef FEAT_EVAL
// Avoid that "filter(arg)" is recognized.
if (vim9script && !VIM_ISWHITE(p[-1]))
break;
#endif
if (skip_only)
p = skip_vimgrep_pat(p, NULL, NULL);
else
// NOTE: This puts a NUL after the pattern.
p = skip_vimgrep_pat_ext(p, ®_pat, NULL,
&nulp, &c);
if (p == NULL || *p == NUL)
break;
if (!skip_only)
{
cmod->cmod_filter_regmatch.regprog =
vim_regcomp(reg_pat, RE_MAGIC);
if (cmod->cmod_filter_regmatch.regprog == NULL)
break;
// restore the character overwritten by NUL
if (nulp != NULL)
*nulp = c;
}
eap->cmd = p;
continue;
}
// ":hide" and ":hide | cmd" are not modifiers
case 'h': if (p != eap->cmd || !checkforcmd_noparen(&p, "hide", 3)
|| *p == NUL || ends_excmd(*p))
break;
eap->cmd = p;
cmod->cmod_flags |= CMOD_HIDE;
continue;
case 'l': if (checkforcmd_noparen(&eap->cmd, "lockmarks", 3))
{
cmod->cmod_flags |= CMOD_LOCKMARKS;
continue;
}
if (checkforcmd_noparen(&eap->cmd, "legacy", 3))
{
if (ends_excmd2(p, eap->cmd))
{
*errormsg =
_(e_legacy_must_be_followed_by_command);
return FAIL;
}
cmod->cmod_flags |= CMOD_LEGACY;
continue;
}
if (!checkforcmd_noparen(&eap->cmd, "leftabove", 5))
break;
cmod->cmod_split |= WSP_ABOVE;
continue;
case 'n': if (checkforcmd_noparen(&eap->cmd, "noautocmd", 3))
{
cmod->cmod_flags |= CMOD_NOAUTOCMD;
continue;
}
if (!checkforcmd_noparen(&eap->cmd, "noswapfile", 3))
break;
cmod->cmod_flags |= CMOD_NOSWAPFILE;
continue;
case 'r': if (!checkforcmd_noparen(&eap->cmd, "rightbelow", 6))
break;
cmod->cmod_split |= WSP_BELOW;
continue;
case 's': if (checkforcmd_noparen(&eap->cmd, "sandbox", 3))
{
cmod->cmod_flags |= CMOD_SANDBOX;
continue;
}
if (!checkforcmd_noparen(&eap->cmd, "silent", 3))
break;
cmod->cmod_flags |= CMOD_SILENT;
if (*eap->cmd == '!' && !VIM_ISWHITE(eap->cmd[-1]))
{
// ":silent!", but not "silent !cmd"
eap->cmd = skipwhite(eap->cmd + 1);
cmod->cmod_flags |= CMOD_ERRSILENT;
}
continue;
case 't': if (checkforcmd_noparen(&p, "tab", 3))
{
if (!skip_only)
{
long tabnr = get_address(eap, &eap->cmd,
ADDR_TABS, eap->skip,
skip_only, FALSE, 1);
if (tabnr == MAXLNUM)
cmod->cmod_tab = tabpage_index(curtab) + 1;
else
{
if (tabnr < 0 || tabnr > LAST_TAB_NR)
{
*errormsg = _(e_invalid_range);
return FAIL;
}
cmod->cmod_tab = tabnr + 1;
}
}
eap->cmd = p;
continue;
}
if (!checkforcmd_noparen(&eap->cmd, "topleft", 2))
break;
cmod->cmod_split |= WSP_TOP;
continue;
case 'u': if (!checkforcmd_noparen(&eap->cmd, "unsilent", 3))
break;
cmod->cmod_flags |= CMOD_UNSILENT;
continue;
case 'v': if (checkforcmd_noparen(&eap->cmd, "vertical", 4))
{
cmod->cmod_split |= WSP_VERT;
continue;
}
if (checkforcmd_noparen(&eap->cmd, "vim9cmd", 4))
{
if (ends_excmd2(p, eap->cmd))
{
*errormsg =
_(e_vim9cmd_must_be_followed_by_command);
return FAIL;
}
cmod->cmod_flags |= CMOD_VIM9CMD;
continue;
}
if (!checkforcmd_noparen(&p, "verbose", 4))
break;
if (vim_isdigit(*eap->cmd))
{
cmod->cmod_verbose = atoi((char *)eap->cmd);
if (cmod->cmod_verbose == 0)
cmod->cmod_verbose = -1;
}
else
cmod->cmod_verbose = 1;
eap->cmd = p;
continue;
}
break;
}
if (has_visual_range)
{
if (eap->cmd > cmd_start)
{
// Move the '<,'> range to after the modifiers and insert a colon.
// Since the modifiers have been parsed put the colon on top of the
// space: "'<,'>mod cmd" -> "mod:'<,'>cmd
// Put eap->cmd after the colon.
mch_memmove(cmd_start - 5, cmd_start, eap->cmd - cmd_start);
eap->cmd -= 5;
mch_memmove(eap->cmd - 1, ":'<,'>", 6);
}
else
// no modifiers, move the pointer back
eap->cmd -= 5;
}
return OK;
} | 1 | [
"CWE-122"
]
| vim | f50808ed135ab973296bca515ae4029b321afe47 | 263,926,808,274,265,130,000,000,000,000,000,000,000 | 339 | patch 8.2.4763: using invalid pointer with "V:" in Ex mode
Problem: Using invalid pointer with "V:" in Ex mode.
Solution: Correctly handle the command being changed to "+". |
ArgParser::ArgParser(int argc, char* argv[], Options& o) :
argc(argc),
argv(argv),
o(o),
cur_arg(0),
bash_completion(false),
zsh_completion(false)
{
option_table = &main_option_table;
initOptionTable();
} | 0 | [
"CWE-787"
]
| qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 281,191,231,104,589,200,000,000,000,000,000,000,000 | 11 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
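The commit replaces implicit narrowing conversions with range-checked helpers. qpdf's real helpers are C++ templates that throw; the sketch below shows the same idea in C, returning an error instead of silently truncating:

#include <errno.h>
#include <limits.h>
#include <stdio.h>

/* Convert a size_t to int, failing loudly instead of silently
 * truncating or flipping sign for values above INT_MAX. */
static int checked_size_to_int(size_t value, int *out)
{
    if (value > (size_t)INT_MAX) {
        errno = ERANGE;
        return -1;
    }
    *out = (int)value;   /* explicit and now provably lossless */
    return 0;
}

int main(void)
{
    int small = 0, big = 0;

    if (checked_size_to_int(1234, &small) == 0)
        printf("converted: %d\n", small);
    if (checked_size_to_int((size_t)INT_MAX + 1, &big) != 0)
        printf("rejected out-of-range conversion\n");
    return 0;
}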
int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
if (pthread_key_create(key, cleanup) != 0)
return 0;
return 1;
} | 0 | [
"CWE-330"
]
| openssl | 1b0fe00e2704b5e20334a16d3c9099d1ba2ef1be | 74,519,411,251,339,680,000,000,000,000,000,000,000 | 7 | drbg: ensure fork-safety without using a pthread_atfork handler
When the new OpenSSL CSPRNG was introduced in version 1.1.1,
it was announced in the release notes that it would be fork-safe,
which the old CSPRNG hadn't been.
The fork-safety was implemented using a fork count, which was
incremented by a pthread_atfork handler. Initially, this handler
was enabled by default. Unfortunately, the default behaviour
had to be changed for other reasons in commit b5319bdbd095, so
the new OpenSSL CSPRNG failed to keep its promise.
This commit restores the fork-safety using a different approach.
It replaces the fork count by a fork id, which coincides with
the process id on UNIX-like operating systems and is zero on other
operating systems. It is used to detect when an automatic reseed
after a fork is necessary.
To prevent a future regression, it also adds a test to verify that
the child reseeds after fork.
CVE-2019-1549
Reviewed-by: Paul Dale <[email protected]>
Reviewed-by: Matt Caswell <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/9802) |
const MD5& sslHashes::get_MD5() const
{
return md5HandShake_;
} | 0 | [
"CWE-254"
]
| mysql-server | e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69 | 78,246,922,545,914,960,000,000,000,000,000,000,000 | 4 | Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED. |
ModResult OnUserPreTagMessage(User* user, const MessageTarget& target, CTCTags::TagMessageDetails& details) CXX11_OVERRIDE
{
if (target.type == MessageTarget::TYPE_CHANNEL)
return BuildChannelExempts(user, target.Get<Channel>(), SilenceEntry::SF_TAGMSG_CHANNEL, details.exemptions);
if (target.type == MessageTarget::TYPE_USER && !CanReceiveMessage(user, target.Get<User>(), SilenceEntry::SF_TAGMSG_USER))
{
details.echo_original = true;
return MOD_RES_DENY;
}
return MOD_RES_PASSTHRU;
} | 0 | [
"CWE-416"
]
| inspircd | 7b47de3c194f239c5fea09a0e49696c9af017d51 | 33,545,519,623,845,474,000,000,000,000,000,000,000 | 13 | Copy the silence flags when sending update notifications.
This fixes a crash bug in the silence module on some versions of GCC. |
AdjustFractDays(double frac, struct pg_tm * tm, fsec_t *fsec, int scale)
{
int extra_days;
if (frac == 0)
return;
frac *= scale;
extra_days = (int) frac;
tm->tm_mday += extra_days;
frac -= extra_days;
AdjustFractSeconds(frac, tm, fsec, SECS_PER_DAY);
} | 0 | [
"CWE-119"
]
| postgres | 01824385aead50e557ca1af28640460fa9877d51 | 231,975,752,157,068,760,000,000,000,000,000,000,000 | 12 | Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt() |
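Two patterns are named in the message: bounded copies into fixed-size buffers (strlcpy and friends) and checking crypt(3) for NULL before use. A small sketch combining both; header and linking details vary by platform, as noted in the comments:

#include <stdio.h>
#include <string.h>
#include <crypt.h>   /* glibc/libxcrypt; on BSDs crypt() lives in <unistd.h> */

/* crypt(3) returns NULL on failure -- for example when the C library
 * refuses an unapproved hashing algorithm in "FIPS mode" -- so the
 * result must be checked before it is compared. */
static int password_matches(const char *password, const char *stored_hash)
{
    char *computed = crypt(password, stored_hash);
    if (computed == NULL)
        return 0;               /* treat failure as a mismatch, not a crash */
    return strcmp(computed, stored_hash) == 0;
}

int main(void)                  /* link with -lcrypt on glibc */
{
    char stored[128];
    char *enrolled = crypt("secret", "ab");
    if (enrolled == NULL)
        return 1;
    snprintf(stored, sizeof stored, "%s", enrolled);  /* bounded copy, strlcpy-style */
    printf("right password: %d\n", password_matches("secret", stored));
    printf("wrong password: %d\n", password_matches("wrong", stored));
    return 0;
}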
template<typename tc>
CImg<T>& draw_point(const int x0, const int y0,
const tc *const color, const float opacity=1) {
return draw_point(x0,y0,0,color,opacity); | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 297,085,476,705,393,720,000,000,000,000,000,000,000 | 4 | Fix other issues in 'CImg<T>::load_bmp()'. |
static int switch_character_set_results(MYSQL *mysql, const char *cs_name)
{
char query_buffer[QUERY_LENGTH];
size_t query_length;
/* Server lacks facility. This is not an error, by arbitrary decision . */
if (!server_supports_switching_charsets)
return FALSE;
query_length= my_snprintf(query_buffer,
sizeof (query_buffer),
"SET SESSION character_set_results = '%s'",
(const char *) cs_name);
return mysql_real_query(mysql, query_buffer, query_length);
} | 0 | [
"CWE-284",
"CWE-295"
]
| mysql-server | 3bd5589e1a5a93f9c224badf983cd65c45215390 | 169,763,884,773,224,090,000,000,000,000,000,000,000 | 16 | WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options |
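A client-side view of the enforced-SSL behaviour described above might look like the sketch below. It assumes a 5.7-era client library where MYSQL_OPT_SSL_ENFORCE and my_bool still exist (newer libraries use MYSQL_OPT_SSL_MODE instead); host, user and password are placeholders:

#include <mysql.h>
#include <stdio.h>

int main(void)
{
    MYSQL *conn = mysql_init(NULL);
    my_bool enforce = 1;

    /* Refuse to fall back to cleartext: with enforcement on, a server
     * that cannot negotiate TLS makes the connect fail instead of
     * silently continuing unencrypted. */
    if (mysql_options(conn, MYSQL_OPT_SSL_ENFORCE, &enforce) != 0) {
        fprintf(stderr, "client library does not support SSL enforcement\n");
        return 1;
    }

    if (mysql_real_connect(conn, "db.example.com", "app", "secret",
                           NULL, 0, NULL, 0) == NULL) {
        fprintf(stderr, "connect failed: %s\n", mysql_error(conn));
        mysql_close(conn);
        return 1;
    }

    mysql_close(conn);
    return 0;
}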
static int ext4_split_extent_at(handle_t *handle,
struct inode *inode,
struct ext4_ext_path *path,
ext4_lblk_t split,
int split_flag,
int flags)
{
ext4_fsblk_t newblock;
ext4_lblk_t ee_block;
struct ext4_extent *ex, newex, orig_ex;
struct ext4_extent *ex2 = NULL;
unsigned int ee_len, depth;
int err = 0;
ext_debug("ext4_split_extents_at: inode %lu, logical"
"block %llu\n", inode->i_ino, (unsigned long long)split);
ext4_ext_show_leaf(inode, path);
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
newblock = split - ee_block + ext4_ext_pblock(ex);
BUG_ON(split < ee_block || split >= (ee_block + ee_len));
err = ext4_ext_get_access(handle, inode, path + depth);
if (err)
goto out;
if (split == ee_block) {
/*
* case b: block @split is the block that the extent begins with
* then we just change the state of the extent, and splitting
* is not needed.
*/
if (split_flag & EXT4_EXT_MARK_UNINIT2)
ext4_ext_mark_uninitialized(ex);
else
ext4_ext_mark_initialized(ex);
if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
ext4_ext_try_to_merge(inode, path, ex);
err = ext4_ext_dirty(handle, inode, path + depth);
goto out;
}
/* case a */
memcpy(&orig_ex, ex, sizeof(orig_ex));
ex->ee_len = cpu_to_le16(split - ee_block);
if (split_flag & EXT4_EXT_MARK_UNINIT1)
ext4_ext_mark_uninitialized(ex);
/*
* path may lead to new leaf, not to original leaf any more
* after ext4_ext_insert_extent() returns,
*/
err = ext4_ext_dirty(handle, inode, path + depth);
if (err)
goto fix_extent_len;
ex2 = &newex;
ex2->ee_block = cpu_to_le32(split);
ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
ext4_ext_store_pblock(ex2, newblock);
if (split_flag & EXT4_EXT_MARK_UNINIT2)
ext4_ext_mark_uninitialized(ex2);
err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
err = ext4_ext_zeroout(inode, &orig_ex);
if (err)
goto fix_extent_len;
/* update the extent length and mark as initialized */
ex->ee_len = cpu_to_le32(ee_len);
ext4_ext_try_to_merge(inode, path, ex);
err = ext4_ext_dirty(handle, inode, path + depth);
goto out;
} else if (err)
goto fix_extent_len;
out:
ext4_ext_show_leaf(inode, path);
return err;
fix_extent_len:
ex->ee_len = orig_ex.ee_len;
ext4_ext_dirty(handle, inode, path + depth);
return err;
} | 0 | [
"CWE-703"
]
| linux | 667eff35a1f56fa74ce98a0c7c29a40adc1ba4e3 | 108,960,409,715,415,750,000,000,000,000,000,000,000 | 92 | ext4: reimplement convert and split_unwritten
Reimplement ext4_ext_convert_to_initialized() and
ext4_split_unwritten_extents() using ext4_split_extent()
Signed-off-by: Yongqiang Yang <[email protected]>
Signed-off-by: "Theodore Ts'o" <[email protected]>
Tested-by: Allison Henderson <[email protected]> |
long qemu_maxrampagesize(void)
{
return qemu_real_host_page_size();
} | 0 | [
"CWE-908"
]
| qemu | 418ade7849ce7641c0f7333718caf5091a02fd4c | 311,269,696,724,321,760,000,000,000,000,000,000,000 | 4 | softmmu: Always initialize xlat in address_space_translate_for_iotlb
The bug is an uninitialized memory read, along the translate_fail
path, which results in garbage being read from iotlb_to_section,
which can lead to a crash in io_readx/io_writex.
The bug may be fixed by writing any value with zero
in ~TARGET_PAGE_MASK, so that the call to iotlb_to_section using
the xlat'ed address returns io_mem_unassigned, as desired by the
translate_fail path.
It is most useful to record the original physical page address,
which will eventually be logged by memory_region_access_valid
when the access is rejected by unassigned_mem_accepts.
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1065
Signed-off-by: Richard Henderson <[email protected]>
Reviewed-by: Peter Maydell <[email protected]>
Message-Id: <[email protected]> |
gdImageColorExact (gdImagePtr im, int r, int g, int b)
{
return gdImageColorExactAlpha (im, r, g, b, gdAlphaOpaque);
} | 0 | [
"CWE-119"
]
| php-src | feba44546c27b0158f9ac20e72040a224b918c75 | 119,122,182,207,733,240,000,000,000,000,000,000,000 | 4 | Fixed bug #22965 (Crash in gd lib's ImageFillToBorder()). |
newoffsets(dns_message_t *msg) {
dns_msgblock_t *msgblock;
dns_offsets_t *offsets;
msgblock = ISC_LIST_TAIL(msg->offsets);
offsets = msgblock_get(msgblock, dns_offsets_t);
if (offsets == NULL) {
msgblock = msgblock_allocate(msg->mctx,
sizeof(dns_offsets_t),
OFFSET_COUNT);
if (msgblock == NULL)
return (NULL);
ISC_LIST_APPEND(msg->offsets, msgblock, link);
offsets = msgblock_get(msgblock, dns_offsets_t);
}
return (offsets);
} | 0 | [
"CWE-617"
]
| bind9 | 6ed167ad0a647dff20c8cb08c944a7967df2d415 | 158,778,450,184,527,950,000,000,000,000,000,000,000 | 20 | Always keep a copy of the message
this allows it to be available even when dns_message_parse()
returns an error. |
calculateBytesPerPixel (const Header &header)
{
const ChannelList &channels = header.channels();
size_t bytesPerPixel = 0;
for (ChannelList::ConstIterator c = channels.begin();
c != channels.end();
++c)
{
bytesPerPixel += pixelTypeSize (c.channel().type);
}
return bytesPerPixel;
} | 0 | [
"CWE-125"
]
| openexr | e79d2296496a50826a15c667bf92bdc5a05518b4 | 166,290,580,943,272,890,000,000,000,000,000,000,000 | 15 | fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]> |
resolve_list_keys(struct lys_node_list *list, const char *keys_str)
{
int i, len, rc;
const char *value;
char *s = NULL;
struct ly_ctx *ctx = list->module->ctx;
for (i = 0; i < list->keys_size; ++i) {
assert(keys_str);
if (!list->child) {
/* no child, possible forward reference */
LOGVAL(ctx, LYE_INRESOLV, LY_VLOG_LYS, list, "list keys", keys_str);
return EXIT_FAILURE;
}
/* get the key name */
if ((value = strpbrk(keys_str, " \t\n"))) {
len = value - keys_str;
while (isspace(value[0])) {
value++;
}
} else {
len = strlen(keys_str);
}
rc = lys_getnext_data(lys_node_module((struct lys_node *)list), (struct lys_node *)list, keys_str, len, LYS_LEAF,
LYS_GETNEXT_NOSTATECHECK, (const struct lys_node **)&list->keys[i]);
if (rc) {
LOGVAL(ctx, LYE_INRESOLV, LY_VLOG_LYS, list, "list key", keys_str);
return EXIT_FAILURE;
}
if (check_key(list, i, keys_str, len)) {
/* check_key logs */
return -1;
}
/* check status */
if (lyp_check_status(list->flags, list->module, list->name,
list->keys[i]->flags, list->keys[i]->module, list->keys[i]->name,
(struct lys_node *)list->keys[i])) {
return -1;
}
/* default value - is ignored, keep it but print a warning */
if (list->keys[i]->dflt) {
/* log is not hidden only in case this resolving fails and in such a case
* we cannot get here
*/
assert(log_opt == ILO_STORE);
log_opt = ILO_LOG;
LOGWRN(ctx, "Default value \"%s\" in the list key \"%s\" is ignored. (%s)", list->keys[i]->dflt,
list->keys[i]->name, s = lys_path((struct lys_node*)list, LYS_PATH_FIRST_PREFIX));
log_opt = ILO_STORE;
free(s);
}
/* prepare for next iteration */
while (value && isspace(value[0])) {
value++;
}
keys_str = value;
}
return EXIT_SUCCESS;
} | 0 | [
"CWE-119"
]
| libyang | 32fb4993bc8bb49e93e84016af3c10ea53964be5 | 245,730,864,337,438,750,000,000,000,000,000,000,000 | 66 | schema tree BUGFIX do not check features while still resolving schema
Fixes #723 |
do_compose_group16(pdf14_buf *tos, pdf14_buf *nos, pdf14_buf *maskbuf,
int x0, int x1, int y0, int y1, int n_chan, bool additive,
const pdf14_nonseparable_blending_procs_t * pblend_procs,
bool has_matte, bool overprint, gx_color_index drawn_comps,
gs_memory_t *memory, gx_device *dev)
{
int num_spots = tos->num_spots;
uint16_t alpha = tos->alpha;
uint16_t shape = tos->shape;
gs_blend_mode_t blend_mode = tos->blend_mode;
uint16_t *tos_ptr =
(uint16_t *)(void *)(tos->data + (x0 - tos->rect.p.x)*2 +
(y0 - tos->rect.p.y) * tos->rowstride);
uint16_t *nos_ptr =
(uint16_t *)(void *)(nos->data + (x0 - nos->rect.p.x)*2 +
(y0 - nos->rect.p.y) * nos->rowstride);
uint16_t *mask_row_ptr = NULL;
int tos_planestride = tos->planestride;
int nos_planestride = nos->planestride;
uint16_t mask_bg_alpha = 0; /* Quiet compiler. */
bool tos_isolated = tos->isolated;
bool nos_isolated = nos->isolated;
bool nos_knockout = nos->knockout;
uint16_t *nos_alpha_g_ptr;
int tos_shape_offset = n_chan * tos_planestride;
int tos_alpha_g_offset = tos_shape_offset + (tos->has_shape ? tos_planestride : 0);
bool tos_has_tag = tos->has_tags;
int tos_tag_offset = tos_planestride * (tos->n_planes - 1);
int nos_shape_offset = n_chan * nos_planestride;
int nos_alpha_g_offset = nos_shape_offset + (nos->has_shape ? nos_planestride : 0);
int nos_tag_offset = nos_planestride * (nos->n_planes - 1);
const uint16_t *mask_tr_fn = NULL; /* Quiet compiler. */
bool has_mask = false;
uint16_t *backdrop_ptr = NULL;
pdf14_device *pdev = (pdf14_device *)dev;
#if RAW_DUMP
uint16_t *composed_ptr = NULL;
int width = x1 - x0;
#endif
art_pdf_compose_group16_fn fn;
if ((tos->n_chan == 0) || (nos->n_chan == 0))
return;
rect_merge(nos->dirty, tos->dirty);
if (nos->has_tags)
if_debug7m('v', memory,
"pdf14_pop_transparency_group y0 = %d, y1 = %d, w = %d, alpha = %d, shape = %d, tag = %d, bm = %d\n",
y0, y1, x1 - x0, alpha, shape, dev->graphics_type_tag & ~GS_DEVICE_ENCODES_TAGS, blend_mode);
else
if_debug6m('v', memory,
"pdf14_pop_transparency_group y0 = %d, y1 = %d, w = %d, alpha = %d, shape = %d, bm = %d\n",
y0, y1, x1 - x0, alpha, shape, blend_mode);
if (!nos->has_shape)
nos_shape_offset = 0;
if (!nos->has_tags)
nos_tag_offset = 0;
if (nos->has_alpha_g) {
nos_alpha_g_ptr = nos_ptr + (nos_alpha_g_offset>>1);
} else
nos_alpha_g_ptr = NULL;
if (nos->backdrop != NULL) {
backdrop_ptr =
(uint16_t *)(void *)(nos->backdrop + (x0 - nos->rect.p.x)*2 +
(y0 - nos->rect.p.y) * nos->rowstride);
}
if (blend_mode != BLEND_MODE_Compatible && blend_mode != BLEND_MODE_Normal)
overprint = false;
if (maskbuf != NULL) {
unsigned int tmp;
mask_tr_fn = (uint16_t *)maskbuf->transfer_fn;
/* Make sure we are in the mask buffer */
if (maskbuf->data != NULL) {
mask_row_ptr =
(uint16_t *)(void *)(maskbuf->data + (x0 - maskbuf->rect.p.x)*2 +
(y0 - maskbuf->rect.p.y) * maskbuf->rowstride);
has_mask = true;
}
/* We may have a case, where we are outside the maskbuf rect. */
/* We would have avoided creating the maskbuf->data */
/* In that case, we should use the background alpha value */
/* See discussion on the BC entry in the PDF spec. */
mask_bg_alpha = maskbuf->alpha;
/* Adjust alpha by the mask background alpha. This is only used
if we are outside the soft mask rect during the filling operation */
mask_bg_alpha = interp16(mask_tr_fn, mask_bg_alpha);
tmp = alpha * mask_bg_alpha + 0x8000;
mask_bg_alpha = (tmp + (tmp >> 8)) >> 8;
}
n_chan--; /* Now the true number of colorants (i.e. not including alpha)*/
#if RAW_DUMP
composed_ptr = nos_ptr;
dump_raw_buffer(memory, y1-y0, width, tos->n_planes, tos_planestride, tos->rowstride,
"bImageTOS", (byte *)tos_ptr, tos->deep);
dump_raw_buffer(memory, y1-y0, width, nos->n_planes, nos_planestride, nos->rowstride,
"cImageNOS", (byte *)nos_ptr, tos->deep);
if (maskbuf !=NULL && maskbuf->data != NULL) {
dump_raw_buffer(memory, maskbuf->rect.q.y - maskbuf->rect.p.y,
maskbuf->rect.q.x - maskbuf->rect.p.x, maskbuf->n_planes,
maskbuf->planestride, maskbuf->rowstride, "dMask",
maskbuf->data, maskbuf->deep);
}
#endif
/* You might hope that has_mask iff maskbuf != NULL, but this is
* not the case. Certainly we can see cases where maskbuf != NULL
* and has_mask = 0. What's more, treating such cases as being
* has_mask = 0 causes diffs. */
#ifdef TRACK_COMPOSE_GROUPS
{
int code = 0;
code += !!nos_knockout;
code += (!!nos_isolated)<<1;
code += (!!tos_isolated)<<2;
code += (!!tos->has_shape)<<3;
code += (!!tos_has_tag)<<4;
code += (!!additive)<<5;
code += (!!overprint)<<6;
code += (!!has_mask || maskbuf != NULL)<<7;
code += (!!has_matte)<<8;
code += (backdrop_ptr != NULL)<<9;
code += (num_spots != 0)<<10;
code += blend_mode<<11;
if (track_compose_groups == 0)
{
atexit(dump_track_compose_groups);
track_compose_groups = 1;
}
compose_groups[code]++;
}
#endif
/* We have tested the files on the cluster to see what percentage of
* files/devices hit the different options. */
if (nos_knockout)
fn = &compose_group16_knockout; /* Small %ages, nothing more than 1.1% */
else if (blend_mode != 0)
fn = &compose_group16_nonknockout_blend; /* Small %ages, nothing more than 2% */
else if (tos->has_shape == 0 && tos_has_tag == 0 && nos_isolated == 0 && nos_alpha_g_ptr == NULL &&
nos_shape_offset == 0 && nos_tag_offset == 0 && backdrop_ptr == NULL && has_matte == 0 && num_spots == 0 &&
overprint == 0) {
/* Additive vs Subtractive makes no difference in normal blend mode with no spots */
if (tos_isolated) {
if (has_mask && maskbuf) {/* 7% */
/* AirPrint test case hits this */
if (maskbuf && maskbuf->rect.p.x <= x0 && maskbuf->rect.p.y <= y0 &&
maskbuf->rect.q.x >= x1 && maskbuf->rect.q.y >= y1)
fn = &compose_group16_nonknockout_nonblend_isolated_allmask_common;
else
fn = &compose_group16_nonknockout_nonblend_isolated_mask_common;
} else /* 14% */
fn = &compose_group16_nonknockout_nonblend_isolated_nomask_common;
} else {
if (has_mask || maskbuf) /* 4% */
fn = &compose_group16_nonknockout_nonblend_nonisolated_mask_common;
else /* 15% */
fn = &compose_group16_nonknockout_nonblend_nonisolated_nomask_common;
}
} else
fn = compose_group16_nonknockout_noblend_general;
tos_planestride >>= 1;
tos_shape_offset >>= 1;
tos_alpha_g_offset >>= 1;
tos_tag_offset >>= 1;
nos_planestride >>= 1;
nos_shape_offset >>= 1;
nos_tag_offset >>= 1;
fn(tos_ptr, tos_isolated, tos_planestride, tos->rowstride>>1, alpha, shape, blend_mode, tos->has_shape,
tos_shape_offset, tos_alpha_g_offset, tos_tag_offset, tos_has_tag,
nos_ptr, nos_isolated, nos_planestride, nos->rowstride>>1, nos_alpha_g_ptr, nos_knockout,
nos_shape_offset, nos_tag_offset,
mask_row_ptr, has_mask, maskbuf, mask_bg_alpha, mask_tr_fn,
backdrop_ptr,
has_matte, n_chan, additive, num_spots, overprint, drawn_comps, x0, y0, x1, y1,
pblend_procs, pdev);
#if RAW_DUMP
dump_raw_buffer(memory, y1-y0, width, nos->n_planes, nos_planestride<<1, nos->rowstride,
"eComposed", (byte *)composed_ptr, nos->deep);
global_index++;
#endif
} | 0 | [
"CWE-476"
]
| ghostpdl | 7870f4951bcc6a153f317e3439e14d0e929fd231 | 272,016,536,818,458,700,000,000,000,000,000,000,000 | 185 | Bug 701795: Segv due to image mask issue |
static void do_git_path(const struct repository *repo,
const struct worktree *wt, struct strbuf *buf,
const char *fmt, va_list args)
{
int gitdir_len;
strbuf_worktree_gitdir(buf, repo, wt);
if (buf->len && !is_dir_sep(buf->buf[buf->len - 1]))
strbuf_addch(buf, '/');
gitdir_len = buf->len;
strbuf_vaddf(buf, fmt, args);
if (!wt)
adjust_git_path(repo, buf, gitdir_len);
strbuf_cleanup_path(buf);
} | 0 | [
"CWE-20"
]
| git | 7c3745fc6185495d5765628b4dfe1bd2c25a2981 | 286,664,847,799,407,730,000,000,000,000,000,000,000 | 14 | path: safeguard `.git` against NTFS Alternate Streams Accesses
Probably inspired by HFS' resource streams, NTFS supports "Alternate
Data Streams": by appending `:<stream-name>` to the file name,
information in addition to the file contents can be written and read,
information that is copied together with the file (unless copied to a
non-NTFS location).
These Alternate Data Streams are typically used for things like marking
an executable as having just been downloaded from the internet (and
hence not necessarily being trustworthy).
In addition to a stream name, a stream type can be appended, like so:
`:<stream-name>:<stream-type>`. Unless specified, the default stream
type is `$DATA` for files and `$INDEX_ALLOCATION` for directories. In
other words, `.git::$INDEX_ALLOCATION` is a valid way to reference the
`.git` directory!
In our work in Git v2.2.1 to protect Git on NTFS drives under
`core.protectNTFS`, we focused exclusively on NTFS short names, unaware
of the fact that NTFS Alternate Data Streams offer a similar attack
vector.
Let's fix this.
Seeing as it is better to be safe than sorry, we simply disallow paths
referring to *any* NTFS Alternate Data Stream of `.git`, not just
`::$INDEX_ALLOCATION`. This also simplifies the implementation.
This closes CVE-2019-1352.
Further reading about NTFS Alternate Data Streams:
https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-fscc/c54dec26-1551-4d3a-a0ea-4fa40f848eb3
Reported-by: Nicolas Joly <[email protected]>
Signed-off-by: Johannes Schindelin <[email protected]> |
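The message explains that `.git` can also be reached as `.git:<stream-name>:<stream-type>` on NTFS, so the path check must treat a component that is `.git` followed by a colon as equivalent to `.git` itself. A simplified sketch of such a component check (git's real handling covers more cases, e.g. 8.3 short names like "git~1"):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Return 1 if a single path component must be treated as ".git" on
 * NTFS: the literal name in any case, or the name followed by a ':'
 * introducing an Alternate Data Stream such as ".git::$INDEX_ALLOCATION". */
static int is_dotgit_component(const char *name)
{
    static const char dotgit[] = ".git";
    size_t i;

    for (i = 0; i < sizeof(dotgit) - 1; i++)
        if (tolower((unsigned char)name[i]) != dotgit[i])
            return 0;
    return name[i] == '\0' || name[i] == ':';   /* ADS suffix counts too */
}

int main(void)
{
    const char *samples[] = {
        ".git", ".GIT", ".git::$INDEX_ALLOCATION", ".gitignore", "src"
    };
    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%-28s -> %d\n", samples[i], is_dotgit_component(samples[i]));
    return 0;
}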
static int sched_read_attr(struct sched_attr __user *uattr,
struct sched_attr *attr,
unsigned int usize)
{
int ret;
if (!access_ok(VERIFY_WRITE, uattr, usize))
return -EFAULT;
/*
* If we're handed a smaller struct than we know of,
* ensure all the unknown bits are 0 - i.e. old
* user-space does not get uncomplete information.
*/
if (usize < sizeof(*attr)) {
unsigned char *addr;
unsigned char *end;
addr = (void *)attr + usize;
end = (void *)attr + sizeof(*attr);
for (; addr < end; addr++) {
if (*addr)
return -EFBIG;
}
attr->size = usize;
}
ret = copy_to_user(uattr, attr, attr->size);
if (ret)
return -EFAULT;
return 0;
} | 0 | [
"CWE-119"
]
| linux | 29d6455178a09e1dc340380c582b13356227e8df | 205,094,584,523,807,200,000,000,000,000,000,000,000 | 35 | sched: panic on corrupted stack end
Until now, hitting this BUG_ON caused a recursive oops (because oops
handling involves do_exit(), which calls into the scheduler, which in
turn raises an oops), which caused stuff below the stack to be
overwritten until a panic happened (e.g. via an oops in interrupt
context, caused by the overwritten CPU index in the thread_info).
Just panic directly.
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
port::StatusOr<DeviceMemory<uint8>> CreateBatchNormForwardWorkspace(
Stream* stream, const CudnnHandle& cudnn, const cudnnBatchNormMode_t& mode,
const cudnnBatchNormOps_t& bn_ops,
const cudnnActivationDescriptor_t& activation_desc,
const CudnnTensorDescriptor& x_descriptor,
const CudnnTensorDescriptor& scale_offset_descriptor,
ScratchAllocator* workspace_allocator) {
// Query the workspace size.
size_t workspace_size_in_bytes = 0;
RETURN_IF_CUDNN_ERROR(
cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(
/*handle=*/cudnn.handle(), /*mode=*/mode, /*bnOps=*/bn_ops,
/*xDesc=*/x_descriptor.handle(), /*zDesc=*/x_descriptor.handle(),
/*yDesc=*/x_descriptor.handle(),
/*bnScaleBiasMeanVarDesc=*/scale_offset_descriptor.handle(),
/*activationDesc=*/activation_desc,
/*sizeInBytes=*/&workspace_size_in_bytes));
// Allocate the workspace.
if (workspace_size_in_bytes == 0) {
return DeviceMemory<uint8>();
}
return workspace_allocator->AllocateBytes(workspace_size_in_bytes);
} | 0 | [
"CWE-20"
]
| tensorflow | 14755416e364f17fb1870882fa778c7fec7f16e3 | 331,573,609,817,897,670,000,000,000,000,000,000,000 | 23 | Prevent CHECK-fail in LSTM/GRU with zero-length input.
PiperOrigin-RevId: 346239181
Change-Id: I5f233dbc076aab7bb4e31ba24f5abd4eaf99ea4f |
unlimit_nproc(void)
{
#ifdef __linux__
struct rlimit rl;
debug_decl(unlimit_nproc, SUDOERS_DEBUG_UTIL);
if (getrlimit(RLIMIT_NPROC, &nproclimit) != 0)
sudo_warn("getrlimit(RLIMIT_NPROC)");
rl.rlim_cur = rl.rlim_max = RLIM_INFINITY;
if (setrlimit(RLIMIT_NPROC, &rl) != 0) {
rl.rlim_cur = rl.rlim_max = nproclimit.rlim_max;
if (setrlimit(RLIMIT_NPROC, &rl) != 0)
sudo_warn("setrlimit(RLIMIT_NPROC)");
}
debug_return;
#endif /* __linux__ */
} | 0 | [
"CWE-193"
]
| sudo | 1f8638577d0c80a4ff864a2aad80a0d95488e9a8 | 100,221,184,173,583,720,000,000,000,000,000,000,000 | 17 | Fix potential buffer overflow when unescaping backslashes in user_args.
Also, do not try to unescape backslashes unless in run mode *and*
we are running the command via a shell.
Found by Qualys, this fixes CVE-2021-3156. |
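The fix has two parts: only unescape backslashes when the command really runs through a shell in run mode, and never let the unescaping read or write out of bounds. A hedged sketch of a copy loop with both guards (illustrative, not sudo's actual code):

#include <stdio.h>
#include <string.h>

/* Copy src into dst, optionally removing backslash escapes, but never
 * writing past dst_size bytes and never reading past the terminating
 * NUL.  The "unescape" flag models the "run mode via a shell" condition
 * from the commit message: when it is false, bytes are copied verbatim,
 * so the size computed for dst stays valid. */
static int copy_args(char *dst, size_t dst_size, const char *src, int unescape)
{
    size_t o = 0;

    for (; *src != '\0'; src++) {
        if (unescape && src[0] == '\\' && src[1] != '\0')
            src++;                       /* skip the escape, keep next byte */
        if (o + 1 >= dst_size)
            return -1;                   /* would overflow, refuse */
        dst[o++] = *src;
    }
    dst[o] = '\0';
    return 0;
}

int main(void)
{
    char buf[32];

    if (copy_args(buf, sizeof buf, "ls \\; id", 1) == 0)
        printf("unescaped: \"%s\"\n", buf);
    if (copy_args(buf, sizeof buf, "ls \\; id", 0) == 0)
        printf("verbatim:  \"%s\"\n", buf);
    return 0;
}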
join_path(const char *basedir, const char *name, const char *suffix,
char *buffer, size_t length, LOG_Severity severity)
{
const char *sep;
if (!basedir) {
basedir = "";
sep = "";
} else {
sep = "/";
}
if (!suffix)
suffix = "";
if (snprintf(buffer, length, "%s%s%s%s", basedir, sep, name, suffix) >= length) {
LOG(severity, "File path %s%s%s%s too long", basedir, sep, name, suffix);
return 0;
}
return 1;
} | 0 | [
"CWE-59"
]
| chrony | 7a4c396bba8f92a3ee8018620983529152050c74 | 261,202,690,241,563,780,000,000,000,000,000,000,000 | 22 | util: add functions for common file operations
Add a function to open a file for reading, writing, or appending.
In uppercase modes errors are handled as fatal, i.e. the caller doesn't
need to check for NULL. To avoid string manipulations in the callers,
the function accepts an optional directory and suffix. New files are
created with specified permissions, which will be needed for saving
keys. The O_EXCL flag is used in the writing mode to make sure a new
file is created (on filesystems that support it).
Also, add a function to rename a temporary file by changing its suffix,
and a function to remove a file.
All functions log all errors, at least as debug messages. |
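The helper described in the message creates new files with explicit permissions and O_EXCL, so an existing file or a planted symlink is never silently reused. A rough sketch of that creation path (the name create_new_file and the /tmp path are placeholders, not chrony's UTI_* API):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/* Create a brand-new file for writing with the given permissions.
 * O_EXCL makes open() fail if anything already exists at the path. */
static FILE *create_new_file(const char *path, mode_t perm)
{
    int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, perm);
    if (fd < 0) {
        perror(path);
        return NULL;
    }
    FILE *f = fdopen(fd, "w");
    if (f == NULL)
        close(fd);
    return f;
}

int main(void)
{
    FILE *f = create_new_file("/tmp/example.keys.tmp", 0600);
    if (f != NULL) {
        fprintf(f, "placeholder key material\n");
        fclose(f);
    }
    return 0;
}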
Defrag4Reassemble(ThreadVars *tv, DefragTracker *tracker, Packet *p)
{
Packet *rp = NULL;
/* Should not be here unless we have seen the last fragment. */
if (!tracker->seen_last)
return NULL;
/* Check that we have all the data. Relies on the fact that
* fragments are inserted if frag_offset order. */
Frag *frag;
int len = 0;
TAILQ_FOREACH(frag, &tracker->frags, next) {
if (frag->skip)
continue;
if (frag == TAILQ_FIRST(&tracker->frags)) {
if (frag->offset != 0) {
goto done;
}
len = frag->data_len;
}
else {
if (frag->offset > len) {
/* This fragment starts after the end of the previous
* fragment. We have a hole. */
goto done;
}
else {
len += frag->data_len;
}
}
}
/* Allocate a Packet for the reassembled packet. On failure we
* SCFree all the resources held by this tracker. */
rp = PacketDefragPktSetup(p, NULL, 0, IPV4_GET_IPPROTO(p));
if (rp == NULL) {
SCLogError(SC_ERR_MEM_ALLOC, "Failed to allocate packet for "
"fragmentation re-assembly, dumping fragments.");
goto error_remove_tracker;
}
PKT_SET_SRC(rp, PKT_SRC_DEFRAG);
rp->flags |= PKT_REBUILT_FRAGMENT;
rp->recursion_level = p->recursion_level;
int fragmentable_offset = 0;
int fragmentable_len = 0;
int hlen = 0;
int ip_hdr_offset = 0;
TAILQ_FOREACH(frag, &tracker->frags, next) {
SCLogDebug("frag %p, data_len %u, offset %u, pcap_cnt %"PRIu64,
frag, frag->data_len, frag->offset, frag->pcap_cnt);
if (frag->skip)
continue;
if (frag->data_len - frag->ltrim <= 0)
continue;
if (frag->offset == 0) {
if (PacketCopyData(rp, frag->pkt, frag->len) == -1)
goto error_remove_tracker;
hlen = frag->hlen;
ip_hdr_offset = frag->ip_hdr_offset;
/* This is the start of the fragmentable portion of the
* first packet. All fragment offsets are relative to
* this. */
fragmentable_offset = frag->ip_hdr_offset + frag->hlen;
fragmentable_len = frag->data_len;
}
else {
int pkt_end = fragmentable_offset + frag->offset + frag->data_len;
if (pkt_end > (int)MAX_PAYLOAD_SIZE) {
SCLogWarning(SC_ERR_REASSEMBLY, "Failed re-assemble "
"fragmented packet, exceeds size of packet buffer.");
goto error_remove_tracker;
}
if (PacketCopyDataOffset(rp, fragmentable_offset + frag->offset + frag->ltrim,
frag->pkt + frag->data_offset + frag->ltrim,
frag->data_len - frag->ltrim) == -1) {
goto error_remove_tracker;
}
if (frag->offset + frag->data_len > fragmentable_len)
fragmentable_len = frag->offset + frag->data_len;
}
if (!frag->more_frags) {
break;
}
}
SCLogDebug("ip_hdr_offset %u, hlen %u, fragmentable_len %u",
ip_hdr_offset, hlen, fragmentable_len);
rp->ip4h = (IPV4Hdr *)(GET_PKT_DATA(rp) + ip_hdr_offset);
int old = rp->ip4h->ip_len + rp->ip4h->ip_off;
rp->ip4h->ip_len = htons(fragmentable_len + hlen);
rp->ip4h->ip_off = 0;
rp->ip4h->ip_csum = FixChecksum(rp->ip4h->ip_csum,
old, rp->ip4h->ip_len + rp->ip4h->ip_off);
SET_PKT_LEN(rp, ip_hdr_offset + hlen + fragmentable_len);
tracker->remove = 1;
DefragTrackerFreeFrags(tracker);
done:
return rp;
error_remove_tracker:
tracker->remove = 1;
DefragTrackerFreeFrags(tracker);
if (rp != NULL)
PacketFreeOrRelease(rp);
return NULL;
} | 0 | [
"CWE-358"
]
| suricata | 4a04f814b15762eb446a5ead4d69d021512df6f8 | 151,963,393,966,409,320,000,000,000,000,000,000,000 | 116 | defrag - take protocol into account during re-assembly
The IP protocol was not being used to match fragments with
their packets allowing a carefully constructed packet
with a different protocol to be matched, allowing re-assembly
to complete, creating a packet that would not be re-assembled
by the destination host. |
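The fix amounts to making the IP protocol part of the key that matches fragments to their reassembly tracker. A reduced sketch of such a key comparison (Suricata's real DefragTracker carries more fields, such as VLAN ids and full address structures):

#include <stdint.h>
#include <stdio.h>

/* Key used to find the reassembly tracker for a fragment.  Including
 * the IP protocol means a crafted fragment with the same addresses and
 * IP ID but a different protocol can no longer be merged into another
 * packet's tracker. */
struct frag_key {
    uint32_t src, dst;
    uint16_t ip_id;
    uint8_t  proto;
};

static int frag_key_equal(const struct frag_key *a, const struct frag_key *b)
{
    return a->src == b->src && a->dst == b->dst &&
           a->ip_id == b->ip_id && a->proto == b->proto;
}

int main(void)
{
    struct frag_key udp = { 0x0a000001, 0x0a000002, 0x1234, 17 };
    struct frag_key tcp = { 0x0a000001, 0x0a000002, 0x1234, 6 };

    printf("same tracker: %d\n", frag_key_equal(&udp, &tcp));  /* 0 */
    return 0;
}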