func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
/*
 * Append a string-valued (key, value) pair to a message's argument
 * array, or replace the value in place when the key already exists.
 * The array is kept NULL-terminated: a trailing entry with key == NULL
 * and type == MESSAGE_NULL acts as the sentinel.
 *
 * NOTE(review): when a matching key is found, its value is replaced,
 * but execution still falls through and appends a second entry for the
 * same key below -- presumably an early return was intended; confirm
 * against callers.
 * NOTE(review): the growth test `i > message->argument_allocated`
 * looks off-by-one, and doubling an initial allocation of 0 stays 0 --
 * verify the allocator's invariants elsewhere in ammessage.c.
 */
message_add_argument(
message_t *message,
char *key,
char *value)
{
int i = 0;
/* Scan for an existing entry with a matching key; replace its string
 * value in place (existing entries are asserted to be MESSAGE_STRING). */
while (message->arg_array[i].key != NULL) {
if (strcmp(key, message->arg_array[i].key) == 0) {
assert(message->arg_array[i].value.type == MESSAGE_STRING);
g_free(message->arg_array[i].value.string);
message->arg_array[i].value.string = g_strdup(value);
}
i++;
}
/* Grow the array when the sentinel index exceeds the allocation; the
 * +1 keeps room for the NULL-terminator entry. */
if (i > message->argument_allocated) {
message->argument_allocated *= 2;
message->arg_array = g_realloc(message->arg_array, (message->argument_allocated+1) * sizeof(message_arg_array_t));
}
/* Append the new pair at the old sentinel position (keys and values
 * are copied, so the caller keeps ownership of its arguments)... */
message->arg_array[i].key = g_strdup(key);
message->arg_array[i].value.type = MESSAGE_STRING;
message->arg_array[i].value.string = g_strdup(value);
i++;
/* ...and re-install the sentinel one slot past it. */
message->arg_array[i].key = NULL;
message->arg_array[i].value.type = MESSAGE_NULL;
message->arg_array[i].value.string = NULL;
} | 0 | [
"CWE-77"
]
| amanda | 29bae2e271093cd8d06ea98f73a474c685c5a314 | 122,953,945,073,270,450,000,000,000,000,000,000,000 | 27 | * application-src/ambsdtar.c, application-src/amgtar.c,
application-src/amstar.c: Filter option from COMMAND-OPTIONS
* common-src/ammessage.c: Add message.
git-svn-id: https://svn.code.sf.net/p/amanda/code/amanda/trunk@6483 a8d146d6-cc15-0410-8900-af154a0219e0 |
// Build Hydrogen IR for a comparison expression (==, ===, <, instanceof,
// in, typeof tests, ...).  Dispatches first on special syntactic shapes
// (class-of tests, literal typeof/null/undefined/bool comparisons), then
// on the operator, then on recorded type feedback, emitting the most
// specific compare instruction available and falling back to the generic
// compare IC when nothing better applies.
void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
// Pattern: %_ClassOf(x) compared against a string literal -- emit a
// dedicated class-of-test branch instead of a generic comparison.
if (IsClassOfTest(expr)) {
CallRuntime* call = expr->left()->AsCallRuntime();
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
Literal* literal = expr->right()->AsLiteral();
Handle<String> rhs = Handle<String>::cast(literal->handle());
HClassOfTestAndBranch* instr =
new(zone()) HClassOfTestAndBranch(value, rhs);
instr->set_position(expr->position());
return ast_context()->ReturnControl(instr, expr->id());
}
TypeInfo type_info = oracle()->CompareType(expr);
// Check if this expression was ever executed according to type feedback.
// Note that for the special typeof/null/undefined cases we get unknown here.
if (type_info.IsUninitialized()) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
type_info = TypeInfo::Unknown();
}
// Evaluate both operands onto the environment stack, then pop them in
// reverse order (right was pushed last).
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
HValue* context = environment()->LookupContext();
HValue* right = Pop();
HValue* left = Pop();
Token::Value op = expr->op();
// Specialized handling for literal comparisons: typeof x == "...",
// x == undefined, x == null, and comparisons against boolean literals.
HTypeof* typeof_expr = NULL;
Handle<String> check;
if (IsLiteralCompareTypeof(left, op, right, &typeof_expr, &check)) {
return HandleLiteralCompareTypeof(expr, typeof_expr, check);
}
HValue* sub_expr = NULL;
Factory* f = graph()->isolate()->factory();
if (IsLiteralCompareNil(left, op, right, f->undefined_value(), &sub_expr)) {
return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
}
if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
}
if (IsLiteralCompareBool(left, op, right)) {
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
}
if (op == Token::INSTANCEOF) {
// Check to see if the rhs of the instanceof is a global function not
// residing in new space. If it is we assume that the function will stay the
// same.
Handle<JSFunction> target = Handle<JSFunction>::null();
VariableProxy* proxy = expr->right()->AsVariableProxy();
bool global_function = (proxy != NULL) && proxy->var()->IsUnallocated();
if (global_function &&
info()->has_global_object() &&
!info()->global_object()->IsAccessCheckNeeded()) {
Handle<String> name = proxy->name();
Handle<GlobalObject> global(info()->global_object());
LookupResult lookup(isolate());
global->Lookup(*name, &lookup);
if (lookup.IsNormal() && lookup.GetValue()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
if (!isolate()->heap()->InNewSpace(*candidate)) {
target = candidate;
}
}
}
// If the target is not null we have found a known global function that is
// assumed to stay the same for this instanceof.
if (target.is_null()) {
HInstanceOf* result = new(zone()) HInstanceOf(context, left, right);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
AddInstruction(new(zone()) HCheckFunction(right, target));
HInstanceOfKnownGlobal* result =
new(zone()) HInstanceOfKnownGlobal(context, left, target);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
}
} else if (op == Token::IN) {
HIn* result = new(zone()) HIn(context, left, right);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else if (type_info.IsNonPrimitive()) {
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: {
// Can we get away with map check and not instance type check?
Handle<Map> map = oracle()->GetCompareMap(expr);
if (!map.is_null()) {
AddInstruction(new(zone()) HCheckNonSmi(left));
AddInstruction(HCheckMaps::NewWithTransitions(left, map, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
AddInstruction(HCheckMaps::NewWithTransitions(right, map, zone()));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
} else {
AddInstruction(new(zone()) HCheckNonSmi(left));
AddInstruction(HCheckInstanceType::NewIsSpecObject(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
AddInstruction(HCheckInstanceType::NewIsSpecObject(right, zone()));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
}
}
default:
return Bailout("Unsupported non-primitive compare");
}
} else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
(op == Token::EQ || op == Token::EQ_STRICT)) {
// Symbol (interned string) equality reduces to pointer equality once
// both sides are checked to be symbols.
AddInstruction(new(zone()) HCheckNonSmi(left));
AddInstruction(HCheckInstanceType::NewIsSymbol(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
AddInstruction(HCheckInstanceType::NewIsSymbol(right, zone()));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
} else {
// Fall back on the feedback-derived representation: tagged values go
// through the generic compare IC; untagged (int/double) values use the
// specialized compare-and-branch with a fixed input representation.
Representation r = ToRepresentation(type_info);
if (r.IsTagged()) {
HCompareGeneric* result =
new(zone()) HCompareGeneric(context, left, right, op);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
HCompareIDAndBranch* result =
new(zone()) HCompareIDAndBranch(left, right, op);
result->set_position(expr->position());
result->SetInputRepresentation(r);
return ast_context()->ReturnControl(result, expr->id());
}
}
} | 0 | []
| node | fd80a31e0697d6317ce8c2d289575399f4e06d21 | 126,302,455,299,181,230,000,000,000,000,000,000,000 | 150 | deps: backport 5f836c from v8 upstream
Original commit message:
Fix Hydrogen bounds check elimination
When combining bounds checks, they must all be moved before the first load/store
that they are guarding.
BUG=chromium:344186
LOG=y
[email protected]
Review URL: https://codereview.chromium.org/172093002
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@19475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
fix #8070 |
// Regression test: reusing a single bdecode_node for two successive
// decodes must fully reset it -- first parse a bencoded dict, then a
// list, and verify the node reflects only the second document.
TORRENT_TEST(clear)
{
// b1 decodes to: {"a": 1, "b": "foo", "c": [1, 2], "d": {"x": 1}}
char b1[] = "d1:ai1e1:b3:foo1:cli1ei2ee1:dd1:xi1eee";
// b2 decodes to: [1, 2]
char b2[] = "li1ei2ee";
bdecode_node e;
error_code ec;
// sizeof(bN)-1 excludes the string literal's NUL terminator.
int ret = bdecode(b1, b1 + sizeof(b1)-1, e, ec);
printf("%s\n", print_entry(e).c_str());
TEST_EQUAL(ret, 0);
TEST_EQUAL(e.type(), bdecode_node::dict_t);
TEST_EQUAL(e.dict_size(), 4);
TEST_EQUAL(e.dict_at(1).first, "b");
// Decode a second buffer into the same node; all dict state from the
// first decode must be gone.
ret = bdecode(b2, b2 + sizeof(b2)-1, e, ec);
printf("%s\n", print_entry(e).c_str());
TEST_EQUAL(ret, 0);
TEST_EQUAL(e.type(), bdecode_node::list_t);
TEST_EQUAL(e.list_size(), 2);
TEST_EQUAL(e.list_int_value_at(1), 2);
} | 0 | [
"CWE-125"
]
| libtorrent | ec30a5e9ec703afb8abefba757c6d401303b53db | 188,215,166,089,514,040,000,000,000,000,000,000,000 | 21 | fix out-of-bounds read in bdecode
Fixes #2099 |
/*
 * Mode-6 "saveconfig" request handler: dump the current runtime
 * configuration to a file inside the configured saveconfigdir.
 *
 * Refuses the request when the "nomodify" restriction applies or when
 * no saveconfigdir is configured; validates the client-supplied file
 * name (which may be a strftime() template) against length limits and
 * is_safe_filename() before opening the target exclusively.  Results
 * are reported both to the requester (ctl_printf/ctl_flushpkt) and to
 * syslog, and the final name is published in the 'savedconfig' system
 * variable.
 */
save_config(
struct recvbuf *rbufp,
int restrict_mask
)
{
#ifdef SAVECONFIG
static const char savedconfig_eq[] = "savedconfig=";
/* Build a safe open mode from the available mode flags. We want
* to create a new file and write it in text mode (when
* applicable -- only Windows does this...)
*/
static const int openmode = O_CREAT | O_TRUNC | O_WRONLY
# if defined(O_EXCL) /* posix, vms */
| O_EXCL
# elif defined(_O_EXCL) /* windows is alway very special... */
| _O_EXCL
# endif
# if defined(_O_TEXT) /* windows, again */
| _O_TEXT
#endif
;
char filespec[128];
char filename[128];
char fullpath[512];
char savedconfig[sizeof(savedconfig_eq) + sizeof(filename)];
time_t now;
int fd;
FILE *fptr;
int prc;
size_t reqlen;
#endif
/* Access control: a "restrict ... nomodify" match rejects the
 * request before anything else happens. */
if (RES_NOMODIFY & restrict_mask) {
ctl_printf("%s", "saveconfig prohibited by restrict ... nomodify");
ctl_flushpkt(0);
NLOG(NLOG_SYSINFO)
msyslog(LOG_NOTICE,
"saveconfig from %s rejected due to nomodify restriction",
stoa(&rbufp->recv_srcadr));
sys_restricted++;
return;
}
#ifdef SAVECONFIG
if (NULL == saveconfigdir) {
ctl_printf("%s", "saveconfig prohibited, no saveconfigdir configured");
ctl_flushpkt(0);
NLOG(NLOG_SYSINFO)
msyslog(LOG_NOTICE,
"saveconfig from %s rejected, no saveconfigdir",
stoa(&rbufp->recv_srcadr));
return;
}
/* The length checking stuff gets serious. Do not assume a NUL
* byte can be found, but if so, use it to calculate the needed
* buffer size. If the available buffer is too short, bail out;
* likewise if there is no file spec. (The latter will not
* happen when using NTPQ, but there are other ways to craft a
* network packet!)
*/
reqlen = (size_t)(reqend - reqpt);
if (0 != reqlen) {
char * nulpos = (char*)memchr(reqpt, 0, reqlen);
if (NULL != nulpos)
reqlen = (size_t)(nulpos - reqpt);
}
if (0 == reqlen)
return;
if (reqlen >= sizeof(filespec)) {
ctl_printf("saveconfig exceeded maximum raw name length (%u)",
(u_int)sizeof(filespec));
ctl_flushpkt(0);
msyslog(LOG_NOTICE,
"saveconfig exceeded maximum raw name length from %s",
stoa(&rbufp->recv_srcadr));
return;
}
/* copy data directly as we exactly know the size */
memcpy(filespec, reqpt, reqlen);
filespec[reqlen] = '\0';
/*
* allow timestamping of the saved config filename with
* strftime() format such as:
* ntpq -c "saveconfig ntp-%Y%m%d-%H%M%S.conf"
* XXX: Nice feature, but not too safe.
* YYY: The check for permitted characters in file names should
* weed out the worst. Let's hope 'strftime()' does not
* develop pathological problems.
*/
time(&now);
if (0 == strftime(filename, sizeof(filename), filespec,
localtime(&now)))
{
/*
* If we arrive here, 'strftime()' balked; most likely
* the buffer was too short. (Or it encounterd an empty
* format, or just a format that expands to an empty
* string.) We try to use the original name, though this
* is very likely to fail later if there are format
* specs in the string. Note that truncation cannot
* happen here as long as both buffers have the same
* size!
*/
strlcpy(filename, filespec, sizeof(filename));
}
/*
* Check the file name for sanity. This migth/will rule out file
* names that would be legal but problematic, and it blocks
* directory traversal.
*/
if (!is_safe_filename(filename)) {
ctl_printf("saveconfig rejects unsafe file name '%s'",
filename);
ctl_flushpkt(0);
msyslog(LOG_NOTICE,
"saveconfig rejects unsafe file name from %s",
stoa(&rbufp->recv_srcadr));
return;
}
/* concatenation of directory and path can cause another
* truncation...
*/
prc = snprintf(fullpath, sizeof(fullpath), "%s%s",
saveconfigdir, filename);
if (prc < 0 || prc >= sizeof(fullpath)) {
ctl_printf("saveconfig exceeded maximum path length (%u)",
(u_int)sizeof(fullpath));
ctl_flushpkt(0);
msyslog(LOG_NOTICE,
"saveconfig exceeded maximum path length from %s",
stoa(&rbufp->recv_srcadr));
return;
}
/* O_EXCL (when available, see openmode above) ensures an existing
 * file is never overwritten. */
fd = open(fullpath, openmode, S_IRUSR | S_IWUSR);
if (-1 == fd)
fptr = NULL;
else
fptr = fdopen(fd, "w");
if (NULL == fptr || -1 == dump_all_config_trees(fptr, 1)) {
ctl_printf("Unable to save configuration to file '%s': %m",
filename);
msyslog(LOG_ERR,
"saveconfig %s from %s failed", filename,
stoa(&rbufp->recv_srcadr));
} else {
ctl_printf("Configuration saved to '%s'", filename);
msyslog(LOG_NOTICE,
"Configuration saved to '%s' (requested by %s)",
fullpath, stoa(&rbufp->recv_srcadr));
/*
* save the output filename in system variable
* savedconfig, retrieved with:
* ntpq -c "rv 0 savedconfig"
* Note: the way 'savedconfig' is defined makes overflow
* checks unnecessary here.
*/
snprintf(savedconfig, sizeof(savedconfig), "%s%s",
savedconfig_eq, filename);
set_sys_var(savedconfig, strlen(savedconfig) + 1, RO);
}
if (NULL != fptr)
fclose(fptr);
#else /* !SAVECONFIG follows */
ctl_printf("%s",
"saveconfig unavailable, configured with --disable-saveconfig");
#endif
ctl_flushpkt(0);
} | 0 | [
"CWE-254"
]
| ntp | 3680c2e4d5f88905ce062c7b43305d610a2c9796 | 214,231,633,639,293,200,000,000,000,000,000,000,000 | 178 | [Bug 2938] ntpq saveconfig command allows dangerous characters in filenames.
- make sure the file does not exist (no overwrite allowed)
- ensure text mode where applicable (windows) |
std::string help() const override {
    // One-line summary surfaced by the server's command help output.
    static const char kHelpText[] = "Returns information about roles.";
    return kHelpText;
} | 0 | [
"CWE-613"
]
| mongo | e55d6e2292e5dbe2f97153251d8193d1cc89f5d7 | 232,378,444,845,597,140,000,000,000,000,000,000,000 | 3 | SERVER-38984 Validate unique User ID on UserCache hit |
/*
 * Boot-time (HVM guest) remapping of the Xen shared_info page: tear
 * down the early fixmap mapping and switch HYPERVISOR_shared_info to
 * the direct-map (__va) address of the PFN recorded in shared_info_pfn,
 * then refresh VCPU 0's vcpu_info pointer which the move invalidated.
 */
static void __init xen_hvm_init_mem_mapping(void)
{
early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
/*
* The virtual address of the shared_info page has changed, so
* the vcpu_info pointer for VCPU 0 is now stale.
*
* The prepare_boot_cpu callback will re-initialize it via
* xen_vcpu_setup, but we can't rely on that to be called for
* old Xen versions (xen_have_vector_callback == 0).
*
* It is, in any case, bad to have a stale vcpu_info pointer
* so reset it now.
*/
xen_vcpu_info_reset(0);
} | 0 | []
| linux | fa1f57421e0b1c57843902c89728f823abc32f02 | 114,760,246,957,238,110,000,000,000,000,000,000,000 | 18 | xen/virtio: Enable restricted memory access using Xen grant mappings
In order to support virtio in Xen guests add a config option XEN_VIRTIO
enabling the user to specify whether in all Xen guests virtio should
be able to access memory via Xen grant mappings only on the host side.
Also set PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS feature from the guest
initialization code on Arm and x86 if CONFIG_XEN_VIRTIO is enabled.
Signed-off-by: Juergen Gross <[email protected]>
Signed-off-by: Oleksandr Tyshchenko <[email protected]>
Reviewed-by: Stefano Stabellini <[email protected]>
Reviewed-by: Boris Ostrovsky <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Juergen Gross <[email protected]> |
/*
 * arptables rule-chain walk: run @skb through @table's rules for the
 * current netfilter hook, updating per-rule packet/byte counters and
 * following jumps/returns via the per-CPU jumpstack until a target
 * produces a verdict.  Returns an NF_* verdict; NF_DROP when the ARP
 * header cannot be pulled or a target sets hotdrop.
 */
unsigned int arpt_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct xt_table *table)
{
unsigned int hook = state->hook;
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
unsigned int verdict = NF_DROP;
const struct arphdr *arp;
struct arpt_entry *e, **jumpstack;
const char *indev, *outdev;
const void *table_base;
unsigned int cpu, stackidx = 0;
const struct xt_table_info *private;
struct xt_action_param acpar;
unsigned int addend;
if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
return NF_DROP;
indev = state->in ? state->in->name : nulldevname;
outdev = state->out ? state->out->name : nulldevname;
/* Counters/jumpstack are per-CPU; disable BH and enter the seqcount
 * write section for the duration of the walk. */
local_bh_disable();
addend = xt_write_recseq_begin();
private = table->private;
cpu = smp_processor_id();
/*
* Ensure we load private-> members after we've fetched the base
* pointer.
*/
smp_read_barrier_depends();
table_base = private->entries;
jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
/* No TEE support for arptables, so no need to switch to alternate
* stack. All targets that reenter must return absolute verdicts.
*/
e = get_entry(table_base, private->hook_entry[hook]);
acpar.net = state->net;
acpar.in = state->in;
acpar.out = state->out;
acpar.hooknum = hook;
acpar.family = NFPROTO_ARP;
acpar.hotdrop = false;
arp = arp_hdr(skb);
do {
const struct xt_entry_target *t;
struct xt_counters *counter;
if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
e = arpt_next_entry(e);
continue;
}
counter = xt_get_this_cpu_counter(&e->counters);
ADD_COUNTER(*counter, arp_hdr_len(skb->dev), 1);
t = arpt_get_target_c(e);
/* Standard target? */
if (!t->u.kernel.target->target) {
int v;
v = ((struct xt_standard_target *)t)->verdict;
if (v < 0) {
/* Pop from stack? */
if (v != XT_RETURN) {
verdict = (unsigned int)(-v) - 1;
break;
}
/* RETURN from the base chain falls through to the
 * hook's underflow (policy) entry. */
if (stackidx == 0) {
e = get_entry(table_base,
private->underflow[hook]);
} else {
e = jumpstack[--stackidx];
e = arpt_next_entry(e);
}
continue;
}
/* Non-tail jump: remember where to come back to. */
if (table_base + v
!= arpt_next_entry(e)) {
jumpstack[stackidx++] = e;
}
e = get_entry(table_base, v);
continue;
}
acpar.target = t->u.kernel.target;
acpar.targinfo = t->data;
verdict = t->u.kernel.target->target(skb, &acpar);
/* Target might have changed stuff. */
arp = arp_hdr(skb);
if (verdict == XT_CONTINUE)
e = arpt_next_entry(e);
else
/* Verdict */
break;
} while (!acpar.hotdrop);
xt_write_recseq_end(addend);
local_bh_enable();
if (acpar.hotdrop)
return NF_DROP;
else
return verdict;
} | 0 | [
"CWE-119"
]
| nf-next | d7591f0c41ce3e67600a982bab6989ef0f07b3ce | 330,777,305,789,051,370,000,000,000,000,000,000,000 | 111 | netfilter: x_tables: introduce and use xt_copy_counters_from_user
The three variants use same copy&pasted code, condense this into a
helper and use that.
Make sure info.name is 0-terminated.
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
/*
 * Dissect a SPOOLSS NOTIFY_INFO structure: a version field, flags, an
 * element count, then a conformant array of NOTIFY_INFO_DATA elements.
 * On the non-conformant NDR pass the element count is appended to the
 * INFO column.  Returns the offset past the structure.
 */
dissect_NOTIFY_INFO(tvbuff_t *tvb, int offset, packet_info *pinfo,
proto_tree *tree, dcerpc_info *di, guint8 *drep)
{
guint32 count;
offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep,
hf_notify_info_version, NULL);
offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep,
hf_notify_info_flags, NULL);
offset = dissect_ndr_uint32(tvb, offset, pinfo, tree, di, drep,
hf_notify_info_count, &count);
if (!di->conformant_run)
col_append_fstr(
pinfo->cinfo, COL_INFO, ", %d %s", count,
notify_plural(count));
offset = dissect_ndr_ucarray(tvb, offset, pinfo, tree, di, drep,
dissect_NOTIFY_INFO_DATA);
return offset;
} | 0 | [
"CWE-399"
]
| wireshark | b4d16b4495b732888e12baf5b8a7e9bf2665e22b | 334,928,523,720,276,900,000,000,000,000,000,000,000 | 24 | SPOOLSS: Try to avoid an infinite loop.
Use tvb_reported_length_remaining in dissect_spoolss_uint16uni. Make
sure our offset always increments in dissect_spoolss_keybuffer.
Change-Id: I7017c9685bb2fa27161d80a03b8fca4ef630e793
Reviewed-on: https://code.wireshark.org/review/14687
Reviewed-by: Gerald Combs <[email protected]>
Petri-Dish: Gerald Combs <[email protected]>
Tested-by: Petri Dish Buildbot <[email protected]>
Reviewed-by: Michael Mann <[email protected]> |
/*
 * Encode and transmit an MQTT DISCONNECT packet.
 *
 * @param client     Client context (required; its shared TX buffer is
 *                   used for encoding).
 * @param disconnect Optional disconnect parameters; may be NULL.  On
 *                   MQTT v5 builds the client's negotiated protocol
 *                   level is stamped into it before encoding.
 * @return MQTT_CODE_SUCCESS, a negative MQTT_CODE_ERROR_* code, or a
 *         short-write byte count from MqttPacket_Write.  No response
 *         packet is awaited -- DISCONNECT has none.
 *
 * On multithreaded builds the TX path is serialized via lockSend.
 */
int MqttClient_Disconnect_ex(MqttClient *client, MqttDisconnect *disconnect)
{
int rc, len;
/* Validate required arguments */
if (client == NULL) {
return MQTT_CODE_ERROR_BAD_ARG;
}
#ifdef WOLFMQTT_V5
if (disconnect != NULL) {
/* Use specified protocol version if set */
disconnect->protocol_level = client->protocol_level;
}
#endif
#ifdef WOLFMQTT_MULTITHREAD
/* Lock send socket mutex */
rc = wm_SemLock(&client->lockSend);
if (rc != 0) {
return rc;
}
#endif
/* Encode the disconnect packet */
rc = MqttEncode_Disconnect(client->tx_buf, client->tx_buf_len, disconnect);
#ifdef WOLFMQTT_DEBUG_CLIENT
PRINTF("MqttClient_EncodePacket: Len %d, Type %s (%d), ID %d, QoS %d",
rc, MqttPacket_TypeDesc(MQTT_PACKET_TYPE_DISCONNECT),
MQTT_PACKET_TYPE_DISCONNECT, 0, 0);
#endif
/* Encode failure: release the lock before propagating the error. */
if (rc <= 0) {
#ifdef WOLFMQTT_MULTITHREAD
wm_SemUnlock(&client->lockSend);
#endif
return rc;
}
len = rc;
/* Send disconnect packet */
rc = MqttPacket_Write(client, client->tx_buf, len);
#ifdef WOLFMQTT_MULTITHREAD
wm_SemUnlock(&client->lockSend);
#endif
/* A partial write (or error code) is returned to the caller as-is. */
if (rc != len) {
return rc;
}
/* No response for MQTT disconnect packet */
return MQTT_CODE_SUCCESS;
} | 0 | [
"CWE-787"
]
| wolfMQTT | 84d4b53122e0fa0280c7872350b89d5777dabbb2 | 272,833,391,788,824,500,000,000,000,000,000,000,000 | 52 | Fix wolfmqtt-fuzzer: Null-dereference WRITE in MqttProps_Free |
/* Map a keymap name (e.g. "emacs", "vi-command") to its Keymap via a
   case-insensitive linear search of the keymap_names[] table; returns
   NULL when the name is unknown.  (Old-style K&R definition; the
   return type is declared on the preceding line, outside this
   excerpt.) */
rl_get_keymap_by_name (name)
const char *name;
{
register int i;
for (i = 0; keymap_names[i].name; i++)
if (_rl_stricmp (name, keymap_names[i].name) == 0)
return (keymap_names[i].map);
return ((Keymap) NULL);
} | 0 | []
| bash | 955543877583837c85470f7fb8a97b7aa8d45e6c | 170,265,107,986,587,040,000,000,000,000,000,000,000 | 10 | bash-4.4-rc2 release |
/* YARA module unload hook.  This module keeps no per-scan state, so
 * there is nothing to release here; report success unconditionally. */
int module_unload(
    YR_OBJECT* module_object)
{
  (void) module_object;  /* unused */

  return ERROR_SUCCESS;
} | 0 | [
"CWE-416"
]
| yara | 053e67e3ec81cc9268ce30eaf0d6663d8639ed1e | 260,761,590,134,713,870,000,000,000,000,000,000,000 | 5 | Fix issue #658 |
/*
 * One-time vDSO initialization: locate the required sections in the
 * 32-bit (and, on CONFIG_PPC64, 64-bit) vDSO images, patch in the
 * datapage reference, apply CPU-feature fixups and alternative
 * function replacements, then wire up the signal trampolines.
 *
 * Returns 0 on success, -1 if any fixup step fails.
 */
static __init int vdso_setup(void)
{
struct lib32_elfinfo v32;
struct lib64_elfinfo v64;
v32.hdr = vdso32_kbase;
#ifdef CONFIG_PPC64
v64.hdr = vdso64_kbase;
#endif
if (vdso_do_find_sections(&v32, &v64))
return -1;
if (vdso_fixup_datapage(&v32, &v64))
return -1;
if (vdso_fixup_features(&v32, &v64))
return -1;
if (vdso_fixup_alt_funcs(&v32, &v64))
return -1;
vdso_setup_trampolines(&v32, &v64);
return 0;
} | 0 | [
"CWE-20"
]
| linux-2.6 | 89f5b7da2a6bad2e84670422ab8192382a5aeb9f | 12,079,784,168,080,658,000,000,000,000,000,000,000 | 25 | Reinstate ZERO_PAGE optimization in 'get_user_pages()' and fix XIP
KAMEZAWA Hiroyuki and Oleg Nesterov point out that since the commit
557ed1fa2620dc119adb86b34c614e152a629a80 ("remove ZERO_PAGE") removed
the ZERO_PAGE from the VM mappings, any users of get_user_pages() will
generally now populate the VM with real empty pages needlessly.
We used to get the ZERO_PAGE when we did the "handle_mm_fault()", but
since fault handling no longer uses ZERO_PAGE for new anonymous pages,
we now need to handle that special case in follow_page() instead.
In particular, the removal of ZERO_PAGE effectively removed the core
file writing optimization where we would skip writing pages that had not
been populated at all, and increased memory pressure a lot by allocating
all those useless newly zeroed pages.
This reinstates the optimization by making the unmapped PTE case the
same as for a non-existent page table, which already did this correctly.
While at it, this also fixes the XIP case for follow_page(), where the
caller could not differentiate between the case of a page that simply
could not be used (because it had no "struct page" associated with it)
and a page that just wasn't mapped.
We do that by simply returning an error pointer for pages that could not
be turned into a "struct page *". The error is arbitrarily picked to be
EFAULT, since that was what get_user_pages() already used for the
equivalent IO-mapped page case.
[ Also removed an impossible test for pte_offset_map_lock() failing:
that's not how that function works ]
Acked-by: Oleg Nesterov <[email protected]>
Acked-by: Nick Piggin <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Roland McGrath <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
/* rawmidi input-substream close callback; delegates to
 * substream_open() -- the (1, 0) arguments presumably select input
 * direction and "close" respectively; confirm against substream_open's
 * definition. */
static int snd_usbmidi_input_close(struct snd_rawmidi_substream *substream)
{
return substream_open(substream, 1, 0);
} | 0 | [
"CWE-703"
]
| linux | 07d86ca93db7e5cdf4743564d98292042ec21af7 | 291,231,142,032,909,150,000,000,000,000,000,000,000 | 4 | ALSA: usb-audio: avoid freeing umidi object twice
The 'umidi' object will be free'd on the error path by snd_usbmidi_free()
when tearing down the rawmidi interface. So we shouldn't try to free it
in snd_usbmidi_create() after having registered the rawmidi interface.
Found by KASAN.
Signed-off-by: Andrey Konovalov <[email protected]>
Acked-by: Clemens Ladisch <[email protected]>
Cc: <[email protected]>
Signed-off-by: Takashi Iwai <[email protected]> |
/* Attach anchors for every match of regex `re` in buffer `buf`; thin
 * wrapper over reAnchorAny() using the _put_anchor_all callback.
 * (Return type is declared on the preceding line, outside this
 * excerpt.) */
reAnchor(Buffer *buf, char *re)
{
return reAnchorAny(buf, re, _put_anchor_all);
} | 0 | [
"CWE-119"
]
| w3m | 4e464819dd360ffd3d58fa2a89216fe413cfcc74 | 197,595,415,785,206,400,000,000,000,000,000,000,000 | 4 | Prevent segfault due to buffer overflows in addMultirowsForm
Bug-Debian: https://github.com/tats/w3m/issues/21
Bug-Debian: https://github.com/tats/w3m/issues/26 |
/*
 * Shell-style wildcard match of string `s` against pattern `p`.
 * Metacharacters: `?` (any single char), `*` (any run; a non-trailing
 * star is handled by the sibling isdn_star(), defined elsewhere),
 * `[...]` / `[^...]` character classes with `-` ranges, and `\` to
 * escape the next pattern character.
 *
 * Return convention (shared with isdn_star): 0 appears to mean match,
 * 1 mismatch, 2 pattern longer than the string; note the odd special
 * case that two empty strings return 1 -- presumably deliberate for
 * the callers' "nonzero = no match" usage; confirm before relying on
 * the exact codes.
 */
isdn_wildmat(char *s, char *p)
{
register int last;
register int matched;
register int reverse;
register int nostar = 1;
if (!(*s) && !(*p))
return (1);
for (; *p; s++, p++)
switch (*p) {
case '\\':
/*
* Literal match with following character,
* fall through.
*/
p++;
default:
if (*s != *p)
return (*s == '\0') ? 2 : 1;
continue;
case '?':
/* Match anything. */
if (*s == '\0')
return (2);
continue;
case '*':
nostar = 0;
/* Trailing star matches everything. */
return (*++p ? isdn_star(s, p) : 0);
case '[':
/* [^....] means inverse character class. */
if ((reverse = (p[1] == '^')))
p++;
for (last = 0, matched = 0; *++p && (*p != ']'); last = *p)
/* This next line requires a good C compiler. */
if (*p == '-' ? *s <= *++p && *s >= last : *s == *p)
matched = 1;
if (matched == reverse)
return (1);
continue;
}
return (*s == '\0') ? 0 : nostar;
} | 0 | [
"CWE-119"
]
| linux | 9f5af546e6acc30f075828cb58c7f09665033967 | 206,530,384,214,071,300,000,000,000,000,000,000,000 | 44 | isdn/i4l: fix buffer overflow
This fixes a potential buffer overflow in isdn_net.c caused by an
unbounded strcpy.
[ ISDN seems to be effectively unmaintained, and the I4L driver in
particular is long deprecated, but in case somebody uses this..
- Linus ]
Signed-off-by: Jiten Thakkar <[email protected]>
Signed-off-by: Annie Cherkaev <[email protected]>
Cc: Karsten Keil <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: [email protected]
Signed-off-by: Linus Torvalds <[email protected]> |
/*
 * Render an NTP filestamp (seconds since 1900) as a "YYYYMMDDhhmm"
 * string in a library-managed rotating buffer (LIB_GETBUF), converting
 * via the Unix epoch (JAN_1970 offset) and gmtime().  On gmtime()
 * failure the buffer holds a short error message instead of a date.
 */
char * fstostr(
time_t ntp_stamp
)
{
char * buf;
struct tm * tm;
time_t unix_stamp;
LIB_GETBUF(buf);
/* NTP era starts 1900; subtract the 1900..1970 offset for time_t. */
unix_stamp = ntp_stamp - JAN_1970;
tm = gmtime(&unix_stamp);
if (NULL == tm)
#ifdef WAIT_FOR_NTP_CRYPTO_C_CALLERS_ABLE_TO_HANDLE_MORE_THAN_20_CHARS
msnprintf(buf, LIB_BUFLENGTH, "gmtime: %m");
#else
strncpy(buf, "gmtime() error", LIB_BUFLENGTH);
#endif
else
snprintf(buf, LIB_BUFLENGTH, "%04d%02d%02d%02d%02d",
tm->tm_year + 1900, tm->tm_mon + 1,
tm->tm_mday, tm->tm_hour, tm->tm_min);
return buf;
} | 0 | [
"CWE-20"
]
| ntp | 52e977d79a0c4ace997e5c74af429844da2f27be | 317,755,885,234,709,600,000,000,000,000,000,000,000 | 24 | [Bug 1773] openssl not detected during ./configure.
[Bug 1774] Segfaults if cryptostats enabled and built without OpenSSL. |
/* Per-line callback for the seamless virtual channel: hand the line to
 * seamless_process_line() and log a warning for malformed requests.
 * Always returns True so subsequent lines keep being processed.
 * (Return type is declared on the preceding line, outside this
 * excerpt.) */
seamless_line_handler(const char *line, void *data)
{
if (!seamless_process_line(line, data))
{
logger(Core, Warning, "seamless_line_handler(), invalid request '%s'", line);
}
return True;
} | 0 | [
"CWE-119",
"CWE-125",
"CWE-703",
"CWE-787"
]
| rdesktop | 4dca546d04321a610c1835010b5dad85163b65e1 | 135,174,561,279,633,180,000,000,000,000,000,000,000 | 8 | Malicious RDP server security fixes
This commit includes fixes for a set of 21 vulnerabilities in
rdesktop when a malicious RDP server is used.
All vulnerabilities was identified and reported by Eyal Itkin.
* Add rdp_protocol_error function that is used in several fixes
* Refactor of process_bitmap_updates
* Fix possible integer overflow in s_check_rem() on 32bit arch
* Fix memory corruption in process_bitmap_data - CVE-2018-8794
* Fix remote code execution in process_bitmap_data - CVE-2018-8795
* Fix remote code execution in process_plane - CVE-2018-8797
* Fix Denial of Service in mcs_recv_connect_response - CVE-2018-20175
* Fix Denial of Service in mcs_parse_domain_params - CVE-2018-20175
* Fix Denial of Service in sec_parse_crypt_info - CVE-2018-20176
* Fix Denial of Service in sec_recv - CVE-2018-20176
* Fix minor information leak in rdpdr_process - CVE-2018-8791
* Fix Denial of Service in cssp_read_tsrequest - CVE-2018-8792
* Fix remote code execution in cssp_read_tsrequest - CVE-2018-8793
* Fix Denial of Service in process_bitmap_data - CVE-2018-8796
* Fix minor information leak in rdpsnd_process_ping - CVE-2018-8798
* Fix Denial of Service in process_secondary_order - CVE-2018-8799
* Fix remote code execution in in ui_clip_handle_data - CVE-2018-8800
* Fix major information leak in ui_clip_handle_data - CVE-2018-20174
* Fix memory corruption in rdp_in_unistr - CVE-2018-20177
* Fix Denial of Service in process_demand_active - CVE-2018-20178
* Fix remote code execution in lspci_process - CVE-2018-20179
* Fix remote code execution in rdpsnddbg_process - CVE-2018-20180
* Fix remote code execution in seamless_process - CVE-2018-20181
* Fix remote code execution in seamless_process_line - CVE-2018-20182 |
Convert a mime-encoded text to UTF-8 */
PHP_FUNCTION(imap_utf8)
{
zend_string *str;
SIZEDTEXT src, dest;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "S", &str) == FAILURE) {
return;
}
/* c-client requires both SIZEDTEXTs zeroed before conversion. */
src.data = NULL;
src.size = 0;
dest.data = NULL;
dest.size = 0;
cpytxt(&src, ZSTR_VAL(str), ZSTR_LEN(str));
/* utf8_mime2text() grew an extra flags argument in newer c-client
 * versions; U8T_DECOMPOSE requests canonical decomposition there. */
#ifndef HAVE_NEW_MIME2TEXT
utf8_mime2text(&src, &dest);
#else
utf8_mime2text(&src, &dest, U8T_DECOMPOSE);
#endif
RETVAL_STRINGL((char*)dest.data, dest.size);
/* Free the c-client buffers; when no conversion was needed dest may
 * alias src, so guard against a double free. */
if (dest.data) {
free(dest.data);
}
if (src.data && src.data != dest.data) {
free(src.data);
} | 0 | [
"CWE-88"
]
| php-src | 336d2086a9189006909ae06c7e95902d7d5ff77e | 320,095,756,136,916,880,000,000,000,000,000,000,000 | 29 | Disable rsh/ssh functionality in imap by default (bug #77153) |
/*
 * Return the memslot set that applies to @vcpu: resolve the vCPU's
 * address-space id, then index into its VM's per-address-space
 * memslots.
 */
static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	return __kvm_memslots(vcpu->kvm, kvm_arch_vcpu_memslots_id(vcpu));
} | 0 | [
"CWE-416"
]
| linux | 0774a964ef561b7170d8d1b1bfe6f88002b6d219 | 30,121,393,938,396,300,000,000,000,000,000,000,000 | 6 | KVM: Fix out of range accesses to memslots
Reset the LRU slot if it becomes invalid when deleting a memslot to fix
an out-of-bounds/use-after-free access when searching through memslots.
Explicitly check for there being no used slots in search_memslots(), and
in the caller of s390's approximation variant.
Fixes: 36947254e5f9 ("KVM: Dynamically size memslot array based on number of used slots")
Reported-by: Qian Cai <[email protected]>
Cc: Peter Xu <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-Id: <[email protected]>
Acked-by: Christian Borntraeger <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
/* Return an EVP_PKEY resource for a private key.  The key argument is
 * resolved by php_openssl_evp_from_zval() (presumably accepting PEM
 * strings, "file://" paths, and existing key/cert resources -- see that
 * helper); the optional passphrase decrypts protected keys.  Returns
 * the key resource, or FALSE when the key cannot be loaded. */
PHP_FUNCTION(openssl_pkey_get_private)
{
zval *cert;
EVP_PKEY *pkey;
char * passphrase = "";
size_t passphrase_len = sizeof("")-1;
zend_resource *res;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "z|s", &cert, &passphrase, &passphrase_len) == FAILURE) {
return;
}
/* Guard the size_t -> int narrowing expected by the OpenSSL API. */
PHP_OPENSSL_CHECK_SIZE_T_TO_INT(passphrase_len, passphrase);
pkey = php_openssl_evp_from_zval(cert, 0, passphrase, passphrase_len, 1, &res);
if (pkey == NULL) {
RETURN_FALSE;
}
/* The resource already owns the key; add a ref for the return value. */
ZVAL_RES(return_value, res);
Z_ADDREF_P(return_value);
} | 0 | [
/* Last scanline covered by the line buffer containing scanline y: one
 * buffer spans linesInLineBuffer consecutive scanlines starting at
 * lineBufferMinY().  (Return type is declared on the preceding line,
 * outside this excerpt.) */
lineBufferMaxY (int y, int minY, int linesInLineBuffer)
{
return lineBufferMinY (y, minY, linesInLineBuffer) + linesInLineBuffer - 1;
} | 0 | [
"CWE-125"
]
| openexr | e79d2296496a50826a15c667bf92bdc5a05518b4 | 109,449,747,077,706,340,000,000,000,000,000,000,000 | 4 | fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]> |
static int mounts_open_common(struct inode *inode, struct file *file,
const struct seq_operations *op)
{
struct task_struct *task = get_proc_task(inode);
struct nsproxy *nsp;
struct mnt_namespace *ns = NULL;
struct path root;
struct proc_mounts *p;
int ret = -EINVAL;
if (task) {
rcu_read_lock();
nsp = task_nsproxy(task);
if (nsp) {
ns = nsp->mnt_ns;
if (ns)
get_mnt_ns(ns);
}
rcu_read_unlock();
if (ns && get_fs_path(task, &root, 1) == 0)
ret = 0;
put_task_struct(task);
}
if (!ns)
goto err;
if (ret)
goto err_put_ns;
ret = -ENOMEM;
p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
if (!p)
goto err_put_path;
file->private_data = &p->m;
ret = seq_open(file, op);
if (ret)
goto err_free;
p->m.private = p;
p->ns = ns;
p->root = root;
p->event = ns->event;
return 0;
err_free:
kfree(p);
err_put_path:
path_put(&root);
err_put_ns:
put_mnt_ns(ns);
err:
return ret;
} | 0 | [
"CWE-20",
"CWE-362",
"CWE-416"
]
| linux | 86acdca1b63e6890540fa19495cfc708beff3d8b | 282,785,981,026,639,000,000,000,000,000,000,000,000 | 55 | fix autofs/afs/etc. magic mountpoint breakage
We end up trying to kfree() nd.last.name on open("/mnt/tmp", O_CREAT)
if /mnt/tmp is an autofs direct mount. The reason is that nd.last_type
is bogus here; we want LAST_BIND for everything of that kind and we
get LAST_NORM left over from finding parent directory.
So make sure that it *is* set properly; set to LAST_BIND before
doing ->follow_link() - for normal symlinks it will be changed
by __vfs_follow_link() and everything else needs it set that way.
Signed-off-by: Al Viro <[email protected]> |
TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<TfLiteDepthToSpaceParams*>(node->builtin_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
auto data_type = output->type;
TF_LITE_ENSURE(context,
data_type == kTfLiteFloat32 || data_type == kTfLiteInt8);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
const int block_size = params->block_size;
const int input_height = input->dims->data[kHeightRank];
const int input_width = input->dims->data[kWidthRank];
const int input_channels = input->dims->data[kDepthRank];
int output_height = input_height * block_size;
int output_width = input_width * block_size;
int output_channels = input_channels / block_size / block_size;
TF_LITE_ENSURE_EQ(context, input_height, output_height / block_size);
TF_LITE_ENSURE_EQ(context, input_width, output_width / block_size);
TF_LITE_ENSURE_EQ(context, input_channels,
output_channels * block_size * block_size);
// We must update the output tensor dimensions.
// The dims storage is expected to be the same area in memory
// for both TfLiteTensor and TfLiteEvalTensor. This is important
// because TfLiteTensor in the MicroInterpreter is a temporary
// allocation. For the KernelRunner interpreter, TfLiteEvalTensor
// is a temporary allocation. We must therefore relocate the dims
// from the FlatBuffer to the persistant storage arena.
TfLiteEvalTensor* output_eval =
tflite::micro::GetEvalOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy(
context, output, output_eval));
output->dims->data[kBatchRank] = input->dims->data[kBatchRank];
output->dims->data[kHeightRank] = output_height;
output->dims->data[kWidthRank] = output_width;
output->dims->data[kDepthRank] = output_channels;
return kTfLiteOk;
} | 1 | [
"CWE-369"
]
| tensorflow | 106d8f4fb89335a2c52d7c895b7a7485465ca8d9 | 216,460,474,948,037,200,000,000,000,000,000,000,000 | 51 | Prevent division by 0 in TFLite
PiperOrigin-RevId: 370800311
Change-Id: I21ccdbd31c30118acc67df8751807ee2e0b12f91 |
static MagickBooleanType load_tile(Image *image,Image *tile_image,
XCFDocInfo *inDocInfo,XCFLayerInfo *inLayerInfo,size_t data_length,
ExceptionInfo *exception)
{
ssize_t
y;
register ssize_t
x;
register Quantum
*q;
size_t
extent;
ssize_t
count;
unsigned char
*graydata;
XCFPixelInfo
*xcfdata,
*xcfodata;
extent=0;
if (inDocInfo->image_type == GIMP_GRAY)
extent=tile_image->columns*tile_image->rows*sizeof(*graydata);
else
if (inDocInfo->image_type == GIMP_RGB)
extent=tile_image->columns*tile_image->rows*sizeof(*xcfdata);
if (extent > data_length)
ThrowBinaryException(CorruptImageError,"NotEnoughPixelData",
image->filename);
xcfdata=(XCFPixelInfo *) AcquireQuantumMemory(MagickMax(data_length,
tile_image->columns*tile_image->rows),sizeof(*xcfdata));
if (xcfdata == (XCFPixelInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
xcfodata=xcfdata;
graydata=(unsigned char *) xcfdata; /* used by gray and indexed */
count=ReadBlob(image,data_length,(unsigned char *) xcfdata);
if (count != (ssize_t) data_length)
{
xcfodata=(XCFPixelInfo *) RelinquishMagickMemory(xcfodata);
ThrowBinaryException(CorruptImageError,"NotEnoughPixelData",
image->filename);
}
for (y=0; y < (ssize_t) tile_image->rows; y++)
{
q=GetAuthenticPixels(tile_image,0,y,tile_image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
if (inDocInfo->image_type == GIMP_GRAY)
{
for (x=0; x < (ssize_t) tile_image->columns; x++)
{
SetPixelGray(tile_image,ScaleCharToQuantum(*graydata),q);
SetPixelAlpha(tile_image,ScaleCharToQuantum((unsigned char)
inLayerInfo->alpha),q);
graydata++;
q+=GetPixelChannels(tile_image);
}
}
else
if (inDocInfo->image_type == GIMP_RGB)
{
for (x=0; x < (ssize_t) tile_image->columns; x++)
{
SetPixelRed(tile_image,ScaleCharToQuantum(xcfdata->red),q);
SetPixelGreen(tile_image,ScaleCharToQuantum(xcfdata->green),q);
SetPixelBlue(tile_image,ScaleCharToQuantum(xcfdata->blue),q);
SetPixelAlpha(tile_image,xcfdata->alpha == 255U ? TransparentAlpha :
ScaleCharToQuantum((unsigned char) inLayerInfo->alpha),q);
xcfdata++;
q+=GetPixelChannels(tile_image);
}
}
if (SyncAuthenticPixels(tile_image,exception) == MagickFalse)
break;
}
xcfodata=(XCFPixelInfo *) RelinquishMagickMemory(xcfodata);
return MagickTrue;
} | 0 | [
"CWE-770"
]
| ImageMagick | 19dbe11c5060f66abb393d1945107c5f54894fa8 | 73,372,050,219,339,550,000,000,000,000,000,000,000 | 85 | https://github.com/ImageMagick/ImageMagick/issues/679 |
plugin_can_trim (struct backend *b, struct connection *conn)
{
struct backend_plugin *p = container_of (b, struct backend_plugin, backend);
assert (connection_get_handle (conn, 0));
if (p->plugin.can_trim)
return p->plugin.can_trim (connection_get_handle (conn, 0));
else
return p->plugin.trim || p->plugin._trim_old;
} | 0 | [
"CWE-406"
]
| nbdkit | a6b88b195a959b17524d1c8353fd425d4891dc5f | 298,365,241,949,099,920,000,000,000,000,000,000,000 | 11 | server: Fix regression for NBD_OPT_INFO before NBD_OPT_GO
Most known NBD clients do not bother with NBD_OPT_INFO (except for
clients like 'qemu-nbd --list' that don't ever intend to connect), but
go straight to NBD_OPT_GO. However, it's not too hard to hack up qemu
to add in an extra client step (whether info on the same name, or more
interestingly, info on a different name), as a patch against qemu
commit 6f214b30445:
| diff --git i/nbd/client.c w/nbd/client.c
| index f6733962b49b..425292ac5ea9 100644
| --- i/nbd/client.c
| +++ w/nbd/client.c
| @@ -1038,6 +1038,14 @@ int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
| * TLS). If it is not available, fall back to
| * NBD_OPT_LIST for nicer error messages about a missing
| * export, then use NBD_OPT_EXPORT_NAME. */
| + if (getenv ("HACK"))
| + info->name[0]++;
| + result = nbd_opt_info_or_go(ioc, NBD_OPT_INFO, info, errp);
| + if (getenv ("HACK"))
| + info->name[0]--;
| + if (result < 0) {
| + return -EINVAL;
| + }
| result = nbd_opt_info_or_go(ioc, NBD_OPT_GO, info, errp);
| if (result < 0) {
| return -EINVAL;
This works just fine in 1.14.0, where we call .open only once (so the
INFO and GO repeat calls into the same plugin handle), but in 1.14.1
it regressed into causing an assertion failure: we are now calling
.open a second time on a connection that is already opened:
$ nbdkit -rfv null &
$ hacked-qemu-io -f raw -r nbd://localhost -c quit
...
nbdkit: null[1]: debug: null: open readonly=1
nbdkit: backend.c:179: backend_open: Assertion `h->handle == NULL' failed.
Worse, on the mainline development, we have recently made it possible
for plugins to actively report different information for different
export names; for example, a plugin may choose to report different
answers for .can_write on export A than for export B; but if we share
cached handles, then an NBD_OPT_INFO on one export prevents correct
answers for NBD_OPT_GO on the second export name. (The HACK envvar in
my qemu modifications can be used to demonstrate cross-name requests,
which are even less likely in a real client).
The solution is to call .close after NBD_OPT_INFO, coupled with enough
glue logic to reset cached connection handles back to the state
expected by .open. This in turn means factoring out another backend_*
function, but also gives us an opportunity to change
backend_set_handle to no longer accept NULL.
The assertion failure is, to some extent, a possible denial of service
attack (one client can force nbdkit to exit by merely sending OPT_INFO
before OPT_GO, preventing the next client from connecting), although
this is mitigated by using TLS to weed out untrusted clients. Still,
the fact that we introduced a potential DoS attack while trying to fix
a traffic amplification security bug is not very nice.
Sadly, as there are no known clients that easily trigger this mode of
operation (OPT_INFO before OPT_GO), there is no easy way to cover this
via a testsuite addition. I may end up hacking something into libnbd.
Fixes: c05686f957
Signed-off-by: Eric Blake <[email protected]> |
term_push_cmd(char c)
{
uint new_size;
/* Need 1 more for null byte */
if (term.cmd_len + 1 < term.cmd_buf_cap) {
term.cmd_buf[term.cmd_len++] = c;
term.cmd_buf[term.cmd_len] = 0;
return true;
}
if (term.cmd_buf_cap >= TERM_CMD_BUF_MAX_SIZE) {
/* Server sends too many cmd characters */
return false;
}
new_size = term.cmd_buf_cap + TERM_CMD_BUF_INC_STEP;
if (new_size >= TERM_CMD_BUF_MAX_SIZE) {
// cosmetic limitation (relevant limitation above)
new_size = TERM_CMD_BUF_MAX_SIZE;
}
term.cmd_buf = renewn(term.cmd_buf, new_size);
term.cmd_buf_cap = new_size;
term.cmd_buf[term.cmd_len++] = c;
term.cmd_buf[term.cmd_len] = 0;
return true;
} | 0 | [
"CWE-703",
"CWE-770"
]
| mintty | bd52109993440b6996760aaccb66e68e782762b9 | 18,974,588,239,600,753,000,000,000,000,000,000,000 | 26 | tame some window operations, just in case |
const char* ExpressionConcatArrays::getOpName() const {
return "$concatArrays";
} | 0 | [
"CWE-835"
]
| mongo | 0a076417d1d7fba3632b73349a1fd29a83e68816 | 99,870,958,343,605,230,000,000,000,000,000,000,000 | 3 | SERVER-38070 fix infinite loop in agg expression |
static Register parseReg(RAsm *a, const char *str, size_t *pos, ut32 *type) {
int i;
// Must be the same order as in enum register_t
const char *regs[] = { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi", "eip", NULL };
const char *regsext[] = { "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d", NULL };
const char *regs8[] = { "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh", NULL };
const char *regs16[] = { "ax", "cx", "dx", "bx", "sp", "bp", "si", "di", NULL };
const char *regs64[] = { "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", "rip", NULL};
const char *regs64ext[] = { "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", NULL };
const char *sregs[] = { "es", "cs", "ss", "ds", "fs", "gs", NULL};
// Get token (especially the length)
size_t nextpos, length;
const char *token;
getToken (str, pos, &nextpos);
token = str + *pos;
length = nextpos - *pos;
*pos = nextpos;
// General purpose registers
if (length == 3 && token[0] == 'e') {
for (i = 0; regs[i]; i++) {
if (!r_str_ncasecmp (regs[i], token, length)) {
*type = (OT_GPREG & OT_REG (i)) | OT_DWORD;
return i;
}
}
}
if (length == 2 && (token[1] == 'l' || token[1] == 'h')) {
for (i = 0; regs8[i]; i++) {
if (!r_str_ncasecmp (regs8[i], token, length)) {
*type = (OT_GPREG & OT_REG (i)) | OT_BYTE;
return i;
}
}
}
if (length == 2) {
for (i = 0; regs16[i]; i++) {
if (!r_str_ncasecmp (regs16[i], token, length)) {
*type = (OT_GPREG & OT_REG (i)) | OT_WORD;
return i;
}
}
// This isn't working properly yet
for (i = 0; sregs[i]; i++) {
if (!r_str_ncasecmp (sregs[i], token, length)) {
*type = (OT_SEGMENTREG & OT_REG (i)) | OT_WORD;
return i;
}
}
}
if (token[0] == 'r') {
for (i = 0; regs64[i]; i++) {
if (!r_str_ncasecmp (regs64[i], token, length)) {
*type = (OT_GPREG & OT_REG (i)) | OT_QWORD;
a->bits = 64;
return i;
}
}
for (i = 0; regs64ext[i]; i++) {
if (!r_str_ncasecmp (regs64ext[i], token, length)) {
*type = (OT_GPREG & OT_REG (i)) | OT_QWORD;
a->bits = 64;
return i + 9;
}
}
for (i = 0; regsext[i]; i++) {
if (!r_str_ncasecmp (regsext[i], token, length)) {
*type = (OT_GPREG & OT_REG (i)) | OT_DWORD;
if (a->bits < 32) {
a->bits = 32;
}
return i + 9;
}
}
}
// Extended registers
if (!r_str_ncasecmp ("st", token, 2)) {
*type = (OT_FPUREG & ~OT_REGALL);
*pos = 3;
}
if (!r_str_ncasecmp ("mm", token, 2)) {
*type = (OT_MMXREG & ~OT_REGALL);
*pos = 3;
}
if (!r_str_ncasecmp ("xmm", token, 3)) {
*type = (OT_XMMREG & ~OT_REGALL);
*pos = 4;
}
// Now read number, possibly with parantheses
if (*type & (OT_FPUREG | OT_MMXREG | OT_XMMREG) & ~OT_REGALL) {
Register reg = X86R_UNDEFINED;
// pass by '(',if there is one
if (getToken (str, pos, &nextpos) == TT_SPECIAL && str[*pos] == '(') {
*pos = nextpos;
}
// read number
// const int maxreg = (a->bits == 64) ? 15 : 7;
if (getToken (str, pos, &nextpos) != TT_NUMBER ||
(reg = getnum (a, str + *pos)) > 7) {
if ((int)reg > 15) {
eprintf ("Too large register index!\n");
return X86R_UNDEFINED;
} else {
reg -= 8;
}
}
*pos = nextpos;
// pass by ')'
if (getToken (str, pos, &nextpos) == TT_SPECIAL && str[*pos] == ')') {
*pos = nextpos;
}
// Safety to prevent a shift bigger than 31. Reg
// should never be > 8 anyway
if (reg > 7) {
eprintf ("Too large register index!\n");
return X86R_UNDEFINED;
}
*type |= (OT_REG (reg) & ~OT_REGTYPE);
return reg;
}
return X86R_UNDEFINED;
} | 0 | [
"CWE-119",
"CWE-125",
"CWE-787"
]
| radare2 | 9b46d38dd3c4de6048a488b655c7319f845af185 | 88,952,055,943,497,730,000,000,000,000,000,000,000 | 130 | Fix #12372 and #12373 - Crash in x86 assembler (#12380)
0 ,0,[bP-bL-bP-bL-bL-r-bL-bP-bL-bL-
mov ,0,[ax+Bx-ax+Bx-ax+ax+Bx-ax+Bx--
leA ,0,[bP-bL-bL-bP-bL-bP-bL-60@bL-
leA ,0,[bP-bL-r-bP-bL-bP-bL-60@bL-
mov ,0,[ax+Bx-ax+Bx-ax+ax+Bx-ax+Bx-- |
xdr_chpass3_arg(XDR *xdrs, chpass3_arg *objp)
{
if (!xdr_ui_4(xdrs, &objp->api_version)) {
return (FALSE);
}
if (!xdr_krb5_principal(xdrs, &objp->princ)) {
return (FALSE);
}
if (!xdr_krb5_boolean(xdrs, &objp->keepold)) {
return (FALSE);
}
if (!xdr_array(xdrs, (caddr_t *)&objp->ks_tuple,
(unsigned int*)&objp->n_ks_tuple, ~0,
sizeof(krb5_key_salt_tuple),
xdr_krb5_key_salt_tuple)) {
return (FALSE);
}
if (!xdr_nullstring(xdrs, &objp->pass)) {
return (FALSE);
}
return (TRUE);
} | 0 | [
"CWE-703"
]
| krb5 | a197e92349a4aa2141b5dff12e9dd44c2a2166e3 | 272,619,175,519,179,750,000,000,000,000,000,000,000 | 22 | Fix kadm5/gssrpc XDR double free [CVE-2014-9421]
[MITKRB5-SA-2015-001] In auth_gssapi_unwrap_data(), do not free
partial deserialization results upon failure to deserialize. This
responsibility belongs to the callers, svctcp_getargs() and
svcudp_getargs(); doing it in the unwrap function results in freeing
the results twice.
In xdr_krb5_tl_data() and xdr_krb5_principal(), null out the pointers
we are freeing, as other XDR functions such as xdr_bytes() and
xdr_string().
ticket: 8056 (new)
target_version: 1.13.1
tags: pullup |
static int calcstepsizes(uint_fast16_t refstepsize, int numrlvls,
uint_fast16_t *stepsizes)
{
int bandno;
int numbands;
uint_fast16_t expn;
uint_fast16_t mant;
expn = JPC_QCX_GETEXPN(refstepsize);
mant = JPC_QCX_GETMANT(refstepsize);
numbands = 3 * numrlvls - 2;
for (bandno = 0; bandno < numbands; ++bandno) {
//jas_eprintf("DEBUG %d %d %d %d %d\n", bandno, expn, numrlvls, bandno, ((numrlvls - 1) - (numrlvls - 1 - ((bandno > 0) ? ((bandno + 2) / 3) : (0)))));
uint_fast16_t e = expn + (bandno + 2) / 3;
if (e >= 0x20)
return -1;
stepsizes[bandno] = JPC_QCX_MANT(mant) | JPC_QCX_EXPN(e);
}
return 0;
} | 0 | [
"CWE-617"
]
| jasper | 84d00fb29a22e360c2ff91bdc2cd81c288826bfc | 84,465,102,469,265,810,000,000,000,000,000,000,000 | 19 | jpc_dec: check for JPC_QCX_EXPN() parameter overflow
Avoid the assertion failure in the JPC_QCX_EXPN() function. While the
"expn" variable cannot be bigger than 0x1f, adding something to it may
exceed that limit.
This condition could be exploited with a malicious JP2 file, allowing
a denial of service attack on processes which parse JP2 files.
Fixes CVE-2016-9399 and CVE-2017-13751
Closes https://github.com/jasper-maint/jasper/issues/1 |
mtab_head() {
if (!got_mtab)
read_mounttable();
return &mounttable;
} | 0 | [
"CWE-399"
]
| util-linux | 4b39b6aefd5dd8ac68a92adc650dc13d5d54d704 | 334,603,814,694,525,320,000,000,000,000,000,000,000 | 5 | mount: use fflush() and temporary file for mtab updates (CVE-2011-1089)
http://thread.gmane.org/gmane.comp.security.oss.general/4374
Changes:
- force mount(8) to use /etc/mtab.tmp file every time. The original
code used the tmp file for remount/move operations only.
- call and check fflush() return code for the tmp file
Note mount(8) blocks all signals when writing to mtab, so it's not
affected by SIGXFSZ and the mtab lock file is always removed.
This patch does not fix the same issue in umount(8) and libmount.
Signed-off-by: Karel Zak <[email protected]> |
redraw_later_clear(void)
{
redraw_all_later(CLEAR);
reset_screen_attr();
} | 0 | [
"CWE-122"
]
| vim | 826bfe4bbd7594188e3d74d2539d9707b1c6a14b | 122,666,282,587,668,520,000,000,000,000,000,000,000 | 5 | patch 8.2.3487: illegal memory access if buffer name is very long
Problem: Illegal memory access if buffer name is very long.
Solution: Make sure not to go over the end of the buffer. |
gchar* tcp_follow_index_filter(int stream)
{
return g_strdup_printf("tcp.stream eq %d", stream);
} | 0 | [
"CWE-354"
]
| wireshark | 7f3fe6164a68b76d9988c4253b24d43f498f1753 | 75,536,849,908,396,590,000,000,000,000,000,000,000 | 4 | TCP: do not use an unknown status when the checksum is 0xffff
Otherwise it triggers an assert when adding the column as the field is
defined as BASE_NONE and not BASE_DEC or BASE_HEX. Thus an unknown value
(not in proto_checksum_vals[)array) cannot be represented.
Mark the checksum as bad even if we process the packet.
Closes #16816
Conflicts:
epan/dissectors/packet-tcp.c |
bool check_global_access(THD *thd, ulong want_access, bool no_errors)
{
#ifndef NO_EMBEDDED_ACCESS_CHECKS
char command[128];
if ((thd->security_ctx->master_access & want_access))
return 0;
if (unlikely(!no_errors))
{
get_privilege_desc(command, sizeof(command), want_access);
my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), command);
}
status_var_increment(thd->status_var.access_denied_errors);
return 1;
#else
return 0;
#endif
} | 0 | [
"CWE-703"
]
| server | 39feab3cd31b5414aa9b428eaba915c251ac34a2 | 149,890,143,591,257,090,000,000,000,000,000,000,000 | 17 | MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
IF an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]> |
static int chip_write_masked(struct CHIPSTATE *chip, int subaddr, int val, int mask)
{
if (mask != 0) {
if (-1 == subaddr) {
val = (chip->shadow.bytes[1] & ~mask) | (val & mask);
} else {
val = (chip->shadow.bytes[subaddr+1] & ~mask) | (val & mask);
}
}
return chip_write(chip, subaddr, val);
} | 1 | []
| linux-2.6 | 494264379d186bf806613d27aafb7d88d42f4212 | 17,523,845,146,844,268,000,000,000,000,000,000,000 | 11 | V4L/DVB (9621): Avoid writing outside shadow.bytes[] array
There were no check about the limits of shadow.bytes array. This offers
a risk of writing values outside the limits, overriding other data
areas.
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
static INLINE INT32 planar_skip_plane_rle(const BYTE* pSrcData, UINT32 SrcSize, UINT32 nWidth,
UINT32 nHeight)
{
UINT32 x, y;
BYTE controlByte;
const BYTE* pRLE = pSrcData;
const BYTE* pEnd = &pSrcData[SrcSize];
for (y = 0; y < nHeight; y++)
{
for (x = 0; x < nWidth;)
{
int cRawBytes;
int nRunLength;
if (pRLE >= pEnd)
return -1;
controlByte = *pRLE++;
nRunLength = PLANAR_CONTROL_BYTE_RUN_LENGTH(controlByte);
cRawBytes = PLANAR_CONTROL_BYTE_RAW_BYTES(controlByte);
if (nRunLength == 1)
{
nRunLength = cRawBytes + 16;
cRawBytes = 0;
}
else if (nRunLength == 2)
{
nRunLength = cRawBytes + 32;
cRawBytes = 0;
}
pRLE += cRawBytes;
x += cRawBytes;
x += nRunLength;
if (x > nWidth)
return -1;
if (pRLE > pEnd)
return -1;
}
}
return (INT32)(pRLE - pSrcData);
} | 1 | [
"CWE-125"
]
| FreeRDP | 17f547ae11835bb11baa3d045245dc1694866845 | 68,410,235,112,205,440,000,000,000,000,000,000,000 | 47 | Fixed CVE-2020-11521: Out of bounds write in planar codec.
Thanks to Sunglin and HuanGMz from Knownsec 404 |
template<typename tf, typename t>
static CImg<floatT> ellipsoid3d(CImgList<tf>& primitives,
const CImg<t>& tensor, const unsigned int subdivisions=3) {
primitives.assign();
if (!subdivisions) return CImg<floatT>();
CImg<floatT> S, V;
tensor.symmetric_eigen(S,V);
const float orient =
(V(0,1)*V(1,2) - V(0,2)*V(1,1))*V(2,0) +
(V(0,2)*V(1,0) - V(0,0)*V(1,2))*V(2,1) +
(V(0,0)*V(1,1) - V(0,1)*V(1,0))*V(2,2);
if (orient<0) { V(2,0) = -V(2,0); V(2,1) = -V(2,1); V(2,2) = -V(2,2); }
const float l0 = S[0], l1 = S[1], l2 = S[2];
CImg<floatT> vertices = sphere3d(primitives,1.0,subdivisions);
vertices.get_shared_row(0)*=l0;
vertices.get_shared_row(1)*=l1;
vertices.get_shared_row(2)*=l2;
return V*vertices; | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 40,342,077,330,890,130,000,000,000,000,000,000,000 | 18 | Fix other issues in 'CImg<T>::load_bmp()'. |
static inline void set_feature(enum features_id feature_id)
{
int slot = feature_id / 64;
uint64_t mask = 1ull << (feature_id & 63);
vrend_state.features[slot] |= mask;
} | 0 | [
"CWE-787"
]
| virglrenderer | 95e581fd181b213c2ed7cdc63f2abc03eaaa77ec | 170,557,457,730,770,300,000,000,000,000,000,000,000 | 6 | vrend: Add test to resource OOB write and fix it
v2: Also check that no depth != 1 has been send when none is due
Closes: #250
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Chia-I Wu <[email protected]> |
QualifyIpPacket(IPHeader *pIpHeader, ULONG len, BOOLEAN verifyLength)
{
tTcpIpPacketParsingResult res;
res.value = 0;
if (len < 4)
{
res.ipStatus = ppresNotIP;
return res;
}
UCHAR ver_len = pIpHeader->v4.ip_verlen;
UCHAR ip_version = (ver_len & 0xF0) >> 4;
USHORT ipHeaderSize = 0;
USHORT fullLength = 0;
res.value = 0;
if (ip_version == 4)
{
if (len < sizeof(IPv4Header))
{
res.ipStatus = ppresNotIP;
return res;
}
ipHeaderSize = (ver_len & 0xF) << 2;
fullLength = swap_short(pIpHeader->v4.ip_length);
DPrintf(3, ("ip_version %d, ipHeaderSize %d, protocol %d, iplen %d, L2 payload length %d\n",
ip_version, ipHeaderSize, pIpHeader->v4.ip_protocol, fullLength, len));
res.ipStatus = (ipHeaderSize >= sizeof(IPv4Header)) ? ppresIPV4 : ppresNotIP;
if (res.ipStatus == ppresNotIP)
{
return res;
}
if (ipHeaderSize >= fullLength || ( verifyLength && len < fullLength))
{
DPrintf(2, ("[%s] - truncated packet - ip_version %d, ipHeaderSize %d, protocol %d, iplen %d, L2 payload length %d, verify = %s\n", __FUNCTION__,
ip_version, ipHeaderSize, pIpHeader->v4.ip_protocol, fullLength, len, (verifyLength ? "true" : "false")));
res.ipCheckSum = ppresIPTooShort;
return res;
}
}
else if (ip_version == 6)
{
if (len < sizeof(IPv6Header))
{
res.ipStatus = ppresNotIP;
return res;
}
UCHAR nextHeader = pIpHeader->v6.ip6_next_header;
BOOLEAN bParsingDone = FALSE;
ipHeaderSize = sizeof(pIpHeader->v6);
res.ipStatus = ppresIPV6;
res.ipCheckSum = ppresCSOK;
fullLength = swap_short(pIpHeader->v6.ip6_payload_len);
fullLength += ipHeaderSize;
if (verifyLength && (len < fullLength))
{
res.ipStatus = ppresNotIP;
return res;
}
while (nextHeader != 59)
{
IPv6ExtHeader *pExt;
switch (nextHeader)
{
case PROTOCOL_TCP:
bParsingDone = TRUE;
res.xxpStatus = ppresXxpKnown;
res.TcpUdp = ppresIsTCP;
res.xxpFull = len >= fullLength ? 1 : 0;
res = ProcessTCPHeader(res, pIpHeader, len, ipHeaderSize);
break;
case PROTOCOL_UDP:
bParsingDone = TRUE;
res.xxpStatus = ppresXxpKnown;
res.TcpUdp = ppresIsUDP;
res.xxpFull = len >= fullLength ? 1 : 0;
res = ProcessUDPHeader(res, pIpHeader, len, ipHeaderSize);
break;
//existing extended headers
case 0:
case 60:
case 43:
case 44:
case 51:
case 50:
case 135:
if (len >= ((ULONG)ipHeaderSize + 8))
{
pExt = (IPv6ExtHeader *)((PUCHAR)pIpHeader + ipHeaderSize);
nextHeader = pExt->ip6ext_next_header;
ipHeaderSize += 8;
ipHeaderSize += pExt->ip6ext_hdr_len * 8;
}
else
{
DPrintf(0, ("[%s] ERROR: Break in the middle of ext. headers(len %d, hdr > %d)\n", __FUNCTION__, len, ipHeaderSize));
res.ipStatus = ppresNotIP;
bParsingDone = TRUE;
}
break;
//any other protocol
default:
res.xxpStatus = ppresXxpOther;
bParsingDone = TRUE;
break;
}
if (bParsingDone)
break;
}
if (ipHeaderSize <= MAX_SUPPORTED_IPV6_HEADERS)
{
DPrintf(3, ("ip_version %d, ipHeaderSize %d, protocol %d, iplen %d\n",
ip_version, ipHeaderSize, nextHeader, fullLength));
res.ipHeaderSize = ipHeaderSize;
}
else
{
DPrintf(0, ("[%s] ERROR: IP chain is too large (%d)\n", __FUNCTION__, ipHeaderSize));
res.ipStatus = ppresNotIP;
}
}
if (res.ipStatus == ppresIPV4)
{
res.ipHeaderSize = ipHeaderSize;
// bit "more fragments" or fragment offset mean the packet is fragmented
res.IsFragment = (pIpHeader->v4.ip_offset & ~0xC0) != 0;
switch (pIpHeader->v4.ip_protocol)
{
case PROTOCOL_TCP:
{
res = ProcessTCPHeader(res, pIpHeader, len, ipHeaderSize);
}
break;
case PROTOCOL_UDP:
{
res = ProcessUDPHeader(res, pIpHeader, len, ipHeaderSize);
}
break;
default:
res.xxpStatus = ppresXxpOther;
break;
}
}
return res;
} | 0 | [
"CWE-20"
]
| kvm-guest-drivers-windows | 723416fa4210b7464b28eab89cc76252e6193ac1 | 9,285,784,233,219,095,000,000,000,000,000,000,000 | 152 | NetKVM: BZ#1169718: Checking the length only on read
Signed-off-by: Joseph Hindin <[email protected]> |
QString proxyToStr(const Proxy & p) {
QString res="";
if (p.type == QNetworkProxy::HttpProxy)
res += "http://";
else if (p.type == QNetworkProxy::Socks5Proxy)
res += "socks5://";
if (!p.user.isEmpty()) {
res += "@" + p.user;
if (!p.password.isEmpty()) res += ":" + p.password;
}
res += p.host;
if (!p.host.isEmpty()) res += ":" + QString::number(p.port);
return res;
} | 0 | [
"CWE-22"
]
| wkhtmltopdf | 2a5f25077895fb075812c0f599326f079a59d6cf | 154,901,187,151,215,100,000,000,000,000,000,000,000 | 15 | BREAKING CHANGE: block local filesystem access by default
fixes #4536 |
irc_server_set_prefix_modes_chars (struct t_irc_server *server,
const char *prefix)
{
char *pos;
int i, old_length_chars, length_modes, length_chars;
if (!server || !prefix)
return;
old_length_chars = (server->prefix_chars) ?
strlen (server->prefix_chars) : 0;
/* free previous values */
if (server->prefix_modes)
{
free (server->prefix_modes);
server->prefix_modes = NULL;
}
if (server->prefix_chars)
{
free (server->prefix_chars);
server->prefix_chars = NULL;
}
/* assign new values */
pos = strchr (prefix, ')');
if (pos)
{
server->prefix_modes = weechat_strndup (prefix + 1,
pos - prefix - 1);
if (server->prefix_modes)
{
pos++;
length_modes = strlen (server->prefix_modes);
length_chars = strlen (pos);
server->prefix_chars = malloc (length_modes + 1);
if (server->prefix_chars)
{
for (i = 0; i < length_modes; i++)
{
server->prefix_chars[i] = (i < length_chars) ? pos[i] : ' ';
}
server->prefix_chars[length_modes] = '\0';
}
else
{
free (server->prefix_modes);
server->prefix_modes = NULL;
}
}
}
length_chars = (server->prefix_chars) ? strlen (server->prefix_chars) : 0;
if (server->prefix_chars && (length_chars != old_length_chars))
irc_nick_realloc_prefixes (server, old_length_chars, length_chars);
} | 0 | [
"CWE-120",
"CWE-787"
]
| weechat | 40ccacb4330a64802b1f1e28ed9a6b6d3ca9197f | 315,315,748,392,032,900,000,000,000,000,000,000,000 | 56 | irc: fix crash when a new message 005 is received with longer nick prefixes
Thanks to Stuart Nevans Locke for reporting the issue. |
bool testSingleInterval(IndexBounds bounds) {
BSONObj startKey;
bool startKeyIn;
BSONObj endKey;
bool endKeyIn;
return IndexBoundsBuilder::isSingleInterval(bounds, &startKey, &startKeyIn, &endKey, &endKeyIn);
} | 0 | [
"CWE-754"
]
| mongo | f8f55e1825ee5c7bdb3208fc7c5b54321d172732 | 116,219,776,271,496,400,000,000,000,000,000,000,000 | 7 | SERVER-44377 generate correct plan for indexed inequalities to null |
double bson_iterator_double_raw( const bson_iterator *i ) {
double out;
bson_little_endian64( &out, bson_iterator_value( i ) );
return out;
} | 0 | [
"CWE-190"
]
| mongo-c-driver-legacy | 1a1f5e26a4309480d88598913f9eebf9e9cba8ca | 239,353,860,516,770,750,000,000,000,000,000,000,000 | 5 | don't mix up int and size_t (first pass to fix that) |
void smtp_server_connection_set_proxy_data(
struct smtp_server_connection *conn,
const struct smtp_proxy_data *proxy_data)
{
if (proxy_data->source_ip.family != 0)
conn->conn.remote_ip = proxy_data->source_ip;
if (proxy_data->source_port != 0)
conn->conn.remote_port = proxy_data->source_port;
if (proxy_data->helo != NULL) {
i_free(conn->helo_domain);
conn->helo_domain = i_strdup(proxy_data->helo);
conn->helo.domain = conn->helo_domain;
conn->helo.domain_valid = TRUE;
}
if (proxy_data->login != NULL) {
i_free(conn->username);
conn->username = i_strdup(proxy_data->login);
}
if (proxy_data->proto != SMTP_PROXY_PROTOCOL_UNKNOWN)
conn->proxy_proto = proxy_data->proto;
if (proxy_data->ttl_plus_1 > 0)
conn->proxy_ttl_plus_1 = proxy_data->ttl_plus_1;
if (conn->proxy_timeout_secs > 0)
conn->proxy_timeout_secs = proxy_data->timeout_secs;
if (conn->callbacks != NULL &&
conn->callbacks->conn_proxy_data_updated != NULL) {
struct smtp_proxy_data full_data;
i_zero(&full_data);
full_data.source_ip = conn->conn.remote_ip;
full_data.source_port = conn->conn.remote_port;
full_data.helo = conn->helo.domain;
full_data.login = conn->username;
full_data.proto = conn->proxy_proto;
full_data.ttl_plus_1 = conn->proxy_ttl_plus_1;
full_data.timeout_secs = conn->proxy_timeout_secs;
conn->callbacks->
conn_proxy_data_updated(conn->context, &full_data);
}
} | 0 | [
"CWE-77"
]
| core | 321c339756f9b2b98fb7326359d1333adebb5295 | 118,731,360,157,125,880,000,000,000,000,000,000,000 | 43 | lib-smtp: smtp-server-connection - Fix STARTTLS command injection vulnerability.
The input handler kept reading more commands even though the input was locked by
the STARTTLS command, thereby causing it to read the command pipelined beyond
STARTTLS. This causes a STARTTLS command injection vulerability. |
static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
					  struct usb_port *port_dev)
{
	struct usb_device *udev = port_dev->child;
	int err;

	/*
	 * Nothing to tear down unless a suspended child device on this
	 * port has remote wakeup armed.
	 */
	if (!udev || !udev->port_is_suspended || !udev->do_remote_wakeup)
		return;

	/* Bring the link back to U0 before disabling remote wakeup. */
	err = hub_set_port_link_state(hub, port_dev->portnum,
				      USB_SS_PORT_LS_U0);
	if (!err) {
		msleep(USB_RESUME_TIMEOUT);
		err = usb_disable_remote_wakeup(udev);
	}

	if (err)
		dev_warn(&udev->dev,
			 "Port disable: can't disable remote wake\n");
	udev->do_remote_wakeup = 0;
}
"CWE-400",
"CWE-703"
]
| linux | 704620afc70cf47abb9d6a1a57f3825d2bca49cf | 287,113,243,575,934,940,000,000,000,000,000,000,000 | 19 | USB: check usb_get_extra_descriptor for proper size
When reading an extra descriptor, we need to properly check the minimum
and maximum size allowed, to prevent from invalid data being sent by a
device.
Reported-by: Hui Peng <[email protected]>
Reported-by: Mathias Payer <[email protected]>
Co-developed-by: Linus Torvalds <[email protected]>
Signed-off-by: Hui Peng <[email protected]>
Signed-off-by: Mathias Payer <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
dns_zone_setstatistics(dns_zone_t *zone, bool on) {
	/*
	 * Obsolete entry point kept only for API compatibility; both
	 * arguments are ignored and the call always fails.
	 */
	UNUSED(on);
	UNUSED(zone);

	return (ISC_R_NOTIMPLEMENTED);
}
"CWE-327"
]
| bind9 | f09352d20a9d360e50683cd1d2fc52ccedcd77a0 | 50,251,077,408,978,395,000,000,000,000,000,000,000 | 8 | Update keyfetch_done compute_tag check
If in keyfetch_done the compute_tag fails (because for example the
algorithm is not supported), don't crash, but instead ignore the
key. |
/*
 * Sanity-check a chunk item read from disk before it is trusted.
 *
 * @fs_info: filesystem the chunk belongs to
 * @leaf:    extent buffer holding the chunk item
 * @chunk:   the chunk item to validate
 * @logical: logical start address the chunk claims to map
 *
 * Verifies stripe counts, alignment, lengths and the type/profile flag
 * combination.  Returns 0 if the chunk looks sane, or -EIO with an error
 * logged describing which check failed.
 */
static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
				   struct extent_buffer *leaf,
				   struct btrfs_chunk *chunk, u64 logical)
{
	u64 length;
	u64 stripe_len;
	u16 num_stripes;
	u16 sub_stripes;
	u64 type;
	u64 features;
	bool mixed = false;

	/* Pull all fields out of the on-disk (leaf) representation once. */
	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);

	if (!num_stripes) {
		btrfs_err(fs_info, "invalid chunk num_stripes: %u",
			  num_stripes);
		return -EIO;
	}
	/* Start address and length must both be sector aligned. */
	if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
		btrfs_err(fs_info, "invalid chunk logical %llu", logical);
		return -EIO;
	}
	if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
		btrfs_err(fs_info, "invalid chunk sectorsize %u",
			  btrfs_chunk_sector_size(leaf, chunk));
		return -EIO;
	}
	if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
		btrfs_err(fs_info, "invalid chunk length %llu", length);
		return -EIO;
	}
	/* Only one fixed stripe length is valid. */
	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
		btrfs_err(fs_info, "invalid chunk stripe length: %llu",
			  stripe_len);
		return -EIO;
	}
	/* Reject any flag bits outside the known type/profile masks. */
	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
	    type) {
		btrfs_err(fs_info, "unrecognized chunk type: %llu",
			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
			  btrfs_chunk_type(leaf, chunk));
		return -EIO;
	}

	if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
		btrfs_err(fs_info, "missing chunk type flag: 0x%llx", type);
		return -EIO;
	}

	/* SYSTEM chunks must not also carry DATA or METADATA. */
	if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
	    (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
		btrfs_err(fs_info,
			"system chunk with data or metadata type: 0x%llx", type);
		return -EIO;
	}

	features = btrfs_super_incompat_flags(fs_info->super_copy);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = true;

	/* DATA+METADATA in one chunk is only valid with MIXED_GROUPS. */
	if (!mixed) {
		if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
		    (type & BTRFS_BLOCK_GROUP_DATA)) {
			btrfs_err(fs_info,
			"mixed chunk type in non-mixed mode: 0x%llx", type);
			return -EIO;
		}
	}

	/* Per-profile stripe-count constraints. */
	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
	     num_stripes != 1)) {
		btrfs_err(fs_info,
			"invalid num_stripes:sub_stripes %u:%u for profile %llu",
			num_stripes, sub_stripes,
			type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EIO;
	}

	return 0;
}
"CWE-476",
"CWE-284"
]
| linux | 09ba3bc9dd150457c506e4661380a6183af651c1 | 41,097,486,704,489,240,000,000,000,000,000,000,000 | 91 | btrfs: merge btrfs_find_device and find_device
Both btrfs_find_device() and find_device() does the same thing except
that the latter does not take the seed device onto account in the device
scanning context. We can merge them.
Signed-off-by: Anand Jain <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]> |
/** Process an incoming association request: run the notifier hook, apply
 *  the refusal conditions (forced refusal, bad application context,
 *  unknown called/calling AE title, no acceptable presentation context),
 *  negotiate, acknowledge the association and hand it to
 *  handleAssociation().  Returns EC_Normal in all handled cases
 *  (including refusals) and ASC_NULLKEY if no association or parameter
 *  set is present.
 */
OFCondition DcmSCP::processAssociationRQ()
{
  DcmSCPActionType desiredAction = DCMSCP_ACTION_UNDEFINED;
  if ( (m_assoc == NULL) || (m_assoc->params == NULL) )
    return ASC_NULLKEY;

  // call notifier function; the application may veto the association here
  notifyAssociationRequest(*m_assoc->params, desiredAction);
  if (desiredAction != DCMSCP_ACTION_UNDEFINED)
  {
    if (desiredAction == DCMSCP_ACTION_REFUSE_ASSOCIATION)
    {
      refuseAssociation( DCMSCP_INTERNAL_ERROR );
      dropAndDestroyAssociation();
      return EC_Normal;
    }
    else desiredAction = DCMSCP_ACTION_UNDEFINED; // reset for later use
  }

  // Now we have to figure out if we might have to refuse the association request.
  // This is the case if at least one of five conditions is met:

  // Condition 1: if option "--refuse" is set we want to refuse the association request.
  if( m_cfg->getRefuseAssociation() )
  {
    refuseAssociation( DCMSCP_FORCED );
    dropAndDestroyAssociation();
    return EC_Normal;
  }

  // Condition 2: determine the application context name. If an error occurred or if the
  // application context name is not supported we want to refuse the association request.
  // NOTE(review): ASC_getApplicationContextName() is assumed to NUL-terminate
  // within BUFSIZ -- confirm against the toolkit's contract for this call.
  char buf[BUFSIZ];
  OFCondition cond = ASC_getApplicationContextName( m_assoc->params, buf );
  if( cond.bad() || strcmp( buf, DICOM_STDAPPLICATIONCONTEXT ) != 0 )
  {
    refuseAssociation( DCMSCP_BAD_APPLICATION_CONTEXT_NAME );
    dropAndDestroyAssociation();
    return EC_Normal;
  }

  // Condition 3: if the calling or called application entity title is not supported
  // we want to refuse the association request
  if (!checkCalledAETitleAccepted(m_assoc->params->DULparams.calledAPTitle))
  {
    refuseAssociation( DCMSCP_CALLED_AE_TITLE_NOT_RECOGNIZED );
    dropAndDestroyAssociation();
    return EC_Normal;
  }

  if (!checkCallingAETitleAccepted(m_assoc->params->DULparams.callingAPTitle))
  {
    refuseAssociation( DCMSCP_CALLING_AE_TITLE_NOT_RECOGNIZED );
    dropAndDestroyAssociation();
    return EC_Normal;
  }

  /* set our application entity title: either echo back whatever the peer
     called us, or use the configured AE title */
  if (m_cfg->getRespondWithCalledAETitle())
    ASC_setAPTitles(m_assoc->params, NULL, NULL, m_assoc->params->DULparams.calledAPTitle);
  else
    ASC_setAPTitles(m_assoc->params, NULL, NULL, m_cfg->getAETitle().c_str());

  /* If we get to this point the association shall be negotiated.
     Thus, for every presentation context it is checked whether
     it can be accepted. However, this is only a "dry" run, i.e.
     there is not yet sent a response message to the SCU
   */
  cond = negotiateAssociation();
  if( cond.bad() )
  {
    dropAndDestroyAssociation();
    return EC_Normal;
  }

  // Reject association if no presentation context was negotiated
  if( ASC_countAcceptedPresentationContexts( m_assoc->params ) == 0 )
  {
    // Dump some debug information
    OFString tempStr;
    DCMNET_INFO("No Acceptable Presentation Contexts");
    if (m_cfg->getVerbosePCMode())
      DCMNET_INFO(ASC_dumpParameters(tempStr, m_assoc->params, ASC_ASSOC_RJ));
    else
      DCMNET_DEBUG(ASC_dumpParameters(tempStr, m_assoc->params, ASC_ASSOC_RJ));
    refuseAssociation( DCMSCP_NO_PRESENTATION_CONTEXTS );
    dropAndDestroyAssociation();
    return EC_Normal;
  }

  // If the negotiation was successful, accept the association request
  cond = ASC_acknowledgeAssociation( m_assoc );
  if( cond.bad() )
  {
    dropAndDestroyAssociation();
    return EC_Normal;
  }
  notifyAssociationAcknowledge();

  // Dump some debug information
  OFString tempStr;
  DCMNET_INFO("Association Acknowledged (Max Send PDV: " << OFstatic_cast(Uint32, m_assoc->sendPDVLength) << ")");
  if (m_cfg->getVerbosePCMode())
    DCMNET_INFO(ASC_dumpParameters(tempStr, m_assoc->params, ASC_ASSOC_AC));
  else
    DCMNET_DEBUG(ASC_dumpParameters(tempStr, m_assoc->params, ASC_ASSOC_AC));

  // Go ahead and handle the association (i.e. handle the callers requests) in this process
  handleAssociation();

  return EC_Normal;
}
"CWE-264"
]
| dcmtk | beaf5a5c24101daeeafa48c375120b16197c9e95 | 99,474,505,736,081,460,000,000,000,000,000,000,000 | 111 | Make sure to handle setuid() return code properly.
In some tools the return value of setuid() is not checked. In the worst
case this could lead to privilege escalation since the process does not
give up its root privileges and continue as root. |
/* Depth-first walk of the jsmn token stream looking for a key named
 * searchKey on the top layer (depth 0) of the current object.  On success
 * *resultIndex is set to the token index of the key's VALUE and
 * UA_STATUSCODE_GOOD is returned; UA_STATUSCODE_BADOUTOFRANGE is returned
 * for truncated input and UA_STATUSCODE_BADNOTFOUND otherwise.
 * parseCtx->index is advanced past the visited tokens as a side effect.
 * NOTE(review): 'depth' only restricts matching to the first layer; the
 * recursion itself is not capped here -- each nesting level recurses once. */
searchObjectForKeyRec(const char *searchKey, CtxJson *ctx,
                      ParseCtx *parseCtx, size_t *resultIndex, UA_UInt16 depth) {
    UA_StatusCode ret = UA_STATUSCODE_BADNOTFOUND;

    CHECK_TOKEN_BOUNDS;

    if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) {
        size_t objectCount = (size_t)parseCtx->tokenArray[parseCtx->index].size;

        parseCtx->index++; /*Object to first Key*/

        for(size_t i = 0; i < objectCount; i++) {
            CHECK_TOKEN_BOUNDS;
            if(depth == 0) { /* we search only on first layer */
                if(jsoneq((char*)ctx->pos, &parseCtx->tokenArray[parseCtx->index], searchKey) == 0) {
                    /*found*/
                    parseCtx->index++; /*We give back a pointer to the value of the searched key!*/
                    if (parseCtx->index >= parseCtx->tokenCount)
                        /* We got invalid json. See https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=14620 */
                        return UA_STATUSCODE_BADOUTOFRANGE;
                    *resultIndex = parseCtx->index;
                    return UA_STATUSCODE_GOOD;
                }
            }

            parseCtx->index++; /* value */
            CHECK_TOKEN_BOUNDS;

            /* Nested containers are skipped by recursing into them (the
             * recursion advances parseCtx->index past their contents). */
            if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) {
                ret = searchObjectForKeyRec(searchKey, ctx, parseCtx, resultIndex,
                                            (UA_UInt16)(depth + 1));
            } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) {
                ret = searchObjectForKeyRec(searchKey, ctx, parseCtx, resultIndex,
                                            (UA_UInt16)(depth + 1));
            } else {
                /* Only Primitive or string */
                parseCtx->index++;
            }
        }
    } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) {
        size_t arraySize = (size_t)parseCtx->tokenArray[parseCtx->index].size;

        parseCtx->index++; /*Object to first element*/

        for(size_t i = 0; i < arraySize; i++) {
            CHECK_TOKEN_BOUNDS;
            if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) {
                ret = searchObjectForKeyRec(searchKey, ctx, parseCtx, resultIndex,
                                            (UA_UInt16)(depth + 1));
            } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) {
                ret = searchObjectForKeyRec(searchKey, ctx, parseCtx, resultIndex,
                                            (UA_UInt16)(depth + 1));
            } else {
                /* Only Primitive or string */
                parseCtx->index++;
            }
        }
    }
    return ret;
}
"CWE-703",
"CWE-787"
]
| open62541 | c800e2987b10bb3af6ef644b515b5d6392f8861d | 284,081,130,141,865,740,000,000,000,000,000,000,000 | 58 | fix(json): Check max recursion depth in more places |
/*
 * Try to open the file through the parent directory's ->atomic_open().
 *
 * Return values:
 *   <0 - error
 *    0 - the file was opened via dir->i_op->atomic_open()
 *    1 - the open fell back to lookup; *path is filled in and the caller
 *        must finish the open itself (the "looked_up" path below)
 */
static int atomic_open(struct nameidata *nd, struct dentry *dentry,
			struct path *path, struct file *file,
			const struct open_flags *op,
			bool got_write, bool need_lookup,
			int *opened)
{
	struct inode *dir = nd->path.dentry->d_inode;
	unsigned open_flag = open_to_namei_flags(op->open_flag);
	umode_t mode;
	int error;
	int acc_mode;
	int create_error = 0;
	struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
	bool excl;

	BUG_ON(dentry->d_inode);

	/* Don't create child dentry for a dead directory. */
	if (unlikely(IS_DEADDIR(dir))) {
		error = -ENOENT;
		goto out;
	}

	mode = op->mode;
	if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))
		mode &= ~current_umask();

	/* O_EXCL|O_CREAT implies a fresh file, so truncation is redundant. */
	excl = (open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT);
	if (excl)
		open_flag &= ~O_TRUNC;

	/*
	 * Checking write permission is tricky, because we don't know if we are
	 * going to actually need it: O_CREAT opens should work as long as the
	 * file exists. But checking existence breaks atomicity. The trick is
	 * to check access and if not granted clear O_CREAT from the flags.
	 *
	 * Another problem is returning the "right" error value (e.g. for an
	 * O_EXCL open we want to return EEXIST not EROFS).
	 */
	if (((open_flag & (O_CREAT | O_TRUNC)) ||
	    (open_flag & O_ACCMODE) != O_RDONLY) && unlikely(!got_write)) {
		if (!(open_flag & O_CREAT)) {
			/*
			 * No O_CREAT -> atomicity not a requirement -> fall
			 * back to lookup + open
			 */
			goto no_open;
		} else if (open_flag & (O_EXCL | O_TRUNC)) {
			/* Fall back and fail with the right error */
			create_error = -EROFS;
			goto no_open;
		} else {
			/* No side effects, safe to clear O_CREAT */
			create_error = -EROFS;
			open_flag &= ~O_CREAT;
		}
	}

	if (open_flag & O_CREAT) {
		error = may_o_create(&nd->path, dentry, mode);
		if (error) {
			create_error = error;
			if (open_flag & O_EXCL)
				goto no_open;
			open_flag &= ~O_CREAT;
		}
	}

	if (nd->flags & LOOKUP_DIRECTORY)
		open_flag |= O_DIRECTORY;

	/* Sentinel lets us detect a filesystem that "succeeds" without
	 * filling in the dentry. */
	file->f_path.dentry = DENTRY_NOT_SET;
	file->f_path.mnt = nd->path.mnt;
	error = dir->i_op->atomic_open(dir, dentry, file, open_flag, mode,
				      opened);
	if (error < 0) {
		if (create_error && error == -ENOENT)
			error = create_error;
		goto out;
	}

	if (error) {	/* returned 1, that is */
		if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
			error = -EIO;
			goto out;
		}
		if (file->f_path.dentry) {
			dput(dentry);
			dentry = file->f_path.dentry;
		}
		if (*opened & FILE_CREATED)
			fsnotify_create(dir, dentry);
		if (!dentry->d_inode) {
			WARN_ON(*opened & FILE_CREATED);
			if (create_error) {
				error = create_error;
				goto out;
			}
		} else {
			if (excl && !(*opened & FILE_CREATED)) {
				error = -EEXIST;
				goto out;
			}
		}
		goto looked_up;
	}

	/*
	 * We didn't have the inode before the open, so check open permission
	 * here.
	 */
	acc_mode = op->acc_mode;
	if (*opened & FILE_CREATED) {
		WARN_ON(!(open_flag & O_CREAT));
		fsnotify_create(dir, dentry);
		acc_mode = MAY_OPEN;
	}
	error = may_open(&file->f_path, acc_mode, open_flag);
	if (error)
		fput(file);

out:
	dput(dentry);
	return error;

no_open:
	if (need_lookup) {
		dentry = lookup_real(dir, dentry, nd->flags);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);

		if (create_error) {
			/* NOTE(review): this 'open_flag' intentionally
			 * shadows the (possibly modified) function-scope
			 * variable above -- it re-reads the caller's
			 * original flags. */
			int open_flag = op->open_flag;

			error = create_error;
			if ((open_flag & O_EXCL)) {
				if (!dentry->d_inode)
					goto out;
			} else if (!dentry->d_inode) {
				goto out;
			} else if ((open_flag & O_TRUNC) &&
				   S_ISREG(dentry->d_inode->i_mode)) {
				goto out;
			}
			/* will fail later, go on to get the right error */
		}
	}
looked_up:
	path->dentry = dentry;
	path->mnt = nd->path.mnt;
	return 1;
}
"CWE-284",
"CWE-264"
]
| linux | 23adbe12ef7d3d4195e80800ab36b37bee28cd03 | 59,933,578,640,933,080,000,000,000,000,000,000,000 | 153 | fs,userns: Change inode_capable to capable_wrt_inode_uidgid
The kernel has no concept of capabilities with respect to inodes; inodes
exist independently of namespaces. For example, inode_capable(inode,
CAP_LINUX_IMMUTABLE) would be nonsense.
This patch changes inode_capable to check for uid and gid mappings and
renames it to capable_wrt_inode_uidgid, which should make it more
obvious what it does.
Fixes CVE-2014-4014.
Cc: Theodore Ts'o <[email protected]>
Cc: Serge Hallyn <[email protected]>
Cc: "Eric W. Biederman" <[email protected]>
Cc: Dave Chinner <[email protected]>
Cc: [email protected]
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
/* Decompress one chunk of a PPMd8-compressed zipx entry.
 * On success, *buff/*size describe the decoded bytes (owned by the zip
 * state) and ARCHIVE_OK is returned; truncated input yields
 * ARCHIVE_FATAL.  The PPMd8 context is created lazily on first call and
 * freed when the end of the entry is reached.  'offset' is unused. */
zip_read_data_zipx_ppmd(struct archive_read *a, const void **buff,
    size_t *size, int64_t *offset)
{
	struct zip* zip = (struct zip *)(a->format->data);
	int ret;
	size_t consumed_bytes = 0;
	ssize_t bytes_avail = 0;

	(void) offset; /* UNUSED */

	/* If we're here for the first time, initialize Ppmd8 decompression
	 * context first. */
	if(!zip->decompress_init) {
		ret = zipx_ppmd8_init(a, zip);
		if(ret != ARCHIVE_OK)
			return ret;
	}

	/* Fetch for more data. We're reading 1 byte here, but libarchive should
	 * prefetch more bytes. */
	(void) __archive_read_ahead(a, 1, &bytes_avail);
	if(bytes_avail < 0) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Truncated PPMd8 file body");
		return (ARCHIVE_FATAL);
	}

	/* This counter will be updated inside ppmd_read(), which at one
	 * point will be called by Ppmd8_DecodeSymbol. */
	zip->zipx_ppmd_read_compressed = 0;

	/* Decompression loop: one symbol per output byte until the buffer
	 * is full or the stream ends. */
	do {
		int sym = __archive_ppmd8_functions.Ppmd8_DecodeSymbol(&zip->ppmd8);
		if(sym < 0) {
			/* Negative symbol marks end of the PPMd8 stream. */
			zip->end_of_entry = 1;
			break;
		}

		/* This field is set by ppmd_read() when there was no more data
		 * to be read. */
		if(zip->ppmd8_stream_failed) {
			archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
			    "Truncated PPMd8 file body");
			return (ARCHIVE_FATAL);
		}

		zip->uncompressed_buffer[consumed_bytes] = (uint8_t) sym;
		++consumed_bytes;
	} while(consumed_bytes < zip->uncompressed_buffer_size);

	/* Update pointers for libarchive. */
	*buff = zip->uncompressed_buffer;
	*size = consumed_bytes;

	/* Update pointers so we can continue decompression in another call. */
	zip->entry_bytes_remaining -= zip->zipx_ppmd_read_compressed;
	zip->entry_compressed_bytes_read += zip->zipx_ppmd_read_compressed;
	zip->entry_uncompressed_bytes_read += consumed_bytes;

	/* If we're at the end of stream, deinitialize Ppmd8 context. */
	if(zip->end_of_entry) {
		__archive_ppmd8_functions.Ppmd8_Free(&zip->ppmd8);
		zip->ppmd8_valid = 0;
	}

	/* Seek for optional marker, same way as in each zip entry. */
	ret = consume_optional_marker(a, zip);
	if (ret != ARCHIVE_OK)
		return ret;

	return ARCHIVE_OK;
}
"CWE-399",
"CWE-401"
]
| libarchive | ba641f73f3d758d9032b3f0e5597a9c6e593a505 | 70,009,463,417,781,365,000,000,000,000,000,000,000 | 73 | Fix typo in preprocessor macro in archive_read_format_zip_cleanup()
Frees lzma_stream on cleanup()
Fixes #1165 |
/*
 * Append *value to the value array of an ldb message element, stealing
 * the value's data so it is freed together with the array.  Returns
 * false on allocation failure (the element is left unchanged).
 */
static bool add_value_to_attrib(TALLOC_CTX *mem_ctx, struct ldb_val *value,
				struct ldb_message_element *attrib)
{
	DATA_BLOB *values;

	/*
	 * BUG FIX: grow the array through a temporary.  The old code
	 * assigned talloc_realloc()'s result straight into
	 * attrib->values, so a failed realloc overwrote the pointer with
	 * NULL and left the element inconsistent (num_values > 0 with
	 * values == NULL) while leaking the old array reference.
	 */
	values = talloc_realloc(mem_ctx,
				attrib->values,
				DATA_BLOB,
				attrib->num_values+1);
	if (values == NULL)
		return false;
	attrib->values = values;

	/* Reparent the value's data under the value array. */
	attrib->values[attrib->num_values].data = talloc_steal(attrib->values,
							       value->data);
	attrib->values[attrib->num_values].length = value->length;
	attrib->num_values += 1;

	return true;
}
"CWE-399"
]
| samba | 530d50a1abdcdf4d1775652d4c456c1274d83d8d | 124,322,705,354,239,300,000,000,000,000,000,000,000 | 16 | CVE-2015-7540: s4: libcli: ldap message - Ensure all asn1_XX returns are checked.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=9187
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Ronnie Sahlberg <[email protected]>
Autobuild-User(master): Jeremy Allison <[email protected]>
Autobuild-Date(master): Fri Sep 26 03:15:00 CEST 2014 on sn-devel-104
(cherry picked from commit 69a7e3cfdc8dbba9c8dcfdfae82d2894c7247e15) |
/**
 * Parse an XML file with the given SAX handler, building a tree.
 *
 * @sax:      SAX handler to use (may be NULL for the default one)
 * @filename: file to parse
 * @recovery: if non-zero, return the (possibly broken) tree even when
 *            the document is not well-formed
 * @data:     opaque pointer stored in ctxt->_private for callbacks
 *
 * Returns the resulting document tree, or NULL on failure.
 */
xmlSAXParseFileWithData(xmlSAXHandlerPtr sax, const char *filename,
                        int recovery, void *data) {
    xmlDocPtr ret;
    xmlParserCtxtPtr ctxt;

    xmlInitParser();

    ctxt = xmlCreateFileParserCtxt(filename);
    if (ctxt == NULL) {
	return(NULL);
    }
    if (sax != NULL) {
	if (ctxt->sax != NULL)
	    xmlFree(ctxt->sax);
        ctxt->sax = sax;
    }
    xmlDetectSAX2(ctxt);
    if (data!=NULL) {
	ctxt->_private = data;
    }

    if (ctxt->directory == NULL)
        ctxt->directory = xmlParserGetDirectory(filename);

    ctxt->recovery = recovery;

    xmlParseDocument(ctxt);

    if ((ctxt->wellFormed) || recovery) {
        ret = ctxt->myDoc;
	/*
	 * BUG FIX: guard the input buffer before dereferencing it.  The
	 * old code read ctxt->input->buf->compressed unconditionally,
	 * which is a NULL pointer dereference when the input has no
	 * buffer (e.g. the file could not be loaded).
	 */
	if ((ret != NULL) && (ctxt->input != NULL) &&
	    (ctxt->input->buf != NULL)) {
	    if (ctxt->input->buf->compressed > 0)
		ret->compression = 9;
	    else
		ret->compression = ctxt->input->buf->compressed;
	}
    }
    else {
       ret = NULL;
       xmlFreeDoc(ctxt->myDoc);
       ctxt->myDoc = NULL;
    }
    if (sax != NULL)
        ctxt->sax = NULL;
    xmlFreeParserCtxt(ctxt);

    return(ret);
}
"CWE-119"
]
| libxml2 | 6a36fbe3b3e001a8a840b5c1fdd81cefc9947f0d | 49,986,219,021,808,400,000,000,000,000,000,000,000 | 48 | Fix potential out of bound access |
/*
 * Parse the TLS extensions of a ClientHello.
 *
 * *p points at the length-prefixed extension block; d/n delimit the whole
 * message.  On success *p is advanced past the extensions and 1 is
 * returned.  On failure 0 is returned and *al is set to the alert to send.
 *
 * Handled extensions: server_name (SNI), ec_point_formats,
 * elliptic_curves, opaque PRF input, session_ticket, renegotiation_info
 * and status_request (OCSP).  Unknown extensions are skipped.
 */
int ssl_parse_clienthello_tlsext(SSL *s, unsigned char **p, unsigned char *d, int n, int *al)
	{
	unsigned short type;
	unsigned short size;
	unsigned short len;
	unsigned char *data = *p;
	int renegotiate_seen = 0;

	s->servername_done = 0;
	s->tlsext_status_type = -1;

	if (data >= (d+n-2))
		goto ri_check;
	n2s(data,len);

	if (data > (d+n-len))
		goto ri_check;

	while (data <= (d+n-4))
		{
		n2s(data,type);
		n2s(data,size);

		/* The extension body must fit inside the message. */
		if (data+size > (d+n))
			goto ri_check;
#if 0
		fprintf(stderr,"Received extension type %d size %d\n",type,size);
#endif
		if (s->tlsext_debug_cb)
			s->tlsext_debug_cb(s, 0, type, data, size,
						s->tlsext_debug_arg);
/* The servername extension is treated as follows:

   - Only the hostname type is supported with a maximum length of 255.
   - The servername is rejected if too long or if it contains zeros,
     in which case an fatal alert is generated.
   - The servername field is maintained together with the session cache.
   - When a session is resumed, the servername call back invoked in order
     to allow the application to position itself to the right context.
   - The servername is acknowledged if it is new for a session or when
     it is identical to a previously used for the same session.
     Applications can control the behaviour. They can at any time
     set a 'desirable' servername for a new SSL object. This can be the
     case for example with HTTPS when a Host: header field is received and
     a renegotiation is requested. In this case, a possible servername
     presented in the new client hello is only acknowledged if it matches
     the value of the Host: field.
   - Applications must use SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION
     if they provide for changing an explicit servername context for the session,
     i.e. when the session has been established with a servername extension.
   - On session reconnect, the servername extension may be absent.
*/

		if (type == TLSEXT_TYPE_server_name)
			{
			unsigned char *sdata;
			int servname_type;
			int dsize;

			if (size < 2)
				{
				*al = SSL_AD_DECODE_ERROR;
				return 0;
				}
			n2s(data,dsize);
			size -= 2;
			if (dsize > size)
				{
				*al = SSL_AD_DECODE_ERROR;
				return 0;
				}

			sdata = data;
			while (dsize > 3)
				{
				servname_type = *(sdata++);
				n2s(sdata,len);
				dsize -= 3;

				if (len > dsize)
					{
					*al = SSL_AD_DECODE_ERROR;
					return 0;
					}
				if (s->servername_done == 0)
				switch (servname_type)
					{
				case TLSEXT_NAMETYPE_host_name:
					if (s->session->tlsext_hostname == NULL)
						{
						if (len > TLSEXT_MAXLEN_host_name ||
							((s->session->tlsext_hostname = OPENSSL_malloc(len+1)) == NULL))
							{
							*al = TLS1_AD_UNRECOGNIZED_NAME;
							return 0;
							}
						memcpy(s->session->tlsext_hostname, sdata, len);
						s->session->tlsext_hostname[len]='\0';
						/* Embedded NULs would make the C string
						 * shorter than the wire length: reject. */
						if (strlen(s->session->tlsext_hostname) != len) {
							OPENSSL_free(s->session->tlsext_hostname);
							s->session->tlsext_hostname = NULL;
							*al = TLS1_AD_UNRECOGNIZED_NAME;
							return 0;
						}
						s->servername_done = 1;

						}
					else
						s->servername_done = strlen(s->session->tlsext_hostname) == len
							&& strncmp(s->session->tlsext_hostname, (char *)sdata, len) == 0;

					break;

				default:
					break;
					}

				dsize -= len;
				}
			if (dsize != 0)
				{
				*al = SSL_AD_DECODE_ERROR;
				return 0;
				}

			}
#ifndef OPENSSL_NO_EC
		else if (type == TLSEXT_TYPE_ec_point_formats &&
			 s->version != DTLS1_VERSION)
			{
			unsigned char *sdata = data;
			int ecpointformatlist_length;

			/* BUG FIX: validate the extension length before
			 * reading the one-byte list length; the old code
			 * read past the extension body when size == 0. */
			if (size < 1)
				{
				*al = TLS1_AD_DECODE_ERROR;
				return 0;
				}
			ecpointformatlist_length = *(sdata++);

			if (ecpointformatlist_length != size - 1)
				{
				*al = TLS1_AD_DECODE_ERROR;
				return 0;
				}
			s->session->tlsext_ecpointformatlist_length = 0;
			if (s->session->tlsext_ecpointformatlist != NULL) OPENSSL_free(s->session->tlsext_ecpointformatlist);
			if ((s->session->tlsext_ecpointformatlist = OPENSSL_malloc(ecpointformatlist_length)) == NULL)
				{
				*al = TLS1_AD_INTERNAL_ERROR;
				return 0;
				}
			s->session->tlsext_ecpointformatlist_length = ecpointformatlist_length;
			memcpy(s->session->tlsext_ecpointformatlist, sdata, ecpointformatlist_length);
#if 0
			fprintf(stderr,"ssl_parse_clienthello_tlsext s->session->tlsext_ecpointformatlist (length=%i) ", s->session->tlsext_ecpointformatlist_length);
			sdata = s->session->tlsext_ecpointformatlist;
			for (i = 0; i < s->session->tlsext_ecpointformatlist_length; i++)
				fprintf(stderr,"%i ",*(sdata++));
			fprintf(stderr,"\n");
#endif
			}
		else if (type == TLSEXT_TYPE_elliptic_curves &&
			 s->version != DTLS1_VERSION)
			{
			unsigned char *sdata = data;
			int ellipticcurvelist_length;

			/* BUG FIX: the two-byte list length must itself fit
			 * inside the extension body; the old code read up to
			 * two bytes beyond it for size < 2. */
			if (size < 2)
				{
				*al = TLS1_AD_DECODE_ERROR;
				return 0;
				}
			ellipticcurvelist_length = (*(sdata++) << 8);
			ellipticcurvelist_length += (*(sdata++));

			if (ellipticcurvelist_length != size - 2)
				{
				*al = TLS1_AD_DECODE_ERROR;
				return 0;
				}
			s->session->tlsext_ellipticcurvelist_length = 0;
			if (s->session->tlsext_ellipticcurvelist != NULL) OPENSSL_free(s->session->tlsext_ellipticcurvelist);
			if ((s->session->tlsext_ellipticcurvelist = OPENSSL_malloc(ellipticcurvelist_length)) == NULL)
				{
				*al = TLS1_AD_INTERNAL_ERROR;
				return 0;
				}
			s->session->tlsext_ellipticcurvelist_length = ellipticcurvelist_length;
			memcpy(s->session->tlsext_ellipticcurvelist, sdata, ellipticcurvelist_length);
#if 0
			fprintf(stderr,"ssl_parse_clienthello_tlsext s->session->tlsext_ellipticcurvelist (length=%i) ", s->session->tlsext_ellipticcurvelist_length);
			sdata = s->session->tlsext_ellipticcurvelist;
			for (i = 0; i < s->session->tlsext_ellipticcurvelist_length; i++)
				fprintf(stderr,"%i ",*(sdata++));
			fprintf(stderr,"\n");
#endif
			}
#endif /* OPENSSL_NO_EC */
#ifdef TLSEXT_TYPE_opaque_prf_input
		else if (type == TLSEXT_TYPE_opaque_prf_input &&
			 s->version != DTLS1_VERSION)
			{
			unsigned char *sdata = data;

			if (size < 2)
				{
				*al = SSL_AD_DECODE_ERROR;
				return 0;
				}
			n2s(sdata, s->s3->client_opaque_prf_input_len);
			if (s->s3->client_opaque_prf_input_len != size - 2)
				{
				*al = SSL_AD_DECODE_ERROR;
				return 0;
				}

			if (s->s3->client_opaque_prf_input != NULL) /* shouldn't really happen */
				OPENSSL_free(s->s3->client_opaque_prf_input);
			if (s->s3->client_opaque_prf_input_len == 0)
				s->s3->client_opaque_prf_input = OPENSSL_malloc(1); /* dummy byte just to get non-NULL */
			else
				s->s3->client_opaque_prf_input = BUF_memdup(sdata, s->s3->client_opaque_prf_input_len);
			if (s->s3->client_opaque_prf_input == NULL)
				{
				*al = TLS1_AD_INTERNAL_ERROR;
				return 0;
				}
			}
#endif
		else if (type == TLSEXT_TYPE_session_ticket)
			{
			if (s->tls_session_ticket_ext_cb &&
			    !s->tls_session_ticket_ext_cb(s, data, size, s->tls_session_ticket_ext_cb_arg))
				{
				*al = TLS1_AD_INTERNAL_ERROR;
				return 0;
				}
			}
		else if (type == TLSEXT_TYPE_renegotiate)
			{
			if(!ssl_parse_clienthello_renegotiate_ext(s, data, size, al))
				return 0;
			renegotiate_seen = 1;
			}
		else if (type == TLSEXT_TYPE_status_request &&
			 s->version != DTLS1_VERSION && s->ctx->tlsext_status_cb)
			{

			if (size < 5)
				{
				*al = SSL_AD_DECODE_ERROR;
				return 0;
				}

			s->tlsext_status_type = *data++;
			size--;
			if (s->tlsext_status_type == TLSEXT_STATUSTYPE_ocsp)
				{
				const unsigned char *sdata;
				int dsize;
				/* Read in responder_id_list */
				n2s(data,dsize);
				size -= 2;
				if (dsize > size)
					{
					*al = SSL_AD_DECODE_ERROR;
					return 0;
					}
				while (dsize > 0)
					{
					OCSP_RESPID *id;
					int idsize;
					if (dsize < 4)
						{
						*al = SSL_AD_DECODE_ERROR;
						return 0;
						}
					n2s(data, idsize);
					dsize -= 2 + idsize;
					if (dsize < 0)
						{
						*al = SSL_AD_DECODE_ERROR;
						return 0;
						}
					sdata = data;
					data += idsize;
					id = d2i_OCSP_RESPID(NULL,
								&sdata, idsize);
					if (!id)
						{
						*al = SSL_AD_DECODE_ERROR;
						return 0;
						}
					if (data != sdata)
						{
						OCSP_RESPID_free(id);
						*al = SSL_AD_DECODE_ERROR;
						return 0;
						}
					if (!s->tlsext_ocsp_ids
						&& !(s->tlsext_ocsp_ids =
							sk_OCSP_RESPID_new_null()))
						{
						OCSP_RESPID_free(id);
						*al = SSL_AD_INTERNAL_ERROR;
						return 0;
						}
					if (!sk_OCSP_RESPID_push(
							s->tlsext_ocsp_ids, id))
						{
						OCSP_RESPID_free(id);
						*al = SSL_AD_INTERNAL_ERROR;
						return 0;
						}
					}

				/* Read in request_extensions */
				n2s(data,dsize);
				size -= 2;
				if (dsize > size)
					{
					*al = SSL_AD_DECODE_ERROR;
					return 0;
					}
				sdata = data;
				if (dsize > 0)
					{
					s->tlsext_ocsp_exts =
						d2i_X509_EXTENSIONS(NULL,
							&sdata, dsize);
					if (!s->tlsext_ocsp_exts
						|| (data + dsize != sdata))
						{
						*al = SSL_AD_DECODE_ERROR;
						return 0;
						}
					}
				}
			/* We don't know what to do with any other type
			 * so ignore it.
			 */
			else
				s->tlsext_status_type = -1;
			}

		/* session ticket processed earlier */
		data+=size;
		}

	*p = data;

	ri_check:

	/* Need RI if renegotiating */

	if (!renegotiate_seen && s->new_session &&
		!(s->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION))
		{
		*al = SSL_AD_HANDSHAKE_FAILURE;
		SSLerr(SSL_F_SSL_PARSE_CLIENTHELLO_TLSEXT,
				SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED);
		return 0;
		}

	return 1;
	}
| openssl | ee2ffc279417f15fef3b1073c7dc81a908991516 | 250,510,687,265,667,030,000,000,000,000,000,000,000 | 355 | Add Next Protocol Negotiation. |
/*
 * Serialize number_pixels pixels from p into the raw byte stream q, in
 * B,G,R,O channel order ("O" = opacity), at the bit depth, endianness and
 * padding described by quantum_info.
 *
 * NOTE(review): the 10-bit path and the floating-point 32/64-bit paths
 * below emit channels in R,G,B,O order, unlike the B,G,R,O order used by
 * the 8/16/32-bit integer paths and the generic default path.  This looks
 * like copy-paste from the RGBO exporter -- confirm which order is
 * intended before relying on those depths.
 */
static void ExportBGROQuantum(const Image *image,QuantumInfo *quantum_info,
  const MagickSizeType number_pixels,const Quantum *magick_restrict p,
  unsigned char *magick_restrict q,ExceptionInfo *exception)
{
  QuantumAny
    range;

  ssize_t
    x;

  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  switch (quantum_info->depth)
  {
    case 8:
    {
      unsigned char
        pixel;

      /* One byte per channel: B, G, R, O. */
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        pixel=ScaleQuantumToChar(GetPixelBlue(image,p));
        q=PopCharPixel(pixel,q);
        pixel=ScaleQuantumToChar(GetPixelGreen(image,p));
        q=PopCharPixel(pixel,q);
        pixel=ScaleQuantumToChar(GetPixelRed(image,p));
        q=PopCharPixel(pixel,q);
        pixel=ScaleQuantumToChar(GetPixelOpacity(image,p));
        q=PopCharPixel(pixel,q);
        p+=GetPixelChannels(image);
        q+=quantum_info->pad;
      }
      break;
    }
    case 10:
    {
      unsigned int
        pixel;

      range=GetQuantumRange(quantum_info->depth);
      if (quantum_info->pack == MagickFalse)
        {
          ssize_t
            i;

          size_t
            quantum;

          ssize_t
            n;

          /* Unpacked: three 10-bit samples packed into each 32-bit word
             (bit offsets 22, 12 and 2), emitted every third sample. */
          n=0;
          quantum=0;
          pixel=0;
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            for (i=0; i < 4; i++)
            {
              switch (i)
              {
                case 0: quantum=GetPixelRed(image,p); break;
                case 1: quantum=GetPixelGreen(image,p); break;
                case 2: quantum=GetPixelBlue(image,p); break;
                case 3: quantum=GetPixelOpacity(image,p); break;
              }
              switch (n % 3)
              {
                case 0:
                {
                  pixel|=(size_t) (ScaleQuantumToAny((Quantum) quantum,
                    range) << 22);
                  break;
                }
                case 1:
                {
                  pixel|=(size_t) (ScaleQuantumToAny((Quantum) quantum,
                    range) << 12);
                  break;
                }
                case 2:
                {
                  pixel|=(size_t) (ScaleQuantumToAny((Quantum) quantum,
                    range) << 2);
                  q=PopLongPixel(quantum_info->endian,pixel,q);
                  pixel=0;
                  break;
                }
              }
              n++;
            }
            p+=GetPixelChannels(image);
            q+=quantum_info->pad;
          }
          break;
        }
      if (quantum_info->quantum == 32UL)
        {
          /* Packed, 32-bit quantum: one long-sized field per channel. */
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            pixel=(unsigned int) ScaleQuantumToAny(GetPixelRed(image,p),range);
            q=PopQuantumLongPixel(quantum_info,pixel,q);
            pixel=(unsigned int) ScaleQuantumToAny(GetPixelGreen(image,p),
              range);
            q=PopQuantumLongPixel(quantum_info,pixel,q);
            pixel=(unsigned int) ScaleQuantumToAny(GetPixelBlue(image,p),range);
            q=PopQuantumLongPixel(quantum_info,pixel,q);
            pixel=(unsigned int) ScaleQuantumToAny(GetPixelOpacity(image,p),
              range);
            q=PopQuantumLongPixel(quantum_info,pixel,q);
            p+=GetPixelChannels(image);
            q+=quantum_info->pad;
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        pixel=(unsigned int) ScaleQuantumToAny(GetPixelRed(image,p),range);
        q=PopQuantumPixel(quantum_info,pixel,q);
        pixel=(unsigned int) ScaleQuantumToAny(GetPixelGreen(image,p),range);
        q=PopQuantumPixel(quantum_info,pixel,q);
        pixel=(unsigned int) ScaleQuantumToAny(GetPixelBlue(image,p),range);
        q=PopQuantumPixel(quantum_info,pixel,q);
        pixel=(unsigned int) ScaleQuantumToAny(GetPixelOpacity(image,p),range);
        q=PopQuantumPixel(quantum_info,pixel,q);
        p+=GetPixelChannels(image);
        q+=quantum_info->pad;
      }
      break;
    }
    case 16:
    {
      unsigned short
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          /* Half-float encoding, B, G, R, O order. */
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            pixel=SinglePrecisionToHalf(QuantumScale*GetPixelBlue(image,p));
            q=PopShortPixel(quantum_info->endian,pixel,q);
            pixel=SinglePrecisionToHalf(QuantumScale*GetPixelGreen(image,p));
            q=PopShortPixel(quantum_info->endian,pixel,q);
            pixel=SinglePrecisionToHalf(QuantumScale*GetPixelRed(image,p));
            q=PopShortPixel(quantum_info->endian,pixel,q);
            pixel=SinglePrecisionToHalf(QuantumScale*GetPixelOpacity(image,p));
            q=PopShortPixel(quantum_info->endian,pixel,q);
            p+=GetPixelChannels(image);
            q+=quantum_info->pad;
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        pixel=ScaleQuantumToShort(GetPixelBlue(image,p));
        q=PopShortPixel(quantum_info->endian,pixel,q);
        pixel=ScaleQuantumToShort(GetPixelGreen(image,p));
        q=PopShortPixel(quantum_info->endian,pixel,q);
        pixel=ScaleQuantumToShort(GetPixelRed(image,p));
        q=PopShortPixel(quantum_info->endian,pixel,q);
        pixel=ScaleQuantumToShort(GetPixelOpacity(image,p));
        q=PopShortPixel(quantum_info->endian,pixel,q);
        p+=GetPixelChannels(image);
        q+=quantum_info->pad;
      }
      break;
    }
    case 32:
    {
      unsigned int
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            float
              float_pixel;

            q=PopFloatPixel(quantum_info,(float) GetPixelRed(image,p),q);
            q=PopFloatPixel(quantum_info,(float) GetPixelGreen(image,p),q);
            q=PopFloatPixel(quantum_info,(float) GetPixelBlue(image,p),q);
            float_pixel=(float) GetPixelOpacity(image,p);
            q=PopFloatPixel(quantum_info,float_pixel,q);
            p+=GetPixelChannels(image);
            q+=quantum_info->pad;
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        pixel=ScaleQuantumToLong(GetPixelBlue(image,p));
        q=PopLongPixel(quantum_info->endian,pixel,q);
        pixel=ScaleQuantumToLong(GetPixelGreen(image,p));
        q=PopLongPixel(quantum_info->endian,pixel,q);
        pixel=ScaleQuantumToLong(GetPixelRed(image,p));
        q=PopLongPixel(quantum_info->endian,pixel,q);
        pixel=ScaleQuantumToLong(GetPixelOpacity(image,p));
        q=PopLongPixel(quantum_info->endian,pixel,q);
        p+=GetPixelChannels(image);
        q+=quantum_info->pad;
      }
      break;
    }
    case 64:
    {
      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          double
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            q=PopDoublePixel(quantum_info,(double) GetPixelRed(image,p),q);
            q=PopDoublePixel(quantum_info,(double) GetPixelGreen(image,p),q);
            q=PopDoublePixel(quantum_info,(double) GetPixelBlue(image,p),q);
            pixel=(double) GetPixelOpacity(image,p);
            q=PopDoublePixel(quantum_info,pixel,q);
            p+=GetPixelChannels(image);
            q+=quantum_info->pad;
          }
          break;
        }
      /* Non-float 64-bit data falls through to the generic path below. */
    }
    default:
    {
      /* Generic path: arbitrary bit depth via PopQuantumPixel, B,G,R,O. */
      range=GetQuantumRange(quantum_info->depth);
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        q=PopQuantumPixel(quantum_info,ScaleQuantumToAny(GetPixelBlue(image,p),
          range),q);
        q=PopQuantumPixel(quantum_info,ScaleQuantumToAny(GetPixelGreen(image,p),
          range),q);
        q=PopQuantumPixel(quantum_info,ScaleQuantumToAny(GetPixelRed(image,p),
          range),q);
        q=PopQuantumPixel(quantum_info,ScaleQuantumToAny(GetPixelOpacity(image,p),
          range),q);
        p+=GetPixelChannels(image);
        q+=quantum_info->pad;
      }
      break;
    }
  }
}
"CWE-190"
]
| ImageMagick | 5af1dffa4b6ab984b5f13d1e91c95760d75f12a6 | 197,232,712,694,246,830,000,000,000,000,000,000,000 | 243 | outside the range of representable values of type 'unsigned char' (#3083)
Co-authored-by: Zhang Xiaohui <[email protected]> |
CImg<Tfloat> get_blur_guided(const CImg<t>& guide, const float radius, const float regularization) const {
if (!is_sameXYZ(guide))
throw CImgArgumentException(_cimg_instance
"blur_guided(): Invalid size for specified guide image (%u,%u,%u,%u,%p).",
cimg_instance,
guide._width,guide._height,guide._depth,guide._spectrum,guide._data);
if (is_empty() || !radius) return *this;
const int _radius = radius>=0?(int)radius:(int)(-radius*cimg::max(_width,_height,_depth)/100);
float _regularization = regularization;
if (regularization<0) {
T edge_min, edge_max = guide.max_min(edge_min);
if (edge_min==edge_max) return *this;
_regularization = -regularization*(edge_max - edge_min)/100;
}
_regularization = std::max(_regularization,0.01f);
const unsigned int psize = (unsigned int)(1 + 2*_radius);
CImg<Tfloat>
mean_p = get_blur_box(psize,true),
mean_I = guide.get_blur_box(psize,true).resize(mean_p),
cov_Ip = get_mul(guide).blur_box(psize,true)-=mean_p.get_mul(mean_I),
var_I = guide.get_sqr().blur_box(psize,true)-=mean_I.get_sqr(),
&a = cov_Ip.div(var_I+=_regularization),
&b = mean_p-=a.get_mul(mean_I);
a.blur_box(psize,true);
b.blur_box(psize,true);
return a.mul(guide)+=b;
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 245,589,363,981,706,330,000,000,000,000,000,000,000 | 27 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
R_API bool r_sys_tts(const char *txt, bool bg) {
int i;
r_return_val_if_fail (txt, false);
const char *says[] = {
"say", "termux-tts-speak", NULL
};
for (i = 0; says[i]; i++) {
char *sayPath = r_file_path (says[i]);
if (sayPath) {
char *line = r_str_replace (strdup (txt), "'", "\"", 1);
r_sys_cmdf ("\"%s\" '%s'%s", sayPath, line, bg? " &": "");
free (line);
free (sayPath);
return true;
}
}
return false;
} | 0 | [
"CWE-78"
]
| radare2 | 04edfa82c1f3fa2bc3621ccdad2f93bdbf00e4f9 | 212,809,080,512,070,000,000,000,000,000,000,000,000 | 18 | Fix command injection on PDB download (#16966)
* Fix r_sys_mkdirp with absolute path on Windows
* Fix build with --with-openssl
* Use RBuffer in r_socket_http_answer()
* r_socket_http_answer: Fix read for big responses
* Implement r_str_escape_sh()
* Cleanup r_socket_connect() on Windows
* Fix socket being created without a protocol
* Fix socket connect with SSL ##socket
* Use select() in r_socket_ready()
* Fix read failing if received only protocol answer
* Fix double-free
* r_socket_http_get: Fail if req. SSL with no support
* Follow redirects in r_socket_http_answer()
* Fix r_socket_http_get result length with R2_CURL=1
* Also follow redirects
* Avoid using curl for downloading PDBs
* Use r_socket_http_get() on UNIXs
* Use WinINet API on Windows for r_socket_http_get()
* Fix command injection
* Fix r_sys_cmd_str_full output for binary data
* Validate GUID on PDB download
* Pass depth to socket_http_get_recursive()
* Remove 'r_' and '__' from static function names
* Fix is_valid_guid
* Fix for comments |
TIFFRGBAImageOK(TIFF* tif, char emsg[1024])
{
TIFFDirectory* td = &tif->tif_dir;
uint16 photometric;
int colorchannels;
if (!tif->tif_decodestatus) {
sprintf(emsg, "Sorry, requested compression method is not configured");
return (0);
}
switch (td->td_bitspersample) {
case 1:
case 2:
case 4:
case 8:
case 16:
break;
default:
sprintf(emsg, "Sorry, can not handle images with %d-bit samples",
td->td_bitspersample);
return (0);
}
if (td->td_sampleformat == SAMPLEFORMAT_IEEEFP) {
sprintf(emsg, "Sorry, can not handle images with IEEE floating-point samples");
return (0);
}
colorchannels = td->td_samplesperpixel - td->td_extrasamples;
if (!TIFFGetField(tif, TIFFTAG_PHOTOMETRIC, &photometric)) {
switch (colorchannels) {
case 1:
photometric = PHOTOMETRIC_MINISBLACK;
break;
case 3:
photometric = PHOTOMETRIC_RGB;
break;
default:
sprintf(emsg, "Missing needed %s tag", photoTag);
return (0);
}
}
switch (photometric) {
case PHOTOMETRIC_MINISWHITE:
case PHOTOMETRIC_MINISBLACK:
case PHOTOMETRIC_PALETTE:
if (td->td_planarconfig == PLANARCONFIG_CONTIG
&& td->td_samplesperpixel != 1
&& td->td_bitspersample < 8 ) {
sprintf(emsg,
"Sorry, can not handle contiguous data with %s=%d, "
"and %s=%d and Bits/Sample=%d",
photoTag, photometric,
"Samples/pixel", td->td_samplesperpixel,
td->td_bitspersample);
return (0);
}
/*
* We should likely validate that any extra samples are either
* to be ignored, or are alpha, and if alpha we should try to use
* them. But for now we won't bother with this.
*/
break;
case PHOTOMETRIC_YCBCR:
/*
* TODO: if at all meaningful and useful, make more complete
* support check here, or better still, refactor to let supporting
* code decide whether there is support and what meaningfull
* error to return
*/
break;
case PHOTOMETRIC_RGB:
if (colorchannels < 3) {
sprintf(emsg, "Sorry, can not handle RGB image with %s=%d",
"Color channels", colorchannels);
return (0);
}
break;
case PHOTOMETRIC_SEPARATED:
{
uint16 inkset;
TIFFGetFieldDefaulted(tif, TIFFTAG_INKSET, &inkset);
if (inkset != INKSET_CMYK) {
sprintf(emsg,
"Sorry, can not handle separated image with %s=%d",
"InkSet", inkset);
return 0;
}
if (td->td_samplesperpixel < 4) {
sprintf(emsg,
"Sorry, can not handle separated image with %s=%d",
"Samples/pixel", td->td_samplesperpixel);
return 0;
}
break;
}
case PHOTOMETRIC_LOGL:
if (td->td_compression != COMPRESSION_SGILOG) {
sprintf(emsg, "Sorry, LogL data must have %s=%d",
"Compression", COMPRESSION_SGILOG);
return (0);
}
break;
case PHOTOMETRIC_LOGLUV:
if (td->td_compression != COMPRESSION_SGILOG &&
td->td_compression != COMPRESSION_SGILOG24) {
sprintf(emsg, "Sorry, LogLuv data must have %s=%d or %d",
"Compression", COMPRESSION_SGILOG, COMPRESSION_SGILOG24);
return (0);
}
if (td->td_planarconfig != PLANARCONFIG_CONTIG) {
sprintf(emsg, "Sorry, can not handle LogLuv images with %s=%d",
"Planarconfiguration", td->td_planarconfig);
return (0);
}
if ( td->td_samplesperpixel != 3 || colorchannels != 3 ) {
sprintf(emsg,
"Sorry, can not handle image with %s=%d, %s=%d",
"Samples/pixel", td->td_samplesperpixel,
"colorchannels", colorchannels);
return 0;
}
break;
case PHOTOMETRIC_CIELAB:
if ( td->td_samplesperpixel != 3 || colorchannels != 3 || td->td_bitspersample != 8 ) {
sprintf(emsg,
"Sorry, can not handle image with %s=%d, %s=%d and %s=%d",
"Samples/pixel", td->td_samplesperpixel,
"colorchannels", colorchannels,
"Bits/sample", td->td_bitspersample);
return 0;
}
break;
default:
sprintf(emsg, "Sorry, can not handle image with %s=%d",
photoTag, photometric);
return (0);
}
return (1);
} | 0 | [
"CWE-20"
]
| libtiff | 48780b4fcc425cddc4ef8ffdf536f96a0d1b313b | 132,556,542,486,698,310,000,000,000,000,000,000,000 | 138 | * libtiff/tif_getimage.c: add explicit uint32 cast in putagreytile to
avoid UndefinedBehaviorSanitizer warning.
Patch by Nicolás Peña.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2658 |
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
{
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_map *em;
u64 len = PAGE_CACHE_SIZE;
/*
* hopefully we have this extent in the tree already, try without
* the full extent lock
*/
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
read_unlock(&em_tree->lock);
if (!em) {
/* get the big lock and read metadata off disk */
lock_extent(io_tree, start, start + len - 1);
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
unlock_extent(io_tree, start, start + len - 1);
if (IS_ERR(em))
return NULL;
}
return em;
} | 0 | [
"CWE-310"
]
| linux-2.6 | 9c52057c698fb96f8f07e7a4bcf4801a092bda89 | 225,286,477,771,727,750,000,000,000,000,000,000,000 | 27 | Btrfs: fix hash overflow handling
The handling for directory crc hash overflows was fairly obscure,
split_leaf returns EOVERFLOW when we try to extend the item and that is
supposed to bubble up to userland. For a while it did so, but along the
way we added better handling of errors and forced the FS readonly if we
hit IO errors during the directory insertion.
Along the way, we started testing only for EEXIST and the EOVERFLOW case
was dropped. The end result is that we may force the FS readonly if we
catch a directory hash bucket overflow.
This fixes a few problem spots. First I add tests for EOVERFLOW in the
places where we can safely just return the error up the chain.
btrfs_rename is harder though, because it tries to insert the new
directory item only after it has already unlinked anything the rename
was going to overwrite. Rather than adding very complex logic, I added
a helper to test for the hash overflow case early while it is still safe
to bail out.
Snapshot and subvolume creation had a similar problem, so they are using
the new helper now too.
Signed-off-by: Chris Mason <[email protected]>
Reported-by: Pascal Junod <[email protected]> |
CtPtr ProtocolV2::handle_auth_signature(ceph::bufferlist &payload)
{
ldout(cct, 20) << __func__
<< " payload.length()=" << payload.length() << dendl;
if (state != AUTH_ACCEPTING_SIGN && state != AUTH_CONNECTING_SIGN) {
lderr(cct) << __func__
<< " pre-auth verification signature seen in wrong state!"
<< dendl;
return _fault();
}
auto sig_frame = AuthSignatureFrame::Decode(payload);
const auto actual_tx_sig = auth_meta->session_key.empty() ?
sha256_digest_t() : auth_meta->session_key.hmac_sha256(cct, pre_auth.txbuf);
if (sig_frame.signature() != actual_tx_sig) {
ldout(cct, 2) << __func__ << " pre-auth signature mismatch"
<< " actual_tx_sig=" << actual_tx_sig
<< " sig_frame.signature()=" << sig_frame.signature()
<< dendl;
return _fault();
} else {
ldout(cct, 20) << __func__ << " pre-auth signature success"
<< " sig_frame.signature()=" << sig_frame.signature()
<< dendl;
pre_auth.txbuf.clear();
}
if (state == AUTH_ACCEPTING_SIGN) {
// server had sent AuthDone and client responded with correct pre-auth
// signature. we can start accepting new sessions/reconnects.
state = SESSION_ACCEPTING;
return CONTINUE(read_frame);
} else if (state == AUTH_CONNECTING_SIGN) {
// this happened at client side
return finish_client_auth();
} else {
ceph_assert_always("state corruption" == nullptr);
}
} | 0 | [
"CWE-323"
]
| ceph | 47c7e623546a7a33bd6bbddfb899fa9c9a40f40a | 119,019,157,302,476,100,000,000,000,000,000,000,000 | 41 | msg/async/ProtocolV2: avoid AES-GCM nonce reuse vulnerabilities
The secure mode uses AES-128-GCM with 96-bit nonces consisting of a
32-bit counter followed by a 64-bit salt. The counter is incremented
after processing each frame, the salt is fixed for the duration of
the session. Both are initialized from the session key generated
during session negotiation, so the counter starts with essentially
a random value. It is allowed to wrap, and, after 2**32 frames, it
repeats, resulting in nonce reuse (the actual sequence numbers that
the messenger works with are 64-bit, so the session continues on).
Because of how GCM works, this completely breaks both confidentiality
and integrity aspects of the secure mode. A single nonce reuse reveals
the XOR of two plaintexts and almost completely reveals the subkey
used for producing authentication tags. After a few nonces get used
twice, all confidentiality and integrity goes out the window and the
attacker can potentially encrypt-authenticate plaintext of their
choice.
We can't easily change the nonce format to extend the counter to
64 bits (and possibly XOR it with a longer salt). Instead, just
remember the initial nonce and cut the session before it repeats,
forcing renegotiation.
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Radoslaw Zarzynski <[email protected]>
Reviewed-by: Sage Weil <[email protected]>
Conflicts:
src/msg/async/ProtocolV2.cc [ context: commit 697aafa2aad2
("msg/async/ProtocolV2: remove unused parameter") not in
nautilus ]
src/msg/async/ProtocolV2.h [ context: commit ed3ec4c01d17
("msg: Build target 'common' without using namespace in
headers") not in nautilus ] |
ipp_get_code(const char *value, /* I - Locale/charset string */
char *buffer, /* I - String buffer */
size_t bufsize) /* I - Size of string buffer */
{
char *bufptr, /* Pointer into buffer */
*bufend; /* End of buffer */
/*
* Convert values to lowercase and change _ to - as needed...
*/
for (bufptr = buffer, bufend = buffer + bufsize - 1;
*value && bufptr < bufend;
value ++)
if (*value == '_')
*bufptr++ = '-';
else
*bufptr++ = (char)_cups_tolower(*value);
*bufptr = '\0';
/*
* Return the converted string...
*/
return (buffer);
} | 0 | [
"CWE-120"
]
| cups | f24e6cf6a39300ad0c3726a41a4aab51ad54c109 | 192,127,867,907,374,800,000,000,000,000,000,000,000 | 28 | Fix multiple security/disclosure issues:
- CVE-2019-8696 and CVE-2019-8675: Fixed SNMP buffer overflows (rdar://51685251)
- Fixed IPP buffer overflow (rdar://50035411)
- Fixed memory disclosure issue in the scheduler (rdar://51373853)
- Fixed DoS issues in the scheduler (rdar://51373929) |
CAMLexport double caml_deserialize_float_8(void)
{
double f;
caml_deserialize_block_float_8(&f, 1);
return f;
} | 0 | [
"CWE-200"
]
| ocaml | 659615c7b100a89eafe6253e7a5b9d84d0e8df74 | 163,712,281,246,871,190,000,000,000,000,000,000,000 | 6 | fix PR#7003 and a few other bugs caused by misuse of Int_val
git-svn-id: http://caml.inria.fr/svn/ocaml/trunk@16525 f963ae5c-01c2-4b8c-9fe0-0dff7051ff02 |
void CCrypto::GenerateHMAC256( const uint8 *pubData, uint32 cubData, const uint8 *pubKey, uint32 cubKey, SHA256Digest_t *pOutputDigest )
{
VPROF_BUDGET( "CCrypto::GenerateHMAC256", VPROF_BUDGETGROUP_ENCRYPTION );
Assert( pubData );
Assert( cubData > 0 );
Assert( pubKey );
Assert( cubKey > 0 );
Assert( pOutputDigest );
Assert( sizeof(*pOutputDigest) == crypto_auth_hmacsha256_BYTES );
Assert( cubKey == crypto_auth_hmacsha256_KEYBYTES );
crypto_auth_hmacsha256( *pOutputDigest, pubData, cubData, pubKey );
} | 0 | [
"CWE-787"
]
| GameNetworkingSockets | bea84e2844b647532a9b7fbc3a6a8989d66e49e3 | 87,019,270,122,276,600,000,000,000,000,000,000,000 | 14 | Check if output buffer is too small.
It really seems like libsodium (whose entire purpose is to make crypto
idiot-proof) making me mess with these details is a flaw in the API design.
Also, correct Hungarian. |
const T& min_max(t& max_val) const {
if (is_empty())
throw CImgInstanceException(_cimg_instance
"min_max(): Empty instance.",
cimg_instance);
const T *ptr_min = _data;
T min_value = *ptr_min, max_value = min_value;
cimg_for(*this,ptrs,T) {
const T val = *ptrs;
if (val<min_value) { min_value = val; ptr_min = ptrs; }
if (val>max_value) max_value = val;
}
max_val = (t)max_value;
return *ptr_min;
} | 0 | [
"CWE-770"
]
| cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 291,622,749,673,720,460,000,000,000,000,000,000,000 | 15 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
int ret;
int copylen;
ret = -EOPNOTSUPP;
if (m->msg_flags&MSG_OOB)
goto read_error;
m->msg_namelen = 0;
skb = skb_recv_datagram(sk, flags, 0 , &ret);
if (!skb)
goto read_error;
copylen = skb->len;
if (len < copylen) {
m->msg_flags |= MSG_TRUNC;
copylen = len;
}
ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
if (ret)
goto out_free;
ret = (flags & MSG_TRUNC) ? skb->len : copylen;
out_free:
skb_free_datagram(sk, skb);
caif_check_flow_release(sk);
return ret;
read_error:
return ret;
} | 0 | [
"CWE-200"
]
| linux | 2d6fbfe733f35c6b355c216644e08e149c61b271 | 146,252,661,450,457,840,000,000,000,000,000,000,000 | 37 | caif: Fix missing msg_namelen update in caif_seqpkt_recvmsg()
The current code does not fill the msg_name member in case it is set.
It also does not set the msg_namelen member to 0 and therefore makes
net/socket.c leak the local, uninitialized sockaddr_storage variable
to userland -- 128 bytes of kernel stack memory.
Fix that by simply setting msg_namelen to 0 as obviously nobody cared
about caif_seqpkt_recvmsg() not filling the msg_name in case it was
set.
Cc: Sjur Braendeland <[email protected]>
Signed-off-by: Mathias Krause <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
__shadow_walk_next(iterator, *iterator->sptep);
} | 0 | [
"CWE-476"
]
| linux | 9f46c187e2e680ecd9de7983e4d081c3391acc76 | 234,292,231,774,891,240,000,000,000,000,000,000,000 | 4 | KVM: x86/mmu: fix NULL pointer dereference on guest INVPCID
With shadow paging enabled, the INVPCID instruction results in a call
to kvm_mmu_invpcid_gva. If INVPCID is executed with CR0.PG=0, the
invlpg callback is not set and the result is a NULL pointer dereference.
Fix it trivially by checking for mmu->invlpg before every call.
There are other possibilities:
- check for CR0.PG, because KVM (like all Intel processors after P5)
flushes guest TLB on CR0.PG changes so that INVPCID/INVLPG are a
nop with paging disabled
- check for EFER.LMA, because KVM syncs and flushes when switching
MMU contexts outside of 64-bit mode
All of these are tricky, go for the simple solution. This is CVE-2022-1789.
Reported-by: Yongkang Jia <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
int main(int argc, char** argv)
{
/* Kernel starts us with all fd's closed.
* But it's dangerous:
* fprintf(stderr) can dump messages into random fds, etc.
* Ensure that if any of fd 0,1,2 is closed, we open it to /dev/null.
*/
int fd = xopen("/dev/null", O_RDWR);
while (fd < 2)
fd = xdup(fd);
if (fd > 2)
close(fd);
if (argc < 8)
{
/* percent specifier: %s %c %p %u %g %t %e %h */
/* argv: [0] [1] [2] [3] [4] [5] [6] [7] [8]*/
error_msg_and_die("Usage: %s SIGNO CORE_SIZE_LIMIT PID UID GID TIME BINARY_NAME [HOSTNAME]", argv[0]);
}
/* Not needed on 2.6.30.
* At least 2.6.18 has a bug where
* argv[1] = "SIGNO CORE_SIZE_LIMIT PID ..."
* argv[2] = "CORE_SIZE_LIMIT PID ..."
* and so on. Fixing it:
*/
if (strchr(argv[1], ' '))
{
int i;
for (i = 1; argv[i]; i++)
{
strchrnul(argv[i], ' ')[0] = '\0';
}
}
logmode = LOGMODE_JOURNAL;
/* Parse abrt.conf */
load_abrt_conf();
/* ... and plugins/CCpp.conf */
bool setting_MakeCompatCore;
bool setting_SaveBinaryImage;
{
map_string_t *settings = new_map_string();
load_abrt_plugin_conf_file("CCpp.conf", settings);
const char *value;
value = get_map_string_item_or_NULL(settings, "MakeCompatCore");
setting_MakeCompatCore = value && string_to_bool(value);
value = get_map_string_item_or_NULL(settings, "SaveBinaryImage");
setting_SaveBinaryImage = value && string_to_bool(value);
value = get_map_string_item_or_NULL(settings, "VerboseLog");
if (value)
g_verbose = xatoi_positive(value);
free_map_string(settings);
}
errno = 0;
const char* signal_str = argv[1];
int signal_no = xatoi_positive(signal_str);
off_t ulimit_c = strtoull(argv[2], NULL, 10);
if (ulimit_c < 0) /* unlimited? */
{
/* set to max possible >0 value */
ulimit_c = ~((off_t)1 << (sizeof(off_t)*8-1));
}
const char *pid_str = argv[3];
pid_t pid = xatoi_positive(argv[3]);
uid_t uid = xatoi_positive(argv[4]);
if (errno || pid <= 0)
{
perror_msg_and_die("PID '%s' or limit '%s' is bogus", argv[3], argv[2]);
}
{
char *s = xmalloc_fopen_fgetline_fclose(VAR_RUN"/abrt/saved_core_pattern");
/* If we have a saved pattern and it's not a "|PROG ARGS" thing... */
if (s && s[0] != '|')
core_basename = s;
else
free(s);
}
struct utsname uts;
if (!argv[8]) /* no HOSTNAME? */
{
uname(&uts);
argv[8] = uts.nodename;
}
char path[PATH_MAX];
int src_fd_binary = -1;
char *executable = get_executable(pid, setting_SaveBinaryImage ? &src_fd_binary : NULL);
if (executable && strstr(executable, "/abrt-hook-ccpp"))
{
error_msg_and_die("PID %lu is '%s', not dumping it to avoid recursion",
(long)pid, executable);
}
user_pwd = get_cwd(pid); /* may be NULL on error */
log_notice("user_pwd:'%s'", user_pwd);
sprintf(path, "/proc/%lu/status", (long)pid);
proc_pid_status = xmalloc_xopen_read_close(path, /*maxsz:*/ NULL);
uid_t fsuid = uid;
uid_t tmp_fsuid = get_fsuid();
int suid_policy = dump_suid_policy();
if (tmp_fsuid != uid)
{
/* use root for suided apps unless it's explicitly set to UNSAFE */
fsuid = 0;
if (suid_policy == DUMP_SUID_UNSAFE)
{
fsuid = tmp_fsuid;
}
}
/* Open a fd to compat coredump, if requested and is possible */
if (setting_MakeCompatCore && ulimit_c != 0)
/* note: checks "user_pwd == NULL" inside; updates core_basename */
user_core_fd = open_user_core(uid, fsuid, pid, &argv[1]);
if (executable == NULL)
{
/* readlink on /proc/$PID/exe failed, don't create abrt dump dir */
error_msg("Can't read /proc/%lu/exe link", (long)pid);
goto create_user_core;
}
const char *signame = NULL;
switch (signal_no)
{
case SIGILL : signame = "ILL" ; break;
case SIGFPE : signame = "FPE" ; break;
case SIGSEGV: signame = "SEGV"; break;
case SIGBUS : signame = "BUS" ; break; //Bus error (bad memory access)
case SIGABRT: signame = "ABRT"; break; //usually when abort() was called
// We have real-world reports from users who see buggy programs
// dying with SIGTRAP, uncommented it too:
case SIGTRAP: signame = "TRAP"; break; //Trace/breakpoint trap
// These usually aren't caused by bugs:
//case SIGQUIT: signame = "QUIT"; break; //Quit from keyboard
//case SIGSYS : signame = "SYS" ; break; //Bad argument to routine (SVr4)
//case SIGXCPU: signame = "XCPU"; break; //CPU time limit exceeded (4.2BSD)
//case SIGXFSZ: signame = "XFSZ"; break; //File size limit exceeded (4.2BSD)
default: goto create_user_core; // not a signal we care about
}
if (!daemon_is_ok())
{
/* not an error, exit with exit code 0 */
log("abrtd is not running. If it crashed, "
"/proc/sys/kernel/core_pattern contains a stale value, "
"consider resetting it to 'core'"
);
goto create_user_core;
}
if (g_settings_nMaxCrashReportsSize > 0)
{
/* If free space is less than 1/4 of MaxCrashReportsSize... */
if (low_free_space(g_settings_nMaxCrashReportsSize, g_settings_dump_location))
goto create_user_core;
}
/* Check /var/tmp/abrt/last-ccpp marker, do not dump repeated crashes
* if they happen too often. Else, write new marker value.
*/
snprintf(path, sizeof(path), "%s/last-ccpp", g_settings_dump_location);
if (check_recent_crash_file(path, executable))
{
/* It is a repeating crash */
goto create_user_core;
}
const char *last_slash = strrchr(executable, '/');
if (last_slash && strncmp(++last_slash, "abrt", 4) == 0)
{
/* If abrtd/abrt-foo crashes, we don't want to create a _directory_,
* since that can make new copy of abrtd to process it,
* and maybe crash again...
* Unlike dirs, mere files are ignored by abrtd.
*/
snprintf(path, sizeof(path), "%s/%s-coredump", g_settings_dump_location, last_slash);
int abrt_core_fd = xopen3(path, O_WRONLY | O_CREAT | O_TRUNC, 0600);
off_t core_size = copyfd_eof(STDIN_FILENO, abrt_core_fd, COPYFD_SPARSE);
if (core_size < 0 || fsync(abrt_core_fd) != 0)
{
unlink(path);
/* copyfd_eof logs the error including errno string,
* but it does not log file name */
error_msg_and_die("Error saving '%s'", path);
}
log("Saved core dump of pid %lu (%s) to %s (%llu bytes)", (long)pid, executable, path, (long long)core_size);
return 0;
}
unsigned path_len = snprintf(path, sizeof(path), "%s/ccpp-%s-%lu.new",
g_settings_dump_location, iso_date_string(NULL), (long)pid);
if (path_len >= (sizeof(path) - sizeof("/"FILENAME_COREDUMP)))
{
goto create_user_core;
}
/* use fsuid instead of uid, so we don't expose any sensitive
* information of suided app in /var/tmp/abrt
*/
dd = dd_create(path, fsuid, DEFAULT_DUMP_DIR_MODE);
if (dd)
{
char *rootdir = get_rootdir(pid);
dd_create_basic_files(dd, fsuid, (rootdir && strcmp(rootdir, "/") != 0) ? rootdir : NULL);
char source_filename[sizeof("/proc/%lu/somewhat_long_name") + sizeof(long)*3];
int source_base_ofs = sprintf(source_filename, "/proc/%lu/smaps", (long)pid);
source_base_ofs -= strlen("smaps");
char *dest_filename = concat_path_file(dd->dd_dirname, "also_somewhat_longish_name");
char *dest_base = strrchr(dest_filename, '/') + 1;
// Disabled for now: /proc/PID/smaps tends to be BIG,
// and not much more informative than /proc/PID/maps:
//copy_file(source_filename, dest_filename, 0640);
//chown(dest_filename, dd->dd_uid, dd->dd_gid);
strcpy(source_filename + source_base_ofs, "maps");
strcpy(dest_base, FILENAME_MAPS);
copy_file(source_filename, dest_filename, DEFAULT_DUMP_DIR_MODE);
IGNORE_RESULT(chown(dest_filename, dd->dd_uid, dd->dd_gid));
strcpy(source_filename + source_base_ofs, "limits");
strcpy(dest_base, FILENAME_LIMITS);
copy_file(source_filename, dest_filename, DEFAULT_DUMP_DIR_MODE);
IGNORE_RESULT(chown(dest_filename, dd->dd_uid, dd->dd_gid));
strcpy(source_filename + source_base_ofs, "cgroup");
strcpy(dest_base, FILENAME_CGROUP);
copy_file(source_filename, dest_filename, DEFAULT_DUMP_DIR_MODE);
IGNORE_RESULT(chown(dest_filename, dd->dd_uid, dd->dd_gid));
strcpy(dest_base, FILENAME_OPEN_FDS);
if (dump_fd_info(dest_filename, source_filename, source_base_ofs))
IGNORE_RESULT(chown(dest_filename, dd->dd_uid, dd->dd_gid));
free(dest_filename);
dd_save_text(dd, FILENAME_ANALYZER, "CCpp");
dd_save_text(dd, FILENAME_TYPE, "CCpp");
dd_save_text(dd, FILENAME_EXECUTABLE, executable);
dd_save_text(dd, FILENAME_PID, pid_str);
dd_save_text(dd, FILENAME_PROC_PID_STATUS, proc_pid_status);
if (user_pwd)
dd_save_text(dd, FILENAME_PWD, user_pwd);
if (rootdir)
{
if (strcmp(rootdir, "/") != 0)
dd_save_text(dd, FILENAME_ROOTDIR, rootdir);
}
char *reason = xasprintf("%s killed by SIG%s",
last_slash, signame ? signame : signal_str);
dd_save_text(dd, FILENAME_REASON, reason);
free(reason);
char *cmdline = get_cmdline(pid);
dd_save_text(dd, FILENAME_CMDLINE, cmdline ? : "");
free(cmdline);
char *environ = get_environ(pid);
dd_save_text(dd, FILENAME_ENVIRON, environ ? : "");
free(environ);
char *fips_enabled = xmalloc_fopen_fgetline_fclose("/proc/sys/crypto/fips_enabled");
if (fips_enabled)
{
if (strcmp(fips_enabled, "0") != 0)
dd_save_text(dd, "fips_enabled", fips_enabled);
free(fips_enabled);
}
dd_save_text(dd, FILENAME_ABRT_VERSION, VERSION);
if (src_fd_binary > 0)
{
strcpy(path + path_len, "/"FILENAME_BINARY);
int dst_fd = create_or_die(path);
off_t sz = copyfd_eof(src_fd_binary, dst_fd, COPYFD_SPARSE);
if (fsync(dst_fd) != 0 || close(dst_fd) != 0 || sz < 0)
{
dd_delete(dd);
error_msg_and_die("Error saving '%s'", path);
}
close(src_fd_binary);
}
strcpy(path + path_len, "/"FILENAME_COREDUMP);
int abrt_core_fd = create_or_die(path);
/* We write both coredumps at once.
* We can't write user coredump first, since it might be truncated
* and thus can't be copied and used as abrt coredump;
* and if we write abrt coredump first and then copy it as user one,
* then we have a race when process exits but coredump does not exist yet:
* $ echo -e '#include<signal.h>\nmain(){raise(SIGSEGV);}' | gcc -o test -x c -
* $ rm -f core*; ulimit -c unlimited; ./test; ls -l core*
* 21631 Segmentation fault (core dumped) ./test
* ls: cannot access core*: No such file or directory <=== BAD
*/
off_t core_size = copyfd_sparse(STDIN_FILENO, abrt_core_fd, user_core_fd, ulimit_c);
if (fsync(abrt_core_fd) != 0 || close(abrt_core_fd) != 0 || core_size < 0)
{
unlink(path);
dd_delete(dd);
if (user_core_fd >= 0)
{
xchdir(user_pwd);
unlink(core_basename);
}
/* copyfd_sparse logs the error including errno string,
* but it does not log file name */
error_msg_and_die("Error writing '%s'", path);
}
if (user_core_fd >= 0
/* error writing user coredump? */
&& (fsync(user_core_fd) != 0 || close(user_core_fd) != 0
/* user coredump is too big? */
|| (ulimit_c == 0 /* paranoia */ || core_size > ulimit_c)
)
) {
/* nuke it (silently) */
xchdir(user_pwd);
unlink(core_basename);
}
/* Save JVM crash log if it exists. (JVM's coredump per se
* is nearly useless for JVM developers)
*/
{
char *java_log = xasprintf("/tmp/jvm-%lu/hs_error.log", (long)pid);
int src_fd = open(java_log, O_RDONLY);
free(java_log);
/* If we couldn't open the error log in /tmp directory we can try to
* read the log from the current directory. It may produce AVC, it
* may produce some error log but all these are expected.
*/
if (src_fd < 0)
{
java_log = xasprintf("%s/hs_err_pid%lu.log", user_pwd, (long)pid);
src_fd = open(java_log, O_RDONLY);
free(java_log);
}
if (src_fd >= 0)
{
strcpy(path + path_len, "/hs_err.log");
int dst_fd = create_or_die(path);
off_t sz = copyfd_eof(src_fd, dst_fd, COPYFD_SPARSE);
if (close(dst_fd) != 0 || sz < 0)
{
dd_delete(dd);
error_msg_and_die("Error saving '%s'", path);
}
close(src_fd);
}
}
/* We close dumpdir before we start catering for crash storm case.
* Otherwise, delete_dump_dir's from other concurrent
* CCpp's won't be able to delete our dump (their delete_dump_dir
* will wait for us), and we won't be able to delete their dumps.
* Classic deadlock.
*/
dd_close(dd);
path[path_len] = '\0'; /* path now contains only directory name */
char *newpath = xstrndup(path, path_len - (sizeof(".new")-1));
if (rename(path, newpath) == 0)
strcpy(path, newpath);
free(newpath);
log("Saved core dump of pid %lu (%s) to %s (%llu bytes)", (long)pid, executable, path, (long long)core_size);
notify_new_path(path);
/* rhbz#539551: "abrt going crazy when crashing process is respawned" */
if (g_settings_nMaxCrashReportsSize > 0)
{
/* x1.25 and round up to 64m: go a bit up, so that usual in-daemon trimming
* kicks in first, and we don't "fight" with it:
*/
unsigned maxsize = g_settings_nMaxCrashReportsSize + g_settings_nMaxCrashReportsSize / 4;
maxsize |= 63;
trim_problem_dirs(g_settings_dump_location, maxsize * (double)(1024*1024), path);
}
free(rootdir);
return 0;
}
/* We didn't create abrt dump, but may need to create compat coredump */
create_user_core:
if (user_core_fd >= 0)
{
off_t core_size = copyfd_size(STDIN_FILENO, user_core_fd, ulimit_c, COPYFD_SPARSE);
if (fsync(user_core_fd) != 0 || close(user_core_fd) != 0 || core_size < 0)
{
/* perror first, otherwise unlink may trash errno */
perror_msg("Error writing '%s'", full_core_basename);
xchdir(user_pwd);
unlink(core_basename);
return 1;
}
if (ulimit_c == 0 || core_size > ulimit_c)
{
xchdir(user_pwd);
unlink(core_basename);
return 1;
}
log("Saved core dump of pid %lu to %s (%llu bytes)", (long)pid, full_core_basename, (long long)core_size);
}
return 0;
} | 1 | [
"CWE-59"
]
| abrt | 80408e9e24a1c10f85fd969e1853e0f192157f92 | 273,047,294,329,773,340,000,000,000,000,000,000,000 | 424 | ccpp: fix symlink race conditions
Fix copy & chown race conditions
Related: #1211835
Signed-off-by: Jakub Filak <[email protected]> |
static inline Quantum ScaleCharToQuantum(const unsigned char value)
{
return((Quantum) (72340172838076673.0*value));
} | 0 | [
"CWE-190"
]
| ImageMagick | f60d59cc3a7e3402d403361e0985ffa56f746a82 | 91,145,712,137,706,170,000,000,000,000,000,000,000 | 4 | https://github.com/ImageMagick/ImageMagick/issues/1727 |
ldns_rr_set_pop_rr(ldns_rr_list *rr_list)
{
return ldns_rr_list_pop_rr(rr_list);
} | 0 | [
"CWE-415"
]
| ldns | 070b4595981f48a21cc6b4f5047fdc2d09d3da91 | 213,255,606,641,436,660,000,000,000,000,000,000,000 | 4 | CAA and URI |
folder_uncompressed_size(struct _7z_folder *f)
{
int n = (int)f->numOutStreams;
unsigned pairs = (unsigned)f->numBindPairs;
while (--n >= 0) {
unsigned i;
for (i = 0; i < pairs; i++) {
if (f->bindPairs[i].outIndex == (uint64_t)n)
break;
}
if (i >= pairs)
return (f->unPackSize[n]);
}
return (0);
} | 0 | [
"CWE-190",
"CWE-125"
]
| libarchive | e79ef306afe332faf22e9b442a2c6b59cb175573 | 74,952,978,807,657,220,000,000,000,000,000,000,000 | 16 | Issue #718: Fix TALOS-CAN-152
If a 7-Zip archive declares a rediculously large number of substreams,
it can overflow an internal counter, leading a subsequent memory
allocation to be too small for the substream data.
Thanks to the Open Source and Threat Intelligence project at Cisco
for reporting this issue. |
static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh)
{
struct fib6_config cfg;
int err;
err = rtm_to_fib6_config(skb, nlh, &cfg);
if (err < 0)
return err;
if (cfg.fc_mp)
return ip6_route_multipath(&cfg, 0);
else
return ip6_route_del(&cfg);
} | 0 | [
"CWE-119"
]
| net | c88507fbad8055297c1d1e21e599f46960cbee39 | 115,885,752,946,761,890,000,000,000,000,000,000,000 | 14 | ipv6: don't set DST_NOCOUNT for remotely added routes
DST_NOCOUNT should only be used if an authorized user adds routes
locally. In case of routes which are added on behalf of router
advertisments this flag must not get used as it allows an unlimited
number of routes getting added remotely.
Signed-off-by: Sabrina Dubroca <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
struct mount *mnt;
if (read_seqretry(&mount_lock, seq))
return 1;
if (bastard == NULL)
return 0;
mnt = real_mount(bastard);
mnt_add_count(mnt, 1);
if (likely(!read_seqretry(&mount_lock, seq)))
return 0;
if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
mnt_add_count(mnt, -1);
return 1;
}
return -1;
} | 0 | [
"CWE-400",
"CWE-703"
]
| linux | d29216842a85c7970c536108e093963f02714498 | 100,057,202,523,669,970,000,000,000,000,000,000,000 | 17 | mnt: Add a per mount namespace limit on the number of mounts
CAI Qian <[email protected]> pointed out that the semantics
of shared subtrees make it possible to create an exponentially
increasing number of mounts in a mount namespace.
mkdir /tmp/1 /tmp/2
mount --make-rshared /
for i in $(seq 1 20) ; do mount --bind /tmp/1 /tmp/2 ; done
Will create create 2^20 or 1048576 mounts, which is a practical problem
as some people have managed to hit this by accident.
As such CVE-2016-6213 was assigned.
Ian Kent <[email protected]> described the situation for autofs users
as follows:
> The number of mounts for direct mount maps is usually not very large because of
> the way they are implemented, large direct mount maps can have performance
> problems. There can be anywhere from a few (likely case a few hundred) to less
> than 10000, plus mounts that have been triggered and not yet expired.
>
> Indirect mounts have one autofs mount at the root plus the number of mounts that
> have been triggered and not yet expired.
>
> The number of autofs indirect map entries can range from a few to the common
> case of several thousand and in rare cases up to between 30000 and 50000. I've
> not heard of people with maps larger than 50000 entries.
>
> The larger the number of map entries the greater the possibility for a large
> number of active mounts so it's not hard to expect cases of a 1000 or somewhat
> more active mounts.
So I am setting the default number of mounts allowed per mount
namespace at 100,000. This is more than enough for any use case I
know of, but small enough to quickly stop an exponential increase
in mounts. Which should be perfect to catch misconfigurations and
malfunctioning programs.
For anyone who needs a higher limit this can be changed by writing
to the new /proc/sys/fs/mount-max sysctl.
Tested-by: CAI Qian <[email protected]>
Signed-off-by: "Eric W. Biederman" <[email protected]> |
mb_strnicmp(char_u *s1, char_u *s2, size_t nn)
{
int i, l;
int cdiff;
int n = (int)nn;
if (enc_utf8)
{
return utf_strnicmp(s1, s2, nn, nn);
}
else
{
for (i = 0; i < n; i += l)
{
if (s1[i] == NUL && s2[i] == NUL) // both strings end
return 0;
l = (*mb_ptr2len)(s1 + i);
if (l <= 1)
{
// Single byte: first check normally, then with ignore case.
if (s1[i] != s2[i])
{
cdiff = MB_TOLOWER(s1[i]) - MB_TOLOWER(s2[i]);
if (cdiff != 0)
return cdiff;
}
}
else
{
// For non-Unicode multi-byte don't ignore case.
if (l > n - i)
l = n - i;
cdiff = STRNCMP(s1 + i, s2 + i, l);
if (cdiff != 0)
return cdiff;
}
}
}
return 0;
} | 0 | [
"CWE-122",
"CWE-787"
]
| vim | f6d39c31d2177549a986d170e192d8351bd571e2 | 211,409,191,766,864,160,000,000,000,000,000,000,000 | 41 | patch 9.0.0220: invalid memory access with for loop over NULL string
Problem: Invalid memory access with for loop over NULL string.
Solution: Make sure mb_ptr2len() consistently returns zero for NUL. |
static int userfaultfd_register(struct userfaultfd_ctx *ctx,
unsigned long arg)
{
struct mm_struct *mm = ctx->mm;
struct vm_area_struct *vma, *prev, *cur;
int ret;
struct uffdio_register uffdio_register;
struct uffdio_register __user *user_uffdio_register;
unsigned long vm_flags, new_flags;
bool found;
bool basic_ioctls;
unsigned long start, end, vma_end;
user_uffdio_register = (struct uffdio_register __user *) arg;
ret = -EFAULT;
if (copy_from_user(&uffdio_register, user_uffdio_register,
sizeof(uffdio_register)-sizeof(__u64)))
goto out;
ret = -EINVAL;
if (!uffdio_register.mode)
goto out;
if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING|
UFFDIO_REGISTER_MODE_WP))
goto out;
vm_flags = 0;
if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
vm_flags |= VM_UFFD_MISSING;
if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
vm_flags |= VM_UFFD_WP;
/*
* FIXME: remove the below error constraint by
* implementing the wprotect tracking mode.
*/
ret = -EINVAL;
goto out;
}
ret = validate_range(mm, uffdio_register.range.start,
uffdio_register.range.len);
if (ret)
goto out;
start = uffdio_register.range.start;
end = start + uffdio_register.range.len;
ret = -ENOMEM;
if (!mmget_not_zero(mm))
goto out;
down_write(&mm->mmap_sem);
vma = find_vma_prev(mm, start, &prev);
if (!vma)
goto out_unlock;
/* check that there's at least one vma in the range */
ret = -EINVAL;
if (vma->vm_start >= end)
goto out_unlock;
/*
* If the first vma contains huge pages, make sure start address
* is aligned to huge page size.
*/
if (is_vm_hugetlb_page(vma)) {
unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
if (start & (vma_hpagesize - 1))
goto out_unlock;
}
/*
* Search for not compatible vmas.
*/
found = false;
basic_ioctls = false;
for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
cond_resched();
BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
!!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
/* check not compatible vmas */
ret = -EINVAL;
if (!vma_can_userfault(cur))
goto out_unlock;
/*
* If this vma contains ending address, and huge pages
* check alignment.
*/
if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
end > cur->vm_start) {
unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
ret = -EINVAL;
if (end & (vma_hpagesize - 1))
goto out_unlock;
}
/*
* Check that this vma isn't already owned by a
* different userfaultfd. We can't allow more than one
* userfaultfd to own a single vma simultaneously or we
* wouldn't know which one to deliver the userfaults to.
*/
ret = -EBUSY;
if (cur->vm_userfaultfd_ctx.ctx &&
cur->vm_userfaultfd_ctx.ctx != ctx)
goto out_unlock;
/*
* Note vmas containing huge pages
*/
if (is_vm_hugetlb_page(cur))
basic_ioctls = true;
found = true;
}
BUG_ON(!found);
if (vma->vm_start < start)
prev = vma;
ret = 0;
do {
cond_resched();
BUG_ON(!vma_can_userfault(vma));
BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
vma->vm_userfaultfd_ctx.ctx != ctx);
/*
* Nothing to do: this vma is already registered into this
* userfaultfd and with the right tracking mode too.
*/
if (vma->vm_userfaultfd_ctx.ctx == ctx &&
(vma->vm_flags & vm_flags) == vm_flags)
goto skip;
if (vma->vm_start > start)
start = vma->vm_start;
vma_end = min(end, vma->vm_end);
new_flags = (vma->vm_flags & ~vm_flags) | vm_flags;
prev = vma_merge(mm, prev, start, vma_end, new_flags,
vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
((struct vm_userfaultfd_ctx){ ctx }));
if (prev) {
vma = prev;
goto next;
}
if (vma->vm_start < start) {
ret = split_vma(mm, vma, start, 1);
if (ret)
break;
}
if (vma->vm_end > end) {
ret = split_vma(mm, vma, end, 0);
if (ret)
break;
}
next:
/*
* In the vma_merge() successful mprotect-like case 8:
* the next vma was merged into the current one and
* the current one has not been updated yet.
*/
vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx.ctx = ctx;
skip:
prev = vma;
start = vma->vm_end;
vma = vma->vm_next;
} while (vma && vma->vm_start < end);
out_unlock:
up_write(&mm->mmap_sem);
mmput(mm);
if (!ret) {
/*
* Now that we scanned all vmas we can already tell
* userland which ioctls methods are guaranteed to
* succeed on this range.
*/
if (put_user(basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
UFFD_API_RANGE_IOCTLS,
&user_uffdio_register->ioctls))
ret = -EFAULT;
}
out:
return ret;
} | 1 | [
"CWE-862",
"CWE-863"
]
| linux | 29ec90660d68bbdd69507c1c8b4e33aa299278b1 | 111,017,886,332,702,120,000,000,000,000,000,000,000 | 195 | userfaultfd: shmem/hugetlbfs: only allow to register VM_MAYWRITE vmas
After the VMA to register the uffd onto is found, check that it has
VM_MAYWRITE set before allowing registration. This way we inherit all
common code checks before allowing to fill file holes in shmem and
hugetlbfs with UFFDIO_COPY.
The userfaultfd memory model is not applicable for readonly files unless
it's a MAP_PRIVATE.
Link: http://lkml.kernel.org/r/[email protected]
Fixes: ff62a3421044 ("hugetlb: implement memfd sealing")
Signed-off-by: Andrea Arcangeli <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
Reviewed-by: Hugh Dickins <[email protected]>
Reported-by: Jann Horn <[email protected]>
Fixes: 4c27fe4c4c84 ("userfaultfd: shmem: add shmem_mcopy_atomic_pte for userfaultfd support")
Cc: <[email protected]>
Cc: "Dr. David Alan Gilbert" <[email protected]>
Cc: Mike Kravetz <[email protected]>
Cc: Peter Xu <[email protected]>
Cc: [email protected]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
client_send_params(gnutls_session_t session,
gnutls_buffer_t extdata,
const gnutls_psk_client_credentials_t cred)
{
int ret, ext_offset = 0;
uint8_t binder_value[MAX_HASH_SIZE];
size_t spos;
gnutls_datum_t username = {NULL, 0};
gnutls_datum_t user_key = {NULL, 0}, rkey = {NULL, 0};
unsigned client_hello_len;
unsigned next_idx;
const mac_entry_st *prf_res = NULL;
const mac_entry_st *prf_psk = NULL;
struct timespec cur_time;
uint32_t ticket_age, ob_ticket_age;
int free_username = 0;
psk_auth_info_t info = NULL;
unsigned psk_id_len = 0;
unsigned binders_len, binders_pos;
tls13_ticket_st *ticket = &session->internals.tls13_ticket;
if (((session->internals.flags & GNUTLS_NO_TICKETS) ||
session->internals.tls13_ticket.ticket.data == NULL) &&
(!cred || !_gnutls_have_psk_credentials(cred, session))) {
return 0;
}
binders_len = 0;
/* placeholder to be filled later */
spos = extdata->length;
ret = _gnutls_buffer_append_prefix(extdata, 16, 0);
if (ret < 0)
return gnutls_assert_val(ret);
/* First, let's see if we have a session ticket to send */
if (!(session->internals.flags & GNUTLS_NO_TICKETS) &&
ticket->ticket.data != NULL) {
/* We found a session ticket */
if (unlikely(ticket->prf == NULL)) {
tls13_ticket_deinit(ticket);
ret = gnutls_assert_val(GNUTLS_E_INTERNAL_ERROR);
goto cleanup;
}
prf_res = ticket->prf;
gnutls_gettime(&cur_time);
if (unlikely(_gnutls_timespec_cmp(&cur_time,
&ticket->arrival_time) < 0)) {
gnutls_assert();
tls13_ticket_deinit(ticket);
goto ignore_ticket;
}
/* Check whether the ticket is stale */
ticket_age = timespec_sub_ms(&cur_time, &ticket->arrival_time);
if (ticket_age / 1000 > ticket->lifetime) {
tls13_ticket_deinit(ticket);
goto ignore_ticket;
}
ret = compute_psk_from_ticket(ticket, &rkey);
if (ret < 0) {
tls13_ticket_deinit(ticket);
goto ignore_ticket;
}
/* Calculate obfuscated ticket age, in milliseconds, mod 2^32 */
ob_ticket_age = ticket_age + ticket->age_add;
if ((ret = _gnutls_buffer_append_data_prefix(extdata, 16,
ticket->ticket.data,
ticket->ticket.size)) < 0) {
gnutls_assert();
goto cleanup;
}
/* Now append the obfuscated ticket age */
if ((ret = _gnutls_buffer_append_prefix(extdata, 32, ob_ticket_age)) < 0) {
gnutls_assert();
goto cleanup;
}
psk_id_len += 6 + ticket->ticket.size;
binders_len += 1 + _gnutls_mac_get_algo_len(prf_res);
}
ignore_ticket:
if (cred && _gnutls_have_psk_credentials(cred, session)) {
gnutls_datum_t tkey;
if (cred->binder_algo == NULL) {
gnutls_assert();
ret = gnutls_assert_val(GNUTLS_E_INSUFFICIENT_CREDENTIALS);
goto cleanup;
}
prf_psk = cred->binder_algo;
ret = _gnutls_find_psk_key(session, cred, &username, &tkey, &free_username);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
if (username.size == 0 || username.size > UINT16_MAX) {
ret = gnutls_assert_val(GNUTLS_E_INVALID_PASSWORD);
goto cleanup;
}
if (!free_username) {
/* we need to copy the key */
ret = _gnutls_set_datum(&user_key, tkey.data, tkey.size);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
} else {
user_key.data = tkey.data;
user_key.size = tkey.size;
}
ret = _gnutls_auth_info_init(session, GNUTLS_CRD_PSK, sizeof(psk_auth_info_st), 1);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
info = _gnutls_get_auth_info(session, GNUTLS_CRD_PSK);
assert(info != NULL);
_gnutls_copy_psk_username(info, &username);
if ((ret = _gnutls_buffer_append_data_prefix(extdata, 16,
username.data,
username.size)) < 0) {
gnutls_assert();
goto cleanup;
}
/* Now append the obfuscated ticket age */
if ((ret = _gnutls_buffer_append_prefix(extdata, 32, 0)) < 0) {
gnutls_assert();
goto cleanup;
}
psk_id_len += 6 + username.size;
binders_len += 1 + _gnutls_mac_get_algo_len(prf_psk);
}
/* if no tickets or identities to be sent */
if (psk_id_len == 0) {
/* reset extensions buffer */
extdata->length = spos;
return 0;
}
_gnutls_write_uint16(psk_id_len, &extdata->data[spos]);
binders_pos = extdata->length-spos;
ext_offset = _gnutls_ext_get_extensions_offset(session);
/* Compute the binders. extdata->data points to the start
* of this client hello. */
assert(extdata->length >= sizeof(mbuffer_st));
assert(ext_offset >= (ssize_t)sizeof(mbuffer_st));
ext_offset -= sizeof(mbuffer_st);
client_hello_len = extdata->length-sizeof(mbuffer_st);
next_idx = 0;
ret = _gnutls_buffer_append_prefix(extdata, 16, binders_len);
if (ret < 0) {
gnutls_assert_val(ret);
goto cleanup;
}
if (prf_res && rkey.size > 0) {
gnutls_datum_t client_hello;
client_hello.data = extdata->data+sizeof(mbuffer_st);
client_hello.size = client_hello_len;
ret = compute_psk_binder(session, prf_res,
binders_len, binders_pos,
ext_offset, &rkey, &client_hello, 1,
binder_value);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
/* Associate the selected pre-shared key with the session */
gnutls_free(session->key.binders[next_idx].psk.data);
session->key.binders[next_idx].psk.data = rkey.data;
session->key.binders[next_idx].psk.size = rkey.size;
rkey.data = NULL;
session->key.binders[next_idx].prf = prf_res;
session->key.binders[next_idx].resumption = 1;
session->key.binders[next_idx].idx = next_idx;
_gnutls_handshake_log("EXT[%p]: sent PSK resumption identity (%d)\n", session, next_idx);
next_idx++;
/* Add the binder */
ret = _gnutls_buffer_append_data_prefix(extdata, 8, binder_value, prf_res->output_size);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
session->internals.hsk_flags |= HSK_TLS13_TICKET_SENT;
}
if (prf_psk && user_key.size > 0 && info) {
gnutls_datum_t client_hello;
client_hello.data = extdata->data+sizeof(mbuffer_st);
client_hello.size = client_hello_len;
ret = compute_psk_binder(session, prf_psk,
binders_len, binders_pos,
ext_offset, &user_key, &client_hello, 0,
binder_value);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
/* Associate the selected pre-shared key with the session */
gnutls_free(session->key.binders[next_idx].psk.data);
session->key.binders[next_idx].psk.data = user_key.data;
session->key.binders[next_idx].psk.size = user_key.size;
user_key.data = NULL;
session->key.binders[next_idx].prf = prf_psk;
session->key.binders[next_idx].resumption = 0;
session->key.binders[next_idx].idx = next_idx;
_gnutls_handshake_log("EXT[%p]: sent PSK identity '%s' (%d)\n", session, info->username, next_idx);
next_idx++;
/* Add the binder */
ret = _gnutls_buffer_append_data_prefix(extdata, 8, binder_value, prf_psk->output_size);
if (ret < 0) {
gnutls_assert();
goto cleanup;
}
}
ret = 0;
cleanup:
if (free_username)
_gnutls_free_datum(&username);
_gnutls_free_temp_key_datum(&user_key);
_gnutls_free_temp_key_datum(&rkey);
return ret;
} | 0 | [
"CWE-416"
]
| gnutls | 75a937d97f4fefc6f9b08e3791f151445f551cb3 | 305,570,908,249,782,030,000,000,000,000,000,000,000 | 267 | pre_shared_key: avoid use-after-free around realloc
Signed-off-by: Daiki Ueno <[email protected]> |
_load_job_limits(void)
{
List steps;
ListIterator step_iter;
step_loc_t *stepd;
int fd;
job_mem_limits_t *job_limits_ptr;
slurmstepd_mem_info_t stepd_mem_info;
if (!job_limits_list)
job_limits_list = list_create(_job_limits_free);
job_limits_loaded = true;
steps = stepd_available(conf->spooldir, conf->node_name);
step_iter = list_iterator_create(steps);
while ((stepd = list_next(step_iter))) {
job_limits_ptr = list_find_first(job_limits_list,
_step_limits_match, stepd);
if (job_limits_ptr) /* already processed */
continue;
fd = stepd_connect(stepd->directory, stepd->nodename,
stepd->jobid, stepd->stepid,
&stepd->protocol_version);
if (fd == -1)
continue; /* step completed */
if (stepd_get_mem_limits(fd, stepd->protocol_version,
&stepd_mem_info) != SLURM_SUCCESS) {
error("Error reading step %u.%u memory limits from "
"slurmstepd",
stepd->jobid, stepd->stepid);
close(fd);
continue;
}
if ((stepd_mem_info.job_mem_limit
|| stepd_mem_info.step_mem_limit)) {
/* create entry for this job */
job_limits_ptr = xmalloc(sizeof(job_mem_limits_t));
job_limits_ptr->job_id = stepd->jobid;
job_limits_ptr->step_id = stepd->stepid;
job_limits_ptr->job_mem =
stepd_mem_info.job_mem_limit;
job_limits_ptr->step_mem =
stepd_mem_info.step_mem_limit;
#if _LIMIT_INFO
info("RecLim step:%u.%u job_mem:%u step_mem:%u",
job_limits_ptr->job_id, job_limits_ptr->step_id,
job_limits_ptr->job_mem,
job_limits_ptr->step_mem);
#endif
list_append(job_limits_list, job_limits_ptr);
}
close(fd);
}
list_iterator_destroy(step_iter);
FREE_NULL_LIST(steps);
} | 0 | [
"CWE-284"
]
| slurm | 92362a92fffe60187df61f99ab11c249d44120ee | 63,237,771,374,834,540,000,000,000,000,000,000,000 | 59 | Fix security issue in _prolog_error().
Fix security issue caused by insecure file path handling triggered by
the failure of a Prolog script. To exploit this a user needs to
anticipate or cause the Prolog to fail for their job.
(This commit is slightly different from the fix to the 15.08 branch.)
CVE-2016-10030. |
Word grpcSendHandler(void* raw_context, Word token, Word message_ptr, Word message_size,
Word end_stream) {
auto context = WASM_CONTEXT(raw_context)->root_context();
auto message = context->wasmVm()->getMemory(message_ptr.u64_, message_size.u64_);
if (!message) {
return wasmResultToWord(WasmResult::InvalidMemoryAccess);
}
return wasmResultToWord(context->grpcSend(token.u64_, message.value(), end_stream.u64_));
} | 0 | [
"CWE-476"
]
| envoy | 8788a3cf255b647fd14e6b5e2585abaaedb28153 | 301,696,247,688,465,640,000,000,000,000,000,000,000 | 9 | 1.4 - Do not call into the VM unless the VM Context has been created. (#24)
* Ensure that the in VM Context is created before onDone is called.
Signed-off-by: John Plevyak <[email protected]>
* Update as per offline discussion.
Signed-off-by: John Plevyak <[email protected]>
* Set in_vm_context_created_ in onNetworkNewConnection.
Signed-off-by: John Plevyak <[email protected]>
* Add guards to other network calls.
Signed-off-by: John Plevyak <[email protected]>
* Fix common/wasm tests.
Signed-off-by: John Plevyak <[email protected]>
* Patch tests.
Signed-off-by: John Plevyak <[email protected]>
* Remove unecessary file from cherry-pick.
Signed-off-by: John Plevyak <[email protected]> |
static int get_iovec_page_array(const struct iovec __user *iov,
unsigned int nr_vecs, struct page **pages,
struct partial_page *partial, bool aligned,
unsigned int pipe_buffers)
{
int buffers = 0, error = 0;
while (nr_vecs) {
unsigned long off, npages;
struct iovec entry;
void __user *base;
size_t len;
int i;
error = -EFAULT;
if (copy_from_user(&entry, iov, sizeof(entry)))
break;
base = entry.iov_base;
len = entry.iov_len;
/*
* Sanity check this iovec. 0 read succeeds.
*/
error = 0;
if (unlikely(!len))
break;
error = -EFAULT;
if (!access_ok(VERIFY_READ, base, len))
break;
/*
* Get this base offset and number of pages, then map
* in the user pages.
*/
off = (unsigned long) base & ~PAGE_MASK;
/*
* If asked for alignment, the offset must be zero and the
* length a multiple of the PAGE_SIZE.
*/
error = -EINVAL;
if (aligned && (off || len & ~PAGE_MASK))
break;
npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (npages > pipe_buffers - buffers)
npages = pipe_buffers - buffers;
error = get_user_pages_fast((unsigned long)base, npages,
0, &pages[buffers]);
if (unlikely(error <= 0))
break;
/*
* Fill this contiguous range into the partial page map.
*/
for (i = 0; i < error; i++) {
const int plen = min_t(size_t, len, PAGE_SIZE - off);
partial[buffers].offset = off;
partial[buffers].len = plen;
off = 0;
len -= plen;
buffers++;
}
/*
* We didn't complete this iov, stop here since it probably
* means we have to move some of this into a pipe to
* be able to continue.
*/
if (len)
break;
/*
* Don't continue if we mapped fewer pages than we asked for,
* or if we mapped the max number of pages that we have
* room for.
*/
if (error < npages || buffers == pipe_buffers)
break;
nr_vecs--;
iov++;
}
if (buffers)
return buffers;
return error;
} | 0 | [
"CWE-284",
"CWE-264"
]
| linux | 8d0207652cbe27d1f962050737848e5ad4671958 | 219,663,804,884,769,600,000,000,000,000,000,000,000 | 94 | ->splice_write() via ->write_iter()
iter_file_splice_write() - a ->splice_write() instance that gathers the
pipe buffers, builds a bio_vec-based iov_iter covering those and feeds
it to ->write_iter(). A bunch of simple cases coverted to that...
[AV: fixed the braino spotted by Cyrill]
Signed-off-by: Al Viro <[email protected]> |
check_entry_size_and_hooks(struct ipt_entry *e,
struct xt_table_info *newinfo,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
unsigned int valid_hooks)
{
unsigned int h;
int err;
if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
(unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p\n", e);
return -EINVAL;
}
if (e->next_offset
< sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
duprintf("checking: element %p size %u\n",
e, e->next_offset);
return -EINVAL;
}
if (!ip_checkentry(&e->ip))
return -EINVAL;
err = xt_check_entry_offsets(e, e->target_offset, e->next_offset);
if (err)
return err;
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
continue;
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h]) {
if (!check_underflow(e)) {
pr_debug("Underflows must be unconditional and "
"use the STANDARD target with "
"ACCEPT/DROP\n");
return -EINVAL;
}
newinfo->underflow[h] = underflows[h];
}
}
/* Clear counters and comefrom */
e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
return 0;
} | 1 | [
"CWE-284",
"CWE-264"
]
| linux | ce683e5f9d045e5d67d1312a42b359cb2ab2a13c | 243,817,632,459,644,700,000,000,000,000,000,000,000 | 54 | netfilter: x_tables: check for bogus target offset
We're currently asserting that targetoff + targetsize <= nextoff.
Extend it to also check that targetoff is >= sizeof(xt_entry).
Since this is generic code, add an argument pointing to the start of the
match/target, we can then derive the base structure size from the delta.
We also need the e->elems pointer in a followup change to validate matches.
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
static void SFS_IncIndent(ScriptParser *pars) {
pars->indent++;
} | 0 | [
"CWE-476"
]
| gpac | 4e7736d7ec7bf64026daa611da951993bb42fdaf | 128,589,872,282,308,170,000,000,000,000,000,000,000 | 3 | fixed #2238 |
void Messageheader::Parser::checkHeaderspace(unsigned chars) const
{
if (headerdataPtr + chars >= header.rawdata + sizeof(header.rawdata))
{
header.rawdata[sizeof(header.rawdata) - 1] = '\0';
throw HttpError(HTTP_REQUEST_ENTITY_TOO_LARGE, "header too large");
}
} | 0 | [
"CWE-200"
]
| tntnet | 9bd3b14042e12d84f39ea9f55731705ba516f525 | 254,906,335,396,570,700,000,000,000,000,000,000,000 | 8 | fix possible information leak |
Bool Media_IsSampleSyncShadow(GF_ShadowSyncBox *stsh, u32 sampleNumber)
{
u32 i;
GF_StshEntry *ent;
if (!stsh) return 0;
i=0;
while ((ent = (GF_StshEntry*)gf_list_enum(stsh->entries, &i))) {
if ((u32) ent->syncSampleNumber == sampleNumber) return 1;
else if ((u32) ent->syncSampleNumber > sampleNumber) return 0;
}
return 0;
} | 0 | [
"CWE-787"
]
| gpac | 328def7d3b93847d64ecb6e9e0399684e57c3eca | 35,253,940,332,987,660,000,000,000,000,000,000,000 | 12 | fixed #1766 (fuzz) |
static void virtio_gpu_ctrl_bh(void *opaque)
{
VirtIOGPU *g = opaque;
virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
} | 0 | []
| qemu | acfc4846508a02cc4c83aa27799fd74ac280bdb2 | 276,802,279,001,281,800,000,000,000,000,000,000,000 | 5 | virtio-gpu: use VIRTIO_GPU_MAX_SCANOUTS
The value is defined in virtio_gpu.h already (changing from 4 to 16).
Signed-off-by: Marc-André Lureau <[email protected]>
Message-id: [email protected]
Signed-off-by: Gerd Hoffmann <[email protected]> |
static void sched_migrate_task(struct task_struct *p, int dest_cpu)
{
struct migration_req req;
unsigned long flags;
struct rq *rq;
rq = task_rq_lock(p, &flags);
if (!cpu_isset(dest_cpu, p->cpus_allowed)
|| unlikely(cpu_is_offline(dest_cpu)))
goto out;
/* force the process onto the specified CPU */
if (migrate_task(p, dest_cpu, &req)) {
/* Need to wait for migration thread (might exit: take ref). */
struct task_struct *mt = rq->migration_thread;
get_task_struct(mt);
task_rq_unlock(rq, &flags);
wake_up_process(mt);
put_task_struct(mt);
wait_for_completion(&req.done);
return;
}
out:
task_rq_unlock(rq, &flags);
} | 0 | []
| linux-2.6 | 8f1bc385cfbab474db6c27b5af1e439614f3025c | 170,034,660,277,541,020,000,000,000,000,000,000,000 | 27 | sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
A
/ \
B 1
/ \
2 3
To compute 1's load we do:
weight(1)
--------------
rq_weight(A)
To compute 2's load we do:
weight(2) weight(B)
------------ * -----------
rq_weight(B) rw_weight(A)
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
time_{i}
vtime_{i} = ------------
weight_{i}
vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]> |
static struct binder_node *binder_get_node_from_ref(
struct binder_proc *proc,
u32 desc, bool need_strong_ref,
struct binder_ref_data *rdata)
{
struct binder_node *node;
struct binder_ref *ref;
binder_proc_lock(proc);
ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
if (!ref)
goto err_no_ref;
node = ref->node;
/*
* Take an implicit reference on the node to ensure
* it stays alive until the call to binder_put_node()
*/
binder_inc_node_tmpref(node);
if (rdata)
*rdata = ref->data;
binder_proc_unlock(proc);
return node;
err_no_ref:
binder_proc_unlock(proc);
return NULL;
} | 0 | [
"CWE-416"
]
| linux | 7bada55ab50697861eee6bb7d60b41e68a961a9c | 83,630,927,647,432,950,000,000,000,000,000,000,000 | 28 | binder: fix race that allows malicious free of live buffer
Malicious code can attempt to free buffers using the BC_FREE_BUFFER
ioctl to binder. There are protections against a user freeing a buffer
while in use by the kernel, however there was a window where
BC_FREE_BUFFER could be used to free a recently allocated buffer that
was not completely initialized. This resulted in a use-after-free
detected by KASAN with a malicious test program.
This window is closed by setting the buffer's allow_user_free attribute
to 0 when the buffer is allocated or when the user has previously freed
it instead of waiting for the caller to set it. The problem was that
when the struct buffer was recycled, allow_user_free was stale and set
to 1 allowing a free to go through.
Signed-off-by: Todd Kjos <[email protected]>
Acked-by: Arve Hjønnevåg <[email protected]>
Cc: stable <[email protected]> # 4.14
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
entry_guards_update_filtered_sets(guard_selection_t *gs)
{
const or_options_t *options = get_options();
SMARTLIST_FOREACH_BEGIN(gs->sampled_entry_guards, entry_guard_t *, guard) {
entry_guard_set_filtered_flags(options, gs, guard);
} SMARTLIST_FOREACH_END(guard);
} | 0 | [
"CWE-200"
]
| tor | 665baf5ed5c6186d973c46cdea165c0548027350 | 257,368,871,847,301,800,000,000,000,000,000,000,000 | 8 | Consider the exit family when applying guard restrictions.
When the new path selection logic went into place, I accidentally
dropped the code that considered the _family_ of the exit node when
deciding if the guard was usable, and we didn't catch that during
code review.
This patch makes the guard_restriction_t code consider the exit
family as well, and adds some (hopefully redundant) checks for the
case where we lack a node_t for a guard but we have a bridge_info_t
for it.
Fixes bug 22753; bugfix on 0.3.0.1-alpha. Tracked as TROVE-2016-006
and CVE-2017-0377. |
virtual void updateRender(GfxState * /*state*/) {} | 0 | []
| poppler | abf167af8b15e5f3b510275ce619e6fdb42edd40 | 10,709,898,443,401,400,000,000,000,000,000,000,000 | 1 | Implement tiling/patterns in SplashOutputDev
Fixes bug 13518 |
**/
CImg<T>& sequence(const T& a0, const T& a1) {
if (is_empty()) return *this;
const ulongT siz = size() - 1;
T* ptr = _data;
if (siz) {
const double delta = (double)a1 - (double)a0;
cimg_foroff(*this,l) *(ptr++) = (T)(a0 + delta*l/siz);
} else *ptr = a0;
return *this; | 0 | [
"CWE-125"
]
| CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 164,881,460,635,386,870,000,000,000,000,000,000,000 | 10 | Fix other issues in 'CImg<T>::load_bmp()'. |
ipf_addr_hash_add(uint32_t hash, const union ipf_addr *addr)
{
BUILD_ASSERT_DECL(sizeof *addr % 4 == 0);
return hash_add_bytes32(hash, (const uint32_t *) addr, sizeof *addr);
} | 0 | [
"CWE-401"
]
| ovs | 803ed12e31b0377c37d7aa8c94b3b92f2081e349 | 264,921,076,802,352,380,000,000,000,000,000,000,000 | 5 | ipf: release unhandled packets from the batch
Since 640d4db788ed ("ipf: Fix a use-after-free error, ...") the ipf
framework unconditionally allocates a new dp_packet to track
individual fragments. This prevents a use-after-free. However, an
additional issue was present - even when the packet buffer is cloned,
if the ip fragment handling code keeps it, the original buffer is
leaked during the refill loop. Even in the original processing code,
the hardcoded dnsteal branches would always leak a packet buffer from
the refill loop.
This can be confirmed with valgrind:
==717566== 16,672 (4,480 direct, 12,192 indirect) bytes in 8 blocks are definitely lost in loss record 390 of 390
==717566== at 0x484086F: malloc (vg_replace_malloc.c:380)
==717566== by 0x537BFD: xmalloc__ (util.c:137)
==717566== by 0x537BFD: xmalloc (util.c:172)
==717566== by 0x46DDD4: dp_packet_new (dp-packet.c:153)
==717566== by 0x46DDD4: dp_packet_new_with_headroom (dp-packet.c:163)
==717566== by 0x550AA6: netdev_linux_batch_rxq_recv_sock.constprop.0 (netdev-linux.c:1262)
==717566== by 0x5512AF: netdev_linux_rxq_recv (netdev-linux.c:1511)
==717566== by 0x4AB7E0: netdev_rxq_recv (netdev.c:727)
==717566== by 0x47F00D: dp_netdev_process_rxq_port (dpif-netdev.c:4699)
==717566== by 0x47FD13: dpif_netdev_run (dpif-netdev.c:5957)
==717566== by 0x4331D2: type_run (ofproto-dpif.c:370)
==717566== by 0x41DFD8: ofproto_type_run (ofproto.c:1768)
==717566== by 0x40A7FB: bridge_run__ (bridge.c:3245)
==717566== by 0x411269: bridge_run (bridge.c:3310)
==717566== by 0x406E6C: main (ovs-vswitchd.c:127)
The fix is to delete the original packet when it isn't able to be
reinserted into the packet batch. Subsequent valgrind runs show that
the packets are not leaked from the batch any longer.
Fixes: 640d4db788ed ("ipf: Fix a use-after-free error, and remove the 'do_not_steal' flag.")
Fixes: 4ea96698f667 ("Userspace datapath: Add fragmentation handling.")
Reported-by: Wan Junjie <[email protected]>
Reported-at: https://github.com/openvswitch/ovs-issues/issues/226
Signed-off-by: Aaron Conole <[email protected]>
Reviewed-by: David Marchand <[email protected]>
Tested-by: Wan Junjie <[email protected]>
Signed-off-by: Alin-Gabriel Serdean <[email protected]> |
isinsets(g, c)
register struct re_guts *g;
int c;
{
register uch *col;
register int i;
register int ncols = (g->ncsets+(CHAR_BIT-1)) / CHAR_BIT;
register unsigned uc = (unsigned char)c;
if (!g->setbits) {
return(0);
}
for (i = 0, col = g->setbits; i < ncols; i++, col += g->csetsize)
if (col[uc] != 0)
return(1);
return(0);
} | 0 | []
| php-src | 124fb22a13fafa3648e4e15b4f207c7096d8155e | 281,741,711,461,974,830,000,000,000,000,000,000,000 | 18 | Fixed bug #68739 #68740 #68741 |
static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
const struct xfrm_pol_inexact_bin *b = data;
return xfrm_pol_bin_key(&b->k, 0, seed);
} | 0 | [
"CWE-703"
]
| linux | f85daf0e725358be78dfd208dea5fd665d8cb901 | 275,538,204,614,153,300,000,000,000,000,000,000,000 | 6 | xfrm: xfrm_policy: fix a possible double xfrm_pols_put() in xfrm_bundle_lookup()
xfrm_policy_lookup() will call xfrm_pol_hold_rcu() to get a refcount of
pols[0]. This refcount can be dropped in xfrm_expand_policies() when
xfrm_expand_policies() return error. pols[0]'s refcount is balanced in
here. But xfrm_bundle_lookup() will also call xfrm_pols_put() with
num_pols == 1 to drop this refcount when xfrm_expand_policies() return
error.
This patch also fix an illegal address access. pols[0] will save a error
point when xfrm_policy_lookup fails. This lead to xfrm_pols_put to resolve
an illegal address in xfrm_bundle_lookup's error path.
Fix these by setting num_pols = 0 in xfrm_expand_policies()'s error path.
Fixes: 80c802f3073e ("xfrm: cache bundles instead of policies for outgoing flows")
Signed-off-by: Hangyu Hua <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]> |
RList *r_bin_wasm_get_elements (RBinWasmObj *bin) {
RBinWasmSection *element = NULL;
RList *elements = NULL;
if (!bin || !bin->g_sections) {
return NULL;
}
if (bin->g_elements) {
return bin->g_elements;
}
if (!(elements = r_bin_wasm_get_sections_by_id (bin->g_sections,
R_BIN_WASM_SECTION_ELEMENT))) {
return r_list_new();
}
// support for multiple export sections against spec
if (!(element = (RBinWasmSection*) r_list_first (elements))) {
return r_list_new();
}
bin->g_elements = r_bin_wasm_get_element_entries (bin, element);
return bin->g_elements;
} | 0 | [
"CWE-125",
"CWE-787"
]
| radare2 | d2632f6483a3ceb5d8e0a5fb11142c51c43978b4 | 145,820,319,470,149,450,000,000,000,000,000,000,000 | 26 | Fix crash in fuzzed wasm r2_hoobr_consume_init_expr |
ephy_embed_single_set_property (GObject *object,
guint prop_id,
const GValue *value,
GParamSpec *pspec)
{
EphyEmbedSingle *single = EPHY_EMBED_SINGLE (object);
switch (prop_id) {
case PROP_NETWORK_STATUS:
ephy_embed_single_set_network_status (single, g_value_get_boolean (value));
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
}
} | 0 | []
| epiphany | 3e0f7dea754381c5ad11a06ccc62eb153382b498 | 22,160,573,184,599,390,000,000,000,000,000,000,000 | 16 | Report broken certs through the padlock icon
This uses a new feature in libsoup that reports through a
SoupMessageFlag whether the message is talking to a server that has a
trusted server.
Bug #600663 |
static void v4l_print_exportbuffer(const void *arg, bool write_only)
{
const struct v4l2_exportbuffer *p = arg;
pr_cont("fd=%d, type=%s, index=%u, plane=%u, flags=0x%08x\n",
p->fd, prt_names(p->type, v4l2_type_names),
p->index, p->plane, p->flags);
} | 0 | [
"CWE-401"
]
| linux | fb18802a338b36f675a388fc03d2aa504a0d0899 | 150,165,961,958,496,250,000,000,000,000,000,000,000 | 8 | media: v4l: ioctl: Fix memory leak in video_usercopy
When an IOCTL with argument size larger than 128 that also used array
arguments were handled, two memory allocations were made but alas, only
the latter one of them was released. This happened because there was only
a single local variable to hold such a temporary allocation.
Fix this by adding separate variables to hold the pointers to the
temporary allocations.
Reported-by: Arnd Bergmann <[email protected]>
Reported-by: [email protected]
Fixes: d14e6d76ebf7 ("[media] v4l: Add multi-planar ioctl handling code")
Cc: [email protected]
Signed-off-by: Sakari Ailus <[email protected]>
Acked-by: Arnd Bergmann <[email protected]>
Acked-by: Hans Verkuil <[email protected]>
Reviewed-by: Laurent Pinchart <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]> |
acquire_buffer (gboolean *free_buffer)
{
HB_Buffer buffer;
if (G_LIKELY (G_TRYLOCK (cached_buffer)))
{
if (G_UNLIKELY (!cached_buffer))
hb_buffer_new (&cached_buffer);
buffer = cached_buffer;
*free_buffer = FALSE;
}
else
{
hb_buffer_new (&buffer);
*free_buffer = TRUE;
}
return buffer;
} | 1 | []
| pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 136,535,772,702,542,390,000,000,000,000,000,000,000 | 20 | [HB] Remove all references to the old code! |
static int DecodeAltNames(const byte* input, int sz, DecodedCert* cert)
{
word32 idx = 0;
int length = 0;
WOLFSSL_ENTER("DecodeAltNames");
if (GetSequence(input, &idx, &length, sz) < 0) {
WOLFSSL_MSG("\tBad Sequence");
return ASN_PARSE_E;
}
if (length == 0) {
/* RFC 5280 4.2.1.6. Subject Alternative Name
If the subjectAltName extension is present, the sequence MUST
contain at least one entry. */
return ASN_PARSE_E;
}
cert->weOwnAltNames = 1;
while (length > 0) {
byte b = input[idx++];
length--;
/* Save DNS Type names in the altNames list. */
/* Save Other Type names in the cert's OidMap */
if (b == (ASN_CONTEXT_SPECIFIC | ASN_DNS_TYPE)) {
DNS_entry* dnsEntry;
int strLen;
word32 lenStartIdx = idx;
if (GetLength(input, &idx, &strLen, sz) < 0) {
WOLFSSL_MSG("\tfail: str length");
return ASN_PARSE_E;
}
length -= (idx - lenStartIdx);
dnsEntry = (DNS_entry*)XMALLOC(sizeof(DNS_entry), cert->heap,
DYNAMIC_TYPE_ALTNAME);
if (dnsEntry == NULL) {
WOLFSSL_MSG("\tOut of Memory");
return MEMORY_E;
}
dnsEntry->type = ASN_DNS_TYPE;
dnsEntry->name = (char*)XMALLOC(strLen + 1, cert->heap,
DYNAMIC_TYPE_ALTNAME);
if (dnsEntry->name == NULL) {
WOLFSSL_MSG("\tOut of Memory");
XFREE(dnsEntry, cert->heap, DYNAMIC_TYPE_ALTNAME);
return MEMORY_E;
}
dnsEntry->len = strLen;
XMEMCPY(dnsEntry->name, &input[idx], strLen);
dnsEntry->name[strLen] = '\0';
dnsEntry->next = cert->altNames;
cert->altNames = dnsEntry;
length -= strLen;
idx += strLen;
}
#ifndef IGNORE_NAME_CONSTRAINTS
else if (b == (ASN_CONTEXT_SPECIFIC | ASN_CONSTRUCTED | ASN_DIR_TYPE)) {
DNS_entry* dirEntry;
int strLen;
word32 lenStartIdx = idx;
if (GetLength(input, &idx, &strLen, sz) < 0) {
WOLFSSL_MSG("\tfail: str length");
return ASN_PARSE_E;
}
if (GetSequence(input, &idx, &strLen, sz) < 0) {
WOLFSSL_MSG("\tfail: seq length");
return ASN_PARSE_E;
}
length -= (idx - lenStartIdx);
dirEntry = (DNS_entry*)XMALLOC(sizeof(DNS_entry), cert->heap,
DYNAMIC_TYPE_ALTNAME);
if (dirEntry == NULL) {
WOLFSSL_MSG("\tOut of Memory");
return MEMORY_E;
}
dirEntry->type = ASN_DIR_TYPE;
dirEntry->name = (char*)XMALLOC(strLen + 1, cert->heap,
DYNAMIC_TYPE_ALTNAME);
if (dirEntry->name == NULL) {
WOLFSSL_MSG("\tOut of Memory");
XFREE(dirEntry, cert->heap, DYNAMIC_TYPE_ALTNAME);
return MEMORY_E;
}
dirEntry->len = strLen;
XMEMCPY(dirEntry->name, &input[idx], strLen);
dirEntry->name[strLen] = '\0';
dirEntry->next = cert->altDirNames;
cert->altDirNames = dirEntry;
length -= strLen;
idx += strLen;
}
else if (b == (ASN_CONTEXT_SPECIFIC | ASN_RFC822_TYPE)) {
DNS_entry* emailEntry;
int strLen;
word32 lenStartIdx = idx;
if (GetLength(input, &idx, &strLen, sz) < 0) {
WOLFSSL_MSG("\tfail: str length");
return ASN_PARSE_E;
}
length -= (idx - lenStartIdx);
emailEntry = (DNS_entry*)XMALLOC(sizeof(DNS_entry), cert->heap,
DYNAMIC_TYPE_ALTNAME);
if (emailEntry == NULL) {
WOLFSSL_MSG("\tOut of Memory");
return MEMORY_E;
}
emailEntry->type = ASN_RFC822_TYPE;
emailEntry->name = (char*)XMALLOC(strLen + 1, cert->heap,
DYNAMIC_TYPE_ALTNAME);
if (emailEntry->name == NULL) {
WOLFSSL_MSG("\tOut of Memory");
XFREE(emailEntry, cert->heap, DYNAMIC_TYPE_ALTNAME);
return MEMORY_E;
}
emailEntry->len = strLen;
XMEMCPY(emailEntry->name, &input[idx], strLen);
emailEntry->name[strLen] = '\0';
emailEntry->next = cert->altEmailNames;
cert->altEmailNames = emailEntry;
length -= strLen;
idx += strLen;
}
else if (b == (ASN_CONTEXT_SPECIFIC | ASN_URI_TYPE)) {
DNS_entry* uriEntry;
int strLen;
word32 lenStartIdx = idx;
WOLFSSL_MSG("\tPutting URI into list but not using");
if (GetLength(input, &idx, &strLen, sz) < 0) {
WOLFSSL_MSG("\tfail: str length");
return ASN_PARSE_E;
}
length -= (idx - lenStartIdx);
/* check that strLen at index is not past input buffer */
if (strLen + (int)idx > sz) {
return BUFFER_E;
}
#ifndef WOLFSSL_NO_ASN_STRICT
/* Verify RFC 5280 Sec 4.2.1.6 rule:
"The name MUST NOT be a relative URI" */
{
int i;
/* skip past scheme (i.e http,ftp,...) finding first ':' char */
for (i = 0; i < strLen; i++) {
if (input[idx + i] == ':') {
break;
}
if (input[idx + i] == '/') {
WOLFSSL_MSG("\tAlt Name must be absolute URI");
return ASN_ALT_NAME_E;
}
}
/* test if no ':' char was found and test that the next two
* chars are // to match the pattern "://" */
if (i >= strLen - 2 || (input[idx + i + 1] != '/' ||
input[idx + i + 2] != '/')) {
WOLFSSL_MSG("\tAlt Name must be absolute URI");
return ASN_ALT_NAME_E;
}
}
#endif
uriEntry = (DNS_entry*)XMALLOC(sizeof(DNS_entry), cert->heap,
DYNAMIC_TYPE_ALTNAME);
if (uriEntry == NULL) {
WOLFSSL_MSG("\tOut of Memory");
return MEMORY_E;
}
uriEntry->type = ASN_URI_TYPE;
uriEntry->name = (char*)XMALLOC(strLen + 1, cert->heap,
DYNAMIC_TYPE_ALTNAME);
if (uriEntry->name == NULL) {
WOLFSSL_MSG("\tOut of Memory");
XFREE(uriEntry, cert->heap, DYNAMIC_TYPE_ALTNAME);
return MEMORY_E;
}
uriEntry->len = strLen;
XMEMCPY(uriEntry->name, &input[idx], strLen);
uriEntry->name[strLen] = '\0';
uriEntry->next = cert->altNames;
cert->altNames = uriEntry;
length -= strLen;
idx += strLen;
}
#if defined(WOLFSSL_QT) || defined(OPENSSL_ALL) || defined(WOLFSSL_IP_ALT_NAME)
else if (b == (ASN_CONTEXT_SPECIFIC | ASN_IP_TYPE)) {
DNS_entry* ipAddr;
int strLen;
word32 lenStartIdx = idx;
WOLFSSL_MSG("Decoding Subject Alt. Name: IP Address");
if (GetLength(input, &idx, &strLen, sz) < 0) {
WOLFSSL_MSG("\tfail: str length");
return ASN_PARSE_E;
}
length -= (idx - lenStartIdx);
/* check that strLen at index is not past input buffer */
if (strLen + (int)idx > sz) {
return BUFFER_E;
}
ipAddr = (DNS_entry*)XMALLOC(sizeof(DNS_entry), cert->heap,
DYNAMIC_TYPE_ALTNAME);
if (ipAddr == NULL) {
WOLFSSL_MSG("\tOut of Memory");
return MEMORY_E;
}
ipAddr->type = ASN_IP_TYPE;
ipAddr->name = (char*)XMALLOC(strLen + 1, cert->heap,
DYNAMIC_TYPE_ALTNAME);
if (ipAddr->name == NULL) {
WOLFSSL_MSG("\tOut of Memory");
XFREE(ipAddr, cert->heap, DYNAMIC_TYPE_ALTNAME);
return MEMORY_E;
}
ipAddr->len = strLen;
XMEMCPY(ipAddr->name, &input[idx], strLen);
ipAddr->name[strLen] = '\0';
ipAddr->next = cert->altNames;
cert->altNames = ipAddr;
length -= strLen;
idx += strLen;
}
#endif /* WOLFSSL_QT || OPENSSL_ALL */
#endif /* IGNORE_NAME_CONSTRAINTS */
#ifdef WOLFSSL_SEP
else if (b == (ASN_CONTEXT_SPECIFIC | ASN_CONSTRUCTED | ASN_OTHER_TYPE))
{
int strLen;
word32 lenStartIdx = idx;
word32 oid = 0;
int ret;
byte tag;
if (GetLength(input, &idx, &strLen, sz) < 0) {
WOLFSSL_MSG("\tfail: other name length");
return ASN_PARSE_E;
}
/* Consume the rest of this sequence. */
length -= (strLen + idx - lenStartIdx);
if (GetObjectId(input, &idx, &oid, oidCertAltNameType, sz) < 0) {
WOLFSSL_MSG("\tbad OID");
return ASN_PARSE_E;
}
if (oid != HW_NAME_OID) {
WOLFSSL_MSG("\tincorrect OID");
return ASN_PARSE_E;
}
/* Certiciates issued with this OID in the subject alt name are for
* verifying signatures created on a module.
* RFC 4108 Section 5. */
if (cert->hwType != NULL) {
WOLFSSL_MSG("\tAlready seen Hardware Module Name");
return ASN_PARSE_E;
}
if (GetASNTag(input, &idx, &tag, sz) < 0) {
return ASN_PARSE_E;
}
if (tag != (ASN_CONTEXT_SPECIFIC | ASN_CONSTRUCTED)) {
WOLFSSL_MSG("\twrong type");
return ASN_PARSE_E;
}
if (GetLength(input, &idx, &strLen, sz) < 0) {
WOLFSSL_MSG("\tfail: str len");
return ASN_PARSE_E;
}
if (GetSequence(input, &idx, &strLen, sz) < 0) {
WOLFSSL_MSG("\tBad Sequence");
return ASN_PARSE_E;
}
ret = GetASNObjectId(input, &idx, &strLen, sz);
if (ret != 0) {
WOLFSSL_MSG("\tbad OID");
return ret;
}
cert->hwType = (byte*)XMALLOC(strLen, cert->heap,
DYNAMIC_TYPE_X509_EXT);
if (cert->hwType == NULL) {
WOLFSSL_MSG("\tOut of Memory");
return MEMORY_E;
}
XMEMCPY(cert->hwType, &input[idx], strLen);
cert->hwTypeSz = strLen;
idx += strLen;
ret = GetOctetString(input, &idx, &strLen, sz);
if (ret < 0)
return ret;
cert->hwSerialNum = (byte*)XMALLOC(strLen + 1, cert->heap,
DYNAMIC_TYPE_X509_EXT);
if (cert->hwSerialNum == NULL) {
WOLFSSL_MSG("\tOut of Memory");
return MEMORY_E;
}
XMEMCPY(cert->hwSerialNum, &input[idx], strLen);
cert->hwSerialNum[strLen] = '\0';
cert->hwSerialNumSz = strLen;
idx += strLen;
}
#endif /* WOLFSSL_SEP */
else {
int strLen;
word32 lenStartIdx = idx;
WOLFSSL_MSG("\tUnsupported name type, skipping");
if (GetLength(input, &idx, &strLen, sz) < 0) {
WOLFSSL_MSG("\tfail: unsupported name length");
return ASN_PARSE_E;
}
length -= (strLen + idx - lenStartIdx);
idx += strLen;
}
}
return 0;
} | 0 | [
"CWE-125",
"CWE-345"
]
| wolfssl | f93083be72a3b3d956b52a7ec13f307a27b6e093 | 131,896,888,506,020,260,000,000,000,000,000,000,000 | 359 | OCSP: improve handling of OCSP no check extension |
static void php_cli_server_client_dtor_wrapper(php_cli_server_client **p) /* {{{ */
{
closesocket((*p)->sock);
php_cli_server_poller_remove(&(*p)->server->poller, POLLIN | POLLOUT, (*p)->sock);
php_cli_server_client_dtor(*p);
pefree(*p, 1);
} /* }}} */ | 0 | []
| php-src | 2438490addfbfba51e12246a74588b2382caa08a | 176,809,483,976,706,800,000,000,000,000,000,000,000 | 7 | slim post data |
static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
int jobnr, int threadnr)
{
return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP7);
} | 0 | [
"CWE-119",
"CWE-787"
]
| FFmpeg | 6b5d3fb26fb4be48e4966e4b1d97c2165538d4ef | 233,097,365,336,387,400,000,000,000,000,000,000,000 | 5 | avcodec/webp: Always set pix_fmt
Fixes: out of array access
Fixes: 1434/clusterfuzz-testcase-minimized-6314998085189632
Fixes: 1435/clusterfuzz-testcase-minimized-6483783723253760
Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/targets/ffmpeg
Reviewed-by: "Ronald S. Bultje" <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]> |
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
struct nfs4_lock_state *pos;
list_for_each_entry(pos, &state->lock_states, ls_locks) {
if (pos->ls_owner != fl_owner)
continue;
atomic_inc(&pos->ls_count);
return pos;
}
return NULL;
} | 0 | [
"CWE-703"
]
| linux | dc0b027dfadfcb8a5504f7d8052754bf8d501ab9 | 43,050,783,884,383,060,000,000,000,000,000,000,000 | 11 | NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]> |
static int interface_req_cursor_notification(QXLInstance *sin)
{
dprint(1, "%s:\n", __FUNCTION__);
return 1;
} | 0 | []
| qemu-kvm | 5ff4e36c804157bd84af43c139f8cd3a59722db9 | 80,897,287,824,765,610,000,000,000,000,000,000,000 | 5 | qxl: async io support using new spice api
Some of the QXL port i/o commands are waiting for the spice server to
complete certain actions. Add async versions for these commands, so we
don't block the vcpu while the spice server processses the command.
Instead the qxl device will raise an IRQ when done.
The async command processing relies on an added QXLInterface::async_complete
and added QXLWorker::*_async additions, in spice server qxl >= 3.1
Signed-off-by: Gerd Hoffmann <[email protected]>
Signed-off-by: Alon Levy <[email protected]> |
Subsets and Splits