func
stringlengths 0
484k
| target
int64 0
1
| cwe
sequencelengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
/* Allocate and initialise a GDI region object for the inclusive
 * rectangle (nLeftRect, nTopRect)..(nRightRect, nBottomRect).
 * Returns NULL if allocation fails; the caller owns the region.
 * NOTE(review): width/height use inclusive "+ 1" sizing, so inverted
 * input coordinates yield non-positive sizes here; clamping of invalid
 * rectangles is presumably done by the CVE-2020-11523 fix elsewhere —
 * confirm against the referenced FreeRDP commit. */
HGDI_RGN gdi_CreateRectRgn(INT32 nLeftRect, INT32 nTopRect, INT32 nRightRect, INT32 nBottomRect)
{
HGDI_RGN hRgn = (HGDI_RGN)calloc(1, sizeof(GDI_RGN));
if (!hRgn)
return NULL;
hRgn->objectType = GDIOBJECT_REGION;
hRgn->x = nLeftRect;
hRgn->y = nTopRect;
/* Inclusive extent: a rect with left == right still spans 1 pixel. */
hRgn->w = nRightRect - nLeftRect + 1;
hRgn->h = nBottomRect - nTopRect + 1;
hRgn->null = FALSE;
return hRgn;
} | 0 | [
"CWE-190"
] | FreeRDP | ce21b9d7ecd967e0bc98ed31a6b3757848aa6c9e | 17,464,023,555,417,375,000,000,000,000,000,000,000 | 15 | Fix CVE-2020-11523: clamp invalid rectangles to size 0
Thanks to Sunglin and HuanGMz from Knownsec 404 |
/* Force a media re-read for the floppy drive behind 'bdev': mark the
 * drive's bit in fake_change, flush pending floppy requests, then let
 * the block layer re-validate the disk. Always returns 0. */
static int invalidate_drive(struct block_device *bdev)
{
/* invalidate the buffer track to force a reread */
set_bit((long)bdev->bd_disk->private_data, &fake_change);
process_fd_request();
check_disk_change(bdev);
return 0;
} | 0 | [
"CWE-264",
"CWE-754"
] | linux | ef87dbe7614341c2e7bfe8d32fcb7028cc97442c | 245,051,318,818,989,100,000,000,000,000,000,000,000 | 8 | floppy: ignore kernel-only members in FDRAWCMD ioctl input
Always clear out these floppy_raw_cmd struct members after copying the
entire structure from userspace so that the in-kernel version is always
valid and never left in an interdeterminate state.
Signed-off-by: Matthew Daley <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
/*
** Step a reverse iterator (*ppIter) over an FTS3 doclist back by one
** docid. Docids are delta-encoded varints; bDescIdx flips the sign of
** each delta for descending-order doclists.
**
** First call (*ppIter == NULL): scan forward through the whole doclist
** to locate the final docid and its position list, then publish them
** via *piDocid / *pnList / *ppIter.
**
** Subsequent calls: decode the preceding delta in reverse from the
** current position; *pbEof is set once the start of the doclist is
** reached.
*/
void sqlite3Fts3DoclistPrev(
int bDescIdx, /* True if the doclist is desc */
char *aDoclist, /* Pointer to entire doclist */
int nDoclist, /* Length of aDoclist in bytes */
char **ppIter, /* IN/OUT: Iterator pointer */
sqlite3_int64 *piDocid, /* IN/OUT: Docid pointer */
int *pnList, /* OUT: List length pointer */
u8 *pbEof /* OUT: End-of-file flag */
){
char *p = *ppIter;
assert( nDoclist>0 );
assert( *pbEof==0 );
assert( p || *piDocid==0 );
assert( !p || (p>aDoclist && p<&aDoclist[nDoclist]) );
/* First invocation: walk forward to the last entry in the doclist. */
if( p==0 ){
sqlite3_int64 iDocid = 0;
char *pNext = 0;
char *pDocid = aDoclist;
char *pEnd = &aDoclist[nDoclist];
int iMul = 1;
while( pDocid<pEnd ){
sqlite3_int64 iDelta;
pDocid += sqlite3Fts3GetVarint(pDocid, &iDelta);
iDocid += (iMul * iDelta);
pNext = pDocid;
fts3PoslistCopy(0, &pDocid);
/* Skip the 0x00 terminator byte(s) following the position list. */
while( pDocid<pEnd && *pDocid==0 ) pDocid++;
iMul = (bDescIdx ? -1 : 1);
}
*pnList = (int)(pEnd - pNext);
*ppIter = pNext;
*piDocid = iDocid;
}else{
/* Step backwards: undo the delta that led to the current docid. */
int iMul = (bDescIdx ? -1 : 1);
sqlite3_int64 iDelta;
fts3GetReverseVarint(&p, aDoclist, &iDelta);
*piDocid -= (iMul * iDelta);
if( p==aDoclist ){
*pbEof = 1;
}else{
char *pSave = p;
fts3ReversePoslist(aDoclist, &p);
*pnList = (int)(pSave - p);
}
*ppIter = p;
}
} | 0 | [
"CWE-787"
] | sqlite | c72f2fb7feff582444b8ffdc6c900c69847ce8a9 | 211,647,834,549,260,600,000,000,000,000,000,000,000 | 52 | More improvements to shadow table corruption detection in FTS3.
FossilOrigin-Name: 51525f9c3235967bc00a090e84c70a6400698c897aa4742e817121c725b8c99d |
/* Release the per-namespace IPv6 SNMP MIB statistics tables: the
 * per-cpu UDP/UDP-Lite/IPv6/ICMPv6 tables via snmp_mib_free(), and the
 * plain-allocated icmpv6msg table via kfree(). */
static void ipv6_cleanup_mibs(struct net *net)
{
snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
kfree(net->mib.icmpv6msg_statistics);
} | 0 | [
In case IPv6 is compiled as a module, introduce a stub
for ipv6_sock_mc_join and ipv6_sock_mc_drop etc.. It will be used
by vxlan module. Suggested by Ben.
This is an ugly but easy solution for now.
Cc: Ben Hutchings <[email protected]>
Cc: Stephen Hemminger <[email protected]>
Cc: David S. Miller <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
// Serialize node_count points of geometry *ge, starting at point index
// seek_begin, as a WKB LineString (wkbLineString for 2 axes,
// wkbLineString25D for 3) into the buffer at serializeBegin.
// Throws SG_Exception_BadFeature for any other axis count.
// Returns the advanced write pointer. NOTE(review): no bounds checking
// is performed here — the caller must guarantee the destination buffer
// is large enough (cf. the referenced overflow fix commit).
void* inPlaceSerialize_LineString(SGeometry * ge, int node_count, size_t seek_begin, void * serializeBegin)
{
uint8_t order = PLATFORM_HEADER;
uint32_t t = ge->get_axisCount() == 2 ? wkbLineString:
ge->get_axisCount() == 3 ? wkbLineString25D: wkbNone;
if(t == wkbNone) throw SG_Exception_BadFeature();
uint32_t nc = (uint32_t) node_count;
// WKB header: byte order, geometry type, point count.
serializeBegin = memcpy_jump(serializeBegin, &order, 1);
serializeBegin = memcpy_jump(serializeBegin, &t, 4);
serializeBegin = memcpy_jump(serializeBegin, &nc, 4);
// Now serialize points
for(int ind = 0; ind < node_count; ind++)
{
Point & p = (*ge)[seek_begin + ind];
double x = p[0];
double y = p[1];
serializeBegin = memcpy_jump(serializeBegin, &x, 8);
serializeBegin = memcpy_jump(serializeBegin, &y, 8);
if(ge->get_axisCount() >= 3)
{
double z = p[2];
serializeBegin = memcpy_jump(serializeBegin, &z, 8);
}
}
return serializeBegin;
} | 0 | [
"CWE-787"
] | gdal | 767e3a56144f676ca738ef8f700e0e56035bd05a | 87,607,640,493,520,140,000,000,000,000,000,000,000 | 31 | netCDF: avoid buffer overflow. master only. Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=15143. Credit to OSS Fuzz |
/* Move/load a CD changer slot: slot >= 0 loads that slot, slot < 0
 * unloads (except on Sanyo changers, where unload is a no-op).
 * Sends GPCMD_LOAD_UNLOAD with a 60s timeout, or the Sanyo quirk
 * command (GPCMD_TEST_UNIT_READY with the slot in byte 7).
 * Returns the status from the driver's generic_packet op. */
static int cdrom_load_unload(struct cdrom_device_info *cdi, int slot)
{
struct packet_command cgc;
cd_dbg(CD_CHANGER, "entering cdrom_load_unload()\n");
if (cdi->sanyo_slot && slot < 0)
return 0;
init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
cgc.cmd[0] = GPCMD_LOAD_UNLOAD;
cgc.cmd[4] = 2 + (slot >= 0);
cgc.cmd[8] = slot;
cgc.timeout = 60 * HZ;
/* The Sanyo 3 CD changer uses byte 7 of the
GPCMD_TEST_UNIT_READY to command to switch CDs instead of
using the GPCMD_LOAD_UNLOAD opcode. */
if (cdi->sanyo_slot && -1 < slot) {
cgc.cmd[0] = GPCMD_TEST_UNIT_READY;
cgc.cmd[7] = slot;
cgc.cmd[4] = cgc.cmd[8] = 0;
/* Remember the active slot; 0 is reserved, so slot 0 is stored as 3. */
cdi->sanyo_slot = slot ? slot : 3;
}
return cdi->ops->generic_packet(cdi, &cgc);
} | 0 | [
"CWE-119",
"CWE-787"
] | linux | 9de4ee40547fd315d4a0ed1dd15a2fa3559ad707 | 235,376,657,212,440,900,000,000,000,000,000,000,000 | 26 | cdrom: information leak in cdrom_ioctl_media_changed()
This cast is wrong. "cdi->capacity" is an int and "arg" is an unsigned
long. The way the check is written now, if one of the high 32 bits is
set then we could read outside the info->slots[] array.
This bug is pretty old and it predates git.
Reviewed-by: Christoph Hellwig <[email protected]>
Cc: [email protected]
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
/*
 * Try to add buffer 'bh' at logical block 'lblk' to the extent being
 * accumulated in mpd->map for writeback.
 *
 * Returns true when the buffer is consumed: it needs no mapping (and no
 * extent is pending), it starts a new extent, or it extends the current
 * one. Returns false when the caller must map/submit the current extent
 * first (extent full, non-contiguous block, or differing buffer flags).
 */
static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
struct buffer_head *bh)
{
struct ext4_map_blocks *map = &mpd->map;
/* Buffer that doesn't need mapping for writeback? */
if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
(!buffer_delay(bh) && !buffer_unwritten(bh))) {
/* So far no extent to map => we write the buffer right away */
if (map->m_len == 0)
return true;
return false;
}
/* First block in the extent? */
if (map->m_len == 0) {
map->m_lblk = lblk;
map->m_len = 1;
map->m_flags = bh->b_state & BH_FLAGS;
return true;
}
/* Don't go larger than mballoc is willing to allocate */
if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
return false;
/* Can we merge the block to our big extent? */
if (lblk == map->m_lblk + map->m_len &&
(bh->b_state & BH_FLAGS) == map->m_flags) {
map->m_len++;
return true;
}
return false;
} | 0 | [
"CWE-362"
] | linux | ea3d7209ca01da209cda6f0dea8be9cc4b7a933b | 302,512,788,956,532,800,000,000,000,000,000,000,000 | 34 | ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in page fault faulting in a page into a range that we
are punching after truncate_pagecache_range() has been called and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size but there isn't similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]> |
// Finalize per-stream bookkeeping once the request is complete:
// record completion on StreamInfo, tag downstream remote disconnects
// with the appropriate response details/flag, decrement active-request
// and upgrade gauges, bump health-check tracing stats, and finalize the
// tracing span if one is active.
void ConnectionManagerImpl::ActiveStream::completeRequest() {
filter_manager_.streamInfo().onRequestComplete();
if (connection_manager_.remote_close_) {
filter_manager_.streamInfo().setResponseCodeDetails(
StreamInfo::ResponseCodeDetails::get().DownstreamRemoteDisconnect);
filter_manager_.streamInfo().setResponseFlag(
StreamInfo::ResponseFlag::DownstreamConnectionTermination);
}
connection_manager_.stats_.named_.downstream_rq_active_.dec();
if (filter_manager_.streamInfo().healthCheck()) {
connection_manager_.config_.tracingStats().health_check_.inc();
}
if (active_span_) {
Tracing::HttpTracerUtility::finalizeDownstreamSpan(
*active_span_, request_headers_.get(), response_headers_.get(), response_trailers_.get(),
filter_manager_.streamInfo(), *this);
}
if (state_.successful_upgrade_) {
connection_manager_.stats_.named_.downstream_cx_upgrades_active_.dec();
}
} | 0 | [
"CWE-416"
] | envoy | 148de954ed3585d8b4298b424aa24916d0de6136 | 94,283,333,296,770,040,000,000,000,000,000,000,000 | 23 | CVE-2021-43825
Response filter manager crash
Signed-off-by: Yan Avlasov <[email protected]> |
/*
 * Allocate and minimally initialise the intel_engine_cs for engine 'id'
 * on GT 'gt' from the static intel_engines[] table: identity
 * (class/instance/mmio base/name), default heartbeat/preempt/timeslice
 * properties, context size, latency/stats state, and mmio sanitization.
 * Registers the engine in both gt->engine[id] and
 * gt->engine_class[class][instance].
 *
 * Returns 0 on success, -EINVAL for out-of-range or duplicate engine
 * info, -ENOMEM on allocation failure.
 *
 * NOTE(review): this is the pre-fix version flagged in the dataset —
 * engine->guc_id is taken verbatim from info->hw_id here, whereas GuC
 * firmware v49+ expects a class/instance encoding (see the referenced
 * fix commit c784e524).
 */
static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
return -EINVAL;
if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
return -EINVAL;
if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
return -EINVAL;
/* Reject a second engine claiming the same class/instance slot. */
if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
return -EINVAL;
engine = kzalloc(sizeof(*engine), GFP_KERNEL);
if (!engine)
return -ENOMEM;
BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
engine->id = id;
engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
engine->i915 = i915;
engine->gt = gt;
engine->uncore = gt->uncore;
engine->hw_id = engine->guc_id = info->hw_id;
engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
engine->class = info->class;
engine->instance = info->instance;
__sprint_engine_name(engine);
/* Default scheduling properties from Kconfig; may be overridden. */
engine->props.heartbeat_interval_ms =
CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
engine->props.max_busywait_duration_ns =
CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
engine->props.preempt_timeout_ms =
CONFIG_DRM_I915_PREEMPT_TIMEOUT;
engine->props.stop_timeout_ms =
CONFIG_DRM_I915_STOP_TIMEOUT;
engine->props.timeslice_duration_ms =
CONFIG_DRM_I915_TIMESLICE_DURATION;
/* Override to uninterruptible for OpenCL workloads. */
if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
engine->props.preempt_timeout_ms = 0;
engine->defaults = engine->props; /* never to change again */
engine->context_size = intel_engine_context_size(gt, engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
DRIVER_CAPS(i915)->has_logical_contexts = true;
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
ewma__engine_latency_init(&engine->latency);
seqlock_init(&engine->stats.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
/* Scrub mmio state on takeover */
intel_engine_sanitize_mmio(engine);
gt->engine_class[info->class][info->instance] = engine;
gt->engine[id] = engine;
return 0;
} | 1 | [
"CWE-20",
"CWE-190"
] | linux | c784e5249e773689e38d2bc1749f08b986621a26 | 75,452,809,966,664,430,000,000,000,000,000,000,000 | 79 | drm/i915/guc: Update to use firmware v49.0.1
The latest GuC firmware includes a number of interface changes that
require driver updates to match.
* Starting from Gen11, the ID to be provided to GuC needs to contain
the engine class in bits [0..2] and the instance in bits [3..6].
NOTE: this patch breaks pointer dereferences in some existing GuC
functions that use the guc_id to dereference arrays but these functions
are not used for now as we have GuC submission disabled and we will
update these functions in follow up patch which requires new IDs.
* The new GuC requires the additional data structure (ADS) and associated
'private_data' pointer to be setup. This is basically a scratch area
of memory that the GuC owns. The size is read from the CSS header.
* There is now a physical to logical engine mapping table in the ADS
which needs to be configured in order for the firmware to load. For
now, the table is initialised with a 1 to 1 mapping.
* GUC_CTL_CTXINFO has been removed from the initialization params.
* reg_state_buffer is maintained internally by the GuC as part of
the private data.
* The ADS layout has changed significantly. This patch updates the
shared structure and also adds better documentation of the layout.
* While i915 does not use GuC doorbells, the firmware now requires
that some initialisation is done.
* The number of engine classes and instances supported in the ADS has
been increased.
Signed-off-by: John Harrison <[email protected]>
Signed-off-by: Matthew Brost <[email protected]>
Signed-off-by: Daniele Ceraolo Spurio <[email protected]>
Signed-off-by: Oscar Mateo <[email protected]>
Signed-off-by: Michel Thierry <[email protected]>
Signed-off-by: Rodrigo Vivi <[email protected]>
Signed-off-by: Michal Wajdeczko <[email protected]>
Cc: Michal Winiarski <[email protected]>
Cc: Tomasz Lis <[email protected]>
Cc: Joonas Lahtinen <[email protected]>
Reviewed-by: Daniele Ceraolo Spurio <[email protected]>
Signed-off-by: Joonas Lahtinen <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected] |
/*
 * Resolve an attribute-group QName reference against the parser's
 * schema. An already-resolved reference (ref->item != NULL) is a
 * no-op. On success stores the group in ref->item and returns 0; when
 * lookup fails, reports XML_SCHEMAP_SRC_RESOLVE against the reference
 * node and returns the context's error code.
 */
xmlSchemaResolveAttrGroupReferences(xmlSchemaQNameRefPtr ref,
xmlSchemaParserCtxtPtr ctxt)
{
xmlSchemaAttributeGroupPtr group;
if (ref->item != NULL)
return(0);
group = xmlSchemaGetAttributeGroup(ctxt->schema,
ref->name,
ref->targetNamespace);
if (group == NULL) {
xmlSchemaPResCompAttrErr(ctxt,
XML_SCHEMAP_SRC_RESOLVE,
NULL, ref->node,
"ref", ref->name, ref->targetNamespace,
ref->itemType, NULL);
return(ctxt->err);
}
ref->item = WXS_BASIC_CAST group;
return(0);
} | 0 | [
"CWE-134"
] | libxml2 | 4472c3a5a5b516aaf59b89be602fbce52756c3e9 | 262,076,678,513,934,920,000,000,000,000,000,000,000 | 21 | Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports. |
// Handler for the PDF "g" operator: switch the fill color space to
// DeviceGray and set the fill color to the gray level in args[0].
// If a color-space pattern was active while drawing text, the pending
// text object is first closed (filling via the pattern when the device
// has a text clip) and then reopened with the updated state.
void Gfx::opSetFillGray(Object args[], int numArgs) {
GfxColor color;
if (textHaveCSPattern && drawText) {
GBool needFill = out->deviceHasTextClip(state);
out->endTextObject(state);
if (needFill) {
doPatternFill(gTrue);
}
out->restoreState(state);
}
// Clear any fill pattern and install the new gray color space/color.
state->setFillPattern(NULL);
state->setFillColorSpace(new GfxDeviceGrayColorSpace());
out->updateFillColorSpace(state);
color.c[0] = dblToCol(args[0].getNum());
state->setFillColor(&color);
out->updateFillColor(state);
if (textHaveCSPattern) {
// Re-enter the text object and resync text-related state.
out->beginTextObject(state);
out->updateRender(state);
out->updateTextMat(state);
out->updateTextPos(state);
textHaveCSPattern = gFalse;
}
} | 0 | [
Fixes bug 13518 |
/* Check-framework unit test for safe_atod(): table-driven cases for
 * plain integers, decimals, and rejection of hex, garbage, NAN,
 * INFINITY and empty strings. On rejection the output variable must be
 * left untouched (sentinel value 0xad). */
START_TEST(safe_atod_test)
{
struct atod_test {
char *str;
bool success;
double val;
} tests[] = {
{ "10", true, 10 },
{ "20", true, 20 },
{ "-1", true, -1 },
{ "2147483647", true, 2147483647 },
{ "-2147483648", true, -2147483648 },
{ "4294967295", true, 4294967295 },
{ "0x0", false, 0 },
{ "0x10", false, 0 },
{ "0xaf", false, 0 },
{ "x80", false, 0 },
{ "0.0", true, 0.0 },
{ "0.1", true, 0.1 },
{ "1.2", true, 1.2 },
{ "-324.9", true, -324.9 },
{ "9324.9", true, 9324.9 },
{ "NAN", false, 0 },
{ "INFINITY", false, 0 },
{ "-10x10", false, 0 },
{ "1x-99", false, 0 },
{ "", false, 0 },
{ "abd", false, 0 },
{ "xabd", false, 0 },
{ "0x0x", false, 0 },
{ NULL, false, 0 }
};
double v;
bool success;
for (int i = 0; tests[i].str != NULL; i++) {
v = 0xad; /* sentinel: must survive a failed parse */
success = safe_atod(tests[i].str, &v);
ck_assert(success == tests[i].success);
if (success)
/* NOTE(review): ck_assert_int_eq truncates the double;
 * fractional expected values are only compared as ints. */
ck_assert_int_eq(v, tests[i].val);
else
ck_assert_int_eq(v, 0xad);
}
} | 0 | [
"CWE-134"
] | libinput | a423d7d3269dc32a87384f79e29bb5ac021c83d1 | 60,421,558,918,372,820,000,000,000,000,000,000,000 | 45 | evdev: strip the device name of format directives
This fixes a format string vulnerabilty.
evdev_log_message() composes a format string consisting of a fixed
prefix (including the rendered device name) and the passed-in format
buffer. This format string is then passed with the arguments to the
actual log handler, which usually and eventually ends up being printf.
If the device name contains a printf-style format directive, these ended
up in the format string and thus get interpreted correctly, e.g. for a
device "Foo%sBar" the log message vs printf invocation ends up being:
evdev_log_message(device, "some message %s", "some argument");
printf("event9 - Foo%sBar: some message %s", "some argument");
This can enable an attacker to execute malicious code with the
privileges of the process using libinput.
To exploit this, an attacker needs to be able to create a kernel device
with a malicious name, e.g. through /dev/uinput or a Bluetooth device.
To fix this, convert any potential format directives in the device name
by duplicating percentages.
Pre-rendering the device to avoid the issue altogether would be nicer
but the current log level hooks do not easily allow for this. The device
name is the only user-controlled part of the format string.
A second potential issue is the sysname of the device which is also
sanitized.
This issue was found by Albin Eldstål-Ahrens and Benjamin Svensson from
Assured AB, and independently by Lukas Lamster.
Fixes #752
Signed-off-by: Peter Hutterer <[email protected]> |
// Sum the transfer rates (kbps) of all downloads currently in the
// DOWNLOADING state; returns 0.0 when none are active.
double pb_controller::get_total_kbps() {
double result = 0.0;
for (auto dl : downloads_) {
if (dl.status() == dlstatus::DOWNLOADING) {
result += dl.kbps();
}
}
return result;
} | 0 | [
"CWE-78"
] | newsbeuter | c8fea2f60c18ed30bdd1bb6f798e994e51a58260 | 99,750,633,320,202,980,000,000,000,000,000,000,000 | 9 | Work around shell code in podcast names (#598) |
/* Register the application-supplied callback OpenSSL's legacy locking
 * API uses for atomic add operations on shared counters. */
void CRYPTO_set_add_lock_callback(int (*func)(int *num,int mount,int type,
const char *file,int line))
{
add_lock_callback=func;
} | 0 | [
"CWE-310"
] | openssl | 270881316664396326c461ec7a124aec2c6cc081 | 23,616,496,165,612,340,000,000,000,000,000,000,000 | 5 | Add and use a constant-time memcmp.
This change adds CRYPTO_memcmp, which compares two vectors of bytes in
an amount of time that's independent of their contents. It also changes
several MAC compares in the code to use this over the standard memcmp,
which may leak information about the size of a matching prefix.
(cherry picked from commit 2ee798880a246d648ecddadc5b91367bee4a5d98)
Conflicts:
crypto/crypto.h
ssl/t1_lib.c
(cherry picked from commit dc406b59f3169fe191e58906df08dce97edb727c)
Conflicts:
crypto/crypto.h
ssl/d1_pkt.c
ssl/s3_pkt.c |
/* Byte-wise comparison of two row references of MAX_REFLENGTH bytes
 * (4 or 8, selected at compile time). Returns the signed difference of
 * the first differing byte, memcmp-style, with bytes compared in order
 * of significance. */
static int rr_cmp(uchar *a,uchar *b)
{
if (a[0] != b[0])
return (int) a[0] - (int) b[0];
if (a[1] != b[1])
return (int) a[1] - (int) b[1];
if (a[2] != b[2])
return (int) a[2] - (int) b[2];
#if MAX_REFLENGTH == 4
return (int) a[3] - (int) b[3];
#else
if (a[3] != b[3])
return (int) a[3] - (int) b[3];
if (a[4] != b[4])
return (int) a[4] - (int) b[4];
if (a[5] != b[5])
return (int) a[5] - (int) b[5];
if (a[6] != b[6])
return (int) a[6] - (int) b[6];
return (int) a[7] - (int) b[7];
#endif
} | 0 | [
use the correct check. before invoking handler methods we
need to know that the table was opened, not only created. |
/* Destructor for a FilePartitionBox: free the owned
 * scheme_specific_info and entries buffers, then the box itself.
 * Safe to call with NULL.
 * NOTE(review): the definition appears truncated in this excerpt
 * (closing brace fused into dataset metadata). */
void fpar_del(GF_Box *s)
{
FilePartitionBox *ptr = (FilePartitionBox *)s;
if (ptr == NULL) return;
if (ptr->scheme_specific_info) gf_free(ptr->scheme_specific_info);
if (ptr->entries) gf_free(ptr->entries);
gf_free(ptr); | 0 | [
"CWE-400",
"CWE-401"
] | gpac | d2371b4b204f0a3c0af51ad4e9b491144dd1225c | 31,980,197,215,409,305,000,000,000,000,000,000,000 | 8 | prevent dref memleak on invalid input (#1183) |
/* No-op stub for soft-FPU state initialisation (used when math
 * emulation is compiled out). */
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {} | 0 | [
"CWE-284",
"CWE-264"
] | linux | 26bef1318adc1b3a530ecc807ef99346db2aa8b0 | 283,737,837,356,942,570,000,000,000,000,000,000,000 | 1 | x86, fpu, amd: Clear exceptions in AMD FXSAVE workaround
Before we do an EMMS in the AMD FXSAVE information leak workaround we
need to clear any pending exceptions, otherwise we trap with a
floating-point exception inside this code.
Reported-by: halfdog <[email protected]>
Tested-by: Borislav Petkov <[email protected]>
Link: http://lkml.kernel.org/r/CA%2B55aFxQnY_PCG_n4=0w-VG=YLXL-yr7oMxyy0WU2gCBAf3ydg@mail.gmail.com
Signed-off-by: H. Peter Anvin <[email protected]> |
/* Emit bytecode for a try/catch/finally statement.
 * Two nested OP_TRY frames are used: the outer (L1) protects the try
 * block, the inner (L2) protects the catch block — if the catch body
 * itself throws, the finally block is inlined and the exception is
 * rethrown. The finally block is emitted inline on each exit path.
 * In strict mode the catch variable may not be 'arguments' or 'eval'. */
static void ctrycatchfinally(JF, js_Ast *trystm, js_Ast *catchvar, js_Ast *catchstm, js_Ast *finallystm)
{
int L1, L2, L3;
L1 = emitjump(J, F, OP_TRY);
{
/* if we get here, we have caught an exception in the try block */
L2 = emitjump(J, F, OP_TRY);
{
/* if we get here, we have caught an exception in the catch block */
cstm(J, F, finallystm); /* inline finally block */
emit(J, F, OP_THROW); /* rethrow exception */
}
label(J, F, L2);
if (J->strict) {
if (!strcmp(catchvar->string, "arguments"))
jsC_error(J, catchvar, "redefining 'arguments' is not allowed in strict mode");
if (!strcmp(catchvar->string, "eval"))
jsC_error(J, catchvar, "redefining 'eval' is not allowed in strict mode");
}
emitstring(J, F, OP_CATCH, catchvar->string);
cstm(J, F, catchstm);
emit(J, F, OP_ENDCATCH);
L3 = emitjump(J, F, OP_JUMP); /* skip past the try block to the finally block */
}
label(J, F, L1);
cstm(J, F, trystm);
emit(J, F, OP_ENDTRY);
label(J, F, L3);
cstm(J, F, finallystm);
} | 0 | [
"CWE-476"
] | mujs | 5008105780c0b0182ea6eda83ad5598f225be3ee | 292,151,734,475,207,640,000,000,000,000,000,000,000 | 30 | Fix 697172: degenerate labeled break/continue statement.
A labeled break statement will look for a matching label through
its chain of parent statements. We start looking at the break statement
though, so if the label is attached to the break, we'll return the break
statement itself as a break target.
Start looking for targets one level up instead. |
// Forward wavelet encode of a 16-bit pair (a, b): l receives the
// biased average, h the biased difference, using A_OFFSET/M_OFFSET and
// MOD_MASK modular arithmetic so the transform is reversible within
// 16 bits.
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
unsigned short &h) {
int ao = (a + A_OFFSET) & MOD_MASK;
int m = ((ao + b) >> 1);
int d = ao - b;
// Re-bias a negative difference so both outputs stay in range.
if (d < 0) m = (m + M_OFFSET) & MOD_MASK;
d &= MOD_MASK;
l = static_cast<unsigned short>(m);
h = static_cast<unsigned short>(d);
} | 0 | [
"CWE-20",
"CWE-190"
] | tinyexr | a685e3332f61cd4e59324bf3f669d36973d64270 | 237,112,063,772,700,060,000,000,000,000,000,000,000 | 13 | Make line_no with too large value(2**20) invalid. Fixes #124 |
/* Look up the flag bitmask registered for the given SIP transport
 * type. */
PJ_DEF(unsigned) pjsip_transport_get_flag_from_type(pjsip_transport_type_e type)
{
/* Return transport flag. */
return get_tpname(type)->flag;
} | 0 | [
"CWE-297",
"CWE-295"
] | pjproject | 67e46c1ac45ad784db5b9080f5ed8b133c122872 | 318,231,571,405,870,100,000,000,000,000,000,000,000 | 5 | Merge pull request from GHSA-8hcp-hm38-mfph
* Check hostname during TLS transport selection
* revision based on feedback
* remove the code in create_request that has been moved |
/* Architecture hook run on syscall exit: perform audit exit
 * processing, emit the sys_exit tracepoint when enabled, and notify
 * ptrace when single-stepping or syscall-tracing is active. */
void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->result);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
ptrace_report_syscall_exit(regs, step);
} | 0 | [
"CWE-284",
"CWE-120"
] | linux | 8e1278444446fc97778a5e5c99bca1ce0bbc5ec9 | 287,562,750,688,609,700,000,000,000,000,000,000,000 | 13 | powerpc/32: Fix overread/overwrite of thread_struct via ptrace
The ptrace PEEKUSR/POKEUSR (aka PEEKUSER/POKEUSER) API allows a process
to read/write registers of another process.
To get/set a register, the API takes an index into an imaginary address
space called the "USER area", where the registers of the process are
laid out in some fashion.
The kernel then maps that index to a particular register in its own data
structures and gets/sets the value.
The API only allows a single machine-word to be read/written at a time.
So 4 bytes on 32-bit kernels and 8 bytes on 64-bit kernels.
The way floating point registers (FPRs) are addressed is somewhat
complicated, because double precision float values are 64-bit even on
32-bit CPUs. That means on 32-bit kernels each FPR occupies two
word-sized locations in the USER area. On 64-bit kernels each FPR
occupies one word-sized location in the USER area.
Internally the kernel stores the FPRs in an array of u64s, or if VSX is
enabled, an array of pairs of u64s where one half of each pair stores
the FPR. Which half of the pair stores the FPR depends on the kernel's
endianness.
To handle the different layouts of the FPRs depending on VSX/no-VSX and
big/little endian, the TS_FPR() macro was introduced.
Unfortunately the TS_FPR() macro does not take into account the fact
that the addressing of each FPR differs between 32-bit and 64-bit
kernels. It just takes the index into the "USER area" passed from
userspace and indexes into the fp_state.fpr array.
On 32-bit there are 64 indexes that address FPRs, but only 32 entries in
the fp_state.fpr array, meaning the user can read/write 256 bytes past
the end of the array. Because the fp_state sits in the middle of the
thread_struct there are various fields than can be overwritten,
including some pointers. As such it may be exploitable.
It has also been observed to cause systems to hang or otherwise
misbehave when using gdbserver, and is probably the root cause of this
report which could not be easily reproduced:
https://lore.kernel.org/linuxppc-dev/[email protected]/
Rather than trying to make the TS_FPR() macro even more complicated to
fix the bug, or add more macros, instead add a special-case for 32-bit
kernels. This is more obvious and hopefully avoids a similar bug
happening again in future.
Note that because 32-bit kernels never have VSX enabled the code doesn't
need to consider TS_FPRWIDTH/OFFSET at all. Add a BUILD_BUG_ON() to
ensure that 32-bit && VSX is never enabled.
Fixes: 87fec0514f61 ("powerpc: PTRACE_PEEKUSR/PTRACE_POKEUSER of FPR registers in little endian builds")
Cc: [email protected] # v3.13+
Reported-by: Ariel Miculas <[email protected]>
Tested-by: Christophe Leroy <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected] |
/* ndo_set_vf_spoofchk implementation: enable or disable MAC and VLAN
 * anti-spoofing for VF 'vf'. On hardware supporting ethertype
 * anti-spoofing, LLDP and flow-control pause frames are first set up
 * via ETQF filters so VFs cannot transmit forged copies.
 * Returns -EINVAL for an out-of-range VF index, otherwise 0. */
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
if (vf >= adapter->num_vfs)
return -EINVAL;
adapter->vfinfo[vf].spoofchk_enabled = setting;
/* configure MAC spoofing */
hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
/* configure VLAN spoofing */
hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
/* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
 * calling set_ethertype_anti_spoofing for each VF in loop below
 */
if (hw->mac.ops.set_ethertype_anti_spoofing) {
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
(IXGBE_ETQF_FILTER_EN |
IXGBE_ETQF_TX_ANTISPOOF |
ETH_P_LLDP));
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
(IXGBE_ETQF_FILTER_EN |
IXGBE_ETQF_TX_ANTISPOOF |
ETH_P_PAUSE));
hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
}
return 0;
} | 0 | [
"CWE-20"
] | linux | 63e39d29b3da02e901349f6cd71159818a4737a6 | 44,713,679,143,714,850,000,000,000,000,000,000,000 | 35 | ixgbe: fix large MTU request from VF
Check that the MTU value requested by the VF is in the supported
range of MTUs before attempting to set the VF large packet enable,
otherwise reject the request. This also avoids unnecessary
register updates in the case of the 82599 controller.
Fixes: 872844ddb9e4 ("ixgbe: Enable jumbo frames support w/ SR-IOV")
Co-developed-by: Piotr Skajewski <[email protected]>
Signed-off-by: Piotr Skajewski <[email protected]>
Signed-off-by: Jesse Brandeburg <[email protected]>
Co-developed-by: Mateusz Palczewski <[email protected]>
Signed-off-by: Mateusz Palczewski <[email protected]>
Tested-by: Konrad Jankowski <[email protected]>
Signed-off-by: Tony Nguyen <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
// Return the query-expression unit backing this table reference: the
// view's unit when this is a view, otherwise the derived-table unit
// (which may be NULL for a base table).
st_select_lex_unit *TABLE_LIST::get_unit()
{
return (view ? &view->unit : derived);
} | 0 | [
"CWE-416"
] | server | c02ebf3510850ba78a106be9974c94c3b97d8585 | 273,327,178,207,533,850,000,000,000,000,000,000,000 | 4 | MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments. |
// Merge the existing attribute map into out_attrs for an update:
//  - user metadata (RGW_ATTR_META_PREFIX) is preserved unless the name
//    appears in rmattr_names, in which case any staged copy is erased;
//  - non-meta attrs are carried over only if not already staged.
// Entries already present in out_attrs win over originals (emplace).
static void prepare_add_del_attrs(const map<string, bufferlist>& orig_attrs,
const set<string>& rmattr_names,
map<string, bufferlist>& out_attrs)
{
for (const auto& kv : orig_attrs) {
const string& name = kv.first;
/* Check if the attr is user-defined metadata item. */
if (name.compare(0, strlen(RGW_ATTR_META_PREFIX),
RGW_ATTR_META_PREFIX) == 0) {
/* For the buckets all existing meta attrs are preserved,
except those that are listed in rmattr_names. */
if (rmattr_names.find(name) != std::end(rmattr_names)) {
const auto aiter = out_attrs.find(name);
if (aiter != std::end(out_attrs)) {
out_attrs.erase(aiter);
}
} else {
/* emplace() won't alter the map if the key is already present.
 * This behaviour is fully intensional here. */
out_attrs.emplace(kv);
}
} else if (out_attrs.find(name) == std::end(out_attrs)) {
out_attrs[name] = kv.second;
}
}
} | 0 | [
"CWE-770"
] | ceph | ab29bed2fc9f961fe895de1086a8208e21ddaddc | 39,541,221,088,810,906,000,000,000,000,000,000,000 | 28 | rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests |
// Number of stored characters, excluding one reserved trailing element
// (presumably the NUL terminator — confirm against the buffer class).
size_t size() const { return buffer_.size() - 1; } | 0 | [
"CWE-134",
"CWE-119",
"CWE-787"
] | fmt | 8cf30aa2be256eba07bb1cefb998c52326e846e7 | 277,296,271,106,140,500,000,000,000,000,000,000,000 | 1 | Fix segfault on complex pointer formatting (#642) |
/* Attach to the SQPOLL data of an existing io_uring instance named by
 * p->wq_fd: validate that the fd refers to an io_uring file carrying
 * sq_data, take a reference on it, and return it.
 * Returns ERR_PTR(-ENXIO) for an unresolvable fd and ERR_PTR(-EINVAL)
 * for a non-io_uring fd or an instance without sq_data.
 * NOTE(review): this excerpt is truncated before the closing brace. */
static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
struct io_ring_ctx *ctx_attach;
struct io_sq_data *sqd;
struct fd f;
f = fdget(p->wq_fd);
if (!f.file)
return ERR_PTR(-ENXIO);
if (f.file->f_op != &io_uring_fops) {
fdput(f);
return ERR_PTR(-EINVAL);
}
ctx_attach = f.file->private_data;
sqd = ctx_attach->sq_data;
if (!sqd) {
fdput(f);
return ERR_PTR(-EINVAL);
}
refcount_inc(&sqd->refs);
fdput(f);
return sqd; | 0 | [
"CWE-667"
] | linux | 3ebba796fa251d042be42b929a2d916ee5c34a49 | 63,718,459,933,507,730,000,000,000,000,000,000,000 | 25 | io_uring: ensure that SQPOLL thread is started for exit
If we create it in a disabled state because IORING_SETUP_R_DISABLED is
set on ring creation, we need to ensure that we've kicked the thread if
we're exiting before it's been explicitly disabled. Otherwise we can run
into a deadlock where exit is waiting go park the SQPOLL thread, but the
SQPOLL thread itself is waiting to get a signal to start.
That results in the below trace of both tasks hung, waiting on each other:
INFO: task syz-executor458:8401 blocked for more than 143 seconds.
Not tainted 5.11.0-next-20210226-syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz-executor458 state:D stack:27536 pid: 8401 ppid: 8400 flags:0x00004004
Call Trace:
context_switch kernel/sched/core.c:4324 [inline]
__schedule+0x90c/0x21a0 kernel/sched/core.c:5075
schedule+0xcf/0x270 kernel/sched/core.c:5154
schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868
do_wait_for_common kernel/sched/completion.c:85 [inline]
__wait_for_common kernel/sched/completion.c:106 [inline]
wait_for_common kernel/sched/completion.c:117 [inline]
wait_for_completion+0x168/0x270 kernel/sched/completion.c:138
io_sq_thread_park fs/io_uring.c:7115 [inline]
io_sq_thread_park+0xd5/0x130 fs/io_uring.c:7103
io_uring_cancel_task_requests+0x24c/0xd90 fs/io_uring.c:8745
__io_uring_files_cancel+0x110/0x230 fs/io_uring.c:8840
io_uring_files_cancel include/linux/io_uring.h:47 [inline]
do_exit+0x299/0x2a60 kernel/exit.c:780
do_group_exit+0x125/0x310 kernel/exit.c:922
__do_sys_exit_group kernel/exit.c:933 [inline]
__se_sys_exit_group kernel/exit.c:931 [inline]
__x64_sys_exit_group+0x3a/0x50 kernel/exit.c:931
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x43e899
RSP: 002b:00007ffe89376d48 EFLAGS: 00000246 ORIG_RAX: 00000000000000e7
RAX: ffffffffffffffda RBX: 00000000004af2f0 RCX: 000000000043e899
RDX: 000000000000003c RSI: 00000000000000e7 RDI: 0000000000000000
RBP: 0000000000000000 R08: ffffffffffffffc0 R09: 0000000010000000
R10: 0000000000008011 R11: 0000000000000246 R12: 00000000004af2f0
R13: 0000000000000001 R14: 0000000000000000 R15: 0000000000000001
INFO: task iou-sqp-8401:8402 can't die for more than 143 seconds.
task:iou-sqp-8401 state:D stack:30272 pid: 8402 ppid: 8400 flags:0x00004004
Call Trace:
context_switch kernel/sched/core.c:4324 [inline]
__schedule+0x90c/0x21a0 kernel/sched/core.c:5075
schedule+0xcf/0x270 kernel/sched/core.c:5154
schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868
do_wait_for_common kernel/sched/completion.c:85 [inline]
__wait_for_common kernel/sched/completion.c:106 [inline]
wait_for_common kernel/sched/completion.c:117 [inline]
wait_for_completion+0x168/0x270 kernel/sched/completion.c:138
io_sq_thread+0x27d/0x1ae0 fs/io_uring.c:6717
ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294
INFO: task iou-sqp-8401:8402 blocked for more than 143 seconds.
Reported-by: [email protected]
Signed-off-by: Jens Axboe <[email protected]> |
/**
  Second phase of JOIN optimization.

  Takes the optimal join order chosen by stage 1 and turns it into an
  executable plan: builds the JOIN_TAB array (get_best_combination),
  attaches and simplifies WHERE/HAVING/ON conditions via the multiple
  equalities, optimizes away DISTINCT / GROUP BY / ORDER BY where the
  result order or uniqueness is already guaranteed, decides whether a
  temporary table and/or filesort is needed, picks ordered-index usage,
  and finalizes subquery execution strategies.

  @return 0 on success; 1 or -1 on error (error/zero_result_cause are set
          accordingly).  Trivially-empty results jump to setup_subq_exit
          to choose a constant/tableless plan instead of failing.
*/
int JOIN::optimize_stage2()
{
  ulonglong select_opts_for_readinfo;
  uint no_jbuf_after;
  JOIN_TAB *tab;
  DBUG_ENTER("JOIN::optimize_stage2");

  if (subq_exit_fl)
    goto setup_subq_exit;

  if (unlikely(thd->check_killed()))
    DBUG_RETURN(1);

  /* Generate an execution plan from the found optimal join order. */
  if (get_best_combination())
    DBUG_RETURN(1);

  if (make_range_rowid_filters())
    DBUG_RETURN(1);

  if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE))
    DBUG_RETURN(1);

  if (optimizer_flag(thd, OPTIMIZER_SWITCH_DERIVED_WITH_KEYS))
    drop_unused_derived_keys();

  if (rollup.state != ROLLUP::STATE_NONE)
  {
    if (rollup_process_const_fields())
    {
      DBUG_PRINT("error", ("Error: rollup_process_fields() failed"));
      DBUG_RETURN(1);
    }
  }
  else
  {
    /* Remove distinct if only const tables */
    select_distinct= select_distinct && (const_tables != table_count);
  }

  THD_STAGE_INFO(thd, stage_preparing);
  if (result->initialize_tables(this))
  {
    DBUG_PRINT("error",("Error: initialize_tables() failed"));
    DBUG_RETURN(1);				// error == -1
  }
  if (const_table_map != found_const_table_map &&
      !(select_options & SELECT_DESCRIBE))
  {
    // There is at least one empty const table
    zero_result_cause= "no matching row in const table";
    DBUG_PRINT("error",("Error: %s", zero_result_cause));
    error= 0;
    handle_implicit_grouping_with_window_funcs();
    goto setup_subq_exit;
  }
  if (!(thd->variables.option_bits & OPTION_BIG_SELECTS) &&
      best_read > (double) thd->variables.max_join_size &&
      !(select_options & SELECT_DESCRIBE))
  {						/* purecov: inspected */
    my_message(ER_TOO_BIG_SELECT, ER_THD(thd, ER_TOO_BIG_SELECT), MYF(0));
    error= -1;
    DBUG_RETURN(1);
  }
  if (const_tables && !thd->locked_tables_mode &&
      !(select_options & SELECT_NO_UNLOCK))
  {
    /*
      Unlock all tables, except sequences, as accessing these may still
      require table updates
    */
    mysql_unlock_some_tables(thd, table, const_tables,
                             GET_LOCK_SKIP_SEQUENCES);
  }
  if (!conds && outer_join)
  {
    /* Handle the case where we have an OUTER JOIN without a WHERE */
    conds= new (thd->mem_root) Item_bool(thd, true); // Always true
  }

  if (impossible_where)
  {
    zero_result_cause=
      "Impossible WHERE noticed after reading const tables";
    select_lex->mark_const_derived(zero_result_cause);
    handle_implicit_grouping_with_window_funcs();
    goto setup_subq_exit;
  }

  select= make_select(*table, const_table_map,
                      const_table_map, conds, (SORT_INFO*) 0, 1, &error);
  if (unlikely(error))
  {						/* purecov: inspected */
    error= -1;					/* purecov: inspected */
    DBUG_PRINT("error",("Error: make_select() failed"));
    DBUG_RETURN(1);
  }

  reset_nj_counters(this, join_list);
  if (make_outerjoin_info(this))
  {
    DBUG_RETURN(1);
  }

  /*
    Among the equal fields belonging to the same multiple equality
    choose the one that is to be retrieved first and substitute
    all references to these in where condition for a reference for
    the selected field.
  */
  if (conds)
  {
    conds= substitute_for_best_equal_field(thd, NO_PARTICULAR_TAB, conds,
                                           cond_equal, map2table, true);
    if (unlikely(thd->is_error()))
    {
      error= 1;
      DBUG_PRINT("error",("Error from substitute_for_best_equal"));
      DBUG_RETURN(1);
    }
    conds->update_used_tables();
    DBUG_EXECUTE("where",
                 print_where(conds,
                             "after substitute_best_equal",
                             QT_ORDINARY););
  }
  if (having)
  {
    having= substitute_for_best_equal_field(thd, NO_PARTICULAR_TAB, having,
                                            having_equal, map2table, false);
    if (thd->is_error())
    {
      error= 1;
      DBUG_PRINT("error",("Error from substitute_for_best_equal"));
      DBUG_RETURN(1);
    }
    if (having)
      having->update_used_tables();
    DBUG_EXECUTE("having",
                 print_where(having,
                             "after substitute_best_equal",
                             QT_ORDINARY););
  }

  /*
    Perform the optimization on fields evaluation mentioned above
    for all on expressions.
  */
  for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab;
       tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
  {
    if (*tab->on_expr_ref)
    {
      *tab->on_expr_ref= substitute_for_best_equal_field(thd, NO_PARTICULAR_TAB,
                                                         *tab->on_expr_ref,
                                                         tab->cond_equal,
                                                         map2table, true);
      if (unlikely(thd->is_error()))
      {
        error= 1;
        DBUG_PRINT("error",("Error from substitute_for_best_equal"));
        DBUG_RETURN(1);
      }
      (*tab->on_expr_ref)->update_used_tables();
    }
  }

  /*
    Perform the optimization on fields evaluation mentioned above
    for all used ref items.
  */
  for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES); tab;
       tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
  {
    uint key_copy_index=0;
    for (uint i=0; i < tab->ref.key_parts; i++)
    {
      Item **ref_item_ptr= tab->ref.items+i;
      Item *ref_item= *ref_item_ptr;
      if (!ref_item->used_tables() && !(select_options & SELECT_DESCRIBE))
        continue;
      COND_EQUAL *equals= cond_equal;
      JOIN_TAB *first_inner= tab->first_inner;
      /* Walk up the chain of nested outer joins, trying each level's
         multiple equalities in turn. */
      while (equals)
      {
        ref_item= substitute_for_best_equal_field(thd, tab, ref_item,
                                                  equals, map2table, true);
        if (unlikely(thd->is_fatal_error))
          DBUG_RETURN(1);

        if (first_inner)
	{
          equals= first_inner->cond_equal;
          first_inner= first_inner->first_upper;
        }
        else
          equals= 0;
      }  
      ref_item->update_used_tables();
      if (*ref_item_ptr != ref_item)
      {
        *ref_item_ptr= ref_item;
        Item *item= ref_item->real_item();
        store_key *key_copy= tab->ref.key_copy[key_copy_index];
        if (key_copy->type() == store_key::FIELD_STORE_KEY)
        {
          if (item->basic_const_item())
          {
            /* It is constant propagated here */
            tab->ref.key_copy[key_copy_index]=
              new store_key_const_item(*tab->ref.key_copy[key_copy_index],
                                       item);
          }
          else if (item->const_item())
	  {
            tab->ref.key_copy[key_copy_index]=
              new store_key_item(*tab->ref.key_copy[key_copy_index],
                                 item, TRUE);
          }            
          else
          {
            store_key_field *field_copy= ((store_key_field *)key_copy);
            DBUG_ASSERT(item->type() == Item::FIELD_ITEM);
            field_copy->change_source_field((Item_field *) item);
          }
        }
      }
      key_copy_index++;
    }
  }

  if (conds && const_table_map != found_const_table_map &&
      (select_options & SELECT_DESCRIBE))
  {
    conds=new (thd->mem_root) Item_bool(thd, false); // Always false
  }

  /* Cache constant expressions in WHERE, HAVING, ON clauses. */
  cache_const_exprs();

  if (setup_semijoin_loosescan(this))
    DBUG_RETURN(1);

  if (make_join_select(this, select, conds))
  {
    zero_result_cause=
      "Impossible WHERE noticed after reading const tables";
    select_lex->mark_const_derived(zero_result_cause);
    handle_implicit_grouping_with_window_funcs();
    goto setup_subq_exit;
  }

  error= -1;					/* if goto err */

  /* Optimize distinct away if possible */
  {
    ORDER *org_order= order;
    order=remove_const(this, order,conds,1, &simple_order);
    if (unlikely(thd->is_error()))
    {
      error= 1;
      DBUG_RETURN(1);
    }

    /*
      If we are using ORDER BY NULL or ORDER BY const_expression,
      return result in any order (even if we are using a GROUP BY)
    */
    if (!order && org_order)
      skip_sort_order= 1;
  }
  /*
     Check if we can optimize away GROUP BY/DISTINCT.
     We can do that if there are no aggregate functions, the
     fields in DISTINCT clause (if present) and/or columns in GROUP BY
     (if present) contain direct references to all key parts of
     an unique index (in whatever order) and if the key parts of the
     unique index cannot contain NULLs.
     Note that the unique keys for DISTINCT and GROUP BY should not
     be the same (as long as they are unique).

     The FROM clause must contain a single non-constant table.
  */
  if (table_count - const_tables == 1 && (group || select_distinct) &&
      !tmp_table_param.sum_func_count &&
      (!join_tab[const_tables].select ||
       !join_tab[const_tables].select->quick ||
       join_tab[const_tables].select->quick->get_type() != 
       QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) &&
      !select_lex->have_window_funcs())
  {
    if (group && rollup.state == ROLLUP::STATE_NONE &&
        list_contains_unique_index(join_tab[const_tables].table,
                                 find_field_in_order_list,
                                 (void *) group_list))
    {
      /*
        We have found that grouping can be removed since groups correspond to
        only one row anyway, but we still have to guarantee correct result
        order. The line below effectively rewrites the query from GROUP BY
        <fields> to ORDER BY <fields>. There are three exceptions:
        - if skip_sort_order is set (see above), then we can simply skip
          GROUP BY;
        - if we are in a subquery, we don't have to maintain order unless there
	  is a limit clause in the subquery.
        - we can only rewrite ORDER BY if the ORDER BY fields are 'compatible'
          with the GROUP BY ones, i.e. either one is a prefix of another.
          We only check if the ORDER BY is a prefix of GROUP BY. In this case
          test_if_subpart() copies the ASC/DESC attributes from the original
          ORDER BY fields.
          If GROUP BY is a prefix of ORDER BY, then it is safe to leave
          'order' as is.
       */
      if (!order || test_if_subpart(group_list, order))
      {
        if (skip_sort_order ||
            (select_lex->master_unit()->item && select_limit == HA_POS_ERROR)) // This is a subquery
          order= NULL;
        else
          order= group_list;
      }
      /*
        If we have an IGNORE INDEX FOR GROUP BY(fields) clause, this must be 
        rewritten to IGNORE INDEX FOR ORDER BY(fields).
      */
      join_tab->table->keys_in_use_for_order_by=
        join_tab->table->keys_in_use_for_group_by;
      group_list= 0;
      group= 0;
    }
    if (select_distinct &&
       list_contains_unique_index(join_tab[const_tables].table,
                                 find_field_in_item_list,
                                 (void *) &fields_list))
    {
      select_distinct= 0;
    }
  }
  if (group || tmp_table_param.sum_func_count)
  {
    if (! hidden_group_fields && rollup.state == ROLLUP::STATE_NONE
        && !select_lex->have_window_funcs())
      select_distinct=0;
  }
  else if (select_distinct && table_count - const_tables == 1 &&
           rollup.state == ROLLUP::STATE_NONE &&
           !select_lex->have_window_funcs())
  {
    /*
      We are only using one table. In this case we change DISTINCT to a
      GROUP BY query if:
      - The GROUP BY can be done through indexes (no sort) and the ORDER
        BY only uses selected fields.
	(In this case we can later optimize away GROUP BY and ORDER BY)
      - We are scanning the whole table without LIMIT
        This can happen if:
        - We are using CALC_FOUND_ROWS
        - We are using an ORDER BY that can't be optimized away.
      We don't want to use this optimization when we are using LIMIT
      because in this case we can just create a temporary table that
      holds LIMIT rows and stop when this table is full.
    */
    bool all_order_fields_used;

    tab= &join_tab[const_tables];
    if (order)
    {
      skip_sort_order=
        test_if_skip_sort_order(tab, order, select_limit,
                                true,           // no_changes
                                &tab->table->keys_in_use_for_order_by);
    }
    if ((group_list=create_distinct_group(thd, select_lex->ref_pointer_array,
                                          order, fields_list, all_fields,
				          &all_order_fields_used)))
    {
      const bool skip_group=
        skip_sort_order &&
        test_if_skip_sort_order(tab, group_list, select_limit,
                                  true,         // no_changes
                                  &tab->table->keys_in_use_for_group_by);
      count_field_types(select_lex, &tmp_table_param, all_fields, 0);
      if ((skip_group && all_order_fields_used) ||
	  select_limit == HA_POS_ERROR ||
	  (order && !skip_sort_order))
      {
	/*  Change DISTINCT to GROUP BY */
	select_distinct= 0;
	no_order= !order;
	if (all_order_fields_used)
	{
	  if (order && skip_sort_order)
	  {
	    /*
	      Force MySQL to read the table in sorted order to get result in
	      ORDER BY order.
	    */
	    tmp_table_param.quick_group=0;
	  }
	  order=0;
        }
	group=1;				// For end_write_group
      }
      else
	group_list= 0;
    }
    else if (thd->is_fatal_error)		// End of memory
      DBUG_RETURN(1);
  }
  simple_group= rollup.state == ROLLUP::STATE_NONE;
  if (group)
  {
    /*
      Update simple_group and group_list as we now have more information, like
      which tables or columns are constant.
    */
    group_list= remove_const(this, group_list, conds,
                             rollup.state == ROLLUP::STATE_NONE,
                             &simple_group);
    if (unlikely(thd->is_error()))
    {
      error= 1;
      DBUG_RETURN(1);
    }
    if (!group_list)
    {
      /* The output has only one row */
      order=0;
      simple_order=1;
      select_distinct= 0;
      group_optimized_away= 1;
    }
  }

  calc_group_buffer(this, group_list);
  send_group_parts= tmp_table_param.group_parts; /* Save org parts */
  if (procedure && procedure->group)
  {
    group_list= procedure->group= remove_const(this, procedure->group, conds,
					       1, &simple_group);
    if (unlikely(thd->is_error()))
    {
      error= 1;
      DBUG_RETURN(1);
    }   
    calc_group_buffer(this, group_list);
  }

  if (test_if_subpart(group_list, order) ||
      (!group_list && tmp_table_param.sum_func_count))
  {
    order=0;
    if (is_indexed_agg_distinct(this, NULL))
      sort_and_group= 0;
  }

  // Can't use sort on head table if using join buffering
  if (full_join || hash_join)
  {
    TABLE *stable= (sort_by_table == (TABLE *) 1 ? 
                    join_tab[const_tables].table : sort_by_table);
    /* 
      FORCE INDEX FOR ORDER BY can be used to prevent join buffering when
      sorting on the first table.
    */
    if (!stable || (!stable->force_index_order &&
                    !map2table[stable->tablenr]->keep_current_rowid))
    {
      if (group_list)
        simple_group= 0;
      if (order)
        simple_order= 0;
    }
  }

  need_tmp= test_if_need_tmp_table();

  /*
    If window functions are present then we can't have simple_order set to
    TRUE as the window function needs a temp table for computation.
    ORDER BY is computed after the window function computation is done, so
    the sort will be done on the temp table.
  */
  if (select_lex->have_window_funcs())
    simple_order= FALSE;

  /*
    If the hint FORCE INDEX FOR ORDER BY/GROUP BY is used for the table
    whose columns are required to be returned in a sorted order, then
    the proper value for no_jbuf_after should be yielded by a call to
    the make_join_orderinfo function.
    Yet the current implementation of FORCE INDEX hints does not
    allow us to do it in a clean manner.
  */
  no_jbuf_after= 1 ? table_count : make_join_orderinfo(this);

  // Don't use join buffering when we use MATCH
  select_opts_for_readinfo=
    (select_options & (SELECT_DESCRIBE | SELECT_NO_JOIN_CACHE)) |
    (select_lex->ftfunc_list->elements ?  SELECT_NO_JOIN_CACHE : 0);

  if (select_lex->options & OPTION_SCHEMA_TABLE &&
       optimize_schema_tables_reads(this))
    DBUG_RETURN(1);

  if (make_join_readinfo(this, select_opts_for_readinfo, no_jbuf_after))
    DBUG_RETURN(1);

  /* Perform FULLTEXT search before all regular searches */
  if (!(select_options & SELECT_DESCRIBE))
    if (init_ftfuncs(thd, select_lex, MY_TEST(order)))
      DBUG_RETURN(1);

  /*
    It's necessary to check const part of HAVING cond as
    there is a chance that some cond parts may become
    const items after make_join_statistics(for example
    when Item is a reference to cost table field from
    outer join).
    This check is performed only for those conditions
    which do not use aggregate functions. In such case
    temporary table may not be used and const condition
    elements may be lost during further having
    condition transformation in JOIN::exec.
  */
  if (having && const_table_map && !having->with_sum_func())
  {
    having->update_used_tables();
    having= having->remove_eq_conds(thd, &select_lex->having_value, true);
    if (select_lex->having_value == Item::COND_FALSE)
    {
      having= new (thd->mem_root) Item_bool(thd, false);
      zero_result_cause= "Impossible HAVING noticed after reading const tables";
      error= 0;
      select_lex->mark_const_derived(zero_result_cause);
      goto setup_subq_exit;
    }
  }

  if (optimize_unflattened_subqueries())
    DBUG_RETURN(1);
  
  int res;
  if ((res= rewrite_to_index_subquery_engine(this)) != -1)
    DBUG_RETURN(res);
  if (setup_subquery_caches())
    DBUG_RETURN(-1);

  /*
    Need to tell handlers that to play it safe, it should fetch all
    columns of the primary key of the tables: this is because MySQL may
    build row pointers for the rows, and for all columns of the primary key
    the read set has not necessarily been set by the server code.
  */
  if (need_tmp || select_distinct || group_list || order)
  {
    for (uint i= 0; i < table_count; i++)
    {
      if (!(table[i]->map & const_table_map))
        table[i]->prepare_for_position();
    }
  }

  DBUG_EXECUTE("info",TEST_join(this););

  if (!only_const_tables())
  {
     JOIN_TAB *tab= &join_tab[const_tables];

    if (order && !need_tmp)
    {
      /*
        Force using of tmp table if sorting by a SP or UDF function due to
        their expensive and probably non-deterministic nature.
      */
      for (ORDER *tmp_order= order; tmp_order ; tmp_order=tmp_order->next)
      {
        Item *item= *tmp_order->item;
        if (item->is_expensive())
        {
          /* Force tmp table without sort */
          need_tmp=1; simple_order=simple_group=0;
          break;
        }
      }
    }

    /*
      Because filesort always does a full table scan or a quick range scan
      we must add the removed reference to the select for the table.
      We only need to do this when we have a simple_order or simple_group
      as in other cases the join is done before the sort.
    */
    if ((order || group_list) &&
        tab->type != JT_ALL &&
        tab->type != JT_FT &&
        tab->type != JT_REF_OR_NULL &&
        ((order && simple_order) || (group_list && simple_group)))
    {
      if (add_ref_to_table_cond(thd,tab)) {
        DBUG_RETURN(1);
      }
    }
    /*
      Investigate whether we may use an ordered index as part of either
      DISTINCT, GROUP BY or ORDER BY execution. An ordered index may be
      used for only the first of any of these terms to be executed. This
      is reflected in the order which we check for test_if_skip_sort_order()
      below. However we do not check for DISTINCT here, as it would have
      been transformed to a GROUP BY at this stage if it is a candidate for 
      ordered index optimization.
      If a decision was made to use an ordered index, the availability
      of such an access path is stored in 'ordered_index_usage' for later
      use by 'execute' or 'explain'
    */
    DBUG_ASSERT(ordered_index_usage == ordered_index_void);

    if (group_list)   // GROUP BY honoured first
                      // (DISTINCT was rewritten to GROUP BY if skippable)
    {
      /*
        When there is SQL_BIG_RESULT do not sort using index for GROUP BY,
        and thus force sorting on disk unless a group min-max optimization
        is going to be used as it is applied now only for one table queries
        with covering indexes.
      */
      if (!(select_options & SELECT_BIG_RESULT) ||
            (tab->select &&
             tab->select->quick &&
             tab->select->quick->get_type() ==
             QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX))
      {
        if (simple_group &&              // GROUP BY is possibly skippable
            !select_distinct)            // .. if not preceded by a DISTINCT
        {
          /*
            Calculate a possible 'limit' of table rows for 'GROUP BY':
            A specified 'LIMIT' is relative to the final resultset.
            'need_tmp' implies that there will be more postprocessing 
            so the specified 'limit' should not be enforced yet.
           */
          const ha_rows limit = need_tmp ? HA_POS_ERROR : select_limit;
          if (test_if_skip_sort_order(tab, group_list, limit, false, 
                                      &tab->table->keys_in_use_for_group_by))
          {
            ordered_index_usage= ordered_index_group_by;
          }
        }

	/*
	  If we are going to use semi-join LooseScan, it will depend
	  on the selected index scan to be used.  If index is not used
	  for the GROUP BY, we risk that sorting is put on the LooseScan
	  table.  In order to avoid this, force use of temporary table.
	  TODO: Explain the quick_group part of the test below.
	 */
        if ((ordered_index_usage != ordered_index_group_by) &&
            ((tmp_table_param.quick_group && !procedure) || 
	     (tab->emb_sj_nest && 
	      best_positions[const_tables].sj_strategy == SJ_OPT_LOOSE_SCAN)))
        {
          need_tmp=1;
          simple_order= simple_group= false; // Force tmp table without sort
        }
      }
    }
    else if (order &&                      // ORDER BY wo/ preceding GROUP BY
             (simple_order || skip_sort_order)) // which is possibly skippable
    {
      if (test_if_skip_sort_order(tab, order, select_limit, false, 
                                  &tab->table->keys_in_use_for_order_by))
      {
        ordered_index_usage= ordered_index_order_by;
      }
    }
  }

  if (having)
    having_is_correlated= MY_TEST(having->used_tables() & OUTER_REF_TABLE_BIT);
  tmp_having= having;

  if (unlikely(thd->is_error()))
    DBUG_RETURN(TRUE);

  /*
    The loose index scan access method guarantees that all grouping or
    duplicate row elimination (for distinct) is already performed
    during data retrieval, and that all MIN/MAX functions are already
    computed for each group. Thus all MIN/MAX functions should be
    treated as regular functions, and there is no need to perform
    grouping in the main execution loop.
    Notice that currently loose index scan is applicable only for
    single table queries, thus it is sufficient to test only the first
    join_tab element of the plan for its access method.
  */
  if (join_tab->is_using_loose_index_scan())
  {
    tmp_table_param.precomputed_group_by= TRUE;
    if (join_tab->is_using_agg_loose_index_scan())
    {
      need_distinct= FALSE;
      tmp_table_param.precomputed_group_by= FALSE;
    }
  }

  if (make_aggr_tables_info())
    DBUG_RETURN(1);

  if (init_join_caches())
    DBUG_RETURN(1);

  if (init_range_rowid_filters())
    DBUG_RETURN(1);

  error= 0;

  if (select_options & SELECT_DESCRIBE)
    goto derived_exit;

  DBUG_RETURN(0);

setup_subq_exit:
  /* Choose an execution strategy for this JOIN. */
  if (!tables_list || !table_count)
  {
    choose_tableless_subquery_plan();

    /* The output has atmost one row */
    if (group_list)
    {
      group_list= NULL;
      group_optimized_away= 1;
      rollup.state= ROLLUP::STATE_NONE;
    }
    order= NULL;
    simple_order= TRUE;
    select_distinct= FALSE;

    if (select_lex->have_window_funcs())
    {
      if (!(join_tab= (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB))))
        DBUG_RETURN(1);
      need_tmp= 1;
    }
    if (make_aggr_tables_info())
      DBUG_RETURN(1);

    /*
      It could be that we've only done optimization stage 1 for
      some of the derived tables, and never did stage 2.
      Do it now, otherwise Explain data structure will not be complete.
    */
    if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE))
      DBUG_RETURN(1);
  }
  /*
    Even with zero matching rows, subqueries in the HAVING clause may
    need to be evaluated if there are aggregate functions in the query.
  */
  if (optimize_unflattened_subqueries())
    DBUG_RETURN(1);
  error= 0;

derived_exit:

  select_lex->mark_const_derived(zero_result_cause);
  DBUG_RETURN(0);
}
call item->split_sum_func() in setup_order() just as
it's done in setup_fields() |
/*
 * System call handler: open a session from the calling user TA to the
 * TA identified by @dest.
 *
 * @dest           userspace pointer to the destination TA's UUID
 * @cancel_req_to  cancellation timeout forwarded to tee_ta_open_session()
 * @usr_param      userspace invocation parameters; copied in and checked
 *                 by tee_svc_copy_param() before being handed on
 * @ta_sess        userspace out: opaque uref for the opened session
 * @ret_orig       userspace out: origin (TEE_ORIGIN_*) of the result
 *
 * Returns a TEE_Result.  All heap allocations are released through the
 * labelled cleanup paths below, whatever the outcome.
 */
TEE_Result syscall_open_ta_session(const TEE_UUID *dest,
			unsigned long cancel_req_to,
			struct utee_params *usr_param, uint32_t *ta_sess,
			uint32_t *ret_orig)
{
	TEE_Result res;
	uint32_t ret_o = TEE_ORIGIN_TEE;
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *sess;
	struct mobj *mobj_param = NULL;
	TEE_UUID *uuid = malloc(sizeof(TEE_UUID));
	struct tee_ta_param *param = malloc(sizeof(struct tee_ta_param));
	TEE_Identity *clnt_id = malloc(sizeof(TEE_Identity));
	void *tmp_buf_va[TEE_NUM_PARAMS] = { NULL };
	struct user_ta_ctx *utc;

	if (uuid == NULL || param == NULL || clnt_id == NULL) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out_free_only;
	}

	memset(param, 0, sizeof(struct tee_ta_param));

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		goto out_free_only;
	utc = to_user_ta_ctx(sess->ctx);

	/* Destination UUID comes from the TA; copy it in before use. */
	res = tee_svc_copy_from_user(uuid, dest, sizeof(TEE_UUID));
	if (res != TEE_SUCCESS)
		goto function_exit;

	/* The callee sees the calling TA's identity as the client. */
	clnt_id->login = TEE_LOGIN_TRUSTED_APP;
	memcpy(&clnt_id->uuid, &sess->ctx->uuid, sizeof(TEE_UUID));

	res = tee_svc_copy_param(sess, NULL, usr_param, param, tmp_buf_va,
				 &mobj_param);
	if (res != TEE_SUCCESS)
		goto function_exit;

	res = tee_ta_open_session(&ret_o, &s, &utc->open_sessions, uuid,
				  clnt_id, cancel_req_to, param);
	/*
	 * Opening the session may have run another TA and switched the
	 * active MMU context; restore ours unconditionally before
	 * touching caller memory again.
	 */
	tee_mmu_set_ctx(&utc->ctx);
	if (res != TEE_SUCCESS)
		goto function_exit;

	res = tee_svc_update_out_param(param, tmp_buf_va, usr_param);

function_exit:
	mobj_free(mobj_param);
	if (res == TEE_SUCCESS)
		tee_svc_copy_kaddr_to_uref(ta_sess, s);
	tee_svc_copy_to_user(ret_orig, &ret_o, sizeof(ret_o));

out_free_only:
	free(param);
	free(uuid);
	free(clnt_id);
	return res;
}
"CWE-119",
"CWE-787"
] | optee_os | d5c5b0b77b2b589666024d219a8007b3f5b6faeb | 62,566,355,517,595,570,000,000,000,000,000,000,000 | 60 | core: svc: always check ta parameters
Always check TA parameters from a user TA. This prevents a user TA from
passing invalid pointers to a pseudo TA.
Fixes: OP-TEE-2018-0007: "Buffer checks missing when calling pseudo
TAs".
Signed-off-by: Jens Wiklander <[email protected]>
Tested-by: Joakim Bech <[email protected]> (QEMU v7, v8)
Reviewed-by: Joakim Bech <[email protected]>
Reported-by: Riscure <[email protected]>
Reported-by: Alyssa Milburn <[email protected]>
Acked-by: Etienne Carriere <[email protected]> |
/* In-place submul modulo m: rp -= ap * b, with the result kept as an
   m->size limb residue.

   mpn_submul_1 leaves a borrow in hi; that borrow is folded back in by
   subtracting hi * m->B (presumably B^size mod p -- confirm against
   ecc-internal.h), and any remaining borrow is cleared with a single
   constant-time conditional subtraction of m->B.  The asserts document
   the preconditions under which the final borrow must be zero. */
ecc_mod_submul_1 (const struct ecc_modulo *m, mp_limb_t *rp,
		  const mp_limb_t *ap, mp_limb_t b)
{
  mp_limb_t hi;

  /* Single-limb multiplier must fit in 32 bits for the borrow-folding
     below to be sufficient. */
  assert (b <= 0xffffffff);
  hi = mpn_submul_1 (rp, ap, m->size, b);
  hi = mpn_submul_1 (rp, m->B, m->size, hi);
  assert (hi <= 1);
  hi = mpn_cnd_sub_n (hi, rp, rp, m->B, m->size);
  /* Sufficient roughly if b < B^size / p */
  assert (hi == 0);
}
"CWE-787"
] | nettle | a63893791280d441c713293491da97c79c0950fe | 42,883,297,744,284,200,000,000,000,000,000,000,000 | 13 | New functions ecc_mod_mul_canonical and ecc_mod_sqr_canonical.
* ecc-mod-arith.c (ecc_mod_mul_canonical, ecc_mod_sqr_canonical):
New functions.
* ecc-internal.h: Declare and document new functions.
* curve448-eh-to-x.c (curve448_eh_to_x): Use ecc_mod_sqr_canonical.
* curve25519-eh-to-x.c (curve25519_eh_to_x): Use ecc_mod_mul_canonical.
* ecc-eh-to-a.c (ecc_eh_to_a): Likewise.
* ecc-j-to-a.c (ecc_j_to_a): Likewise.
* ecc-mul-m.c (ecc_mul_m): Likewise.
(cherry picked from commit 2bf497ba4d6acc6f352bca015837fad33008565c) |
/* Accessor: return the certificate-verification callback currently
 * installed on @ctx (set via SSL_CTX_set_verify). */
int (*SSL_CTX_get_verify_callback(const SSL_CTX *ctx)) (int, X509_STORE_CTX *) {
    return ctx->default_verify_callback;
}
"CWE-310"
] | openssl | 56f1acf5ef8a432992497a04792ff4b3b2c6f286 | 74,114,053,103,276,920,000,000,000,000,000,000,000 | 3 | Disable SSLv2 default build, default negotiation and weak ciphers.
SSLv2 is by default disabled at build-time. Builds that are not
configured with "enable-ssl2" will not support SSLv2. Even if
"enable-ssl2" is used, users who want to negotiate SSLv2 via the
version-flexible SSLv23_method() will need to explicitly call either
of:
SSL_CTX_clear_options(ctx, SSL_OP_NO_SSLv2);
or
SSL_clear_options(ssl, SSL_OP_NO_SSLv2);
as appropriate. Even if either of those is used, or the application
explicitly uses the version-specific SSLv2_method() or its client
or server variants, SSLv2 ciphers vulnerable to exhaustive search
key recovery have been removed. Specifically, the SSLv2 40-bit
EXPORT ciphers, and SSLv2 56-bit DES are no longer available.
Mitigation for CVE-2016-0800
Reviewed-by: Emilia Käsper <[email protected]> |
/*
 * Map @page (which holds an array of swap entries for a shmem object)
 * into kernel address space through the KM_USER1 atomic kmap slot.
 * Atomic kmaps disable preemption, so the caller must not sleep until
 * the matching unmap (presumably shmem_swp_unmap -- confirm) is done.
 */
static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}
"CWE-200"
] | linux-2.6 | e84e2e132c9c66d8498e7710d4ea532d1feaaac5 | 97,016,038,900,229,000,000,000,000,000,000,000,000 | 4 | tmpfs: restore missing clear_highpage
tmpfs was misconverted to __GFP_ZERO in 2.6.11. There's an unusual case in
which shmem_getpage receives the page from its caller instead of allocating.
We must cover this case by clear_highpage before SetPageUptodate, as before.
Signed-off-by: Hugh Dickins <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
/*
 * tty_register_driver -- register a tty driver with the kernel.
 *
 * Allocates a char-device region (or registers the caller-chosen
 * major/minor range), optionally adds one cdev spanning the whole minor
 * range for TTY_DRIVER_DYNAMIC_ALLOC drivers, links the driver into the
 * global tty_drivers list, and -- unless TTY_DRIVER_DYNAMIC_DEV is set --
 * creates a device node for every minor up front.
 *
 * Returns 0 on success or a negative errno; on failure everything done
 * so far is unwound in reverse order through the error labels.
 */
int tty_register_driver(struct tty_driver *driver)
{
	int error;
	int i;
	dev_t dev;
	struct device *d;

	if (!driver->major) {
		/* major == 0: let the kernel pick a dynamic major. */
		error = alloc_chrdev_region(&dev, driver->minor_start,
						driver->num, driver->name);
		if (!error) {
			driver->major = MAJOR(dev);
			driver->minor_start = MINOR(dev);
		}
	} else {
		dev = MKDEV(driver->major, driver->minor_start);
		error = register_chrdev_region(dev, driver->num, driver->name);
	}
	if (error < 0)
		goto err;

	if (driver->flags & TTY_DRIVER_DYNAMIC_ALLOC) {
		error = tty_cdev_add(driver, dev, 0, driver->num);
		if (error)
			goto err_unreg_char;
	}

	mutex_lock(&tty_mutex);
	list_add(&driver->tty_drivers, &tty_drivers);
	mutex_unlock(&tty_mutex);

	if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) {
		for (i = 0; i < driver->num; i++) {
			d = tty_register_device(driver, i, NULL);
			if (IS_ERR(d)) {
				error = PTR_ERR(d);
				goto err_unreg_devs;
			}
		}
	}
	proc_tty_register_driver(driver);
	driver->flags |= TTY_DRIVER_INSTALLED;
	return 0;

	/* Unwind in reverse order: device nodes already created, then the
	 * list linkage, then the chrdev region. */
err_unreg_devs:
	for (i--; i >= 0; i--)
		tty_unregister_device(driver, i);

	mutex_lock(&tty_mutex);
	list_del(&driver->tty_drivers);
	mutex_unlock(&tty_mutex);

err_unreg_char:
	unregister_chrdev_region(dev, driver->num);
err:
	return error;
}
"CWE-200",
"CWE-362"
] | linux | 5c17c861a357e9458001f021a7afa7aab9937439 | 50,127,357,038,825,220,000,000,000,000,000,000,000 | 57 | tty: Fix unsafe ldisc reference via ioctl(TIOCGETD)
ioctl(TIOCGETD) retrieves the line discipline id directly from the
ldisc because the line discipline id (c_line) in termios is untrustworthy;
userspace may have set termios via ioctl(TCSETS*) without actually
changing the line discipline via ioctl(TIOCSETD).
However, directly accessing the current ldisc via tty->ldisc is
unsafe; the ldisc ptr dereferenced may be stale if the line discipline
is changing via ioctl(TIOCSETD) or hangup.
Wait for the line discipline reference (just like read() or write())
to retrieve the "current" line discipline id.
Cc: <[email protected]>
Signed-off-by: Peter Hurley <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]> |
asmlinkage long sys_pivot_root(const char __user * new_root,
const char __user * put_old)
{
struct vfsmount *tmp;
struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
int error;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
lock_kernel();
error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
&new_nd);
if (error)
goto out0;
error = -EINVAL;
if (!check_mnt(new_nd.mnt))
goto out1;
error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
if (error)
goto out1;
error = security_sb_pivotroot(&old_nd, &new_nd);
if (error) {
path_release(&old_nd);
goto out1;
}
read_lock(¤t->fs->lock);
user_nd.mnt = mntget(current->fs->rootmnt);
user_nd.dentry = dget(current->fs->root);
read_unlock(¤t->fs->lock);
down_write(&namespace_sem);
mutex_lock(&old_nd.dentry->d_inode->i_mutex);
error = -EINVAL;
if (IS_MNT_SHARED(old_nd.mnt) ||
IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
IS_MNT_SHARED(user_nd.mnt->mnt_parent))
goto out2;
if (!check_mnt(user_nd.mnt))
goto out2;
error = -ENOENT;
if (IS_DEADDIR(new_nd.dentry->d_inode))
goto out2;
if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
goto out2;
if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
goto out2;
error = -EBUSY;
if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
goto out2; /* loop, on the same file system */
error = -EINVAL;
if (user_nd.mnt->mnt_root != user_nd.dentry)
goto out2; /* not a mountpoint */
if (user_nd.mnt->mnt_parent == user_nd.mnt)
goto out2; /* not attached */
if (new_nd.mnt->mnt_root != new_nd.dentry)
goto out2; /* not a mountpoint */
if (new_nd.mnt->mnt_parent == new_nd.mnt)
goto out2; /* not attached */
tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
spin_lock(&vfsmount_lock);
if (tmp != new_nd.mnt) {
for (;;) {
if (tmp->mnt_parent == tmp)
goto out3; /* already mounted on put_old */
if (tmp->mnt_parent == new_nd.mnt)
break;
tmp = tmp->mnt_parent;
}
if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
goto out3;
} else if (!is_subdir(old_nd.dentry, new_nd.dentry))
goto out3;
detach_mnt(new_nd.mnt, &parent_nd);
detach_mnt(user_nd.mnt, &root_parent);
attach_mnt(user_nd.mnt, &old_nd); /* mount old root on put_old */
attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
touch_mnt_namespace(current->nsproxy->mnt_ns);
spin_unlock(&vfsmount_lock);
chroot_fs_refs(&user_nd, &new_nd);
security_sb_post_pivotroot(&user_nd, &new_nd);
error = 0;
path_release(&root_parent);
path_release(&parent_nd);
out2:
mutex_unlock(&old_nd.dentry->d_inode->i_mutex);
up_write(&namespace_sem);
path_release(&user_nd);
path_release(&old_nd);
out1:
path_release(&new_nd);
out0:
unlock_kernel();
return error;
out3:
spin_unlock(&vfsmount_lock);
goto out2;
} | 0 | [
"CWE-269"
] | linux-2.6 | ee6f958291e2a768fd727e7a67badfff0b67711a | 254,118,756,418,828,720,000,000,000,000,000,000,000 | 101 | check privileges before setting mount propagation
There's a missing check for CAP_SYS_ADMIN in do_change_type().
Signed-off-by: Miklos Szeredi <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
static vpx_codec_err_t ctrl_get_last_ref_updates(vpx_codec_alg_priv_t *ctx,
va_list args) {
int *const update_info = va_arg(args, int *);
if (update_info) {
if (ctx->pbi != NULL) {
*update_info = ctx->pbi->refresh_frame_flags;
return VPX_CODEC_OK;
} else {
return VPX_CODEC_ERROR;
}
}
return VPX_CODEC_INVALID_PARAM;
} | 0 | [
"CWE-125"
] | libvpx | 0681cff1ad36b3ef8ec242f59b5a6c4234ccfb88 | 113,647,928,095,135,370,000,000,000,000,000,000,000 | 15 | vp9: fix OOB read in decoder_peek_si_internal
Profile 1 or 3 bitstreams may require 11 bytes for the header in the
intra-only case.
Additionally add a check on the bit reader's error handler callback to
ensure it's non-NULL before calling to avoid future regressions.
This has existed since at least (pre-1.4.0):
09bf1d61c Changes hdr for profiles > 1 for intraonly frames
BUG=webm:1543
Change-Id: I23901e6e3a219170e8ea9efecc42af0be2e5c378 |
server_away_free_messages (server *serv)
{
GSList *list, *next;
struct away_msg *away;
list = away_list;
while (list)
{
away = list->data;
next = list->next;
if (away->server == serv)
{
away_list = g_slist_remove (away_list, away);
if (away->message)
free (away->message);
free (away);
next = away_list;
}
list = next;
}
} | 0 | [
"CWE-310"
] | hexchat | c9b63f7f9be01692b03fa15275135a4910a7e02d | 8,643,017,741,925,014,000,000,000,000,000,000,000 | 21 | ssl: Validate hostnames
Closes #524 |
MagickExport MagickBooleanType LevelImageColors(Image *image,
const PixelInfo *black_color,const PixelInfo *white_color,
const MagickBooleanType invert,ExceptionInfo *exception)
{
ChannelType
channel_mask;
MagickStatusType
status;
/*
Allocate and initialize levels map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
(IsGrayColorspace(white_color->colorspace) == MagickFalse)))
(void) SetImageColorspace(image,sRGBColorspace,exception);
status=MagickTrue;
if (invert == MagickFalse)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,RedChannel);
status&=LevelImage(image,black_color->red,white_color->red,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,GreenChannel);
status&=LevelImage(image,black_color->green,white_color->green,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,BlueChannel);
status&=LevelImage(image,black_color->blue,white_color->blue,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_mask=SetImageChannelMask(image,BlackChannel);
status&=LevelImage(image,black_color->black,white_color->black,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
}
else
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,RedChannel);
status&=LevelizeImage(image,black_color->red,white_color->red,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,GreenChannel);
status&=LevelizeImage(image,black_color->green,white_color->green,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,BlueChannel);
status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_mask=SetImageChannelMask(image,BlackChannel);
status&=LevelizeImage(image,black_color->black,white_color->black,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
}
return(status != 0 ? MagickTrue : MagickFalse);
} | 0 | [
"CWE-125"
] | ImageMagick | 76401e172ea3a55182be2b8e2aca4d07270f6da6 | 61,775,638,792,907,530,000,000,000,000,000,000,000 | 104 | Evaluate lazy pixel cache morphology to prevent buffer overflow (bug report from Ibrahim M. El-Sayed) |
njs_value_release(njs_vm_t *vm, njs_value_t *value)
{
njs_string_t *string;
if (njs_is_string(value)) {
if (value->long_string.external != 0xff) {
string = value->long_string.data;
njs_thread_log_debug("release:%uxD \"%*s\"", string->retain,
value->long_string.size, string->start);
if (string->retain != 0xffff) {
string->retain--;
#if 0
if (string->retain == 0) {
if ((u_char *) string + sizeof(njs_string_t)
!= string->start)
{
njs_memcache_pool_free(vm->mem_pool,
string->start);
}
njs_memcache_pool_free(vm->mem_pool, string);
}
#endif
}
}
}
} | 0 | [] | njs | 6549d49630ce5f5ac823fd3ae0c6c8558b8716ae | 13,916,823,089,822,863,000,000,000,000,000,000,000 | 31 | Fixed redefinition of special props in Object.defineProperty().
Previously, when NJS_PROPERTY_HANDLER property was updated it might be
left in inconsistent state. Namely, prop->type was left unchanged, but
prop->value did not have an expected property handler. As a result
consecutive reference to the property may result in a segment violation.
The fix is to update the prop->type during redefinition.
This closes #504 issue on Github. |
static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
{
to_svm(vcpu)->guest_state_loaded = false;
} | 0 | [
"CWE-862"
] | kvm | 0f923e07124df069ba68d8bb12324398f4b6b709 | 78,844,534,866,499,015,000,000,000,000,000,000,000 | 4 | KVM: nSVM: avoid picking up unsupported bits from L2 in int_ctl (CVE-2021-3653)
* Invert the mask of bits that we pick from L2 in
nested_vmcb02_prepare_control
* Invert and explicitly use VIRQ related bits bitmask in svm_clear_vintr
This fixes a security issue that allowed a malicious L1 to run L2 with
AVIC enabled, which allowed the L2 to exploit the uninitialized and enabled
AVIC to read/write the host physical memory at some offsets.
Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
Signed-off-by: Maxim Levitsky <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
if (!enable_vnmi ||
vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
enable_irq_window(vcpu);
return;
}
vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
CPU_BASED_VIRTUAL_NMI_PENDING);
} | 0 | [
"CWE-284"
] | linux | 727ba748e110b4de50d142edca9d6a9b7e6111d8 | 33,623,943,047,535,650,000,000,000,000,000,000,000 | 11 | kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside a L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.
Fixes: 70f3aac964ae("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: [email protected]
Signed-off-by: Felix Wilhelm <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
{
struct megasas_cmd *cmd;
int i;
u16 max_cmd = instance->max_fw_cmds;
u32 defer_index;
unsigned long flags;
defer_index = 0;
spin_lock_irqsave(&instance->mfi_pool_lock, flags);
for (i = 0; i < max_cmd; i++) {
cmd = instance->cmd_list[i];
if (cmd->sync_cmd == 1 || cmd->scmd) {
dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p"
"on the defer queue as internal\n",
defer_index, cmd, cmd->sync_cmd, cmd->scmd);
if (!list_empty(&cmd->list)) {
dev_notice(&instance->pdev->dev, "ERROR while"
" moving this cmd:%p, %d %p, it was"
"discovered on some list?\n",
cmd, cmd->sync_cmd, cmd->scmd);
list_del_init(&cmd->list);
}
defer_index++;
list_add_tail(&cmd->list,
&instance->internal_reset_pending_q);
}
}
spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
} | 0 | [
"CWE-476"
] | linux | bcf3b67d16a4c8ffae0aa79de5853435e683945c | 222,707,063,811,083,250,000,000,000,000,000,000,000 | 32 | scsi: megaraid_sas: return error when create DMA pool failed
when create DMA pool for cmd frames failed, we should return -ENOMEM,
instead of 0.
In some case in:
megasas_init_adapter_fusion()
-->megasas_alloc_cmds()
-->megasas_create_frame_pool
create DMA pool failed,
--> megasas_free_cmds() [1]
-->megasas_alloc_cmds_fusion()
failed, then goto fail_alloc_cmds.
-->megasas_free_cmds() [2]
we will call megasas_free_cmds twice, [1] will kfree cmd_list,
[2] will use cmd_list.it will cause a problem:
Unable to handle kernel NULL pointer dereference at virtual address
00000000
pgd = ffffffc000f70000
[00000000] *pgd=0000001fbf893003, *pud=0000001fbf893003,
*pmd=0000001fbf894003, *pte=006000006d000707
Internal error: Oops: 96000005 [#1] SMP
Modules linked in:
CPU: 18 PID: 1 Comm: swapper/0 Not tainted
task: ffffffdfb9290000 ti: ffffffdfb923c000 task.ti: ffffffdfb923c000
PC is at megasas_free_cmds+0x30/0x70
LR is at megasas_free_cmds+0x24/0x70
...
Call trace:
[<ffffffc0005b779c>] megasas_free_cmds+0x30/0x70
[<ffffffc0005bca74>] megasas_init_adapter_fusion+0x2f4/0x4d8
[<ffffffc0005b926c>] megasas_init_fw+0x2dc/0x760
[<ffffffc0005b9ab0>] megasas_probe_one+0x3c0/0xcd8
[<ffffffc0004a5abc>] local_pci_probe+0x4c/0xb4
[<ffffffc0004a5c40>] pci_device_probe+0x11c/0x14c
[<ffffffc00053a5e4>] driver_probe_device+0x1ec/0x430
[<ffffffc00053a92c>] __driver_attach+0xa8/0xb0
[<ffffffc000538178>] bus_for_each_dev+0x74/0xc8
[<ffffffc000539e88>] driver_attach+0x28/0x34
[<ffffffc000539a18>] bus_add_driver+0x16c/0x248
[<ffffffc00053b234>] driver_register+0x6c/0x138
[<ffffffc0004a5350>] __pci_register_driver+0x5c/0x6c
[<ffffffc000ce3868>] megasas_init+0xc0/0x1a8
[<ffffffc000082a58>] do_one_initcall+0xe8/0x1ec
[<ffffffc000ca7be8>] kernel_init_freeable+0x1c8/0x284
[<ffffffc0008d90b8>] kernel_init+0x1c/0xe4
Signed-off-by: Jason Yan <[email protected]>
Acked-by: Sumit Saxena <[email protected]>
Signed-off-by: Martin K. Petersen <[email protected]> |
const char *string_of_NPReason(int reason)
{
const char *str;
switch ((NPReason)reason) {
#define _(VAL) case VAL: str = #VAL; break;
_(NPRES_DONE);
_(NPRES_NETWORK_ERR);
_(NPRES_USER_BREAK);
#undef _
default:
str = "<unknown reason>";
break;
}
return str;
} | 0 | [
"CWE-264"
] | nspluginwrapper | 7e4ab8e1189846041f955e6c83f72bc1624e7a98 | 199,109,077,373,321,260,000,000,000,000,000,000,000 | 17 | Support all the new variables added |
parse_tag(char **s, int internal)
{
struct parsed_tag *tag = NULL;
int tag_id;
char tagname[MAX_TAG_LEN], attrname[MAX_TAG_LEN];
char *p, *q;
int i, attr_id = 0, nattr;
/* Parse tag name */
tagname[0] = '\0';
q = (*s) + 1;
p = tagname;
if (*q == '/') {
*(p++) = *(q++);
SKIP_BLANKS(q);
}
while (*q && !IS_SPACE(*q) && !(tagname[0] != '/' && *q == '/') &&
*q != '>' && p - tagname < MAX_TAG_LEN - 1) {
*(p++) = TOLOWER(*q);
q++;
}
*p = '\0';
while (*q && !IS_SPACE(*q) && !(tagname[0] != '/' && *q == '/') &&
*q != '>')
q++;
tag_id = getHash_si(&tagtable, tagname, HTML_UNKNOWN);
if (tag_id == HTML_UNKNOWN ||
(!internal && TagMAP[tag_id].flag & TFLG_INT))
goto skip_parse_tagarg;
tag = New(struct parsed_tag);
bzero(tag, sizeof(struct parsed_tag));
tag->tagid = tag_id;
if ((nattr = TagMAP[tag_id].max_attribute) > 0) {
tag->attrid = NewAtom_N(unsigned char, nattr);
tag->value = New_N(char *, nattr);
tag->map = NewAtom_N(unsigned char, MAX_TAGATTR);
memset(tag->map, MAX_TAGATTR, MAX_TAGATTR);
memset(tag->attrid, ATTR_UNKNOWN, nattr);
for (i = 0; i < nattr; i++)
tag->map[TagMAP[tag_id].accept_attribute[i]] = i;
}
/* Parse tag arguments */
SKIP_BLANKS(q);
while (1) {
Str value = NULL, value_tmp = NULL;
if (*q == '>' || *q == '\0')
goto done_parse_tag;
p = attrname;
while (*q && *q != '=' && !IS_SPACE(*q) &&
*q != '>' && p - attrname < MAX_TAG_LEN - 1) {
*(p++) = TOLOWER(*q);
q++;
}
*p = '\0';
while (*q && *q != '=' && !IS_SPACE(*q) && *q != '>')
q++;
SKIP_BLANKS(q);
if (*q == '=') {
/* get value */
value_tmp = Strnew();
q++;
SKIP_BLANKS(q);
if (*q == '"') {
q++;
while (*q && *q != '"') {
Strcat_char(value_tmp, *q);
if (!tag->need_reconstruct && is_html_quote(*q))
tag->need_reconstruct = TRUE;
q++;
}
if (*q == '"')
q++;
}
else if (*q == '\'') {
q++;
while (*q && *q != '\'') {
Strcat_char(value_tmp, *q);
if (!tag->need_reconstruct && is_html_quote(*q))
tag->need_reconstruct = TRUE;
q++;
}
if (*q == '\'')
q++;
}
else if (*q) {
while (*q && !IS_SPACE(*q) && *q != '>') {
Strcat_char(value_tmp, *q);
if (!tag->need_reconstruct && is_html_quote(*q))
tag->need_reconstruct = TRUE;
q++;
}
}
}
for (i = 0; i < nattr; i++) {
if ((tag)->attrid[i] == ATTR_UNKNOWN &&
strcmp(AttrMAP[TagMAP[tag_id].accept_attribute[i]].name,
attrname) == 0) {
attr_id = TagMAP[tag_id].accept_attribute[i];
break;
}
}
if (value_tmp) {
int j, hidden=FALSE;
for (j=0; j<i; j++) {
if (tag->attrid[j] == ATTR_TYPE &&
tag->value[j] &&
strcmp("hidden",tag->value[j]) == 0) {
hidden=TRUE;
break;
}
}
if ((tag_id == HTML_INPUT || tag_id == HTML_INPUT_ALT) &&
attr_id == ATTR_VALUE && hidden) {
value = value_tmp;
} else {
char *x;
value = Strnew();
for (x = value_tmp->ptr; *x; x++) {
if (*x != '\n')
Strcat_char(value, *x);
}
}
}
if (i != nattr) {
if (!internal &&
((AttrMAP[attr_id].flag & AFLG_INT) ||
(value && AttrMAP[attr_id].vtype == VTYPE_METHOD &&
!strcasecmp(value->ptr, "internal")))) {
tag->need_reconstruct = TRUE;
continue;
}
tag->attrid[i] = attr_id;
if (value)
tag->value[i] = html_unquote(value->ptr);
else
tag->value[i] = NULL;
}
else {
tag->need_reconstruct = TRUE;
}
}
skip_parse_tagarg:
while (*q != '>' && *q)
q++;
done_parse_tag:
if (*q == '>')
q++;
*s = q;
return tag;
} | 0 | [
"CWE-20",
"CWE-476"
] | w3m | 33509cc81ec5f2ba44eb6fd98bd5c1b5873e46bd | 313,282,257,564,857,700,000,000,000,000,000,000,000 | 158 | Fix uninitialised values for <i> and <dd>
Bug-Debian: https://github.com/tats/w3m/issues/16 |
httpGetSubField(http_t *http, /* I - HTTP connection */
http_field_t field, /* I - Field index */
const char *name, /* I - Name of sub-field */
char *value) /* O - Value string */
{
return (httpGetSubField2(http, field, name, value, HTTP_MAX_VALUE));
} | 0 | [
"CWE-120"
] | cups | f24e6cf6a39300ad0c3726a41a4aab51ad54c109 | 216,615,664,448,321,060,000,000,000,000,000,000,000 | 7 | Fix multiple security/disclosure issues:
- CVE-2019-8696 and CVE-2019-8675: Fixed SNMP buffer overflows (rdar://51685251)
- Fixed IPP buffer overflow (rdar://50035411)
- Fixed memory disclosure issue in the scheduler (rdar://51373853)
- Fixed DoS issues in the scheduler (rdar://51373929) |
proto_register_protocol_in_name_only(const char *name, const char *short_name, const char *filter_name, int parent_proto, enum ftenum field_type)
{
protocol_t *protocol;
header_field_info *hfinfo;
/*
* Helper protocols don't need the strict rules as a "regular" protocol
* Just register it in a list and make a hf_ field from it
*/
if ((field_type != FT_PROTOCOL) && (field_type != FT_BYTES)) {
g_error("Pino \"%s\" must be of type FT_PROTOCOL or FT_BYTES.", name);
}
if (parent_proto < 0) {
g_error("Must have a valid parent protocol for helper protocol \"%s\"!"
" This might be caused by an inappropriate plugin or a development error.", name);
}
check_valid_filter_name_or_fail(filter_name);
/* Add this protocol to the list of helper protocols (just so it can be properly freed) */
protocol = g_new(protocol_t, 1);
protocol->name = name;
protocol->short_name = short_name;
protocol->filter_name = filter_name;
protocol->fields = NULL; /* Delegate until actually needed */
/* Enabling and toggling is really determined by parent protocol,
but provide default values here */
protocol->is_enabled = TRUE;
protocol->enabled_by_default = TRUE;
protocol->can_toggle = TRUE;
protocol->parent_proto_id = parent_proto;
protocol->heur_list = NULL;
/* List will be sorted later by name, when all protocols completed registering */
protocols = g_list_prepend(protocols, protocol);
/* Here we allocate a new header_field_info struct */
hfinfo = g_slice_new(header_field_info);
hfinfo->name = name;
hfinfo->abbrev = filter_name;
hfinfo->type = field_type;
hfinfo->display = BASE_NONE;
if (field_type == FT_BYTES) {
hfinfo->display |= (BASE_NO_DISPLAY_VALUE|BASE_PROTOCOL_INFO);
}
hfinfo->strings = protocol;
hfinfo->bitmask = 0;
hfinfo->ref_type = HF_REF_TYPE_NONE;
hfinfo->blurb = NULL;
hfinfo->parent = -1; /* This field differentiates protos and fields */
protocol->proto_id = proto_register_field_init(hfinfo, hfinfo->parent);
return protocol->proto_id;
} | 0 | [
"CWE-401"
] | wireshark | a9fc769d7bb4b491efb61c699d57c9f35269d871 | 23,963,850,456,704,665,000,000,000,000,000,000,000 | 57 | epan: Fix a memory leak.
Make sure _proto_tree_add_bits_ret_val allocates a bits array using the
packet scope, otherwise we leak memory. Fixes #17032. |
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *opt_skb = NULL;
/* Imagine: socket is IPv6. IPv4 packet arrives,
goes to IPv4 receive handler and backlogged.
From backlog it always goes here. Kerboom...
Fortunately, dccp_rcv_established and rcv_established
handle them correctly, but it is not case with
dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
*/
if (skb->protocol == htons(ETH_P_IP))
return dccp_v4_do_rcv(sk, skb);
if (sk_filter(sk, skb))
goto discard;
/*
* socket locking is here for SMP purposes as backlog rcv is currently
* called with bh processing disabled.
*/
/* Do Stevens' IPV6_PKTOPTIONS.
Yes, guys, it is the only place in our code, where we
may make it not affecting IPv4.
The rest of code is protocol independent,
and I do not like idea to uglify IPv4.
Actually, all the idea behind IPV6_PKTOPTIONS
looks not very well thought. For now we latch
options, received in the last packet, enqueued
by tcp. Feel free to propose better solution.
--ANK (980728)
*/
if (np->rxopt.all)
/*
* FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
* (wrt ipv6_pktopions) and net/ipv6/tcp_ipv6.c for an example.
*/
opt_skb = skb_clone(skb, GFP_ATOMIC);
if (sk->sk_state == DCCP_OPEN) { /* Fast path */
if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
goto reset;
if (opt_skb) {
/* XXX This is where we would goto ipv6_pktoptions. */
__kfree_skb(opt_skb);
}
return 0;
}
/*
* Step 3: Process LISTEN state
* If S.state == LISTEN,
* If P.type == Request or P contains a valid Init Cookie option,
* (* Must scan the packet's options to check for Init
* Cookies. Only Init Cookies are processed here,
* however; other options are processed in Step 8. This
* scan need only be performed if the endpoint uses Init
* Cookies *)
* (* Generate a new socket and switch to that socket *)
* Set S := new socket for this port pair
* S.state = RESPOND
* Choose S.ISS (initial seqno) or set from Init Cookies
* Initialize S.GAR := S.ISS
* Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
* Continue with S.state == RESPOND
* (* A Response packet will be generated in Step 11 *)
* Otherwise,
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*
* NOTE: the check for the packet types is done in
* dccp_rcv_state_process
*/
if (sk->sk_state == DCCP_LISTEN) {
struct sock *nsk = dccp_v6_hnd_req(sk, skb);
if (nsk == NULL)
goto discard;
/*
* Queue it on the new socket if the new socket is active,
* otherwise we just shortcircuit this and continue with
* the new socket..
*/
if (nsk != sk) {
if (dccp_child_process(sk, nsk, skb))
goto reset;
if (opt_skb != NULL)
__kfree_skb(opt_skb);
return 0;
}
}
if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
goto reset;
if (opt_skb) {
/* XXX This is where we would goto ipv6_pktoptions. */
__kfree_skb(opt_skb);
}
return 0;
reset:
dccp_v6_ctl_send_reset(sk, skb);
discard:
if (opt_skb != NULL)
__kfree_skb(opt_skb);
kfree_skb(skb);
return 0;
} | 0 | [
"CWE-362"
] | linux-2.6 | f6d8bd051c391c1c0458a30b2a7abcd939329259 | 127,477,527,617,214,960,000,000,000,000,000,000,000 | 113 | inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
int luaopen_create(lua_State *L) {
int i;
/* Manually construct our module table instead of
* relying on _register or _newlib */
lua_newtable(L);
for (i = 0; i < (sizeof(cmds)/sizeof(*cmds) - 1); i++) {
lua_pushcfunction(L, cmds[i].func);
lua_setfield(L, -2, cmds[i].name);
}
/* Add metadata */
lua_pushliteral(L, LUACMSGPACK_NAME);
lua_setfield(L, -2, "_NAME");
lua_pushliteral(L, LUACMSGPACK_VERSION);
lua_setfield(L, -2, "_VERSION");
lua_pushliteral(L, LUACMSGPACK_COPYRIGHT);
lua_setfield(L, -2, "_COPYRIGHT");
lua_pushliteral(L, LUACMSGPACK_DESCRIPTION);
lua_setfield(L, -2, "_DESCRIPTION");
return 1;
} | 0 | [
"CWE-119",
"CWE-787"
] | redis | 52a00201fca331217c3b4b8b634f6a0f57d6b7d3 | 570,727,236,705,241,460,000,000,000,000,000,000 | 22 | Security: fix Lua cmsgpack library stack overflow.
During an auditing effort, the Apple Vulnerability Research team discovered
a critical Redis security issue affecting the Lua scripting part of Redis.
-- Description of the problem
Several years ago I merged a pull request including many small changes at
the Lua MsgPack library (that originally I authored myself). The Pull
Request entered Redis in commit 90b6337c1, in 2014.
Unfortunately one of the changes included a variadic Lua function that
lacked the check for the available Lua C stack. As a result, calling the
"pack" MsgPack library function with a large number of arguments, results
into pushing into the Lua C stack a number of new values proportional to
the number of arguments the function was called with. The pushed values,
moreover, are controlled by untrusted user input.
This in turn causes stack smashing which we believe to be exploitable,
while not very deterministic, but it is likely that an exploit could be
created targeting specific versions of Redis executables. However at its
minimum the issue results in a DoS, crashing the Redis server.
-- Versions affected
Versions greater or equal to Redis 2.8.18 are affected.
-- Reproducing
Reproduce with this (based on the original reproduction script by
Apple security team):
https://gist.github.com/antirez/82445fcbea6d9b19f97014cc6cc79f8a
-- Verification of the fix
The fix was tested in the following way:
1) I checked that the problem is no longer observable running the trigger.
2) The Lua code was analyzed to understand the stack semantics, and that
actually enough stack is allocated in all the cases of mp_pack() calls.
3) The mp_pack() function was modified in order to show exactly what items
in the stack were being set, to make sure that there is no silent overflow
even after the fix.
-- Credits
Thank you to the Apple team and to the other persons that helped me
checking the patch and coordinating this communication. |
client *createClient(int fd) {
client *c = zmalloc(sizeof(client));
/* passing -1 as fd it is possible to create a non connected client.
* This is useful since all the commands needs to be executed
* in the context of a client. When commands are executed in other
* contexts (for instance a Lua script) we need a non connected client. */
if (fd != -1) {
anetNonBlock(NULL,fd);
anetEnableTcpNoDelay(NULL,fd);
if (server.tcpkeepalive)
anetKeepAlive(NULL,fd,server.tcpkeepalive);
if (aeCreateFileEvent(server.el,fd,AE_READABLE,
readQueryFromClient, c) == AE_ERR)
{
close(fd);
zfree(c);
return NULL;
}
}
selectDb(c,0);
c->id = server.next_client_id++;
c->fd = fd;
c->name = NULL;
c->bufpos = 0;
c->querybuf = sdsempty();
c->querybuf_peak = 0;
c->reqtype = 0;
c->argc = 0;
c->argv = NULL;
c->cmd = c->lastcmd = NULL;
c->multibulklen = 0;
c->bulklen = -1;
c->sentlen = 0;
c->flags = 0;
c->ctime = c->lastinteraction = server.unixtime;
c->authenticated = 0;
c->replstate = REPL_STATE_NONE;
c->repl_put_online_on_ack = 0;
c->reploff = 0;
c->repl_ack_off = 0;
c->repl_ack_time = 0;
c->slave_listening_port = 0;
c->slave_ip[0] = '\0';
c->slave_capa = SLAVE_CAPA_NONE;
c->reply = listCreate();
c->reply_bytes = 0;
c->obuf_soft_limit_reached_time = 0;
listSetFreeMethod(c->reply,decrRefCountVoid);
listSetDupMethod(c->reply,dupClientReplyValue);
c->btype = BLOCKED_NONE;
c->bpop.timeout = 0;
c->bpop.keys = dictCreate(&setDictType,NULL);
c->bpop.target = NULL;
c->bpop.numreplicas = 0;
c->bpop.reploffset = 0;
c->woff = 0;
c->watched_keys = listCreate();
c->pubsub_channels = dictCreate(&setDictType,NULL);
c->pubsub_patterns = listCreate();
c->peerid = NULL;
listSetFreeMethod(c->pubsub_patterns,decrRefCountVoid);
listSetMatchMethod(c->pubsub_patterns,listMatchObjects);
if (fd != -1) listAddNodeTail(server.clients,c);
initClientMultiState(c);
return c;
} | 0 | [
"CWE-254"
] | redis | 874804da0c014a7d704b3d285aa500098a931f50 | 68,339,461,679,309,420,000,000,000,000,000,000,000 | 68 | Security: Cross Protocol Scripting protection.
This is an attempt at mitigating problems due to cross protocol
scripting, an attack targeting services using line oriented protocols
like Redis that can accept HTTP requests as valid protocol, by
discarding the invalid parts and accepting the payloads sent, for
example, via a POST request.
For this to be effective, when we detect POST and Host: and terminate
the connection asynchronously, the networking code was modified in order
to never process further input. It was later verified that in a
pipelined request containing a POST command, the successive commands are
not executed. |
set_init_1(int clean_arg)
{
char_u *p;
int opt_idx;
long_u n;
#ifdef FEAT_LANGMAP
langmap_init();
#endif
// Be Vi compatible by default
p_cp = TRUE;
// Use POSIX compatibility when $VIM_POSIX is set.
if (mch_getenv((char_u *)"VIM_POSIX") != NULL)
{
set_string_default("cpo", (char_u *)CPO_ALL);
set_string_default("shm", (char_u *)SHM_POSIX);
}
/*
* Find default value for 'shell' option.
* Don't use it if it is empty.
*/
if (((p = mch_getenv((char_u *)"SHELL")) != NULL && *p != NUL)
#if defined(MSWIN)
|| ((p = mch_getenv((char_u *)"COMSPEC")) != NULL && *p != NUL)
|| ((p = (char_u *)default_shell()) != NULL && *p != NUL)
#endif
)
#if defined(MSWIN)
{
// For MS-Windows put the path in quotes instead of escaping spaces.
char_u *cmd;
size_t len;
if (vim_strchr(p, ' ') != NULL)
{
len = STRLEN(p) + 3; // two quotes and a trailing NUL
cmd = alloc(len);
if (cmd != NULL)
{
vim_snprintf((char *)cmd, len, "\"%s\"", p);
set_string_default("sh", cmd);
vim_free(cmd);
}
}
else
set_string_default("sh", p);
}
#else
set_string_default_esc("sh", p, TRUE);
#endif
#ifdef FEAT_WILDIGN
/*
* Set the default for 'backupskip' to include environment variables for
* temp files.
*/
{
# ifdef UNIX
static char *(names[4]) = {"", "TMPDIR", "TEMP", "TMP"};
# else
static char *(names[3]) = {"TMPDIR", "TEMP", "TMP"};
# endif
int len;
garray_T ga;
int mustfree;
char_u *item;
opt_idx = findoption((char_u *)"backupskip");
ga_init2(&ga, 1, 100);
for (n = 0; n < (long)ARRAY_LENGTH(names); ++n)
{
mustfree = FALSE;
# ifdef UNIX
if (*names[n] == NUL)
# ifdef MACOS_X
p = (char_u *)"/private/tmp";
# else
p = (char_u *)"/tmp";
# endif
else
# endif
p = vim_getenv((char_u *)names[n], &mustfree);
if (p != NULL && *p != NUL)
{
// First time count the NUL, otherwise count the ','.
len = (int)STRLEN(p) + 3;
item = alloc(len);
STRCPY(item, p);
add_pathsep(item);
STRCAT(item, "*");
if (find_dup_item(ga.ga_data, item, options[opt_idx].flags)
== NULL
&& ga_grow(&ga, len) == OK)
{
if (ga.ga_len > 0)
STRCAT(ga.ga_data, ",");
STRCAT(ga.ga_data, item);
ga.ga_len += len;
}
vim_free(item);
}
if (mustfree)
vim_free(p);
}
if (ga.ga_data != NULL)
{
set_string_default("bsk", ga.ga_data);
vim_free(ga.ga_data);
}
}
#endif
/*
* 'maxmemtot' and 'maxmem' may have to be adjusted for available memory
*/
opt_idx = findoption((char_u *)"maxmemtot");
if (opt_idx >= 0)
{
#if !defined(HAVE_AVAIL_MEM) && !defined(HAVE_TOTAL_MEM)
if (options[opt_idx].def_val[VI_DEFAULT] == (char_u *)0L)
#endif
{
#ifdef HAVE_AVAIL_MEM
// Use amount of memory available at this moment.
n = (mch_avail_mem(FALSE) >> 1);
#else
# ifdef HAVE_TOTAL_MEM
// Use amount of memory available to Vim.
n = (mch_total_mem(FALSE) >> 1);
# else
n = (0x7fffffff >> 11);
# endif
#endif
options[opt_idx].def_val[VI_DEFAULT] = (char_u *)n;
opt_idx = findoption((char_u *)"maxmem");
if (opt_idx >= 0)
{
#if !defined(HAVE_AVAIL_MEM) && !defined(HAVE_TOTAL_MEM)
if ((long)(long_i)options[opt_idx].def_val[VI_DEFAULT] > (long)n
|| (long)(long_i)options[opt_idx].def_val[VI_DEFAULT] == 0L)
#endif
options[opt_idx].def_val[VI_DEFAULT] = (char_u *)n;
}
}
}
#ifdef FEAT_SEARCHPATH
{
char_u *cdpath;
char_u *buf;
int i;
int j;
int mustfree = FALSE;
// Initialize the 'cdpath' option's default value.
cdpath = vim_getenv((char_u *)"CDPATH", &mustfree);
if (cdpath != NULL)
{
buf = alloc((STRLEN(cdpath) << 1) + 2);
if (buf != NULL)
{
buf[0] = ','; // start with ",", current dir first
j = 1;
for (i = 0; cdpath[i] != NUL; ++i)
{
if (vim_ispathlistsep(cdpath[i]))
buf[j++] = ',';
else
{
if (cdpath[i] == ' ' || cdpath[i] == ',')
buf[j++] = '\\';
buf[j++] = cdpath[i];
}
}
buf[j] = NUL;
opt_idx = findoption((char_u *)"cdpath");
if (opt_idx >= 0)
{
options[opt_idx].def_val[VI_DEFAULT] = buf;
options[opt_idx].flags |= P_DEF_ALLOCED;
}
else
vim_free(buf); // cannot happen
}
if (mustfree)
vim_free(cdpath);
}
}
#endif
#if defined(FEAT_POSTSCRIPT) && (defined(MSWIN) || defined(VMS) || defined(EBCDIC) || defined(MAC) || defined(hpux))
// Set print encoding on platforms that don't default to latin1
set_string_default("penc",
# if defined(MSWIN)
(char_u *)"cp1252"
# else
# ifdef VMS
(char_u *)"dec-mcs"
# else
# ifdef EBCDIC
(char_u *)"ebcdic-uk"
# else
# ifdef MAC
(char_u *)"mac-roman"
# else // HPUX
(char_u *)"hp-roman8"
# endif
# endif
# endif
# endif
);
#endif
#ifdef FEAT_POSTSCRIPT
// 'printexpr' must be allocated to be able to evaluate it.
set_string_default("pexpr",
# if defined(MSWIN)
(char_u *)"system('copy' . ' ' . v:fname_in . (&printdevice == '' ? ' LPT1:' : (' \"' . &printdevice . '\"'))) . delete(v:fname_in)"
# else
# ifdef VMS
(char_u *)"system('print/delete' . (&printdevice == '' ? '' : ' /queue=' . &printdevice) . ' ' . v:fname_in)"
# else
(char_u *)"system('lpr' . (&printdevice == '' ? '' : ' -P' . &printdevice) . ' ' . v:fname_in) . delete(v:fname_in) + v:shell_error"
# endif
# endif
);
#endif
/*
* Set all the options (except the terminal options) to their default
* value. Also set the global value for local options.
*/
set_options_default(0);
#ifdef CLEAN_RUNTIMEPATH
if (clean_arg)
{
opt_idx = findoption((char_u *)"runtimepath");
if (opt_idx >= 0)
{
options[opt_idx].def_val[VI_DEFAULT] = (char_u *)CLEAN_RUNTIMEPATH;
p_rtp = (char_u *)CLEAN_RUNTIMEPATH;
}
opt_idx = findoption((char_u *)"packpath");
if (opt_idx >= 0)
{
options[opt_idx].def_val[VI_DEFAULT] = (char_u *)CLEAN_RUNTIMEPATH;
p_pp = (char_u *)CLEAN_RUNTIMEPATH;
}
}
#endif
#ifdef FEAT_GUI
if (found_reverse_arg)
set_option_value((char_u *)"bg", 0L, (char_u *)"dark", 0);
#endif
curbuf->b_p_initialized = TRUE;
curbuf->b_p_ar = -1; // no local 'autoread' value
curbuf->b_p_ul = NO_LOCAL_UNDOLEVEL;
check_buf_options(curbuf);
check_win_options(curwin);
check_options();
// Must be before option_expand(), because that one needs vim_isIDc()
didset_options();
#ifdef FEAT_SPELL
// Use the current chartab for the generic chartab. This is not in
// didset_options() because it only depends on 'encoding'.
init_spell_chartab();
#endif
/*
* Expand environment variables and things like "~" for the defaults.
* If option_expand() returns non-NULL the variable is expanded. This can
* only happen for non-indirect options.
* Also set the default to the expanded value, so ":set" does not list
* them.
* Don't set the P_ALLOCED flag, because we don't want to free the
* default.
*/
for (opt_idx = 0; !istermoption_idx(opt_idx); opt_idx++)
{
if ((options[opt_idx].flags & P_GETTEXT)
&& options[opt_idx].var != NULL)
p = (char_u *)_(*(char **)options[opt_idx].var);
else
p = option_expand(opt_idx, NULL);
if (p != NULL && (p = vim_strsave(p)) != NULL)
{
*(char_u **)options[opt_idx].var = p;
// VIMEXP
// Defaults for all expanded options are currently the same for Vi
// and Vim. When this changes, add some code here! Also need to
// split P_DEF_ALLOCED in two.
if (options[opt_idx].flags & P_DEF_ALLOCED)
vim_free(options[opt_idx].def_val[VI_DEFAULT]);
options[opt_idx].def_val[VI_DEFAULT] = p;
options[opt_idx].flags |= P_DEF_ALLOCED;
}
}
save_file_ff(curbuf); // Buffer is unchanged
#if defined(FEAT_ARABIC)
// Detect use of mlterm.
// Mlterm is a terminal emulator akin to xterm that has some special
// abilities (bidi namely).
// NOTE: mlterm's author is being asked to 'set' a variable
// instead of an environment variable due to inheritance.
if (mch_getenv((char_u *)"MLTERM") != NULL)
set_option_value((char_u *)"tbidi", 1L, NULL, 0);
#endif
didset_options2();
# if defined(MSWIN) && defined(FEAT_GETTEXT)
/*
* If $LANG isn't set, try to get a good value for it. This makes the
* right language be used automatically. Don't do this for English.
*/
if (mch_getenv((char_u *)"LANG") == NULL)
{
char buf[20];
// Could use LOCALE_SISO639LANGNAME, but it's not in Win95.
// LOCALE_SABBREVLANGNAME gives us three letters, like "enu", we use
// only the first two.
n = GetLocaleInfo(LOCALE_USER_DEFAULT, LOCALE_SABBREVLANGNAME,
(LPTSTR)buf, 20);
if (n >= 2 && STRNICMP(buf, "en", 2) != 0)
{
// There are a few exceptions (probably more)
if (STRNICMP(buf, "cht", 3) == 0 || STRNICMP(buf, "zht", 3) == 0)
STRCPY(buf, "zh_TW");
else if (STRNICMP(buf, "chs", 3) == 0
|| STRNICMP(buf, "zhc", 3) == 0)
STRCPY(buf, "zh_CN");
else if (STRNICMP(buf, "jp", 2) == 0)
STRCPY(buf, "ja");
else
buf[2] = NUL; // truncate to two-letter code
vim_setenv((char_u *)"LANG", (char_u *)buf);
}
}
# else
# ifdef MACOS_CONVERT
// Moved to os_mac_conv.c to avoid dependency problems.
mac_lang_init();
# endif
# endif
# ifdef MSWIN
// MS-Windows has builtin support for conversion to and from Unicode, using
// "utf-8" for 'encoding' should work best for most users.
p = vim_strsave((char_u *)ENC_DFLT);
# else
// enc_locale() will try to find the encoding of the current locale.
// This works best for properly configured systems, old and new.
p = enc_locale();
# endif
if (p != NULL)
{
char_u *save_enc;
// Try setting 'encoding' and check if the value is valid.
// If not, go back to the default encoding.
save_enc = p_enc;
p_enc = p;
if (STRCMP(p_enc, "gb18030") == 0)
{
// We don't support "gb18030", but "cp936" is a good substitute
// for practical purposes, thus use that. It's not an alias to
// still support conversion between gb18030 and utf-8.
p_enc = vim_strsave((char_u *)"cp936");
vim_free(p);
}
if (mb_init() == NULL)
{
opt_idx = findoption((char_u *)"encoding");
if (opt_idx >= 0)
{
options[opt_idx].def_val[VI_DEFAULT] = p_enc;
options[opt_idx].flags |= P_DEF_ALLOCED;
}
#if defined(MSWIN) || defined(MACOS_X) || defined(VMS)
if (STRCMP(p_enc, "latin1") == 0 || enc_utf8)
{
// Adjust the default for 'isprint' and 'iskeyword' to match
// latin1. Also set the defaults for when 'nocompatible' is
// set.
set_string_option_direct((char_u *)"isp", -1,
ISP_LATIN1, OPT_FREE, SID_NONE);
set_string_option_direct((char_u *)"isk", -1,
ISK_LATIN1, OPT_FREE, SID_NONE);
opt_idx = findoption((char_u *)"isp");
if (opt_idx >= 0)
options[opt_idx].def_val[VIM_DEFAULT] = ISP_LATIN1;
opt_idx = findoption((char_u *)"isk");
if (opt_idx >= 0)
options[opt_idx].def_val[VIM_DEFAULT] = ISK_LATIN1;
(void)init_chartab();
}
#endif
#if defined(MSWIN) && (!defined(FEAT_GUI) || defined(VIMDLL))
// Win32 console: When GetACP() returns a different value from
// GetConsoleCP() set 'termencoding'.
if (
# ifdef VIMDLL
(!gui.in_use && !gui.starting) &&
# endif
GetACP() != GetConsoleCP())
{
char buf[50];
// Win32 console: In ConPTY, GetConsoleCP() returns zero.
// Use an alternative value.
if (GetConsoleCP() == 0)
sprintf(buf, "cp%ld", (long)GetACP());
else
sprintf(buf, "cp%ld", (long)GetConsoleCP());
p_tenc = vim_strsave((char_u *)buf);
if (p_tenc != NULL)
{
opt_idx = findoption((char_u *)"termencoding");
if (opt_idx >= 0)
{
options[opt_idx].def_val[VI_DEFAULT] = p_tenc;
options[opt_idx].flags |= P_DEF_ALLOCED;
}
convert_setup(&input_conv, p_tenc, p_enc);
convert_setup(&output_conv, p_enc, p_tenc);
}
else
p_tenc = empty_option;
}
#endif
#if defined(MSWIN)
// $HOME may have characters in active code page.
init_homedir();
#endif
}
else
{
vim_free(p_enc);
p_enc = save_enc;
}
}
#ifdef FEAT_MULTI_LANG
// Set the default for 'helplang'.
set_helplang_default(get_mess_lang());
#endif
} | 0 | [
"CWE-122"
] | vim | b7081e135a16091c93f6f5f7525a5c58fb7ca9f9 | 141,798,986,682,177,770,000,000,000,000,000,000,000 | 462 | patch 8.2.3402: invalid memory access when using :retab with large value
Problem: Invalid memory access when using :retab with large value.
Solution: Check the number is positive. |
void ConnectionHandlerImpl::stopListeners() {
for (auto& listener : listeners_) {
listener.second.listener_->destroy();
}
} | 0 | [
"CWE-835"
] | envoy | c8de199e2971f79cbcbc6b5eadc8c566b28705d1 | 205,046,315,459,816,820,000,000,000,000,000,000,000 | 5 | listener: clean up accept filter before creating connection (#8922)
Signed-off-by: Yuchen Dai <[email protected]> |
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, char *log_lvl)
{
struct unwind_state state;
struct stack_info stack_info = {0};
unsigned long visit_mask = 0;
int graph_idx = 0;
bool partial = false;
printk("%sCall Trace:\n", log_lvl);
unwind_start(&state, task, regs, stack);
stack = stack ? : get_stack_pointer(task, regs);
regs = unwind_get_entry_regs(&state, &partial);
/*
* Iterate through the stacks, starting with the current stack pointer.
* Each stack has a pointer to the next one.
*
* x86-64 can have several stacks:
* - task stack
* - interrupt stack
* - HW exception stacks (double fault, nmi, debug, mce)
* - entry stack
*
* x86-32 can have up to four stacks:
* - task stack
* - softirq stack
* - hardirq stack
* - entry stack
*/
for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
const char *stack_name;
if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
/*
* We weren't on a valid stack. It's possible that
* we overflowed a valid stack into a guard page.
* See if the next page up is valid so that we can
* generate some kind of backtrace if this happens.
*/
stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
if (get_stack_info(stack, task, &stack_info, &visit_mask))
break;
}
stack_name = stack_type_name(stack_info.type);
if (stack_name)
printk("%s <%s>\n", log_lvl, stack_name);
if (regs)
show_regs_if_on_stack(&stack_info, regs, partial);
/*
* Scan the stack, printing any text addresses we find. At the
* same time, follow proper stack frames with the unwinder.
*
* Addresses found during the scan which are not reported by
* the unwinder are considered to be additional clues which are
* sometimes useful for debugging and are prefixed with '?'.
* This also serves as a failsafe option in case the unwinder
* goes off in the weeds.
*/
for (; stack < stack_info.end; stack++) {
unsigned long real_addr;
int reliable = 0;
unsigned long addr = READ_ONCE_NOCHECK(*stack);
unsigned long *ret_addr_p =
unwind_get_return_address_ptr(&state);
if (!__kernel_text_address(addr))
continue;
/*
* Don't print regs->ip again if it was already printed
* by show_regs_if_on_stack().
*/
if (regs && stack == ®s->ip)
goto next;
if (stack == ret_addr_p)
reliable = 1;
/*
* When function graph tracing is enabled for a
* function, its return address on the stack is
* replaced with the address of an ftrace handler
* (return_to_handler). In that case, before printing
* the "real" address, we want to print the handler
* address as an "unreliable" hint that function graph
* tracing was involved.
*/
real_addr = ftrace_graph_ret_addr(task, &graph_idx,
addr, stack);
if (real_addr != addr)
printk_stack_address(addr, 0, log_lvl);
printk_stack_address(real_addr, reliable, log_lvl);
if (!reliable)
continue;
next:
/*
* Get the next frame from the unwinder. No need to
* check for an error: if anything goes wrong, the rest
* of the addresses will just be printed as unreliable.
*/
unwind_next_frame(&state);
/* if the frame has entry regs, print them */
regs = unwind_get_entry_regs(&state, &partial);
if (regs)
show_regs_if_on_stack(&stack_info, regs, partial);
}
if (stack_name)
printk("%s </%s>\n", log_lvl, stack_name);
}
} | 0 | [
"CWE-20"
] | linux | 342db04ae71273322f0011384a9ed414df8bdae4 | 314,054,969,102,102,500,000,000,000,000,000,000,000 | 119 | x86/dumpstack: Don't dump kernel memory based on usermode RIP
show_opcodes() is used both for dumping kernel instructions and for dumping
user instructions. If userspace causes #PF by jumping to a kernel address,
show_opcodes() can be reached with regs->ip controlled by the user,
pointing to kernel code. Make sure that userspace can't trick us into
dumping kernel memory into dmesg.
Fixes: 7cccf0725cf7 ("x86/dumpstack: Add a show_ip() function")
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Kees Cook <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected] |
void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
if (status == -NFS4ERR_BAD_SEQID) {
struct nfs4_state_owner *sp = container_of(seqid->sequence,
struct nfs4_state_owner, so_seqid);
nfs4_drop_state_owner(sp);
}
nfs_increment_seqid(status, seqid);
} | 0 | [
"CWE-703"
] | linux | dc0b027dfadfcb8a5504f7d8052754bf8d501ab9 | 144,602,663,795,589,250,000,000,000,000,000,000,000 | 9 | NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <[email protected]> |
QPDFObjectHandle::rotatePage(int angle, bool relative)
{
if ((angle % 90) != 0)
{
throw std::runtime_error(
"QPDF::rotatePage called with an"
" angle that is not a multiple of 90");
}
int new_angle = angle;
if (relative)
{
int old_angle = 0;
bool found_rotate = false;
QPDFObjectHandle cur_obj = *this;
bool searched_parent = false;
std::set<QPDFObjGen> visited;
while (! found_rotate)
{
if (visited.count(cur_obj.getObjGen()))
{
// Don't get stuck in an infinite loop
break;
}
if (! visited.empty())
{
searched_parent = true;
}
visited.insert(cur_obj.getObjGen());
if (cur_obj.getKey("/Rotate").isInteger())
{
found_rotate = true;
old_angle = cur_obj.getKey("/Rotate").getIntValue();
}
else if (cur_obj.getKey("/Parent").isDictionary())
{
cur_obj = cur_obj.getKey("/Parent");
}
else
{
break;
}
}
QTC::TC("qpdf", "QPDFObjectHandle found old angle",
searched_parent ? 0 : 1);
if ((old_angle % 90) != 0)
{
old_angle = 0;
}
new_angle += old_angle;
}
new_angle = (new_angle + 360) % 360;
replaceKey("/Rotate", QPDFObjectHandle::newInteger(new_angle));
} | 1 | [
"CWE-787"
] | qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 215,942,866,726,188,070,000,000,000,000,000,000,000 | 53 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
static int add_init_command(struct st_mysql_options *options, const char *cmd)
{
char *tmp;
if (!options->init_commands)
{
options->init_commands= (DYNAMIC_ARRAY*)my_malloc(sizeof(DYNAMIC_ARRAY),
MYF(MY_WME));
init_dynamic_array(options->init_commands,sizeof(char*),0,5);
}
if (!(tmp= my_strdup(cmd,MYF(MY_WME))) ||
insert_dynamic(options->init_commands, &tmp))
{
my_free(tmp);
return 1;
}
return 0;
} | 0 | [
"CWE-416"
] | mysql-server | 4797ea0b772d5f4c5889bc552424132806f46e93 | 191,459,840,131,752,500,000,000,000,000,000,000,000 | 20 | BUG#17512527: LIST HANDLING INCORRECT IN MYSQL_PRUNE_STMT_LIST()
Analysis:
---------
Invalid memory access maybe observed when using prepared statements if:
a) The mysql client connection is lost after statement preparation
is complete and
b) There is at least one statement which is in initialized state but
not prepared yet.
When the client detects a closed connection, it calls end_server()
to shutdown the connection. As part of the clean up, the
mysql_prune_stmt_list() removes the statements which has transitioned
beyond the initialized state and retains only the statements which
are in a initialized state. During this processing, the initialized
statements are moved from 'mysql->stmts' to a temporary 'pruned_list'.
When moving the first 'INIT_DONE' element to the pruned_list,
'element->next' is set to NULL. Hence the rest of the list is never
traversed and the statements which have transitioned beyond the
initialized state are never invalidated.
When the mysql_stmt_close() is called for the statement which is not
invalidated; the statements list is updated in order to remove the
statement. This would end up accessing freed memory(freed by the
mysql_stmt_close() for a previous statement in the list).
Fix:
---
mysql_prune_stmt_list() called list_add() incorrectly to create a
temporary list. The use case of list_add() is to add a single
element to the front of the doubly linked list.
mysql_prune_stmt_list() called list_add() by passing an entire
list as the 'element'.
mysql_prune_stmt_list() now uses list_delete() to remove the
statement which has transitioned beyond the initialized phase.
Thus the statement list would contain only elements where the
the state of the statement is initialized.
Note: Run the test with valgrind-mysqltest and leak-check=full
option to see the invalid memory access. |
virtual bool is_json_type() { return false; } | 0 | [
"CWE-617"
] | server | 2e7891080667c59ac80f788eef4d59d447595772 | 331,520,262,867,977,150,000,000,000,000,000,000,000 | 1 | MDEV-25635 Assertion failure when pushing from HAVING into WHERE of view
This bug could manifest itself after pushing a where condition over a
mergeable derived table / view / CTE DT into a grouping view / derived
table / CTE V whose item list contained set functions with constant
arguments such as MIN(2), SUM(1) etc. In such cases the field references
used in the condition pushed into the view V that correspond set functions
are wrapped into Item_direct_view_ref wrappers. Due to a wrong implementation
of the virtual method const_item() for the class Item_direct_view_ref the
wrapped set functions with constant arguments could be erroneously taken
for constant items. This could lead to a wrong result set returned by the
main select query in 10.2. In 10.4 where a possibility of pushing condition
from HAVING into WHERE had been added this could cause a crash.
Approved by Sergey Petrunya <[email protected]> |
sub_append_string (source, target, indx, size)
char *source, *target;
int *indx, *size;
{
if (source)
{
int srclen, n;
srclen = STRLEN (source);
if (srclen >= (int)(*size - *indx))
{
n = srclen + *indx;
n = (n + DEFAULT_ARRAY_SIZE) - (n % DEFAULT_ARRAY_SIZE);
target = (char *)xrealloc (target, (*size = n));
}
FASTCOPY (source, target + *indx, srclen);
*indx += srclen;
target[*indx] = '\0';
free (source);
}
return (target);
} | 0 | [] | bash | 955543877583837c85470f7fb8a97b7aa8d45e6c | 268,921,084,582,156,700,000,000,000,000,000,000,000 | 24 | bash-4.4-rc2 release |
void RWFunction::BERDecode(BufferedTransformation &bt)
{
BERSequenceDecoder seq(bt);
m_n.BERDecode(seq);
seq.MessageEnd();
} | 0 | [
"CWE-200",
"CWE-399"
] | cryptopp | 9425e16437439e68c7d96abef922167d68fafaff | 239,520,229,400,407,800,000,000,000,000,000,000,000 | 6 | Fix for CVE-2015-2141. Thanks to Evgeny Sidorov for reporting. Squaring to satisfy Jacobi requirements suggested by JPM. |
string GetCollectiveKey(OpKernelContext* c) {
return CollectiveKey(c, col_params_->group.group_key,
col_params_->instance.instance_key);
} | 0 | [
"CWE-416"
] | tensorflow | ca38dab9d3ee66c5de06f11af9a4b1200da5ef75 | 104,519,811,387,354,070,000,000,000,000,000,000,000 | 4 | Fix undefined behavior in CollectiveReduceV2 and others
We should not call done after it's moved.
PiperOrigin-RevId: 400838185
Change-Id: Ifc979740054b8f8c6f4d50acc89472fe60c4fdb1 |
void HttpIntegrationTest::testRouterRequestAndResponseWithBody(
uint64_t request_size, uint64_t response_size, bool big_header,
ConnectionCreationFunction* create_connection) {
initialize();
codec_client_ = makeHttpConnection(
create_connection ? ((*create_connection)()) : makeClientConnection((lookupPort("http"))));
Http::TestHeaderMapImpl request_headers{
{":method", "POST"}, {":path", "/test/long/url"}, {":scheme", "http"},
{":authority", "host"}, {"x-lyft-user-id", "123"}, {"x-forwarded-for", "10.0.0.1"}};
if (big_header) {
request_headers.addCopy("big", std::string(4096, 'a'));
}
auto response = sendRequestAndWaitForResponse(request_headers, request_size,
default_response_headers_, response_size);
checkSimpleRequestSuccess(request_size, response_size, response.get());
} | 0 | [
"CWE-400",
"CWE-703"
] | envoy | afc39bea36fd436e54262f150c009e8d72db5014 | 336,998,181,920,472,040,000,000,000,000,000,000,000 | 16 | Track byteSize of HeaderMap internally.
Introduces a cached byte size updated internally in HeaderMap. The value
is stored as an optional, and is cleared whenever a non-const pointer or
reference to a HeaderEntry is accessed. The cached value can be set with
refreshByteSize() which performs an iteration over the HeaderMap to sum
the size of each key and value in the HeaderMap.
Signed-off-by: Asra Ali <[email protected]> |
static gboolean
is_sre_type_builder (MonoClass *class)
{
return FALSE; | 0 | [
"CWE-20"
] | mono | 65292a69c837b8a5f7a392d34db63de592153358 | 132,720,110,662,271,690,000,000,000,000,000,000,000 | 4 | Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847 |
static pj_status_t file_get_frame(pjmedia_port *this_port,
pjmedia_frame *frame)
{
struct file_reader_port *fport = (struct file_reader_port*)this_port;
pj_size_t frame_size;
pj_status_t status = PJ_SUCCESS;
pj_assert(fport->base.info.signature == SIGNATURE);
pj_assert(frame->size <= fport->bufsize);
/* EOF is set and readpos already passed the eofpos */
if (fport->eof && fport->readpos >= fport->eofpos) {
PJ_LOG(5,(THIS_FILE, "File port %.*s EOF",
(int)fport->base.info.name.slen,
fport->base.info.name.ptr));
/* Call callback, if any */
if (fport->cb2) {
pj_bool_t no_loop = (fport->options & PJMEDIA_FILE_NO_LOOP);
if (!fport->subscribed) {
status = pjmedia_event_subscribe(NULL, &file_on_event,
fport, fport);
fport->subscribed = (status == PJ_SUCCESS)? PJ_TRUE:
PJ_FALSE;
}
if (fport->subscribed && fport->eof != 2) {
pjmedia_event event;
if (no_loop) {
/* To prevent the callback from being called repeatedly */
fport->eof = 2;
} else {
fport->eof = PJ_FALSE;
}
pjmedia_event_init(&event, PJMEDIA_EVENT_CALLBACK,
NULL, fport);
pjmedia_event_publish(NULL, fport, &event,
PJMEDIA_EVENT_PUBLISH_POST_EVENT);
}
/* Should not access player port after this since
* it might have been destroyed by the callback.
*/
frame->type = PJMEDIA_FRAME_TYPE_NONE;
frame->size = 0;
return (no_loop? PJ_EEOF: PJ_SUCCESS);
} else if (fport->cb) {
status = (*fport->cb)(this_port, fport->base.port_data.pdata);
}
/* If callback returns non PJ_SUCCESS or 'no loop' is specified,
* return immediately (and don't try to access player port since
* it might have been destroyed by the callback).
*/
if ((status != PJ_SUCCESS) || (fport->options & PJMEDIA_FILE_NO_LOOP))
{
frame->type = PJMEDIA_FRAME_TYPE_NONE;
frame->size = 0;
return PJ_EEOF;
}
/* Rewind file */
PJ_LOG(5,(THIS_FILE, "File port %.*s rewinding..",
(int)fport->base.info.name.slen,
fport->base.info.name.ptr));
fport->eof = PJ_FALSE;
}
//pj_assert(frame->size == fport->base.info.bytes_per_frame);
if (fport->fmt_tag == PJMEDIA_WAVE_FMT_TAG_PCM) {
frame_size = frame->size;
//frame->size = frame_size;
} else {
/* Must be ULAW or ALAW */
pj_assert(fport->fmt_tag == PJMEDIA_WAVE_FMT_TAG_ULAW ||
fport->fmt_tag == PJMEDIA_WAVE_FMT_TAG_ALAW);
frame_size = frame->size >> 1;
frame->size = frame_size << 1;
}
/* Copy frame from buffer. */
frame->type = PJMEDIA_FRAME_TYPE_AUDIO;
frame->timestamp.u64 = 0;
if ((fport->readpos + frame_size) <= (fport->buf + fport->bufsize))
{
/* Read contiguous buffer. */
pj_memcpy(frame->buf, fport->readpos, frame_size);
/* Fill up the buffer if all has been read. */
fport->readpos += frame_size;
if (fport->readpos == fport->buf + fport->bufsize) {
fport->readpos = fport->buf;
status = fill_buffer(fport);
if (status != PJ_SUCCESS) {
frame->type = PJMEDIA_FRAME_TYPE_NONE;
frame->size = 0;
fport->readpos = fport->buf + fport->bufsize;
return status;
}
}
} else {
unsigned endread;
/* Split read.
* First stage: read until end of buffer.
*/
endread = (unsigned)((fport->buf+fport->bufsize) - fport->readpos);
pj_memcpy(frame->buf, fport->readpos, endread);
/* End Of Buffer and EOF and NO LOOP */
if (fport->eof && (fport->options & PJMEDIA_FILE_NO_LOOP)) {
fport->readpos += endread;
if (fport->fmt_tag == PJMEDIA_WAVE_FMT_TAG_PCM) {
pj_bzero((char*)frame->buf + endread, frame_size - endread);
} else if (fport->fmt_tag == PJMEDIA_WAVE_FMT_TAG_ULAW) {
int val = pjmedia_linear2ulaw(0);
pj_memset((char*)frame->buf + endread, val,
frame_size - endread);
} else if (fport->fmt_tag == PJMEDIA_WAVE_FMT_TAG_ALAW) {
int val = pjmedia_linear2alaw(0);
pj_memset((char*)frame->buf + endread, val,
frame_size - endread);
}
return PJ_SUCCESS;
}
/* Second stage: fill up buffer, and read from the start of buffer. */
status = fill_buffer(fport);
if (status != PJ_SUCCESS) {
frame->type = PJMEDIA_FRAME_TYPE_NONE;
frame->size = 0;
fport->readpos = fport->buf + fport->bufsize;
return status;
}
pj_memcpy(((char*)frame->buf)+endread, fport->buf, frame_size-endread);
fport->readpos = fport->buf + (frame_size - endread);
}
if (fport->fmt_tag == PJMEDIA_WAVE_FMT_TAG_ULAW ||
fport->fmt_tag == PJMEDIA_WAVE_FMT_TAG_ALAW)
{
unsigned i;
pj_uint16_t *dst;
pj_uint8_t *src;
dst = (pj_uint16_t*)frame->buf + frame_size - 1;
src = (pj_uint8_t*)frame->buf + frame_size - 1;
if (fport->fmt_tag == PJMEDIA_WAVE_FMT_TAG_ULAW) {
for (i = 0; i < frame_size; ++i) {
*dst-- = (pj_uint16_t) pjmedia_ulaw2linear(*src--);
}
} else {
for (i = 0; i < frame_size; ++i) {
*dst-- = (pj_uint16_t) pjmedia_alaw2linear(*src--);
}
}
}
return PJ_SUCCESS;
} | 0 | [
"CWE-703",
"CWE-835"
] | pjproject | 947bc1ee6d05be10204b918df75a503415fd3213 | 86,785,793,192,782,480,000,000,000,000,000,000,000 | 172 | Merge pull request from GHSA-rwgw-vwxg-q799
* Prevent potential infinite loop when parsing WAV format file
* Check if subchunk is negative.
* Fix and add checks
* Change data type from pj_ssize_t to long.
* Modify check
* Fix leak file descriptor and modify check on wav_playlist
* Move overflow/underflow check to pj_file_setpos()
* Use macro to simplify check
* modification based on comments
* Remove unnecessary casting
* Modification based on comments |
int wc_RsaPrivateDecrypt_ex(const byte* in, word32 inLen, byte* out,
word32 outLen, RsaKey* key, int type,
enum wc_HashType hash, int mgf, byte* label,
word32 labelSz)
{
WC_RNG* rng;
#ifdef WC_RSA_BLINDING
rng = key->rng;
#else
rng = NULL;
#endif
return RsaPrivateDecryptEx((byte*)in, inLen, out, outLen, NULL, key,
RSA_PRIVATE_DECRYPT, RSA_BLOCK_TYPE_2, type, hash, mgf, label,
labelSz, 0, rng);
} | 0 | [
"CWE-310",
"CWE-787"
] | wolfssl | fb2288c46dd4c864b78f00a47a364b96a09a5c0f | 5,313,398,798,420,582,400,000,000,000,000,000,000 | 15 | RSA-PSS: Handle edge case with encoding message to hash
When the key is small relative to the digest (1024-bit key, 64-byte
hash, 61-byte salt length), the internal message to hash is larger than
the output size.
Allocate a buffer for the message when this happens. |
R_API RBinInfo *r_bin_get_info(RBin *bin) {
RBinObject *o = r_bin_cur_object (bin);
return o? o->info: NULL;
} | 0 | [
"CWE-125"
] | radare2 | d31c4d3cbdbe01ea3ded16a584de94149ecd31d9 | 147,981,618,864,075,230,000,000,000,000,000,000,000 | 4 | Fix #8748 - Fix oobread on string search |
PHP_FUNCTION(bcpowmod)
{
char *left, *right, *modulous;
int left_len, right_len, modulous_len;
bc_num first, second, mod, result;
long scale = BCG(bc_precision);
int scale_int;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sss|l", &left, &left_len, &right, &right_len, &modulous, &modulous_len, &scale) == FAILURE) {
return;
}
bc_init_num(&first TSRMLS_CC);
bc_init_num(&second TSRMLS_CC);
bc_init_num(&mod TSRMLS_CC);
bc_init_num(&result TSRMLS_CC);
php_str2num(&first, left TSRMLS_CC);
php_str2num(&second, right TSRMLS_CC);
php_str2num(&mod, modulous TSRMLS_CC);
scale_int = (int) ((int)scale < 0) ? 0 : scale;
if (bc_raisemod(first, second, mod, &result, scale_int TSRMLS_CC) != -1) {
if (result->n_scale > scale_int) {
result = split_bc_num(result);
result->n_scale = scale_int;
}
Z_STRVAL_P(return_value) = bc_num2str(result);
Z_STRLEN_P(return_value) = strlen(Z_STRVAL_P(return_value));
Z_TYPE_P(return_value) = IS_STRING;
} else {
RETVAL_FALSE;
}
bc_free_num(&first);
bc_free_num(&second);
bc_free_num(&mod);
bc_free_num(&result);
return;
} | 0 | [
"CWE-20"
] | php-src | ed52bcb3dcb2e7dbc009ef8c6579fb1276ca73c1 | 282,495,493,095,607,300,000,000,000,000,000,000,000 | 40 | Fix bug #72093: bcpowmod accepts negative scale and corrupts _one_ definition
We can not modify result since it can be copy of _zero_ or _one_, etc. and
"copy" in bcmath is just bumping the refcount.
Conflicts:
main/php_version.h |
inline void Mean(const T* input_data, const Dims<4>& input_dims,
const std::vector<int>& reduction_indices, T* output_data,
const Dims<4>& output_dims) {
tflite::MeanParams op_params;
op_params.axis_count = reduction_indices.size();
for (int i = 0; i < op_params.axis_count; ++i) {
op_params.axis[i] = reduction_indices[op_params.axis_count - 1 - i];
}
Mean(op_params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data);
} | 0 | [
"CWE-703",
"CWE-835"
] | tensorflow | dfa22b348b70bb89d6d6ec0ff53973bacb4f4695 | 143,098,895,217,959,830,000,000,000,000,000,000,000 | 12 | Prevent a division by 0 in average ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3 |
usage(void)
{
#define DATA(s) s "\n"
static const char msg[] =
{
DATA("Usage: tabs [options] [tabstop-list]")
DATA("")
DATA("Options:")
DATA(" -0 reset tabs")
DATA(" -8 set tabs to standard interval")
DATA(" -a Assembler, IBM S/370, first format")
DATA(" -a2 Assembler, IBM S/370, second format")
DATA(" -c COBOL, normal format")
DATA(" -c2 COBOL compact format")
DATA(" -c3 COBOL compact format extended")
DATA(" -d debug (show ruler with expected/actual tab positions)")
DATA(" -f FORTRAN")
DATA(" -n no-op (do not modify terminal settings)")
DATA(" -p PL/I")
DATA(" -s SNOBOL")
DATA(" -u UNIVAC 1100 Assembler")
DATA(" -T name use terminal type 'name'")
DATA(" -V print version")
DATA("")
DATA("A tabstop-list is an ordered list of column numbers, e.g., 1,11,21")
DATA("or 1,+10,+10 which is the same.")
};
#undef DATA
fflush(stdout);
fputs(msg, stderr);
ExitProgram(EXIT_FAILURE);
} | 0 | [] | ncurses | 790a85dbd4a81d5f5d8dd02a44d84f01512ef443 | 257,050,371,828,634,230,000,000,000,000,000,000,000 | 33 | ncurses 6.2 - patch 20200531
+ correct configure version-check/warnng for g++ to allow for 10.x
+ re-enable "bel" in konsole-base (report by Nia Huang)
+ add linux-s entry (patch by Alexandre Montaron).
+ drop long-obsolete convert_configure.pl
+ add test/test_parm.c, for checking tparm changes.
+ improve parameter-checking for tparm, adding function _nc_tiparm() to
handle the most-used case, which accepts only numeric parameters
(report/testcase by "puppet-meteor").
+ use a more conservative estimate of the buffer-size in lib_tparm.c's
save_text() and save_number(), in case the sprintf() function
passes-through unexpected characters from a format specifier
(report/testcase by "puppet-meteor").
+ add a check for end-of-string in cvtchar to handle a malformed
string in infotocap (report/testcase by "puppet-meteor"). |
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
return pmd_write(pmd) ||
((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
} | 1 | [
"CWE-362"
] | linux | 17839856fd588f4ab6b789f482ed3ffd7c403e1f | 45,079,457,499,928,670,000,000,000,000,000,000,000 | 5 | gup: document and work around "COW can break either way" issue
Doing a "get_user_pages()" on a copy-on-write page for reading can be
ambiguous: the page can be COW'ed at any time afterwards, and the
direction of a COW event isn't defined.
Yes, whoever writes to it will generally do the COW, but if the thread
that did the get_user_pages() unmapped the page before the write (and
that could happen due to memory pressure in addition to any outright
action), the writer could also just take over the old page instead.
End result: the get_user_pages() call might result in a page pointer
that is no longer associated with the original VM, and is associated
with - and controlled by - another VM having taken it over instead.
So when doing a get_user_pages() on a COW mapping, the only really safe
thing to do would be to break the COW when getting the page, even when
only getting it for reading.
At the same time, some users simply don't even care.
For example, the perf code wants to look up the page not because it
cares about the page, but because the code simply wants to look up the
physical address of the access for informational purposes, and doesn't
really care about races when a page might be unmapped and remapped
elsewhere.
This adds logic to force a COW event by setting FOLL_WRITE on any
copy-on-write mapping when FOLL_GET (or FOLL_PIN) is used to get a page
pointer as a result.
The current semantics end up being:
- __get_user_pages_fast(): no change. If you don't ask for a write,
you won't break COW. You'd better know what you're doing.
- get_user_pages_fast(): the fast-case "look it up in the page tables
without anything getting mmap_sem" now refuses to follow a read-only
page, since it might need COW breaking. Which happens in the slow
path - the fast path doesn't know if the memory might be COW or not.
- get_user_pages() (including the slow-path fallback for gup_fast()):
for a COW mapping, turn on FOLL_WRITE for FOLL_GET/FOLL_PIN, with
very similar semantics to FOLL_FORCE.
If it turns out that we want finer granularity (ie "only break COW when
it might actually matter" - things like the zero page are special and
don't need to be broken) we might need to push these semantics deeper
into the lookup fault path. So if people care enough, it's possible
that we might end up adding a new internal FOLL_BREAK_COW flag to go
with the internal FOLL_COW flag we already have for tracking "I had a
COW".
Alternatively, if it turns out that different callers might want to
explicitly control the forced COW break behavior, we might even want to
make such a flag visible to the users of get_user_pages() instead of
using the above default semantics.
But for now, this is mostly commentary on the issue (this commit message
being a lot bigger than the patch, and that patch in turn is almost all
comments), with that minimal "enable COW breaking early" logic using the
existing FOLL_WRITE behavior.
[ It might be worth noting that we've always had this ambiguity, and it
could arguably be seen as a user-space issue.
You only get private COW mappings that could break either way in
situations where user space is doing cooperative things (ie fork()
before an execve() etc), but it _is_ surprising and very subtle, and
fork() is supposed to give you independent address spaces.
So let's treat this as a kernel issue and make the semantics of
get_user_pages() easier to understand. Note that obviously a true
shared mapping will still get a page that can change under us, so this
does _not_ mean that get_user_pages() somehow returns any "stable"
page ]
Reported-by: Jann Horn <[email protected]>
Tested-by: Christoph Hellwig <[email protected]>
Acked-by: Oleg Nesterov <[email protected]>
Acked-by: Kirill Shutemov <[email protected]>
Acked-by: Jan Kara <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
void _TIFFsetShortArray(uint16** wpp, uint16* wp, uint32 n)
{ setByteArray((void**) wpp, (void*) wp, n, sizeof (uint16)); } | 0 | [
"CWE-20"
] | libtiff | 3144e57770c1e4d26520d8abee750f8ac8b75490 | 239,601,217,208,876,730,000,000,000,000,000,000,000 | 2 | * libtiff/tif_dir.c, tif_dirread.c, tif_dirwrite.c: implement various clampings
of double to other data types to avoid undefined behaviour if the output range
isn't big enough to hold the input value.
Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2643
http://bugzilla.maptools.org/show_bug.cgi?id=2642
http://bugzilla.maptools.org/show_bug.cgi?id=2646
http://bugzilla.maptools.org/show_bug.cgi?id=2647 |
static int mov_switch_root(AVFormatContext *s, int64_t target, int index)
{
int ret;
MOVContext *mov = s->priv_data;
if (index >= 0 && index < mov->frag_index.nb_items)
target = mov->frag_index.item[index].moof_offset;
if (avio_seek(s->pb, target, SEEK_SET) != target) {
av_log(mov->fc, AV_LOG_ERROR, "root atom offset 0x%"PRIx64": partial file\n", target);
return AVERROR_INVALIDDATA;
}
mov->next_root_atom = 0;
if (index < 0 || index >= mov->frag_index.nb_items)
index = search_frag_moof_offset(&mov->frag_index, target);
if (index < mov->frag_index.nb_items &&
mov->frag_index.item[index].moof_offset == target) {
if (index + 1 < mov->frag_index.nb_items)
mov->next_root_atom = mov->frag_index.item[index + 1].moof_offset;
if (mov->frag_index.item[index].headers_read)
return 0;
mov->frag_index.item[index].headers_read = 1;
}
mov->found_mdat = 0;
ret = mov_read_default(mov, s->pb, (MOVAtom){ AV_RL32("root"), INT64_MAX });
if (ret < 0)
return ret;
if (avio_feof(s->pb))
return AVERROR_EOF;
av_log(s, AV_LOG_TRACE, "read fragments, offset 0x%"PRIx64"\n", avio_tell(s->pb));
return 1;
} | 0 | [
"CWE-703"
] | FFmpeg | c953baa084607dd1d84c3bfcce3cf6a87c3e6e05 | 326,533,798,116,570,500,000,000,000,000,000,000,000 | 35 | avformat/mov: Check count sums in build_open_gop_key_points()
Fixes: ffmpeg.md
Fixes: Out of array access
Fixes: CVE-2022-2566
Found-by: Andy Nguyen <[email protected]>
Found-by: 3pvd <[email protected]>
Reviewed-by: Andy Nguyen <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]> |
static int rec_cmp(const struct dnsp_DnssrvRpcRecord *r1,
const struct dnsp_DnssrvRpcRecord *r2)
{
if (r1->wType != r2->wType) {
/*
* The records are sorted with higher types first,
* which puts tombstones (type 0) last.
*/
return r2->wType - r1->wType;
}
/*
* Then we need to sort from the oldest to newest timestamp.
*
* Note that dwTimeStamp == 0 (never expiring) records come first,
* then the ones whose expiry is soonest.
*/
return r1->dwTimeStamp - r2->dwTimeStamp;
} | 0 | [
"CWE-200"
] | samba | 0a3aa5f908e351201dc9c4d4807b09ed9eedff77 | 77,409,987,809,874,270,000,000,000,000,000,000,000 | 18 | CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]> |
void HRangeAnalysis::RollBackTo(int index) {
for (int i = index + 1; i < changed_ranges_.length(); ++i) {
changed_ranges_[i]->RemoveLastAddedRange();
}
changed_ranges_.Rewind(index + 1);
} | 0 | [] | node | fd80a31e0697d6317ce8c2d289575399f4e06d21 | 334,740,732,746,436,930,000,000,000,000,000,000,000 | 6 | deps: backport 5f836c from v8 upstream
Original commit message:
Fix Hydrogen bounds check elimination
When combining bounds checks, they must all be moved before the first load/store
that they are guarding.
BUG=chromium:344186
LOG=y
[email protected]
Review URL: https://codereview.chromium.org/172093002
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@19475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
fix #8070 |
messageGetMimeType(const message *m)
{
assert(m != NULL);
return m->mimeType;
} | 0 | [
"CWE-125"
] | clamav-devel | 586a5180287262070637c8943f2f7efd652e4a2c | 327,092,565,684,248,660,000,000,000,000,000,000,000 | 6 | bb11797 - fix invalid read in fuzzed mail file. |
flatpak_dir_get_name_cached (FlatpakDir *self)
{
char *name;
name = g_object_get_data (G_OBJECT (self), "cached-name");
if (!name)
{
name = flatpak_dir_get_name (self),
g_object_set_data_full (G_OBJECT (self), "cached-name", name, g_free);
}
return (const char *)name;
} | 0 | [
"CWE-668"
] | flatpak | cd2142888fc4c199723a0dfca1f15ea8788a5483 | 40,838,497,110,408,717,000,000,000,000,000,000,000 | 13 | Don't expose /proc when running apply_extra
As shown by CVE-2019-5736, it is sometimes possible for the sandbox
app to access outside files using /proc/self/exe. This is not
typically an issue for flatpak as the sandbox runs as the user which
has no permissions to e.g. modify the host files.
However, when installing apps using extra-data into the system repo
we *do* actually run a sandbox as root. So, in this case we disable mounting
/proc in the sandbox, which will neuter attacks like this. |
MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
range_exception,
status,
taint;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->ping != MagickFalse)
return(MagickTrue);
if (image->storage_class != PseudoClass)
return(MagickFalse);
assert(image->colormap != (PixelInfo *) NULL);
range_exception=MagickFalse;
status=MagickTrue;
taint=image->taint;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(range_exception,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
index;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->taint=taint;
if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
return(status);
} | 0 | [
"CWE-665"
] | ImageMagick | ee3dae8624e69261760754442827aea4d0254a6f | 42,087,587,934,524,894,000,000,000,000,000,000,000 | 65 | https://github.com/ImageMagick/ImageMagick/issues/1522 |
rsvg_cairo_render_pango_layout (RsvgDrawingCtx * ctx, PangoLayout * layout, double x, double y)
{
RsvgCairoRender *render = (RsvgCairoRender *) ctx->render;
RsvgState *state = rsvg_current_state (ctx);
PangoRectangle ink;
RsvgBbox bbox;
_rsvg_cairo_set_text_antialias (render->cr, state->text_rendering_type);
_set_rsvg_affine (render, state->affine);
pango_layout_get_extents (layout, &ink, NULL);
rsvg_bbox_init (&bbox, state->affine);
bbox.x = x + ink.x / (double)PANGO_SCALE;
bbox.y = y + ink.y / (double)PANGO_SCALE;
bbox.w = ink.width / (double)PANGO_SCALE;
bbox.h = ink.height / (double)PANGO_SCALE;
bbox.virgin = 0;
if (state->fill) {
cairo_move_to (render->cr, x, y);
rsvg_bbox_insert (&render->bbox, &bbox);
_set_source_rsvg_paint_server (ctx,
state->current_color,
state->fill,
state->fill_opacity,
bbox, rsvg_current_state (ctx)->current_color);
pango_cairo_show_layout (render->cr, layout);
}
if (state->stroke) {
cairo_move_to (render->cr, x, y);
rsvg_bbox_insert (&render->bbox, &bbox);
pango_cairo_layout_path (render->cr, layout);
_set_source_rsvg_paint_server (ctx,
state->current_color,
state->stroke,
state->stroke_opacity,
bbox, rsvg_current_state (ctx)->current_color);
cairo_set_line_width (render->cr, _rsvg_css_normalize_length (&state->stroke_width, ctx, 'h'));
cairo_set_miter_limit (render->cr, state->miter_limit);
cairo_set_line_cap (render->cr, (cairo_line_cap_t) state->cap);
cairo_set_line_join (render->cr, (cairo_line_join_t) state->join);
cairo_set_dash (render->cr, state->dash.dash, state->dash.n_dash,
_rsvg_css_normalize_length (&state->dash.offset, ctx, 'o'));
cairo_stroke (render->cr);
}
} | 0 | [] | librsvg | 34c95743ca692ea0e44778e41a7c0a129363de84 | 50,396,642,792,979,430,000,000,000,000,000,000,000 | 52 | Store node type separately in RsvgNode
The node name (formerly RsvgNode:type) cannot be used to infer
the sub-type of RsvgNode that we're dealing with, since for unknown
elements we put type = node-name. This lead to a (potentially exploitable)
crash e.g. when the element name started with "fe" which tricked
the old code into considering it as a RsvgFilterPrimitive.
CVE-2011-3146
https://bugzilla.gnome.org/show_bug.cgi?id=658014 |
expand_case_fold_make_rem_string(Node** rnode, UChar *s, UChar *end, regex_t* reg)
{
int r;
Node *node;
node = onig_node_new_str(s, end);
if (IS_NULL(node)) return ONIGERR_MEMORY;
r = update_string_node_case_fold(reg, node);
if (r != 0) {
onig_node_free(node);
return r;
}
NODE_STRING_SET_AMBIG(node);
NODE_STRING_SET_DONT_GET_OPT_INFO(node);
*rnode = node;
return 0;
} | 0 | [
"CWE-476",
"CWE-125"
] | oniguruma | c509265c5f6ae7264f7b8a8aae1cfa5fc59d108c | 239,874,895,705,916,560,000,000,000,000,000,000,000 | 19 | Fix CVE-2019-13225: problem in converting if-then-else pattern to bytecode. |
static void free_ptree_data(void *data) {
;
} | 0 | [
"CWE-416",
"CWE-787"
] | nDPI | 6a9f5e4f7c3fd5ddab3e6727b071904d76773952 | 318,774,980,934,128,150,000,000,000,000,000,000,000 | 3 | Fixed use after free caused by dangling pointer
* This fix also improved RCE Injection detection
Signed-off-by: Toni Uhlig <[email protected]> |
static int _server_handle_vKill(libgdbr_t *g, int (*cmd_cb) (void*, const char*, char*, size_t), void *core_ptr) {
if (send_ack (g) < 0) {
return -1;
}
// TODO handle killing of pid
send_msg (g, "OK");
return -1;
} | 0 | [
"CWE-703",
"CWE-787"
] | radare2 | 796dd28aaa6b9fa76d99c42c4d5ff8b257cc2191 | 27,522,240,320,822,265,000,000,000,000,000,000,000 | 8 | Fix ext2 buffer overflow in r2_sbu_grub_memmove |
TEST(ParseExpression, ShouldRejectExpressionWithWrongNumberOfArguments) {
ASSERT_THROWS(parseExpression(BSON("$strcasecmp" << BSON_ARRAY("foo"))), AssertionException);
} | 0 | [
"CWE-835"
] | mongo | 0a076417d1d7fba3632b73349a1fd29a83e68816 | 228,144,327,457,441,850,000,000,000,000,000,000,000 | 3 | SERVER-38070 fix infinite loop in agg expression |
int wc_RsaPublicKeyDecode_ex(const byte* input, word32* inOutIdx, word32 inSz,
const byte** n, word32* nSz, const byte** e, word32* eSz)
{
int ret = 0;
int length = 0;
#if defined(OPENSSL_EXTRA) || defined(RSA_DECODE_EXTRA)
word32 localIdx;
byte tag;
#endif
if (input == NULL || inOutIdx == NULL)
return BAD_FUNC_ARG;
if (GetSequence(input, inOutIdx, &length, inSz) < 0)
return ASN_PARSE_E;
#if defined(OPENSSL_EXTRA) || defined(RSA_DECODE_EXTRA)
localIdx = *inOutIdx;
if (GetASNTag(input, &localIdx, &tag, inSz) < 0)
return BUFFER_E;
if (tag != ASN_INTEGER) {
/* not from decoded cert, will have algo id, skip past */
if (GetSequence(input, inOutIdx, &length, inSz) < 0)
return ASN_PARSE_E;
if (SkipObjectId(input, inOutIdx, inSz) < 0)
return ASN_PARSE_E;
/* Option NULL ASN.1 tag */
if (*inOutIdx >= inSz) {
return BUFFER_E;
}
localIdx = *inOutIdx;
if (GetASNTag(input, &localIdx, &tag, inSz) < 0)
return ASN_PARSE_E;
if (tag == ASN_TAG_NULL) {
ret = GetASNNull(input, inOutIdx, inSz);
if (ret != 0)
return ret;
}
/* should have bit tag length and seq next */
ret = CheckBitString(input, inOutIdx, NULL, inSz, 1, NULL);
if (ret != 0)
return ret;
if (GetSequence(input, inOutIdx, &length, inSz) < 0)
return ASN_PARSE_E;
}
#endif /* OPENSSL_EXTRA */
/* Get modulus */
ret = GetASNInt(input, inOutIdx, &length, inSz);
if (ret < 0) {
return ASN_RSA_KEY_E;
}
if (nSz)
*nSz = length;
if (n)
*n = &input[*inOutIdx];
*inOutIdx += length;
/* Get exponent */
ret = GetASNInt(input, inOutIdx, &length, inSz);
if (ret < 0) {
return ASN_RSA_KEY_E;
}
if (eSz)
*eSz = length;
if (e)
*e = &input[*inOutIdx];
*inOutIdx += length;
return ret;
} | 0 | [
"CWE-125",
"CWE-345"
] | wolfssl | f93083be72a3b3d956b52a7ec13f307a27b6e093 | 178,483,572,661,476,300,000,000,000,000,000,000,000 | 78 | OCSP: improve handling of OCSP no check extension |
vte_sequence_handler_cl (VteTerminal *terminal, GValueArray *params)
{
_vte_terminal_clear_screen (terminal);
_vte_terminal_home_cursor (terminal);
/* We've modified the display. Make a note of it. */
terminal->pvt->text_deleted_flag = TRUE;
} | 0 | [] | vte | 58bc3a942f198a1a8788553ca72c19d7c1702b74 | 167,833,664,639,339,500,000,000,000,000,000,000,000 | 8 | fix bug #548272
svn path=/trunk/; revision=2365 |
rndr_table(struct buf *ob, const struct buf *header, const struct buf *body, void *opaque)
{
if (ob->size) bufputc(ob, '\n');
BUFPUTSL(ob, "<table><thead>\n");
if (header)
bufput(ob, header->data, header->size);
BUFPUTSL(ob, "</thead><tbody>\n");
if (body)
bufput(ob, body->data, body->size);
BUFPUTSL(ob, "</tbody></table>\n");
} | 0 | [
"CWE-79",
"CWE-74"
] | redcarpet | a699c82292b17c8e6a62e1914d5eccc252272793 | 213,946,594,988,090,800,000,000,000,000,000,000,000 | 11 | Fix a security issue using `:quote` with `:escape_html`
Reported by @johan-smits. |
void run() {
testRegex();
BSONObjBuilder A,B,C;
A.append("x", 2);
B.append("x", 2.0);
C.append("x", 2.1);
BSONObj a = A.done();
BSONObj b = B.done();
BSONObj c = C.done();
verify( !a.binaryEqual( b ) ); // comments on operator==
int cmp = a.woCompare(b);
verify( cmp == 0 );
cmp = a.woCompare(c);
verify( cmp < 0 );
testoid();
testbounds();
testorder();
} | 0 | [
"CWE-20"
] | mongo | f9817a6cf64bdba8e1e1cef30a798110df746b58 | 321,702,457,771,077,650,000,000,000,000,000,000,000 | 18 | SERVER-7769 - turn objcheck on by default and use new fast bson validate |
static unsigned long segment_base(u16 selector)
{
struct desc_ptr gdt;
struct desc_struct *d;
unsigned long table_base;
unsigned long v;
if (!(selector & ~3))
return 0;
native_store_gdt(&gdt);
table_base = gdt.address;
if (selector & 4) { /* from ldt */
u16 ldt_selector = kvm_read_ldt();
if (!(ldt_selector & ~3))
return 0;
table_base = segment_base(ldt_selector);
}
d = (struct desc_struct *)(table_base + (selector & ~7));
v = get_desc_base(d);
#ifdef CONFIG_X86_64
if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
return v;
} | 0 | [
"CWE-400"
] | linux-2.6 | 9581d442b9058d3699b4be568b6e5eae38a41493 | 155,083,390,148,475,900,000,000,000,000,000,000,000 | 29 | KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
xcf_exit (Gimp *gimp)
{
g_return_if_fail (GIMP_IS_GIMP (gimp));
} | 0 | [
"CWE-125"
] | gimp | 702c4227e8b6169f781e4bb5ae4b5733f51ab126 | 314,493,299,654,769,600,000,000,000,000,000,000,000 | 4 | 790783 - buffer overread in XCF parser if version field...
...has no null terminator
Check for the presence of '\0' before using atoi() on the version
string. Patch slightly modified (mitch). |
static struct ldb_message *pdb_samba_dsdb_get_samu_private(
struct pdb_methods *m, struct samu *sam)
{
struct pdb_samba_dsdb_state *state = talloc_get_type_abort(
m->private_data, struct pdb_samba_dsdb_state);
struct ldb_message *msg;
struct dom_sid_buf sidstr;
char *filter;
NTSTATUS status;
msg = (struct ldb_message *)
pdb_get_backend_private_data(sam, m);
if (msg != NULL) {
return talloc_get_type_abort(msg, struct ldb_message);
}
filter = talloc_asprintf(
talloc_tos(),
"(&(objectsid=%s)(objectclass=user))",
dom_sid_str_buf(pdb_get_user_sid(sam), &sidstr));
if (filter == NULL) {
return NULL;
}
status = pdb_samba_dsdb_getsamupriv(state, filter, sam, &msg);
TALLOC_FREE(filter);
if (!NT_STATUS_IS_OK(status)) {
return NULL;
}
return msg;
} | 0 | [
"CWE-200"
] | samba | 0a3aa5f908e351201dc9c4d4807b09ed9eedff77 | 202,290,264,652,760,900,000,000,000,000,000,000,000 | 33 | CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]> |
pcx_header_to_buffer (guint8 *buf)
{
gint i;
gint buf_offset = 0;
for (i = 0; pcx_header_buf_xlate[i].size != 0; i++)
{
g_memmove (buf + buf_offset, pcx_header_buf_xlate[i].address,
pcx_header_buf_xlate[i].size);
buf_offset += pcx_header_buf_xlate[i].size;
}
} | 0 | [
"CWE-190"
] | gimp | a9671395f6573e90316a9d748588c5435216f6ce | 214,100,950,188,447,970,000,000,000,000,000,000,000 | 12 | PCX: Avoid allocation overflows.
Multiplying gint values may overflow unless cast into a larger type. |
static const char *req_handler_field(request_rec *r)
{
return r->handler;
} | 0 | [
"CWE-20"
] | httpd | 78eb3b9235515652ed141353d98c239237030410 | 327,406,620,444,923,000,000,000,000,000,000,000,000 | 4 | *) SECURITY: CVE-2015-0228 (cve.mitre.org)
mod_lua: A maliciously crafted websockets PING after a script
calls r:wsupgrade() can cause a child process crash.
[Edward Lu <Chaosed0 gmail.com>]
Discovered by Guido Vranken <guidovranken gmail.com>
Submitted by: Edward Lu
Committed by: covener
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1657261 13f79535-47bb-0310-9956-ffa450edef68 |
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
else
svm->asid_generation--;
} | 0 | [] | kvm | 854e8bb1aa06c578c2c9145fa6bfe3680ef63b23 | 262,955,374,425,820,400,000,000,000,000,000,000,000 | 9 | KVM: x86: Check non-canonical addresses upon WRMSR
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture since they would
anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
non-canonical address is written on Intel but not on AMD (which ignores the top
32-bits).
Accordingly, this patch injects a #GP on the MSRs which behave identically on
Intel and AMD. To eliminate the differences between the architecutres, the
value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to
canonical value before writing instead of injecting a #GP.
Some references from Intel and AMD manuals:
According to Intel SDM description of WRMSR instruction #GP is expected on
WRMSR "If the source register contains a non-canonical address and ECX
specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
According to AMD manual instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
This patch fixes CVE-2014-3610.
Cc: [email protected]
Signed-off-by: Nadav Amit <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
void Field_iterator_table_ref::set(TABLE_LIST *table)
{
DBUG_ASSERT(table);
first_leaf= table->first_leaf_for_name_resolution();
last_leaf= table->last_leaf_for_name_resolution();
DBUG_ASSERT(first_leaf && last_leaf);
table_ref= first_leaf;
set_field_iterator();
} | 0 | [
"CWE-416"
] | server | c02ebf3510850ba78a106be9974c94c3b97d8585 | 144,440,460,118,742,250,000,000,000,000,000,000,000 | 9 | MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments. |
envoy::extensions::transport_sockets::tls::v3::Secret getServerSecret() {
envoy::extensions::transport_sockets::tls::v3::Secret secret;
secret.set_name(server_cert_);
auto* tls_certificate = secret.mutable_tls_certificate();
tls_certificate->mutable_certificate_chain()->set_filename(
TestEnvironment::runfilesPath("test/config/integration/certs/servercert.pem"));
tls_certificate->mutable_private_key()->set_filename(
TestEnvironment::runfilesPath("test/config/integration/certs/serverkey.pem"));
return secret;
} | 0 | [
"CWE-400"
] | envoy | 0e49a495826ea9e29134c1bd54fdeb31a034f40c | 16,527,101,412,742,420,000,000,000,000,000,000,000 | 10 | http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]> |
ConnectTimeoutCallback() : SSLServerAcceptCallbackBase(nullptr) {
// We don't care if we get invoked or not.
// The client may time out and give up before connAccepted() is even
// called.
state = STATE_SUCCEEDED;
} | 0 | [
"CWE-125"
] | folly | c321eb588909646c15aefde035fd3133ba32cdee | 154,240,645,716,944,460,000,000,000,000,000,000,000 | 6 | Handle close_notify as standard writeErr in AsyncSSLSocket.
Summary: Fixes CVE-2019-11934
Reviewed By: mingtaoy
Differential Revision: D18020613
fbshipit-source-id: db82bb250e53f0d225f1280bd67bc74abd417836 |
static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
{
struct hrtimer *timer = &cpuctx->hrtimer;
struct pmu *pmu = cpuctx->ctx.pmu;
u64 interval;
/* no multiplexing needed for SW PMU */
if (pmu->task_ctx_nr == perf_sw_context)
return;
/*
* check default is sane, if not set then force to
* default interval (1/tick)
*/
interval = pmu->hrtimer_interval_ms;
if (interval < 1)
interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
raw_spin_lock_init(&cpuctx->hrtimer_lock);
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
timer->function = perf_mux_hrtimer_handler;
} | 0 | [
"CWE-401"
] | tip | 7bdb157cdebbf95a1cd94ed2e01b338714075d00 | 281,026,105,115,163,500,000,000,000,000,000,000,000 | 24 | perf/core: Fix a memory leak in perf_event_parse_addr_filter()
As shown through runtime testing, the "filename" allocation is not
always freed in perf_event_parse_addr_filter().
There are three possible ways that this could happen:
- It could be allocated twice on subsequent iterations through the loop,
- or leaked on the success path,
- or on the failure path.
Clean up the code flow to make it obvious that 'filename' is always
freed in the reallocation path and in the two return paths as well.
We rely on the fact that kfree(NULL) is NOP and filename is initialized
with NULL.
This fixes the leak. No other side effects expected.
[ Dan Carpenter: cleaned up the code flow & added a changelog. ]
[ Ingo Molnar: updated the changelog some more. ]
Fixes: 375637bc5249 ("perf/core: Introduce address range filtering")
Signed-off-by: "kiyin(尹亮)" <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Cc: "Srivatsa S. Bhat" <[email protected]>
Cc: Anthony Liguori <[email protected]>
--
kernel/events/core.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-) |
write_viminfo(char_u *file, int forceit)
{
char_u *fname;
FILE *fp_in = NULL; /* input viminfo file, if any */
FILE *fp_out = NULL; /* output viminfo file */
char_u *tempname = NULL; /* name of temp viminfo file */
stat_T st_new; /* mch_stat() of potential new file */
#if defined(UNIX) || defined(VMS)
mode_t umask_save;
#endif
#ifdef UNIX
int shortname = FALSE; /* use 8.3 file name */
stat_T st_old; /* mch_stat() of existing viminfo file */
#endif
#ifdef WIN3264
int hidden = FALSE;
#endif
if (no_viminfo())
return;
fname = viminfo_filename(file); /* may set to default if NULL */
if (fname == NULL)
return;
fp_in = mch_fopen((char *)fname, READBIN);
if (fp_in == NULL)
{
int fd;
/* if it does exist, but we can't read it, don't try writing */
if (mch_stat((char *)fname, &st_new) == 0)
goto end;
/* Create the new .viminfo non-accessible for others, because it may
* contain text from non-accessible documents. It is up to the user to
* widen access (e.g. to a group). This may also fail if there is a
* race condition, then just give up. */
fd = mch_open((char *)fname,
O_CREAT|O_EXTRA|O_EXCL|O_WRONLY|O_NOFOLLOW, 0600);
if (fd < 0)
goto end;
fp_out = fdopen(fd, WRITEBIN);
}
else
{
/*
* There is an existing viminfo file. Create a temporary file to
* write the new viminfo into, in the same directory as the
* existing viminfo file, which will be renamed once all writing is
* successful.
*/
#ifdef UNIX
/*
* For Unix we check the owner of the file. It's not very nice to
* overwrite a user's viminfo file after a "su root", with a
* viminfo file that the user can't read.
*/
st_old.st_dev = (dev_t)0;
st_old.st_ino = 0;
st_old.st_mode = 0600;
if (mch_stat((char *)fname, &st_old) == 0
&& getuid() != ROOT_UID
&& !(st_old.st_uid == getuid()
? (st_old.st_mode & 0200)
: (st_old.st_gid == getgid()
? (st_old.st_mode & 0020)
: (st_old.st_mode & 0002))))
{
int tt = msg_didany;
/* avoid a wait_return for this message, it's annoying */
semsg(_("E137: Viminfo file is not writable: %s"), fname);
msg_didany = tt;
fclose(fp_in);
goto end;
}
#endif
#ifdef WIN3264
/* Get the file attributes of the existing viminfo file. */
hidden = mch_ishidden(fname);
#endif
/*
* Make tempname, find one that does not exist yet.
* Beware of a race condition: If someone logs out and all Vim
* instances exit at the same time a temp file might be created between
* stat() and open(). Use mch_open() with O_EXCL to avoid that.
* May try twice: Once normal and once with shortname set, just in
* case somebody puts his viminfo file in an 8.3 filesystem.
*/
for (;;)
{
int next_char = 'z';
char_u *wp;
tempname = buf_modname(
#ifdef UNIX
shortname,
#else
FALSE,
#endif
fname,
#ifdef VMS
(char_u *)"-tmp",
#else
(char_u *)".tmp",
#endif
FALSE);
if (tempname == NULL) /* out of memory */
break;
/*
* Try a series of names. Change one character, just before
* the extension. This should also work for an 8.3
* file name, when after adding the extension it still is
* the same file as the original.
*/
wp = tempname + STRLEN(tempname) - 5;
if (wp < gettail(tempname)) /* empty file name? */
wp = gettail(tempname);
for (;;)
{
/*
* Check if tempfile already exists. Never overwrite an
* existing file!
*/
if (mch_stat((char *)tempname, &st_new) == 0)
{
#ifdef UNIX
/*
* Check if tempfile is same as original file. May happen
* when modname() gave the same file back. E.g. silly
* link, or file name-length reached. Try again with
* shortname set.
*/
if (!shortname && st_new.st_dev == st_old.st_dev
&& st_new.st_ino == st_old.st_ino)
{
VIM_CLEAR(tempname);
shortname = TRUE;
break;
}
#endif
}
else
{
/* Try creating the file exclusively. This may fail if
* another Vim tries to do it at the same time. */
#ifdef VMS
/* fdopen() fails for some reason */
umask_save = umask(077);
fp_out = mch_fopen((char *)tempname, WRITEBIN);
(void)umask(umask_save);
#else
int fd;
/* Use mch_open() to be able to use O_NOFOLLOW and set file
* protection:
* Unix: same as original file, but strip s-bit. Reset
* umask to avoid it getting in the way.
* Others: r&w for user only. */
# ifdef UNIX
umask_save = umask(0);
fd = mch_open((char *)tempname,
O_CREAT|O_EXTRA|O_EXCL|O_WRONLY|O_NOFOLLOW,
(int)((st_old.st_mode & 0777) | 0600));
(void)umask(umask_save);
# else
fd = mch_open((char *)tempname,
O_CREAT|O_EXTRA|O_EXCL|O_WRONLY|O_NOFOLLOW, 0600);
# endif
if (fd < 0)
{
fp_out = NULL;
# ifdef EEXIST
/* Avoid trying lots of names while the problem is lack
* of premission, only retry if the file already
* exists. */
if (errno != EEXIST)
break;
# endif
}
else
fp_out = fdopen(fd, WRITEBIN);
#endif /* VMS */
if (fp_out != NULL)
break;
}
/* Assume file exists, try again with another name. */
if (next_char == 'a' - 1)
{
/* They all exist? Must be something wrong! Don't write
* the viminfo file then. */
semsg(_("E929: Too many viminfo temp files, like %s!"),
tempname);
break;
}
*wp = next_char;
--next_char;
}
if (tempname != NULL)
break;
/* continue if shortname was set */
}
#if defined(UNIX) && defined(HAVE_FCHOWN)
if (tempname != NULL && fp_out != NULL)
{
stat_T tmp_st;
/*
* Make sure the original owner can read/write the tempfile and
* otherwise preserve permissions, making sure the group matches.
*/
if (mch_stat((char *)tempname, &tmp_st) >= 0)
{
if (st_old.st_uid != tmp_st.st_uid)
/* Changing the owner might fail, in which case the
* file will now owned by the current user, oh well. */
vim_ignored = fchown(fileno(fp_out), st_old.st_uid, -1);
if (st_old.st_gid != tmp_st.st_gid
&& fchown(fileno(fp_out), -1, st_old.st_gid) == -1)
/* can't set the group to what it should be, remove
* group permissions */
(void)mch_setperm(tempname, 0600);
}
else
/* can't stat the file, set conservative permissions */
(void)mch_setperm(tempname, 0600);
}
#endif
}
/*
* Check if the new viminfo file can be written to.
*/
if (fp_out == NULL)
{
semsg(_("E138: Can't write viminfo file %s!"),
(fp_in == NULL || tempname == NULL) ? fname : tempname);
if (fp_in != NULL)
fclose(fp_in);
goto end;
}
if (p_verbose > 0)
{
verbose_enter();
smsg(_("Writing viminfo file \"%s\""), fname);
verbose_leave();
}
viminfo_errcnt = 0;
do_viminfo(fp_in, fp_out, forceit ? 0 : (VIF_WANT_INFO | VIF_WANT_MARKS));
if (fclose(fp_out) == EOF)
++viminfo_errcnt;
if (fp_in != NULL)
{
fclose(fp_in);
/* In case of an error keep the original viminfo file. Otherwise
* rename the newly written file. Give an error if that fails. */
if (viminfo_errcnt == 0)
{
if (vim_rename(tempname, fname) == -1)
{
++viminfo_errcnt;
semsg(_("E886: Can't rename viminfo file to %s!"), fname);
}
# ifdef WIN3264
/* If the viminfo file was hidden then also hide the new file. */
else if (hidden)
mch_hide(fname);
# endif
}
if (viminfo_errcnt > 0)
mch_remove(tempname);
}
end:
vim_free(fname);
vim_free(tempname);
} | 0 | [
"CWE-78"
] | vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 247,151,992,121,338,870,000,000,000,000,000,000,000 | 288 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
/* Initialize an inflate stream with the default window size.
 *
 * Thin convenience wrapper: delegates to mz_inflateInit2() with
 * MZ_DEFAULT_WINDOW_BITS, so all parameter validation and the returned
 * status code come from mz_inflateInit2().
 */
int mz_inflateInit(mz_streamp pStream)
{
    return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}
"CWE-20",
"CWE-190"
] | tinyexr | a685e3332f61cd4e59324bf3f669d36973d64270 | 76,150,580,244,345,740,000,000,000,000,000,000,000 | 3 | Make line_no with too large value(2**20) invalid. Fixes #124 |
/*
 * Python method: look up an mod_ssl variable by name for the current request.
 *
 * Accepts one argument (the variable name, a byte string; on Python 3 a
 * unicode string is converted via Latin-1 first). Returns the variable's
 * value as a string, or None when mod_ssl is not loaded or the variable is
 * unset. Raises RuntimeError when the request object has expired and
 * TypeError for a non-(byte-)string argument.
 *
 * Reference counting fix: PyArg_ParseTuple with "O" yields a BORROWED
 * reference, so the original code's Py_DECREF(item) on the error paths
 * over-decremented the caller's object; conversely, the OWNED reference
 * returned by PyUnicode_AsLatin1String() was never released on the Python 3
 * success paths (leak). We now track the owned conversion result in
 * 'latin_item' and release exactly that, after 'name' (which points into
 * its internal buffer) is no longer needed.
 */
static PyObject *Adapter_ssl_var_lookup(AdapterObject *self, PyObject *args)
{
    APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *ssl_var_lookup = 0;

    PyObject *item = NULL;       /* borrowed (from PyArg_ParseTuple) */
    PyObject *latin_item = NULL; /* owned (Latin-1 conversion), or NULL */

    char *name = 0;
    char *value = 0;

    if (!self->r) {
        PyErr_SetString(PyExc_RuntimeError, "request object has expired");
        return NULL;
    }

    if (!PyArg_ParseTuple(args, "O:ssl_var_lookup", &item))
        return NULL;

#if PY_MAJOR_VERSION >= 3
    if (PyUnicode_Check(item)) {
        latin_item = PyUnicode_AsLatin1String(item);

        if (!latin_item) {
            PyErr_Format(PyExc_TypeError, "byte string value expected, "
                         "value containing non 'latin-1' characters found");
            /* 'item' is borrowed; nothing to release here. */
            return NULL;
        }

        item = latin_item;
    }
#endif

    if (!PyString_Check(item)) {
        PyErr_Format(PyExc_TypeError, "byte string value expected, value "
                     "of type %.200s found", item->ob_type->tp_name);
        Py_XDECREF(latin_item);
        return NULL;
    }

    /* 'name' aliases item's internal buffer; keep latin_item alive until
     * after the lookup call below. */
    name = PyString_AsString(item);

    ssl_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup);

    if (ssl_var_lookup == 0)
    {
        /* mod_ssl not loaded: report "no value" rather than an error. */
        Py_XDECREF(latin_item);
        Py_XINCREF(Py_None);
        return Py_None;
    }

    value = ssl_var_lookup(self->r->pool, self->r->server,
                           self->r->connection, self->r, name);

    /* 'value' is pool-allocated by mod_ssl; safe to drop latin_item now. */
    Py_XDECREF(latin_item);

    if (!value) {
        Py_XINCREF(Py_None);
        return Py_None;
    }

#if PY_MAJOR_VERSION >= 3
    return PyUnicode_DecodeLatin1(value, strlen(value), NULL);
#else
    return PyString_FromString(value);
#endif
}
"CWE-264"
] | mod_wsgi | d9d5fea585b23991f76532a9b07de7fcd3b649f4 | 35,200,459,761,959,860,000,000,000,000,000,000,000 | 66 | Local privilege escalation when using daemon mode. (CVE-2014-0240) |
/*
 * Return the process-lifetime cached boot id as UuidData.
 *
 * The value is read once from /proc/sys/kernel/random/boot_id; if the file
 * is missing or its content does not parse as a UUID, a random UUID is
 * generated instead and the entry is flagged as fake.  Initialization
 * happens exactly once (g_once_init_enter) and the result is published via
 * an atomic pointer, so concurrent callers are safe and later calls take
 * the lock-free fast path.
 */
_boot_id_get(void)
{
    static const UuidData *volatile p_boot_id;
    const UuidData *d;

again:
    /* Fast path: already initialized and published. */
    d = g_atomic_pointer_get(&p_boot_id);
    if (G_UNLIKELY(!d)) {
        static gsize lock;
        static UuidData boot_id;
        gs_free char * contents = NULL;
        NMUuid uuid;
        gboolean is_fake = FALSE;

        /* Read errors are deliberately ignored: a NULL/invalid content
         * falls through to the random-UUID fallback below. */
        nm_utils_file_get_contents(-1,
                                   "/proc/sys/kernel/random/boot_id",
                                   0,
                                   NM_UTILS_FILE_GET_CONTENTS_FLAG_NONE,
                                   &contents,
                                   NULL,
                                   NULL,
                                   NULL);
        if (!contents || !_nm_utils_uuid_parse(nm_strstrip(contents), &uuid)) {
            /* generate a random UUID instead. */
            is_fake = TRUE;
            _nm_utils_uuid_generate_random(&uuid);
        }

        /* Lost the init race against another thread: re-read the pointer
         * that the winner published. */
        if (!g_once_init_enter(&lock))
            goto again;

        d = _uuid_data_init(&boot_id, FALSE, is_fake, &uuid);
        g_atomic_pointer_set(&p_boot_id, d);
        g_once_init_leave(&lock, 1);
    }

    return d;
}
"CWE-20"
] | NetworkManager | 420784e342da4883f6debdfe10cde68507b10d27 | 223,281,371,846,247,700,000,000,000,000,000,000,000 | 38 | core: fix crash in nm_wildcard_match_check()
It's not entirely clear how to treat %NULL.
Clearly "match.interface-name=eth0" should not
match with an interface %NULL. But what about
"match.interface-name=!eth0"? It's now implemented
that negative matches still succeed against %NULL.
What about "match.interface-name=*"? That probably
should also match with %NULL. So we treat %NULL really
like "".
Against commit 11cd443448bc ('iwd: Don't call IWD methods when device
unmanaged'), we got this backtrace:
#0 0x00007f1c164069f1 in __strnlen_avx2 () at ../sysdeps/x86_64/multiarch/strlen-avx2.S:62
#1 0x00007f1c1637ac9e in __fnmatch (pattern=<optimized out>, string=<optimized out>, string@entry=0x0, flags=flags@entry=0) at fnmatch.c:379
p = 0x0
res = <optimized out>
orig_pattern = <optimized out>
n = <optimized out>
wpattern = 0x7fff8d860730 L"pci-0000:03:00.0"
ps = {__count = 0, __value = {__wch = 0, __wchb = "\000\000\000"}}
wpattern_malloc = 0x0
wstring_malloc = 0x0
wstring = <optimized out>
alloca_used = 80
__PRETTY_FUNCTION__ = "__fnmatch"
#2 0x0000564484a978bf in nm_wildcard_match_check (str=0x0, patterns=<optimized out>, num_patterns=<optimized out>) at src/core/nm-core-utils.c:1959
is_inverted = 0
is_mandatory = 0
match = <optimized out>
p = 0x564486c43fa0 "pci-0000:03:00.0"
has_optional = 0
has_any_optional = 0
i = <optimized out>
#3 0x0000564484bf4797 in check_connection_compatible (self=<optimized out>, connection=<optimized out>, error=0x0) at src/core/devices/nm-device.c:7499
patterns = <optimized out>
device_driver = 0x564486c76bd0 "veth"
num_patterns = 1
priv = 0x564486cbe0b0
__func__ = "check_connection_compatible"
device_iface = <optimized out>
local = 0x564486c99a60
conn_iface = 0x0
klass = <optimized out>
s_match = 0x564486c63df0 [NMSettingMatch]
#4 0x0000564484c38491 in check_connection_compatible (device=0x564486cbe590 [NMDeviceVeth], connection=0x564486c6b160, error=0x0) at src/core/devices/nm-device-ethernet.c:348
self = 0x564486cbe590 [NMDeviceVeth]
s_wired = <optimized out>
Fixes: 3ced486f4162 ('libnm/match: extend syntax for match patterns with '|', '&', '!' and '\\'')
https://bugzilla.redhat.com/show_bug.cgi?id=1942741 |
/*
 * Collect the per-setting diff entries of 'connection_diff' into an array,
 * resolving each setting name against 'connection' and (optionally)
 * 'diff_base', then sort the array with _log_connection_sort_hashes_fcn.
 *
 * Every diff entry must resolve to a setting in at least one of the two
 * connections (asserted below).  The caller owns the returned GArray.
 */
_log_connection_sort_hashes(NMConnection *connection,
                            NMConnection *diff_base,
                            GHashTable * connection_diff)
{
    LogConnectionSettingData entry;
    GHashTableIter h_iter;
    GArray * result;

    /* Pre-size the array to the number of diff entries to avoid regrowth. */
    result = g_array_sized_new(TRUE,
                               FALSE,
                               sizeof(LogConnectionSettingData),
                               g_hash_table_size(connection_diff));

    g_hash_table_iter_init(&h_iter, connection_diff);
    while (g_hash_table_iter_next(&h_iter,
                                  (gpointer) &entry.name,
                                  (gpointer) &entry.setting_diff)) {
        entry.setting = nm_connection_get_setting_by_name(connection, entry.name);
        entry.diff_base_setting =
            diff_base ? nm_connection_get_setting_by_name(diff_base, entry.name) : NULL;
        /* A diff entry with no setting on either side would be a bug. */
        g_assert(entry.setting || entry.diff_base_setting);
        g_array_append_val(result, entry);
    }

    g_array_sort(result, _log_connection_sort_hashes_fcn);
    return result;
}
"CWE-20"
] | NetworkManager | 420784e342da4883f6debdfe10cde68507b10d27 | 226,765,615,049,424,280,000,000,000,000,000,000,000 | 27 | core: fix crash in nm_wildcard_match_check()
It's not entirely clear how to treat %NULL.
Clearly "match.interface-name=eth0" should not
match with an interface %NULL. But what about
"match.interface-name=!eth0"? It's now implemented
that negative matches still succeed against %NULL.
What about "match.interface-name=*"? That probably
should also match with %NULL. So we treat %NULL really
like "".
Against commit 11cd443448bc ('iwd: Don't call IWD methods when device
unmanaged'), we got this backtrace:
#0 0x00007f1c164069f1 in __strnlen_avx2 () at ../sysdeps/x86_64/multiarch/strlen-avx2.S:62
#1 0x00007f1c1637ac9e in __fnmatch (pattern=<optimized out>, string=<optimized out>, string@entry=0x0, flags=flags@entry=0) at fnmatch.c:379
p = 0x0
res = <optimized out>
orig_pattern = <optimized out>
n = <optimized out>
wpattern = 0x7fff8d860730 L"pci-0000:03:00.0"
ps = {__count = 0, __value = {__wch = 0, __wchb = "\000\000\000"}}
wpattern_malloc = 0x0
wstring_malloc = 0x0
wstring = <optimized out>
alloca_used = 80
__PRETTY_FUNCTION__ = "__fnmatch"
#2 0x0000564484a978bf in nm_wildcard_match_check (str=0x0, patterns=<optimized out>, num_patterns=<optimized out>) at src/core/nm-core-utils.c:1959
is_inverted = 0
is_mandatory = 0
match = <optimized out>
p = 0x564486c43fa0 "pci-0000:03:00.0"
has_optional = 0
has_any_optional = 0
i = <optimized out>
#3 0x0000564484bf4797 in check_connection_compatible (self=<optimized out>, connection=<optimized out>, error=0x0) at src/core/devices/nm-device.c:7499
patterns = <optimized out>
device_driver = 0x564486c76bd0 "veth"
num_patterns = 1
priv = 0x564486cbe0b0
__func__ = "check_connection_compatible"
device_iface = <optimized out>
local = 0x564486c99a60
conn_iface = 0x0
klass = <optimized out>
s_match = 0x564486c63df0 [NMSettingMatch]
#4 0x0000564484c38491 in check_connection_compatible (device=0x564486cbe590 [NMDeviceVeth], connection=0x564486c6b160, error=0x0) at src/core/devices/nm-device-ethernet.c:348
self = 0x564486cbe590 [NMDeviceVeth]
s_wired = <optimized out>
Fixes: 3ced486f4162 ('libnm/match: extend syntax for match patterns with '|', '&', '!' and '\\'')
https://bugzilla.redhat.com/show_bug.cgi?id=1942741 |
/*
 * Apply new buffer parameters to a rawmidi input substream.
 *
 * Pending input data is drained first so the runtime buffer can be
 * reallocated safely.  Returns the result of resize_runtime_buffer()
 * (0 on success, negative error code on failure).
 *
 * NOTE(review): the final 'true' argument presumably selects the input
 * direction — confirm against resize_runtime_buffer()'s signature.
 */
int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
			     struct snd_rawmidi_params *params)
{
	snd_rawmidi_drain_input(substream);
	return resize_runtime_buffer(substream->runtime, params, true);
}
"CWE-416"
] | linux | c1f6e3c818dd734c30f6a7eeebf232ba2cf3181d | 212,674,160,882,740,040,000,000,000,000,000,000,000 | 6 | ALSA: rawmidi: Fix racy buffer resize under concurrent accesses
The rawmidi core allows user to resize the runtime buffer via ioctl,
and this may lead to UAF when performed during concurrent reads or
writes: the read/write functions unlock the runtime lock temporarily
during copying form/to user-space, and that's the race window.
This patch fixes the hole by introducing a reference counter for the
runtime buffer read/write access and returns -EBUSY error when the
resize is performed concurrently against read/write.
Note that the ref count field is a simple integer instead of
refcount_t here, since the all contexts accessing the buffer is
basically protected with a spinlock, hence we need no expensive atomic
ops. Also, note that this busy check is needed only against read /
write functions, and not in receive/transmit callbacks; the race can
happen only at the spinlock hole mentioned in the above, while the
whole function is protected for receive / transmit callbacks.
Reported-by: butt3rflyh4ck <[email protected]>
Cc: <[email protected]>
Link: https://lore.kernel.org/r/CAFcO6XMWpUVK_yzzCpp8_XP7+=oUpQvuBeCbMffEDkpe8jWrfg@mail.gmail.com
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Takashi Iwai <[email protected]> |
/*
 * Hash callback for the chain-name hashtable.
 *
 * The key is the NUL-terminated chain name; note that the supplied 'len'
 * is ignored and the hashed length is taken from strlen() instead.
 */
static u32 nft_chain_hash(const void *data, u32 len, u32 seed)
{
	const char *chain_name = data;

	return jhash(chain_name, strlen(chain_name), seed);
}
"CWE-665"
] | linux | ad9f151e560b016b6ad3280b48e42fa11e1a5440 | 131,910,340,310,684,800,000,000,000,000,000,000,000 | 6 | netfilter: nf_tables: initialize set before expression setup
nft_set_elem_expr_alloc() needs an initialized set if expression sets on
the NFT_EXPR_GC flag. Move set fields initialization before expression
setup.
[4512935.019450] ==================================================================
[4512935.019456] BUG: KASAN: null-ptr-deref in nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019487] Read of size 8 at addr 0000000000000070 by task nft/23532
[4512935.019494] CPU: 1 PID: 23532 Comm: nft Not tainted 5.12.0-rc4+ #48
[...]
[4512935.019502] Call Trace:
[4512935.019505] dump_stack+0x89/0xb4
[4512935.019512] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019536] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019560] kasan_report.cold.12+0x5f/0xd8
[4512935.019566] ? nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019590] nft_set_elem_expr_alloc+0x84/0xd0 [nf_tables]
[4512935.019615] nf_tables_newset+0xc7f/0x1460 [nf_tables]
Reported-by: [email protected]
Fixes: 65038428b2c6 ("netfilter: nf_tables: allow to specify stateful expression in set definition")
Signed-off-by: Pablo Neira Ayuso <[email protected]> |
// Whether the stream's data is accessible as a memory-mapped region.
// This default implementation always reports false; presumably subclasses
// that support memory mapping override it — confirm in derived classes.
virtual bool isMemoryMapped () const { return false;}
"CWE-787"
] | openexr | ae6d203892cc9311917a7f4f05354ef792b3e58e | 89,501,763,959,852,810,000,000,000,000,000,000,000 | 1 | Handle xsampling and bad seekg() calls in exrcheck (#872)
* fix exrcheck xsampling!=1
Signed-off-by: Peter Hillman <[email protected]>
* fix handling bad seekg() calls in exrcheck
Signed-off-by: Peter Hillman <[email protected]>
* fix deeptile detection in multipart files
Signed-off-by: Peter Hillman <[email protected]> |
Subsets and Splits