func
stringlengths 0
484k
| target
int64 0
1
| cwe
sequencelengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
/*
 * Read one 16-bit register from the TSC210x audio register page.
 *
 * s:   emulated TSC210x device state
 * reg: audio-page register index (0x00 .. 0x1d)
 *
 * Returns the register value, or 0xffff for an unknown register.
 * Reading Audio Control 3 (0x06) has a side effect: bits 6-7 of the
 * stored value are cleared (read-to-clear status bits).
 */
static uint16_t tsc2102_audio_register_read(TSC210xState *s, int reg)
{
int l_ch, r_ch;
uint16_t val;
switch (reg) {
case 0x00: /* Audio Control 1 */
return s->audio_ctrl1;
case 0x01:
/* Unimplemented register: fixed value. */
return 0xff00;
case 0x02: /* DAC Volume Control */
return s->volume;
case 0x03:
/* Unimplemented register: fixed value. */
return 0x8b00;
case 0x04: /* Audio Control 2 */
/*
 * Bits 3 and 2 report left/right soft-stepping completion.  When
 * soft-stepping is enabled and bit 10 of dac_power is clear
 * (presumably the DAC power-down bit - confirm against the TSC2102
 * datasheet), the volume ramp is modelled as finished once
 * TSC_SOFTSTEP_DELAY ns have elapsed since the last volume change.
 */
l_ch = 1;
r_ch = 1;
if (s->softstep && !(s->dac_power & (1 << 10))) {
l_ch = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >
s->volume_change + TSC_SOFTSTEP_DELAY);
r_ch = (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >
s->volume_change + TSC_SOFTSTEP_DELAY);
}
return s->audio_ctrl2 | (l_ch << 3) | (r_ch << 2);
case 0x05: /* Stereo DAC Power Control */
/*
 * Bit 6 models a power-down-complete flag: set once
 * TSC_POWEROFF_DELAY ns have passed since power-down was requested.
 */
return 0x2aa0 | s->dac_power |
(((s->dac_power & (1 << 10)) &&
(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >
s->powerdown + TSC_POWEROFF_DELAY)) << 6);
case 0x06: /* Audio Control 3 */
/* Bit 0 always reads back as set; bits 6-7 are read-to-clear. */
val = s->audio_ctrl3 | 0x0001;
s->audio_ctrl3 &= 0xff3f;
return val;
case 0x07: /* LCH_BASS_BOOST_N0 */
case 0x08: /* LCH_BASS_BOOST_N1 */
case 0x09: /* LCH_BASS_BOOST_N2 */
case 0x0a: /* LCH_BASS_BOOST_N3 */
case 0x0b: /* LCH_BASS_BOOST_N4 */
case 0x0c: /* LCH_BASS_BOOST_N5 */
case 0x0d: /* LCH_BASS_BOOST_D1 */
case 0x0e: /* LCH_BASS_BOOST_D2 */
case 0x0f: /* LCH_BASS_BOOST_D4 */
case 0x10: /* LCH_BASS_BOOST_D5 */
case 0x11: /* RCH_BASS_BOOST_N0 */
case 0x12: /* RCH_BASS_BOOST_N1 */
case 0x13: /* RCH_BASS_BOOST_N2 */
case 0x14: /* RCH_BASS_BOOST_N3 */
case 0x15: /* RCH_BASS_BOOST_N4 */
case 0x16: /* RCH_BASS_BOOST_N5 */
case 0x17: /* RCH_BASS_BOOST_D1 */
case 0x18: /* RCH_BASS_BOOST_D2 */
case 0x19: /* RCH_BASS_BOOST_D4 */
case 0x1a: /* RCH_BASS_BOOST_D5 */
/* Bass-boost filter coefficients are stored as a flat array
 * indexed relative to the first coefficient register. */
return s->filter_data[reg - 0x07];
case 0x1b: /* PLL Programmability 1 */
return s->pll[0];
case 0x1c: /* PLL Programmability 2 */
return s->pll[1];
case 0x1d: /* Audio Control 4 */
/* Bit 14 reports soft-stepping *disabled*. */
return (!s->softstep) << 14;
default:
#ifdef TSC_VERBOSE
fprintf(stderr, "tsc2102_audio_register_read: "
"no such register: 0x%02x\n", reg);
#endif
return 0xffff;
}
} | 0 | [
"CWE-119"
] | qemu | 5193be3be35f29a35bc465036cd64ad60d43385f | 213,021,621,324,938,600,000,000,000,000,000,000,000 | 80 | tsc210x: fix buffer overrun on invalid state load
CVE-2013-4539
s->precision, nextprecision, function and nextfunction
come from wire and are used
as idx into resolution[] in TSC_CUT_RESOLUTION.
Validate after load to avoid buffer overrun.
Cc: Andreas Färber <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Juan Quintela <[email protected]> |
/*
 * Execute CPUID leaf 1 and return the EDX feature-flag word.
 * MSVC x86 inline-assembly only; non-MSVC builds presumably use a
 * different detection path elsewhere in the file (TODO confirm).
 * Callers test EDX bits for CPU features (e.g. SIMD support).
 */
static int stbi__cpuid3(void)
{
int res;
__asm {
mov eax,1
cpuid
mov res,edx
}
return res;
} | 0 | [
"CWE-787"
] | stb | 5ba0baaa269b3fd681828e0e3b3ac0f1472eaf40 | 156,711,858,641,022,150,000,000,000,000,000,000,000 | 10 | stb_image: Reject fractional JPEG component subsampling ratios
The component resamplers are not written to support this and I've
never seen it happen in a real (non-crafted) JPEG file so I'm
fine rejecting this as outright corrupt.
Fixes issue #1178. |
// Compute and attach a CephX signature to an outgoing message.
// Returns 0 on success (or immediately when signing is disabled by
// configuration), or a negative error from signature calculation.
int CephxSessionHandler::sign_message(Message *m)
{
// If runtime signing option is off, just return success without signing.
if (!cct->_conf->cephx_sign_messages) {
return 0;
}
uint64_t sig;
int r = _calc_signature(m, &sig);
if (r < 0)
return r;
// Record the signature in the footer and flag the message as signed so
// the receiving end knows to verify it.
ceph_msg_footer& f = m->get_footer();
f.sig = sig;
f.flags = (unsigned)f.flags | CEPH_MSG_FOOTER_SIGNED;
ldout(cct, 20) << "Putting signature in client message(seq # " << m->get_seq()
<< "): sig = " << sig << dendl;
return 0;
} | 0 | [
"CWE-287",
"CWE-284"
] | ceph | 8f396cf35a3826044b089141667a196454c0a587 | 120,522,896,284,131,660,000,000,000,000,000,000,000 | 19 | auth/cephx/CephxSessionHandler: implement CEPHX_V2 calculation mode
Derive the signature from the entire buffer (both cipher blocks).
Signed-off-by: Sage Weil <[email protected]> |
/* Emit a push-message aggregate header ('>') with the given element
 * count.  Push messages exist only in RESP3, hence the protocol
 * version assertion. */
void addReplyPushLen(client *c, long length) {
serverAssert(c->resp >= 3);
addReplyAggregateLen(c,length,'>');
} | 0 | [
"CWE-770"
] | redis | 5674b0057ff2903d43eaff802017eddf37c360f8 | 315,831,822,806,756,640,000,000,000,000,000,000,000 | 4 | Prevent unauthenticated client from easily consuming lots of memory (CVE-2021-32675)
This change sets a low limit for multibulk and bulk length in the
protocol for unauthenticated connections, so that they can't easily
cause redis to allocate massive amounts of memory by sending just a few
characters on the network.
The new limits are 10 arguments of 16kb each (instead of 1m of 512mb) |
/* Convert a UTF-8 tag name from expat into the parser's target
 * encoding, upper-casing it when case folding is enabled.
 * Returns a newly allocated string owned by the caller. */
static char *_xml_decode_tag(xml_parser *parser, const char *tag)
{
char *newstr;
int out_len;
newstr = xml_utf8_decode(tag, strlen(tag), &out_len, parser->target_encoding);
if (parser->case_folding) {
/* Fold in place using the decoded length reported by the decoder. */
php_strtoupper(newstr, out_len);
}
return newstr;
} | 0 | [
"CWE-787"
] | php-src | 7d163e8a0880ae8af2dd869071393e5dc07ef271 | 248,483,712,481,804,180,000,000,000,000,000,000,000 | 13 | truncate results at depth of 255 to prevent corruption |
/*
 * Find refs in a single remote matching the given name, branch and
 * arch constraints (the opt_* parameters may be NULL to widen the
 * match; kinds/flags further filter the candidates).
 *
 * Returns a NULL-terminated array of matching ref strings that the
 * caller owns, or NULL with @error set when the remote state cannot
 * be fetched, the remote's refs cannot be listed, or matching fails.
 */
flatpak_dir_find_remote_refs (FlatpakDir *self,
const char *remote,
const char *name,
const char *opt_branch,
const char *opt_default_branch,
const char *opt_arch,
const char *opt_default_arch,
FlatpakKinds kinds,
FindMatchingRefsFlags flags,
GCancellable *cancellable,
GError **error)
{
g_autofree char *collection_id = NULL;
g_autoptr(GHashTable) remote_refs = NULL;
g_autoptr(FlatpakRemoteState) state = NULL;
GPtrArray *matched_refs;
state = flatpak_dir_get_remote_state_optional (self, remote, cancellable, error);
if (state == NULL)
return NULL;
if (!flatpak_dir_list_all_remote_refs (self, state,
&remote_refs, cancellable, error))
return NULL;
collection_id = flatpak_dir_get_remote_collection_id (self, remote);
matched_refs = find_matching_refs (remote_refs,
name,
opt_branch,
opt_default_branch,
opt_arch,
opt_default_arch,
collection_id,
kinds,
flags,
error);
if (matched_refs == NULL)
return NULL;
/* NULL-terminate so the pdata can be returned as a strv. */
g_ptr_array_add (matched_refs, NULL);
return (char **) g_ptr_array_free (matched_refs, FALSE);
} | 0 | [
"CWE-668"
] | flatpak | cd2142888fc4c199723a0dfca1f15ea8788a5483 | 260,794,347,971,241,360,000,000,000,000,000,000,000 | 42 | Don't expose /proc when running apply_extra
As shown by CVE-2019-5736, it is sometimes possible for the sandbox
app to access outside files using /proc/self/exe. This is not
typically an issue for flatpak as the sandbox runs as the user which
has no permissions to e.g. modify the host files.
However, when installing apps using extra-data into the system repo
we *do* actually run a sandbox as root. So, in this case we disable mounting
/proc in the sandbox, which will neuter attacks like this. |
// Parser callback fired at the start of each HTTP/1 message: reset all
// per-message state (protocol guess, trailer flag, header parse state,
// header map) before any header bytes arrive.
Status ConnectionImpl::onMessageBegin() {
ENVOY_CONN_LOG(trace, "message begin", connection_);
// Make sure that if HTTP/1.0 and HTTP/1.1 requests share a connection Envoy correctly sets
// protocol for each request. Envoy defaults to 1.1 but sets the protocol to 1.0 where applicable
// in onHeadersCompleteBase
protocol_ = Protocol::Http11;
processing_trailers_ = false;
header_parsing_state_ = HeaderParsingState::Field;
allocHeaders(statefulFormatterFromSettings(codec_settings_));
return onMessageBeginBase();
} | 0 | [
"CWE-416"
] | envoy | fe7c69c248f4fe5a9080c7ccb35275b5218bb5ab | 73,869,044,353,621,835,000,000,000,000,000,000,000 | 11 | internal redirect: fix a lifetime bug (#785)
Signed-off-by: Alyssa Wilk <[email protected]>
Signed-off-by: Matt Klein <[email protected]>
Signed-off-by: Pradeep Rao <[email protected]> |
/*
 * Format a timestamp as "YYYY-MM-DD HH:MM:SS TZ" in log_timezone.
 *
 * NOTE: returns a pointer to a static buffer, so the result is
 * overwritten by the next call; the function is not reentrant and
 * callers must copy the string if they need to keep it.
 */
str_time(pg_time_t tnow)
{
static char buf[128];
pg_strftime(buf, sizeof(buf),
"%Y-%m-%d %H:%M:%S %Z",
pg_localtime(&tnow, log_timezone));
return buf;
} | 0 | [
"CWE-119"
] | postgres | 01824385aead50e557ca1af28640460fa9877d51 | 115,694,313,666,752,480,000,000,000,000,000,000,000 | 10 | Prevent potential overruns of fixed-size buffers.
Coverity identified a number of places in which it couldn't prove that a
string being copied into a fixed-size buffer would fit. We believe that
most, perhaps all of these are in fact safe, or are copying data that is
coming from a trusted source so that any overrun is not really a security
issue. Nonetheless it seems prudent to forestall any risk by using
strlcpy() and similar functions.
Fixes by Peter Eisentraut and Jozef Mlich based on Coverity reports.
In addition, fix a potential null-pointer-dereference crash in
contrib/chkpass. The crypt(3) function is defined to return NULL on
failure, but chkpass.c didn't check for that before using the result.
The main practical case in which this could be an issue is if libc is
configured to refuse to execute unapproved hashing algorithms (e.g.,
"FIPS mode"). This ideally should've been a separate commit, but
since it touches code adjacent to one of the buffer overrun changes,
I included it in this commit to avoid last-minute merge issues.
This issue was reported by Honza Horak.
Security: CVE-2014-0065 for buffer overruns, CVE-2014-0066 for crypt() |
// Accessor for the stored MemoryAllocator pointer; does not transfer
// ownership to the caller.
MemoryAllocator* memory_allocator() {
return memory_allocator_;
} | 0 | [
"CWE-20",
"CWE-119"
] | node | 530af9cb8e700e7596b3ec812bad123c9fa06356 | 151,693,370,268,084,880,000,000,000,000,000,000,000 | 3 | v8: Interrupts must not mask stack overflow.
Backport of https://codereview.chromium.org/339883002 |
// Quantized bilinear image resize: thin wrapper that forwards every
// argument unchanged to the reference implementation.
// NOTE(review): no validation of the geometry, scale factors or the
// in_min/in_max quantization range happens here - presumably the
// caller (the op kernel) has already checked them; confirm.
void ResizeImage(typename TTypes<T, 4>::ConstTensor images,
const int batch_size, const int64 in_height,
const int64 in_width, const int64 out_height,
const int64 out_width, const int channels,
const float height_scale, const float width_scale,
const float in_min, const float in_max,
const bool half_pixel_centers,
typename TTypes<T, 4>::Tensor* output) {
ResizeImageReference<T>(images, batch_size, in_height, in_width, out_height,
out_width, channels, height_scale, width_scale,
in_min, in_max, half_pixel_centers, output);
} | 0 | [
"CWE-787"
] | tensorflow | f6c40f0c6cbf00d46c7717a26419f2062f2f8694 | 30,619,112,308,258,730,000,000,000,000,000,000,000 | 12 | Validate min and max arguments to `QuantizedResizeBilinear`.
PiperOrigin-RevId: 369765091
Change-Id: I33be8b78273ab7d08b97541692fe05cb7f94963a |
/*
 * Send a POP3 command and stream its multi-line response through a
 * callback, one reassembled line at a time.
 *
 * query    - command to send (copied into a local buffer first)
 * progress - optional progress display, updated with bytes received
 * callback - invoked per complete response line; a negative return
 *            aborts further callback delivery
 * data     - opaque pointer passed through to the callback
 *
 * Returns 0 on success, a negative pop_query/socket error, or -3 when
 * the callback failed.  Reading stops at the terminating "." line; a
 * leading ".." is unstuffed to "." per RFC 1939.
 */
int pop_fetch_data(struct PopAccountData *adata, const char *query,
struct Progress *progress, pop_fetch_t callback, void *data)
{
char buf[1024];
long pos = 0;
size_t lenbuf = 0;
mutt_str_strfcpy(buf, query, sizeof(buf));
int rc = pop_query(adata, buf, sizeof(buf));
if (rc < 0)
return rc;
char *inbuf = mutt_mem_malloc(sizeof(buf));
while (true)
{
const int chunk =
mutt_socket_readln_d(buf, sizeof(buf), adata->conn, MUTT_SOCK_LOG_FULL);
if (chunk < 0)
{
adata->status = POP_DISCONNECTED;
rc = -1;
break;
}
char *p = buf;
if (!lenbuf && (buf[0] == '.'))
{
/* A lone "." terminates the response; ".." is a byte-stuffed ".". */
if (buf[1] != '.')
break;
p++;
}
/* Append this chunk to the line being reassembled in inbuf. */
mutt_str_strfcpy(inbuf + lenbuf, p, sizeof(buf));
pos += chunk;
/* cast is safe since we break out of the loop when chunk<=0 */
if ((size_t) chunk >= sizeof(buf))
{
/* Partial line (filled buf without a newline): keep accumulating
 * before invoking the callback. */
lenbuf += strlen(p);
}
else
{
if (progress)
mutt_progress_update(progress, pos, -1);
if ((rc == 0) && (callback(inbuf, data) < 0))
rc = -3;
lenbuf = 0;
}
/* Grow inbuf so the next chunk always fits after lenbuf bytes. */
mutt_mem_realloc(&inbuf, lenbuf + sizeof(buf));
}
FREE(&inbuf);
return rc;
} | 0 | [
"CWE-94",
"CWE-74"
] | neomutt | fb013ec666759cb8a9e294347c7b4c1f597639cc | 59,473,687,921,904,020,000,000,000,000,000,000,000 | 56 | tls: clear data after a starttls acknowledgement
After a starttls acknowledgement message, clear the buffers of any
incoming data / commands. This will ensure that all future data is
handled securely.
Co-authored-by: Pietro Cerutti <[email protected]> |
static void ims_pcu_buffers_free(struct ims_pcu *pcu)
{
usb_kill_urb(pcu->urb_in);
usb_free_urb(pcu->urb_in);
usb_free_coherent(pcu->udev, pcu->max_out_size,
pcu->urb_in_buf, pcu->read_dma);
kfree(pcu->urb_out_buf);
usb_kill_urb(pcu->urb_ctrl);
usb_free_urb(pcu->urb_ctrl);
usb_free_coherent(pcu->udev, pcu->max_ctrl_size,
pcu->urb_ctrl_buf, pcu->ctrl_dma); | 0 | [
"CWE-703"
] | linux | a0ad220c96692eda76b2e3fd7279f3dcd1d8a8ff | 6,228,071,656,811,553,000,000,000,000,000,000,000 | 16 | Input: ims-pcu - sanity check against missing interfaces
A malicious device missing interface can make the driver oops.
Add sanity checking.
Signed-off-by: Oliver Neukum <[email protected]>
CC: [email protected]
Signed-off-by: Dmitry Torokhov <[email protected]> |
/*
 * Let the user interactively pick a pixel on the X display and return
 * the name of its color.
 *
 * display - open X server connection
 * windows - X window state; windows->image.image provides the context
 *           for color-name resolution
 * name    - output buffer for the color name (reset to "" on entry)
 *
 * Returns MagickTrue on success; MagickFalse when no window was
 * selected, the target window is not viewable, or the pixel image
 * cannot be fetched.
 */
MagickExport MagickBooleanType XGetWindowColor(Display *display,
XWindows *windows,char *name)
{
int
x,
y;
PixelPacket
pixel;
RectangleInfo
crop_info;
Status
status;
Window
child,
client_window,
root_window,
target_window;
XColor
color;
XImage
*ximage;
XWindowAttributes
window_attributes;
/*
Choose a pixel from the X server.
*/
assert(display != (Display *) NULL);
assert(name != (char *) NULL);
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",name);
*name='\0';
/* XSelectWindow fills crop_info with the picked location. */
target_window=XSelectWindow(display,&crop_info);
if (target_window == (Window) NULL)
return(MagickFalse);
root_window=XRootWindow(display,XDefaultScreen(display));
client_window=target_window;
if (target_window != root_window)
{
unsigned int
d;
/*
Get client window.
*/
status=XGetGeometry(display,target_window,&root_window,&x,&x,&d,&d,&d,&d);
if (status != False)
{
client_window=XClientWindow(display,target_window);
target_window=client_window;
}
}
/*
Verify window is viewable.
*/
status=XGetWindowAttributes(display,target_window,&window_attributes);
if ((status == False) || (window_attributes.map_state != IsViewable))
return(MagickFalse);
/*
Get window X image.
*/
/* Fetch a 1x1 image at the picked coordinate (translated into the
 * target window's coordinate space). */
(void) XTranslateCoordinates(display,root_window,target_window,
(int) crop_info.x,(int) crop_info.y,&x,&y,&child);
ximage=XGetImage(display,target_window,x,y,1,1,AllPlanes,ZPixmap);
if (ximage == (XImage *) NULL)
return(MagickFalse);
color.pixel=XGetPixel(ximage,0,0);
XDestroyImage(ximage);
/*
Match color against the color database.
*/
(void) XQueryColor(display,window_attributes.colormap,&color);
pixel.red=ScaleShortToQuantum(color.red);
pixel.green=ScaleShortToQuantum(color.green);
pixel.blue=ScaleShortToQuantum(color.blue);
pixel.opacity=OpaqueOpacity;
(void) QueryColorname(windows->image.image,&pixel,X11Compliance,name,
&windows->image.image->exception);
return(MagickTrue);
} | 0 | [
"CWE-401"
] | ImageMagick6 | 13801f5d0bd7a6fdb119682d34946636afdb2629 | 209,033,255,810,454,000,000,000,000,000,000,000,000 | 86 | https://github.com/ImageMagick/ImageMagick/issues/1531 |
/* Decode 'blockstodecode' entropy-coded samples for each channel of a
 * stereo Monkey's Audio (version 3.86-3.92) frame into ctx->decoded.
 * The bitstream stores every left-channel value before any
 * right-channel value, so the two passes must stay sequential. */
static void entropy_decode_stereo_3860(APEContext *ctx, int blockstodecode)
{
    int32_t *left  = ctx->decoded[0];
    int32_t *right = ctx->decoded[1];
    int i;

    for (i = 0; i < blockstodecode; i++)
        left[i] = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceY);
    for (i = 0; i < blockstodecode; i++)
        right[i] = ape_decode_value_3860(ctx, &ctx->gb, &ctx->riceX);
} | 0 | [
"CWE-125"
] | FFmpeg | ba4beaf6149f7241c8bd85fe853318c2f6837ad0 | 263,924,049,691,283,500,000,000,000,000,000,000,000 | 11 | avcodec/apedec: Fix integer overflow
Fixes: out of array access
Fixes: PoC.ape and others
Found-by: Bingchang, Liu@VARAS of IIE
Signed-off-by: Michael Niedermayer <[email protected]> |
/* Parse the prediction unit of a CU coded in skip mode: only a merge
 * index is present in the bitstream (skip implies 2Nx2N with merging),
 * which is recorded in the thread context's motion state. */
void read_prediction_unit_SKIP(thread_context* tctx,
int x0, int y0,
int nPbW, int nPbH)
{
/* x0/y0/nPbW/nPbH are unused here; kept so all PU parsers share a
 * uniform signature. */
int merge_idx = decode_merge_idx(tctx);
tctx->motion.merge_idx = merge_idx;
tctx->motion.merge_flag = true;
logtrace(LogSlice,"prediction skip 2Nx2N, merge_idx: %d\n",merge_idx);
} | 0 | [] | libde265 | e83f3798dd904aa579425c53020c67e03735138d | 129,813,988,843,136,170,000,000,000,000,000,000,000 | 11 | fix check for valid PPS idx (#298) |
/*
 * Garbage-collect session files: delete every file in dirname whose
 * name starts with FILE_PREFIX and whose last modification is older
 * than maxlifetime seconds.
 *
 * Returns the number of files deleted (0 if the directory cannot be
 * opened, with an E_NOTICE raised).
 */
static int ps_files_cleanup_dir(const char *dirname, int maxlifetime TSRMLS_DC)
{
DIR *dir;
/* Over-allocate the dirent so d_name can hold a full path. */
char dentry[sizeof(struct dirent) + MAXPATHLEN];
struct dirent *entry = (struct dirent *) &dentry;
struct stat sbuf;
char buf[MAXPATHLEN];
time_t now;
int nrdels = 0;
size_t dirname_len;
dir = opendir(dirname);
if (!dir) {
php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ps_files_cleanup_dir: opendir(%s) failed: %s (%d)", dirname, strerror(errno), errno);
return (0);
}
time(&now);
dirname_len = strlen(dirname);
/* Prepare buffer (dirname never changes) */
memcpy(buf, dirname, dirname_len);
buf[dirname_len] = PHP_DIR_SEPARATOR;
while (php_readdir_r(dir, (struct dirent *) dentry, &entry) == 0 && entry) {
/* does the file start with our prefix? */
if (!strncmp(entry->d_name, FILE_PREFIX, sizeof(FILE_PREFIX) - 1)) {
size_t entry_len = strlen(entry->d_name);
/* does it fit into our buffer? */
if (entry_len + dirname_len + 2 < MAXPATHLEN) {
/* create the full path.. */
memcpy(buf + dirname_len + 1, entry->d_name, entry_len);
/* NUL terminate it and */
buf[dirname_len + entry_len + 1] = '\0';
/* check whether its last access was more than maxlifetime ago */
if (VCWD_STAT(buf, &sbuf) == 0 &&
(now - sbuf.st_mtime) > maxlifetime) {
VCWD_UNLINK(buf);
nrdels++;
}
}
}
}
closedir(dir);
return (nrdels);
} | 0 | [] | php-src | a793b709086eed655bc98f933d838b8679b28920 | 287,856,849,781,260,050,000,000,000,000,000,000,000 | 52 | refix bug #69111, crash in 5.6 only |
/* Stop writes to the array: locked wrapper around __md_stop_writes(),
 * which must be called with the mddev lock held. */
void md_stop_writes(struct mddev *mddev)
{
mddev_lock_nointr(mddev);
__md_stop_writes(mddev);
mddev_unlock(mddev);
} | 0 | [
"CWE-200"
] | linux | b6878d9e03043695dbf3fa1caa6dfc09db225b16 | 321,773,243,007,714,300,000,000,000,000,000,000,000 | 6 | md: use kzalloc() when bitmap is disabled
In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a
mdu_bitmap_file_t called "file".
5769 file = kmalloc(sizeof(*file), GFP_NOIO);
5770 if (!file)
5771 return -ENOMEM;
This structure is copied to user space at the end of the function.
5786 if (err == 0 &&
5787 copy_to_user(arg, file, sizeof(*file)))
5788 err = -EFAULT
But if bitmap is disabled only the first byte of "file" is initialized
with zero, so it's possible to read some bytes (up to 4095) of kernel
space memory from user space. This is an information leak.
5775 /* bitmap disabled, zero the first byte and copy out */
5776 if (!mddev->bitmap_info.file)
5777 file->pathname[0] = '\0';
Signed-off-by: Benjamin Randazzo <[email protected]>
Signed-off-by: NeilBrown <[email protected]> |
// Construct a tensor buffer backed by data_ptr, remembering the
// allocator that produced it.
// NOTE(review): whether this class owns data_ptr (and frees it via
// alloc_) is not visible here - presumably handled by the destructor
// elsewhere; confirm before changing lifetime assumptions.
explicit BufferBase(Allocator* alloc, void* data_ptr)
: TensorBuffer(data_ptr), alloc_(alloc) {} | 0 | [
"CWE-345"
] | tensorflow | abcced051cb1bd8fb05046ac3b6023a7ebcc4578 | 311,903,436,283,083,160,000,000,000,000,000,000,000 | 2 | Prevent crashes when loading tensor slices with unsupported types.
Also fix the `Tensor(const TensorShape&)` constructor swapping the LOG(FATAL)
messages for the unset and unsupported types.
PiperOrigin-RevId: 392695027
Change-Id: I4beda7db950db951d273e3259a7c8534ece49354 |
/* Print a NUL-terminated ASCII tag value to fd by delegating to the
 * length-bounded variant with the string's full length. */
_TIFFprintAscii(FILE* fd, const char* cp)
{
_TIFFprintAsciiBounded( fd, cp, strlen(cp));
} | 0 | [
"CWE-476"
] | libtiff | c6f41df7b581402dfba3c19a1e3df4454c551a01 | 126,779,770,880,881,710,000,000,000,000,000,000,000 | 4 | libtiff/tif_print.c: TIFFPrintDirectory(): fix null pointer dereference on corrupted file. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2770 |
**/
CImg<T> get_slice(const int z0) const {
return get_slices(z0,z0); | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 337,195,331,405,727,500,000,000,000,000,000,000,000 | 3 | Fix other issues in 'CImg<T>::load_bmp()'. |
/*
 * Buffer one fragment of a DTLS handshake message and track reassembly
 * progress for its message sequence number in
 * s->d1->buffered_messages.
 *
 * msg_hdr describes the fragment just parsed from the wire (message
 * seq, total msg_len, frag_off, frag_len).  The fragment body is
 * consumed from the record layer here.
 *
 * Returns DTLS1_HM_FRAGMENT_RETRY so the caller re-enters the state
 * machine, or a value <= 0 with *ok cleared on error.
 */
dtls1_reassemble_fragment(SSL *s, struct hm_header_st* msg_hdr, int *ok)
{
hm_fragment *frag = NULL;
pitem *item = NULL;
int i = -1, is_complete;
unsigned char seq64be[8];
unsigned long frag_len = msg_hdr->frag_len, max_len;
/* The fragment must lie entirely within its own claimed message. */
if ((msg_hdr->frag_off+frag_len) > msg_hdr->msg_len)
goto err;
/* Determine maximum allowed message size. Depends on (user set)
 * maximum certificate length, but 16k is minimum.
 */
if (DTLS1_HM_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH < s->max_cert_list)
max_len = s->max_cert_list;
else
max_len = DTLS1_HM_HEADER_LENGTH + SSL3_RT_MAX_ENCRYPTED_LENGTH;
if ((msg_hdr->frag_off+frag_len) > max_len)
goto err;
/* Try to find item in queue */
memset(seq64be,0,sizeof(seq64be));
seq64be[6] = (unsigned char) (msg_hdr->seq>>8);
seq64be[7] = (unsigned char) msg_hdr->seq;
item = pqueue_find(s->d1->buffered_messages, seq64be);
if (item == NULL)
{
/* First fragment seen for this sequence number: allocate the
 * reassembly buffer sized from the claimed total message length. */
frag = dtls1_hm_fragment_new(msg_hdr->msg_len, 1);
if ( frag == NULL)
goto err;
memcpy(&(frag->msg_header), msg_hdr, sizeof(*msg_hdr));
frag->msg_header.frag_len = frag->msg_header.msg_len;
frag->msg_header.frag_off = 0;
}
else
{
frag = (hm_fragment*) item->data;
/*
 * CVE-2014-0195 fix: frag->fragment was sized from the msg_len of
 * the *first* fragment seen for this sequence number.  A later
 * fragment advertising a different (larger) msg_len would pass the
 * frag_off+frag_len <= msg_len check above yet overflow the stored
 * buffer when copied below.  Reject any fragment whose msg_len
 * disagrees with the queued one.  item/frag remain owned by the
 * queue, so clear the locals before taking the error path, which
 * frees whatever they point at.
 */
if (frag->msg_header.msg_len != msg_hdr->msg_len)
{
item = NULL;
frag = NULL;
goto err;
}
}
/* If message is already reassembled, this must be a
 * retransmit and can be dropped.
 */
if (frag->reassembly == NULL)
{
unsigned char devnull [256];
/* Drain and discard the fragment body from the record layer. */
while (frag_len)
{
i = s->method->ssl_read_bytes(s,SSL3_RT_HANDSHAKE,
devnull,
frag_len>sizeof(devnull)?sizeof(devnull):frag_len,0);
if (i<=0) goto err;
frag_len -= i;
}
return DTLS1_HM_FRAGMENT_RETRY;
}
/* read the body of the fragment (header has already been read */
i = s->method->ssl_read_bytes(s,SSL3_RT_HANDSHAKE,
frag->fragment + msg_hdr->frag_off,frag_len,0);
if (i<=0 || (unsigned long)i!=frag_len)
goto err;
/* Mark the received byte range and test whether the whole message
 * has now arrived; a NULL reassembly bitmask marks completion. */
RSMBLY_BITMASK_MARK(frag->reassembly, (long)msg_hdr->frag_off,
(long)(msg_hdr->frag_off + frag_len));
RSMBLY_BITMASK_IS_COMPLETE(frag->reassembly, (long)msg_hdr->msg_len,
is_complete);
if (is_complete)
{
OPENSSL_free(frag->reassembly);
frag->reassembly = NULL;
}
if (item == NULL)
{
memset(seq64be,0,sizeof(seq64be));
seq64be[6] = (unsigned char)(msg_hdr->seq>>8);
seq64be[7] = (unsigned char)(msg_hdr->seq);
item = pitem_new(seq64be, frag);
if (item == NULL)
{
i = -1;
goto err;
}
pqueue_insert(s->d1->buffered_messages, item);
}
return DTLS1_HM_FRAGMENT_RETRY;
err:
/* NOTE(review): read-error paths taken after a queued item was found
 * reach here with queue-owned frag/item and free them, mirroring the
 * upstream code of this vintage; verify against later upstream
 * cleanup fixes before relying on this error path. */
if (frag != NULL) dtls1_hm_fragment_free(frag);
if (item != NULL) OPENSSL_free(item);
*ok = 0;
return i;
} | 1 | [
"CWE-119",
"CWE-120"
] | openssl | 1632ef744872edc2aa2a53d487d3e79c965a4ad3 | 6,698,459,094,252,552,000,000,000,000,000,000,000 | 100 | Fix for CVE-2014-0195
A buffer overrun attack can be triggered by sending invalid DTLS fragments
to an OpenSSL DTLS client or server. This is potentially exploitable to
run arbitrary code on a vulnerable client or server.
Fixed by adding consistency check for DTLS fragments.
Thanks to Jüri Aedla for reporting this issue. |
// Return the singleton type handler for this field's 24-bit integer
// (MEDIUMINT) type; the handler object is shared, not owned.
const Type_handler *type_handler() const { return &type_handler_int24; } | 0 | [
"CWE-416",
"CWE-703"
] | server | 08c7ab404f69d9c4ca6ca7a9cf7eec74c804f917 | 188,278,858,300,827,270,000,000,000,000,000,000,000 | 1 | MDEV-24176 Server crashes after insert in the table with virtual
column generated using date_format() and if()
vcol_info->expr is allocated on expr_arena at parsing stage. Since
expr item is allocated on expr_arena all its containee items must be
allocated on expr_arena too. Otherwise fix_session_expr() will
encounter prematurely freed item.
When table is reopened from cache vcol_info contains stale
expression. We refresh expression via TABLE::vcol_fix_exprs() but
first we must prepare a proper context (Vcol_expr_context) which meets
some requirements:
1. As noted above expr update must be done on expr_arena as there may
be new items created. It was a bug in fix_session_expr_for_read() and
was just not reproduced because of no second refix. Now refix is done
for more cases so it does reproduce. Tests affected: vcol.binlog
2. Also name resolution context must be narrowed to the single table.
Tested by: vcol.update main.default vcol.vcol_syntax gcol.gcol_bugfixes
3. sql_mode must be clean and not fail expr update.
sql_mode such as MODE_NO_BACKSLASH_ESCAPES, MODE_NO_ZERO_IN_DATE, etc
must not affect vcol expression update. If the table was created
successfully any further evaluation must not fail. Tests affected:
main.func_like
Reviewed by: Sergei Golubchik <[email protected]> |
/* Run the TLS handshake for this connection, dispatching to the
 * client-side or server-side handshake routine depending on which end
 * of the connection we are configured as. */
int SSL_do_handshake(SSL* ssl)
{
    const bool isClient =
        ssl->getSecurity().get_parms().entity_ == client_end;

    return isClient ? SSL_connect(ssl) : SSL_accept(ssl);
} | 0 | [
"CWE-254"
] | mysql-server | e7061f7e5a96c66cb2e0bf46bec7f6ff35801a69 | 169,181,106,466,470,300,000,000,000,000,000,000,000 | 7 | Bug #22738607: YASSL FUNCTION X509_NAME_GET_INDEX_BY_NID IS NOT WORKING AS EXPECTED. |
const char *format, ...) _PRINTF_ATTRIBUTE(6,7)
{
va_list ap;
int count, i;
const char *attrs[2] = { NULL, NULL };
struct ldb_message **res = NULL;
attrs[0] = attr_name;
va_start(ap, format);
count = gendb_search_v(sam_ldb, mem_ctx, basedn, &res, attrs, format, ap);
va_end(ap);
if (count <= 0) {
return count;
}
/* make sure its single valued */
for (i=0;i<count;i++) {
if (res[i]->num_elements != 1) {
DEBUG(1,("samdb: search for %s %s not single valued\n",
attr_name, format));
talloc_free(res);
return -1;
}
}
*strs = talloc_array(mem_ctx, const char *, count+1);
if (! *strs) {
talloc_free(res);
return -1;
}
for (i=0;i<count;i++) {
(*strs)[i] = ldb_msg_find_attr_as_string(res[i], attr_name, NULL);
}
(*strs)[count] = NULL;
return count;
} | 0 | [
"CWE-200"
] | samba | 0a3aa5f908e351201dc9c4d4807b09ed9eedff77 | 265,199,905,749,222,700,000,000,000,000,000,000,000 | 40 | CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]> |
/*
 * Re-evaluate the controller's pin-based (INTx) interrupt line.
 * With MSI-X enabled the pin is never used, so do nothing.  Otherwise
 * assert the pin when any pending bit in irq_status is not masked by
 * the INTMS register, and deassert it once every pending bit is
 * masked or cleared.
 */
static void nvme_irq_check(NvmeCtrl *n)
{
uint32_t intms = ldl_le_p(&n->bar.intms);
if (msix_enabled(&(n->parent_obj))) {
return;
}
if (~intms & n->irq_status) {
pci_irq_assert(&n->parent_obj);
} else {
pci_irq_deassert(&n->parent_obj);
}
} | 0 | [] | qemu | 736b01642d85be832385063f278fe7cd4ffb5221 | 75,923,063,282,943,090,000,000,000,000,000,000,000 | 13 | hw/nvme: fix CVE-2021-3929
This fixes CVE-2021-3929 "locally" by denying DMA to the iomem of the
device itself. This still allows DMA to MMIO regions of other devices
(e.g. doing P2P DMA to the controller memory buffer of another NVMe
device).
Fixes: CVE-2021-3929
Reported-by: Qiuhao Li <[email protected]>
Reviewed-by: Keith Busch <[email protected]>
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Klaus Jensen <[email protected]> |
/*
 * Remove a file or directory on the 9p server.
 *
 * flags may contain AT_REMOVEDIR for rmdir-style removal.  The
 * 9P2000.L TUNLINKAT operation is tried first; if the server reports
 * it unsupported (-EOPNOTSUPP), fall back to the path-based TREMOVE
 * using a cloned fid.  On success, link counts are fixed up and the
 * cached attributes of both the victim inode and the parent directory
 * are invalidated.
 *
 * Returns 0 on success or a negative errno.
 */
static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
{
struct inode *inode;
int retval = -EOPNOTSUPP;
struct p9_fid *v9fid, *dfid;
struct v9fs_session_info *v9ses;
p9_debug(P9_DEBUG_VFS, "inode: %p dentry: %p rmdir: %x\n",
dir, dentry, flags);
v9ses = v9fs_inode2v9ses(dir);
inode = d_inode(dentry);
dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid)) {
retval = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", retval);
return retval;
}
if (v9fs_proto_dotl(v9ses))
retval = p9_client_unlinkat(dfid, dentry->d_name.name,
v9fs_at_to_dotl_flags(flags));
if (retval == -EOPNOTSUPP) {
/* Try the one based on path */
v9fid = v9fs_fid_clone(dentry);
if (IS_ERR(v9fid))
return PTR_ERR(v9fid);
retval = p9_client_remove(v9fid);
}
if (!retval) {
/*
 * directories on unlink should have zero
 * link count
 */
if (flags & AT_REMOVEDIR) {
clear_nlink(inode);
v9fs_dec_count(dir);
} else
v9fs_dec_count(inode);
v9fs_invalidate_inode_attr(inode);
v9fs_invalidate_inode_attr(dir);
}
return retval;
} | 0 | [
"CWE-835"
] | linux | 5e3cc1ee1405a7eb3487ed24f786dec01b4cbe1f | 112,993,163,127,267,920,000,000,000,000,000,000,000 | 44 | 9p: use inode->i_lock to protect i_size_write() under 32-bit
Use inode->i_lock to protect i_size_write(), else i_size_read() in
generic_fillattr() may loop infinitely in read_seqcount_begin() when
multiple processes invoke v9fs_vfs_getattr() or v9fs_vfs_getattr_dotl()
simultaneously under 32-bit SMP environment, and a soft lockup will be
triggered as show below:
watchdog: BUG: soft lockup - CPU#5 stuck for 22s! [stat:2217]
Modules linked in:
CPU: 5 PID: 2217 Comm: stat Not tainted 5.0.0-rc1-00005-g7f702faf5a9e #4
Hardware name: Generic DT based system
PC is at generic_fillattr+0x104/0x108
LR is at 0xec497f00
pc : [<802b8898>] lr : [<ec497f00>] psr: 200c0013
sp : ec497e20 ip : ed608030 fp : ec497e3c
r10: 00000000 r9 : ec497f00 r8 : ed608030
r7 : ec497ebc r6 : ec497f00 r5 : ee5c1550 r4 : ee005780
r3 : 0000052d r2 : 00000000 r1 : ec497f00 r0 : ed608030
Flags: nzCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment none
Control: 10c5387d Table: ac48006a DAC: 00000051
CPU: 5 PID: 2217 Comm: stat Not tainted 5.0.0-rc1-00005-g7f702faf5a9e #4
Hardware name: Generic DT based system
Backtrace:
[<8010d974>] (dump_backtrace) from [<8010dc88>] (show_stack+0x20/0x24)
[<8010dc68>] (show_stack) from [<80a1d194>] (dump_stack+0xb0/0xdc)
[<80a1d0e4>] (dump_stack) from [<80109f34>] (show_regs+0x1c/0x20)
[<80109f18>] (show_regs) from [<801d0a80>] (watchdog_timer_fn+0x280/0x2f8)
[<801d0800>] (watchdog_timer_fn) from [<80198658>] (__hrtimer_run_queues+0x18c/0x380)
[<801984cc>] (__hrtimer_run_queues) from [<80198e60>] (hrtimer_run_queues+0xb8/0xf0)
[<80198da8>] (hrtimer_run_queues) from [<801973e8>] (run_local_timers+0x28/0x64)
[<801973c0>] (run_local_timers) from [<80197460>] (update_process_times+0x3c/0x6c)
[<80197424>] (update_process_times) from [<801ab2b8>] (tick_nohz_handler+0xe0/0x1bc)
[<801ab1d8>] (tick_nohz_handler) from [<80843050>] (arch_timer_handler_virt+0x38/0x48)
[<80843018>] (arch_timer_handler_virt) from [<80180a64>] (handle_percpu_devid_irq+0x8c/0x240)
[<801809d8>] (handle_percpu_devid_irq) from [<8017ac20>] (generic_handle_irq+0x34/0x44)
[<8017abec>] (generic_handle_irq) from [<8017b344>] (__handle_domain_irq+0x6c/0xc4)
[<8017b2d8>] (__handle_domain_irq) from [<801022e0>] (gic_handle_irq+0x4c/0x88)
[<80102294>] (gic_handle_irq) from [<80101a30>] (__irq_svc+0x70/0x98)
[<802b8794>] (generic_fillattr) from [<8056b284>] (v9fs_vfs_getattr_dotl+0x74/0xa4)
[<8056b210>] (v9fs_vfs_getattr_dotl) from [<802b8904>] (vfs_getattr_nosec+0x68/0x7c)
[<802b889c>] (vfs_getattr_nosec) from [<802b895c>] (vfs_getattr+0x44/0x48)
[<802b8918>] (vfs_getattr) from [<802b8a74>] (vfs_statx+0x9c/0xec)
[<802b89d8>] (vfs_statx) from [<802b9428>] (sys_lstat64+0x48/0x78)
[<802b93e0>] (sys_lstat64) from [<80101000>] (ret_fast_syscall+0x0/0x28)
[[email protected]: updated comment to not refer to a function
in another subsystem]
Link: http://lkml.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 7549ae3e81cc ("9p: Use the i_size_[read, write]() macros instead of using inode->i_size directly.")
Reported-by: Xing Gaopeng <[email protected]>
Signed-off-by: Hou Tao <[email protected]>
Signed-off-by: Dominique Martinet <[email protected]> |
static NTSTATUS idmap_xid_to_sid(struct idmap_context *idmap_ctx,
TALLOC_CTX *mem_ctx,
struct unixid *unixid,
struct dom_sid **sid)
{
int ret;
NTSTATUS status;
struct ldb_context *ldb = idmap_ctx->ldb_ctx;
struct ldb_result *res = NULL;
struct ldb_message *msg;
const struct dom_sid *unix_sid;
struct dom_sid *new_sid;
TALLOC_CTX *tmp_ctx = talloc_new(mem_ctx);
const char *id_type;
const char *sam_attrs[] = {"objectSid", NULL};
/*
* First check against our local DB, to see if this user has a
* mapping there. This means that the Samba4 AD DC behaves
* much like a winbindd member server running idmap_ad
*/
switch (unixid->type) {
case ID_TYPE_UID:
if (lpcfg_parm_bool(idmap_ctx->lp_ctx, NULL, "idmap_ldb", "use rfc2307", false)) {
ret = dsdb_search_one(idmap_ctx->samdb, tmp_ctx, &msg,
ldb_get_default_basedn(idmap_ctx->samdb),
LDB_SCOPE_SUBTREE,
sam_attrs, 0,
"(&(|(sAMaccountType=%u)(sAMaccountType=%u)(sAMaccountType=%u))"
"(uidNumber=%u)(objectSid=*))",
ATYPE_ACCOUNT, ATYPE_WORKSTATION_TRUST, ATYPE_INTERDOMAIN_TRUST, unixid->id);
} else {
/* If we are not to use the rfc2307 attributes, we just emulate a non-match */
ret = LDB_ERR_NO_SUCH_OBJECT;
}
if (ret == LDB_ERR_CONSTRAINT_VIOLATION) {
DEBUG(1, ("Search for uidNumber=%lu gave duplicate results, failing to map to a SID!\n",
(unsigned long)unixid->id));
status = NT_STATUS_NONE_MAPPED;
goto failed;
} else if (ret == LDB_SUCCESS) {
*sid = samdb_result_dom_sid(mem_ctx, msg, "objectSid");
if (*sid == NULL) {
DEBUG(1, ("Search for uidNumber=%lu did not return an objectSid!\n",
(unsigned long)unixid->id));
status = NT_STATUS_NONE_MAPPED;
goto failed;
}
talloc_free(tmp_ctx);
return NT_STATUS_OK;
} else if (ret != LDB_ERR_NO_SUCH_OBJECT) {
DEBUG(1, ("Search for uidNumber=%lu gave '%s', failing to map to a SID!\n",
(unsigned long)unixid->id, ldb_errstring(idmap_ctx->samdb)));
status = NT_STATUS_NONE_MAPPED;
goto failed;
}
id_type = "ID_TYPE_UID";
break;
case ID_TYPE_GID:
if (lpcfg_parm_bool(idmap_ctx->lp_ctx, NULL, "idmap_ldb", "use rfc2307", false)) {
ret = dsdb_search_one(idmap_ctx->samdb, tmp_ctx, &msg,
ldb_get_default_basedn(idmap_ctx->samdb),
LDB_SCOPE_SUBTREE,
sam_attrs, 0,
"(&(|(sAMaccountType=%u)(sAMaccountType=%u))(gidNumber=%u))",
ATYPE_SECURITY_GLOBAL_GROUP, ATYPE_SECURITY_LOCAL_GROUP,
unixid->id);
} else {
/* If we are not to use the rfc2307 attributes, we just emulate a non-match */
ret = LDB_ERR_NO_SUCH_OBJECT;
}
if (ret == LDB_ERR_CONSTRAINT_VIOLATION) {
DEBUG(1, ("Search for gidNumber=%lu gave duplicate results, failing to map to a SID!\n",
(unsigned long)unixid->id));
status = NT_STATUS_NONE_MAPPED;
goto failed;
} else if (ret == LDB_SUCCESS) {
*sid = samdb_result_dom_sid(mem_ctx, msg, "objectSid");
if (*sid == NULL) {
DEBUG(1, ("Search for gidNumber=%lu did not return an objectSid!\n",
(unsigned long)unixid->id));
status = NT_STATUS_NONE_MAPPED;
goto failed;
}
talloc_free(tmp_ctx);
return NT_STATUS_OK;
} else if (ret != LDB_ERR_NO_SUCH_OBJECT) {
DEBUG(1, ("Search for gidNumber=%lu gave '%s', failing to map to a SID!\n",
(unsigned long)unixid->id, ldb_errstring(idmap_ctx->samdb)));
status = NT_STATUS_NONE_MAPPED;
goto failed;
}
id_type = "ID_TYPE_GID";
break;
default:
DEBUG(1, ("unixid->type must be type gid or uid (got %u) for lookup with id %lu\n",
(unsigned)unixid->type, (unsigned long)unixid->id));
status = NT_STATUS_NONE_MAPPED;
goto failed;
}
ret = ldb_search(ldb, tmp_ctx, &res, NULL, LDB_SCOPE_SUBTREE,
NULL, "(&(|(type=ID_TYPE_BOTH)(type=%s))"
"(xidNumber=%u))", id_type, unixid->id);
if (ret != LDB_SUCCESS) {
DEBUG(1, ("Search failed: %s\n", ldb_errstring(ldb)));
status = NT_STATUS_NONE_MAPPED;
goto failed;
}
if (res->count == 1) {
const char *type = ldb_msg_find_attr_as_string(res->msgs[0],
"type", NULL);
*sid = idmap_msg_get_dom_sid(mem_ctx, res->msgs[0],
"objectSid");
if (*sid == NULL) {
DEBUG(1, ("Failed to get sid from db: %u\n", ret));
status = NT_STATUS_NONE_MAPPED;
goto failed;
}
if (type == NULL) {
DEBUG(1, ("Invalid type for mapping entry.\n"));
talloc_free(tmp_ctx);
return NT_STATUS_NONE_MAPPED;
}
if (strcmp(type, "ID_TYPE_BOTH") == 0) {
unixid->type = ID_TYPE_BOTH;
} else if (strcmp(type, "ID_TYPE_UID") == 0) {
unixid->type = ID_TYPE_UID;
} else {
unixid->type = ID_TYPE_GID;
}
talloc_free(tmp_ctx);
return NT_STATUS_OK;
}
DEBUG(6, ("xid not found in idmap db, create S-1-22- SID.\n"));
/* For local users/groups , we just create a rid = uid/gid */
if (unixid->type == ID_TYPE_UID) {
unix_sid = &global_sid_Unix_Users;
} else {
unix_sid = &global_sid_Unix_Groups;
}
new_sid = dom_sid_add_rid(mem_ctx, unix_sid, unixid->id);
if (new_sid == NULL) {
status = NT_STATUS_NO_MEMORY;
goto failed;
}
*sid = new_sid;
talloc_free(tmp_ctx);
return NT_STATUS_OK;
failed:
talloc_free(tmp_ctx);
return status;
} | 0 | [
"CWE-200"
] | samba | 0a3aa5f908e351201dc9c4d4807b09ed9eedff77 | 47,353,274,487,421,400,000,000,000,000,000,000,000 | 168 | CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]> |
hb_ot_layout_language_get_required_feature_index (hb_face_t *face,
hb_tag_t table_tag,
unsigned int script_index,
unsigned int language_index,
unsigned int *feature_index)
{
const LangSys &l = get_gsubgpos_table (face, table_tag).get_script (script_index).get_lang_sys (language_index);
if (feature_index) *feature_index = l.get_required_feature_index ();
return l.has_required_feature ();
} | 0 | [
"CWE-119"
] | pango | 797d46714d27f147277fdd5346648d838c68fb8c | 295,736,534,146,586,540,000,000,000,000,000,000,000 | 12 | [HB/GDEF] Fix bug in building synthetic GDEF table |
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr)
{
struct net_device *dev = dst->dev;
const __be32 *pkey = daddr;
const struct rtable *rt;
struct neighbour *n;
rt = (const struct rtable *) dst;
if (rt->rt_gateway)
pkey = (const __be32 *) &rt->rt_gateway;
else if (skb)
pkey = &ip_hdr(skb)->daddr;
n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
if (n)
return n;
return neigh_create(&arp_tbl, pkey, dev);
} | 0 | [
"CWE-17"
] | linux | df4d92549f23e1c037e83323aff58a21b3de7fe0 | 65,792,586,387,933,060,000,000,000,000,000,000,000 | 20 | ipv4: try to cache dst_entries which would cause a redirect
Not caching dst_entries which cause redirects could be exploited by hosts
on the same subnet, causing a severe DoS attack. This effect aggravated
since commit f88649721268999 ("ipv4: fix dst race in sk_dst_get()").
Lookups causing redirects will be allocated with DST_NOCACHE set which
will force dst_release to free them via RCU. Unfortunately waiting for
RCU grace period just takes too long, we can end up with >1M dst_entries
waiting to be released and the system will run OOM. rcuos threads cannot
catch up under high softirq load.
Attaching the flag to emit a redirect later on to the specific skb allows
us to cache those dst_entries thus reducing the pressure on allocation
and deallocation.
This issue was discovered by Marcelo Leitner.
Cc: Julian Anastasov <[email protected]>
Signed-off-by: Marcelo Leitner <[email protected]>
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: Julian Anastasov <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
find_cu_tu_set (void *file, unsigned int shndx)
{
unsigned int i;
if (! load_cu_tu_indexes (file))
return NULL;
/* Find SHNDX in the shndx pool. */
for (i = 0; i < shndx_pool_used; i++)
if (shndx_pool [i] == shndx)
break;
if (i >= shndx_pool_used)
return NULL;
/* Now backup to find the first entry in the set. */
while (i > 0 && shndx_pool [i - 1] != 0)
i--;
return shndx_pool + i;
} | 0 | [
"CWE-703"
] | binutils-gdb | 695c6dfe7e85006b98c8b746f3fd5f913c94ebff | 179,207,790,974,705,500,000,000,000,000,000,000,000 | 21 | PR29370, infinite loop in display_debug_abbrev
The PR29370 testcase is a fuzzed object file with multiple
.trace_abbrev sections. Multiple .trace_abbrev or .debug_abbrev
sections are not a violation of the DWARF standard. The DWARF5
standard even gives an example of multiple .debug_abbrev sections
contained in groups. Caching and lookup of processed abbrevs thus
needs to be done by section and offset rather than base and offset.
(Why base anyway?) Or, since section contents are kept, by a pointer
into the contents.
PR 29370
* dwarf.c (struct abbrev_list): Replace abbrev_base and
abbrev_offset with raw field.
(find_abbrev_list_by_abbrev_offset): Delete.
(find_abbrev_list_by_raw_abbrev): New function.
(process_abbrev_set): Set list->raw and list->next.
(find_and_process_abbrev_set): Replace abbrev list lookup with
new function. Don't set list abbrev_base, abbrev_offset or next. |
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
struct in6_addr *saddr)
{
struct fib6_node *pn;
while (1) {
if (fn->fn_flags & RTN_TL_ROOT)
return NULL;
pn = fn->parent;
if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
else
fn = pn;
if (fn->fn_flags & RTN_RTINFO)
return fn;
}
} | 0 | [
"CWE-17"
] | linux-stable | 9d289715eb5c252ae15bd547cb252ca547a3c4f2 | 54,801,594,327,459,850,000,000,000,000,000,000,000 | 16 | ipv6: stop sending PTB packets for MTU < 1280
Reduce the attack vector and stop generating IPv6 Fragment Header for
paths with an MTU smaller than the minimum required IPv6 MTU
size (1280 byte) - called atomic fragments.
See IETF I-D "Deprecating the Generation of IPv6 Atomic Fragments" [1]
for more information and how this "feature" can be misused.
[1] https://tools.ietf.org/html/draft-ietf-6man-deprecate-atomfrag-generation-00
Signed-off-by: Fernando Gont <[email protected]>
Signed-off-by: Hagen Paul Pfeifer <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
template<typename T>
inline CImg<_cimg_Tfloat> log(const CImg<T>& instance) {
return instance.get_log(); | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 28,724,605,231,452,150,000,000,000,000,000,000,000 | 3 | Fix other issues in 'CImg<T>::load_bmp()'. |
static inline u64 kvm_guest_supported_xcr0(struct kvm_vcpu *vcpu)
{
return vcpu->arch.guest_fpu.fpstate->user_xfeatures;
} | 0 | [
"CWE-459"
] | linux | 683412ccf61294d727ead4a73d97397396e69a6b | 142,826,802,876,157,830,000,000,000,000,000,000,000 | 4 | KVM: SEV: add cache flush to solve SEV cache incoherency issues
Flush the CPU caches when memory is reclaimed from an SEV guest (where
reclaim also includes it being unmapped from KVM's memslots). Due to lack
of coherency for SEV encrypted memory, failure to flush results in silent
data corruption if userspace is malicious/broken and doesn't ensure SEV
guest memory is properly pinned and unpinned.
Cache coherency is not enforced across the VM boundary in SEV (AMD APM
vol.2 Section 15.34.7). Confidential cachelines, generated by confidential
VM guests have to be explicitly flushed on the host side. If a memory page
containing dirty confidential cachelines was released by VM and reallocated
to another user, the cachelines may corrupt the new user at a later time.
KVM takes a shortcut by assuming all confidential memory remain pinned
until the end of VM lifetime. Therefore, KVM does not flush cache at
mmu_notifier invalidation events. Because of this incorrect assumption and
the lack of cache flushing, malicous userspace can crash the host kernel:
creating a malicious VM and continuously allocates/releases unpinned
confidential memory pages when the VM is running.
Add cache flush operations to mmu_notifier operations to ensure that any
physical memory leaving the guest VM get flushed. In particular, hook
mmu_notifier_invalidate_range_start and mmu_notifier_release events and
flush cache accordingly. The hook after releasing the mmu lock to avoid
contention with other vCPUs.
Cc: [email protected]
Suggested-by: Sean Christpherson <[email protected]>
Reported-by: Mingwei Zhang <[email protected]>
Signed-off-by: Mingwei Zhang <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
void ClientConnectionImpl::onBody(Buffer::Instance& data) {
ASSERT(!deferred_end_stream_headers_);
if (pending_response_.has_value()) {
ASSERT(!pending_response_done_);
pending_response_.value().decoder_->decodeData(data, false);
}
} | 0 | [
"CWE-770"
] | envoy | 7ca28ff7d46454ae930e193d97b7d08156b1ba59 | 139,000,558,096,919,850,000,000,000,000,000,000,000 | 7 | [http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <[email protected]> |
TEST_P(Http2UpstreamIntegrationTest, LargeSimultaneousRequestWithBufferLimits) {
config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream.
simultaneousRequest(1024 * 20, 1024 * 14 + 2, 1024 * 10 + 5, 1024 * 16);
} | 0 | [
"CWE-400"
] | envoy | 0e49a495826ea9e29134c1bd54fdeb31a034f40c | 296,215,839,022,455,580,000,000,000,000,000,000,000 | 4 | http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]> |
u_freeentry(u_entry_T *uep, long n)
{
while (n > 0)
vim_free(uep->ue_array[--n].ul_line);
vim_free((char_u *)uep->ue_array);
#ifdef U_DEBUG
uep->ue_magic = 0;
#endif
vim_free((char_u *)uep);
} | 0 | [
"CWE-125",
"CWE-787"
] | vim | 8d02ce1ed75d008c34a5c9aaa51b67cbb9d33baa | 257,020,227,296,306,280,000,000,000,000,000,000,000 | 10 | patch 8.2.4217: illegal memory access when undo makes Visual area invalid
Problem: Illegal memory access when undo makes Visual area invalid.
Solution: Correct the Visual area after undo. |
static int ZEND_FASTCALL ZEND_CASE_SPEC_TMP_CV_HANDLER(ZEND_OPCODE_HANDLER_ARGS)
{
zend_op *opline = EX(opline);
int switch_expr_is_overloaded=0;
zend_free_op free_op1;
if (IS_TMP_VAR==IS_VAR) {
if (EX_T(opline->op1.u.var).var.ptr_ptr) {
PZVAL_LOCK(EX_T(opline->op1.u.var).var.ptr);
} else {
switch_expr_is_overloaded = 1;
Z_ADDREF_P(EX_T(opline->op1.u.var).str_offset.str);
}
}
is_equal_function(&EX_T(opline->result.u.var).tmp_var,
_get_zval_ptr_tmp(&opline->op1, EX(Ts), &free_op1 TSRMLS_CC),
_get_zval_ptr_cv(&opline->op2, EX(Ts), BP_VAR_R TSRMLS_CC) TSRMLS_CC);
if (switch_expr_is_overloaded) {
/* We only free op1 if this is a string offset,
* Since if it is a TMP_VAR, it'll be reused by
* other CASE opcodes (whereas string offsets
* are allocated at each get_zval_ptr())
*/
zval_dtor(free_op1.var);
EX_T(opline->op1.u.var).var.ptr_ptr = NULL;
EX_T(opline->op1.u.var).var.ptr = NULL;
}
ZEND_VM_NEXT_OPCODE();
} | 0 | [] | php-src | ce96fd6b0761d98353761bf78d5bfb55291179fd | 113,904,471,152,167,250,000,000,000,000,000,000,000 | 30 | - fix #39863, do not accept paths with NULL in them. See http://news.php.net/php.internals/50191, trunk will have the patch later (adding a macro and/or changing (some) APIs. Patch by Rasmus |
vector<string> get_zone_names_from_ids(rgw::sal::RGWRadosStore *store,
const set<rgw_zone_id>& zone_ids) const {
vector<string> names;
for (auto& id : zone_ids) {
RGWZone *zone;
if (store->svc()->zone->find_zone(id, &zone)) {
names.emplace_back(zone->name);
}
}
return names;
} | 0 | [
"CWE-79"
] | ceph | 8f90658c731499722d5f4393c8ad70b971d05f77 | 140,067,828,601,482,140,000,000,000,000,000,000,000 | 13 | rgw: reject unauthenticated response-header actions
Signed-off-by: Matt Benjamin <[email protected]>
Reviewed-by: Casey Bodley <[email protected]>
(cherry picked from commit d8dd5e513c0c62bbd7d3044d7e2eddcd897bd400) |
ASC_dropSCPAssociation(T_ASC_Association * association, int timeout)
{
/* if already dead don't worry */
if (association == NULL) return EC_Normal;
if (association->DULassociation == NULL) return EC_Normal;
ASC_dataWaiting(association, timeout);
OFCondition cond = DUL_DropAssociation(&association->DULassociation);
return cond;
} | 0 | [
"CWE-415",
"CWE-703",
"CWE-401"
] | dcmtk | a9697dfeb672b0b9412c00c7d36d801e27ec85cb | 321,091,758,287,673,800,000,000,000,000,000,000,000 | 11 | Fixed poss. NULL pointer dereference/double free.
Thanks to Jinsheng Ba <[email protected]> for the report and some patches. |
SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
{
return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
} | 0 | [
"CWE-416"
] | linux | f15133df088ecadd141ea1907f2c96df67c729f0 | 245,714,100,026,479,760,000,000,000,000,000,000,000 | 4 | path_openat(): fix double fput()
path_openat() jumps to the wrong place after do_tmpfile() - it has
already done path_cleanup() (as part of path_lookupat() called by
do_tmpfile()), so doing that again can lead to double fput().
Cc: [email protected] # v3.11+
Signed-off-by: Al Viro <[email protected]> |
aspath_highest (struct aspath *aspath)
{
struct assegment *seg = aspath->segments;
as_t highest = 0;
unsigned int i;
while (seg)
{
for (i = 0; i < seg->length; i++)
if (seg->as[i] > highest
&& !BGP_AS_IS_PRIVATE(seg->as[i]))
highest = seg->as[i];
seg = seg->next;
}
return highest;
} | 0 | [
"CWE-20"
] | quagga | 7a42b78be9a4108d98833069a88e6fddb9285008 | 242,351,090,258,026,550,000,000,000,000,000,000,000 | 16 | bgpd: Fix AS_PATH size calculation for long paths
If you have an AS_PATH with more entries than
what can be written into a single AS_SEGMENT_MAX
it needs to be broken up. The code that noticed
that the AS_PATH needs to be broken up was not
correctly calculating the size of the resulting
message. This patch addresses this issue. |
XmlDocument::Initialize(v8::Local<v8::Object> target)
{
Nan::HandleScope scope;
v8::Local<v8::FunctionTemplate> tmpl =
Nan::New<v8::FunctionTemplate>(New);
tmpl->SetClassName(Nan::New<v8::String>("Document").ToLocalChecked());
constructor_template.Reset( tmpl);
tmpl->InstanceTemplate()->SetInternalFieldCount(1);
/// setup internal methods for bindings
Nan::SetPrototypeMethod(tmpl,
"_root",
XmlDocument::Root);
Nan::SetPrototypeMethod(tmpl,
"_version",
XmlDocument::Version);
Nan::SetPrototypeMethod(tmpl,
"_encoding",
XmlDocument::Encoding);
Nan::SetPrototypeMethod(tmpl,
"_toString",
XmlDocument::ToString);
Nan::SetPrototypeMethod(tmpl,
"_validate",
XmlDocument::Validate);
Nan::SetPrototypeMethod(tmpl,
"_rngValidate",
XmlDocument::RngValidate);
Nan::SetPrototypeMethod(tmpl,
"_setDtd",
XmlDocument::SetDtd);
Nan::SetPrototypeMethod(tmpl,
"_getDtd",
XmlDocument::GetDtd);
Nan::SetMethod(target, "fromXml", XmlDocument::FromXml);
Nan::SetMethod(target, "fromHtml", XmlDocument::FromHtml);
// used to create new document handles
Nan::Set(target, Nan::New<v8::String>("Document").ToLocalChecked(), Nan::GetFunction(tmpl).ToLocalChecked());
XmlNode::Initialize(target);
XmlNamespace::Initialize(target);
} | 0 | [
"CWE-400"
] | libxmljs | 2501807bde9b38cfaed06d1e140487516d91379d | 333,713,086,327,483,200,000,000,000,000,000,000,000 | 51 | Ensure parseXml/parseHtml input is string or buffer (#594) |
static void hns_ppe_set_qid(struct ppe_common_cb *ppe_common, u32 qid)
{
u32 qid_mod = dsaf_read_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG);
if (!dsaf_get_field(qid_mod, PPE_CFG_QID_MODE_DEF_QID_M,
PPE_CFG_QID_MODE_DEF_QID_S)) {
dsaf_set_field(qid_mod, PPE_CFG_QID_MODE_DEF_QID_M,
PPE_CFG_QID_MODE_DEF_QID_S, qid);
dsaf_write_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG, qid_mod);
}
} | 0 | [
"CWE-119",
"CWE-703"
] | linux | 412b65d15a7f8a93794653968308fc100f2aa87c | 291,336,532,625,339,620,000,000,000,000,000,000,000 | 11 | net: hns: fix ethtool_get_strings overflow in hns driver
hns_get_sset_count() returns HNS_NET_STATS_CNT and the data space allocated
is not enough for ethtool_get_strings(), which will cause random memory
corruption.
When SLAB and DEBUG_SLAB are both enabled, memory corruptions like the
the following can be observed without this patch:
[ 43.115200] Slab corruption (Not tainted): Acpi-ParseExt start=ffff801fb0b69030, len=80
[ 43.115206] Redzone: 0x9f911029d006462/0x5f78745f31657070.
[ 43.115208] Last user: [<5f7272655f746b70>](0x5f7272655f746b70)
[ 43.115214] 010: 70 70 65 31 5f 74 78 5f 70 6b 74 00 6b 6b 6b 6b ppe1_tx_pkt.kkkk
[ 43.115217] 030: 70 70 65 31 5f 74 78 5f 70 6b 74 5f 6f 6b 00 6b ppe1_tx_pkt_ok.k
[ 43.115218] Next obj: start=ffff801fb0b69098, len=80
[ 43.115220] Redzone: 0x706d655f6f666966/0x9f911029d74e35b.
[ 43.115229] Last user: [<ffff0000084b11b0>](acpi_os_release_object+0x28/0x38)
[ 43.115231] 000: 74 79 00 6b 6b 6b 6b 6b 70 70 65 31 5f 74 78 5f ty.kkkkkppe1_tx_
[ 43.115232] 010: 70 6b 74 5f 65 72 72 5f 63 73 75 6d 5f 66 61 69 pkt_err_csum_fai
Signed-off-by: Timmy Li <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
struct printf_spec spec)
{
const int default_width = 2 * sizeof(void *);
if (!ptr && *fmt != 'K') {
/*
* Print (null) with the same width as a pointer so it makes
* tabular output look nice.
*/
if (spec.field_width == -1)
spec.field_width = default_width;
return string(buf, end, "(null)", spec);
}
switch (*fmt) {
case 'F':
case 'f':
ptr = dereference_function_descriptor(ptr);
/* Fallthrough */
case 'S':
case 's':
case 'B':
return symbol_string(buf, end, ptr, spec, fmt);
case 'R':
case 'r':
return resource_string(buf, end, ptr, spec, fmt);
case 'h':
return hex_string(buf, end, ptr, spec, fmt);
case 'b':
switch (fmt[1]) {
case 'l':
return bitmap_list_string(buf, end, ptr, spec, fmt);
default:
return bitmap_string(buf, end, ptr, spec, fmt);
}
case 'M': /* Colon separated: 00:01:02:03:04:05 */
case 'm': /* Contiguous: 000102030405 */
/* [mM]F (FDDI) */
/* [mM]R (Reverse order; Bluetooth) */
return mac_address_string(buf, end, ptr, spec, fmt);
case 'I': /* Formatted IP supported
* 4: 1.2.3.4
* 6: 0001:0203:...:0708
* 6c: 1::708 or 1::1.2.3.4
*/
case 'i': /* Contiguous:
* 4: 001.002.003.004
* 6: 000102...0f
*/
switch (fmt[1]) {
case '6':
return ip6_addr_string(buf, end, ptr, spec, fmt);
case '4':
return ip4_addr_string(buf, end, ptr, spec, fmt);
case 'S': {
const union {
struct sockaddr raw;
struct sockaddr_in v4;
struct sockaddr_in6 v6;
} *sa = ptr;
switch (sa->raw.sa_family) {
case AF_INET:
return ip4_addr_string_sa(buf, end, &sa->v4, spec, fmt);
case AF_INET6:
return ip6_addr_string_sa(buf, end, &sa->v6, spec, fmt);
default:
return string(buf, end, "(invalid address)", spec);
}}
}
break;
case 'E':
return escaped_string(buf, end, ptr, spec, fmt);
case 'U':
return uuid_string(buf, end, ptr, spec, fmt);
case 'V':
{
va_list va;
va_copy(va, *((struct va_format *)ptr)->va);
buf += vsnprintf(buf, end > buf ? end - buf : 0,
((struct va_format *)ptr)->fmt, va);
va_end(va);
return buf;
}
case 'K':
return restricted_pointer(buf, end, ptr, spec);
case 'N':
return netdev_bits(buf, end, ptr, fmt);
case 'a':
return address_val(buf, end, ptr, fmt);
case 'd':
return dentry_name(buf, end, ptr, spec, fmt);
case 'C':
return clock(buf, end, ptr, spec, fmt);
case 'D':
return dentry_name(buf, end,
((const struct file *)ptr)->f_path.dentry,
spec, fmt);
#ifdef CONFIG_BLOCK
case 'g':
return bdev_name(buf, end, ptr, spec, fmt);
#endif
case 'G':
return flags_string(buf, end, ptr, fmt);
case 'O':
switch (fmt[1]) {
case 'F':
return device_node_string(buf, end, ptr, spec, fmt + 1);
}
}
/* default is to _not_ leak addresses, hash before printing */
return ptr_to_id(buf, end, ptr, spec);
} | 0 | [
"CWE-200"
] | linux | ad67b74d2469d9b82aaa572d76474c95bc484d57 | 69,168,966,627,178,865,000,000,000,000,000,000,000 | 117 | printk: hash addresses printed with %p
Currently there exist approximately 14 000 places in the kernel where
addresses are being printed using an unadorned %p. This potentially
leaks sensitive information regarding the Kernel layout in memory. Many
of these calls are stale, instead of fixing every call lets hash the
address by default before printing. This will of course break some
users, forcing code printing needed addresses to be updated.
Code that _really_ needs the address will soon be able to use the new
printk specifier %px to print the address.
For what it's worth, usage of unadorned %p can be broken down as
follows (thanks to Joe Perches).
$ git grep -E '%p[^A-Za-z0-9]' | cut -f1 -d"/" | sort | uniq -c
1084 arch
20 block
10 crypto
32 Documentation
8121 drivers
1221 fs
143 include
101 kernel
69 lib
100 mm
1510 net
40 samples
7 scripts
11 security
166 sound
152 tools
2 virt
Add function ptr_to_id() to map an address to a 32 bit unique
identifier. Hash any unadorned usage of specifier %p and any malformed
specifiers.
Signed-off-by: Tobin C. Harding <[email protected]> |
void ConnectionManagerImpl::ActiveStream::decodeData(
ActiveStreamDecoderFilter* filter, Buffer::Instance& data, bool end_stream,
FilterIterationStartState filter_iteration_start_state) {
ScopeTrackerScopeState scope(this,
connection_manager_.read_callbacks_->connection().dispatcher());
resetIdleTimer();
// If we previously decided to decode only the headers, do nothing here.
if (state_.decoding_headers_only_) {
return;
}
// If a response is complete or a reset has been sent, filters do not care about further body
// data. Just drop it.
if (state_.local_complete_) {
return;
}
auto trailers_added_entry = decoder_filters_.end();
const bool trailers_exists_at_start = request_trailers_ != nullptr;
// Filter iteration may start at the current filter.
std::list<ActiveStreamDecoderFilterPtr>::iterator entry =
commonDecodePrefix(filter, filter_iteration_start_state);
for (; entry != decoder_filters_.end(); entry++) {
// If the filter pointed by entry has stopped for all frame types, return now.
if (handleDataIfStopAll(**entry, data, state_.decoder_filters_streaming_)) {
return;
}
// If end_stream_ is marked for a filter, the data is not for this filter and filters after.
//
// In following case, ActiveStreamFilterBase::commonContinue() could be called recursively and
// its doData() is called with wrong data.
//
// There are 3 decode filters and "wrapper" refers to ActiveStreamFilter object.
//
// filter0->decodeHeaders(_, true)
// return STOP
// filter0->continueDecoding()
// wrapper0->commonContinue()
// wrapper0->decodeHeaders(_, _, true)
// filter1->decodeHeaders(_, true)
// filter1->addDecodeData()
// return CONTINUE
// filter2->decodeHeaders(_, false)
// return CONTINUE
// wrapper1->commonContinue() // Detects data is added.
// wrapper1->doData()
// wrapper1->decodeData()
// filter2->decodeData(_, true)
// return CONTINUE
// wrapper0->doData() // This should not be called
// wrapper0->decodeData()
// filter1->decodeData(_, true) // It will cause assertions.
//
// One way to solve this problem is to mark end_stream_ for each filter.
// If a filter is already marked as end_stream_ when decodeData() is called, bails out the
// whole function. If just skip the filter, the codes after the loop will be called with
// wrong data. For encodeData, the response_encoder->encode() will be called.
if ((*entry)->end_stream_) {
return;
}
ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeData));
// We check the request_trailers_ pointer here in case addDecodedTrailers
// is called in decodeData during a previous filter invocation, at which point we communicate to
// the current and future filters that the stream has not yet ended.
if (end_stream) {
state_.filter_call_state_ |= FilterCallState::LastDataFrame;
}
recordLatestDataFilter(entry, state_.latest_data_decoding_filter_, decoder_filters_);
state_.filter_call_state_ |= FilterCallState::DecodeData;
(*entry)->end_stream_ = end_stream && !request_trailers_;
FilterDataStatus status = (*entry)->handle_->decodeData(data, (*entry)->end_stream_);
if ((*entry)->end_stream_) {
(*entry)->handle_->decodeComplete();
}
state_.filter_call_state_ &= ~FilterCallState::DecodeData;
if (end_stream) {
state_.filter_call_state_ &= ~FilterCallState::LastDataFrame;
}
ENVOY_STREAM_LOG(trace, "decode data called: filter={} status={}", *this,
static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));
processNewlyAddedMetadata();
if (!trailers_exists_at_start && request_trailers_ &&
trailers_added_entry == decoder_filters_.end()) {
trailers_added_entry = entry;
}
if (!(*entry)->commonHandleAfterDataCallback(status, data, state_.decoder_filters_streaming_) &&
std::next(entry) != decoder_filters_.end()) {
// Stop iteration IFF this is not the last filter. If it is the last filter, continue with
// processing since we need to handle the case where a terminal filter wants to buffer, but
// a previous filter has added trailers.
return;
}
}
// If trailers were adding during decodeData we need to trigger decodeTrailers in order
// to allow filters to process the trailers.
if (trailers_added_entry != decoder_filters_.end()) {
decodeTrailers(trailers_added_entry->get(), *request_trailers_);
}
if (end_stream) {
disarmRequestTimeout();
}
} | 0 | [
"CWE-400"
] | envoy | 0e49a495826ea9e29134c1bd54fdeb31a034f40c | 12,202,286,085,743,331,000,000,000,000,000,000,000 | 112 | http/2: add stats and stream flush timeout (#139)
This commit adds a new stream flush timeout to guard against a
remote server that does not open window once an entire stream has
been buffered for flushing. Additional stats have also been added
to better understand the codecs view of active streams as well as
amount of data buffered.
Signed-off-by: Matt Klein <[email protected]> |
static void check_ept_pointer_match(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
u64 tmp_eptp = INVALID_PAGE;
int i;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (!VALID_PAGE(tmp_eptp)) {
tmp_eptp = to_vmx(vcpu)->ept_pointer;
} else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
to_kvm_vmx(kvm)->ept_pointers_match
= EPT_POINTERS_MISMATCH;
return;
}
}
to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
} | 0 | [
"CWE-787"
] | linux | 04c4f2ee3f68c9a4bf1653d15f1a9a435ae33f7a | 197,912,658,110,050,640,000,000,000,000,000,000,000 | 18 | KVM: VMX: Don't use vcpu->run->internal.ndata as an array index
__vmx_handle_exit() uses vcpu->run->internal.ndata as an index for
an array access. Since vcpu->run is (can be) mapped to a user address
space with a writer permission, the 'ndata' could be updated by the
user process at anytime (the user process can set it to outside the
bounds of the array).
So, it is not safe that __vmx_handle_exit() uses the 'ndata' that way.
Fixes: 1aa561b1a4c0 ("kvm: x86: Add "last CPU" to some KVM_EXIT information")
Signed-off-by: Reiji Watanabe <[email protected]>
Reviewed-by: Jim Mattson <[email protected]>
Message-Id: <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]> |
g_file_read(int fd, char* ptr, int len)
{
#if defined(_WIN32)
if (ReadFile((HANDLE)fd, (LPVOID)ptr, (DWORD)len, (LPDWORD)&len, 0))
{
return len;
}
else
{
return -1;
}
#else
return read(fd, ptr, len);
#endif
} | 0 | [] | xrdp | d8f9e8310dac362bb9578763d1024178f94f4ecc | 67,099,790,627,240,190,000,000,000,000,000,000,000 | 15 | move temp files from /tmp to /tmp/.xrdp |
*to_f_printer_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_printer_opts,
func_inst.group);
} | 0 | [
"CWE-416"
] | linux | e8d5f92b8d30bb4ade76494490c3c065e12411b1 | 338,104,580,324,347,140,000,000,000,000,000,000,000 | 5 | usb: gadget: function: printer: fix use-after-free in __lock_acquire
Fix this by increase object reference count.
BUG: KASAN: use-after-free in __lock_acquire+0x3fd4/0x4180
kernel/locking/lockdep.c:3831
Read of size 8 at addr ffff8880683b0018 by task syz-executor.0/3377
CPU: 1 PID: 3377 Comm: syz-executor.0 Not tainted 5.6.11 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0xce/0x128 lib/dump_stack.c:118
print_address_description.constprop.4+0x21/0x3c0 mm/kasan/report.c:374
__kasan_report+0x131/0x1b0 mm/kasan/report.c:506
kasan_report+0x12/0x20 mm/kasan/common.c:641
__asan_report_load8_noabort+0x14/0x20 mm/kasan/generic_report.c:135
__lock_acquire+0x3fd4/0x4180 kernel/locking/lockdep.c:3831
lock_acquire+0x127/0x350 kernel/locking/lockdep.c:4488
__raw_spin_lock_irqsave include/linux/spinlock_api_smp.h:110 [inline]
_raw_spin_lock_irqsave+0x35/0x50 kernel/locking/spinlock.c:159
printer_ioctl+0x4a/0x110 drivers/usb/gadget/function/f_printer.c:723
vfs_ioctl fs/ioctl.c:47 [inline]
ksys_ioctl+0xfb/0x130 fs/ioctl.c:763
__do_sys_ioctl fs/ioctl.c:772 [inline]
__se_sys_ioctl fs/ioctl.c:770 [inline]
__x64_sys_ioctl+0x73/0xb0 fs/ioctl.c:770
do_syscall_64+0x9e/0x510 arch/x86/entry/common.c:294
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x4531a9
Code: ed 60 fc ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48
89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d
01 f0 ff ff 0f 83 bb 60 fc ff c3 66 2e 0f 1f 84 00 00 00 00
RSP: 002b:00007fd14ad72c78 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
RAX: ffffffffffffffda RBX: 000000000073bfa8 RCX: 00000000004531a9
RDX: fffffffffffffff9 RSI: 000000000000009e RDI: 0000000000000003
RBP: 0000000000000003 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00000000004bbd61
R13: 00000000004d0a98 R14: 00007fd14ad736d4 R15: 00000000ffffffff
Allocated by task 2393:
save_stack+0x21/0x90 mm/kasan/common.c:72
set_track mm/kasan/common.c:80 [inline]
__kasan_kmalloc.constprop.3+0xa7/0xd0 mm/kasan/common.c:515
kasan_kmalloc+0x9/0x10 mm/kasan/common.c:529
kmem_cache_alloc_trace+0xfa/0x2d0 mm/slub.c:2813
kmalloc include/linux/slab.h:555 [inline]
kzalloc include/linux/slab.h:669 [inline]
gprinter_alloc+0xa1/0x870 drivers/usb/gadget/function/f_printer.c:1416
usb_get_function+0x58/0xc0 drivers/usb/gadget/functions.c:61
config_usb_cfg_link+0x1ed/0x3e0 drivers/usb/gadget/configfs.c:444
configfs_symlink+0x527/0x11d0 fs/configfs/symlink.c:202
vfs_symlink+0x33d/0x5b0 fs/namei.c:4201
do_symlinkat+0x11b/0x1d0 fs/namei.c:4228
__do_sys_symlinkat fs/namei.c:4242 [inline]
__se_sys_symlinkat fs/namei.c:4239 [inline]
__x64_sys_symlinkat+0x73/0xb0 fs/namei.c:4239
do_syscall_64+0x9e/0x510 arch/x86/entry/common.c:294
entry_SYSCALL_64_after_hwframe+0x49/0xbe
Freed by task 3368:
save_stack+0x21/0x90 mm/kasan/common.c:72
set_track mm/kasan/common.c:80 [inline]
kasan_set_free_info mm/kasan/common.c:337 [inline]
__kasan_slab_free+0x135/0x190 mm/kasan/common.c:476
kasan_slab_free+0xe/0x10 mm/kasan/common.c:485
slab_free_hook mm/slub.c:1444 [inline]
slab_free_freelist_hook mm/slub.c:1477 [inline]
slab_free mm/slub.c:3034 [inline]
kfree+0xf7/0x410 mm/slub.c:3995
gprinter_free+0x49/0xd0 drivers/usb/gadget/function/f_printer.c:1353
usb_put_function+0x38/0x50 drivers/usb/gadget/functions.c:87
config_usb_cfg_unlink+0x2db/0x3b0 drivers/usb/gadget/configfs.c:485
configfs_unlink+0x3b9/0x7f0 fs/configfs/symlink.c:250
vfs_unlink+0x287/0x570 fs/namei.c:4073
do_unlinkat+0x4f9/0x620 fs/namei.c:4137
__do_sys_unlink fs/namei.c:4184 [inline]
__se_sys_unlink fs/namei.c:4182 [inline]
__x64_sys_unlink+0x42/0x50 fs/namei.c:4182
do_syscall_64+0x9e/0x510 arch/x86/entry/common.c:294
entry_SYSCALL_64_after_hwframe+0x49/0xbe
The buggy address belongs to the object at ffff8880683b0000
which belongs to the cache kmalloc-1k of size 1024
The buggy address is located 24 bytes inside of
1024-byte region [ffff8880683b0000, ffff8880683b0400)
The buggy address belongs to the page:
page:ffffea0001a0ec00 refcount:1 mapcount:0 mapping:ffff88806c00e300
index:0xffff8880683b1800 compound_mapcount: 0
flags: 0x100000000010200(slab|head)
raw: 0100000000010200 0000000000000000 0000000600000001 ffff88806c00e300
raw: ffff8880683b1800 000000008010000a 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
Reported-by: Kyungtae Kim <[email protected]>
Signed-off-by: Zqiang <[email protected]>
Signed-off-by: Felipe Balbi <[email protected]> |
keydb_new (void)
{
KEYDB_HANDLE hd;
int i, j;
if (DBG_CLOCK)
log_clock ("keydb_new");
hd = xmalloc_clear (sizeof *hd);
hd->found = -1;
assert (used_resources <= MAX_KEYDB_RESOURCES);
for (i=j=0; i < used_resources; i++)
{
switch (all_resources[i].type)
{
case KEYDB_RESOURCE_TYPE_NONE: /* ignore */
break;
case KEYDB_RESOURCE_TYPE_KEYRING:
hd->active[j].type = all_resources[i].type;
hd->active[j].token = all_resources[i].token;
hd->active[j].u.kr = keyring_new (all_resources[i].token);
if (!hd->active[j].u.kr) {
xfree (hd);
return NULL; /* fixme: release all previously allocated handles*/
}
j++;
break;
case KEYDB_RESOURCE_TYPE_KEYBOX:
hd->active[j].type = all_resources[i].type;
hd->active[j].token = all_resources[i].token;
hd->active[j].u.kb = keybox_new_openpgp (all_resources[i].token, 0);
if (!hd->active[j].u.kb)
{
xfree (hd);
return NULL; /* fixme: release all previously allocated handles*/
}
j++;
break;
}
}
hd->used = j;
active_handles++;
return hd;
} | 0 | [
"CWE-416"
] | gnupg | f0f71a721ccd7ab9e40b8b6b028b59632c0cc648 | 74,147,028,297,949,480,000,000,000,000,000,000,000 | 46 | gpg: Prevent an invalid memory read using a garbled keyring.
* g10/keyring.c (keyring_get_keyblock): Whitelist allowed packet
types.
* g10/keydb.c (parse_keyblock_image): Ditto.
--
The keyring DB code did not reject packets which don't belong into a
keyring. If for example the keyblock contains a literal data packet
it is expected that the processing code stops at the data packet and
reads from the input stream which is referenced from the data packets.
Obviously the keyring processing code does not and cannot do that.
However, when exporting this messes up the IOBUF and leads to an
invalid read of sizeof (int).
We now skip all packets which are not allowed in a keyring.
Reported-by: Hanno Böck <[email protected]>
Test data:
gpg2 --no-default-keyring --keyring FILE --export >/dev/null
With this unpacked data for FILE:
-----BEGIN PGP ARMORED FILE-----
mI0EVNP2zQEEALvETPVDCJDBXkegF4esiV1fqlne40yJnCmJeDEJYocwFPXfFA86
sSGjInzgDbpbC9gQPwq91Qe9x3Vy81CkyVonPOejhINlzfpzqAAa3A6viJccZTwt
DJ8E/I9jg53sbYW8q+VgfLn1hlggH/XQRT0HkXMP5y9ClURYnTsNwJhXABEBAAGs
CXRlc3QgdGVzdIi5BBMBCgAjBQJU0/bNAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwEC
HgECF4AACgkQlsmuCapsqYLvtQP/byY0tM0Lc3moftbHQZ2eHj9ykLjsCjeMDfPx
kZUUtUS3HQaqgZLZOeqPjM7XgGh5hJsd9pfhmRWJ0x+iGB47XQNpRTtdLBV/WMCS
l5z3uW7e9Md7QVUVuSlJnBgQHTS6EgP8JQadPkAiF+jgpJZXP+gFs2j3gobS0qUF
eyTtxs+wAgAD
=uIt9
-----END PGP ARMORED FILE-----
Signed-off-by: Werner Koch <[email protected]> |
download_data( struct net_device *dev, u32 *crc_p )
{
struct net_local *nl = (struct net_local *) dev->priv;
struct sk_buff *skb = nl->tx_buf_p;
unsigned len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);
outsb( dev->base_addr + DAT, skb->data + nl->outpos, len );
*crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );
/* if packet too short we should write some more bytes to pad */
for( len = nl->framelen - len; len--; )
outb( 0, dev->base_addr + DAT ),
*crc_p = CRC32( 0, *crc_p );
} | 0 | [
"CWE-264"
] | linux-2.6 | f2455eb176ac87081bbfc9a44b21c7cd2bc1967e | 15,169,222,793,996,351,000,000,000,000,000,000,000 | 15 | wan: Missing capability checks in sbni_ioctl()
There are missing capability checks in the following code:
1300 static int
1301 sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd)
1302 {
[...]
1319 case SIOCDEVRESINSTATS :
1320 if( current->euid != 0 ) /* root only */
1321 return -EPERM;
[...]
1336 case SIOCDEVSHWSTATE :
1337 if( current->euid != 0 ) /* root only */
1338 return -EPERM;
[...]
1357 case SIOCDEVENSLAVE :
1358 if( current->euid != 0 ) /* root only */
1359 return -EPERM;
[...]
1372 case SIOCDEVEMANSIPATE :
1373 if( current->euid != 0 ) /* root only */
1374 return -EPERM;
Here's my proposed fix:
Missing capability checks.
Signed-off-by: Eugene Teo <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
void Item_func::count_only_length(Item **item, uint nitems)
{
uint32 char_length= 0;
unsigned_flag= 0;
for (uint i= 0; i < nitems ; i++)
{
set_if_bigger(char_length, item[i]->max_char_length());
set_if_bigger(unsigned_flag, item[i]->unsigned_flag);
}
fix_char_length(char_length);
} | 0 | [
"CWE-120"
] | server | eca207c46293bc72dd8d0d5622153fab4d3fccf1 | 187,795,675,323,742,000,000,000,000,000,000,000,000 | 11 | MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour so problemmatic to ix. |
static inline bool __io_fill_cqe_req_filled(struct io_ring_ctx *ctx,
struct io_kiocb *req)
{
struct io_uring_cqe *cqe;
trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
req->cqe.res, req->cqe.flags, 0, 0);
/*
* If we can't get a cq entry, userspace overflowed the
* submission (by quite a lot). Increment the overflow count in
* the ring.
*/
cqe = io_get_cqe(ctx);
if (likely(cqe)) {
memcpy(cqe, &req->cqe, sizeof(*cqe));
return true;
}
return io_cqring_event_overflow(ctx, req->cqe.user_data,
req->cqe.res, req->cqe.flags, 0, 0);
} | 0 | [
"CWE-416"
] | linux | 9cae36a094e7e9d6e5fe8b6dcd4642138b3eb0c7 | 193,726,020,130,478,300,000,000,000,000,000,000,000 | 21 | io_uring: reinstate the inflight tracking
After some debugging, it was realized that we really do still need the
old inflight tracking for any file type that has io_uring_fops assigned.
If we don't, then trivial circular references will mean that we never get
the ctx cleaned up and hence it'll leak.
Just bring back the inflight tracking, which then also means we can
eliminate the conditional dropping of the file when task_work is queued.
Fixes: d5361233e9ab ("io_uring: drop the old style inflight file tracking")
Signed-off-by: Jens Axboe <[email protected]> |
static void mld_send_initial_cr(struct inet6_dev *idev)
{
struct sk_buff *skb;
struct ifmcaddr6 *pmc;
int type;
if (mld_in_v1_mode(idev))
return;
skb = NULL;
for_each_mc_mclock(idev, pmc) {
if (pmc->mca_sfcount[MCAST_EXCLUDE])
type = MLD2_CHANGE_TO_EXCLUDE;
else
type = MLD2_ALLOW_NEW_SOURCES;
skb = add_grec(skb, pmc, type, 0, 0, 1);
}
if (skb)
mld_sendpack(skb);
} | 0 | [
"CWE-703"
] | linux | 2d3916f3189172d5c69d33065c3c21119fe539fc | 214,669,226,462,940,620,000,000,000,000,000,000,000 | 20 | ipv6: fix skb drops in igmp6_event_query() and igmp6_event_report()
While investigating on why a synchronize_net() has been added recently
in ipv6_mc_down(), I found that igmp6_event_query() and igmp6_event_report()
might drop skbs in some cases.
Discussion about removing synchronize_net() from ipv6_mc_down()
will happen in a different thread.
Fixes: f185de28d9ae ("mld: add new workqueues for process mld events")
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Taehee Yoo <[email protected]>
Cc: Cong Wang <[email protected]>
Cc: David Ahern <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]> |
card_start( struct net_device *dev )
{
struct net_local *nl = (struct net_local *) dev->priv;
nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
nl->state |= FL_PREV_OK;
nl->inppos = nl->outpos = 0;
nl->wait_frameno = 0;
nl->tx_frameno = 0;
nl->framelen = 0;
outb( *(u_char *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
outb( EN_INT, dev->base_addr + CSR0 );
} | 0 | [
"CWE-264"
] | linux-2.6 | f2455eb176ac87081bbfc9a44b21c7cd2bc1967e | 53,319,060,515,251,240,000,000,000,000,000,000,000 | 16 | wan: Missing capability checks in sbni_ioctl()
There are missing capability checks in the following code:
1300 static int
1301 sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd)
1302 {
[...]
1319 case SIOCDEVRESINSTATS :
1320 if( current->euid != 0 ) /* root only */
1321 return -EPERM;
[...]
1336 case SIOCDEVSHWSTATE :
1337 if( current->euid != 0 ) /* root only */
1338 return -EPERM;
[...]
1357 case SIOCDEVENSLAVE :
1358 if( current->euid != 0 ) /* root only */
1359 return -EPERM;
[...]
1372 case SIOCDEVEMANSIPATE :
1373 if( current->euid != 0 ) /* root only */
1374 return -EPERM;
Here's my proposed fix:
Missing capability checks.
Signed-off-by: Eugene Teo <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
MONGO_EXPORT const bson *mongo_cursor_bson( mongo_cursor *cursor ) {
return (const bson *)&(cursor->current);
} | 0 | [
"CWE-190"
] | mongo-c-driver-legacy | 1a1f5e26a4309480d88598913f9eebf9e9cba8ca | 38,353,095,436,773,250,000,000,000,000,000,000,000 | 3 | don't mix up int and size_t (first pass to fix that) |
unsigned long cma_get_size(const struct cma *cma)
{
return cma->count << PAGE_SHIFT;
} | 0 | [
"CWE-682"
] | linux | 67a2e213e7e937c41c52ab5bc46bf3f4de469f6e | 281,242,572,737,654,780,000,000,000,000,000,000,000 | 4 | mm: cma: fix incorrect type conversion for size during dma allocation
This was found during userspace fuzzing test when a large size dma cma
allocation is made by driver(like ion) through userspace.
show_stack+0x10/0x1c
dump_stack+0x74/0xc8
kasan_report_error+0x2b0/0x408
kasan_report+0x34/0x40
__asan_storeN+0x15c/0x168
memset+0x20/0x44
__dma_alloc_coherent+0x114/0x18c
Signed-off-by: Rohit Vaswani <[email protected]>
Acked-by: Greg Kroah-Hartman <[email protected]>
Cc: Marek Szyprowski <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
EXPORTED int mboxlist_alluser(user_cb *proc, void *rock)
{
struct alluser_rock urock;
int r = 0;
urock.prev = NULL;
urock.proc = proc;
urock.rock = rock;
r = mboxlist_allmbox(NULL, alluser_cb, &urock, /*flags*/0);
free(urock.prev);
return r;
} | 0 | [
"CWE-20"
] | cyrus-imapd | 6bd33275368edfa71ae117de895488584678ac79 | 21,889,173,868,680,580,000,000,000,000,000,000,000 | 11 | mboxlist: fix uninitialised memory use where pattern is "Other Users" |
static OPJ_BOOL opj_j2k_read_ppt ( opj_j2k_t *p_j2k,
OPJ_BYTE * p_header_data,
OPJ_UINT32 p_header_size,
opj_event_mgr_t * p_manager
)
{
opj_cp_t *l_cp = 00;
opj_tcp_t *l_tcp = 00;
OPJ_UINT32 l_Z_ppt;
/* preconditions */
assert(p_header_data != 00);
assert(p_j2k != 00);
assert(p_manager != 00);
/* We need to have the Z_ppt element + 1 byte of Ippt at minimum */
if (p_header_size < 2) {
opj_event_msg(p_manager, EVT_ERROR, "Error reading PPT marker\n");
return OPJ_FALSE;
}
l_cp = &(p_j2k->m_cp);
if (l_cp->ppm){
opj_event_msg(p_manager, EVT_ERROR, "Error reading PPT marker: packet header have been previously found in the main header (PPM marker).\n");
return OPJ_FALSE;
}
l_tcp = &(l_cp->tcps[p_j2k->m_current_tile_number]);
l_tcp->ppt = 1;
opj_read_bytes(p_header_data,&l_Z_ppt,1); /* Z_ppt */
++p_header_data;
--p_header_size;
/* check allocation needed */
if (l_tcp->ppt_markers == NULL) { /* first PPT marker */
OPJ_UINT32 l_newCount = l_Z_ppt + 1U; /* can't overflow, l_Z_ppt is UINT8 */
assert(l_tcp->ppt_markers_count == 0U);
l_tcp->ppt_markers = (opj_ppx *) opj_calloc(l_newCount, sizeof(opj_ppx));
if (l_tcp->ppt_markers == NULL) {
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPT marker\n");
return OPJ_FALSE;
}
l_tcp->ppt_markers_count = l_newCount;
} else if (l_tcp->ppt_markers_count <= l_Z_ppt) {
OPJ_UINT32 l_newCount = l_Z_ppt + 1U; /* can't overflow, l_Z_ppt is UINT8 */
opj_ppx *new_ppt_markers;
new_ppt_markers = (opj_ppx *) opj_realloc(l_tcp->ppt_markers, l_newCount * sizeof(opj_ppx));
if (new_ppt_markers == NULL) {
/* clean up to be done on l_tcp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPT marker\n");
return OPJ_FALSE;
}
l_tcp->ppt_markers = new_ppt_markers;
memset(l_tcp->ppt_markers + l_tcp->ppt_markers_count, 0, (l_newCount - l_tcp->ppt_markers_count) * sizeof(opj_ppx));
l_tcp->ppt_markers_count = l_newCount;
}
if (l_tcp->ppt_markers[l_Z_ppt].m_data != NULL) {
/* clean up to be done on l_tcp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Zppt %u already read\n", l_Z_ppt);
return OPJ_FALSE;
}
l_tcp->ppt_markers[l_Z_ppt].m_data = opj_malloc(p_header_size);
if (l_tcp->ppt_markers[l_Z_ppt].m_data == NULL) {
/* clean up to be done on l_tcp destruction */
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to read PPT marker\n");
return OPJ_FALSE;
}
l_tcp->ppt_markers[l_Z_ppt].m_data_size = p_header_size;
memcpy(l_tcp->ppt_markers[l_Z_ppt].m_data, p_header_data, p_header_size);
return OPJ_TRUE;
} | 0 | [
"CWE-416"
] | openjpeg | 940100c28ae28931722290794889cf84a92c5f6f | 118,442,646,644,080,460,000,000,000,000,000,000,000 | 75 | Fix potential use-after-free in opj_j2k_write_mco function
Fixes #563 |
static char *find_conversion(const SERVER_REC *server, const char *target)
{
char *conv = NULL;
if (server != NULL && target != NULL) {
char *tagtarget = g_strdup_printf("%s/%s", server->tag, target);
conv = iconfig_get_str("conversions", tagtarget, NULL);
g_free(tagtarget);
}
if (conv == NULL && target != NULL)
conv = iconfig_get_str("conversions", target, NULL);
if (conv == NULL && server != NULL)
conv = iconfig_get_str("conversions", server->tag, NULL);
return conv;
} | 0 | [
"CWE-416"
] | irssi | 43e44d553d44e313003cee87e6ea5e24d68b84a1 | 90,657,451,829,480,050,000,000,000,000,000,000,000 | 15 | Merge branch 'security' into 'master'
Security
Closes GL#12, GL#13, GL#14, GL#15, GL#16
See merge request irssi/irssi!23 |
static UChar32 leftArrowKeyRoutine(UChar32 c) {
return thisKeyMetaCtrl | LEFT_ARROW_KEY;
} | 0 | [
"CWE-200"
] | mongo | 035cf2afc04988b22cb67f4ebfd77e9b344cb6e0 | 320,498,531,478,067,300,000,000,000,000,000,000,000 | 3 | SERVER-25335 avoid group and other permissions when creating .dbshell history file |
prologInitProcessor(XML_Parser parser,
const char *s,
const char *end,
const char **nextPtr)
{
enum XML_Error result = initializeEncoding(parser);
if (result != XML_ERROR_NONE)
return result;
processor = prologProcessor;
return prologProcessor(parser, s, end, nextPtr);
} | 0 | [
"CWE-119"
] | libexpat | ba0f9c3b40c264b8dd392e02a7a060a8fa54f032 | 67,023,122,293,533,180,000,000,000,000,000,000,000 | 11 | CVE-2015-1283 Sanity check size calculations. r=peterv, a=abillings
https://sourceforge.net/p/expat/bugs/528/ |
static int rev_compare_tree(struct rev_info *revs,
struct commit *parent, struct commit *commit)
{
struct tree *t1 = parent->tree;
struct tree *t2 = commit->tree;
if (!t1)
return REV_TREE_NEW;
if (!t2)
return REV_TREE_OLD;
if (revs->simplify_by_decoration) {
/*
* If we are simplifying by decoration, then the commit
* is worth showing if it has a tag pointing at it.
*/
if (get_name_decoration(&commit->object))
return REV_TREE_DIFFERENT;
/*
* A commit that is not pointed by a tag is uninteresting
* if we are not limited by path. This means that you will
* see the usual "commits that touch the paths" plus any
* tagged commit by specifying both --simplify-by-decoration
* and pathspec.
*/
if (!revs->prune_data.nr)
return REV_TREE_SAME;
}
tree_difference = REV_TREE_SAME;
DIFF_OPT_CLR(&revs->pruning, HAS_CHANGES);
if (diff_tree_sha1(t1->object.oid.hash, t2->object.oid.hash, "",
&revs->pruning) < 0)
return REV_TREE_DIFFERENT;
return tree_difference;
} | 0 | [] | git | a937b37e766479c8e780b17cce9c4b252fd97e40 | 176,876,557,274,830,100,000,000,000,000,000,000,000 | 36 | revision: quit pruning diff more quickly when possible
When the revision traversal machinery is given a pathspec,
we must compute the parent-diff for each commit to determine
which ones are TREESAME. We set the QUICK diff flag to avoid
looking at more entries than we need; we really just care
whether there are any changes at all.
But there is one case where we want to know a bit more: if
--remove-empty is set, we care about finding cases where the
change consists only of added entries (in which case we may
prune the parent in try_to_simplify_commit()). To cover that
case, our file_add_remove() callback does not quit the diff
upon seeing an added entry; it keeps looking for other types
of entries.
But this means when --remove-empty is not set (and it is not
by default), we compute more of the diff than is necessary.
You can see this in a pathological case where a commit adds
a very large number of entries, and we limit based on a
broad pathspec. E.g.:
perl -e '
chomp(my $blob = `git hash-object -w --stdin </dev/null`);
for my $a (1..1000) {
for my $b (1..1000) {
print "100644 $blob\t$a/$b\n";
}
}
' | git update-index --index-info
git commit -qm add
git rev-list HEAD -- .
This case takes about 100ms now, but after this patch only
needs 6ms. That's not a huge improvement, but it's easy to
get and it protects us against even more pathological cases
(e.g., going from 1 million to 10 million files would take
ten times as long with the current code, but not increase at
all after this patch).
This is reported to minorly speed-up pathspec limiting in
real world repositories (like the 100-million-file Windows
repository), but probably won't make a noticeable difference
outside of pathological setups.
This patch actually covers the case without --remove-empty,
and the case where we see only deletions. See the in-code
comment for details.
Note that we have to add a new member to the diff_options
struct so that our callback can see the value of
revs->remove_empty_trees. This callback parameter could be
passed to the "add_remove" and "change" callbacks, but
there's not much point. They already receive the
diff_options struct, and doing it this way avoids having to
update the function signature of the other callbacks
(arguably the format_callback and output_prefix functions
could benefit from the same simplification).
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]> |
Tfloat linear_atXY_p(const float fx, const float fy, const int z=0, const int c=0) const {
if (is_empty())
throw CImgInstanceException(_cimg_instance
"linear_atXY_p(): Empty instance.",
cimg_instance);
return _linear_atXY_p(fx,fy,z,c);
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 73,962,874,262,486,290,000,000,000,000,000,000,000 | 8 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
static unsigned long fli_read_long(FILE *f)
{
unsigned char b[4];
fread(&b,1,4,f);
return (unsigned long)(b[3]<<24) | (b[2]<<16) | (b[1]<<8) | b[0];
} | 0 | [
"CWE-787"
] | GIMP | edb251a7ef1602d20a5afcbf23f24afb163de63b | 52,315,749,696,843,860,000,000,000,000,000,000,000 | 6 | Bug 739133 - (CVE-2017-17785) Heap overflow while parsing FLI files.
It is possible to trigger a heap overflow while parsing FLI files. The
RLE decoder is vulnerable to out of boundary writes due to lack of
boundary checks.
The variable "framebuf" points to a memory area which was allocated
with fli_header->width * fli_header->height bytes. The RLE decoder
therefore must never write beyond that limit.
If an illegal frame is detected, the parser won't stop, which means
that the next valid sequence is properly parsed again. This should
allow GIMP to parse FLI files as good as possible even if they are
broken by an attacker or by accident.
While at it, I changed the variable xc to be of type size_t, because
the multiplication of width and height could overflow a 16 bit type.
Signed-off-by: Tobias Stoeckmann <[email protected]> |
virtual bool is_next_file_to_upload() {
return false;
} | 0 | [
"CWE-770"
] | ceph | ab29bed2fc9f961fe895de1086a8208e21ddaddc | 68,848,095,365,572,630,000,000,000,000,000,000,000 | 3 | rgw: fix issues with 'enforce bounds' patch
The patch to enforce bounds on max-keys/max-uploads/max-parts had a few
issues that would prevent us from compiling it. Instead of changing the
code provided by the submitter, we're addressing them in a separate
commit to maintain the DCO.
Signed-off-by: Joao Eduardo Luis <[email protected]>
Signed-off-by: Abhishek Lekshmanan <[email protected]>
(cherry picked from commit 29bc434a6a81a2e5c5b8cfc4c8d5c82ca5bf538a)
mimic specific fixes:
As the largeish change from master g_conf() isn't in mimic yet, use the g_conf
global structure, also make rgw_op use the value from req_info ceph context as
we do for all the requests |
DeepTiledInputFile::numLevels () const
{
if (levelMode() == RIPMAP_LEVELS)
THROW (IEX_NAMESPACE::LogicExc, "Error calling numLevels() on image "
"file \"" << fileName() << "\" "
"(numLevels() is not defined for files "
"with RIPMAP level mode).");
return _data->numXLevels;
} | 0 | [
"CWE-125"
] | openexr | e79d2296496a50826a15c667bf92bdc5a05518b4 | 267,823,336,831,691,700,000,000,000,000,000,000,000 | 10 | fix memory leaks and invalid memory accesses
Signed-off-by: Peter Hillman <[email protected]> |
void lftp_ssl_base::set_cert_error(const char *s)
{
bool verify=ResMgr::QueryBool("ssl:verify-certificate",hostname);
const char *const warn=verify?"ERROR":"WARNING";
Log::global->Format(0,"%s: Certificate verification: %s\n",warn,s);
if(verify && !error)
{
set_error("Certificate verification",s);
fatal=true;
cert_error=true;
}
} | 0 | [
"CWE-310"
] | lftp | 6357bed2583171b7515af6bb6585cf56d2117e3f | 287,999,831,442,904,500,000,000,000,000,000,000,000 | 12 | use hostmatch function from latest curl (addresses CVE-2014-0139) |
const uint8_t* decodeVarint(const uint8_t* pos, const uint8_t* end, uint32_t* out) {
uint32_t ret = 0;
int shift = 0;
while (pos < end && (*pos & 0x80)) {
ret |= (*pos & 0x7f) << shift;
shift += 7;
pos++;
}
if (pos < end) {
ret |= *pos << shift;
pos++;
}
*out = ret;
return pos;
} | 0 | [
"CWE-476"
] | envoy | 8788a3cf255b647fd14e6b5e2585abaaedb28153 | 213,889,875,998,904,670,000,000,000,000,000,000,000 | 15 | 1.4 - Do not call into the VM unless the VM Context has been created. (#24)
* Ensure that the in VM Context is created before onDone is called.
Signed-off-by: John Plevyak <[email protected]>
* Update as per offline discussion.
Signed-off-by: John Plevyak <[email protected]>
* Set in_vm_context_created_ in onNetworkNewConnection.
Signed-off-by: John Plevyak <[email protected]>
* Add guards to other network calls.
Signed-off-by: John Plevyak <[email protected]>
* Fix common/wasm tests.
Signed-off-by: John Plevyak <[email protected]>
* Patch tests.
Signed-off-by: John Plevyak <[email protected]>
* Remove unecessary file from cherry-pick.
Signed-off-by: John Plevyak <[email protected]> |
int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
const char *name, unsigned int flags,
struct path *path)
{
struct path root = {.mnt = mnt, .dentry = dentry};
/* the first argument of filename_lookup() is ignored with root */
return filename_lookup(AT_FDCWD, getname_kernel(name),
flags , path, &root);
} | 0 | [
"CWE-284"
] | linux | 9409e22acdfc9153f88d9b1ed2bd2a5b34d2d3ca | 313,868,203,443,789,030,000,000,000,000,000,000,000 | 9 | vfs: rename: check backing inode being equal
If a file is renamed to a hardlink of itself POSIX specifies that rename(2)
should do nothing and return success.
This condition is checked in vfs_rename(). However it won't detect hard
links on overlayfs where these are given separate inodes on the overlayfs
layer.
Overlayfs itself detects this condition and returns success without doing
anything, but then vfs_rename() will proceed as if this was a successful
rename (detach_mounts(), d_move()).
The correct thing to do is to detect this condition before even calling
into overlayfs. This patch does this by calling vfs_select_inode() to get
the underlying inodes.
Signed-off-by: Miklos Szeredi <[email protected]>
Cc: <[email protected]> # v4.2+ |
BSONObj spec() {
return BSON("$and" << BSON_ARRAY(false));
} | 0 | [
"CWE-835"
] | mongo | 0a076417d1d7fba3632b73349a1fd29a83e68816 | 64,720,615,724,573,140,000,000,000,000,000,000,000 | 3 | SERVER-38070 fix infinite loop in agg expression |
onig_copy_encoding(OnigEncoding to, OnigEncoding from)
{
*to = *from;
} | 0 | [
"CWE-125"
] | oniguruma | 690313a061f7a4fa614ec5cc8368b4f2284e059b | 150,263,257,304,803,400,000,000,000,000,000,000,000 | 4 | fix #57 : DATA_ENSURE() check must be before data access |
set_num_712(unsigned char *p, char value)
{
*((char *)p) = value;
} | 0 | [
"CWE-190"
] | libarchive | 3014e19820ea53c15c90f9d447ca3e668a0b76c6 | 115,032,655,388,252,050,000,000,000,000,000,000,000 | 4 | Issue 711: Be more careful about verifying filename lengths when writing ISO9660 archives
* Don't cast size_t to int, since this can lead to overflow
on machines where sizeof(int) < sizeof(size_t)
* Check a + b > limit by writing it as
a > limit || b > limit || a + b > limit
to avoid problems when a + b wraps around. |
append_opt(char *s, const char *opt, const char *val)
{
if (!opt)
return s;
if (!s) {
if (!val)
return xstrdup(opt); /* opt */
return xstrconcat3(NULL, opt, val); /* opt=val */
}
if (!val)
return xstrconcat3(s, ",", opt); /* s,opt */
return xstrconcat4(s, ",", opt, val); /* s,opt=val */
} | 0 | [
"CWE-200"
] | util-linux | 0377ef91270d06592a0d4dd009c29e7b1ff9c9b8 | 1,756,430,711,982,231,000,000,000,000,000,000,000 | 15 | mount: (deprecated) drop --guess-fstype
The option is undocumented and unnecessary.
Signed-off-by: Karel Zak <[email protected]> |
double64_le_write (double in, unsigned char *out)
{ int exponent, mantissa ;
memset (out, 0, sizeof (double)) ;
if (fabs (in) < 1e-30)
return ;
if (in < 0.0)
{ in *= -1.0 ;
out [7] |= 0x80 ;
} ;
in = frexp (in, &exponent) ;
exponent += 1022 ;
out [7] |= (exponent >> 4) & 0x7F ;
out [6] |= (exponent << 4) & 0xF0 ;
in *= 0x20000000 ;
mantissa = lrint (floor (in)) ;
out [6] |= (mantissa >> 24) & 0xF ;
out [5] = (mantissa >> 16) & 0xFF ;
out [4] = (mantissa >> 8) & 0xFF ;
out [3] = mantissa & 0xFF ;
in = fmod (in, 1.0) ;
in *= 0x1000000 ;
mantissa = lrint (floor (in)) ;
out [2] = (mantissa >> 16) & 0xFF ;
out [1] = (mantissa >> 8) & 0xFF ;
out [0] = mantissa & 0xFF ;
return ;
} /* double64_le_write */ | 0 | [
"CWE-369"
] | libsndfile | 85c877d5072866aadbe8ed0c3e0590fbb5e16788 | 288,613,418,754,216,640,000,000,000,000,000,000,000 | 38 | double64_init: Check psf->sf.channels against upper bound
This prevents division by zero later in the code.
While the trivial case to catch this (i.e. sf.channels < 1) has already
been covered, a crafted file may report a number of channels that is
so high (i.e. > INT_MAX/sizeof(double)) that it "somehow" gets
miscalculated to zero (if this makes sense) in the determination of the
blockwidth. Since we only support a limited number of channels anyway,
make sure to check here as well.
CVE-2017-14634
Closes: https://github.com/erikd/libsndfile/issues/318
Signed-off-by: Erik de Castro Lopo <[email protected]> |
static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
{
if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
/* ATR uses the same filtering logic as SB rules. It only
* functions properly if the input set mask is at the default
* settings. It is safe to restore the default input set
* because there are no active TCPv4 filter rules.
*/
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
}
} | 0 | [
"CWE-400",
"CWE-401"
] | linux | 27d461333459d282ffa4a2bdb6b215a59d493a8f | 285,717,879,842,811,740,000,000,000,000,000,000,000 | 17 | i40e: prevent memory leak in i40e_setup_macvlans
In i40e_setup_macvlans if i40e_setup_channel fails the allocated memory
for ch should be released.
Signed-off-by: Navid Emamdoost <[email protected]>
Tested-by: Andrew Bowers <[email protected]>
Signed-off-by: Jeff Kirsher <[email protected]> |
xmlSchemaGetComponentQName(xmlChar **buf,
void *item)
{
return (xmlSchemaFormatQName(buf,
xmlSchemaGetComponentTargetNs((xmlSchemaBasicItemPtr) item),
xmlSchemaGetComponentName((xmlSchemaBasicItemPtr) item)));
} | 0 | [
"CWE-134"
] | libxml2 | 4472c3a5a5b516aaf59b89be602fbce52756c3e9 | 332,671,199,305,603,970,000,000,000,000,000,000,000 | 7 | Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports. |
int cil_resolve_userprefix(struct cil_tree_node *current, void *extra_args)
{
struct cil_userprefix *userprefix = current->data;
struct cil_symtab_datum *user_datum = NULL;
struct cil_tree_node *user_node = NULL;
int rc = SEPOL_ERR;
rc = cil_resolve_name(current, userprefix->user_str, CIL_SYM_USERS, extra_args, &user_datum);
if (rc != SEPOL_OK) {
goto exit;
}
user_node = NODE(user_datum);
if (user_node->flavor != CIL_USER) {
cil_log(CIL_ERR, "Userprefix must be a user: %s\n", user_datum->fqn);
rc = SEPOL_ERR;
goto exit;
}
userprefix->user = (struct cil_user*)user_datum;
exit:
return rc;
} | 0 | [
"CWE-125"
] | selinux | 340f0eb7f3673e8aacaf0a96cbfcd4d12a405521 | 111,789,474,804,201,940,000,000,000,000,000,000,000 | 25 | libsepol/cil: Check for statements not allowed in optional blocks
While there are some checks for invalid statements in an optional
block when resolving the AST, there are no checks when building the
AST.
OSS-Fuzz found the following policy which caused a null dereference
in cil_tree_get_next_path().
(blockinherit b3)
(sid SID)
(sidorder(SID))
(optional o
(ibpkeycon :(1 0)s)
(block b3
(filecon""block())
(filecon""block())))
The problem is that the blockinherit copies block b3 before
the optional block is disabled. When the optional is disabled,
block b3 is deleted along with everything else in the optional.
Later, when filecon statements with the same path are found an
error message is produced and in trying to find out where the block
was copied from, the reference to the deleted block is used. The
error handling code assumes (rightly) that if something was copied
from a block then that block should still exist.
It is clear that in-statements, blocks, and macros cannot be in an
optional, because that allows nodes to be copied from the optional
block to somewhere outside even though the optional could be disabled
later. When optionals are disabled the AST is reset and the
resolution is restarted at the point of resolving macro calls, so
anything resolved before macro calls will never be re-resolved.
This includes tunableifs, in-statements, blockinherits,
blockabstracts, and macro definitions. Tunable declarations also
cannot be in an optional block because they are needed to resolve
tunableifs. It should be fine to allow blockinherit statements in
an optional, because that is copying nodes from outside the optional
to the optional and if the optional is later disabled, everything
will be deleted anyway.
Check and quit with an error if a tunable declaration, in-statement,
block, blockabstract, or macro definition is found within an
optional when either building or resolving the AST.
Signed-off-by: James Carter <[email protected]> |
**/
int window_x() const {
return _window_x; | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 183,609,786,632,527,600,000,000,000,000,000,000,000 | 3 | Fix other issues in 'CImg<T>::load_bmp()'. |
AP_DECLARE(int) ap_getline(char *s, int n, request_rec *r, int flags)
{
char *tmp_s = s;
apr_status_t rv;
apr_size_t len;
apr_bucket_brigade *tmp_bb;
if (n < 1) {
/* Can't work since we always NUL terminate */
return -1;
}
tmp_bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
rv = ap_rgetline(&tmp_s, n, &len, r, flags, tmp_bb);
apr_brigade_destroy(tmp_bb);
/* Map the out-of-space condition to the old API. */
if (rv == APR_ENOSPC) {
return n;
}
/* Anything else is just bad. */
if (rv != APR_SUCCESS) {
return -1;
}
return (int)len;
} | 0 | [] | httpd | ecebcc035ccd8d0e2984fe41420d9e944f456b3c | 194,230,737,327,922,240,000,000,000,000,000,000,000 | 28 | Merged r1734009,r1734231,r1734281,r1838055,r1838079,r1840229,r1876664,r1876674,r1876784,r1879078,r1881620,r1887311,r1888871 from trunk:
*) core: Split ap_create_request() from ap_read_request(). [Graham Leggett]
*) core, h2: common ap_parse_request_line() and ap_check_request_header()
code. [Yann Ylavic]
*) core: Add StrictHostCheck to allow unconfigured hostnames to be
rejected. [Eric Covener]
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1890245 13f79535-47bb-0310-9956-ffa450edef68 |
static int ethtool_get_sg(struct net_device *dev, char __user *useraddr)
{
struct ethtool_value edata = { ETHTOOL_GSG };
if (!dev->ethtool_ops->get_sg)
return -EOPNOTSUPP;
edata.data = dev->ethtool_ops->get_sg(dev);
if (copy_to_user(useraddr, &edata, sizeof(edata)))
return -EFAULT;
return 0;
} | 0 | [] | linux | e89e9cf539a28df7d0eb1d0a545368e9920b34ac | 149,244,057,656,677,690,000,000,000,000,000,000,000 | 13 | [IPv4/IPv6]: UFO Scatter-gather approach
Attached is kernel patch for UDP Fragmentation Offload (UFO) feature.
1. This patch incorporate the review comments by Jeff Garzik.
2. Renamed USO as UFO (UDP Fragmentation Offload)
3. udp sendfile support with UFO
This patches uses scatter-gather feature of skb to generate large UDP
datagram. Below is a "how-to" on changes required in network device
driver to use the UFO interface.
UDP Fragmentation Offload (UFO) Interface:
-------------------------------------------
UFO is a feature wherein the Linux kernel network stack will offload the
IP fragmentation functionality of large UDP datagram to hardware. This
will reduce the overhead of stack in fragmenting the large UDP datagram to
MTU sized packets
1) Drivers indicate their capability of UFO using
dev->features |= NETIF_F_UFO | NETIF_F_HW_CSUM | NETIF_F_SG
NETIF_F_HW_CSUM is required for UFO over ipv6.
2) UFO packet will be submitted for transmission using driver xmit routine.
UFO packet will have a non-zero value for
"skb_shinfo(skb)->ufo_size"
skb_shinfo(skb)->ufo_size will indicate the length of data part in each IP
fragment going out of the adapter after IP fragmentation by hardware.
skb->data will contain MAC/IP/UDP header and skb_shinfo(skb)->frags[]
contains the data payload. The skb->ip_summed will be set to CHECKSUM_HW
indicating that hardware has to do checksum calculation. Hardware should
compute the UDP checksum of complete datagram and also ip header checksum of
each fragmented IP packet.
For IPV6 the UFO provides the fragment identification-id in
skb_shinfo(skb)->ip6_frag_id. The adapter should use this ID for generating
IPv6 fragments.
Signed-off-by: Ananda Raju <[email protected]>
Signed-off-by: Rusty Russell <[email protected]> (forwarded)
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]> |
static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
const unsigned char *nh = skb_network_header(skb);
if (nh[optoff + 1] == 2) {
IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
return true;
}
net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n",
nh[optoff + 1]);
kfree_skb(skb);
return false;
} | 0 | [
"CWE-416",
"CWE-284",
"CWE-264"
] | linux | 45f6fad84cc305103b28d73482b344d7f5b76f39 | 154,749,292,740,750,080,000,000,000,000,000,000,000 | 14 | ipv6: add complete rcu protection around np->opt
This patch addresses multiple problems :
UDP/RAW sendmsg() need to get a stable struct ipv6_txoptions
while socket is not locked : Other threads can change np->opt
concurrently. Dmitry posted a syzkaller
(http://github.com/google/syzkaller) program desmonstrating
use-after-free.
Starting with TCP/DCCP lockless listeners, tcp_v6_syn_recv_sock()
and dccp_v6_request_recv_sock() also need to use RCU protection
to dereference np->opt once (before calling ipv6_dup_options())
This patch adds full RCU protection to np->opt
Reported-by: Dmitry Vyukov <[email protected]>
Signed-off-by: Eric Dumazet <[email protected]>
Acked-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
srs_parse_shortcut(srs_t *srs, char *buf, unsigned buflen, char *senduser)
{
char *srshash;
char *srsstamp;
char *srshost;
char *srsuser;
int ret;
if (strncasecmp(senduser, SRS0TAG, 4) == 0) {
srshash = senduser + 5;
if (!STRINGP(srshash))
return SRS_ENOSRS0HASH;
srsstamp = strchr(srshash, SRSSEP);
if (!STRINGP(srsstamp))
return SRS_ENOSRS0STAMP;
*srsstamp++ = '\0';
srshost = strchr(srsstamp, SRSSEP);
if (!STRINGP(srshost))
return SRS_ENOSRS0HOST;
*srshost++ = '\0';
srsuser = strchr(srshost, SRSSEP);
if (!STRINGP(srsuser))
return SRS_ENOSRS0USER;
*srsuser++ = '\0';
ret = srs_timestamp_check(srs, srsstamp);
if (ret != SRS_SUCCESS)
return ret;
ret = srs_hash_check(srs, srshash, 3, srsstamp,
srshost, srsuser);
if (ret != SRS_SUCCESS)
return ret;
snprintf(buf, buflen, "%s@%s", srsuser, srshost);
return SRS_SUCCESS;
}
return SRS_ENOTSRSADDRESS;
} | 0 | [
"CWE-400",
"CWE-703",
"CWE-834"
] | postsrsd | 4733fb11f6bec6524bb8518c5e1a699288c26bac | 85,081,223,926,989,970,000,000,000,000,000,000,000 | 37 | SECURITY: Fix potential denial of service attack against PostSRSd
I discovered that PostSRSd could be tricked into consuming a lot of CPU
time with an SRS address that has an excessively long time stamp tag,
e.g.
SRS0=HHHH=TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT=0@example.com |
num_stmts(const node *n)
{
int i, l;
node *ch;
switch (TYPE(n)) {
case single_input:
if (TYPE(CHILD(n, 0)) == NEWLINE)
return 0;
else
return num_stmts(CHILD(n, 0));
case file_input:
l = 0;
for (i = 0; i < NCH(n); i++) {
ch = CHILD(n, i);
if (TYPE(ch) == stmt)
l += num_stmts(ch);
}
return l;
case stmt:
return num_stmts(CHILD(n, 0));
case compound_stmt:
return 1;
case simple_stmt:
return NCH(n) / 2; /* Divide by 2 to remove count of semi-colons */
case suite:
if (NCH(n) == 1)
return num_stmts(CHILD(n, 0));
else {
l = 0;
for (i = 2; i < (NCH(n) - 1); i++)
l += num_stmts(CHILD(n, i));
return l;
}
default: {
char buf[128];
sprintf(buf, "Non-statement found: %d %d",
TYPE(n), NCH(n));
Py_FatalError(buf);
}
}
Py_UNREACHABLE();
} | 1 | [
"CWE-125"
] | cpython | dcfcd146f8e6fc5c2fc16a4c192a0c5f5ca8c53c | 79,279,365,935,195,720,000,000,000,000,000,000,000 | 44 | bpo-35766: Merge typed_ast back into CPython (GH-11645) |
static ssize_t description_show(struct mdev_type *mtype,
struct mdev_type_attribute *attr, char *buf)
{
const struct mbochs_type *type =
&mbochs_types[mtype_get_type_group_id(mtype)];
return sprintf(buf, "virtual display, %d MB video memory\n",
type ? type->mbytes : 0);
} | 0 | [
"CWE-200",
"CWE-401"
] | linux | de5494af4815a4c9328536c72741229b7de88e7f | 318,294,626,200,601,660,000,000,000,000,000,000,000 | 9 | vfio/mbochs: Fix missing error unwind of mbochs_used_mbytes
Convert mbochs to use an atomic scheme for this like mtty was changed
into. The atomic fixes various race conditions with probing. Add the
missing error unwind. Also add the missing kfree of mdev_state->pages.
Fixes: 681c1615f891 ("vfio/mbochs: Convert to use vfio_register_group_dev()")
Reported-by: Cornelia Huck <[email protected]>
Co-developed-by: Alex Williamson <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
Reviewed-by: Cornelia Huck <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alex Williamson <[email protected]> |
pch_line_len (lin line)
{
return p_len[line];
} | 0 | [
"CWE-59"
] | patch | 44a987e02f04b9d81a0db4a611145cad1093a2d3 | 304,279,172,750,217,730,000,000,000,000,000,000,000 | 4 | Add line number overflow checking
* bootstrap.conf: use intprops module.
* src/common.h: Define LINENUM_MIN and LINENUM_MAX macros.
* src/pch.c (another_hunk): Add line number overflow checking. Based on Robert
C. Seacord's INT32-C document for integer overflow checking and Tobias
Stoeckmann's "integer overflows and oob memory access" patch for FreeBSD. |
void responseAction(StreamState& state,
const test::common::http::ResponseAction& response_action) {
const bool end_stream = response_action.end_stream();
switch (response_action.response_action_selector_case()) {
case test::common::http::ResponseAction::kContinueHeaders: {
if (state == StreamState::PendingHeaders) {
auto headers = std::make_unique<TestResponseHeaderMapImpl>(
Fuzz::fromHeaders<TestResponseHeaderMapImpl>(response_action.continue_headers()));
headers->setReferenceKey(Headers::get().Status, "100");
decoder_filter_->callbacks_->encode100ContinueHeaders(std::move(headers));
// We don't allow multiple 100-continue headers in HCM, UpstreamRequest is responsible
// for coalescing.
state = StreamState::PendingNonInformationalHeaders;
}
break;
}
case test::common::http::ResponseAction::kHeaders: {
if (state == StreamState::PendingHeaders ||
state == StreamState::PendingNonInformationalHeaders) {
auto headers = std::make_unique<TestResponseHeaderMapImpl>(
Fuzz::fromHeaders<TestResponseHeaderMapImpl>(response_action.headers()));
// The client codec will ensure we always have a valid :status.
// Similarly, local replies should always contain this.
uint64_t status;
try {
status = Utility::getResponseStatus(*headers);
} catch (const CodecClientException&) {
headers->setReferenceKey(Headers::get().Status, "200");
}
// The only 1xx header that may be provided to encodeHeaders() is a 101 upgrade,
// guaranteed by the codec parsers. See include/envoy/http/filter.h.
if (CodeUtility::is1xx(status) && status != enumToInt(Http::Code::SwitchingProtocols)) {
headers->setReferenceKey(Headers::get().Status, "200");
}
decoder_filter_->callbacks_->encodeHeaders(std::move(headers), end_stream, "details");
state = end_stream ? StreamState::Closed : StreamState::PendingDataOrTrailers;
}
break;
}
case test::common::http::ResponseAction::kData: {
if (state == StreamState::PendingDataOrTrailers) {
Buffer::OwnedImpl buf(std::string(response_action.data() % (1024 * 1024), 'a'));
decoder_filter_->callbacks_->encodeData(buf, end_stream);
state = end_stream ? StreamState::Closed : StreamState::PendingDataOrTrailers;
}
break;
}
case test::common::http::ResponseAction::kTrailers: {
if (state == StreamState::PendingDataOrTrailers) {
decoder_filter_->callbacks_->encodeTrailers(std::make_unique<TestResponseTrailerMapImpl>(
Fuzz::fromHeaders<TestResponseTrailerMapImpl>(response_action.trailers())));
state = StreamState::Closed;
}
break;
}
default:
// Maybe nothing is set?
break;
}
} | 0 | [
"CWE-22"
] | envoy | 5333b928d8bcffa26ab19bf018369a835f697585 | 225,834,781,086,448,530,000,000,000,000,000,000,000 | 60 | Implement handling of escaped slash characters in URL path
Fixes: CVE-2021-29492
Signed-off-by: Yan Avlasov <[email protected]> |
static void intel_hda_set_state_sts(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old)
{
intel_hda_update_irq(d);
} | 0 | [
"CWE-787"
] | qemu | 79fa99831debc9782087e834382c577215f2f511 | 115,047,933,504,602,860,000,000,000,000,000,000,000 | 4 | hw/audio/intel-hda: Restrict DMA engine to memories (not MMIO devices)
Issue #542 reports a reentrancy problem when the DMA engine accesses
the HDA controller I/O registers. Fix by restricting the DMA engine
to memories regions (forbidding MMIO devices such the HDA controller).
Reported-by: OSS-Fuzz (Issue 28435)
Reported-by: Alexander Bulekov <[email protected]>
Signed-off-by: Philippe Mathieu-Daudé <[email protected]>
Reviewed-by: Thomas Huth <[email protected]>
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/542
CVE: CVE-2021-3611
Message-Id: <[email protected]>
Signed-off-by: Thomas Huth <[email protected]> |
DEFUN(ldDL, DOWNLOAD_LIST, "Display downloads panel")
{
Buffer *buf;
int replace = FALSE, new_tab = FALSE;
#ifdef USE_ALARM
int reload;
#endif
if (Currentbuf->bufferprop & BP_INTERNAL &&
!strcmp(Currentbuf->buffername, DOWNLOAD_LIST_TITLE))
replace = TRUE;
if (!FirstDL) {
if (replace) {
if (Currentbuf == Firstbuf && Currentbuf->nextBuffer == NULL) {
if (nTab > 1)
deleteTab(CurrentTab);
}
else
delBuffer(Currentbuf);
displayBuffer(Currentbuf, B_FORCE_REDRAW);
}
return;
}
#ifdef USE_ALARM
reload = checkDownloadList();
#endif
buf = DownloadListBuffer();
if (!buf) {
displayBuffer(Currentbuf, B_NORMAL);
return;
}
buf->bufferprop |= (BP_INTERNAL | BP_NO_URL);
if (replace) {
COPY_BUFROOT(buf, Currentbuf);
restorePosition(buf, Currentbuf);
}
if (!replace && open_tab_dl_list) {
_newT();
new_tab = TRUE;
}
pushBuffer(buf);
if (replace || new_tab)
deletePrevBuf();
#ifdef USE_ALARM
if (reload)
Currentbuf->event = setAlarmEvent(Currentbuf->event, 1, AL_IMPLICIT,
FUNCNAME_reload, NULL);
#endif
displayBuffer(Currentbuf, B_FORCE_REDRAW);
} | 0 | [
"CWE-59",
"CWE-241"
] | w3m | 18dcbadf2771cdb0c18509b14e4e73505b242753 | 141,493,665,809,358,680,000,000,000,000,000,000,000 | 50 | Make temporary directory safely when ~/.w3m is unwritable |
PHP_METHOD(Phar, unlinkArchive)
{
char *fname, *error, *zname, *arch, *entry;
size_t fname_len;
int zname_len, arch_len, entry_len;
phar_archive_data *phar;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "p", &fname, &fname_len) == FAILURE) {
RETURN_FALSE;
}
if (ZEND_SIZE_T_INT_OVFL(fname_len)) {
RETURN_FALSE;
}
if (!fname_len) {
zend_throw_exception_ex(phar_ce_PharException, 0, "Unknown phar archive \"\"");
return;
}
if (FAILURE == phar_open_from_filename(fname, (int)fname_len, NULL, 0, REPORT_ERRORS, &phar, &error)) {
if (error) {
zend_throw_exception_ex(phar_ce_PharException, 0, "Unknown phar archive \"%s\": %s", fname, error);
efree(error);
} else {
zend_throw_exception_ex(phar_ce_PharException, 0, "Unknown phar archive \"%s\"", fname);
}
return;
}
zname = (char*)zend_get_executed_filename();
zname_len = (int)strlen(zname);
if (zname_len > 7 && !memcmp(zname, "phar://", 7) && SUCCESS == phar_split_fname(zname, zname_len, &arch, &arch_len, &entry, &entry_len, 2, 0)) {
if ((size_t)arch_len == fname_len && !memcmp(arch, fname, arch_len)) {
zend_throw_exception_ex(phar_ce_PharException, 0, "phar archive \"%s\" cannot be unlinked from within itself", fname);
efree(arch);
efree(entry);
return;
}
efree(arch);
efree(entry);
}
if (phar->is_persistent) {
zend_throw_exception_ex(phar_ce_PharException, 0, "phar archive \"%s\" is in phar.cache_list, cannot unlinkArchive()", fname);
return;
}
if (phar->refcount) {
zend_throw_exception_ex(phar_ce_PharException, 0, "phar archive \"%s\" has open file handles or objects. fclose() all file handles, and unset() all objects prior to calling unlinkArchive()", fname);
return;
}
fname = estrndup(phar->fname, phar->fname_len);
/* invalidate phar cache */
PHAR_G(last_phar) = NULL;
PHAR_G(last_phar_name) = PHAR_G(last_alias) = NULL;
phar_archive_delref(phar);
unlink(fname);
efree(fname);
RETURN_TRUE;
} | 0 | [
"CWE-281"
] | php-src | e5c95234d87fcb8f6b7569a96a89d1e1544749a6 | 304,928,975,150,475,500,000,000,000,000,000,000,000 | 64 | Fix bug #79082 - Files added to tar with Phar::buildFromIterator have all-access permissions |
receive_check_fs(int msg_size)
{
int space, inodes;
if (check_spool_space > 0 || msg_size > 0 || check_spool_inodes > 0)
{
space = receive_statvfs(TRUE, &inodes);
DEBUG(D_receive)
debug_printf("spool directory space = %dK inodes = %d "
"check_space = %dK inodes = %d msg_size = %d\n",
space, inodes, check_spool_space, check_spool_inodes, msg_size);
if ((space >= 0 && space < check_spool_space) ||
(inodes >= 0 && inodes < check_spool_inodes))
{
log_write(0, LOG_MAIN, "spool directory space check failed: space=%d "
"inodes=%d", space, inodes);
return FALSE;
}
}
if (check_log_space > 0 || check_log_inodes > 0)
{
space = receive_statvfs(FALSE, &inodes);
DEBUG(D_receive)
debug_printf("log directory space = %dK inodes = %d "
"check_space = %dK inodes = %d\n",
space, inodes, check_log_space, check_log_inodes);
if ((space >= 0 && space < check_log_space) ||
(inodes >= 0 && inodes < check_log_inodes))
{
log_write(0, LOG_MAIN, "log directory space check failed: space=%d "
"inodes=%d", space, inodes);
return FALSE;
}
}
return TRUE;
} | 0 | [
"CWE-416"
] | exim | 4e6ae6235c68de243b1c2419027472d7659aa2b4 | 281,366,162,610,758,980,000,000,000,000,000,000,000 | 42 | Avoid release of store if there have been later allocations. Bug 2199 |
vte_sequence_handler_ke (VteTerminal *terminal, GValueArray *params)
{
terminal->pvt->keypad_mode = VTE_KEYMODE_NORMAL;
} | 0 | [] | vte | 58bc3a942f198a1a8788553ca72c19d7c1702b74 | 17,064,873,316,150,595,000,000,000,000,000,000,000 | 4 | fix bug #548272
svn path=/trunk/; revision=2365 |
static int key_notify_policy_flush(const struct km_event *c)
{
struct sk_buff *skb_out;
struct sadb_msg *hdr;
skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
if (!skb_out)
return -ENOBUFS;
hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
hdr->sadb_msg_type = SADB_X_SPDFLUSH;
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
} | 1 | [
"CWE-119",
"CWE-787"
] | linux | a5cc68f3d63306d0d288f31edfc2ae6ef8ecd887 | 67,521,643,653,864,070,000,000,000,000,000,000,000 | 20 | af_key: fix info leaks in notify messages
key_notify_sa_flush() and key_notify_policy_flush() miss to initialize
the sadb_msg_reserved member of the broadcasted message and thereby
leak 2 bytes of heap memory to listeners. Fix that.
Signed-off-by: Mathias Krause <[email protected]>
Cc: Steffen Klassert <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
void *ret = slab_alloc(cachep, flags, _RET_IP_);
kasan_slab_alloc(cachep, ret, flags);
trace_kmem_cache_alloc(_RET_IP_, ret,
cachep->object_size, cachep->size, flags);
return ret;
} | 0 | [
"CWE-703"
] | linux | c4e490cf148e85ead0d1b1c2caaba833f1d5b29f | 280,068,590,080,228,900,000,000,000,000,000,000,000 | 10 | mm/slab.c: fix SLAB freelist randomization duplicate entries
This patch fixes a bug in the freelist randomization code. When a high
random number is used, the freelist will contain duplicate entries. It
will result in different allocations sharing the same chunk.
It will result in odd behaviours and crashes. It should be uncommon but
it depends on the machines. We saw it happening more often on some
machines (every few hours of running tests).
Fixes: c7ce4f60ac19 ("mm: SLAB freelist randomization")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: John Sperbeck <[email protected]>
Signed-off-by: Thomas Garnier <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
bool build_huffcodes( unsigned char *clen, uint32_t clenlen, unsigned char *cval, uint32_t cvallen, huffCodes *hc, huffTree *ht )
{
int nextfree;
int code;
int node;
int i, j, k;
// fill with zeroes
memset( hc->clen, 0, 256 * sizeof( short ) );
memset( hc->cval, 0, 256 * sizeof( short ) );
memset( ht->l, 0, 256 * sizeof( short ) );
memset( ht->r, 0, 256 * sizeof( short ) );
// 1st part -> build huffman codes
// creating huffman-codes
k = 0;
code = 0;
// symbol-value of code is its position in the table
for( i = 0; i < 16; i++ ) {
uint32_t clen_index = i & 0xff;
for( j = 0; j < (int) (clen_index < clenlen ? clen[clen_index] : 0); j++ ) {
uint32_t cval_index = k&0xff;
uint8_t cval_val= cval_index < cvallen ? cval[cval_index] : 0;
hc->clen[ (int) cval_val&0xff] = 1 + i;
hc->cval[ (int) cval_val&0xff] = code;
k++;
code++;
}
code = code << 1;
}
// find out eobrun max value
hc->max_eobrun = 0;
for ( i = 14; i >= 0; i-- ) {
if ( hc->clen[(i << 4) & 255] > 0 ) {
hc->max_eobrun = ( 2 << i ) - 1;
break;
}
}
// 2nd -> part use codes to build the coding tree
// initial value for next free place
nextfree = 1;
const char * huffman_no_space = "Huffman table out of space\n";
// work through every code creating links between the nodes (represented through ints)
for ( i = 0; i < 256; i++ ) {
// (re)set current node
node = 0;
// go through each code & store path
for ( j = hc->clen[i] - 1; j > 0; j-- ) {
if (node <= 0xff) {
if ( BITN( hc->cval[i], j ) == 1 ) {
if ( ht->r[node] == 0 ) {
ht->r[node] = nextfree++;
}
node = ht->r[node];
}
else {
if ( ht->l[node] == 0 ) {
ht->l[node] = nextfree++;
}
node = ht->l[node];
}
} else {
while(write(2, huffman_no_space, strlen(huffman_no_space)) == -1 && errno == EINTR) {}
if (filetype == JPEG) {
return false;
}
}
}
if (node <= 0xff) {
// last link is number of targetvalue + 256
if ( hc->clen[i] > 0 ) {
if ( BITN( hc->cval[i], 0 ) == 1 ) {
ht->r[node] = i + 256;
} else {
ht->l[node] = i + 256;
}
}
} else {
while(write(2, huffman_no_space, strlen(huffman_no_space)) == -1 && errno == EINTR) {}
if (filetype == JPEG) {
return false; // we accept any .lep file that was encoded this way
}
}
}
return true;
} | 0 | [
"CWE-399",
"CWE-190"
] | lepton | 6a5ceefac1162783fffd9506a3de39c85c725761 | 28,059,520,828,356,880,000,000,000,000,000,000,000 | 93 | fix #111 |
__fastcall TSaveSessionDialog::TSaveSessionDialog(TComponent* /*AOwner*/) :
TCustomDialog(HELP_SESSION_SAVE)
{
}
| 0 | [
"CWE-787"
] | winscp | faa96e8144e6925a380f94a97aa382c9427f688d | 11,193,945,879,152,044,000,000,000,000,000,000,000 | 4 | Bug 1943: Prevent loading session settings that can lead to remote code execution from handled URLs
https://winscp.net/tracker/1943
(cherry picked from commit ec584f5189a856cd79509f754722a6898045c5e0)
Source commit: 0f4be408b3f01132b00682da72d925d6c4ee649b |
tsize_t t2p_write_pdf_string(char* pdfstr, TIFF* output)
{
tsize_t written = 0;
uint32 i = 0;
char buffer[64];
size_t len = 0;
len = strlen(pdfstr);
written += t2pWriteFile(output, (tdata_t) "(", 1);
for (i=0; i<len; i++) {
if((pdfstr[i]&0x80) || (pdfstr[i]==127) || (pdfstr[i]<32)){
snprintf(buffer, sizeof(buffer), "\\%.3o", ((unsigned char)pdfstr[i]));
written += t2pWriteFile(output, (tdata_t)buffer, 4);
} else {
switch (pdfstr[i]){
case 0x08:
written += t2pWriteFile(output, (tdata_t) "\\b", 2);
break;
case 0x09:
written += t2pWriteFile(output, (tdata_t) "\\t", 2);
break;
case 0x0A:
written += t2pWriteFile(output, (tdata_t) "\\n", 2);
break;
case 0x0C:
written += t2pWriteFile(output, (tdata_t) "\\f", 2);
break;
case 0x0D:
written += t2pWriteFile(output, (tdata_t) "\\r", 2);
break;
case 0x28:
written += t2pWriteFile(output, (tdata_t) "\\(", 2);
break;
case 0x29:
written += t2pWriteFile(output, (tdata_t) "\\)", 2);
break;
case 0x5C:
written += t2pWriteFile(output, (tdata_t) "\\\\", 2);
break;
default:
written += t2pWriteFile(output, (tdata_t) &pdfstr[i], 1);
}
}
}
written += t2pWriteFile(output, (tdata_t) ") ", 1);
return(written);
} | 0 | [
"CWE-119"
] | libtiff | b5d6803f0898e931cf772d3d0755704ab8488e63 | 21,737,954,374,050,646,000,000,000,000,000,000,000 | 48 | * tools/tiff2pdf.c: fix write buffer overflow of 2 bytes on JPEG
compressed images. Reported by Tyler Bohan of Cisco Talos as
TALOS-CAN-0187 / CVE-2016-5652.
Also prevents writing 2 extra uninitialized bytes to the file stream. |
static Param parse(Buf handshakeMsg, Buf original) {
auto msg = decode<T>(std::move(handshakeMsg));
msg.originalEncoding = std::move(original);
return std::move(msg);
} | 0 | [
"CWE-400",
"CWE-703",
"CWE-770"
] | fizz | 3eaddb33619eaaf74a760872850c550ad8f5c52f | 238,477,068,436,613,080,000,000,000,000,000,000,000 | 5 | Coalesce handshake buffers
Summary:
It is possible that a peer might send us records in a manner such
that there is a 16KB record and only 1 byte of handshake message in
each record. Since we normally just trim the IOBuf, we would end up
holding 16K of data per actual byte of data. To prevent this we allocate a contiguous
buffer to copy over these bytes for handshake messages for now.
This is a partial fix for CVE-2019-11924
Reviewed By: ngoyal
Differential Revision: D16478044
fbshipit-source-id: 464bc68eaefda065d9a327818100427377293fbd |
sql_real_connect(char *host,char *database,char *user,char *password,
uint silent)
{
if (connected)
{
connected= 0;
mysql_close(&mysql);
}
mysql_init(&mysql);
if (opt_init_command)
mysql_options(&mysql, MYSQL_INIT_COMMAND, opt_init_command);
if (opt_connect_timeout)
{
uint timeout=opt_connect_timeout;
mysql_options(&mysql,MYSQL_OPT_CONNECT_TIMEOUT,
(char*) &timeout);
}
if (opt_compress)
mysql_options(&mysql,MYSQL_OPT_COMPRESS,NullS);
if (opt_secure_auth)
mysql_options(&mysql, MYSQL_SECURE_AUTH, (char *) &opt_secure_auth);
if (using_opt_local_infile)
mysql_options(&mysql,MYSQL_OPT_LOCAL_INFILE, (char*) &opt_local_infile);
#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
if (opt_use_ssl)
mysql_ssl_set(&mysql, opt_ssl_key, opt_ssl_cert, opt_ssl_ca,
opt_ssl_capath, opt_ssl_cipher);
mysql_options(&mysql,MYSQL_OPT_SSL_VERIFY_SERVER_CERT,
(char*)&opt_ssl_verify_server_cert);
#endif
if (opt_protocol)
mysql_options(&mysql,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
#ifdef HAVE_SMEM
if (shared_memory_base_name)
mysql_options(&mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
#endif
if (safe_updates)
{
char init_command[100];
sprintf(init_command,
"SET SQL_SAFE_UPDATES=1,SQL_SELECT_LIMIT=%lu,MAX_JOIN_SIZE=%lu",
select_limit,max_join_size);
mysql_options(&mysql, MYSQL_INIT_COMMAND, init_command);
}
mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, default_charset);
if (opt_plugin_dir && *opt_plugin_dir)
mysql_options(&mysql, MYSQL_PLUGIN_DIR, opt_plugin_dir);
if (opt_default_auth && *opt_default_auth)
mysql_options(&mysql, MYSQL_DEFAULT_AUTH, opt_default_auth);
if (!mysql_real_connect(&mysql, host, user, password,
database, opt_mysql_port, opt_mysql_unix_port,
connect_flag | CLIENT_MULTI_STATEMENTS))
{
if (!silent ||
(mysql_errno(&mysql) != CR_CONN_HOST_ERROR &&
mysql_errno(&mysql) != CR_CONNECTION_ERROR))
{
(void) put_error(&mysql);
(void) fflush(stdout);
return ignore_errors ? -1 : 1; // Abort
}
return -1; // Retryable
}
charset_info= mysql.charset;
connected=1;
#ifndef EMBEDDED_LIBRARY
mysql.reconnect= debug_info_flag; // We want to know if this happens
/*
CLIENT_PROGRESS is set only if we requsted it in mysql_real_connect()
and the server also supports it
*/
if (mysql.client_flag & CLIENT_PROGRESS)
mysql_options(&mysql, MYSQL_PROGRESS_CALLBACK, (void*) report_progress);
#else
mysql.reconnect= 1;
#endif
#ifdef HAVE_READLINE
build_completion_hash(opt_rehash, 1);
#endif
return 0;
} | 0 | [] | server | 383007c75d6ef5043fa5781956a6a02b24e2b79e | 339,290,687,733,768,720,000,000,000,000,000,000,000 | 88 | mysql cli: fix USE command quoting
* use proper sql quoting rules for USE, while preserving
as much of historical behavior as possible
* short commands (\u) behave as before |
/*
 * Handle the HCI Keypress Notification event: track how many passkey
 * digits the remote side has entered on the ACL connection and, when
 * the management interface is enabled, forward the updated count to
 * user space via mgmt_user_passkey_notify().
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	/*
	 * STARTED and COMPLETED never produce a management event:
	 * STARTED only resets the digit counter, COMPLETED is a no-op.
	 */
	if (ev->type == HCI_KEYPRESS_STARTED) {
		conn->passkey_entered = 0;
		return;
	}
	if (ev->type == HCI_KEYPRESS_COMPLETED)
		return;

	if (ev->type == HCI_KEYPRESS_ENTERED)
		conn->passkey_entered++;
	else if (ev->type == HCI_KEYPRESS_ERASED)
		conn->passkey_entered--;
	else if (ev->type == HCI_KEYPRESS_CLEARED)
		conn->passkey_entered = 0;

	/* Any other type falls through unchanged, as in the switch form. */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
"CWE-290"
] | linux | 3ca44c16b0dcc764b641ee4ac226909f5c421aa3 | 168,478,325,647,714,800,000,000,000,000,000,000,000 | 37 | Bluetooth: Consolidate encryption handling in hci_encrypt_cfm
This makes hci_encrypt_cfm calls hci_connect_cfm in case the connection
state is BT_CONFIG so callers don't have to check the state.
Signed-off-by: Luiz Augusto von Dentz <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]> |
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
if (sd->current_vmcb != svm->vmcb) {
sd->current_vmcb = svm->vmcb;
indirect_branch_prediction_barrier();
}
if (kvm_vcpu_apicv_active(vcpu))
__avic_vcpu_load(vcpu, cpu);
} | 0 | [
"CWE-703"
] | linux | 6cd88243c7e03845a450795e134b488fc2afb736 | 249,070,661,730,025,950,000,000,000,000,000,000,000 | 12 | KVM: x86: do not report a vCPU as preempted outside instruction boundaries
If a vCPU is outside guest mode and is scheduled out, it might be in the
process of making a memory access. A problem occurs if another vCPU uses
the PV TLB flush feature during the period when the vCPU is scheduled
out, and a virtual address has already been translated but has not yet
been accessed, because this is equivalent to using a stale TLB entry.
To avoid this, only report a vCPU as preempted if sure that the guest
is at an instruction boundary. A rescheduling request will be delivered
to the host physical CPU as an external interrupt, so for simplicity
consider any vmexit *not* instruction boundary except for external
interrupts.
It would in principle be okay to report the vCPU as preempted also
if it is sleeping in kvm_vcpu_block(): a TLB flush IPI will incur the
vmentry/vmexit overhead unnecessarily, and optimistic spinning is
also unlikely to succeed. However, leave it for later because right
now kvm_vcpu_check_block() is doing memory accesses. Even
though the TLB flush issue only applies to virtual memory address,
it's very much preferable to be conservative.
Reported-by: Jann Horn <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]> |
Subsets and Splits