Dataset schema (column, dtype, value range):

| column | dtype | values |
|---|---|---|
| func | string | lengths 0 – 484k |
| target | int64 | 0 – 1 |
| cwe | sequence | lengths 0 – 4 |
| project | string | 799 classes |
| commit_id | string | length 40 |
| hash | float64 | 1,215,700,430,453,689,100,000,000 – 340,281,914,521,452,260,000,000,000,000 |
| size | int64 | 1 – 24k |
| message | string | lengths 0 – 13.3k |

func | target | cwe | project | commit_id | hash | size | message
---|---|---|---|---|---|---|---
static PyObject *Adapter_start_response(AdapterObject *self, PyObject *args)
{
PyObject *result = NULL;
PyObject *status_line = NULL;
PyObject *headers = NULL;
PyObject *exc_info = Py_None;
PyObject *status_line_as_bytes = NULL;
PyObject *headers_as_bytes = NULL;
if (!self->r) {
PyErr_SetString(PyExc_RuntimeError, "request object has expired");
return NULL;
}
if (!PyArg_ParseTuple(args, "OO!|O:start_response",
&status_line, &PyList_Type, &headers, &exc_info)) {
return NULL;
}
if (exc_info != Py_None && !PyTuple_Check(exc_info)) {
PyErr_SetString(PyExc_RuntimeError, "exception info must be a tuple");
return NULL;
}
if (exc_info != Py_None) {
if (self->status_line && !self->headers) {
PyObject *type = NULL;
PyObject *value = NULL;
PyObject *traceback = NULL;
if (!PyArg_ParseTuple(exc_info, "OOO", &type,
&value, &traceback)) {
return NULL;
}
Py_INCREF(type);
Py_INCREF(value);
Py_INCREF(traceback);
PyErr_Restore(type, value, traceback);
return NULL;
}
}
else if (self->status_line && !self->headers) {
PyErr_SetString(PyExc_RuntimeError, "headers have already been sent");
return NULL;
}
status_line_as_bytes = wsgi_convert_status_line_to_bytes(status_line);
if (!status_line_as_bytes)
goto finally;
headers_as_bytes = wsgi_convert_headers_to_bytes(headers);
if (!headers_as_bytes)
goto finally;
self->status_line = apr_pstrdup(self->r->pool, PyString_AsString(
status_line_as_bytes));
self->status = (int)strtol(self->status_line, NULL, 10);
Py_XDECREF(self->headers);
self->headers = headers_as_bytes;
Py_INCREF(headers_as_bytes);
result = PyObject_GetAttrString((PyObject *)self, "write");
finally:
Py_XDECREF(status_line_as_bytes);
Py_XDECREF(headers_as_bytes);
return result;
} | 0 | [
"CWE-254"
] | mod_wsgi | 545354a80b9cc20d8b6916ca30542eab36c3b8bd | 3,061,132,230,730,296,500,000,000,000,000,000,000 | 77 | When there is any sort of error in setting up daemon process group, kill the process rather than risk running in an unexpected state. |
char *X509_TRUST_get0_name(X509_TRUST *xp)
{
return xp->name;
} | 0 | [] | openssl | d65b8b2162f33ac0d53dace588a0847ed827626c | 267,087,499,524,007,400,000,000,000,000,000,000,000 | 4 | Backport OCSP fixes. |
static void xfm_error(struct tree *xfm, const char *msg) {
char *v = strdup(msg);
char *l = strdup("error");
if (l == NULL || v == NULL)
return;
tree_append(xfm, l, v);
} | 0 | [] | augeas | 051c73a9a7ffe9e525f6f0a1b8f5198ff8cc6752 | 294,860,764,382,698,680,000,000,000,000,000,000,000 | 8 | Fix regression in permissions of created files
Commit 16387744 changed temporary file creation to use mkstemp, resulting in
new files being created with 0600 permissions. For brand new files created
through Augeas, their permissions stayed at 0600 rather than being set by the
umask as before.
* src/transform.c (transform_save): chmod after creating new files to
permissions implied by the umask |
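
The commit message above describes an easy-to-miss interaction: mkstemp() always creates files with mode 0600, so brand-new files written via a temp-file-and-rename scheme no longer get the permissions the process umask would imply. Below is a minimal standalone sketch of that remedy, not Augeas source; the path and the 0666 base mode are assumptions made for the example.

```c
/* Sketch (not Augeas code): create a file with mkstemp(), which always uses
 * mode 0600, then chmod it to the permissions the umask would normally imply
 * for a brand-new file (0666 & ~umask). */
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void) {
    char tmpl[] = "/tmp/augtestXXXXXX";   /* illustrative path */
    int fd = mkstemp(tmpl);               /* file is created with mode 0600 */
    if (fd < 0) {
        perror("mkstemp");
        return 1;
    }

    mode_t mask = umask(0);               /* read the current umask ... */
    umask(mask);                          /* ... and immediately restore it */

    if (fchmod(fd, 0666 & ~mask) < 0)     /* widen to the umask-implied mode */
        perror("fchmod");

    printf("created %s with mode %04o\n", tmpl, 0666 & ~mask);
    close(fd);
    unlink(tmpl);
    return 0;
}
```

The umask(0)/umask(mask) round trip is needed because POSIX offers no non-destructive way to query the current umask.
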
uint64 EbmlElementSize(uint64 type, uint64 value) {
return EbmlElementSize(type, value, 0);
} | 0 | [
"CWE-20"
] | libvpx | 34d54b04e98dd0bac32e9aab0fbda0bf501bc742 | 146,275,359,372,499,550,000,000,000,000,000,000,000 | 3 | update libwebm to libwebm-1.0.0.27-358-gdbf1d10
changelog:
https://chromium.googlesource.com/webm/libwebm/+log/libwebm-1.0.0.27-351-g9f23fbc..libwebm-1.0.0.27-358-gdbf1d10
Change-Id: I28a6b3ae02a53fb1f2029eee11e9449afb94c8e3 |
bash_event_hook ()
{
/* If we're going to longjmp to top_level, make sure we clean up readline.
check_signals will call QUIT, which will eventually longjmp to top_level,
calling run_interrupt_trap along the way. The check for sigalrm_seen is
to clean up the read builtin's state. */
if (terminating_signal || interrupt_state || sigalrm_seen)
rl_cleanup_after_signal ();
bashline_reset_event_hook ();
check_signals_and_traps (); /* XXX */
return 0;
} | 0 | [
"CWE-20"
] | bash | 4f747edc625815f449048579f6e65869914dd715 | 160,879,460,893,384,000,000,000,000,000,000,000,000 | 12 | Bash-4.4 patch 7 |
iperf_get_test_get_server_output(struct iperf_test *ipt)
{
return ipt->get_server_output;
} | 0 | [
"CWE-120",
"CWE-119",
"CWE-787"
] | iperf | 91f2fa59e8ed80dfbf400add0164ee0e508e412a | 109,360,347,223,960,040,000,000,000,000,000,000,000 | 4 | Fix a buffer overflow / heap corruption issue that could occur if a
malformed JSON string was passed on the control channel. This issue,
present in the cJSON library, was already fixed upstream, so was
addressed here in iperf3 by importing a newer version of cJSON (plus
local ESnet modifications).
Discovered and reported by Dave McDaniel, Cisco Talos.
Based on a patch by @dopheide-esnet, with input from @DaveGamble.
Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001,
CVE-2016-4303
(cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40)
Signed-off-by: Bruce A. Mah <[email protected]> |
static Image *ReadJP2Image(const ImageInfo *image_info,ExceptionInfo *exception)
{
const char
*option;
Image
*image;
int
jp2_status;
MagickBooleanType
status;
opj_codec_t
*jp2_codec;
opj_dparameters_t
parameters;
opj_image_t
*jp2_image;
opj_stream_t
*jp2_stream;
register ssize_t
i;
ssize_t
y;
unsigned char
sans[4];
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Initialize JP2 codec.
*/
if (ReadBlob(image,4,sans) != 4)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
(void) SeekBlob(image,SEEK_SET,0);
if (LocaleCompare(image_info->magick,"JPT") == 0)
jp2_codec=opj_create_decompress(OPJ_CODEC_JPT);
else
if (IsJ2K(sans,4) != MagickFalse)
jp2_codec=opj_create_decompress(OPJ_CODEC_J2K);
else
jp2_codec=opj_create_decompress(OPJ_CODEC_JP2);
opj_set_warning_handler(jp2_codec,JP2WarningHandler,exception);
opj_set_error_handler(jp2_codec,JP2ErrorHandler,exception);
opj_set_default_decoder_parameters(¶meters);
option=GetImageOption(image_info,"jp2:reduce-factor");
if (option != (const char *) NULL)
parameters.cp_reduce=StringToInteger(option);
option=GetImageOption(image_info,"jp2:quality-layers");
if (option != (const char *) NULL)
parameters.cp_layer=StringToInteger(option);
if (opj_setup_decoder(jp2_codec,¶meters) == 0)
{
opj_destroy_codec(jp2_codec);
ThrowReaderException(DelegateError,"UnableToManageJP2Stream");
}
jp2_stream=opj_stream_create(OPJ_J2K_STREAM_CHUNK_SIZE,1);
opj_stream_set_read_function(jp2_stream,JP2ReadHandler);
opj_stream_set_write_function(jp2_stream,JP2WriteHandler);
opj_stream_set_seek_function(jp2_stream,JP2SeekHandler);
opj_stream_set_skip_function(jp2_stream,JP2SkipHandler);
opj_stream_set_user_data(jp2_stream,image,NULL);
opj_stream_set_user_data_length(jp2_stream,GetBlobSize(image));
if (opj_read_header(jp2_stream,jp2_codec,&jp2_image) == 0)
{
opj_stream_destroy(jp2_stream);
opj_destroy_codec(jp2_codec);
ThrowReaderException(DelegateError,"UnableToDecodeImageFile");
}
jp2_status=OPJ_TRUE;
if (image->ping == MagickFalse)
{
if ((image->columns != 0) && (image->rows != 0))
/*
Extract an area from the image.
*/
jp2_status=opj_set_decode_area(jp2_codec,jp2_image,
(OPJ_INT32) image->extract_info.x,(OPJ_INT32) image->extract_info.y,
(OPJ_INT32) (image->extract_info.x+(ssize_t) image->columns),
(OPJ_INT32) (image->extract_info.y+(ssize_t) image->rows));
else
jp2_status=opj_set_decode_area(jp2_codec,jp2_image,0,0,
jp2_image->comps[0].w,jp2_image->comps[0].h);
if (jp2_status == OPJ_FALSE)
{
opj_stream_destroy(jp2_stream);
opj_destroy_codec(jp2_codec);
opj_image_destroy(jp2_image);
ThrowReaderException(DelegateError,"UnableToDecodeImageFile");
}
}
if ((AcquireMagickResource(WidthResource,(size_t) jp2_image->comps[0].w) == MagickFalse) ||
(AcquireMagickResource(HeightResource,(size_t) jp2_image->comps[0].h) == MagickFalse))
{
opj_stream_destroy(jp2_stream);
opj_destroy_codec(jp2_codec);
opj_image_destroy(jp2_image);
ThrowReaderException(DelegateError,"UnableToDecodeImageFile");
}
if ((image_info->number_scenes != 0) && (image_info->scene != 0))
jp2_status=opj_get_decoded_tile(jp2_codec,jp2_stream,jp2_image,
(unsigned int) image_info->scene-1);
else
if (image->ping == MagickFalse)
{
jp2_status=opj_decode(jp2_codec,jp2_stream,jp2_image);
if (jp2_status != OPJ_FALSE)
jp2_status=opj_end_decompress(jp2_codec,jp2_stream);
}
if (jp2_status == OPJ_FALSE)
{
opj_stream_destroy(jp2_stream);
opj_destroy_codec(jp2_codec);
opj_image_destroy(jp2_image);
ThrowReaderException(DelegateError,"UnableToDecodeImageFile");
}
opj_stream_destroy(jp2_stream);
for (i=0; i < (ssize_t) jp2_image->numcomps; i++)
{
if ((jp2_image->comps[0].dx == 0) || (jp2_image->comps[0].dy == 0) ||
(jp2_image->comps[0].prec != jp2_image->comps[i].prec) ||
(jp2_image->comps[0].sgnd != jp2_image->comps[i].sgnd) ||
((image->ping == MagickFalse) && (jp2_image->comps[i].data == NULL)))
{
opj_destroy_codec(jp2_codec);
opj_image_destroy(jp2_image);
ThrowReaderException(CoderError,"IrregularChannelGeometryNotSupported")
}
}
/*
Convert JP2 image.
*/
image->columns=(size_t) jp2_image->comps[0].w;
image->rows=(size_t) jp2_image->comps[0].h;
image->depth=jp2_image->comps[0].prec;
image->compression=JPEG2000Compression;
if (jp2_image->numcomps == 1)
SetImageColorspace(image,GRAYColorspace,exception);
else
if (jp2_image->color_space == 2)
{
SetImageColorspace(image,GRAYColorspace,exception);
if (jp2_image->numcomps > 1)
image->alpha_trait=BlendPixelTrait;
}
else
if (jp2_image->color_space == 3)
SetImageColorspace(image,Rec601YCbCrColorspace,exception);
if (jp2_image->numcomps > 3)
image->alpha_trait=BlendPixelTrait;
if (jp2_image->icc_profile_buf != (unsigned char *) NULL)
{
StringInfo
*profile;
profile=BlobToStringInfo(jp2_image->icc_profile_buf,
jp2_image->icc_profile_len);
if (profile != (StringInfo *) NULL)
{
SetImageProfile(image,"icc",profile,exception);
profile=DestroyStringInfo(profile);
}
}
if (image->ping != MagickFalse)
{
opj_destroy_codec(jp2_codec);
opj_image_destroy(jp2_image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
{
opj_destroy_codec(jp2_codec);
opj_image_destroy(jp2_image);
return(DestroyImageList(image));
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) jp2_image->numcomps; i++)
{
double
pixel,
scale;
scale=QuantumRange/(double) ((1UL << jp2_image->comps[i].prec)-1);
pixel=scale*(jp2_image->comps[i].data[y/jp2_image->comps[i].dy*
image->columns/jp2_image->comps[i].dx+x/jp2_image->comps[i].dx]+
(jp2_image->comps[i].sgnd ? 1UL << (jp2_image->comps[i].prec-1) : 0));
switch (i)
{
case 0:
{
if (jp2_image->numcomps == 1)
{
SetPixelGray(image,ClampToQuantum(pixel),q);
SetPixelAlpha(image,OpaqueAlpha,q);
break;
}
SetPixelRed(image,ClampToQuantum(pixel),q);
SetPixelGreen(image,ClampToQuantum(pixel),q);
SetPixelBlue(image,ClampToQuantum(pixel),q);
SetPixelAlpha(image,OpaqueAlpha,q);
break;
}
case 1:
{
if (jp2_image->numcomps == 2)
{
SetPixelAlpha(image,ClampToQuantum(pixel),q);
break;
}
SetPixelGreen(image,ClampToQuantum(pixel),q);
break;
}
case 2:
{
SetPixelBlue(image,ClampToQuantum(pixel),q);
break;
}
case 3:
{
SetPixelAlpha(image,ClampToQuantum(pixel),q);
break;
}
}
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
/*
Free resources.
*/
opj_destroy_codec(jp2_codec);
opj_image_destroy(jp2_image);
(void) CloseBlob(image);
if ((image_info->number_scenes != 0) && (image_info->scene != 0))
AppendImageToList(&image,CloneImage(image,0,0,MagickTrue,exception));
return(GetFirstImageInList(image));
} | 0 | [
"CWE-665"
] | ImageMagick | 90c4afcde1bf3ad5aead4477716161c350b049f8 | 178,609,751,982,075,100,000,000,000,000,000,000,000 | 282 | https://github.com/ImageMagick/ImageMagick/issues/1518 |
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
pr_debug("%s: The handling func for control queue.\n", __func__);
} | 0 | [
"CWE-200",
"CWE-119"
] | linux | 59c816c1f24df0204e01851431d3bab3eb76719c | 93,097,050,143,580,470,000,000,000,000,000,000,000 | 4 | vhost/scsi: potential memory corruption
This code in vhost_scsi_make_tpg() is confusing because we limit "tpgt"
to UINT_MAX but the data type of "tpg->tport_tpgt" and that is a u16.
I looked at the context and it turns out that in
vhost_scsi_set_endpoint(), "tpg->tport_tpgt" is used as an offset into
the vs_tpg[] array which has VHOST_SCSI_MAX_TARGET (256) elements so
anything higher than 255 then it is invalid. I have made that the limit
now.
In vhost_scsi_send_evt() we mask away values higher than 255, but now
that the limit has changed, we don't need the mask.
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Nicholas Bellinger <[email protected]> |
static int ssl_rsa_decrypt( void *ctx,
int (*f_rng)(void *, unsigned char *, size_t),
void *p_rng, int mode, size_t *olen,
const unsigned char *input, unsigned char *output,
size_t output_max_len )
{
return rsa_pkcs1_decrypt( (rsa_context *) ctx, f_rng, p_rng, mode, olen,
input, output, output_max_len );
} | 0 | [
"CWE-310"
] | polarssl | 43f9799ce61c6392a014d0a2ea136b4b3a9ee194 | 58,833,855,150,109,940,000,000,000,000,000,000,000 | 9 | RSA blinding on CRT operations to counter timing attacks |
**/
Tfloat linear_atXYZC(const float fx, const float fy, const float fz, const float fc, const T& out_value) const {
const int
x = (int)fx - (fx>=0?0:1), nx = x + 1,
y = (int)fy - (fy>=0?0:1), ny = y + 1,
z = (int)fz - (fz>=0?0:1), nz = z + 1,
c = (int)fc - (fc>=0?0:1), nc = c + 1;
const float
dx = fx - x,
dy = fy - y,
dz = fz - z,
dc = fc - c;
const Tfloat
Icccc = (Tfloat)atXYZC(x,y,z,c,out_value), Inccc = (Tfloat)atXYZC(nx,y,z,c,out_value),
Icncc = (Tfloat)atXYZC(x,ny,z,c,out_value), Inncc = (Tfloat)atXYZC(nx,ny,z,c,out_value),
Iccnc = (Tfloat)atXYZC(x,y,nz,c,out_value), Incnc = (Tfloat)atXYZC(nx,y,nz,c,out_value),
Icnnc = (Tfloat)atXYZC(x,ny,nz,c,out_value), Innnc = (Tfloat)atXYZC(nx,ny,nz,c,out_value),
Icccn = (Tfloat)atXYZC(x,y,z,nc,out_value), Inccn = (Tfloat)atXYZC(nx,y,z,nc,out_value),
Icncn = (Tfloat)atXYZC(x,ny,z,nc,out_value), Inncn = (Tfloat)atXYZC(nx,ny,z,nc,out_value),
Iccnn = (Tfloat)atXYZC(x,y,nz,nc,out_value), Incnn = (Tfloat)atXYZC(nx,y,nz,nc,out_value),
Icnnn = (Tfloat)atXYZC(x,ny,nz,nc,out_value), Innnn = (Tfloat)atXYZC(nx,ny,nz,nc,out_value);
return Icccc +
dx*(Inccc - Icccc +
dy*(Icccc + Inncc - Icncc - Inccc +
dz*(Iccnc + Innnc + Icncc + Inccc - Icnnc - Incnc - Icccc - Inncc +
dc*(Iccnn + Innnn + Icncn + Inccn + Icnnc + Incnc + Icccc + Inncc -
Icnnn - Incnn - Icccn - Inncn - Iccnc - Innnc - Icncc - Inccc)) +
dc*(Icccn + Inncn + Icncc + Inccc - Icncn - Inccn - Icccc - Inncc)) +
dz*(Icccc + Incnc - Iccnc - Inccc +
dc*(Icccn + Incnn + Iccnc + Inccc - Iccnn - Inccn - Icccc - Incnc)) +
dc*(Icccc + Inccn - Inccc - Icccn)) +
dy*(Icncc - Icccc +
dz*(Icccc + Icnnc - Iccnc - Icncc +
dc*(Icccn + Icnnn + Iccnc + Icncc - Iccnn - Icncn - Icccc - Icnnc)) +
dc*(Icccc + Icncn - Icncc - Icccn)) +
dz*(Iccnc - Icccc +
dc*(Icccc + Iccnn - Iccnc - Icccn)) +
dc*(Icccn -Icccc); | 0 | [
"CWE-125"
] | CImg | 10af1e8c1ad2a58a0a3342a856bae63e8f257abb | 37,655,790,581,854,157,000,000,000,000,000,000,000 | 38 | Fix other issues in 'CImg<T>::load_bmp()'. |
static void bm_evict_inode(struct inode *inode)
{
clear_inode(inode);
kfree(inode->i_private);
} | 0 | [
"CWE-200"
] | linux-2.6 | b66c5984017533316fd1951770302649baf1aa33 | 42,175,784,199,791,713,000,000,000,000,000,000,000 | 5 | exec: do not leave bprm->interp on stack
If a series of scripts are executed, each triggering module loading via
unprintable bytes in the script header, kernel stack contents can leak
into the command line.
Normally execution of binfmt_script and binfmt_misc happens recursively.
However, when modules are enabled, and unprintable bytes exist in the
bprm->buf, execution will restart after attempting to load matching
binfmt modules. Unfortunately, the logic in binfmt_script and
binfmt_misc does not expect to get restarted. They leave bprm->interp
pointing to their local stack. This means on restart bprm->interp is
left pointing into unused stack memory which can then be copied into the
userspace argv areas.
After additional study, it seems that both recursion and restart remains
the desirable way to handle exec with scripts, misc, and modules. As
such, we need to protect the changes to interp.
This changes the logic to require allocation for any changes to the
bprm->interp. To avoid adding a new kmalloc to every exec, the default
value is left as-is. Only when passing through binfmt_script or
binfmt_misc does an allocation take place.
For a proof of concept, see DoTest.sh from:
http://www.halfdog.net/Security/2012/LinuxKernelBinfmtScriptStackDataDisclosure/
Signed-off-by: Kees Cook <[email protected]>
Cc: halfdog <[email protected]>
Cc: P J P <[email protected]>
Cc: Alexander Viro <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
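
The commit message spells out the underlying bug class: a structure that outlives the current call ends up holding a pointer into that call's stack frame, and whatever later occupies that memory leaks out. The sketch below reduces it to a few lines; the struct and helper names are invented for illustration and are not the kernel's bprm/binfmt code.

```c
/* Illustration of the pattern, not kernel code: storing a pointer to a local
 * buffer in an object that outlives the function leaves a dangling pointer;
 * duplicating the string on the heap (as the fix does for bprm->interp) is
 * the safe variant. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct request {                          /* stands in for longer-lived state */
    char *interp;
};

static void broken_setup(struct request *req) {
    char local[64];                       /* lives only until return */
    snprintf(local, sizeof(local), "/bin/sh");
    req->interp = local;                  /* BUG: dangling after return */
}

static int fixed_setup(struct request *req) {
    char local[64];
    snprintf(local, sizeof(local), "/bin/sh");
    char *copy = strdup(local);           /* heap copy survives the call */
    if (!copy)
        return -1;
    free(req->interp);                    /* release any earlier value */
    req->interp = copy;
    return 0;
}

int main(void) {
    struct request req = { .interp = NULL };
    (void)broken_setup;                   /* shown for contrast, not called */
    if (fixed_setup(&req) == 0)
        printf("interp = %s\n", req.interp);
    free(req.interp);
    return 0;
}
```
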
static int huft_build(const unsigned *b, const unsigned n,
const unsigned s, const unsigned short *d,
const unsigned char *e, huft_t **t, unsigned *m)
{
unsigned a; /* counter for codes of length k */
unsigned c[BMAX + 1]; /* bit length count table */
unsigned eob_len; /* length of end-of-block code (value 256) */
unsigned f; /* i repeats in table every f entries */
int g; /* maximum code length */
int htl; /* table level */
unsigned i; /* counter, current code */
unsigned j; /* counter */
int k; /* number of bits in current code */
unsigned *p; /* pointer into c[], b[], or v[] */
huft_t *q; /* points to current table */
huft_t r; /* table entry for structure assignment */
huft_t *u[BMAX]; /* table stack */
unsigned v[N_MAX]; /* values in order of bit length */
int ws[BMAX + 1]; /* bits decoded stack */
int w; /* bits decoded */
unsigned x[BMAX + 1]; /* bit offsets, then code stack */
unsigned *xp; /* pointer into x */
int y; /* number of dummy codes added */
unsigned z; /* number of entries in current table */
/* Length of EOB code, if any */
eob_len = n > 256 ? b[256] : BMAX;
*t = NULL;
/* Generate counts for each bit length */
memset(c, 0, sizeof(c));
p = (unsigned *) b; /* cast allows us to reuse p for pointing to b */
i = n;
do {
c[*p]++; /* assume all entries <= BMAX */
p++; /* can't combine with above line (Solaris bug) */
} while (--i);
if (c[0] == n) { /* null input - all zero length codes */
*m = 0;
return 2;
}
/* Find minimum and maximum length, bound *m by those */
for (j = 1; (j <= BMAX) && (c[j] == 0); j++)
continue;
k = j; /* minimum code length */
for (i = BMAX; (c[i] == 0) && i; i--)
continue;
g = i; /* maximum code length */
*m = (*m < j) ? j : ((*m > i) ? i : *m);
/* Adjust last length count to fill out codes, if needed */
for (y = 1 << j; j < i; j++, y <<= 1) {
y -= c[j];
if (y < 0)
return 2; /* bad input: more codes than bits */
}
y -= c[i];
if (y < 0)
return 2;
c[i] += y;
/* Generate starting offsets into the value table for each length */
x[1] = j = 0;
p = c + 1;
xp = x + 2;
while (--i) { /* note that i == g from above */
j += *p++;
*xp++ = j;
}
/* Make a table of values in order of bit lengths */
p = (unsigned *) b;
i = 0;
do {
j = *p++;
if (j != 0) {
v[x[j]++] = i;
}
} while (++i < n);
/* Generate the Huffman codes and for each, make the table entries */
x[0] = i = 0; /* first Huffman code is zero */
p = v; /* grab values in bit order */
htl = -1; /* no tables yet--level -1 */
w = ws[0] = 0; /* bits decoded */
u[0] = NULL; /* just to keep compilers happy */
q = NULL; /* ditto */
z = 0; /* ditto */
/* go through the bit lengths (k already is bits in shortest code) */
for (; k <= g; k++) {
a = c[k];
while (a--) {
/* here i is the Huffman code of length k bits for value *p */
/* make tables up to required level */
while (k > ws[htl + 1]) {
w = ws[++htl];
/* compute minimum size table less than or equal to *m bits */
z = g - w;
z = z > *m ? *m : z; /* upper limit on table size */
j = k - w;
f = 1 << j;
if (f > a + 1) { /* try a k-w bit table */
/* too few codes for k-w bit table */
f -= a + 1; /* deduct codes from patterns left */
xp = c + k;
while (++j < z) { /* try smaller tables up to z bits */
f <<= 1;
if (f <= *++xp) {
break; /* enough codes to use up j bits */
}
f -= *xp; /* else deduct codes from patterns */
}
}
j = (w + j > eob_len && w < eob_len) ? eob_len - w : j; /* make EOB code end at table */
z = 1 << j; /* table entries for j-bit table */
ws[htl+1] = w + j; /* set bits decoded in stack */
/* allocate and link in new table */
q = xzalloc((z + 1) * sizeof(huft_t));
*t = q + 1; /* link to list for huft_free() */
t = &(q->v.t);
u[htl] = ++q; /* table starts after link */
/* connect to last table, if there is one */
if (htl) {
x[htl] = i; /* save pattern for backing up */
r.b = (unsigned char) (w - ws[htl - 1]); /* bits to dump before this table */
r.e = (unsigned char) (16 + j); /* bits in this table */
r.v.t = q; /* pointer to this table */
j = (i & ((1 << w) - 1)) >> ws[htl - 1];
u[htl - 1][j] = r; /* connect to last table */
}
}
/* set up table entry in r */
r.b = (unsigned char) (k - w);
if (p >= v + n) {
r.e = 99; /* out of values--invalid code */
} else if (*p < s) {
r.e = (unsigned char) (*p < 256 ? 16 : 15); /* 256 is EOB code */
r.v.n = (unsigned short) (*p++); /* simple code is just the value */
} else {
r.e = (unsigned char) e[*p - s]; /* non-simple--look up in lists */
r.v.n = d[*p++ - s];
}
/* fill code-like entries with r */
f = 1 << (k - w);
for (j = i >> w; j < z; j += f) {
q[j] = r;
}
/* backwards increment the k-bit code i */
for (j = 1 << (k - 1); i & j; j >>= 1) {
i ^= j;
}
i ^= j;
/* backup over finished tables */
while ((i & ((1 << w) - 1)) != x[htl]) {
w = ws[--htl];
}
}
}
/* return actual size of base table */
*m = ws[1];
/* Return 1 if we were given an incomplete table */
return y != 0 && g != 1;
} | 1 | [
"CWE-476"
] | busybox | 1de25a6e87e0e627aa34298105a3d17c60a1f44e | 69,924,166,591,076,525,000,000,000,000,000,000,000 | 175 | unzip: test for bad archive SEGVing
function old new delta
huft_build 1296 1300 +4
Signed-off-by: Denys Vlasenko <[email protected]> |
handle_update_in_thread_func (GTask *task,
gpointer source_object,
gpointer task_data,
GCancellable *cancellable)
{
PortalFlatpakUpdateMonitor *monitor = source_object;
UpdateMonitorData *m = update_monitor_get_data (monitor);
g_autoptr(GError) error = NULL;
const char *window;
window = (const char *)g_object_get_data (G_OBJECT (task), "window");
if (request_update_permissions_sync (monitor, m->name, window, &error))
{
g_autoptr(GFile) installation_path = update_monitor_get_installation_path (monitor);
g_autofree char *ref = flatpak_build_app_ref (m->name, m->branch, m->arch);
const char *argv[] = { "/proc/self/exe", "flatpak-portal", "--update", flatpak_file_get_path_cached (installation_path), ref, NULL };
int sockets[2];
GPid pid;
if (socketpair (AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, sockets) != 0)
{
glnx_throw_errno (&error);
}
else
{
gboolean spawn_ok;
spawn_ok = g_spawn_async (NULL, (char **)argv, NULL,
G_SPAWN_FILE_AND_ARGV_ZERO |
G_SPAWN_LEAVE_DESCRIPTORS_OPEN,
update_child_setup_func, &sockets[1],
&pid, &error);
close (sockets[1]); // Close remote side
if (spawn_ok)
{
if (!handle_update_responses (monitor, sockets[0], &error))
{
if (g_error_matches (error, G_IO_ERROR, G_IO_ERROR_CANCELLED))
kill (pid, SIGINT);
}
}
close (sockets[0]); // Close local side
}
}
if (error)
emit_progress_error (monitor, error);
g_mutex_lock (&m->lock);
m->installing = FALSE;
g_mutex_unlock (&m->lock);
} | 0 | [
"CWE-94",
"CWE-74"
] | flatpak | aeb6a7ab0abaac4a8f4ad98b3df476d9de6b8bd4 | 197,833,277,865,661,430,000,000,000,000,000,000,000 | 53 | portal: Convert --env in extra-args into --env-fd
This hides overridden variables from the command-line, which means
processes running under other uids can't see them in /proc/*/cmdline,
which might be important if they contain secrets.
Signed-off-by: Simon McVittie <[email protected]>
Part-of: https://github.com/flatpak/flatpak/security/advisories/GHSA-4ppf-fxf6-vxg2 |
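
The hardening described above rests on a simple observation: argv is world-readable through /proc/<pid>/cmdline, so secrets belong on a private channel such as an inherited file descriptor. The sketch below illustrates the idea in plain C; the --env-fd spelling follows the commit message, while the NUL-terminated KEY=VALUE framing and the sample value are assumptions made for this example, not the portal's actual protocol.

```c
/* Minimal sketch: instead of putting "--env=TOKEN=secret" on the command line,
 * write the assignment into a pipe and pass only the descriptor number. Only
 * the fd number then appears in /proc/<pid>/cmdline. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void) {
    int fds[2];
    if (pipe(fds) != 0) {
        perror("pipe");
        return 1;
    }

    static const char record[] = "TOKEN=not-so-secret";   /* illustrative value */
    if (write(fds[1], record, sizeof(record)) < 0)        /* includes the NUL */
        perror("write");
    close(fds[1]);

    char argbuf[32];
    snprintf(argbuf, sizeof(argbuf), "--env-fd=%d", fds[0]);
    printf("argv would carry only: %s\n", argbuf);

    /* The receiving side reads the records and applies them with setenv(). */
    char buf[128];
    ssize_t n = read(fds[0], buf, sizeof(buf) - 1);
    if (n > 0) {
        buf[n] = '\0';
        printf("received: %s\n", buf);
    }
    close(fds[0]);
    return 0;
}
```
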
static CImg<T> get_load_inr(const char *const filename, float *const voxel_size=0) {
return CImg<T>().load_inr(filename,voxel_size);
} | 0 | [
"CWE-770"
] | cimg | 619cb58dd90b4e03ac68286c70ed98acbefd1c90 | 84,980,554,923,391,390,000,000,000,000,000,000,000 | 3 | CImg<>::load_bmp() and CImg<>::load_pandore(): Check that dimensions encoded in file does not exceed file size. |
static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
const char **cipher_str_ret, int *keysize_ret)
{
if (S_ISREG(inode->i_mode)) {
if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) {
*cipher_str_ret = "xts(aes)";
*keysize_ret = FS_AES_256_XTS_KEY_SIZE;
return 0;
}
pr_warn_once("fscrypto: unsupported contents encryption mode "
"%d for inode %lu\n",
ci->ci_data_mode, inode->i_ino);
return -ENOKEY;
}
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) {
*cipher_str_ret = "cts(cbc(aes))";
*keysize_ret = FS_AES_256_CTS_KEY_SIZE;
return 0;
}
pr_warn_once("fscrypto: unsupported filenames encryption mode "
"%d for inode %lu\n",
ci->ci_filename_mode, inode->i_ino);
return -ENOKEY;
}
pr_warn_once("fscrypto: unsupported file type %d for inode %lu\n",
(inode->i_mode & S_IFMT), inode->i_ino);
return -ENOKEY;
} | 0 | [
"CWE-416",
"CWE-476"
] | linux | 1b53cf9815bb4744958d41f3795d5d5a1d365e2d | 56,436,063,930,936,180,000,000,000,000,000,000,000 | 31 | fscrypt: remove broken support for detecting keyring key revocation
Filesystem encryption ostensibly supported revoking a keyring key that
had been used to "unlock" encrypted files, causing those files to become
"locked" again. This was, however, buggy for several reasons, the most
severe of which was that when key revocation happened to be detected for
an inode, its fscrypt_info was immediately freed, even while other
threads could be using it for encryption or decryption concurrently.
This could be exploited to crash the kernel or worse.
This patch fixes the use-after-free by removing the code which detects
the keyring key having been revoked, invalidated, or expired. Instead,
an encrypted inode that is "unlocked" now simply remains unlocked until
it is evicted from memory. Note that this is no worse than the case for
block device-level encryption, e.g. dm-crypt, and it still remains
possible for a privileged user to evict unused pages, inodes, and
dentries by running 'sync; echo 3 > /proc/sys/vm/drop_caches', or by
simply unmounting the filesystem. In fact, one of those actions was
already needed anyway for key revocation to work even somewhat sanely.
This change is not expected to break any applications.
In the future I'd like to implement a real API for fscrypt key
revocation that interacts sanely with ongoing filesystem operations ---
waiting for existing operations to complete and blocking new operations,
and invalidating and sanitizing key material and plaintext from the VFS
caches. But this is a hard problem, and for now this bug must be fixed.
This bug affected almost all versions of ext4, f2fs, and ubifs
encryption, and it was potentially reachable in any kernel configured
with encryption support (CONFIG_EXT4_ENCRYPTION=y,
CONFIG_EXT4_FS_ENCRYPTION=y, CONFIG_F2FS_FS_ENCRYPTION=y, or
CONFIG_UBIFS_FS_ENCRYPTION=y). Note that older kernels did not use the
shared fs/crypto/ code, but due to the potential security implications
of this bug, it may still be worthwhile to backport this fix to them.
Fixes: b7236e21d55f ("ext4 crypto: reorganize how we store keys in the inode")
Cc: [email protected] # v4.2+
Signed-off-by: Eric Biggers <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
Acked-by: Michael Halcrow <[email protected]> |
xmlSchemaCheckElementDeclConsistent(xmlSchemaParserCtxtPtr pctxt,
xmlSchemaBasicItemPtr ctxtComponent,
xmlSchemaParticlePtr ctxtParticle,
xmlSchemaParticlePtr searchParticle,
xmlSchemaParticlePtr curParticle,
int search)
{
return(0);
int ret = 0;
xmlSchemaParticlePtr cur = curParticle;
if (curParticle == NULL) {
return(0);
}
if (WXS_PARTICLE_TERM(curParticle) == NULL) {
/*
* Just return in this case. A missing "term" of the particle
* might arise due to an invalid "term" component.
*/
return(0);
}
while (cur != NULL) {
switch (WXS_PARTICLE_TERM(cur)->type) {
case XML_SCHEMA_TYPE_ANY:
break;
case XML_SCHEMA_TYPE_ELEMENT:
if (search == 0) {
ret = xmlSchemaCheckElementDeclConsistent(pctxt,
ctxtComponent, ctxtParticle, cur, ctxtParticle, 1);
if (ret != 0)
return(ret);
} else {
xmlSchemaElementPtr elem =
WXS_ELEM_CAST(WXS_PARTICLE_TERM(cur));
/*
* SPEC Element Declarations Consistent:
* "If the {particles} contains, either directly,
* indirectly (that is, within the {particles} of a
* contained model group, recursively) or `implicitly`
* two or more element declaration particles with
* the same {name} and {target namespace}, then
* all their type definitions must be the same
* top-level definition [...]"
*/
if (xmlStrEqual(WXS_PARTICLE_TERM_AS_ELEM(cur)->name,
WXS_PARTICLE_TERM_AS_ELEM(searchParticle)->name) &&
xmlStrEqual(WXS_PARTICLE_TERM_AS_ELEM(cur)->targetNamespace,
WXS_PARTICLE_TERM_AS_ELEM(searchParticle)->targetNamespace))
{
xmlChar *strA = NULL, *strB = NULL;
xmlSchemaCustomErr(ACTXT_CAST pctxt,
/* TODO: error code */
XML_SCHEMAP_COS_NONAMBIG,
WXS_ITEM_NODE(cur), NULL,
"In the content model of %s, there are multiple "
"element declarations for '%s' with different "
"type definitions",
xmlSchemaGetComponentDesignation(&strA,
ctxtComponent),
xmlSchemaFormatQName(&strB,
WXS_PARTICLE_TERM_AS_ELEM(cur)->targetNamespace,
WXS_PARTICLE_TERM_AS_ELEM(cur)->name));
FREE_AND_NULL(strA);
FREE_AND_NULL(strB);
return(XML_SCHEMAP_COS_NONAMBIG);
}
}
break;
case XML_SCHEMA_TYPE_SEQUENCE: {
break;
}
case XML_SCHEMA_TYPE_CHOICE:{
/*
xmlSchemaTreeItemPtr sub;
sub = WXS_PARTICLE_TERM(particle)->children; (xmlSchemaParticlePtr)
while (sub != NULL) {
ret = xmlSchemaCheckElementDeclConsistent(pctxt, ctxtComponent,
ctxtParticle, ctxtElem);
if (ret != 0)
return(ret);
sub = sub->next;
}
*/
break;
}
case XML_SCHEMA_TYPE_ALL:
break;
case XML_SCHEMA_TYPE_GROUP:
break;
default:
xmlSchemaInternalErr2(ACTXT_CAST pctxt,
"xmlSchemaCheckElementDeclConsistent",
"found unexpected term of type '%s' in content model",
WXS_ITEM_TYPE_NAME(WXS_PARTICLE_TERM(cur)), NULL);
return(-1);
}
cur = (xmlSchemaParticlePtr) cur->next;
}
exit:
return(ret);
} | 0 | [
"CWE-134"
] | libxml2 | 4472c3a5a5b516aaf59b89be602fbce52756c3e9 | 222,630,376,885,481,540,000,000,000,000,000,000,000 | 104 | Fix some format string warnings with possible format string vulnerability
For https://bugzilla.gnome.org/show_bug.cgi?id=761029
Decorate every method in libxml2 with the appropriate
LIBXML_ATTR_FORMAT(fmt,args) macro and add some cleanups
following the reports. |
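
The LIBXML_ATTR_FORMAT(fmt,args) decoration mentioned above maps, on GCC and Clang, to the compiler's printf format-checking attribute, which is what turns latent format-string problems into compile-time warnings. Below is a generic, hand-written sketch of such a macro and an annotated variadic logger; it is a stand-in, not libxml2's actual definition.

```c
/* Generic sketch of printf-style format checking via a function attribute.
 * With the annotation, a %s/argument mismatch is reported under -Wformat, and
 * a non-literal format string under -Wformat-nonliteral / -Wformat-security. */
#include <stdarg.h>
#include <stdio.h>

#if defined(__GNUC__)
# define ATTR_FORMAT(fmt, args) __attribute__((format(printf, fmt, args)))
#else
# define ATTR_FORMAT(fmt, args)
#endif

static void my_log(const char *fmt, ...) ATTR_FORMAT(1, 2);

static void my_log(const char *fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int main(void) {
    my_log("loaded %d entities from %s\n", 3, "doc.xml");  /* checked: OK */
    /* my_log(user_supplied);  <- would be flagged: format not a literal */
    return 0;
}
```
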
int dtls1_dispatch_alert(SSL *s)
{
int i, j;
void (*cb) (const SSL *ssl, int type, int val) = NULL;
unsigned char buf[DTLS1_AL_HEADER_LENGTH];
unsigned char *ptr = &buf[0];
s->s3->alert_dispatch = 0;
memset(buf, 0x00, sizeof(buf));
*ptr++ = s->s3->send_alert[0];
*ptr++ = s->s3->send_alert[1];
#ifdef DTLS1_AD_MISSING_HANDSHAKE_MESSAGE
if (s->s3->send_alert[1] == DTLS1_AD_MISSING_HANDSHAKE_MESSAGE) {
s2n(s->d1->handshake_read_seq, ptr);
# if 0
if (s->d1->r_msg_hdr.frag_off == 0)
/*
* waiting for a new msg
*/
else
s2n(s->d1->r_msg_hdr.seq, ptr); /* partial msg read */
# endif
# if 0
fprintf(stderr,
"s->d1->handshake_read_seq = %d, s->d1->r_msg_hdr.seq = %d\n",
s->d1->handshake_read_seq, s->d1->r_msg_hdr.seq);
# endif
l2n3(s->d1->r_msg_hdr.frag_off, ptr);
}
#endif
i = do_dtls1_write(s, SSL3_RT_ALERT, &buf[0], sizeof(buf), 0);
if (i <= 0) {
s->s3->alert_dispatch = 1;
/* fprintf( stderr, "not done with alert\n" ); */
} else {
if (s->s3->send_alert[0] == SSL3_AL_FATAL
#ifdef DTLS1_AD_MISSING_HANDSHAKE_MESSAGE
|| s->s3->send_alert[1] == DTLS1_AD_MISSING_HANDSHAKE_MESSAGE
#endif
)
(void)BIO_flush(s->wbio);
if (s->msg_callback)
s->msg_callback(1, s->version, SSL3_RT_ALERT, s->s3->send_alert,
2, s, s->msg_callback_arg);
if (s->info_callback != NULL)
cb = s->info_callback;
else if (s->ctx->info_callback != NULL)
cb = s->ctx->info_callback;
if (cb != NULL) {
j = (s->s3->send_alert[0] << 8) | s->s3->send_alert[1];
cb(s, SSL_CB_WRITE_ALERT, j);
}
}
return (i);
} | 0 | [
"CWE-189"
] | openssl | b77ab018b79a00f789b0fb85596b446b08be4c9d | 295,364,903,227,056,360,000,000,000,000,000,000,000 | 62 | Fix DTLS replay protection
The DTLS implementation provides some protection against replay attacks
in accordance with RFC6347 section 4.1.2.6.
A sliding "window" of valid record sequence numbers is maintained with
the "right" hand edge of the window set to the highest sequence number we
have received so far. Records that arrive that are off the "left" hand
edge of the window are rejected. Records within the window are checked
against a list of records received so far. If we already received it then
we also reject the new record.
If we have not already received the record, or the sequence number is off
the right hand edge of the window then we verify the MAC of the record.
If MAC verification fails then we discard the record. Otherwise we mark
the record as received. If the sequence number was off the right hand edge
of the window, then we slide the window along so that the right hand edge
is in line with the newly received sequence number.
Records may arrive for future epochs, i.e. a record from after a CCS being
sent, can arrive before the CCS does if the packets get re-ordered. As we
have not yet received the CCS we are not yet in a position to decrypt or
validate the MAC of those records. OpenSSL places those records on an
unprocessed records queue. It additionally updates the window immediately,
even though we have not yet verified the MAC. This will only occur if
currently in a handshake/renegotiation.
This could be exploited by an attacker by sending a record for the next
epoch (which does not have to decrypt or have a valid MAC), with a very
large sequence number. This means the right hand edge of the window is
moved very far to the right, and all subsequent legitimate packets are
dropped causing a denial of service.
A similar effect can be achieved during the initial handshake. In this
case there is no MAC key negotiated yet. Therefore an attacker can send a
message for the current epoch with a very large sequence number. The code
will process the record as normal. If the hanshake message sequence number
(as opposed to the record sequence number that we have been talking about
so far) is in the future then the injected message is bufferred to be
handled later, but the window is still updated. Therefore all subsequent
legitimate handshake records are dropped. This aspect is not considered a
security issue because there are many ways for an attacker to disrupt the
initial handshake and prevent it from completing successfully (e.g.
injection of a handshake message will cause the Finished MAC to fail and
the handshake to be aborted). This issue comes about as a result of trying
to do replay protection, but having no integrity mechanism in place yet.
Does it even make sense to have replay protection in epoch 0? That
issue isn't addressed here though.
This addressed an OCAP Audit issue.
CVE-2016-2181
Reviewed-by: Richard Levitte <[email protected]> |
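
The sliding-window replay protection the commit message describes (RFC 6347 §4.1.2.6, essentially the same scheme RFC 4303 uses for IPsec) is easy to sketch: keep the highest sequence number seen plus a bitmap of recently seen numbers, reject anything left of the window or already marked, and — the crux of the fix — only advance the window after the record's MAC has verified. The code below is a generic illustration, not OpenSSL's implementation.

```c
/* Generic 64-entry anti-replay window. Call replay_check() before MAC
 * verification to drop obvious duplicates, and replay_update() only after the
 * MAC has verified -- advancing the window for unauthenticated records is
 * exactly what enabled the DoS described above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct replay_window {
    uint64_t top;     /* highest sequence number accepted so far */
    uint64_t bitmap;  /* bit i set => (top - i) was seen, i in [0,63] */
};

static bool replay_check(const struct replay_window *w, uint64_t seq) {
    if (seq > w->top)
        return true;                       /* right of the window: new */
    if (w->top - seq >= 64)
        return false;                      /* off the left edge: too old */
    return (w->bitmap & ((uint64_t)1 << (w->top - seq))) == 0;
}

static void replay_update(struct replay_window *w, uint64_t seq) {
    if (seq > w->top) {
        uint64_t shift = seq - w->top;
        w->bitmap = (shift >= 64) ? 0 : w->bitmap << shift;
        w->bitmap |= 1;                    /* mark the new top */
        w->top = seq;
    } else {
        w->bitmap |= (uint64_t)1 << (w->top - seq);
    }
}

int main(void) {
    struct replay_window w = { 0, 1 };     /* sequence 0 already seen */
    uint64_t seqs[] = { 3, 1, 3, 2, 70, 5 };
    for (size_t i = 0; i < sizeof(seqs) / sizeof(seqs[0]); i++) {
        bool fresh = replay_check(&w, seqs[i]);
        printf("seq %llu: %s\n", (unsigned long long)seqs[i],
               fresh ? "accept (after MAC check)" : "drop");
        if (fresh)
            replay_update(&w, seqs[i]);    /* real code: only after MAC OK */
    }
    return 0;
}
```

Note how the final two inputs behave: once 70 is accepted, the window slides far to the right and the legitimate-looking 5 is dropped — which is why accepting an unauthenticated record with a huge sequence number is a denial of service.
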
static void test_bug6049()
{
MYSQL_STMT *stmt;
MYSQL_BIND my_bind[1];
MYSQL_RES *res;
MYSQL_ROW row;
const char *stmt_text;
char buffer[30];
ulong length;
int rc;
myheader("test_bug6049");
stmt_text= "SELECT MAKETIME(-25, 12, 12)";
rc= mysql_real_query(mysql, stmt_text, strlen(stmt_text));
myquery(rc);
res= mysql_store_result(mysql);
row= mysql_fetch_row(res);
stmt= mysql_stmt_init(mysql);
rc= mysql_stmt_prepare(stmt, stmt_text, strlen(stmt_text));
check_execute(stmt, rc);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
bzero((char*) my_bind, sizeof(my_bind));
my_bind[0].buffer_type = MYSQL_TYPE_STRING;
my_bind[0].buffer = &buffer;
my_bind[0].buffer_length = sizeof(buffer);
my_bind[0].length = &length;
mysql_stmt_bind_result(stmt, my_bind);
rc= mysql_stmt_fetch(stmt);
DIE_UNLESS(rc == 0);
if (!opt_silent)
{
printf("Result from query: %s\n", row[0]);
printf("Result from prepared statement: %s\n", (char*) buffer);
}
DIE_UNLESS(strcmp(row[0], (char*) buffer) == 0);
mysql_free_result(res);
mysql_stmt_close(stmt);
} | 0 | [
"CWE-416"
] | server | eef21014898d61e77890359d6546d4985d829ef6 | 255,813,629,169,167,600,000,000,000,000,000,000,000 | 47 | MDEV-11933 Wrong usage of linked list in mysql_prune_stmt_list
mysql_prune_stmt_list() was walking the list following
element->next pointers, but inside the loop it was invoking
list_add(element) that modified element->next. So, mysql_prune_stmt_list()
failed to visit and reset all elements, and some of them were left
with pointers to invalid MYSQL. |
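
The bug pattern in that report — the loop follows element->next, but the loop body re-links the element and thereby rewrites next — has a standard remedy: capture the successor before mutating the node. A minimal generic sketch (not MariaDB's list code):

```c
/* Walking a singly linked list while moving nodes onto another list: the
 * successor must be saved before the node is re-linked, otherwise the walk
 * follows the *new* list and skips the remaining elements. */
#include <stdio.h>
#include <stdlib.h>

struct node {
    int id;
    struct node *next;
};

static struct node *push(struct node *head, int id) {
    struct node *n = malloc(sizeof(*n));
    if (!n) {
        perror("malloc");
        exit(1);
    }
    n->id = id;
    n->next = head;
    return n;
}

int main(void) {
    struct node *src = NULL, *dst = NULL;
    for (int i = 1; i <= 4; i++)
        src = push(src, i);

    for (struct node *e = src; e; ) {
        struct node *next = e->next;   /* save BEFORE re-linking (the fix) */
        e->next = dst;                 /* move the node onto dst ...       */
        dst = e;                       /* ... which rewrites e->next       */
        e = next;                      /* continue from the saved pointer  */
    }

    for (struct node *e = dst; e; e = e->next)
        printf("%d ", e->id);          /* all four ids are visited */
    printf("\n");

    for (struct node *e = dst; e; ) {  /* cleanup */
        struct node *next = e->next;
        free(e);
        e = next;
    }
    return 0;
}
```
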
void MirrorJob::InitSets()
{
if(FlagSet(TARGET_FLAT) && !parent_mirror && target_set)
source_set->Sort(FileSet::BYNAME_FLAT);
source_set->Count(NULL,&stats.tot_files,&stats.tot_symlinks,&stats.tot_files);
to_rm=new FileSet(target_set);
to_rm->SubtractAny(source_set);
if(FlagSet(DELETE_EXCLUDED) && target_set_excluded)
to_rm->Merge(target_set_excluded);
to_transfer=new FileSet(source_set);
if(!FlagSet(TRANSFER_ALL)) {
same=new FileSet(source_set);
int ignore=0;
if(FlagSet(ONLY_NEWER))
ignore|=FileInfo::IGNORE_SIZE_IF_OLDER|FileInfo::IGNORE_DATE_IF_OLDER;
if(!FlagSet(UPLOAD_OLDER) && strcmp(target_session->GetProto(),"file"))
ignore|=FileInfo::IGNORE_DATE_IF_OLDER;
if(FlagSet(IGNORE_TIME))
ignore|=FileInfo::DATE;
if(FlagSet(IGNORE_SIZE))
ignore|=FileInfo::SIZE;
to_transfer->SubtractSame(target_set,ignore);
same->SubtractAny(to_transfer);
}
if(newer_than!=NO_DATE)
to_transfer->SubtractNotNewerThan(newer_than);
if(older_than!=NO_DATE)
to_transfer->SubtractNotOlderThan(older_than);
if(size_range)
to_transfer->SubtractSizeOutside(size_range);
if(FlagSet(SCAN_ALL_FIRST)) {
to_mkdir=new FileSet(to_transfer);
to_mkdir->SubtractNotDirs();
to_mkdir->SubtractAny(target_set);
}
switch(recursion_mode) {
case RECURSION_NEVER:
to_transfer->SubtractDirs();
break;
case RECURSION_MISSING:
to_transfer->SubtractDirs(target_set);
break;
case RECURSION_NEWER:
to_transfer->SubtractNotOlderDirs(target_set);
break;
case RECURSION_ALWAYS:
break;
}
if(skip_noaccess)
to_transfer->ExcludeUnaccessible(source_session->GetUser());
new_files_set=new FileSet(to_transfer);
new_files_set->SubtractAny(target_set);
old_files_set=new FileSet(target_set);
old_files_set->SubtractNotIn(to_transfer);
to_rm_mismatched=new FileSet(old_files_set);
to_rm_mismatched->SubtractSameType(to_transfer);
to_rm_mismatched->SubtractNotDirs();
if(!FlagSet(DELETE))
to_transfer->SubtractAny(to_rm_mismatched);
if(FlagSet(TARGET_FLAT) && !parent_mirror && target_set) {
source_set->Unsort();
to_transfer->UnsortFlat();
to_transfer->SubtractDirs();
same->UnsortFlat();
to_mkdir->Empty();
new_files_set->UnsortFlat();
}
const char *sort_by=ResMgr::Query("mirror:sort-by",0);
bool desc=strstr(sort_by,"-desc");
if(!strncmp(sort_by,"name",4))
to_transfer->SortByPatternList(ResMgr::Query("mirror:order",0));
else if(!strncmp(sort_by,"date",4))
to_transfer->Sort(FileSet::BYDATE);
else if(!strncmp(sort_by,"size",4))
to_transfer->Sort(FileSet::BYSIZE,false,true);
if(desc)
to_transfer->ReverseSort();
int dir_count=0;
if(to_mkdir) {
to_mkdir->Count(&dir_count,NULL,NULL,NULL);
only_dirs = (dir_count==to_mkdir->count());
} else {
to_transfer->Count(&dir_count,NULL,NULL,NULL);
only_dirs = (dir_count==to_transfer->count());
}
} | 0 | [
"CWE-20",
"CWE-401"
] | lftp | a27e07d90a4608ceaf928b1babb27d4d803e1992 | 188,904,350,674,090,800,000,000,000,000,000,000,000 | 103 | mirror: prepend ./ to rm and chmod arguments to avoid URL recognition (fix #452) |
static TEE_Result tee_svc_update_out_param(
struct tee_ta_param *param,
void *tmp_buf_va[TEE_NUM_PARAMS],
struct utee_params *usr_param)
{
size_t n;
uint64_t *vals = usr_param->vals;
for (n = 0; n < TEE_NUM_PARAMS; n++) {
switch (TEE_PARAM_TYPE_GET(param->types, n)) {
case TEE_PARAM_TYPE_MEMREF_OUTPUT:
case TEE_PARAM_TYPE_MEMREF_INOUT:
/*
* Memory copy is only needed if there's a temporary
* buffer involved, tmp_buf_va[n] is only update if
* a temporary buffer is used. Otherwise only the
* size needs to be updated.
*/
if (tmp_buf_va[n] &&
param->u[n].mem.size <= vals[n * 2 + 1]) {
void *src = tmp_buf_va[n];
void *dst = (void *)(uintptr_t)vals[n * 2];
TEE_Result res;
res = tee_svc_copy_to_user(dst, src,
param->u[n].mem.size);
if (res != TEE_SUCCESS)
return res;
}
usr_param->vals[n * 2 + 1] = param->u[n].mem.size;
break;
case TEE_PARAM_TYPE_VALUE_OUTPUT:
case TEE_PARAM_TYPE_VALUE_INOUT:
vals[n * 2] = param->u[n].val.a;
vals[n * 2 + 1] = param->u[n].val.b;
break;
default:
continue;
}
}
return TEE_SUCCESS;
} | 0 | [
"CWE-119",
"CWE-787"
] | optee_os | d5c5b0b77b2b589666024d219a8007b3f5b6faeb | 152,180,582,829,434,460,000,000,000,000,000,000,000 | 46 | core: svc: always check ta parameters
Always check TA parameters from a user TA. This prevents a user TA from
passing invalid pointers to a pseudo TA.
Fixes: OP-TEE-2018-0007: "Buffer checks missing when calling pseudo
TAs".
Signed-off-by: Jens Wiklander <[email protected]>
Tested-by: Joakim Bech <[email protected]> (QEMU v7, v8)
Reviewed-by: Joakim Bech <[email protected]>
Reported-by: Riscure <[email protected]>
Reported-by: Alyssa Milburn <[email protected]>
Acked-by: Etienne Carriere <[email protected]> |
main(int argc,char *argv[])
{
unsigned char fpr[20];
char *uri;
int i;
if (argc < 2)
{
fprintf (stderr, "usage: pka mail-addresses\n");
return 1;
}
argc--;
argv++;
for (; argc; argc--, argv++)
{
uri = get_pka_info ( *argv, fpr );
printf ("%s", *argv);
if (uri)
{
putchar (' ');
for (i=0; i < 20; i++)
printf ("%02X", fpr[i]);
if (*uri)
printf (" %s", uri);
xfree (uri);
}
putchar ('\n');
}
return 0;
} | 0 | [
"CWE-20"
] | gnupg | 2183683bd633818dd031b090b5530951de76f392 | 213,396,694,160,201,900,000,000,000,000,000,000,000 | 31 | Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]> |
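
The problem the commit describes — writing `buf[0] << 24` on a byte that has been promoted to signed int, so any value of 0x80 or above is shifted into the sign bit — is avoided by converting through unsigned types, which is what the new inline helpers do. The helper name below mirrors the commit message; its exact definition here is an assumption, not the host2net.h source.

```c
/* Why "buf[0] << 24" is dangerous: the byte promotes to (signed) int, so a
 * value >= 0x80 shifts into the sign bit (undefined behaviour, and a source of
 * sign extension when widened). Converting each byte through uint32_t first
 * avoids both problems. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static inline uint32_t buf32_to_u32(const void *buffer) {
    const unsigned char *p = buffer;
    return ((uint32_t)p[0] << 24) |
           ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] <<  8) |
            (uint32_t)p[3];
}

int main(void) {
    const unsigned char buf[4] = { 0xC0, 0x00, 0x00, 0x01 };

    /* Problematic spelling: (buf[0] << 24) overflows a 32-bit signed int. */
    uint32_t good = buf32_to_u32(buf);
    printf("0x%08" PRIX32 "\n", good);   /* 0xC0000001 */
    return 0;
}
```
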
static void schedule_async_request(struct winbindd_child *child)
{
struct winbindd_async_request *request = child->requests;
if (request == NULL) {
return;
}
if (child->event.flags != 0) {
return; /* Busy */
}
if ((child->pid == 0) && (!fork_domain_child(child))) {
/* Cancel all outstanding requests */
while (request != NULL) {
/* request might be free'd in the continuation */
struct winbindd_async_request *next = request->next;
request->continuation(request->private_data, False);
request = next;
}
return;
}
/* Now we know who we're sending to - remember the pid. */
request->child_pid = child->pid;
setup_async_write(&child->event, request->request,
sizeof(*request->request),
async_main_request_sent, request);
return;
} | 0 | [] | samba | c93d42969451949566327e7fdbf29bfcee2c8319 | 265,063,508,268,353,080,000,000,000,000,000,000,000 | 33 | Back-port of Volkers fix.
Fix a race condition in winbind leading to a crash
When SIGCHLD handling is delayed for some reason, sending a request to a child
can fail early because the child has died already. In this case
async_main_request_sent() directly called the continuation function without
properly removing the malfunctioning child process and the requests in the
queue. The next request would then crash in the DLIST_ADD_END() in
async_request() because the request pending for the child had been
talloc_free()'ed and yet still was referenced in the list.
This one is *old*...
Volker
Jeremy. |
static struct socket *get_socket(int fd)
{
struct socket *sock;
/* special case to disable backend */
if (fd == -1)
return NULL;
sock = get_raw_socket(fd);
if (!IS_ERR(sock))
return sock;
sock = get_tap_socket(fd);
if (!IS_ERR(sock))
return sock;
return ERR_PTR(-ENOTSOCK);
} | 0 | [
"CWE-399"
] | linux | dd7633ecd553a5e304d349aa6f8eb8a0417098c5 | 240,773,782,291,057,300,000,000,000,000,000,000,000 | 15 | vhost-net: fix use-after-free in vhost_net_flush
vhost_net_ubuf_put_and_wait has a confusing name:
it will actually also free it's argument.
Thus since commit 1280c27f8e29acf4af2da914e80ec27c3dbd5c01
"vhost-net: flush outstanding DMAs on memory change"
vhost_net_flush tries to use the argument after passing it
to vhost_net_ubuf_put_and_wait, this results
in use after free.
To fix, don't free the argument in vhost_net_ubuf_put_and_wait,
add an new API for callers that want to free ubufs.
Acked-by: Asias He <[email protected]>
Acked-by: Jason Wang <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
u32 opt;
int err = 0;
pr_debug("%p optname %d\n", sk, optname);
if (level != SOL_NFC)
return -ENOPROTOOPT;
lock_sock(sk);
switch (optname) {
case NFC_LLCP_RW:
if (sk->sk_state == LLCP_CONNECTED ||
sk->sk_state == LLCP_BOUND ||
sk->sk_state == LLCP_LISTEN) {
err = -EINVAL;
break;
}
if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
err = -EFAULT;
break;
}
if (opt > LLCP_MAX_RW) {
err = -EINVAL;
break;
}
llcp_sock->rw = (u8) opt;
break;
case NFC_LLCP_MIUX:
if (sk->sk_state == LLCP_CONNECTED ||
sk->sk_state == LLCP_BOUND ||
sk->sk_state == LLCP_LISTEN) {
err = -EINVAL;
break;
}
if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
err = -EFAULT;
break;
}
if (opt > LLCP_MAX_MIUX) {
err = -EINVAL;
break;
}
llcp_sock->miux = cpu_to_be16((u16) opt);
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
pr_debug("%p rw %d miux %d\n", llcp_sock,
llcp_sock->rw, llcp_sock->miux);
return err;
} | 0 | [] | net | c61760e6940dd4039a7f5e84a6afc9cdbf4d82b6 | 41,461,758,266,238,230,000,000,000,000,000,000,000 | 72 | net/nfc: fix use-after-free llcp_sock_bind/connect
Commits 8a4cd82d ("nfc: fix refcount leak in llcp_sock_connect()")
and c33b1cc62 ("nfc: fix refcount leak in llcp_sock_bind()")
fixed a refcount leak bug in bind/connect but introduced a
use-after-free if the same local is assigned to 2 different sockets.
This can be triggered by the following simple program:
int sock1 = socket( AF_NFC, SOCK_STREAM, NFC_SOCKPROTO_LLCP );
int sock2 = socket( AF_NFC, SOCK_STREAM, NFC_SOCKPROTO_LLCP );
memset( &addr, 0, sizeof(struct sockaddr_nfc_llcp) );
addr.sa_family = AF_NFC;
addr.nfc_protocol = NFC_PROTO_NFC_DEP;
bind( sock1, (struct sockaddr*) &addr, sizeof(struct sockaddr_nfc_llcp) )
bind( sock2, (struct sockaddr*) &addr, sizeof(struct sockaddr_nfc_llcp) )
close(sock1);
close(sock2);
Fix this by assigning NULL to llcp_sock->local after calling
nfc_llcp_local_put.
This addresses CVE-2021-23134.
Reported-by: Or Cohen <[email protected]>
Reported-by: Nadav Markus <[email protected]>
Fixes: c33b1cc62 ("nfc: fix refcount leak in llcp_sock_bind()")
Signed-off-by: Or Cohen <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
{
ram_addr_t ram_addr;
MemoryRegion *mr;
hwaddr l = 1;
mr = address_space_translate(as, addr, &addr, &l, false, attrs);
if (!memory_region_is_ram(mr)) {
return;
}
ram_addr = memory_region_get_ram_addr(mr) + addr;
tb_invalidate_phys_page_range(as->uc, ram_addr, ram_addr + 1);
} | 0 | [
"CWE-476"
] | unicorn | 3d3deac5e6d38602b689c4fef5dac004f07a2e63 | 317,073,853,910,934,870,000,000,000,000,000,000,000 | 14 | Fix crash when mapping a big memory and calling uc_close |
static inline void authenticate_cookie(void)
{
/* If we don't have an auth_filter, consider all cookies valid, and thus return early. */
if (!ctx.cfg.auth_filter) {
ctx.env.authenticated = 1;
return;
}
/* If we're having something POST'd to /login, we're authenticating POST,
* instead of the cookie, so call authenticate_post and bail out early.
* This pattern here should match /?p=login with POST. */
if (ctx.env.request_method && ctx.qry.page && !ctx.repo && \
!strcmp(ctx.env.request_method, "POST") && !strcmp(ctx.qry.page, "login")) {
authenticate_post();
return;
}
/* If we've made it this far, we're authenticating the cookie for real, so do that. */
open_auth_filter("authenticate-cookie");
ctx.env.authenticated = cgit_close_filter(ctx.cfg.auth_filter);
} | 0 | [] | cgit | 1c581a072651524f3b0d91f33e22a42c4166dd96 | 47,302,798,376,065,070,000,000,000,000,000,000,000 | 21 | ui-blob: Do not accept mimetype from user |
int check_srvr_ecc_cert_and_alg(X509 *x, SSL_CIPHER *cs)
{
unsigned long alg = cs->algorithms;
EVP_PKEY *pkey = NULL;
int keysize = 0;
int signature_nid = 0;
if (SSL_C_IS_EXPORT(cs))
{
/* ECDH key length in export ciphers must be <= 163 bits */
pkey = X509_get_pubkey(x);
if (pkey == NULL) return 0;
keysize = EVP_PKEY_bits(pkey);
EVP_PKEY_free(pkey);
if (keysize > 163) return 0;
}
/* This call populates the ex_flags field correctly */
X509_check_purpose(x, -1, 0);
if ((x->sig_alg) && (x->sig_alg->algorithm))
signature_nid = OBJ_obj2nid(x->sig_alg->algorithm);
if (alg & SSL_kECDH)
{
/* key usage, if present, must allow key agreement */
if (ku_reject(x, X509v3_KU_KEY_AGREEMENT))
{
return 0;
}
if (alg & SSL_aECDSA)
{
/* signature alg must be ECDSA */
if (signature_nid != NID_ecdsa_with_SHA1)
{
return 0;
}
}
if (alg & SSL_aRSA)
{
/* signature alg must be RSA */
if ((signature_nid != NID_md5WithRSAEncryption) &&
(signature_nid != NID_md4WithRSAEncryption) &&
(signature_nid != NID_md2WithRSAEncryption))
{
return 0;
}
}
}
else if (alg & SSL_aECDSA)
{
/* key usage, if present, must allow signing */
if (ku_reject(x, X509v3_KU_DIGITAL_SIGNATURE))
{
return 0;
}
}
return 1; /* all checks are ok */
} | 0 | [
"CWE-310"
] | openssl | c6a876473cbff0fd323c8abcaace98ee2d21863d | 121,099,664,026,781,300,000,000,000,000,000,000,000 | 58 | Support TLS_FALLBACK_SCSV.
Reviewed-by: Stephen Henson <[email protected]> |
make_libversion (const char *libname, const char *(*getfnc)(const char*))
{
const char *s;
char *result;
if (maybe_setuid)
{
gcry_control (GCRYCTL_INIT_SECMEM, 0, 0); /* Drop setuid. */
maybe_setuid = 0;
}
s = getfnc (NULL);
result = xmalloc (strlen (libname) + 1 + strlen (s) + 1);
strcpy (stpcpy (stpcpy (result, libname), " "), s);
return result;
} | 0 | [] | gnupg | abd5f6752d693b7f313c19604f0723ecec4d39a6 | 60,779,458,361,618,480,000,000,000,000,000,000,000 | 15 | dirmngr,gpgsm: Return NULL on fail
* dirmngr/ldapserver.c (ldapserver_parse_one): Set SERVER to NULL.
* sm/gpgsm.c (parse_keyserver_line): Ditto.
--
Reported-by: Joshua Rogers <[email protected]>
"If something inside the ldapserver_parse_one function failed,
'server' would be freed, then returned, leading to a
use-after-free. This code is likely copied from sm/gpgsm.c, which
was also susceptible to this bug."
Signed-off-by: Werner Koch <[email protected]> |
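
The defect described — an error path that frees the partially built object and then returns the freed pointer — and its fix are easy to show in isolation. The function and struct below are illustrative stand-ins, not the dirmngr/gpgsm sources.

```c
/* Sketch of "return NULL on fail": once the error path has freed the object,
 * the function must return NULL, not the stale pointer, or every caller that
 * checks for != NULL will happily use freed memory. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    char *host;
} server_t;

static server_t *parse_server_line(const char *line) {
    server_t *server = calloc(1, sizeof(*server));
    if (!server)
        return NULL;
    server->host = strdup(line);
    if (!server->host) {
        free(server);
        return NULL;          /* the fix: not "return server" */
    }
    return server;
}

int main(void) {
    server_t *s = parse_server_line("ldap.example.org");   /* illustrative host */
    if (s) {
        printf("parsed host: %s\n", s->host);
        free(s->host);
        free(s);
    }
    return 0;
}
```
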
theme_adium_constructed (GObject *object)
{
EmpathyThemeAdiumPriv *priv = GET_PRIV (object);
const gchar *font_family = NULL;
gint font_size = 0;
WebKitWebView *webkit_view = WEBKIT_WEB_VIEW (object);
WebKitWebInspector *webkit_inspector;
/* Set default settings */
font_family = tp_asv_get_string (priv->data->info, "DefaultFontFamily");
font_size = tp_asv_get_int32 (priv->data->info, "DefaultFontSize", NULL);
if (font_family && font_size) {
g_object_set (webkit_web_view_get_settings (webkit_view),
"default-font-family", font_family,
"default-font-size", font_size,
NULL);
} else {
empathy_webkit_bind_font_setting (webkit_view,
priv->gsettings_desktop,
EMPATHY_PREFS_DESKTOP_INTERFACE_DOCUMENT_FONT_NAME);
}
/* Setup webkit inspector */
webkit_inspector = webkit_web_view_get_inspector (webkit_view);
g_signal_connect (webkit_inspector, "inspect-web-view",
G_CALLBACK (theme_adium_inspect_web_view_cb),
object);
g_signal_connect (webkit_inspector, "show-window",
G_CALLBACK (theme_adium_inspector_show_window_cb),
object);
g_signal_connect (webkit_inspector, "close-window",
G_CALLBACK (theme_adium_inspector_close_window_cb),
object);
/* Load template */
theme_adium_load_template (EMPATHY_THEME_ADIUM (object));
priv->in_construction = FALSE;
} | 0 | [
"CWE-79"
] | empathy | 739aca418457de752be13721218aaebc74bd9d36 | 273,147,691,853,130,380,000,000,000,000,000,000,000 | 40 | theme_adium_append_message: escape alias before displaying it
Not doing so can lead to nasty HTML injection from hostile users.
https://bugzilla.gnome.org/show_bug.cgi?id=662035 |
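
The one-line summary above is the standard remedy for markup injection in GLib/WebKit-based chat views: pass untrusted strings (here, the contact alias) through an escaping helper before embedding them in markup. The sketch below uses GLib's general-purpose g_markup_escape_text(); the hostile alias value is invented, and whether Empathy's patch uses exactly this call is not shown in the excerpt.

```c
/* Escaping an attacker-controllable string before inserting it into markup or
 * HTML-like templates.
 * Build (assumed): gcc demo.c $(pkg-config --cflags --libs glib-2.0) */
#include <glib.h>
#include <stdio.h>

int main(void) {
    const char *alias = "<img src=x onerror=alert(1)>";   /* hostile alias (example) */

    gchar *safe = g_markup_escape_text(alias, -1);        /* -1: NUL-terminated */
    printf("raw:     %s\n", alias);
    printf("escaped: %s\n", safe);   /* &lt;img src=x onerror=alert(1)&gt; */

    g_free(safe);
    return 0;
}
```
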
static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct wireless_dev *wdev = info->user_ptr[1];
if (wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)
return -EOPNOTSUPP;
if (!rdev->ops->stop_p2p_device)
return -EOPNOTSUPP;
cfg80211_stop_p2p_device(rdev, wdev);
return 0;
} | 0 | [
"CWE-120"
] | linux | f88eb7c0d002a67ef31aeb7850b42ff69abc46dc | 10,383,782,839,363,222,000,000,000,000,000,000,000 | 15 | nl80211: validate beacon head
We currently don't validate the beacon head, i.e. the header,
fixed part and elements that are to go in front of the TIM
element. This means that the variable elements there can be
malformed, e.g. have a length exceeding the buffer size, but
most downstream code from this assumes that this has already
been checked.
Add the necessary checks to the netlink policy.
Cc: [email protected]
Fixes: ed1b6cc7f80f ("cfg80211/nl80211: add beacon settings")
Link: https://lore.kernel.org/r/1569009255-I7ac7fbe9436e9d8733439eab8acbbd35e55c74ef@changeid
Signed-off-by: Johannes Berg <[email protected]> |
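
The missing validation that commit adds boils down to walking the information elements — the usual 802.11 type-length-value layout of 1-byte ID, 1-byte length, payload — and rejecting any element whose declared length runs past the end of the buffer. Below is a generic sketch of that walk, independent of the kernel's netlink policy machinery.

```c
/* Validate a buffer of TLV-style information elements: each element is
 * <1-byte id><1-byte len><len bytes of payload>. Returns false if any
 * element's declared length would run past the end of the buffer. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool elements_valid(const uint8_t *data, size_t len) {
    size_t pos = 0;
    while (pos < len) {
        if (len - pos < 2)
            return false;                 /* truncated id/length header */
        uint8_t elen = data[pos + 1];
        if (elen > len - pos - 2)
            return false;                 /* payload claims more than remains */
        pos += 2 + (size_t)elen;
    }
    return true;
}

int main(void) {
    const uint8_t ok[]  = { 0x00, 0x03, 'f', 'o', 'o', 0x01, 0x00 };
    const uint8_t bad[] = { 0x00, 0x20, 'f', 'o', 'o' };   /* length 0x20 > remaining */
    printf("ok:  %d\n", elements_valid(ok,  sizeof(ok)));  /* 1 */
    printf("bad: %d\n", elements_valid(bad, sizeof(bad))); /* 0 */
    return 0;
}
```
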
static int parse_picture_header_intra(davs2_t *h, davs2_bs_t *bs)
{
int time_code_flag;
int progressive_frame;
int predict;
int i;
h->i_frame_type = AVS2_I_SLICE;
/* skip start code */
bs->i_bit_pos += 32;
u_v(bs, 32, "bbv_delay");
time_code_flag = u_v(bs, 1, "time_code_flag");
if (time_code_flag) {
/* time_code = */ u_v(bs, 24, "time_code");
}
if (h->b_bkgnd_picture) {
int background_picture_flag = u_v(bs, 1, "background_picture_flag");
if (background_picture_flag) {
int b_output = u_v(bs, 1, "background_picture_output_flag");
if (b_output) {
h->i_frame_type = AVS2_G_SLICE;
} else {
h->i_frame_type = AVS2_GB_SLICE;
}
}
}
h->i_coi = u_v(bs, 8, "coding_order");
if (h->seq_info.b_temporal_id_exist == 1) {
h->i_cur_layer = u_v(bs, TEMPORAL_MAXLEVEL_BIT, "temporal_id");
}
if (h->seq_info.head.low_delay == 0) {
h->i_display_delay = ue_v(bs, "picture_output_delay");
if (h->i_display_delay >= 64) {
davs2_log(h, DAVS2_LOG_ERROR, "invalid picture output delay intra.");
return -1;
}
}
predict = u_v(bs, 1, "use RCS in SPS");
if (predict) {
int index = u_v(bs, 5, "predict for RCS");
if (index >= h->seq_info.num_of_rps) {
davs2_log(h, DAVS2_LOG_ERROR, "invalid rps index.");
return -1;
}
h->rps = h->seq_info.seq_rps[index];
} else {
h->rps.refered_by_others = u_v(bs, 1, "refered by others");
h->rps.num_of_ref = u_v(bs, 3, "num of reference picture");
if (h->rps.num_of_ref > AVS2_MAX_REFS) {
davs2_log(h, DAVS2_LOG_ERROR, "invalid number of references.");
return -1;
}
for (i = 0; i < h->rps.num_of_ref; i++) {
h->rps.ref_pic[i] = u_v(bs, 6, "delta COI of ref pic");
}
h->rps.num_to_remove = u_v(bs, 3, "num of removed picture");
assert((unsigned int)h->rps.num_to_remove <= sizeof(h->rps.remove_pic) / sizeof(h->rps.remove_pic[0]));
for (i = 0; i < h->rps.num_to_remove; i++) {
h->rps.remove_pic[i] = u_v(bs, 6, "delta COI of removed pic");
}
u_v(bs, 1, "marker bit");
}
if (h->seq_info.head.low_delay) {
/* bbv_check_times = */ ue_v(bs, "bbv check times");
}
progressive_frame = u_v(bs, 1, "progressive_frame");
if (!progressive_frame) {
h->i_pic_coding_type = (int8_t)u_v(bs, 1, "picture_structure");
} else {
h->i_pic_coding_type = FRAME;
}
h->b_top_field_first = u_flag(bs, "top_field_first");
h->b_repeat_first_field = u_flag(bs, "repeat_first_field");
if (h->seq_info.b_field_coding) {
h->b_top_field = u_flag(bs, "is_top_field");
/* reserved = */ u_v(bs, 1, "reserved bit for interlace coding");
}
h->b_fixed_picture_qp = u_flag(bs, "fixed_picture_qp");
h->i_picture_qp = u_v(bs, 7, "picture_qp");
h->b_loop_filter = u_v(bs, 1, "loop_filter_disable") ^ 0x01;
if (h->b_loop_filter) {
int loop_filter_parameter_flag = u_v(bs, 1, "loop_filter_parameter_flag");
if (loop_filter_parameter_flag) {
h->i_alpha_offset = se_v(bs, "alpha_offset");
h->i_beta_offset = se_v(bs, "beta_offset");
} else {
h->i_alpha_offset = 0;
h->i_beta_offset = 0;
}
deblock_init_frame_parames(h);
}
h->enable_chroma_quant_param = !u_flag(bs, "chroma_quant_param_disable");
if (h->enable_chroma_quant_param) {
h->chroma_quant_param_delta_u = se_v(bs, "chroma_quant_param_delta_cb");
h->chroma_quant_param_delta_v = se_v(bs, "chroma_quant_param_delta_cr");
} else {
h->chroma_quant_param_delta_u = 0;
h->chroma_quant_param_delta_v = 0;
}
// adaptive frequency weighting quantization
h->seq_info.enable_weighted_quant = 0;
if (h->seq_info.enable_weighted_quant) {
int pic_weight_quant_enable = u_v(bs, 1, "pic_weight_quant_enable");
if (pic_weight_quant_enable) {
weighted_quant_t *p = &h->wq;
p->pic_wq_data_index = u_v(bs, 2, "pic_wq_data_index");
if (p->pic_wq_data_index == 1) {
/* int mb_adapt_wq_disable = */ u_v(bs, 1, "reserved_bits");
p->wq_param = u_v(bs, 2, "weighting_quant_param_index");
p->wq_model = u_v(bs, 2, "wq_model");
if (p->wq_param == 1) {
for (i = 0; i < 6; i++) {
p->quant_param_undetail[i] = (int16_t)se_v(bs, "quant_param_delta_u") + wq_param_default[UNDETAILED][i];
}
}
if (p->wq_param == 2) {
for (i = 0; i < 6; i++) {
p->quant_param_detail[i] = (int16_t)se_v(bs, "quant_param_delta_d") + wq_param_default[DETAILED][i];
}
}
} else if (p->pic_wq_data_index == 2) {
int x, y, sizeId, uiWqMSize;
for (sizeId = 0; sizeId < 2; sizeId++) {
i = 0;
uiWqMSize = DAVS2_MIN(1 << (sizeId + 2), 8);
for (y = 0; y < uiWqMSize; y++) {
for (x = 0; x < uiWqMSize; x++) {
p->pic_user_wq_matrix[sizeId][i++] = (int16_t)ue_v(bs, "weight_quant_coeff");
}
}
}
}
h->seq_info.enable_weighted_quant = 1;
}
}
alf_read_param(h, bs);
h->i_qp = h->i_picture_qp;
if (!is_valid_qp(h, h->i_qp)) {
davs2_log(h, DAVS2_LOG_ERROR, "Invalid I Picture QP: %d\n", h->i_qp);
}
/* align position in bitstream buffer */
bs_align(bs);
return 0;
} | 0 | [
"CWE-787"
] | davs2 | b41cf117452e2d73d827f02d3e30aa20f1c721ac | 269,162,153,793,372,370,000,000,000,000,000,000,000 | 181 | Add checking for `frame_rate_code`.
Signed-off-by: luofl <[email protected]> |
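The check referred to here follows the usual pattern for any syntax element that indexes a table: validate it immediately after it is read from the bitstream. A generic sketch; the table values are illustrative MPEG-style rates, not necessarily AVS2's exact frame_rate_code table.

static const double frame_rates[] = {
    0.0,            /* code 0 is forbidden */
    24000.0 / 1001, 24.0, 25.0, 30000.0 / 1001, 30.0, 50.0, 60000.0 / 1001, 60.0
};

/* Reject reserved/out-of-range codes instead of indexing past the table. */
static int frame_rate_from_code(unsigned code, double *out)
{
    if (code == 0 || code >= sizeof(frame_rates) / sizeof(frame_rates[0]))
        return -1;
    *out = frame_rates[code];
    return 0;
}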
bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
bool *error_reported, vcol_init_mode mode)
{
CHARSET_INFO *save_character_set_client= thd->variables.character_set_client;
CHARSET_INFO *save_collation= thd->variables.collation_connection;
Query_arena *backup_stmt_arena_ptr= thd->stmt_arena;
const uchar *pos= table->s->vcol_defs.str;
const uchar *end= pos + table->s->vcol_defs.length;
Field **field_ptr= table->field - 1;
Field **vfield_ptr= table->vfield;
Field **dfield_ptr= table->default_field;
Virtual_column_info **check_constraint_ptr= table->check_constraints;
sql_mode_t saved_mode= thd->variables.sql_mode;
Query_arena backup_arena;
Virtual_column_info *vcol= 0;
StringBuffer<MAX_FIELD_WIDTH> expr_str;
bool res= 1;
DBUG_ENTER("parse_vcol_defs");
if (check_constraint_ptr)
memcpy(table->check_constraints + table->s->field_check_constraints,
table->s->check_constraints,
table->s->table_check_constraints * sizeof(Virtual_column_info*));
DBUG_ASSERT(table->expr_arena == NULL);
/*
We need to use CONVENTIONAL_EXECUTION here to ensure that
any new items created by fix_fields() are not reverted.
*/
table->expr_arena= new (alloc_root(mem_root, sizeof(Table_arena)))
Table_arena(mem_root,
Query_arena::STMT_CONVENTIONAL_EXECUTION);
if (!table->expr_arena)
DBUG_RETURN(1);
thd->set_n_backup_active_arena(table->expr_arena, &backup_arena);
thd->stmt_arena= table->expr_arena;
thd->update_charset(&my_charset_utf8mb4_general_ci, table->s->table_charset);
expr_str.append(&parse_vcol_keyword);
thd->variables.sql_mode &= ~MODE_NO_BACKSLASH_ESCAPES;
while (pos < end)
{
uint type, expr_length;
if (table->s->frm_version >= FRM_VER_EXPRESSSIONS)
{
uint field_nr, name_length;
/* see pack_expression() for how data is stored */
type= pos[0];
field_nr= uint2korr(pos+1);
expr_length= uint2korr(pos+3);
name_length= pos[5];
pos+= FRM_VCOL_NEW_HEADER_SIZE + name_length;
field_ptr= table->field + field_nr;
}
else
{
/*
see below in ::init_from_binary_frm_image for how data is stored
in versions below 10.2 (that includes 5.7 too)
*/
while (*++field_ptr && !(*field_ptr)->vcol_info) /* no-op */;
if (!*field_ptr)
{
open_table_error(table->s, OPEN_FRM_CORRUPTED, 1);
goto end;
}
type= (*field_ptr)->vcol_info->stored_in_db
? VCOL_GENERATED_STORED : VCOL_GENERATED_VIRTUAL;
expr_length= uint2korr(pos+1);
if (table->s->mysql_version > 50700 && table->s->mysql_version < 100000)
pos+= 4; // MySQL from 5.7
else
pos+= pos[0] == 2 ? 4 : 3; // MariaDB from 5.2 to 10.1
}
expr_str.length(parse_vcol_keyword.length);
expr_str.append((char*)pos, expr_length);
thd->where= vcol_type_name(static_cast<enum_vcol_info_type>(type));
switch (type) {
case VCOL_GENERATED_VIRTUAL:
case VCOL_GENERATED_STORED:
vcol= unpack_vcol_info_from_frm(thd, mem_root, table, &expr_str,
&((*field_ptr)->vcol_info), error_reported);
*(vfield_ptr++)= *field_ptr;
if (vcol && field_ptr[0]->check_vcol_sql_mode_dependency(thd, mode))
{
DBUG_ASSERT(thd->is_error());
*error_reported= true;
goto end;
}
break;
case VCOL_DEFAULT:
vcol= unpack_vcol_info_from_frm(thd, mem_root, table, &expr_str,
&((*field_ptr)->default_value),
error_reported);
*(dfield_ptr++)= *field_ptr;
if (vcol && (vcol->flags & (VCOL_NON_DETERMINISTIC | VCOL_SESSION_FUNC)))
table->s->non_determinstic_insert= true;
break;
case VCOL_CHECK_FIELD:
vcol= unpack_vcol_info_from_frm(thd, mem_root, table, &expr_str,
&((*field_ptr)->check_constraint),
error_reported);
*check_constraint_ptr++= (*field_ptr)->check_constraint;
break;
case VCOL_CHECK_TABLE:
vcol= unpack_vcol_info_from_frm(thd, mem_root, table, &expr_str,
check_constraint_ptr, error_reported);
check_constraint_ptr++;
break;
}
if (!vcol)
goto end;
pos+= expr_length;
}
/* Now, initialize CURRENT_TIMESTAMP fields */
for (field_ptr= table->field; *field_ptr; field_ptr++)
{
Field *field= *field_ptr;
if (field->has_default_now_unireg_check())
{
expr_str.length(parse_vcol_keyword.length);
expr_str.append(STRING_WITH_LEN("current_timestamp("));
expr_str.append_ulonglong(field->decimals());
expr_str.append(')');
vcol= unpack_vcol_info_from_frm(thd, mem_root, table, &expr_str,
&((*field_ptr)->default_value),
error_reported);
*(dfield_ptr++)= *field_ptr;
if (!field->default_value->expr)
goto end;
}
else if (field->has_update_default_function() && !field->default_value)
*(dfield_ptr++)= *field_ptr;
}
if (vfield_ptr)
*vfield_ptr= 0;
if (dfield_ptr)
*dfield_ptr= 0;
if (check_constraint_ptr)
*check_constraint_ptr= 0;
/* Check that expressions aren't referring to not yet initialized fields */
for (field_ptr= table->field; *field_ptr; field_ptr++)
{
Field *field= *field_ptr;
if (check_vcol_forward_refs(field, field->vcol_info) ||
check_vcol_forward_refs(field, field->check_constraint) ||
check_vcol_forward_refs(field, field->default_value))
{
*error_reported= true;
goto end;
}
}
res=0;
end:
thd->restore_active_arena(table->expr_arena, &backup_arena);
thd->stmt_arena= backup_stmt_arena_ptr;
if (save_character_set_client)
thd->update_charset(save_character_set_client, save_collation);
thd->variables.sql_mode= saved_mode;
DBUG_RETURN(res);
} | 1 | [
"CWE-416"
] | server | 4681b6f2d8c82b4ec5cf115e83698251963d80d5 | 318,989,147,833,268,280,000,000,000,000,000,000,000 | 170 | MDEV-26281 ASAN use-after-poison when complex conversion is involved in blob
the bug was that in_vector array in Item_func_in was allocated in the
statement arena, not in the table->expr_arena.
revert part of the 5acd391e8b2d. Instead, change the arena correctly
in fix_all_session_vcol_exprs().
Remove TABLE_ARENA, that was introduced in 5acd391e8b2d to force
item tree changes to be rolled back (because they were allocated in the
wrong arena and didn't persist. now they do) |
static int vhost_scsi_register_configfs(void)
{
struct target_fabric_configfs *fabric;
int ret;
pr_debug("vhost-scsi fabric module %s on %s/%s"
" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
utsname()->machine);
/*
* Register the top level struct config_item_type with TCM core
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
if (IS_ERR(fabric)) {
pr_err("target_fabric_configfs_init() failed\n");
return PTR_ERR(fabric);
}
/*
* Setup fabric->tf_ops from our local vhost_scsi_ops
*/
fabric->tf_ops = vhost_scsi_ops;
/*
* Setup default attribute lists for various fabric->tf_cit_tmpl
*/
fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = vhost_scsi_wwn_attrs;
fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = vhost_scsi_tpg_attrs;
fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
/*
* Register the fabric for use within TCM
*/
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
pr_err("target_fabric_configfs_register() failed"
" for TCM_VHOST\n");
return ret;
}
/*
* Setup our local pointer to *fabric
*/
vhost_scsi_fabric_configfs = fabric;
pr_debug("TCM_VHOST[0] - Set fabric -> vhost_scsi_fabric_configfs\n");
return 0;
}; | 0 | [
"CWE-200",
"CWE-119"
] | linux | 59c816c1f24df0204e01851431d3bab3eb76719c | 6,578,595,844,588,284,000,000,000,000,000,000,000 | 48 | vhost/scsi: potential memory corruption
This code in vhost_scsi_make_tpg() is confusing because we limit "tpgt"
to UINT_MAX but the data type of "tpg->tport_tpgt" and that is a u16.
I looked at the context and it turns out that in
vhost_scsi_set_endpoint(), "tpg->tport_tpgt" is used as an offset into
the vs_tpg[] array which has VHOST_SCSI_MAX_TARGET (256) elements so
anything higher than 255 is invalid. I have made that the limit
now.
In vhost_scsi_send_evt() we mask away values higher than 255, but now
that the limit has changed, we don't need the mask.
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Nicholas Bellinger <[email protected]> |
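The fix amounts to bounding the tag before it is ever used as an index into the 256-entry target table. A standalone sketch with illustrative names; the real code also takes locks and manages refcounts.

#include <errno.h>

#define MAX_TARGET 256 /* number of slots in the hypothetical vs_tpg[] table */

/* A u16 tag can reach 65535, so this explicit bound is what keeps the index safe. */
static int set_endpoint(void *vs_tpg[MAX_TARGET], unsigned long tpgt, void *tpg)
{
    if (tpgt >= MAX_TARGET)
        return -EINVAL;
    vs_tpg[tpgt] = tpg;
    return 0;
}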
zfs_acl_valid_ace_type(uint_t type, uint_t flags)
{
uint16_t entry_type;
switch (type) {
case ALLOW:
case DENY:
case ACE_SYSTEM_AUDIT_ACE_TYPE:
case ACE_SYSTEM_ALARM_ACE_TYPE:
entry_type = flags & ACE_TYPE_FLAGS;
return (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE || entry_type == 0 ||
entry_type == ACE_IDENTIFIER_GROUP);
default:
if (type >= MIN_ACE_TYPE && type <= MAX_ACE_TYPE)
return (B_TRUE);
}
return (B_FALSE);
} | 0 | [
"CWE-200",
"CWE-732"
] | zfs | 716b53d0a14c72bda16c0872565dd1909757e73f | 306,795,040,800,929,600,000,000,000,000,000,000,000 | 20 | FreeBSD: Fix UNIX permissions checking
Reviewed-by: Ryan Moeller <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Matt Macy <[email protected]>
Closes #10727 |
f_winbufnr(typval_T *argvars, typval_T *rettv)
{
win_T *wp;
wp = find_win_by_nr_or_id(&argvars[0]);
if (wp == NULL)
rettv->vval.v_number = -1;
else
rettv->vval.v_number = wp->w_buffer->b_fnum;
} | 0 | [
"CWE-78"
] | vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 66,856,774,708,242,000,000,000,000,000,000,000,000 | 10 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
unsigned long old)
{
return NULL;
} | 0 | [
"CWE-17"
] | linux | df4d92549f23e1c037e83323aff58a21b3de7fe0 | 153,964,883,034,818,650,000,000,000,000,000,000,000 | 5 | ipv4: try to cache dst_entries which would cause a redirect
Not caching dst_entries which cause redirects could be exploited by hosts
on the same subnet, causing a severe DoS attack. This effect has been aggravated
since commit f88649721268999 ("ipv4: fix dst race in sk_dst_get()").
Lookups causing redirects will be allocated with DST_NOCACHE set which
will force dst_release to free them via RCU. Unfortunately waiting for
RCU grace period just takes too long, we can end up with >1M dst_entries
waiting to be released and the system will run OOM. rcuos threads cannot
catch up under high softirq load.
Attaching the flag to emit a redirect later on to the specific skb allows
us to cache those dst_entries thus reducing the pressure on allocation
and deallocation.
This issue was discovered by Marcelo Leitner.
Cc: Julian Anastasov <[email protected]>
Signed-off-by: Marcelo Leitner <[email protected]>
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Hannes Frederic Sowa <[email protected]>
Signed-off-by: Julian Anastasov <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
size_t *start)
{
size_t offset = i->iov_offset;
const struct iovec *iov = i->iov;
size_t len;
unsigned long addr;
void *p;
int n;
int res;
len = iov->iov_len - offset;
if (len > i->count)
len = i->count;
if (len > maxsize)
len = maxsize;
addr = (unsigned long)iov->iov_base + offset;
len += *start = addr & (PAGE_SIZE - 1);
addr &= ~(PAGE_SIZE - 1);
n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
if (!p)
p = vmalloc(n * sizeof(struct page *));
if (!p)
return -ENOMEM;
res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
if (unlikely(res < 0)) {
kvfree(p);
return res;
}
*pages = p;
return (res == n ? len : res * PAGE_SIZE) - *start;
} | 0 | [
"CWE-17"
] | linux | f0d1bec9d58d4c038d0ac958c9af82be6eb18045 | 159,784,485,532,743,760,000,000,000,000,000,000,000 | 36 | new helper: copy_page_from_iter()
parallel to copy_page_to_iter(). pipe_write() switched to it (and became
->write_iter()).
Signed-off-by: Al Viro <[email protected]> |
static void sixpack_close(struct tty_struct *tty)
{
struct sixpack *sp;
write_lock_irq(&disc_data_lock);
sp = tty->disc_data;
tty->disc_data = NULL;
write_unlock_irq(&disc_data_lock);
if (!sp)
return;
/*
* We have now ensured that nobody can start using ap from now on, but
* we have to wait for all existing users to finish.
*/
if (!refcount_dec_and_test(&sp->refcnt))
wait_for_completion(&sp->dead);
/* We must stop the queue to avoid potentially scribbling
* on the free buffers. The sp->dead completion is not sufficient
* to protect us from sp->xbuff access.
*/
netif_stop_queue(sp->dev);
del_timer_sync(&sp->tx_t);
del_timer_sync(&sp->resync_t);
/* Free all 6pack frame buffers. */
kfree(sp->rbuff);
kfree(sp->xbuff);
unregister_netdev(sp->dev);
} | 1 | [
"CWE-416"
] | linux | 0b9111922b1f399aba6ed1e1b8f2079c3da1aed8 | 249,805,018,278,643,940,000,000,000,000,000,000,000 | 33 | hamradio: defer 6pack kfree after unregister_netdev
There is a possible race condition (use-after-free) like below
(USE)                    |     (FREE)
dev_queue_xmit           |
__dev_queue_xmit         |
__dev_xmit_skb           |
sch_direct_xmit          |     ...
xmit_one                 |
netdev_start_xmit        |     tty_ldisc_kill
__netdev_start_xmit      |     6pack_close
sp_xmit                  |     kfree
sp_encaps                |
                         |
According to the patch "defer ax25 kfree after unregister_netdev", this
patch reorders the kfree after the unregister_netdev to avoid the possible
UAF as the unregister_netdev() is well synchronized and won't return if
there is a running routine.
Signed-off-by: Lin Ma <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
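The same ordering rule, reduced to a self-contained userspace analogue: stop and wait for every concurrent user of a buffer before freeing it. pthreads stand in for the transmit path here; none of this is the driver's real code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct dev {
    char *xbuff;          /* buffer the "transmit path" scribbles on */
    atomic_int stopping;
};

static void *tx_worker(void *arg)
{
    struct dev *d = arg;

    while (!atomic_load(&d->stopping))
        memset(d->xbuff, 0, 64);       /* concurrent user of xbuff */
    return NULL;
}

int main(void)
{
    struct dev d = { .xbuff = malloc(64) };
    pthread_t t;

    if (d.xbuff == NULL)
        return 1;
    atomic_init(&d.stopping, 0);
    pthread_create(&t, NULL, tx_worker, &d);

    /* Correct teardown order: quiesce first (unregister_netdev() plays this
     * role in the driver), free last. Swapping these two steps is the UAF. */
    atomic_store(&d.stopping, 1);
    pthread_join(t, NULL);
    free(d.xbuff);
    return 0;
}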
static int rdn_name_modify(struct ldb_module *module, struct ldb_request *req)
{
struct ldb_context *ldb;
const struct ldb_val *rdn_val_p;
struct ldb_message_element *e = NULL;
struct ldb_control *recalculate_rdn_control = NULL;
ldb = ldb_module_get_ctx(module);
/* do not manipulate our control entries */
if (ldb_dn_is_special(req->op.mod.message->dn)) {
return ldb_next_request(module, req);
}
recalculate_rdn_control = ldb_request_get_control(req,
LDB_CONTROL_RECALCULATE_RDN_OID);
if (recalculate_rdn_control != NULL) {
struct ldb_message *msg = NULL;
const char *rdn_name = NULL;
struct ldb_val rdn_val;
const struct ldb_schema_attribute *a = NULL;
struct ldb_request *mod_req = NULL;
int ret;
struct ldb_message_element *rdn_del = NULL;
struct ldb_message_element *name_del = NULL;
recalculate_rdn_control->critical = false;
msg = ldb_msg_copy_shallow(req, req->op.mod.message);
if (msg == NULL) {
return ldb_module_oom(module);
}
/*
* The caller must pass a dummy 'name' attribute
* in order to bypass some high level checks.
*
* We just remove it and check nothing is left.
*/
ldb_msg_remove_attr(msg, "name");
if (msg->num_elements != 0) {
return ldb_module_operr(module);
}
rdn_name = ldb_dn_get_rdn_name(msg->dn);
if (rdn_name == NULL) {
return ldb_module_oom(module);
}
a = ldb_schema_attribute_by_name(ldb, rdn_name);
if (a == NULL) {
return ldb_module_operr(module);
}
if (a->name != NULL && strcmp(a->name, "*") != 0) {
rdn_name = a->name;
}
rdn_val_p = ldb_dn_get_rdn_val(msg->dn);
if (rdn_val_p == NULL) {
return ldb_module_oom(module);
}
rdn_val = ldb_val_dup(msg, rdn_val_p);
if (rdn_val.length == 0) {
return ldb_module_oom(module);
}
/*
* This is a bit tricky:
*
* We want _DELETE elements (as "rdn_del" and "name_del" without
* values) first, followed by _ADD (with the real names)
* elements (with values). Then we fix up the "rdn_del" and
* "name_del" attributes.
*/
ret = ldb_msg_add_empty(msg, "rdn_del", LDB_FLAG_MOD_DELETE, NULL);
if (ret != 0) {
return ldb_module_oom(module);
}
ret = ldb_msg_add_empty(msg, rdn_name, LDB_FLAG_MOD_ADD, NULL);
if (ret != 0) {
return ldb_module_oom(module);
}
ret = ldb_msg_add_value(msg, rdn_name, &rdn_val, NULL);
if (ret != 0) {
return ldb_module_oom(module);
}
ret = ldb_msg_add_empty(msg, "name_del", LDB_FLAG_MOD_DELETE, NULL);
if (ret != 0) {
return ldb_module_oom(module);
}
ret = ldb_msg_add_empty(msg, "name", LDB_FLAG_MOD_ADD, NULL);
if (ret != 0) {
return ldb_module_oom(module);
}
ret = ldb_msg_add_value(msg, "name", &rdn_val, NULL);
if (ret != 0) {
return ldb_module_oom(module);
}
rdn_del = ldb_msg_find_element(msg, "rdn_del");
if (rdn_del == NULL) {
return ldb_module_operr(module);
}
rdn_del->name = talloc_strdup(msg->elements, rdn_name);
if (rdn_del->name == NULL) {
return ldb_module_oom(module);
}
name_del = ldb_msg_find_element(msg, "name_del");
if (name_del == NULL) {
return ldb_module_operr(module);
}
name_del->name = talloc_strdup(msg->elements, "name");
if (name_del->name == NULL) {
return ldb_module_oom(module);
}
ret = ldb_build_mod_req(&mod_req, ldb,
req, msg, NULL,
req, rdn_recalculate_callback,
req);
if (ret != LDB_SUCCESS) {
return ldb_module_done(req, NULL, NULL, ret);
}
talloc_steal(mod_req, msg);
ret = ldb_request_add_control(mod_req,
LDB_CONTROL_RECALCULATE_RDN_OID,
false, NULL);
if (ret != LDB_SUCCESS) {
return ldb_module_done(req, NULL, NULL, ret);
}
ret = ldb_request_add_control(mod_req,
LDB_CONTROL_PERMISSIVE_MODIFY_OID,
false, NULL);
if (ret != LDB_SUCCESS) {
return ldb_module_done(req, NULL, NULL, ret);
}
/* go on with the call chain */
return ldb_next_request(module, mod_req);
}
rdn_val_p = ldb_dn_get_rdn_val(req->op.mod.message->dn);
if (rdn_val_p == NULL) {
return LDB_ERR_OPERATIONS_ERROR;
}
if (rdn_val_p->length == 0) {
ldb_asprintf_errstring(ldb, "Empty RDN value on %s not permitted!",
ldb_dn_get_linearized(req->op.mod.message->dn));
return LDB_ERR_INVALID_DN_SYNTAX;
}
e = ldb_msg_find_element(req->op.mod.message, "distinguishedName");
if (e != NULL) {
ldb_asprintf_errstring(ldb, "Modify of 'distinguishedName' on %s not permitted, must use 'rename' operation instead",
ldb_dn_get_linearized(req->op.mod.message->dn));
if (LDB_FLAG_MOD_TYPE(e->flags) == LDB_FLAG_MOD_REPLACE) {
return LDB_ERR_CONSTRAINT_VIOLATION;
} else {
return LDB_ERR_UNWILLING_TO_PERFORM;
}
}
if (ldb_msg_find_element(req->op.mod.message, "name")) {
ldb_asprintf_errstring(ldb, "Modify of 'name' on %s not permitted, must use 'rename' operation instead",
ldb_dn_get_linearized(req->op.mod.message->dn));
return LDB_ERR_NOT_ALLOWED_ON_RDN;
}
if (ldb_msg_find_element(req->op.mod.message, ldb_dn_get_rdn_name(req->op.mod.message->dn))) {
ldb_asprintf_errstring(ldb, "Modify of RDN '%s' on %s not permitted, must use 'rename' operation instead",
ldb_dn_get_rdn_name(req->op.mod.message->dn), ldb_dn_get_linearized(req->op.mod.message->dn));
return LDB_ERR_NOT_ALLOWED_ON_RDN;
}
/* All OK, they kept their fingers out of the special attributes */
return ldb_next_request(module, req);
} | 1 | [
"CWE-200"
] | samba | 0a3aa5f908e351201dc9c4d4807b09ed9eedff77 | 213,884,585,609,644,800,000,000,000,000,000,000,000 | 182 | CVE-2022-32746 ldb: Make use of functions for appending to an ldb_message
This aims to minimise usage of the error-prone pattern of searching for
a just-added message element in order to make modifications to it (and
potentially finding the wrong element).
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15009
Signed-off-by: Joseph Sutton <[email protected]> |
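The "error-prone pattern" in question is append-then-search. A sketch of the safer shape the message advocates: the append helper returns the element it just created, so the caller never has to find it again. Types and names are illustrative, not ldb's API.

#include <stdlib.h>
#include <string.h>

struct element { char *name; };
struct message { struct element *elements; size_t num; };

/* Append one element and hand it straight back to the caller. */
static struct element *msg_append(struct message *msg, const char *name)
{
    struct element *grown =
        realloc(msg->elements, (msg->num + 1) * sizeof(*grown));
    char *dup = strdup(name);

    if (grown != NULL)
        msg->elements = grown;          /* the grown array stays valid either way */
    if (grown == NULL || dup == NULL) {
        free(dup);
        return NULL;
    }
    msg->elements[msg->num].name = dup;
    return &msg->elements[msg->num++];  /* caller edits exactly this element */
}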
get_display_and_details_for_bus_sender (GdmManager *self,
GDBusConnection *connection,
const char *sender,
GdmDisplay **out_display,
char **out_seat_id,
char **out_session_id,
char **out_tty,
GPid *out_pid,
uid_t *out_uid,
gboolean *out_is_login_screen,
gboolean *out_is_remote)
{
GdmDisplay *display = NULL;
char *session_id = NULL;
GError *error = NULL;
int ret;
GPid pid;
uid_t caller_uid, session_uid;
ret = gdm_dbus_get_pid_for_name (sender, &pid, &error);
if (!ret) {
g_debug ("GdmManager: Error while retrieving pid for sender: %s",
error->message);
g_error_free (error);
goto out;
}
if (out_pid != NULL) {
*out_pid = pid;
}
ret = gdm_dbus_get_uid_for_name (sender, &caller_uid, &error);
if (!ret) {
g_debug ("GdmManager: Error while retrieving uid for sender: %s",
error->message);
g_error_free (error);
goto out;
}
session_id = get_session_id_for_pid (pid, &error);
if (session_id == NULL) {
g_debug ("GdmManager: Error while retrieving session id for sender: %s",
error->message);
g_error_free (error);
goto out;
}
if (out_session_id != NULL) {
*out_session_id = g_strdup (session_id);
}
if (out_is_login_screen != NULL) {
*out_is_login_screen = is_login_session (self, session_id, &error);
if (error != NULL) {
g_debug ("GdmManager: Error while checking if sender is login screen: %s",
error->message);
g_error_free (error);
goto out;
}
}
if (!get_uid_for_session_id (session_id, &session_uid, &error)) {
g_debug ("GdmManager: Error while retrieving uid for session: %s",
error->message);
g_error_free (error);
goto out;
}
if (out_uid != NULL) {
*out_uid = caller_uid;
}
if (caller_uid != session_uid) {
g_debug ("GdmManager: uid for sender and uid for session don't match");
goto out;
}
if (out_seat_id != NULL) {
*out_seat_id = get_seat_id_for_session_id (session_id, &error);
if (error != NULL) {
g_debug ("GdmManager: Error while retrieving seat id for session: %s",
error->message);
g_clear_error (&error);
}
}
if (out_is_remote != NULL) {
*out_is_remote = is_remote_session (self, session_id, &error);
if (error != NULL) {
g_debug ("GdmManager: Error while retrieving remoteness for session: %s",
error->message);
g_clear_error (&error);
}
}
if (out_tty != NULL) {
*out_tty = get_tty_for_session_id (session_id, &error);
if (error != NULL) {
g_debug ("GdmManager: Error while retrieving tty for session: %s",
error->message);
g_clear_error (&error);
}
}
display = gdm_display_store_find (self->priv->display_store,
lookup_by_session_id,
(gpointer) session_id);
if (out_display != NULL) {
*out_display = display;
}
out:
g_free (session_id);
} | 0 | [] | gdm | ff98b2817014684ae1acec78ff06f0f461a56a9f | 293,650,949,844,519,300,000,000,000,000,000,000,000 | 121 | manager: if falling back to X11 retry autologin
Right now, we get one shot to autologin. If it fails, we fall back to
the greeter. We should give it another go if the reason for the failure
was wayland fallback to X.
https://bugzilla.gnome.org/show_bug.cgi?id=780520 |
ChkFileUploadWriteErr(rfbClientPtr cl, rfbTightClientPtr rtcp, char* pBuf)
{
FileTransferMsg ftm;
unsigned long numOfBytesWritten = 0;
memset(&ftm, 0, sizeof(FileTransferMsg));
numOfBytesWritten = write(rtcp->rcft.rcfu.uploadFD, pBuf, rtcp->rcft.rcfu.fSize);
if(numOfBytesWritten != rtcp->rcft.rcfu.fSize) {
char reason[] = "Error writing file data";
int reasonLen = strlen(reason);
ftm = CreateFileUploadErrMsg(reason, reasonLen);
CloseUndoneFileUpload(cl, rtcp);
}
return ftm;
} | 0 | [
"CWE-416"
] | libvncserver | 73cb96fec028a576a5a24417b57723b55854ad7b | 36,172,249,247,487,710,000,000,000,000,000,000,000 | 17 | tightvnc-filetransfer: wait for download thread end in CloseUndoneFileDownload()
...and use it when deregistering the file transfer extension.
Closes #242 |
void opj_j2k_tcp_data_destroy (opj_tcp_t *p_tcp)
{
if (p_tcp->m_data) {
opj_free(p_tcp->m_data);
p_tcp->m_data = NULL;
p_tcp->m_data_size = 0;
}
} | 0 | [] | openjpeg | 0fa5a17c98c4b8f9ee2286f4f0a50cf52a5fccb0 | 283,076,098,776,723,450,000,000,000,000,000,000,000 | 8 | [trunk] Correct potential double free on malloc failure in opj_j2k_copy_default_tcp_and_create_tcp (fixes issue 492) |
callbacks_layer_tree_key_press (GtkWidget *widget, GdkEventKey *event, gpointer user_data) {
/* if space is pressed while a color picker icon is in focus,
show the color picker dialog. */
if(event->keyval == GDK_space){
GtkTreeView *tree;
GtkTreePath *path;
GtkTreeViewColumn *col;
gint *indices;
gint idx;
tree = (GtkTreeView *) screen.win.layerTree;
gtk_tree_view_get_cursor (tree, &path, &col);
if (path) {
indices = gtk_tree_path_get_indices (path);
if (indices) {
idx = callbacks_get_col_num_from_tree_view_col (col);
if ((idx == 1) && (indices[0] <= mainProject->last_loaded)){
callbacks_show_color_picker_dialog (indices[0]);
}
}
gtk_tree_path_free (path);
}
}
/* by default propagate the key press */
return FALSE;
} | 0 | [
"CWE-200"
] | gerbv | 319a8af890e4d0a5c38e6d08f510da8eefc42537 | 10,618,619,373,579,909,000,000,000,000,000,000,000 | 27 | Remove local alias to parameter array
Normalizing access to `gerbv_simplified_amacro_t::parameter` as a step to fix CVE-2021-40402 |
config_tinker(
config_tree *ptree
)
{
attr_val * tinker;
int item;
item = -1; /* quiet warning */
tinker = HEAD_PFIFO(ptree->tinker);
for (; tinker != NULL; tinker = tinker->link) {
switch (tinker->attr) {
default:
NTP_INSIST(0);
break;
case T_Allan:
item = LOOP_ALLAN;
break;
case T_Dispersion:
item = LOOP_PHI;
break;
case T_Freq:
item = LOOP_FREQ;
break;
case T_Huffpuff:
item = LOOP_HUFFPUFF;
break;
case T_Panic:
item = LOOP_PANIC;
break;
case T_Step:
item = LOOP_MAX;
break;
case T_Stepout:
item = LOOP_MINSTEP;
break;
}
loop_config(item, tinker->value.d);
}
} | 0 | [
"CWE-20"
] | ntp | 52e977d79a0c4ace997e5c74af429844da2f27be | 128,592,760,261,371,040,000,000,000,000,000,000,000 | 47 | [Bug 1773] openssl not detected during ./configure.
[Bug 1774] Segfaults if cryptostats enabled and built without OpenSSL. |
k5_asn1_encode_bool(asn1buf *buf, intmax_t val)
{
insert_byte(buf, val ? 0xFF : 0x00);
} | 0 | [
"CWE-674",
"CWE-787"
] | krb5 | 57415dda6cf04e73ffc3723be518eddfae599bfd | 55,775,380,115,599,210,000,000,000,000,000,000,000 | 4 | Add recursion limit for ASN.1 indefinite lengths
The libkrb5 ASN.1 decoder supports BER indefinite lengths. It
computes the tag length using recursion; the lack of a recursion limit
allows an attacker to overrun the stack and cause the process to
crash. Reported by Demi Obenour.
CVE-2020-28196:
In MIT krb5 releases 1.11 and later, an unauthenticated attacker can
cause a denial of service for any client or server to which it can
send an ASN.1-encoded Kerberos message of sufficient length.
ticket: 8959 (new)
tags: pullup
target_version: 1.18-next
target_version: 1.17-next |
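The shape of the fix is generic: any recursion driven by attacker-controlled nesting needs an explicit depth cap. The self-contained analogue below parses nested '('...')' groups the way a BER decoder walks constructed indefinite-length values; the cap of 32 is an arbitrary illustration, not krb5's choice.

#include <stddef.h>

enum { MAX_DEPTH = 32 };

/* Returns the number of content bytes before the group's closing ')'
 * (or before end of buffer at the top level), or -1 on error. */
static long parse_group(const char *s, size_t len, int depth)
{
    size_t i = 0;

    if (depth > MAX_DEPTH)
        return -1;                       /* refuse instead of blowing the stack */

    while (i < len && s[i] != ')') {
        if (s[i] == '(') {
            long inner = parse_group(s + i + 1, len - i - 1, depth + 1);

            if (inner < 0)
                return -1;
            if (i + 1 + (size_t)inner >= len || s[i + 1 + inner] != ')')
                return -1;               /* unterminated nested group */
            i += 2 + (size_t)inner;
        } else {
            i++;
        }
    }
    return (long)i;
}

/* Usage: the whole input must be consumed for it to be well formed. */
static int is_balanced(const char *s, size_t len)
{
    long used = parse_group(s, len, 0);
    return used >= 0 && (size_t)used == len;
}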
R_API void r_anal_extract_rarg(RAnal *anal, RAnalOp *op, RAnalFunction *fcn, int *reg_set, int *count) {
int i, argc = 0;
r_return_if_fail (anal && op && fcn);
const char *opsreg = op->src[0] ? get_regname (anal, op->src[0]) : NULL;
const char *opdreg = op->dst ? get_regname (anal, op->dst) : NULL;
const int size = (fcn->bits ? fcn->bits : anal->bits) / 8;
if (!fcn->cc) {
R_LOG_DEBUG ("No calling convention for function '%s' to extract register arguments\n", fcn->name);
return;
}
char *fname = r_type_func_guess (anal->sdb_types, fcn->name);
Sdb *TDB = anal->sdb_types;
int max_count = r_anal_cc_max_arg (anal, fcn->cc);
if (!max_count || (*count >= max_count)) {
free (fname);
return;
}
if (fname) {
argc = r_type_func_args_count (TDB, fname);
}
bool is_call = (op->type & 0xf) == R_ANAL_OP_TYPE_CALL || (op->type & 0xf) == R_ANAL_OP_TYPE_UCALL;
if (is_call && *count < max_count) {
RList *callee_rargs_l = NULL;
int callee_rargs = 0;
char *callee = NULL;
ut64 offset = op->jump == UT64_MAX ? op->ptr : op->jump;
RAnalFunction *f = r_anal_get_function_at (anal, offset);
if (!f) {
RCore *core = (RCore *)anal->coreb.core;
RFlagItem *flag = r_flag_get_by_spaces (core->flags, offset, R_FLAGS_FS_IMPORTS, NULL);
if (flag) {
callee = r_type_func_guess (TDB, flag->name);
if (callee) {
const char *cc = r_anal_cc_func (anal, callee);
if (cc && !strcmp (fcn->cc, cc)) {
callee_rargs = R_MIN (max_count, r_type_func_args_count (TDB, callee));
}
}
}
} else if (!f->is_variadic && !strcmp (fcn->cc, f->cc)) {
callee = r_type_func_guess (TDB, f->name);
if (callee) {
callee_rargs = R_MIN (max_count, r_type_func_args_count (TDB, callee));
}
callee_rargs = callee_rargs
? callee_rargs
: r_anal_var_count (anal, f, R_ANAL_VAR_KIND_REG, 1);
callee_rargs_l = r_anal_var_list (anal, f, R_ANAL_VAR_KIND_REG);
}
int i;
for (i = 0; i < callee_rargs; i++) {
if (reg_set[i]) {
continue;
}
const char *vname = NULL;
char *type = NULL;
char *name = NULL;
int delta = 0;
const char *regname = r_anal_cc_arg (anal, fcn->cc, i);
RRegItem *ri = r_reg_get (anal->reg, regname, -1);
if (ri) {
delta = ri->index;
}
if (fname) {
type = r_type_func_args_type (TDB, fname, i);
vname = r_type_func_args_name (TDB, fname, i);
}
if (!vname && callee) {
type = r_type_func_args_type (TDB, callee, i);
vname = r_type_func_args_name (TDB, callee, i);
}
if (vname) {
reg_set[i] = 1;
} else {
RListIter *it;
RAnalVar *arg, *found_arg = NULL;
r_list_foreach (callee_rargs_l, it, arg) {
if (r_anal_var_get_argnum (arg) == i) {
found_arg = arg;
break;
}
}
if (found_arg) {
type = strdup (found_arg->type);
vname = name = strdup (found_arg->name);
}
}
if (!vname) {
name = r_str_newf ("arg%u", (int)i + 1);
vname = name;
}
r_anal_function_set_var (fcn, delta, R_ANAL_VAR_KIND_REG, type, size, true, vname);
(*count)++;
free (name);
free (type);
}
free (callee);
r_list_free (callee_rargs_l);
free (fname);
return;
}
for (i = 0; i < max_count; i++) {
const char *regname = r_anal_cc_arg (anal, fcn->cc, i);
if (regname) {
int delta = 0;
RRegItem *ri = NULL;
RAnalVar *var = NULL;
bool is_used_like_an_arg = is_used_like_arg (regname, opsreg, opdreg, op, anal);
if (reg_set[i] != 2 && is_used_like_an_arg) {
ri = r_reg_get (anal->reg, regname, -1);
if (ri) {
delta = ri->index;
}
}
if (reg_set[i] == 1 && is_used_like_an_arg) {
var = r_anal_function_get_var (fcn, R_ANAL_VAR_KIND_REG, delta);
} else if (reg_set[i] != 2 && is_used_like_an_arg) {
const char *vname = NULL;
char *type = NULL;
char *name = NULL;
if ((i < argc) && fname) {
type = r_type_func_args_type (TDB, fname, i);
vname = r_type_func_args_name (TDB, fname, i);
}
if (!vname) {
name = r_str_newf ("arg%d", i + 1);
vname = name;
}
var = r_anal_function_set_var (fcn, delta, R_ANAL_VAR_KIND_REG, type, size, true, vname);
free (name);
free (type);
(*count)++;
} else {
if (is_reg_in_src (regname, anal, op) || STR_EQUAL (opdreg, regname)) {
reg_set[i] = 2;
}
continue;
}
if (is_reg_in_src (regname, anal, op) || STR_EQUAL (regname, opdreg)) {
reg_set[i] = 1;
}
if (var) {
r_anal_var_set_access (var, var->regname, op->addr, R_ANAL_VAR_ACCESS_TYPE_READ, 0);
r_meta_set_string (anal, R_META_TYPE_VARTYPE, op->addr, var->name);
}
}
}
const char *selfreg = r_anal_cc_self (anal, fcn->cc);
if (selfreg) {
bool is_used_like_an_arg = is_used_like_arg (selfreg, opsreg, opdreg, op, anal);
if (reg_set[i] != 2 && is_used_like_an_arg) {
int delta = 0;
char *vname = strdup ("self");
RRegItem *ri = r_reg_get (anal->reg, selfreg, -1);
if (ri) {
delta = ri->index;
}
RAnalVar *newvar = r_anal_function_set_var (fcn, delta, R_ANAL_VAR_KIND_REG, 0, size, true, vname);
if (newvar) {
r_anal_var_set_access (newvar, newvar->regname, op->addr, R_ANAL_VAR_ACCESS_TYPE_READ, 0);
}
r_meta_set_string (anal, R_META_TYPE_VARTYPE, op->addr, vname);
free (vname);
(*count)++;
} else {
if (is_reg_in_src (selfreg, anal, op) || STR_EQUAL (opdreg, selfreg)) {
reg_set[i] = 2;
}
}
i++;
}
const char *errorreg = r_anal_cc_error (anal, fcn->cc);
if (errorreg) {
if (reg_set[i] == 0 && STR_EQUAL (opdreg, errorreg)) {
int delta = 0;
char *vname = strdup ("error");
RRegItem *ri = r_reg_get (anal->reg, errorreg, -1);
if (ri) {
delta = ri->index;
}
RAnalVar *newvar = r_anal_function_set_var (fcn, delta, R_ANAL_VAR_KIND_REG, 0, size, true, vname);
if (newvar) {
r_anal_var_set_access (newvar, newvar->regname, op->addr, R_ANAL_VAR_ACCESS_TYPE_READ, 0);
}
r_meta_set_string (anal, R_META_TYPE_VARTYPE, op->addr, vname);
free (vname);
(*count)++;
reg_set[i] = 2;
}
}
free (fname);
} | 0 | [
"CWE-416"
] | radare2 | a7ce29647fcb38386d7439696375e16e093d6acb | 271,019,581,937,604,200,000,000,000,000,000,000,000 | 196 | Fix UAF in aaaa on arm/thumb switching ##crash
* Reported by @peacock-doris via huntr.dev
* Reproducer tests_65185
* This is a logic fix, but not the fully safe as changes in the code
can result on UAF again, to properly protect r2 from crashing we
need to break the ABI and add refcounting to RRegItem, which can't
happen in 5.6.x because of abi-compat rules |
ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags)
{
ssize_t res;
struct sock *sk = sock->sk;
if (!(sk->sk_route_caps & NETIF_F_SG) ||
!(sk->sk_route_caps & NETIF_F_ALL_CSUM))
return sock_no_sendpage(sock, page, offset, size, flags);
lock_sock(sk);
TCP_CHECK_TIMER(sk);
res = do_tcp_sendpages(sk, &page, offset, size, flags);
TCP_CHECK_TIMER(sk);
release_sock(sk);
return res;
} | 0 | [
"CWE-400",
"CWE-119",
"CWE-703"
] | linux | baff42ab1494528907bf4d5870359e31711746ae | 29,005,486,307,753,690,000,000,000,000,000,000,000 | 17 | net: Fix oops from tcp_collapse() when using splice()
tcp_read_sock() can eat skbs without immediately advancing copied_seq.
This can cause a panic in tcp_collapse() if it is called as a result
of the recv_actor dropping the socket lock.
A userspace program that splices data from a socket to either another
socket or to a file can trigger this bug.
Signed-off-by: Steven J. Magnani <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
char *findFill(node_t * n)
{
return (findFillDflt(n, DEFAULT_FILL));
} | 0 | [
"CWE-120"
] | graphviz | 784411ca3655c80da0f6025ab20634b2a6ff696b | 234,947,337,491,924,400,000,000,000,000,000,000,000 | 4 | fix: out-of-bounds write on invalid label
When the label for a node cannot be parsed (due to it being malformed), it falls
back on the symbol name of the node itself. I.e. the default label the node
would have had if it had no label attribute at all. However, this is applied by
dynamically altering the node's label to "\N", a shortcut for the symbol name of
the node. All of this is fine, however if the hand written label itself is
shorter than the literal string "\N", not enough memory would have been
allocated to write "\N" into the label text.
Here we account for the possibility of error during label parsing, and assume
that the label text may need to be overwritten with "\N" after the fact. Fixes
issue #1700. |
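The overflow described above comes from sizing a buffer for one string and later writing a longer fallback into it. A sketch of the defensive allocation (hypothetical helper, not Graphviz's code): size for the larger of the two up front.

#include <stdlib.h>
#include <string.h>

/* Allocate a label buffer big enough for either the user's label or the
 * "\N" fallback that may overwrite it after a parse failure. */
static char *alloc_label(const char *user_label)
{
    const char *fallback = "\\N";
    size_t need = strlen(user_label) + 1;
    size_t fb   = strlen(fallback) + 1;
    char *buf = malloc(need > fb ? need : fb);

    if (buf)
        memcpy(buf, user_label, need);
    return buf;   /* a later strcpy(buf, fallback) stays in bounds */
}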
void session_set_recv_message_notify(Session *session,
RecvMsgNotify notify_func, gpointer data)
{
session->recv_msg_notify = notify_func;
session->recv_msg_notify_data = data;
} | 0 | [] | claws | fcc25329049b6f9bd8d890f1197ed61eb12e14d5 | 259,535,937,661,205,620,000,000,000,000,000,000,000 | 6 | fix STARTTLS protocol violation
with thanks to, and patch by Damian Poddebniak and Hanno Böck |
get_seckey_byname (PKT_public_key *pk, const char *name)
{
gpg_error_t err;
strlist_t namelist = NULL;
int include_unusable = 1;
/* If we have no name, try to use the default secret key. If we
have no default, we'll use the first usable one. */
if (!name && opt.def_secret_key && *opt.def_secret_key)
add_to_strlist (&namelist, opt.def_secret_key);
else if (name)
add_to_strlist (&namelist, name);
else
include_unusable = 0;
err = key_byname (NULL, namelist, pk, 1, include_unusable, NULL, NULL);
free_strlist (namelist);
return err;
} | 0 | [
"CWE-310"
] | gnupg | 4bde12206c5bf199dc6e12a74af8da4558ba41bf | 142,549,320,884,069,300,000,000,000,000,000,000,000 | 22 | gpg: Distinguish between missing and cleared key flags.
* include/cipher.h (PUBKEY_USAGE_NONE): New.
* g10/getkey.c (parse_key_usage): Set new flag.
--
We do not want to use the default capabilities (derived from the
algorithm) if any key flags are given in a signature. Thus if key
flags are used in any way, the default key capabilities are never
used.
This allows to create a key with key flags set to all zero so it can't
be used. This better reflects common sense. |
ZEND_API int zend_ts_hash_minmax(TsHashTable *ht, compare_func_t compar, int flag, void **pData TSRMLS_DC)
{
int retval;
begin_read(ht);
retval = zend_hash_minmax(TS_HASH(ht), compar, flag, pData TSRMLS_CC);
end_read(ht);
return retval;
} | 0 | [] | php-src | 24125f0f26f3787c006e4a51611ba33ee3b841cb | 202,238,979,758,836,780,000,000,000,000,000,000,000 | 10 | Fixed bug #68676 (Explicit Double Free) |
static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
{
ZSTD_matchState_t* const ms = &zc->blockState.matchState;
{ U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog;
ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
}
if (zc->appliedParams.cParams.strategy != ZSTD_fast) {
U32 const chainSize = (U32)1 << zc->appliedParams.cParams.chainLog;
if (zc->appliedParams.cParams.strategy == ZSTD_btlazy2)
ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
else
ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
}
if (ms->hashLog3) {
U32 const h3Size = (U32)1 << ms->hashLog3;
ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
}
} | 0 | [
"CWE-362"
] | zstd | 3e5cdf1b6a85843e991d7d10f6a2567c15580da0 | 170,446,796,246,225,460,000,000,000,000,000,000,000 | 20 | fixed T36302429 |
kill_threads_callback_arg(THD *thd_arg, LEX_USER *user_arg):
thd(thd_arg), user(user_arg) {} | 0 | [
"CWE-703"
] | server | 39feab3cd31b5414aa9b428eaba915c251ac34a2 | 263,662,701,829,637,800,000,000,000,000,000,000,000 | 2 | MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
If an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved, then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]> |
theme_adium_clear (EmpathyChatView *view)
{
EmpathyThemeAdiumPriv *priv = GET_PRIV (view);
theme_adium_load_template (EMPATHY_THEME_ADIUM (view));
/* Clear last contact to avoid trying to add a 'joined'
* message when we don't have an insertion point. */
if (priv->last_contact) {
g_object_unref (priv->last_contact);
priv->last_contact = NULL;
}
} | 0 | [
"CWE-79"
] | empathy | 739aca418457de752be13721218aaebc74bd9d36 | 178,398,110,314,333,700,000,000,000,000,000,000,000 | 13 | theme_adium_append_message: escape alias before displaying it
Not doing so can lead to nasty HTML injection from hostile users.
https://bugzilla.gnome.org/show_bug.cgi?id=662035 |
static inline unsigned short ScaleQuantumToShort(const Quantum quantum)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
return((unsigned short) (257UL*quantum));
#else
if (quantum <= 0.0)
return(0);
if ((257.0*quantum) >= 65535.0)
return(65535);
return((unsigned short) (257.0*quantum+0.5));
#endif
} | 1 | [
"CWE-190"
] | ImageMagick | 95d4e94e0353e503b71a53f5e6fad173c7c70c90 | 131,056,311,891,151,100,000,000,000,000,000,000,000 | 12 | https://github.com/ImageMagick/ImageMagick/issues/1751 |
xmlXPathCacheNewFloat(xmlXPathContextPtr ctxt, double val)
{
if ((ctxt != NULL) && (ctxt->cache)) {
xmlXPathContextCachePtr cache = (xmlXPathContextCachePtr) ctxt->cache;
if ((cache->numberObjs != NULL) &&
(cache->numberObjs->number != 0))
{
xmlXPathObjectPtr ret;
ret = (xmlXPathObjectPtr)
cache->numberObjs->items[--cache->numberObjs->number];
ret->type = XPATH_NUMBER;
ret->floatval = val;
#ifdef XP_DEBUG_OBJ_USAGE
xmlXPathDebugObjUsageRequested(ctxt, XPATH_NUMBER);
#endif
return(ret);
} else if ((cache->miscObjs != NULL) &&
(cache->miscObjs->number != 0))
{
xmlXPathObjectPtr ret;
ret = (xmlXPathObjectPtr)
cache->miscObjs->items[--cache->miscObjs->number];
ret->type = XPATH_NUMBER;
ret->floatval = val;
#ifdef XP_DEBUG_OBJ_USAGE
xmlXPathDebugObjUsageRequested(ctxt, XPATH_NUMBER);
#endif
return(ret);
}
}
return(xmlXPathNewFloat(val));
} | 0 | [
"CWE-119"
] | libxml2 | 91d19754d46acd4a639a8b9e31f50f31c78f8c9c | 165,548,715,694,111,780,000,000,000,000,000,000,000 | 36 | Fix the semantic of XPath axis for namespace/attribute context nodes
The processing of namespace and attributes nodes was not compliant
to the XPath-1.0 specification |
QueryParams::size_type QueryParams::paramcount(const std::string& name) const
{
size_type count = 0;
for (size_type nn = 0; nn < _values.size(); ++nn)
if (_values[nn].name == name)
++count;
return count;
} | 0 | [
"CWE-399"
] | cxxtools | 142bb2589dc184709857c08c1e10570947c444e3 | 146,546,586,358,244,790,000,000,000,000,000,000,000 | 10 | fix parsing double % in query parameters |
int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
{
int rc;
dn->parent = parent;
rc = of_attach_node(dn);
if (rc) {
printk(KERN_ERR "Failed to add device node %pOF\n", dn);
return rc;
}
return 0;
} | 0 | [
"CWE-476"
] | linux | efa9ace68e487ddd29c2b4d6dd23242158f1f607 | 11,661,262,431,224,115,000,000,000,000,000,000,000 | 14 | powerpc/pseries/dlpar: Fix a missing check in dlpar_parse_cc_property()
In dlpar_parse_cc_property(), 'prop->name' is allocated by kstrdup().
kstrdup() may return NULL, so it should be checked and handle error.
And prop should be freed if 'prop->name' is NULL.
Signed-off-by: Gen Zhang <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]> |
plpgsql_validator(PG_FUNCTION_ARGS)
{
Oid funcoid = PG_GETARG_OID(0);
HeapTuple tuple;
Form_pg_proc proc;
char functyptype;
int numargs;
Oid *argtypes;
char **argnames;
char *argmodes;
bool is_dml_trigger = false;
bool is_event_trigger = false;
int i;
/* Get the new function's pg_proc entry */
tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcoid));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for function %u", funcoid);
proc = (Form_pg_proc) GETSTRUCT(tuple);
functyptype = get_typtype(proc->prorettype);
/* Disallow pseudotype result */
/* except for TRIGGER, RECORD, VOID, or polymorphic */
if (functyptype == TYPTYPE_PSEUDO)
{
/* we assume OPAQUE with no arguments means a trigger */
if (proc->prorettype == TRIGGEROID ||
(proc->prorettype == OPAQUEOID && proc->pronargs == 0))
is_dml_trigger = true;
else if (proc->prorettype == EVTTRIGGEROID)
is_event_trigger = true;
else if (proc->prorettype != RECORDOID &&
proc->prorettype != VOIDOID &&
!IsPolymorphicType(proc->prorettype))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("PL/pgSQL functions cannot return type %s",
format_type_be(proc->prorettype))));
}
/* Disallow pseudotypes in arguments (either IN or OUT) */
/* except for polymorphic */
numargs = get_func_arg_info(tuple,
&argtypes, &argnames, &argmodes);
for (i = 0; i < numargs; i++)
{
if (get_typtype(argtypes[i]) == TYPTYPE_PSEUDO)
{
if (!IsPolymorphicType(argtypes[i]))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("PL/pgSQL functions cannot accept type %s",
format_type_be(argtypes[i]))));
}
}
/* Postpone body checks if !check_function_bodies */
if (check_function_bodies)
{
FunctionCallInfoData fake_fcinfo;
FmgrInfo flinfo;
int rc;
TriggerData trigdata;
EventTriggerData etrigdata;
/*
* Connect to SPI manager (is this needed for compilation?)
*/
if ((rc = SPI_connect()) != SPI_OK_CONNECT)
elog(ERROR, "SPI_connect failed: %s", SPI_result_code_string(rc));
/*
* Set up a fake fcinfo with just enough info to satisfy
* plpgsql_compile().
*/
MemSet(&fake_fcinfo, 0, sizeof(fake_fcinfo));
MemSet(&flinfo, 0, sizeof(flinfo));
fake_fcinfo.flinfo = &flinfo;
flinfo.fn_oid = funcoid;
flinfo.fn_mcxt = CurrentMemoryContext;
if (is_dml_trigger)
{
MemSet(&trigdata, 0, sizeof(trigdata));
trigdata.type = T_TriggerData;
fake_fcinfo.context = (Node *) &trigdata;
}
else if (is_event_trigger)
{
MemSet(&etrigdata, 0, sizeof(etrigdata));
etrigdata.type = T_EventTriggerData;
fake_fcinfo.context = (Node *) &etrigdata;
}
/* Test-compile the function */
plpgsql_compile(&fake_fcinfo, true);
/*
* Disconnect from SPI manager
*/
if ((rc = SPI_finish()) != SPI_OK_FINISH)
elog(ERROR, "SPI_finish failed: %s", SPI_result_code_string(rc));
}
ReleaseSysCache(tuple);
PG_RETURN_VOID();
} | 1 | [
"CWE-264"
] | postgres | 537cbd35c893e67a63c59bc636c3e888bd228bc7 | 330,594,102,185,345,200,000,000,000,000,000,000,000 | 108 | Prevent privilege escalation in explicit calls to PL validators.
The primary role of PL validators is to be called implicitly during
CREATE FUNCTION, but they are also normal functions that a user can call
explicitly. Add a permissions check to each validator to ensure that a
user cannot use explicit validator calls to achieve things he could not
otherwise achieve. Back-patch to 8.4 (all supported versions).
Non-core procedural language extensions ought to make the same two-line
change to their own validators.
Andres Freund, reviewed by Tom Lane and Noah Misch.
Security: CVE-2014-0061 |
static int vrend_draw_bind_samplers_shader(struct vrend_context *ctx,
int shader_type,
int next_sampler_id)
{
int index = 0;
uint32_t dirty = ctx->sub->sampler_views_dirty[shader_type];
uint32_t mask = ctx->sub->prog->samplers_used_mask[shader_type];
while (mask) {
int i = u_bit_scan(&mask);
struct vrend_sampler_view *tview = ctx->sub->views[shader_type].views[i];
if (dirty & (1 << i) && tview) {
if (ctx->sub->prog->shadow_samp_mask[shader_type] & (1 << i)) {
glUniform4f(ctx->sub->prog->shadow_samp_mask_locs[shader_type][index],
(tview->gl_swizzle_r == GL_ZERO || tview->gl_swizzle_r == GL_ONE) ? 0.0 : 1.0,
(tview->gl_swizzle_g == GL_ZERO || tview->gl_swizzle_g == GL_ONE) ? 0.0 : 1.0,
(tview->gl_swizzle_b == GL_ZERO || tview->gl_swizzle_b == GL_ONE) ? 0.0 : 1.0,
(tview->gl_swizzle_a == GL_ZERO || tview->gl_swizzle_a == GL_ONE) ? 0.0 : 1.0);
glUniform4f(ctx->sub->prog->shadow_samp_add_locs[shader_type][index],
tview->gl_swizzle_r == GL_ONE ? 1.0 : 0.0,
tview->gl_swizzle_g == GL_ONE ? 1.0 : 0.0,
tview->gl_swizzle_b == GL_ONE ? 1.0 : 0.0,
tview->gl_swizzle_a == GL_ONE ? 1.0 : 0.0);
}
if (tview->texture) {
GLuint id;
struct vrend_resource *texture = tview->texture;
GLenum target = tview->target;
debug_texture(__func__, tview->texture);
if (texture->storage == VREND_RESOURCE_STORAGE_BUFFER) {
id = texture->tbo_tex_id;
target = GL_TEXTURE_BUFFER;
} else
id = tview->id;
glActiveTexture(GL_TEXTURE0 + next_sampler_id);
glBindTexture(target, id);
if (ctx->sub->views[shader_type].old_ids[i] != id ||
ctx->sub->sampler_views_dirty[shader_type] & (1 << i)) {
vrend_apply_sampler_state(ctx, texture, shader_type, i,
next_sampler_id, tview);
ctx->sub->views[shader_type].old_ids[i] = id;
}
dirty &= ~(1 << i);
}
}
next_sampler_id++;
index++;
}
ctx->sub->sampler_views_dirty[shader_type] = dirty;
return next_sampler_id;
} | 0 | [
"CWE-787"
] | virglrenderer | cbc8d8b75be360236cada63784046688aeb6d921 | 65,172,788,959,294,640,000,000,000,000,000,000,000 | 59 | vrend: check transfer bounds for negative values too and report error
Closes #138
Signed-off-by: Gert Wollny <[email protected]>
Reviewed-by: Emil Velikov <[email protected]> |
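The message's point generalizes: when offsets and sizes arrive as signed values, reject negatives before comparing against an unsigned buffer size, and order the comparison so the addition cannot wrap. A hedged, standalone sketch, not virglrenderer's code.

#include <stdint.h>

/* Returns 0 when [offset, offset + size) fits inside a buf_size-byte resource. */
static int check_transfer_bounds(int64_t offset, int64_t size, uint64_t buf_size)
{
    if (offset < 0 || size < 0)
        return -1;                    /* negatives must be an error, not a cast */
    if ((uint64_t)offset > buf_size)
        return -1;
    if ((uint64_t)size > buf_size - (uint64_t)offset)
        return -1;                    /* written this way, nothing can overflow */
    return 0;
}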
line_construct_pts(LINE *line, Point *pt1, Point *pt2)
{
if (FPeq(pt1->x, pt2->x))
{ /* vertical */
/* use "x = C" */
line->A = -1;
line->B = 0;
line->C = pt1->x;
#ifdef GEODEBUG
printf("line_construct_pts- line is vertical\n");
#endif
}
else if (FPeq(pt1->y, pt2->y))
{ /* horizontal */
/* use "y = C" */
line->A = 0;
line->B = -1;
line->C = pt1->y;
#ifdef GEODEBUG
printf("line_construct_pts- line is horizontal\n");
#endif
}
else
{
/* use "mx - y + yinter = 0" */
line->A = (pt2->y - pt1->y) / (pt2->x - pt1->x);
line->B = -1.0;
line->C = pt1->y - line->A * pt1->x;
/* on some platforms, the preceding expression tends to produce -0 */
if (line->C == 0.0)
line->C = 0.0;
#ifdef GEODEBUG
printf("line_construct_pts- line is neither vertical nor horizontal (diffs x=%.*g, y=%.*g\n",
DBL_DIG, (pt2->x - pt1->x), DBL_DIG, (pt2->y - pt1->y));
#endif
}
} | 0 | [
"CWE-703",
"CWE-189"
] | postgres | 31400a673325147e1205326008e32135a78b4d8a | 197,448,199,279,544,660,000,000,000,000,000,000,000 | 37 | Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064 |
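"Predicting the overflow" means checking the size computation itself before it reaches the allocator, so a wrapped value never produces an undersized buffer. A minimal sketch of that check, not PostgreSQL's actual macro set.

#include <stdint.h>
#include <stdlib.h>

/* Allocate nelem elements of elsize bytes, refusing if the product would wrap. */
static void *alloc_array(size_t nelem, size_t elsize)
{
    if (elsize != 0 && nelem > SIZE_MAX / elsize)
        return NULL;                  /* nelem * elsize would overflow size_t */
    return malloc(nelem * elsize);
}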
get_buffer_info(buf_T *buf)
{
dict_T *dict;
tabpage_T *tp;
win_T *wp;
list_T *windows;
dict = dict_alloc();
if (dict == NULL)
return NULL;
dict_add_number(dict, "bufnr", buf->b_fnum);
dict_add_string(dict, "name", buf->b_ffname);
dict_add_number(dict, "lnum", buf == curbuf ? curwin->w_cursor.lnum
: buflist_findlnum(buf));
dict_add_number(dict, "loaded", buf->b_ml.ml_mfp != NULL);
dict_add_number(dict, "listed", buf->b_p_bl);
dict_add_number(dict, "changed", bufIsChanged(buf));
dict_add_number(dict, "changedtick", CHANGEDTICK(buf));
dict_add_number(dict, "hidden",
buf->b_ml.ml_mfp != NULL && buf->b_nwindows == 0);
/* Get a reference to buffer variables */
dict_add_dict(dict, "variables", buf->b_vars);
/* List of windows displaying this buffer */
windows = list_alloc();
if (windows != NULL)
{
FOR_ALL_TAB_WINDOWS(tp, wp)
if (wp->w_buffer == buf)
list_append_number(windows, (varnumber_T)wp->w_id);
dict_add_list(dict, "windows", windows);
}
#ifdef FEAT_SIGNS
if (buf->b_signlist != NULL)
{
/* List of signs placed in this buffer */
list_T *signs = list_alloc();
if (signs != NULL)
{
get_buffer_signs(buf, signs);
dict_add_list(dict, "signs", signs);
}
}
#endif
return dict;
} | 0 | [
"CWE-78"
] | vim | 8c62a08faf89663e5633dc5036cd8695c80f1075 | 39,203,504,379,705,645,000,000,000,000,000,000,000 | 50 | patch 8.1.0881: can execute shell commands in rvim through interfaces
Problem: Can execute shell commands in rvim through interfaces.
Solution: Disable using interfaces in restricted mode. Allow for writing
file with writefile(), histadd() and a few others. |
TEST_F(RouterTest, UpstreamPerTryTimeout) {
NiceMock<Http::MockRequestEncoder> encoder;
Http::ResponseDecoder* response_decoder = nullptr;
EXPECT_CALL(cm_.thread_local_cluster_.conn_pool_, newStream(_, _))
.WillOnce(Invoke(
[&](Http::ResponseDecoder& decoder,
Http::ConnectionPool::Callbacks& callbacks) -> Http::ConnectionPool::Cancellable* {
response_decoder = &decoder;
callbacks.onPoolReady(encoder, cm_.thread_local_cluster_.conn_pool_.host_,
upstream_stream_info_, Http::Protocol::Http10);
return nullptr;
}));
Http::TestRequestHeaderMapImpl headers{{"x-envoy-internal", "true"},
{"x-envoy-upstream-rq-per-try-timeout-ms", "5"}};
HttpTestUtility::addDefaultHeaders(headers);
router_.decodeHeaders(headers, false);
// We verify that both timeouts are started after decodeData(_, true) is called. This
// verifies that we are not starting the initial per try timeout on the first onPoolReady.
expectPerTryTimerCreate();
expectResponseTimerCreate();
Buffer::OwnedImpl data;
router_.decodeData(data, true);
EXPECT_EQ(1U,
callbacks_.route_->route_entry_.virtual_cluster_.stats().upstream_rq_total_.value());
EXPECT_CALL(callbacks_.stream_info_,
setResponseFlag(StreamInfo::ResponseFlag::UpstreamRequestTimeout));
EXPECT_CALL(encoder.stream_, resetStream(Http::StreamResetReason::LocalReset));
Http::TestResponseHeaderMapImpl response_headers{
{":status", "504"}, {"content-length", "24"}, {"content-type", "text/plain"}};
EXPECT_CALL(callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), false));
EXPECT_CALL(callbacks_, encodeData(_, true));
EXPECT_CALL(
cm_.thread_local_cluster_.conn_pool_.host_->outlier_detector_,
putResult(Upstream::Outlier::Result::LocalOriginTimeout, absl::optional<uint64_t>(504)));
per_try_timeout_->invokeCallback();
EXPECT_EQ(1U, cm_.thread_local_cluster_.cluster_.info_->stats_store_
.counter("upstream_rq_per_try_timeout")
.value());
EXPECT_EQ(1UL, cm_.thread_local_cluster_.conn_pool_.host_->stats().rq_timeout_.value());
EXPECT_TRUE(verifyHostUpstreamStats(0, 1));
} | 0 | [
"CWE-703"
] | envoy | 5bf9b0f1e7f247a4eee7180849cb0823926f7fff | 137,612,961,112,371,090,000,000,000,000,000,000,000 | 45 | [1.21] CVE-2022-21655
Signed-off-by: Otto van der Schaaf <[email protected]> |
QPDFNameTreeObjectHelper::updateMap(QPDFObjectHandle oh)
{
if (this->m->seen.count(oh.getObjGen()))
{
return;
}
this->m->seen.insert(oh.getObjGen());
QPDFObjectHandle names = oh.getKey("/Names");
if (names.isArray())
{
size_t nitems = names.getArrayNItems();
size_t i = 0;
while (i < nitems - 1)
{
QPDFObjectHandle name = names.getArrayItem(i);
if (name.isString())
{
++i;
QPDFObjectHandle obj = names.getArrayItem(i);
this->m->entries[name.getUTF8Value()] = obj;
}
++i;
}
}
QPDFObjectHandle kids = oh.getKey("/Kids");
if (kids.isArray())
{
size_t nitems = kids.getArrayNItems();
for (size_t i = 0; i < nitems; ++i)
{
updateMap(kids.getArrayItem(i));
}
}
} | 1 | [
"CWE-787"
] | qpdf | d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e | 87,623,462,360,968,880,000,000,000,000,000,000,000 | 34 | Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition. |
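The range-checked conversion calls the message mentions look, in spirit, like the helper below: every narrowing is explicit and fails loudly instead of truncating silently. Illustrative C, not QPDF's actual (C++) helpers.

#include <errno.h>
#include <limits.h>
#include <stdint.h>

/* Convert uint64_t to int, refusing values that do not fit. */
static int u64_to_int(uint64_t v, int *out)
{
    if (v > (uint64_t)INT_MAX) {
        errno = ERANGE;
        return -1;
    }
    *out = (int)v;
    return 0;
}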
int ssl_check_clienthello_tlsext(SSL *s)
{
int ret=SSL_TLSEXT_ERR_NOACK;
int al = SSL_AD_UNRECOGNIZED_NAME;
#ifndef OPENSSL_NO_EC
/* The handling of the ECPointFormats extension is done elsewhere, namely in
* ssl3_choose_cipher in s3_lib.c.
*/
/* The handling of the EllipticCurves extension is done elsewhere, namely in
* ssl3_choose_cipher in s3_lib.c.
*/
#endif
if (s->ctx != NULL && s->ctx->tlsext_servername_callback != 0)
ret = s->ctx->tlsext_servername_callback(s, &al, s->ctx->tlsext_servername_arg);
else if (s->initial_ctx != NULL && s->initial_ctx->tlsext_servername_callback != 0)
ret = s->initial_ctx->tlsext_servername_callback(s, &al, s->initial_ctx->tlsext_servername_arg);
/* If status request then ask callback what to do.
* Note: this must be called after servername callbacks in case
* the certificate has changed.
*/
if ((s->tlsext_status_type != -1) && s->ctx && s->ctx->tlsext_status_cb)
{
int r;
r = s->ctx->tlsext_status_cb(s, s->ctx->tlsext_status_arg);
switch (r)
{
/* We don't want to send a status request response */
case SSL_TLSEXT_ERR_NOACK:
s->tlsext_status_expected = 0;
break;
/* status request response should be sent */
case SSL_TLSEXT_ERR_OK:
if (s->tlsext_ocsp_resp)
s->tlsext_status_expected = 1;
else
s->tlsext_status_expected = 0;
break;
/* something bad happened */
case SSL_TLSEXT_ERR_ALERT_FATAL:
ret = SSL_TLSEXT_ERR_ALERT_FATAL;
al = SSL_AD_INTERNAL_ERROR;
goto err;
}
}
else
s->tlsext_status_expected = 0;
#ifdef TLSEXT_TYPE_opaque_prf_input
{
/* This sort of belongs into ssl_prepare_serverhello_tlsext(),
* but we might be sending an alert in response to the client hello,
* so this has to happen here in ssl_check_clienthello_tlsext(). */
int r = 1;
if (s->ctx->tlsext_opaque_prf_input_callback != 0)
{
r = s->ctx->tlsext_opaque_prf_input_callback(s, NULL, 0, s->ctx->tlsext_opaque_prf_input_callback_arg);
if (!r)
{
ret = SSL_TLSEXT_ERR_ALERT_FATAL;
al = SSL_AD_INTERNAL_ERROR;
goto err;
}
}
if (s->s3->server_opaque_prf_input != NULL) /* shouldn't really happen */
OPENSSL_free(s->s3->server_opaque_prf_input);
s->s3->server_opaque_prf_input = NULL;
if (s->tlsext_opaque_prf_input != NULL)
{
if (s->s3->client_opaque_prf_input != NULL &&
s->s3->client_opaque_prf_input_len == s->tlsext_opaque_prf_input_len)
{
/* can only use this extension if we have a server opaque PRF input
* of the same length as the client opaque PRF input! */
if (s->tlsext_opaque_prf_input_len == 0)
s->s3->server_opaque_prf_input = OPENSSL_malloc(1); /* dummy byte just to get non-NULL */
else
s->s3->server_opaque_prf_input = BUF_memdup(s->tlsext_opaque_prf_input, s->tlsext_opaque_prf_input_len);
if (s->s3->server_opaque_prf_input == NULL)
{
ret = SSL_TLSEXT_ERR_ALERT_FATAL;
al = SSL_AD_INTERNAL_ERROR;
goto err;
}
s->s3->server_opaque_prf_input_len = s->tlsext_opaque_prf_input_len;
}
}
if (r == 2 && s->s3->server_opaque_prf_input == NULL)
{
/* The callback wants to enforce use of the extension,
* but we can't do that with the client opaque PRF input;
* abort the handshake.
*/
ret = SSL_TLSEXT_ERR_ALERT_FATAL;
al = SSL_AD_HANDSHAKE_FAILURE;
}
}
#endif
err:
switch (ret)
{
case SSL_TLSEXT_ERR_ALERT_FATAL:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
return -1;
case SSL_TLSEXT_ERR_ALERT_WARNING:
ssl3_send_alert(s,SSL3_AL_WARNING,al);
return 1;
case SSL_TLSEXT_ERR_NOACK:
s->servername_done=0;
default:
return 1;
}
} | 0 | [] | openssl | ee2ffc279417f15fef3b1073c7dc81a908991516 | 152,076,046,890,099,000,000,000,000,000,000,000,000 | 124 | Add Next Protocol Negotiation. |
static void lo_fsyncdir(fuse_req_t req, fuse_ino_t ino, int datasync,
struct fuse_file_info *fi)
{
int res;
struct lo_dirp *d;
int fd;
(void)ino;
d = lo_dirp(req, fi);
if (!d) {
fuse_reply_err(req, EBADF);
return;
}
fd = dirfd(d->dp);
if (datasync) {
res = fdatasync(fd);
} else {
res = fsync(fd);
}
lo_dirp_put(&d);
fuse_reply_err(req, res == -1 ? errno : 0);
} | 0 | [] | qemu | 6084633dff3a05d63176e06d7012c7e15aba15be | 246,144,895,411,297,730,000,000,000,000,000,000,000 | 26 | tools/virtiofsd: xattr name mappings: Add option
Add an option to define mappings of xattr names so that
the client and server filesystems see different views.
This can be used to have different SELinux mappings as
seen by the guest, to run the virtiofsd with less privileges
(e.g. in a case where it can't set trusted/system/security
xattrs but you want the guest to be able to), or to isolate
multiple users of the same name; e.g. trusted attributes
used by stacking overlayfs.
A mapping engine is used with 3 simple rules; the rules can
be combined to allow most useful mapping scenarios.
The ruleset is defined by -o xattrmap='rules...'.
This patch doesn't use the rule maps yet.
Signed-off-by: Dr. David Alan Gilbert <[email protected]>
Message-Id: <[email protected]>
Reviewed-by: Stefan Hajnoczi <[email protected]>
Signed-off-by: Dr. David Alan Gilbert <[email protected]> |
krb5_ticket_get_endtime(krb5_context context,
const krb5_ticket *ticket)
{
return ticket->ticket.endtime;
} | 0 | [
"CWE-345"
] | heimdal | 6dd3eb836bbb80a00ffced4ad57077a1cdf227ea | 46,566,824,111,172,470,000,000,000,000,000,000,000 | 5 | CVE-2017-11103: Orpheus' Lyre KDC-REP service name validation
In _krb5_extract_ticket() the KDC-REP service name must be obtained from the
encrypted version stored in 'enc_part' instead of the unencrypted version
stored in 'ticket'. Use of the unencrypted version provides an
opportunity for successful server impersonation and other attacks.
Identified by Jeffrey Altman, Viktor Duchovni and Nico Williams.
Change-Id: I45ef61e8a46e0f6588d64b5bd572a24c7432547c |
zone_detachdb(dns_zone_t *zone) {
REQUIRE(zone->db != NULL);
dns_db_detach(&zone->db);
} | 0 | [
"CWE-327"
] | bind9 | f09352d20a9d360e50683cd1d2fc52ccedcd77a0 | 212,707,107,205,259,970,000,000,000,000,000,000,000 | 5 | Update keyfetch_done compute_tag check
If in keyfetch_done the compute_tag fails (because for example the
algorithm is not supported), don't crash, but instead ignore the
key. |
ClientRequestContext::clientAccessCheckDone(const allow_t &answer)
{
acl_checklist = NULL;
err_type page_id;
Http::StatusCode status;
debugs(85, 2, "The request " << http->request->method << ' ' <<
http->uri << " is " << answer <<
"; last ACL checked: " << (AclMatchedName ? AclMatchedName : "[none]"));
#if USE_AUTH
char const *proxy_auth_msg = "<null>";
if (http->getConn() != NULL && http->getConn()->getAuth() != NULL)
proxy_auth_msg = http->getConn()->getAuth()->denyMessage("<null>");
else if (http->request->auth_user_request != NULL)
proxy_auth_msg = http->request->auth_user_request->denyMessage("<null>");
#endif
if (!answer.allowed()) {
// auth has a grace period where credentials can be expired but okay not to challenge.
/* Send an auth challenge or error */
// XXX: do we still need aclIsProxyAuth() ?
bool auth_challenge = (answer == ACCESS_AUTH_REQUIRED || aclIsProxyAuth(AclMatchedName));
debugs(85, 5, "Access Denied: " << http->uri);
debugs(85, 5, "AclMatchedName = " << (AclMatchedName ? AclMatchedName : "<null>"));
#if USE_AUTH
if (auth_challenge)
debugs(33, 5, "Proxy Auth Message = " << (proxy_auth_msg ? proxy_auth_msg : "<null>"));
#endif
/*
* NOTE: get page_id here, based on AclMatchedName because if
* USE_DELAY_POOLS is enabled, then AclMatchedName gets clobbered in
* the clientCreateStoreEntry() call just below. Pedro Ribeiro
* <[email protected]>
*/
page_id = aclGetDenyInfoPage(&Config.denyInfoList, AclMatchedName, answer != ACCESS_AUTH_REQUIRED);
http->logType = LOG_TCP_DENIED;
if (auth_challenge) {
#if USE_AUTH
if (http->request->flags.sslBumped) {
/*SSL Bumped request, authentication is not possible*/
status = Http::scForbidden;
} else if (!http->flags.accel) {
/* Proxy authorisation needed */
status = Http::scProxyAuthenticationRequired;
} else {
/* WWW authorisation needed */
status = Http::scUnauthorized;
}
#else
// need auth, but not possible to do.
status = Http::scForbidden;
#endif
if (page_id == ERR_NONE)
page_id = ERR_CACHE_ACCESS_DENIED;
} else {
status = Http::scForbidden;
if (page_id == ERR_NONE)
page_id = ERR_ACCESS_DENIED;
}
Ip::Address tmpnoaddr;
tmpnoaddr.setNoAddr();
error = clientBuildError(page_id, status,
NULL,
http->getConn() != NULL ? http->getConn()->clientConnection->remote : tmpnoaddr,
http->request
);
#if USE_AUTH
error->auth_user_request =
http->getConn() != NULL && http->getConn()->getAuth() != NULL ?
http->getConn()->getAuth() : http->request->auth_user_request;
#endif
readNextRequest = true;
}
/* ACCESS_ALLOWED continues here ... */
xfree(http->uri);
http->uri = SBufToCstring(http->request->effectiveRequestUri());
http->doCallouts();
} | 0 | [
"CWE-116"
] | squid | e7cf864f938f24eea8af0692c04d16790983c823 | 302,966,671,514,876,660,000,000,000,000,000,000,000 | 87 | Handle more Range requests (#790)
Also removed some effectively unused code. |
static int br_multicast_add_group(struct net_bridge *br,
struct net_bridge_port *port,
struct br_ip *group)
{
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
unsigned long now = jiffies;
int err;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED))
goto out;
mp = br_multicast_new_group(br, port, group);
err = PTR_ERR(mp);
if (IS_ERR(mp))
goto err;
if (!port) {
if (hlist_unhashed(&mp->mglist))
hlist_add_head(&mp->mglist, &br->mglist);
mod_timer(&mp->timer, now + br->multicast_membership_interval);
goto out;
}
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (p->port == port)
goto found;
if ((unsigned long)p->port < (unsigned long)port)
break;
}
p = kzalloc(sizeof(*p), GFP_ATOMIC);
err = -ENOMEM;
if (unlikely(!p))
goto err;
p->addr = *group;
p->port = port;
p->next = *pp;
hlist_add_head(&p->mglist, &port->mglist);
setup_timer(&p->timer, br_multicast_port_group_expired,
(unsigned long)p);
setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
(unsigned long)p);
rcu_assign_pointer(*pp, p);
found:
mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
err = 0;
err:
spin_unlock(&br->multicast_lock);
return err;
} | 0 | [
"CWE-399"
] | linux | 6b0d6a9b4296fa16a28d10d416db7a770fc03287 | 329,324,791,886,772,000,000,000,000,000,000,000,000 | 61 | bridge: Fix mglist corruption that leads to memory corruption
The list mp->mglist is used to indicate whether a multicast group
is active on the bridge interface itself as opposed to one of the
constituent interfaces in the bridge.
Unfortunately the operation that adds the mp->mglist node to the
list neglected to check whether it has already been added. This
leads to list corruption in the form of nodes pointing to itself.
Normally this would be quite obvious as it would cause an infinite
loop when walking the list. However, as this list is never actually
walked (which means that we don't really need it, I'll get rid of
it in a subsequent patch), this instead is hidden until we perform
a delete operation on the affected nodes.
As the same node may now be pointed to by more than one node, the
delete operations can then cause modification of freed memory.
This was observed in practice to cause corruption in 512-byte slabs,
most commonly leading to crashes in jbd2.
Thanks to Josef Bacik for pointing me in the right direction.
Reported-by: Ian Page Hands <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
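A minimal sketch of the guard this message describes — it is the same check visible near the top of the (already fixed) function above, using the kernel hlist API:

	/* Only link the node if it is not already on a list; without this
	 * check a second add leaves the node pointing at itself. */
	if (hlist_unhashed(&mp->mglist))
		hlist_add_head(&mp->mglist, &br->mglist);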
send_key_attr (ctrl_t ctrl, app_t app, const char *keyword, int number)
{
char buffer[200];
int n_bits;
const char *curve_oid;
assert (number >=0 && number < DIM(app->app_local->keyattr));
if (app->app_local->keyattr[number].key_type == KEY_TYPE_RSA)
snprintf (buffer, sizeof buffer, "%d 1 %u %u %d",
number+1,
app->app_local->keyattr[number].rsa.n_bits,
app->app_local->keyattr[number].rsa.e_bits,
app->app_local->keyattr[number].rsa.format);
else if (app->app_local->keyattr[number].key_type == KEY_TYPE_ECC)
{
get_ecc_key_parameters (app->app_local->keyattr[number].ecc.curve,
&n_bits, &curve_oid);
snprintf (buffer, sizeof buffer, "%d %d %u %s",
number+1, number==1? 18: 19, n_bits, curve_oid);
}
else if (app->app_local->keyattr[number].key_type == KEY_TYPE_EDDSA)
{
get_ecc_key_parameters (app->app_local->keyattr[number].eddsa.curve,
&n_bits, &curve_oid);
snprintf (buffer, sizeof buffer, "%d 22 %u %s",
number+1, n_bits, curve_oid);
}
else
snprintf (buffer, sizeof buffer, "0 0 UNKNOWN");
send_status_direct (ctrl, keyword, buffer);
} | 0 | [
"CWE-20"
] | gnupg | 2183683bd633818dd031b090b5530951de76f392 | 116,496,407,892,915,940,000,000,000,000,000,000,000 | 33 | Use inline functions to convert buffer data to scalars.
* common/host2net.h (buf16_to_ulong, buf16_to_uint): New.
(buf16_to_ushort, buf16_to_u16): New.
(buf32_to_size_t, buf32_to_ulong, buf32_to_uint, buf32_to_u32): New.
--
Commit 91b826a38880fd8a989318585eb502582636ddd8 was not enough to
avoid all sign extension on shift problems. Hanno Böck found a case
with an invalid read due to this problem. To fix that once and for
all almost all uses of "<< 24" and "<< 8" are changed by this patch to
use an inline function from host2net.h.
Signed-off-by: Werner Koch <[email protected]> |
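A minimal sketch of the kind of inline helper the commit describes — the shape is assumed, not the exact contents of common/host2net.h — where each byte is widened to an unsigned type before shifting so no signed char is ever shifted left by 24:

#include <stdint.h>

static inline uint32_t
buf32_to_u32 (const void *buffer)
{
  const unsigned char *p = buffer;
  return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
         | ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}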
static int16 TIFFClampDoubleToInt16( double val )
{
if( val > 32767 )
return 32767;
if( val < -32768 || val != val )
return -32768;
return (int16)val;
} | 0 | [
"CWE-617"
] | libtiff | de144fd228e4be8aa484c3caf3d814b6fa88c6d9 | 169,055,782,740,343,500,000,000,000,000,000,000,000 | 8 | TIFFWriteDirectorySec: avoid assertion. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2795. CVE-2018-10963 |
void jswrap_graphics_setFontCustom(JsVar *parent, JsVar *bitmap, int firstChar, JsVar *width, int height) {
JsGraphics gfx; if (!graphicsGetFromVar(&gfx, parent)) return;
if (!jsvIsString(bitmap)) {
jsExceptionHere(JSET_ERROR, "Font bitmap must be a String");
return;
}
if (firstChar<0 || firstChar>255) {
jsExceptionHere(JSET_ERROR, "First character out of range");
return;
}
if (!jsvIsString(width) && !jsvIsInt(width)) {
jsExceptionHere(JSET_ERROR, "Font width must be a String or an integer");
return;
}
if (height<=0 || height>255) {
jsExceptionHere(JSET_ERROR, "Invalid height");
return;
}
jsvObjectSetChild(parent, JSGRAPHICS_CUSTOMFONT_BMP, bitmap);
jsvObjectSetChild(parent, JSGRAPHICS_CUSTOMFONT_WIDTH, width);
jsvObjectSetChildAndUnLock(parent, JSGRAPHICS_CUSTOMFONT_HEIGHT, jsvNewFromInteger(height));
jsvObjectSetChildAndUnLock(parent, JSGRAPHICS_CUSTOMFONT_FIRSTCHAR, jsvNewFromInteger(firstChar));
gfx.data.fontSize = JSGRAPHICS_FONTSIZE_CUSTOM;
graphicsSetVar(&gfx);
} | 0 | [
"CWE-125"
] | Espruino | 8a44b04b584b3d3ab1cb68fed410f7ecb165e50e | 108,342,518,189,691,920,000,000,000,000,000,000,000 | 26 | Add height check for Graphics.createArrayBuffer(...vertical_byte:true) (fix #1421) |
static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
struct io_uring_params __user *params)
{
struct io_ring_ctx *ctx;
struct file *file;
int ret;
if (!entries)
return -EINVAL;
if (entries > IORING_MAX_ENTRIES) {
if (!(p->flags & IORING_SETUP_CLAMP))
return -EINVAL;
entries = IORING_MAX_ENTRIES;
}
/*
* Use twice as many entries for the CQ ring. It's possible for the
* application to drive a higher depth than the size of the SQ ring,
* since the sqes are only used at submission time. This allows for
* some flexibility in overcommitting a bit. If the application has
* set IORING_SETUP_CQSIZE, it will have passed in the desired number
* of CQ ring entries manually.
*/
p->sq_entries = roundup_pow_of_two(entries);
if (p->flags & IORING_SETUP_CQSIZE) {
/*
* If IORING_SETUP_CQSIZE is set, we do the same roundup
* to a power-of-two, if it isn't already. We do NOT impose
* any cq vs sq ring sizing.
*/
if (!p->cq_entries)
return -EINVAL;
if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
if (!(p->flags & IORING_SETUP_CLAMP))
return -EINVAL;
p->cq_entries = IORING_MAX_CQ_ENTRIES;
}
p->cq_entries = roundup_pow_of_two(p->cq_entries);
if (p->cq_entries < p->sq_entries)
return -EINVAL;
} else {
p->cq_entries = 2 * p->sq_entries;
}
ctx = io_ring_ctx_alloc(p);
if (!ctx)
return -ENOMEM;
ctx->compat = in_compat_syscall();
if (!capable(CAP_IPC_LOCK))
ctx->user = get_uid(current_user());
/*
* This is just grabbed for accounting purposes. When a process exits,
* the mm is exited and dropped before the files, hence we need to hang
* on to this mm purely for the purposes of being able to unaccount
* memory (locked/pinned vm). It's not used for anything else.
*/
mmgrab(current->mm);
ctx->mm_account = current->mm;
ret = io_allocate_scq_urings(ctx, p);
if (ret)
goto err;
ret = io_sq_offload_create(ctx, p);
if (ret)
goto err;
/* always set a rsrc node */
ret = io_rsrc_node_switch_start(ctx);
if (ret)
goto err;
io_rsrc_node_switch(ctx, NULL);
memset(&p->sq_off, 0, sizeof(p->sq_off));
p->sq_off.head = offsetof(struct io_rings, sq.head);
p->sq_off.tail = offsetof(struct io_rings, sq.tail);
p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
p->sq_off.flags = offsetof(struct io_rings, sq_flags);
p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
memset(&p->cq_off, 0, sizeof(p->cq_off));
p->cq_off.head = offsetof(struct io_rings, cq.head);
p->cq_off.tail = offsetof(struct io_rings, cq.tail);
p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
p->cq_off.cqes = offsetof(struct io_rings, cqes);
p->cq_off.flags = offsetof(struct io_rings, cq_flags);
p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
IORING_FEAT_LINKED_FILE;
if (copy_to_user(params, p, sizeof(*p))) {
ret = -EFAULT;
goto err;
}
file = io_uring_get_file(ctx);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
goto err;
}
/*
* Install ring fd as the very last thing, so we don't risk someone
* having closed it before we finish setup
*/
ret = io_uring_install_fd(ctx, file);
if (ret < 0) {
/* fput will clean it up */
fput(file);
return ret;
}
trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
return ret;
err:
io_ring_ctx_wait_and_kill(ctx);
return ret; | 0 | [
"CWE-909",
"CWE-94"
] | linux | 32452a3eb8b64e01e2be717f518c0be046975b9d | 100,017,018,615,125,420,000,000,000,000,000,000,000 | 127 | io_uring: fix uninitialized field in rw io_kiocb
io_rw_init_file does not initialize kiocb->private, so when iocb_bio_iopoll
reads kiocb->private it can contain uninitialized data.
Fixes: 3e08773c3841 ("block: switch polling to be bio based")
Signed-off-by: Joseph Ravichandran <[email protected]>
Signed-off-by: Jens Axboe <[email protected]> |
nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
void *data)
{
struct nfs_setaclres *res = data;
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_setattr(xdr);
out:
return status;
} | 0 | [
"CWE-787"
] | linux | b4487b93545214a9db8cbf32e86411677b0cca21 | 103,356,111,619,209,400,000,000,000,000,000,000,000 | 20 | nfs: Fix getxattr kernel panic and memory overflow
Move the buffer size check to decode_attr_security_label() before memcpy()
Only call memcpy() if the buffer is large enough
Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS")
Signed-off-by: Jeffrey Mitchell <[email protected]>
[Trond: clean up duplicate test of label->len != 0]
Signed-off-by: Trond Myklebust <[email protected]> |
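A hedged sketch of the pattern the fix describes — the names are illustrative, not the actual nfs4xdr.c code — validating the decoded length against the destination buffer before copying:

#include <string.h>
#include <errno.h>

static int copy_label(char *dst, size_t dstlen, const char *src, size_t srclen)
{
	if (srclen > dstlen)
		return -ERANGE;		/* refuse instead of overflowing dst */
	memcpy(dst, src, srclen);
	return (int)srclen;
}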
bool commit(const Reservation& reservation) {
if (static_cast<const uint8_t*>(reservation.mem_) != base_ + reservable_ ||
reservable_ + reservation.len_ > capacity_ || reservable_ >= capacity_) {
// The reservation is not from this OwnedSlice.
return false;
}
reservable_ += reservation.len_;
return true;
} | 0 | [
"CWE-401"
] | envoy | 5eba69a1f375413fb93fab4173f9c393ac8c2818 | 129,235,414,828,192,570,000,000,000,000,000,000,000 | 9 | [buffer] Add on-drain hook to buffer API and use it to avoid fragmentation due to tracking of H2 data and control frames in the output buffer (#144)
Signed-off-by: antonio <[email protected]> |
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, bool more)
{
unsigned int len;
int rc;
if (dev_nit_active(dev))
dev_queue_xmit_nit(skb, dev);
len = skb->len;
trace_net_dev_start_xmit(skb, dev);
rc = netdev_start_xmit(skb, dev, txq, more);
trace_net_dev_xmit(skb, rc, dev, len);
return rc; | 0 | [
"CWE-416"
] | linux | a4270d6795b0580287453ea55974d948393e66ef | 99,947,091,517,078,960,000,000,000,000,000,000,000 | 16 | net-gro: fix use-after-free read in napi_gro_frags()
If a network driver provides to napi_gro_frags() an
skb with a page fragment of exactly 14 bytes, the call
to gro_pull_from_frag0() will 'consume' the fragment
by calling skb_frag_unref(skb, 0), and the page might
be freed and reused.
Reading eth->h_proto at the end of napi_frags_skb() might
read mangled data, or crash under specific debugging features.
BUG: KASAN: use-after-free in napi_frags_skb net/core/dev.c:5833 [inline]
BUG: KASAN: use-after-free in napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
Read of size 2 at addr ffff88809366840c by task syz-executor599/8957
CPU: 1 PID: 8957 Comm: syz-executor599 Not tainted 5.2.0-rc1+ #32
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0x172/0x1f0 lib/dump_stack.c:113
print_address_description.cold+0x7c/0x20d mm/kasan/report.c:188
__kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317
kasan_report+0x12/0x20 mm/kasan/common.c:614
__asan_report_load_n_noabort+0xf/0x20 mm/kasan/generic_report.c:142
napi_frags_skb net/core/dev.c:5833 [inline]
napi_gro_frags+0xc6f/0xd10 net/core/dev.c:5841
tun_get_user+0x2f3c/0x3ff0 drivers/net/tun.c:1991
tun_chr_write_iter+0xbd/0x156 drivers/net/tun.c:2037
call_write_iter include/linux/fs.h:1872 [inline]
do_iter_readv_writev+0x5f8/0x8f0 fs/read_write.c:693
do_iter_write fs/read_write.c:970 [inline]
do_iter_write+0x184/0x610 fs/read_write.c:951
vfs_writev+0x1b3/0x2f0 fs/read_write.c:1015
do_writev+0x15b/0x330 fs/read_write.c:1058
Fixes: a50e233c50db ("net-gro: restore frag0 optimization")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: syzbot <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
static char *genstr(char *prefix, int i) {
static char result[64] = {0};
snprintf(result, sizeof(result), "%s%d", prefix, i);
return result;
} | 0 | [
"CWE-190"
] | redis | f6a40570fa63d5afdd596c78083d754081d80ae3 | 171,164,520,511,438,430,000,000,000,000,000,000,000 | 5 | Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting from trying to
  allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching a size above 1GB; such structures
  will be converted to HT encoding, since that is not a useful size.
- prevent listpack (stream) from reaching a size above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB,
now it'll respond with an error. |
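An illustrative guard only — the constant and helper name are assumptions, not the actual ziplist/stream code — showing the kind of 1GB cap the notes above describe:

#include <stddef.h>

#define SAFETY_LIMIT (1024ULL * 1024 * 1024)   /* 1GB, per the changelog above */

/* Return 1 if adding entry_bytes keeps the encoding under the cap. */
static int entry_fits(size_t current_bytes, size_t entry_bytes)
{
    if (entry_bytes > SAFETY_LIMIT) return 0;                  /* oversized single record */
    if (current_bytes > SAFETY_LIMIT - entry_bytes) return 0;  /* sum would pass the cap */
    return 1;
}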
int isFileInDir(char *dir, char *file){
size_t length, dirLength;
char *fullpath = NULL;
FILE *f = NULL;
int foundFile = 0;
dirLength = strlen(dir);
	/* Construct 'full' path */
if (dir[dirLength-1] == DIR_SEPARATOR) {
/* remove trailing '/' */
dir[dirLength-1] = '\0';
dirLength--;
}
length = dirLength + strlen(file) + 2; /* 2= '/' + null char */
fullpath = malloc(length);
if (NULL != fullpath) {
strcpy(fullpath, dir);
fullpath[dirLength] = DIR_SEPARATOR;
strcpy(fullpath+dirLength+1, file);
/* See if file exists - use fopen() for portability */
f = fopen(fullpath, "rb");
if (NULL != f) {
foundFile = 1;
fclose(f);
}
free(fullpath);
}
return foundFile;
} | 0 | [
"CWE-119"
] | openj9 | 0971f22d88f42cf7332364ad7430e9bd8681c970 | 172,531,877,943,474,950,000,000,000,000,000,000,000 | 31 | Clean up jio_snprintf and jio_vfprintf
Fixes https://bugs.eclipse.org/bugs/show_bug.cgi?id=543659
Signed-off-by: Peter Bain <[email protected]> |
_g_filename_is_hidden (const gchar *name)
{
if (name[0] != '.') return FALSE;
if (name[1] == '\0') return FALSE;
if ((name[1] == '.') && (name[2] == '\0')) return FALSE;
return TRUE;
} | 0 | [
"CWE-22"
] | file-roller | b147281293a8307808475e102a14857055f81631 | 261,684,889,405,961,340,000,000,000,000,000,000,000 | 8 | libarchive: sanitize filenames before extracting |
HttpHeader::removeHopByHopEntries()
{
removeConnectionHeaderEntries();
const HttpHeaderEntry *e;
HttpHeaderPos pos = HttpHeaderInitPos;
int headers_deleted = 0;
while ((e = getEntry(&pos))) {
Http::HdrType id = e->id;
if (Http::HeaderLookupTable.lookup(id).hopbyhop) {
delAt(pos, headers_deleted);
CBIT_CLR(mask, id);
}
}
} | 0 | [
"CWE-444"
] | squid | 9c8e2a71aa1d3c159a319d9365c346c48dc783a5 | 171,666,948,157,497,600,000,000,000,000,000,000,000 | 15 | Enforce token characters for field-name (#700)
RFC 7230 defines field-name as a token. Request splitting and cache
poisoning attacks have used non-token characters to fool broken HTTP
agents behind or in front of Squid for years. This change should
significantly reduce that abuse.
If we discover exceptional situations that need special treatment, the
relaxed parser can allow them on a case-by-case basis (while being extra
careful about framing-related header fields), just like we already
tolerate some header whitespace (e.g., between the response header
field-name and colon). |
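A minimal sketch of an RFC 7230 token-character test — not Squid's actual CharacterSet code — of the kind this change enforces for field-names:

#include <ctype.h>
#include <string.h>

/* tchar = ALPHA / DIGIT / one of the punctuation characters below. */
static int is_tchar(unsigned char c)
{
    static const char extra[] = "!#$%&'*+-.^_`|~";
    if (c == '\0')
        return 0;              /* avoid matching the terminator in strchr() */
    return isalnum(c) || strchr(extra, (int)c) != NULL;
}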
main(
int argc,
char ** argv)
{
char *line = NULL;
char *qdisk = NULL;
char *qamdevice = NULL;
char *optstr = NULL;
char *err_extra = NULL;
char *s, *fp;
int ch;
dle_t *dle;
int level;
GSList *errlist;
am_level_t *alevel;
if (argc > 1 && argv && argv[1] && g_str_equal(argv[1], "--version")) {
printf("selfcheck-%s\n", VERSION);
return (0);
}
/* initialize */
/*
* Configure program for internationalization:
* 1) Only set the message locale for now.
* 2) Set textdomain for all amanda related programs to "amanda"
* We don't want to be forced to support dozens of message catalogs.
*/
setlocale(LC_MESSAGES, "C");
textdomain("amanda");
safe_fd(-1, 0);
openbsd_fd_inform();
safe_cd();
set_pname("selfcheck");
/* Don't die when child closes pipe */
signal(SIGPIPE, SIG_IGN);
add_amanda_log_handler(amanda_log_stderr);
add_amanda_log_handler(amanda_log_syslog);
dbopen(DBG_SUBDIR_CLIENT);
startclock();
dbprintf(_("version %s\n"), VERSION);
g_printf("OK version %s\n", VERSION);
print_platform();
if(argc > 2 && strcmp(argv[1], "amandad") == 0) {
amandad_auth = stralloc(argv[2]);
}
config_init(CONFIG_INIT_CLIENT, NULL);
/* (check for config errors comes later) */
check_running_as(RUNNING_AS_CLIENT_LOGIN);
our_features = am_init_feature_set();
our_feature_string = am_feature_to_string(our_features);
/* handle all service requests */
/*@ignore@*/
for(; (line = agets(stdin)) != NULL; free(line)) {
/*@end@*/
if (line[0] == '\0')
continue;
if(strncmp_const(line, "OPTIONS ") == 0) {
g_options = parse_g_options(line+8, 1);
if(!g_options->hostname) {
g_options->hostname = alloc(MAX_HOSTNAME_LENGTH+1);
gethostname(g_options->hostname, MAX_HOSTNAME_LENGTH);
g_options->hostname[MAX_HOSTNAME_LENGTH] = '\0';
}
g_printf("OPTIONS ");
if(am_has_feature(g_options->features, fe_rep_options_features)) {
g_printf("features=%s;", our_feature_string);
}
if(am_has_feature(g_options->features, fe_rep_options_hostname)) {
g_printf("hostname=%s;", g_options->hostname);
}
g_printf("\n");
fflush(stdout);
if (g_options->config) {
/* overlay this configuration on the existing (nameless) configuration */
config_init(CONFIG_INIT_CLIENT | CONFIG_INIT_EXPLICIT_NAME | CONFIG_INIT_OVERLAY,
g_options->config);
dbrename(get_config_name(), DBG_SUBDIR_CLIENT);
}
/* check for any config errors now */
if (config_errors(&errlist) >= CFGERR_ERRORS) {
char *errstr = config_errors_to_error_string(errlist);
g_printf("%s\n", errstr);
dbclose();
return 1;
}
if (am_has_feature(g_options->features, fe_req_xml)) {
break;
}
continue;
}
dle = alloc_dle();
s = line;
ch = *s++;
skip_whitespace(s, ch); /* find program name */
if (ch == '\0') {
goto err; /* no program */
}
dle->program = s - 1;
skip_non_whitespace(s, ch);
s[-1] = '\0'; /* terminate the program name */
dle->program_is_application_api = 0;
if(strcmp(dle->program,"APPLICATION")==0) {
dle->program_is_application_api = 1;
skip_whitespace(s, ch); /* find dumper name */
if (ch == '\0') {
goto err; /* no program */
}
dle->program = s - 1;
skip_non_whitespace(s, ch);
s[-1] = '\0'; /* terminate the program name */
}
if(strncmp_const(dle->program, "CALCSIZE") == 0) {
skip_whitespace(s, ch); /* find program name */
if (ch == '\0') {
goto err; /* no program */
}
dle->program = s - 1;
skip_non_whitespace(s, ch);
s[-1] = '\0';
dle->estimatelist = g_slist_append(dle->estimatelist,
GINT_TO_POINTER(ES_CALCSIZE));
}
else {
dle->estimatelist = g_slist_append(dle->estimatelist,
GINT_TO_POINTER(ES_CLIENT));
}
skip_whitespace(s, ch); /* find disk name */
if (ch == '\0') {
goto err; /* no disk */
}
qdisk = s - 1;
skip_quoted_string(s, ch);
s[-1] = '\0'; /* terminate the disk name */
dle->disk = unquote_string(qdisk);
skip_whitespace(s, ch); /* find the device or level */
if (ch == '\0') {
goto err; /* no device or level */
}
if(!isdigit((int)s[-1])) {
fp = s - 1;
skip_quoted_string(s, ch);
s[-1] = '\0'; /* terminate the device */
qamdevice = stralloc(fp);
dle->device = unquote_string(qamdevice);
skip_whitespace(s, ch); /* find level number */
}
else {
dle->device = stralloc(dle->disk);
qamdevice = stralloc(qdisk);
}
/* find level number */
if (ch == '\0' || sscanf(s - 1, "%d", &level) != 1) {
goto err; /* bad level */
}
alevel = g_new0(am_level_t, 1);
alevel->level = level;
dle->levellist = g_slist_append(dle->levellist, alevel);
skip_integer(s, ch);
skip_whitespace(s, ch);
if (ch && strncmp_const_skip(s - 1, "OPTIONS ", s, ch) == 0) {
skip_whitespace(s, ch); /* find the option string */
if(ch == '\0') {
goto err; /* bad options string */
}
optstr = s - 1;
skip_quoted_string(s, ch);
s[-1] = '\0'; /* terminate the options */
parse_options(optstr, dle, g_options->features, 1);
/*@ignore@*/
check_options(dle);
check_disk(dle);
/*@end@*/
} else if (ch == '\0') {
/* check all since no option */
need_samba=1;
need_rundump=1;
need_dump=1;
need_restore=1;
need_vdump=1;
need_vrestore=1;
need_xfsdump=1;
need_xfsrestore=1;
need_vxdump=1;
need_vxrestore=1;
need_runtar=1;
need_gnutar=1;
need_compress_path=1;
need_calcsize=1;
need_global_check=1;
/*@ignore@*/
check_disk(dle);
/*@end@*/
} else {
goto err; /* bad syntax */
}
amfree(qamdevice);
}
if (g_options == NULL) {
g_printf(_("ERROR [Missing OPTIONS line in selfcheck input]\n"));
error(_("Missing OPTIONS line in selfcheck input\n"));
/*NOTREACHED*/
}
if (am_has_feature(g_options->features, fe_req_xml)) {
char *errmsg = NULL;
dle_t *dles, *dle, *dle_next;
dles = amxml_parse_node_FILE(stdin, &errmsg);
if (errmsg) {
err_extra = errmsg;
goto err;
}
if (merge_dles_properties(dles, 1) == 0) {
goto checkoverall;
}
for (dle = dles; dle != NULL; dle = dle->next) {
run_client_scripts(EXECUTE_ON_PRE_HOST_AMCHECK, g_options, dle,
stdout);
}
for (dle = dles; dle != NULL; dle = dle->next) {
check_options(dle);
run_client_scripts(EXECUTE_ON_PRE_DLE_AMCHECK, g_options, dle,
stdout);
check_disk(dle);
run_client_scripts(EXECUTE_ON_POST_DLE_AMCHECK, g_options, dle,
stdout);
}
for (dle = dles; dle != NULL; dle = dle->next) {
run_client_scripts(EXECUTE_ON_POST_HOST_AMCHECK, g_options, dle,
stdout);
}
for (dle = dles; dle != NULL; dle = dle_next) {
dle_next = dle->next;
free_dle(dle);
}
}
checkoverall:
check_security_file_permission(stdout);
check_overall();
amfree(line);
amfree(our_feature_string);
am_release_feature_set(our_features);
our_features = NULL;
free_g_options(g_options);
dbclose();
return 0;
err:
if (err_extra) {
g_printf(_("ERROR [FORMAT ERROR IN REQUEST PACKET %s]\n"), err_extra);
dbprintf(_("REQ packet is bogus: %s\n"), err_extra);
} else {
g_printf(_("ERROR [FORMAT ERROR IN REQUEST PACKET]\n"));
dbprintf(_("REQ packet is bogus\n"));
}
dbclose();
return 1;
} | 0 | [
"CWE-264"
] | amanda | 4bf5b9b356848da98560ffbb3a07a9cb5c4ea6d7 | 187,308,718,710,823,250,000,000,000,000,000,000,000 | 290 | * Add a /etc/amanda-security.conf file
git-svn-id: https://svn.code.sf.net/p/amanda/code/amanda/branches/3_3@6486 a8d146d6-cc15-0410-8900-af154a0219e0 |
echo_string_core(
typval_T *tv,
char_u **tofree,
char_u *numbuf,
int copyID,
int echo_style,
int restore_copyID,
int composite_val)
{
static int recurse = 0;
char_u *r = NULL;
if (recurse >= DICT_MAXNEST)
{
if (!did_echo_string_emsg)
{
// Only give this message once for a recursive call to avoid
// flooding the user with errors. And stop iterating over lists
// and dicts.
did_echo_string_emsg = TRUE;
emsg(_("E724: variable nested too deep for displaying"));
}
*tofree = NULL;
return (char_u *)"{E724}";
}
++recurse;
switch (tv->v_type)
{
case VAR_STRING:
if (echo_style && !composite_val)
{
*tofree = NULL;
r = tv->vval.v_string;
if (r == NULL)
r = (char_u *)"";
}
else
{
*tofree = string_quote(tv->vval.v_string, FALSE);
r = *tofree;
}
break;
case VAR_FUNC:
if (echo_style)
{
*tofree = NULL;
r = tv->vval.v_string;
}
else
{
*tofree = string_quote(tv->vval.v_string, TRUE);
r = *tofree;
}
break;
case VAR_PARTIAL:
{
partial_T *pt = tv->vval.v_partial;
char_u *fname = string_quote(pt == NULL ? NULL
: partial_name(pt), FALSE);
garray_T ga;
int i;
char_u *tf;
ga_init2(&ga, 1, 100);
ga_concat(&ga, (char_u *)"function(");
if (fname != NULL)
{
// When using uf_name prepend "g:" for a global function.
if (pt != NULL && pt->pt_name == NULL && fname[0] == '\''
&& vim_isupper(fname[1]))
{
ga_concat(&ga, (char_u *)"'g:");
ga_concat(&ga, fname + 1);
}
else
ga_concat(&ga, fname);
vim_free(fname);
}
if (pt != NULL && pt->pt_argc > 0)
{
ga_concat(&ga, (char_u *)", [");
for (i = 0; i < pt->pt_argc; ++i)
{
if (i > 0)
ga_concat(&ga, (char_u *)", ");
ga_concat(&ga,
tv2string(&pt->pt_argv[i], &tf, numbuf, copyID));
vim_free(tf);
}
ga_concat(&ga, (char_u *)"]");
}
if (pt != NULL && pt->pt_dict != NULL)
{
typval_T dtv;
ga_concat(&ga, (char_u *)", ");
dtv.v_type = VAR_DICT;
dtv.vval.v_dict = pt->pt_dict;
ga_concat(&ga, tv2string(&dtv, &tf, numbuf, copyID));
vim_free(tf);
}
ga_concat(&ga, (char_u *)")");
*tofree = ga.ga_data;
r = *tofree;
break;
}
case VAR_BLOB:
r = blob2string(tv->vval.v_blob, tofree, numbuf);
break;
case VAR_LIST:
if (tv->vval.v_list == NULL)
{
// NULL list is equivalent to empty list.
*tofree = NULL;
r = (char_u *)"[]";
}
else if (copyID != 0 && tv->vval.v_list->lv_copyID == copyID
&& tv->vval.v_list->lv_len > 0)
{
*tofree = NULL;
r = (char_u *)"[...]";
}
else
{
int old_copyID = tv->vval.v_list->lv_copyID;
tv->vval.v_list->lv_copyID = copyID;
*tofree = list2string(tv, copyID, restore_copyID);
if (restore_copyID)
tv->vval.v_list->lv_copyID = old_copyID;
r = *tofree;
}
break;
case VAR_DICT:
if (tv->vval.v_dict == NULL)
{
// NULL dict is equivalent to empty dict.
*tofree = NULL;
r = (char_u *)"{}";
}
else if (copyID != 0 && tv->vval.v_dict->dv_copyID == copyID
&& tv->vval.v_dict->dv_hashtab.ht_used != 0)
{
*tofree = NULL;
r = (char_u *)"{...}";
}
else
{
int old_copyID = tv->vval.v_dict->dv_copyID;
tv->vval.v_dict->dv_copyID = copyID;
*tofree = dict2string(tv, copyID, restore_copyID);
if (restore_copyID)
tv->vval.v_dict->dv_copyID = old_copyID;
r = *tofree;
}
break;
case VAR_NUMBER:
case VAR_UNKNOWN:
case VAR_ANY:
case VAR_VOID:
*tofree = NULL;
r = tv_get_string_buf(tv, numbuf);
break;
case VAR_JOB:
case VAR_CHANNEL:
#ifdef FEAT_JOB_CHANNEL
*tofree = NULL;
r = tv->v_type == VAR_JOB ? job_to_string_buf(tv, numbuf)
: channel_to_string_buf(tv, numbuf);
if (composite_val)
{
*tofree = string_quote(r, FALSE);
r = *tofree;
}
#endif
break;
case VAR_INSTR:
*tofree = NULL;
r = (char_u *)"instructions";
break;
case VAR_FLOAT:
#ifdef FEAT_FLOAT
*tofree = NULL;
vim_snprintf((char *)numbuf, NUMBUFLEN, "%g", tv->vval.v_float);
r = numbuf;
break;
#endif
case VAR_BOOL:
case VAR_SPECIAL:
*tofree = NULL;
r = (char_u *)get_var_special_name(tv->vval.v_number);
break;
}
if (--recurse == 0)
did_echo_string_emsg = FALSE;
return r;
} | 0 | [
"CWE-122",
"CWE-787"
] | vim | 605ec91e5a7330d61be313637e495fa02a6dc264 | 113,953,533,567,533,610,000,000,000,000,000,000,000 | 211 | patch 8.2.3847: illegal memory access when using a lambda with an error
Problem: Illegal memory access when using a lambda with an error.
Solution: Avoid skipping over the NUL after a string. |
static int perf_parse_file(config_fn_t fn, void *data)
{
int comment = 0;
int baselen = 0;
static char var[MAXNAME];
/* U+FEFF Byte Order Mark in UTF8 */
static const unsigned char *utf8_bom = (unsigned char *) "\xef\xbb\xbf";
const unsigned char *bomptr = utf8_bom;
for (;;) {
int c = get_next_char();
if (bomptr && *bomptr) {
/* We are at the file beginning; skip UTF8-encoded BOM
* if present. Sane editors won't put this in on their
* own, but e.g. Windows Notepad will do it happily. */
if ((unsigned char) c == *bomptr) {
bomptr++;
continue;
} else {
/* Do not tolerate partial BOM. */
if (bomptr != utf8_bom)
break;
/* No BOM at file beginning. Cool. */
bomptr = NULL;
}
}
if (c == '\n') {
if (config_file_eof)
return 0;
comment = 0;
continue;
}
if (comment || isspace(c))
continue;
if (c == '#' || c == ';') {
comment = 1;
continue;
}
if (c == '[') {
baselen = get_base_var(var);
if (baselen <= 0)
break;
var[baselen++] = '.';
var[baselen] = 0;
continue;
}
if (!isalpha(c))
break;
var[baselen] = tolower(c);
if (get_value(fn, data, var, baselen+1) < 0)
break;
}
die("bad config file line %d in %s", config_linenr, config_file_name);
} | 0 | [
"CWE-94"
] | linux | aba8d056078e47350d85b06a9cabd5afcc4b72ea | 113,726,379,403,788,550,000,000,000,000,000,000,000 | 55 | perf tools: do not look at ./config for configuration
In addition to /etc/perfconfig and $HOME/.perfconfig, perf looks for
configuration in the file ./config, imitating git which looks at
$GIT_DIR/config. If ./config is not a perf configuration file, it
fails, or worse, treats it as a configuration file and changes behavior
in some unexpected way.
"config" is not an unusual name for a file to be lying around and perf
does not have a private directory dedicated for its own use, so let's
just stop looking for configuration in the cwd. Callers needing
context-sensitive configuration can use the PERF_CONFIG environment
variable.
Requested-by: Christian Ohm <[email protected]>
Cc: [email protected]
Cc: Ben Hutchings <[email protected]>
Cc: Christian Ohm <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Jonathan Nieder <[email protected]>
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]> |
has_trash_files (GMount *mount)
{
GList *dirs, *l;
GFile *dir;
gboolean res;
dirs = get_trash_dirs_for_mount (mount);
res = FALSE;
for (l = dirs; l != NULL; l = l->next)
{
dir = l->data;
if (dir_has_files (dir))
{
res = TRUE;
break;
}
}
g_list_free_full (dirs, g_object_unref);
return res;
} | 0 | [
"CWE-20"
] | nautilus | 1630f53481f445ada0a455e9979236d31a8d3bb0 | 26,574,806,076,724,130,000,000,000,000,000,000,000 | 25 | mime-actions: use file metadata for trusting desktop files
Currently we only trust desktop files that have the executable bit
set, and don't replace the displayed icon or the displayed name until
it's trusted, which prevents for running random programs by a malicious
desktop file.
However, the executable permission is preserved if the desktop file
comes from a compressed file.
To prevent this, add a metadata::trusted metadata to the file once the
user acknowledges the file as trusted. This adds metadata to the file,
which cannot be added unless it has access to the computer.
Also remove the SHEBANG "trusted" content we were putting inside the
desktop file, since that doesn't add more security since it can come
with the file itself.
https://bugzilla.gnome.org/show_bug.cgi?id=777991 |
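A hedged sketch of recording that acknowledgement via GIO — the attribute name comes from the message, while the stored value and the omitted error handling are simplifications:

#include <gio/gio.h>

static void
mark_desktop_file_trusted (GFile *file)
{
  /* Persist the user's decision as file metadata; the real code may
   * store a different value or report errors to the caller. */
  g_file_set_attribute_string (file, "metadata::trusted", "true",
                               G_FILE_QUERY_INFO_NONE, NULL, NULL);
}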
static void _reset_coll_ring(pmixp_coll_ring_ctx_t *coll_ctx)
{
pmixp_coll_t *coll = _ctx_get_coll(coll_ctx);
#ifdef PMIXP_COLL_DEBUG
PMIXP_DEBUG("%p: called", coll_ctx);
#endif
pmixp_coll_ring_ctx_sanity_check(coll_ctx);
coll_ctx->in_use = false;
coll_ctx->state = PMIXP_COLL_RING_SYNC;
coll_ctx->contrib_local = false;
coll_ctx->contrib_prev = 0;
coll_ctx->forward_cnt = 0;
coll->ts = time(NULL);
memset(coll_ctx->contrib_map, 0, sizeof(bool) * coll->peers_cnt);
coll_ctx->ring_buf = NULL;
} | 0 | [
"CWE-120"
] | slurm | c3142dd87e06621ff148791c3d2f298b5c0b3a81 | 62,601,032,533,090,620,000,000,000,000,000,000,000 | 16 | PMIx - fix potential buffer overflows from use of unpackmem().
CVE-2020-27745. |
int proc_setattr(struct dentry *dentry, struct iattr *attr)
{
int error;
struct inode *inode = d_inode(dentry);
if (attr->ia_valid & ATTR_MODE)
return -EPERM;
error = setattr_prepare(dentry, attr);
if (error)
return error;
setattr_copy(inode, attr);
mark_inode_dirty(inode);
return 0;
} | 0 | [
"CWE-119"
] | linux | 7f7ccc2ccc2e70c6054685f5e3522efa81556830 | 290,138,732,140,709,700,000,000,000,000,000,000,000 | 16 | proc: do not access cmdline nor environ from file-backed areas
proc_pid_cmdline_read() and environ_read() directly access the target
process' VM to retrieve the command line and environment. If this
process remaps these areas onto a file via mmap(), the requesting
process may experience various issues such as extra delays if the
underlying device is slow to respond.
Let's simply refuse to access file-backed areas in these functions.
For this we add a new FOLL_ANON gup flag that is passed to all calls
to access_remote_vm(). The code already takes care of such failures
(including unmapped areas). Accesses via /proc/pid/mem were not
changed though.
This was assigned CVE-2018-1120.
Note for stable backports: the patch may apply to kernels prior to 4.11
but silently miss one location; it must be checked that no call to
access_remote_vm() keeps zero as the last argument.
Reported-by: Qualys Security Advisory <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: [email protected]
Signed-off-by: Willy Tarreau <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]> |
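The shape of the check the message describes, as a hedged sketch — FOLL_ANON is the real flag name, but this helper is a simplified stand-in for the gup code, assuming kernel context (<linux/mm.h>):

static bool may_follow_vma(struct vm_area_struct *vma, unsigned int gup_flags)
{
	if ((gup_flags & FOLL_ANON) && !vma_is_anonymous(vma))
		return false;	/* refuse file-backed areas for cmdline/environ reads */
	return true;
}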
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
unsigned long old_cr0 = kvm_read_cr0(vcpu);
unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
X86_CR0_CD | X86_CR0_NW;
cr0 |= X86_CR0_ET;
#ifdef CONFIG_X86_64
if (cr0 & 0xffffffff00000000UL)
return 1;
#endif
cr0 &= ~CR0_RESERVED_BITS;
if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
return 1;
if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
return 1;
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
if ((vcpu->arch.efer & EFER_LME)) {
int cs_db, cs_l;
if (!is_pae(vcpu))
return 1;
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
if (cs_l)
return 1;
} else
#endif
if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
kvm_read_cr3(vcpu)))
return 1;
}
kvm_x86_ops->set_cr0(vcpu, cr0);
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
}
if ((cr0 ^ old_cr0) & update_bits)
kvm_mmu_reset_context(vcpu);
return 0;
} | 0 | [] | kvm | 0769c5de24621141c953fbe1f943582d37cb4244 | 272,132,524,705,528,900,000,000,000,000,000,000,000 | 49 | KVM: x86: extend "struct x86_emulate_ops" with "get_cpuid"
In order to be able to proceed checks on CPU-specific properties
within the emulator, function "get_cpuid" is introduced.
With "get_cpuid" it is possible to virtually call the guests
"cpuid"-opcode without changing the VM's context.
[mtosatti: cleanup/beautify code]
Signed-off-by: Stephan Baerwolf <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]> |
static int bond_release_and_destroy(struct net_device *bond_dev,
struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
int ret;
ret = bond_release(bond_dev, slave_dev);
if ((ret == 0) && (bond->slave_cnt == 0)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
pr_info("%s: destroying bond %s.\n",
bond_dev->name, bond_dev->name);
unregister_netdevice(bond_dev);
}
return ret;
} | 0 | [
"CWE-703",
"CWE-264"
] | linux | 550fd08c2cebad61c548def135f67aba284c6162 | 326,323,409,616,778,620,000,000,000,000,000,000,000 | 15 | net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, We are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs. There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearning the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <[email protected]>
CC: Karsten Keil <[email protected]>
CC: "David S. Miller" <[email protected]>
CC: Jay Vosburgh <[email protected]>
CC: Andy Gospodarek <[email protected]>
CC: Patrick McHardy <[email protected]>
CC: Krzysztof Halasa <[email protected]>
CC: "John W. Linville" <[email protected]>
CC: Greg Kroah-Hartman <[email protected]>
CC: Marcel Holtmann <[email protected]>
CC: Johannes Berg <[email protected]>
Signed-off-by: David S. Miller <[email protected]> |
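A hedged illustration of the fix-up pattern applied in the affected drivers — the flag name is real, but this setup function is a made-up example rather than one of the drivers actually patched (kernel context, <linux/etherdevice.h> assumed):

static void example_driver_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* This driver keeps state in its skbs, so shared-skb TX is unsafe. */
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
}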
CBINDInstallDlg::CBINDInstallDlg(CWnd* pParent /*=NULL*/)
: CDialog(CBINDInstallDlg::IDD, pParent) {
char winsys[MAX_PATH];
//{{AFX_DATA_INIT(CBINDInstallDlg)
m_targetDir = _T("");
m_version = _T("");
m_toolsOnly = FALSE;
m_autoStart = FALSE;
m_keepFiles = FALSE;
m_current = _T("");
m_startOnInstall = FALSE;
m_accountName = _T("");
m_accountPassword = _T("");
m_accountName = _T("");
//}}AFX_DATA_INIT
// Note that LoadIcon does not require a subsequent
// DestroyIcon in Win32
m_hIcon = AfxGetApp()->LoadIcon(IDR_MAINFRAME);
GetSystemDirectory(winsys, MAX_PATH);
m_winSysDir = winsys;
m_defaultDir = "notyetknown";
m_installed = FALSE;
m_accountExists = FALSE;
m_accountUsed = FALSE;
m_serviceExists = TRUE;
GetCurrentServiceAccountName();
m_currentAccount = m_accountName;
if (m_accountName == "") {
m_accountName = "named";
}
} | 0 | [
"CWE-284"
] | bind9 | 967a3b9419a3c12b8c0870c86d1ee3840bcbbad7 | 268,491,912,669,300,540,000,000,000,000,000,000,000 | 35 | [master] quote service registry paths
4532. [security] The BIND installer on Windows used an unquoted
service path, which can enable privilege escalation.
(CVE-2017-3141) [RT #45229] |
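Illustrative only — not the installer's actual code — of why the quoting matters: an image path containing spaces must be wrapped in quotes before it is handed to the Windows service registry.

#include <stdio.h>

static void build_image_path(char *out, size_t outlen, const char *bindir)
{
	/* Produces a quoted path such as "C:\Program Files\...\named.exe"
	 * (surrounding quotes included) instead of an unquoted one that
	 * Windows may try to resolve as C:\Program.exe. */
	snprintf(out, outlen, "\"%s\\named.exe\"", bindir);
}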
callbacks_screen2board(gdouble *X, gdouble *Y, gint x, gint y) {
/* make sure we don't divide by zero (which is possible if the gui
isn't displayed yet */
if ((screenRenderInfo.scaleFactorX > 0.001)||(screenRenderInfo.scaleFactorY > 0.001)) {
*X = screenRenderInfo.lowerLeftX + (x / screenRenderInfo.scaleFactorX);
*Y = screenRenderInfo.lowerLeftY + ((screenRenderInfo.displayHeight - y)
/ screenRenderInfo.scaleFactorY);
}
else {
*X = *Y = 0.0;
}
} | 0 | [
"CWE-200"
] | gerbv | 319a8af890e4d0a5c38e6d08f510da8eefc42537 | 92,009,855,270,123,420,000,000,000,000,000,000,000 | 13 | Remove local alias to parameter array
Normalizing access to `gerbv_simplified_amacro_t::parameter` as a step to fix CVE-2021-40402 |
const char *gnutls_alert_get_strname(gnutls_alert_description_t alert)
{
const gnutls_alert_entry *p;
for (p = sup_alerts; p->name != NULL; p++)
if (p->alert == alert)
return p->name;
return NULL;
} | 0 | [
"CWE-310"
] | gnutls | db9a7d810f9ee4c9cc49731f5fd9bdeae68d7eaa | 151,802,830,721,689,400,000,000,000,000,000,000,000 | 10 | handshake: check for TLS_FALLBACK_SCSV
If TLS_FALLBACK_SCSV was sent by the client during the handshake, and
the advertised protocol version is lower than GNUTLS_TLS_VERSION_MAX,
send the "Inappropriate fallback" fatal alert and abort the handshake.
This mechanism was defined in RFC7507. |
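A conceptual sketch of the RFC 7507 rule described above; the identifiers are simplified stand-ins, not the actual GnuTLS handshake internals:

/* Client sent TLS_FALLBACK_SCSV yet offered less than our best version:
 * treat it as a downgrade and let the caller send the fatal
 * "inappropriate fallback" alert. */
static int check_fallback_scsv(int scsv_present, int offered_version,
                               int max_supported_version)
{
	if (scsv_present && offered_version < max_supported_version)
		return -1;
	return 0;
}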
Supports_Condition_Obj Parser::parse_supports_negation()
{
if (!lex < kwd_not >()) return 0;
Supports_Condition_Obj cond = parse_supports_condition_in_parens();
return SASS_MEMORY_NEW(Supports_Negation, pstate, cond);
} | 0 | [
"CWE-125"
] | libsass | b3374e3fd1a0c3658644d2bad24e4a0ff2e0dcea | 174,723,620,695,925,440,000,000,000,000,000,000,000 | 6 | Fix handling of unclosed interpolant in url
Fixes #2661 |
static u32 rgb_48_to_32(char *val)
{
u32 res = 0x0;
u32 i;
for (i=0; i<3; i++) {
u32 v = val[2*i];
v<<=8;
v|=val[2*i + 1];
v/=0xFF;
res <<= 8;
res |= v;
}
return res;
} | 0 | [
"CWE-476"
] | gpac | d527325a9b72218612455a534a508f9e1753f76e | 27,647,456,885,427,606,000,000,000,000,000,000,000 | 16 | fixed #1768 |
PJ_DEF(void) pjmedia_rtcp_fini(pjmedia_rtcp_session *sess)
{
#if defined(PJMEDIA_HAS_RTCP_XR) && (PJMEDIA_HAS_RTCP_XR != 0)
pjmedia_rtcp_xr_fini(&sess->xr_session);
#else
/* Nothing to do. */
PJ_UNUSED_ARG(sess);
#endif
} | 0 | [
"CWE-125"
] | pjproject | 8b621f192cae14456ee0b0ade52ce6c6f258af1e | 218,010,322,374,271,800,000,000,000,000,000,000,000 | 9 | Merge pull request from GHSA-3qx3-cg72-wrh9 |
check_colorcolumn(win_T *wp)
{
char_u *s;
int col;
int count = 0;
int color_cols[256];
int i;
int j = 0;
if (wp->w_buffer == NULL)
return NULL; // buffer was closed
for (s = wp->w_p_cc; *s != NUL && count < 255;)
{
if (*s == '-' || *s == '+')
{
// -N and +N: add to 'textwidth'
col = (*s == '-') ? -1 : 1;
++s;
if (!VIM_ISDIGIT(*s))
return e_invalid_argument;
col = col * getdigits(&s);
if (wp->w_buffer->b_p_tw == 0)
goto skip; // 'textwidth' not set, skip this item
col += wp->w_buffer->b_p_tw;
if (col < 0)
goto skip;
}
else if (VIM_ISDIGIT(*s))
col = getdigits(&s);
else
return e_invalid_argument;
color_cols[count++] = col - 1; // 1-based to 0-based
skip:
if (*s == NUL)
break;
if (*s != ',')
return e_invalid_argument;
if (*++s == NUL)
return e_invalid_argument; // illegal trailing comma as in "set cc=80,"
}
vim_free(wp->w_p_cc_cols);
if (count == 0)
wp->w_p_cc_cols = NULL;
else
{
wp->w_p_cc_cols = ALLOC_MULT(int, count + 1);
if (wp->w_p_cc_cols != NULL)
{
// sort the columns for faster usage on screen redraw inside
// win_line()
qsort(color_cols, count, sizeof(int), int_cmp);
for (i = 0; i < count; ++i)
// skip duplicates
if (j == 0 || wp->w_p_cc_cols[j - 1] != color_cols[i])
wp->w_p_cc_cols[j++] = color_cols[i];
wp->w_p_cc_cols[j] = -1; // end marker
}
}
return NULL; // no error
} | 0 | [
"CWE-476"
] | vim | 0f6e28f686dbb59ab3b562408ab9b2234797b9b1 | 135,990,483,215,242,210,000,000,000,000,000,000,000 | 64 | patch 8.2.4428: crash when switching tabpage while in the cmdline window
Problem: Crash when switching tabpage while in the cmdline window.
Solution: Disallow switching tabpage when in the cmdline window. |
static bool encode_asq_control(void *mem_ctx, void *in, DATA_BLOB *out)
{
struct ldb_asq_control *lac = talloc_get_type(in, struct ldb_asq_control);
struct asn1_data *data = asn1_init(mem_ctx);
if (!data) return false;
if (!asn1_push_tag(data, ASN1_SEQUENCE(0))) {
return false;
}
if (lac->request) {
if (!asn1_write_OctetString(data, lac->source_attribute, lac->src_attr_len)) {
return false;
}
} else {
if (!asn1_write_enumerated(data, lac->result)) {
return false;
}
}
if (!asn1_pop_tag(data)) {
return false;
}
*out = data_blob_talloc(mem_ctx, data->data, data->length);
if (out->data == NULL) {
return false;
}
talloc_free(data);
return true;
} | 0 | [
"CWE-399"
] | samba | 530d50a1abdcdf4d1775652d4c456c1274d83d8d | 55,340,229,872,896,740,000,000,000,000,000,000,000 | 34 | CVE-2015-7540: s4: libcli: ldap message - Ensure all asn1_XX returns are checked.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=9187
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Ronnie Sahlberg <[email protected]>
Autobuild-User(master): Jeremy Allison <[email protected]>
Autobuild-Date(master): Fri Sep 26 03:15:00 CEST 2014 on sn-devel-104
(cherry picked from commit 69a7e3cfdc8dbba9c8dcfdfae82d2894c7247e15) |
static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
struct nfs_writeres *res)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (status)
goto out;
status = decode_sequence(xdr, &res->seq_res, rqstp);
if (status)
goto out;
status = decode_putfh(xdr);
if (status)
goto out;
status = decode_commit(xdr, res);
if (status)
goto out;
if (res->fattr)
decode_getfattr(xdr, res->fattr, res->server,
!RPC_IS_ASYNC(rqstp->rq_task));
out:
return status;
} | 0 | [
"CWE-703",
"CWE-189"
] | linux | bf118a342f10dafe44b14451a1392c3254629a1f | 242,287,751,696,439,200,000,000,000,000,000,000,000 | 24 | NFSv4: include bitmap in nfsv4 get acl data
The NFSv4 bitmap size is unbounded: a server can return an arbitrarily
sized bitmap in an FATTR4_WORD0_ACL request. Replace using the
nfs4_fattr_bitmap_maxsz as a guess at the maximum bitmask returned by a server
with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data
xdr length to the (cached) acl page data.
This is a general solution to commit e5012d1f "NFSv4.1: update
nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead
when getting ACLs.
Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr
was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved.
Cc: [email protected]
Signed-off-by: Andy Adamson <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]> |
String *Item::val_str(String *str, String *converter, CHARSET_INFO *cs)
{
String *res= val_str(str);
if (null_value)
return (String *) 0;
if (!cs)
return res;
uint errors;
if ((null_value= converter->copy(res->ptr(), res->length(),
collation.collation, cs, &errors)))
return (String *) 0;
return converter;
} | 0 | [] | server | b000e169562697aa072600695d4f0c0412f94f4f | 51,527,334,581,480,380,000,000,000,000,000,000,000 | 16 | Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST, COL), NAME_CONST('NAME', NULL))
based on:
commit f7316aa0c9a
Author: Ajo Robert <[email protected]>
Date: Thu Aug 24 17:03:21 2017 +0530
Bug#26361149 MYSQL SERVER CRASHES AT: COL IN(IFNULL(CONST,
COL), NAME_CONST('NAME', NULL))
Backport of Bug#19143243 fix.
NAME_CONST item can return NULL_ITEM type in case of incorrect arguments.
NULL_ITEM has special processing in Item_func_in function.
In Item_func_in::fix_length_and_dec an array of possible comparators is
created. Since NAME_CONST function has NULL_ITEM type, corresponding
array element is empty. Then NAME_CONST is wrapped to ITEM_CACHE.
ITEM_CACHE can not return proper type(NULL_ITEM) in Item_func_in::val_int(),
so the NULL_ITEM is attempted compared with an empty comparator.
The fix is to disable the caching of Item_name_const item. |
static int oidc_handle_discovery_response(request_rec *r, oidc_cfg *c) {
/* variables to hold the values returned in the response */
char *issuer = NULL, *target_link_uri = NULL, *login_hint = NULL,
*auth_request_params = NULL, *csrf_cookie, *csrf_query = NULL,
*user = NULL, *path_scopes;
oidc_provider_t *provider = NULL;
oidc_util_get_request_parameter(r, OIDC_DISC_OP_PARAM, &issuer);
oidc_util_get_request_parameter(r, OIDC_DISC_USER_PARAM, &user);
oidc_util_get_request_parameter(r, OIDC_DISC_RT_PARAM, &target_link_uri);
oidc_util_get_request_parameter(r, OIDC_DISC_LH_PARAM, &login_hint);
oidc_util_get_request_parameter(r, OIDC_DISC_SC_PARAM, &path_scopes);
oidc_util_get_request_parameter(r, OIDC_DISC_AR_PARAM,
&auth_request_params);
oidc_util_get_request_parameter(r, OIDC_CSRF_NAME, &csrf_query);
csrf_cookie = oidc_util_get_cookie(r, OIDC_CSRF_NAME);
/* do CSRF protection if not 3rd party initiated SSO */
if (csrf_cookie) {
/* clean CSRF cookie */
oidc_util_set_cookie(r, OIDC_CSRF_NAME, "", 0,
OIDC_COOKIE_EXT_SAME_SITE_NONE(r));
/* compare CSRF cookie value with query parameter value */
if ((csrf_query == NULL)
|| apr_strnatcmp(csrf_query, csrf_cookie) != 0) {
oidc_warn(r,
"CSRF protection failed, no Discovery and dynamic client registration will be allowed");
csrf_cookie = NULL;
}
}
// TODO: trim issuer/accountname/domain input and do more input validation
oidc_debug(r,
"issuer=\"%s\", target_link_uri=\"%s\", login_hint=\"%s\", user=\"%s\"",
issuer, target_link_uri, login_hint, user);
if (target_link_uri == NULL) {
if (c->default_sso_url == NULL) {
return oidc_util_html_send_error(r, c->error_template,
"Invalid Request",
"SSO to this module without specifying a \"target_link_uri\" parameter is not possible because " OIDCDefaultURL " is not set.",
HTTP_INTERNAL_SERVER_ERROR);
}
target_link_uri = c->default_sso_url;
}
/* do open redirect prevention */
if (oidc_target_link_uri_matches_configuration(r, c, target_link_uri)
== FALSE) {
return oidc_util_html_send_error(r, c->error_template,
"Invalid Request",
"\"target_link_uri\" parameter does not match configuration settings, aborting to prevent an open redirect.",
HTTP_UNAUTHORIZED);
}
/* see if this is a static setup */
if (c->metadata_dir == NULL) {
if ((oidc_provider_static_config(r, c, &provider) == TRUE)
&& (issuer != NULL)) {
if (apr_strnatcmp(provider->issuer, issuer) != 0) {
return oidc_util_html_send_error(r, c->error_template,
"Invalid Request",
apr_psprintf(r->pool,
"The \"iss\" value must match the configured providers' one (%s != %s).",
issuer, c->provider.issuer),
HTTP_INTERNAL_SERVER_ERROR);
}
}
return oidc_authenticate_user(r, c, NULL, target_link_uri, login_hint,
NULL, NULL, auth_request_params, path_scopes);
}
/* find out if the user entered an account name or selected an OP manually */
if (user != NULL) {
if (login_hint == NULL)
login_hint = apr_pstrdup(r->pool, user);
/* normalize the user identifier */
if (strstr(user, "https://") != user)
user = apr_psprintf(r->pool, "https://%s", user);
/* got an user identifier as input, perform OP discovery with that */
if (oidc_proto_url_based_discovery(r, c, user, &issuer) == FALSE) {
/* something did not work out, show a user facing error */
return oidc_util_html_send_error(r, c->error_template,
"Invalid Request",
"Could not resolve the provided user identifier to an OpenID Connect provider; check your syntax.",
HTTP_NOT_FOUND);
}
/* issuer is set now, so let's continue as planned */
} else if (strstr(issuer, OIDC_STR_AT) != NULL) {
if (login_hint == NULL) {
login_hint = apr_pstrdup(r->pool, issuer);
//char *p = strstr(issuer, OIDC_STR_AT);
//*p = '\0';
}
/* got an account name as input, perform OP discovery with that */
if (oidc_proto_account_based_discovery(r, c, issuer, &issuer)
== FALSE) {
/* something did not work out, show a user facing error */
return oidc_util_html_send_error(r, c->error_template,
"Invalid Request",
"Could not resolve the provided account name to an OpenID Connect provider; check your syntax.",
HTTP_NOT_FOUND);
}
/* issuer is set now, so let's continue as planned */
}
/* strip trailing '/' */
int n = strlen(issuer);
if (issuer[n - 1] == OIDC_CHAR_FORWARD_SLASH)
issuer[n - 1] = '\0';
/* try and get metadata from the metadata directories for the selected OP */
if ((oidc_metadata_get(r, c, issuer, &provider, csrf_cookie != NULL) == TRUE)
&& (provider != NULL)) {
/* now we've got a selected OP, send the user there to authenticate */
return oidc_authenticate_user(r, c, provider, target_link_uri,
login_hint, NULL, NULL, auth_request_params, path_scopes);
}
/* something went wrong */
return oidc_util_html_send_error(r, c->error_template, "Invalid Request",
"Could not find valid provider metadata for the selected OpenID Connect provider; contact the administrator",
HTTP_NOT_FOUND);
} | 1 | [
"CWE-601"
] | mod_auth_openidc | 03e6bfb446f4e3f27c003d30d6a433e5dd8e2b3d | 73,918,821,075,416,065,000,000,000,000,000,000,000 | 140 | apply OIDCRedirectURLsAllowed setting to target_link_uri
closes #672; thanks @Meheni
release 2.4.9.4
Signed-off-by: Hans Zandbelt <[email protected]> |
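For illustration, the directive named in the subject is an Apache configuration setting; a hypothetical entry such as

	OIDCRedirectURLsAllowed ^https://www\.example\.com/

would, after this change, also constrain which target_link_uri values the discovery handler above is willing to redirect to (the pattern shown is an example, not taken from the project).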