func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
/*
 * MMC READ DISC INFORMATION: fill @outbuf with a fixed response
 * describing a finalized, single-session disc.
 * Returns the payload size (34) on success, -1 on failure (non-CDROM
 * device, or a data type this emulation does not support).
 */
static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t data_type = r->req.cmd.buf[1] & 7;

    /* Only meaningful for CD/DVD devices.  */
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Data types 1 and 2 are only defined for Blu-Ray; reject them.  */
    if (data_type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;   /* length of remaining data */
    outbuf[2] = 0xe;  /* last session complete, disc finalized */
    outbuf[3] = 1;    /* first track on disc */
    outbuf[4] = 1;    /* # of sessions */
    outbuf[5] = 1;    /* first track of last session */
    outbuf[6] = 1;    /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */
    return 34;
}
| 0 |
[
"CWE-193"
] |
qemu
|
b3af7fdf9cc537f8f0dd3e2423d83f5c99a457e8
| 324,308,566,098,314,340,000,000,000,000,000,000,000 | 32 |
hw/scsi/scsi-disk: MODE_PAGE_ALLS not allowed in MODE SELECT commands
This avoids an off-by-one read of 'mode_sense_valid' buffer in
hw/scsi/scsi-disk.c:mode_sense_page().
Fixes: CVE-2021-3930
Cc: [email protected]
Reported-by: Alexander Bulekov <[email protected]>
Fixes: a8f4bbe2900 ("scsi-disk: store valid mode pages in a table")
Fixes: #546
Reported-by: Qiuhao Li <[email protected]>
Signed-off-by: Mauro Matteo Cascella <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
/*
 * Parse the Nikon MakerNote IFD.
 *
 * Walks every tag of the (possibly TIFF-wrapped, possibly encrypted)
 * MakerNote and extracts white balance multipliers, serial number, lens
 * data, crop info, thumbnail location etc. into imgdata.
 *
 * base       - file offset the IFD offsets are relative to
 * uptag      - parent tag, merged into the high 16 bits of each tag id
 * dng_writer - unused here; kept for interface compatibility
 *
 * FIX: the 0x0097 handler compared fread()'s return value against 324,
 * but fread(buf, 324, 1, f) returns the number of *items* read (1 on
 * success), so ColorBalanceData_ready could never become true and the
 * encrypted color balance data was silently ignored.  Compare with 1.
 * Also guard the LensData_buf malloc so an OOM cannot lead to fread()
 * writing through a NULL pointer.
 */
void CLASS parseNikonMakernote (int base, int uptag, unsigned dng_writer)
{
#define imn imgdata.makernotes.nikon
#define ilm imgdata.lens.makernotes
#define icWB imgdata.color.WB_Coeffs
  unsigned offset = 0, entries, tag, type, len, save;
  unsigned c, i;
  uchar *LensData_buf;
  uchar ColorBalanceData_buf[324];
  int ColorBalanceData_ready = 0;
  uchar ci, cj, ck;                 /* decryption key bytes */
  unsigned serial = 0;
  unsigned custom_serial = 0;
  unsigned LensData_len = 0;
  short morder, sorder = order;     /* remember global byte order */
  char buf[10];
  INT64 fsize = ifp->size();

  fread(buf, 1, 10, ifp);
  if (!strcmp(buf, "Nikon")) {
    /* TIFF-wrapped MakerNote: check version, then parse the mini-TIFF */
    if (buf[6] != '\2') return;
    base = ftell(ifp);
    order = get2();
    if (get2() != 42) goto quit;
    offset = get4();
    fseek(ifp, offset - 8, SEEK_CUR);
  } else {
    fseek(ifp, -10, SEEK_CUR);
  }

  entries = get2();
  if (entries > 1000) return;       /* sanity: reject absurd entry counts */
  morder = order;
  while (entries--) {
    order = morder;
    tiff_get(base, &tag, &type, &len, &save);
    INT64 pos = ifp->tell();
    if (len > 8 && pos + len > 2 * fsize) {
      fseek(ifp, save, SEEK_SET); // Recover tiff-read position!!
      continue;
    }
    tag |= uptag << 16;
    if (len > 100 * 1024 * 1024)
      goto next; // 100Mb tag? No!
    if (tag == 0x0002) {
      if (!iso_speed) iso_speed = (get2(), get2());
    } else if (tag == 0x000a) {
      /*
        B700, P330, P340, P6000, P7000, P7700, P7800
        E5000, E5400, E5700, E8700, E8800
      */
      ilm.LensMount = ilm.CameraMount = LIBRAW_MOUNT_FixedLens;
      ilm.FocalType = LIBRAW_FT_ZOOM;
    } else if (tag == 0x000c) {
      /*
        1 AW1, 1 J1, 1 J2, 1 J3, 1 J4, 1 J5, 1 S1, 1 S2, 1 V1, 1 V2, 1 V3
        D1, D1H, D1X, D2H, D2Xs, D3, D3S, D3X, D4, D4S, Df, D5
        D600, D610, D700, D750, D800, D800E, D810, D850
        D200, D300, D300S, D500
        D40, D40X, D60, D80, D90
        D3000, D3100, D3200, D3300, D3400
        D5000, D5100, D5200, D5300, D5500, D5600
        D7000, D7100, D7200, D7500
        B700, COOLPIX A, P330, P340, P7700, P7800
      */
      cam_mul[0] = getreal(type);
      cam_mul[2] = getreal(type);
      cam_mul[1] = getreal(type);
      cam_mul[3] = getreal(type);
    } else if (tag == 0x11) {
      if (is_raw) {
        fseek(ifp, get4() + base, SEEK_SET);
        parse_tiff_ifd(base);
      }
    } else if (tag == 0x0012) { /* flash exposure compensation */
      ci = fgetc(ifp);
      cj = fgetc(ifp);
      ck = fgetc(ifp);
      if (ck)
        imgdata.other.FlashEC = (float)(ci * cj) / (float)ck;
    } else if (tag == 0x0014) { /* Coolpix white balance blocks */
      if (type == 7) {
        if (len == 2560) { // E5400, E8400, E8700, E8800
          fseek(ifp, 1248, SEEK_CUR);
          order = 0x4d4d;
          cam_mul[0] = get2() / 256.0;
          cam_mul[2] = get2() / 256.0;
          cam_mul[1] = cam_mul[3] = 1.0;
          icWB[LIBRAW_WBI_Auto][0] = get2();
          icWB[LIBRAW_WBI_Auto][2] = get2();
          icWB[LIBRAW_WBI_Daylight][0] = get2();
          icWB[LIBRAW_WBI_Daylight][2] = get2();
          fseek (ifp, 24, SEEK_CUR);
          icWB[LIBRAW_WBI_Tungsten][0] = get2();
          icWB[LIBRAW_WBI_Tungsten][2] = get2();
          fseek (ifp, 24, SEEK_CUR);
          icWB[LIBRAW_WBI_FL_W][0] = get2();
          icWB[LIBRAW_WBI_FL_W][2] = get2();
          icWB[LIBRAW_WBI_FL_N][0] = get2();
          icWB[LIBRAW_WBI_FL_N][2] = get2();
          icWB[LIBRAW_WBI_FL_D][0] = get2();
          icWB[LIBRAW_WBI_FL_D][2] = get2();
          icWB[LIBRAW_WBI_Cloudy][0] = get2();
          icWB[LIBRAW_WBI_Cloudy][2] = get2();
          fseek (ifp, 24, SEEK_CUR);
          icWB[LIBRAW_WBI_Flash][0] = get2();
          icWB[LIBRAW_WBI_Flash][2] = get2();
          /* green channels are always 256 in these presets */
          icWB[LIBRAW_WBI_Auto][1] = icWB[LIBRAW_WBI_Auto][3] =
          icWB[LIBRAW_WBI_Daylight][1] = icWB[LIBRAW_WBI_Daylight][3] =
          icWB[LIBRAW_WBI_Tungsten][1] = icWB[LIBRAW_WBI_Tungsten][3] =
          icWB[LIBRAW_WBI_FL_W][1] = icWB[LIBRAW_WBI_FL_W][3] =
          icWB[LIBRAW_WBI_FL_N][1] = icWB[LIBRAW_WBI_FL_N][3] =
          icWB[LIBRAW_WBI_FL_D][1] = icWB[LIBRAW_WBI_FL_D][3] =
          icWB[LIBRAW_WBI_Cloudy][1] = icWB[LIBRAW_WBI_Cloudy][3] =
          icWB[LIBRAW_WBI_Flash][1] = icWB[LIBRAW_WBI_Flash][3] = 256;
          if (strncmp(model, "E8700", 5)) {
            fseek (ifp, 24, SEEK_CUR);
            icWB[LIBRAW_WBI_Shade][0] = get2();
            icWB[LIBRAW_WBI_Shade][2] = get2();
            icWB[LIBRAW_WBI_Shade][1] = icWB[LIBRAW_WBI_Shade][3] = 256;
          }
        } else if (len == 1280) { // E5000, E5700
          cam_mul[0] = cam_mul[1] = cam_mul[2] = cam_mul[3] = 1.0;
        } else {
          fread(buf, 1, 10, ifp);
          if (!strncmp(buf, "NRW ", 4)) { // P6000, P7000, P7100, B700, P1000
            if (!strcmp(buf + 4, "0100")) { // P6000
              fseek(ifp, 0x13de, SEEK_CUR);
              cam_mul[0] = get4() << 1;
              cam_mul[1] = get4();
              cam_mul[3] = get4();
              cam_mul[2] = get4() << 1;
              Nikon_NRW_WBtag (LIBRAW_WBI_Daylight, 0);
              Nikon_NRW_WBtag (LIBRAW_WBI_Cloudy, 0);
              fseek(ifp, 16, SEEK_CUR);
              Nikon_NRW_WBtag (LIBRAW_WBI_Tungsten, 0);
              Nikon_NRW_WBtag (LIBRAW_WBI_FL_W, 0);
              Nikon_NRW_WBtag (LIBRAW_WBI_Flash, 0);
              fseek(ifp, 16, SEEK_CUR);
              Nikon_NRW_WBtag (LIBRAW_WBI_Custom, 0);
              Nikon_NRW_WBtag (LIBRAW_WBI_Auto, 0);
            } else { // P7000, P7100, B700, P1000
              fseek(ifp, 0x16, SEEK_CUR);
              black = get2();
              fseek(ifp, 0x16, SEEK_CUR);
              cam_mul[0] = get4() << 1;
              cam_mul[1] = get4();
              cam_mul[3] = get4();
              cam_mul[2] = get4() << 1;
              Nikon_NRW_WBtag (LIBRAW_WBI_Daylight, 1);
              Nikon_NRW_WBtag (LIBRAW_WBI_Cloudy, 1);
              Nikon_NRW_WBtag (LIBRAW_WBI_Shade, 1);
              Nikon_NRW_WBtag (LIBRAW_WBI_Tungsten, 1);
              Nikon_NRW_WBtag (LIBRAW_WBI_FL_W, 1);
              Nikon_NRW_WBtag (LIBRAW_WBI_FL_N, 1);
              Nikon_NRW_WBtag (LIBRAW_WBI_FL_D, 1);
              Nikon_NRW_WBtag (LIBRAW_WBI_HT_Mercury, 1);
              fseek(ifp, 20, SEEK_CUR);
              Nikon_NRW_WBtag (LIBRAW_WBI_Custom, 1);
              Nikon_NRW_WBtag (LIBRAW_WBI_Auto, 1);
            }
          }
        }
      }
    } else if (tag == 0x001b) {
      imn.CropFormat = get2();
      FORC(6) imn.CropData[c] = get2(); /* box inside CropData ([2], [3]): upper left pixel (x,y), size (width,height) */
    } else if (tag == 0x001d) { // serial number
      if (len > 0) {
        while ((c = fgetc(ifp)) && (len-- > 0) && (c != EOF)) {
          /* non-digit serials trigger the "custom" decryption keys */
          if ((!custom_serial) && (!isdigit(c))) {
            if ((strbuflen(model) == 3) && (!strcmp(model, "D50"))) {
              custom_serial = 34;
            } else {
              custom_serial = 96;
            }
          }
          serial = serial * 10 + (isdigit(c) ? c - '0' : c % 10);
        }
        if (!imgdata.shootinginfo.BodySerial[0])
          sprintf(imgdata.shootinginfo.BodySerial, "%d", serial);
      }
    } else if (tag == 0x0025) {
      if (!iso_speed || (iso_speed == 65535)) {
        iso_speed = int(100.0 * libraw_powf64l(2.0f, double((uchar)fgetc(ifp)) / 12.0 - 5.0));
      }
    } else if (tag == 0x003b) { // all 1s for regular exposures
      imn.ME_WB[0] = getreal(type);
      imn.ME_WB[2] = getreal(type);
      imn.ME_WB[1] = getreal(type);
      imn.ME_WB[3] = getreal(type);
    } else if (tag == 0x03d) { // not corrected for file bitcount, to be patched in open_datastream
      FORC4 cblack[c ^ c >> 1] = get2();
      i = cblack[3];
      FORC3 if (i > cblack[c]) i = cblack[c];
      FORC4 cblack[c] -= i;     /* keep only per-channel deltas ... */
      black += i;               /* ... and fold the common part into black */
    } else if (tag == 0x0045) { /* box inside CropData ([2], [3]): upper left pixel (x,y), size (width,height) */
      imgdata.sizes.raw_crop.cleft = get2();
      imgdata.sizes.raw_crop.ctop = get2();
      imgdata.sizes.raw_crop.cwidth = get2();
      imgdata.sizes.raw_crop.cheight = get2();
    } else if (tag == 0x0082) { // lens attachment
      stmread(ilm.Attachment, len, ifp);
    } else if (tag == 0x0083) { // lens type
      imgdata.lens.nikon.NikonLensType = fgetc(ifp);
    } else if (tag == 0x0084) { // lens
      ilm.MinFocal = getreal(type);
      ilm.MaxFocal = getreal(type);
      ilm.MaxAp4MinFocal = getreal(type);
      ilm.MaxAp4MaxFocal = getreal(type);
    } else if (tag == 0x008b) { // lens f-stops
      ci = fgetc(ifp);
      cj = fgetc(ifp);
      ck = fgetc(ifp);
      if (ck) {
        imgdata.lens.nikon.NikonLensFStops = ci * cj * (12 / ck);
        ilm.LensFStops = (float)imgdata.lens.nikon.NikonLensFStops / 12.0f;
      }
    } else if ((tag == 0x008c) || (tag == 0x0096)) {
      meta_offset = ftell(ifp);
    } else if (tag == 0x0093) {
      imn.NEFCompression = i = get2();
      if ((i == 7) || (i == 9)) {
        ilm.LensMount = LIBRAW_MOUNT_FixedLens;
        ilm.CameraMount = LIBRAW_MOUNT_FixedLens;
      }
    } else if (tag == 0x0097) { // ver97: color balance block
      FORC4 imn.ColorBalanceVersion = imn.ColorBalanceVersion * 10 + fgetc(ifp) - '0';
      switch (imn.ColorBalanceVersion) {
      case 100:
        fseek(ifp, 68, SEEK_CUR);
        FORC4 cam_mul[(c >> 1) | ((c & 1) << 1)] = get2();
        break;
      case 102:
        fseek(ifp, 6, SEEK_CUR);
        FORC4 cam_mul[c ^ (c >> 1)] = get2();
        break;
      case 103:
        fseek(ifp, 16, SEEK_CUR);
        FORC4 cam_mul[c] = get2();
      }
      if (imn.ColorBalanceVersion >= 200) {
        if (imn.ColorBalanceVersion != 205) {
          fseek(ifp, 280, SEEK_CUR);
        }
        /* FIX: fread() returns the number of items read (1 here), not the
           byte count, so comparing against 324 was always false and the
           encrypted color balance data was never decoded. */
        ColorBalanceData_ready = (fread(ColorBalanceData_buf, 324, 1, ifp) == 1);
      }
      if ((imn.ColorBalanceVersion >= 400) &&
          (imn.ColorBalanceVersion <= 405)) { // 1 J1, 1 V1, 1 J2, 1 V2, 1 J3, 1 S1, 1 AW1, 1 S2, 1 J4, 1 V3, 1 J5
        ilm.CameraFormat = LIBRAW_FORMAT_1INCH;
        ilm.CameraMount = LIBRAW_MOUNT_Nikon_CX;
      } else if ((imn.ColorBalanceVersion >= 500) &&
                 (imn.ColorBalanceVersion <= 502)) { // P7700, P7800, P330, P340
        ilm.CameraMount = ilm.LensMount = LIBRAW_MOUNT_FixedLens;
        ilm.FocalType = LIBRAW_FT_ZOOM;
      } else if (imn.ColorBalanceVersion == 601) { // Coolpix A
        ilm.CameraFormat = ilm.LensFormat = LIBRAW_FORMAT_APSC;
        ilm.CameraMount = ilm.LensMount = LIBRAW_MOUNT_FixedLens;
        ilm.FocalType = LIBRAW_FT_FIXED;
      } else if (imn.ColorBalanceVersion == 800) { // "Z 7"
        ilm.CameraFormat = LIBRAW_FORMAT_FF;
        ilm.CameraMount = LIBRAW_MOUNT_Nikon_Z;
      }
    } else if (tag == 0x0098) { // contains lens data
      FORC4 imn.LensDataVersion = imn.LensDataVersion * 10 + fgetc(ifp) - '0';
      switch (imn.LensDataVersion) {
      case 100:
        LensData_len = 9;
        break;
      case 101:
      case 201: // encrypted, starting from v.201
      case 202:
      case 203:
        LensData_len = 15;
        break;
      case 204:
        LensData_len = 16;
        break;
      case 400:
        LensData_len = 459;
        break;
      case 401:
        LensData_len = 590;
        break;
      case 402:
        LensData_len = 509;
        break;
      case 403:
        LensData_len = 879;
        break;
      }
      if (LensData_len) {
        LensData_buf = (uchar *)malloc(LensData_len);
        if (LensData_buf) {
          fread(LensData_buf, LensData_len, 1, ifp);
        } else {
          /* FIX: on OOM, don't fread() through a NULL pointer; just
             skip the lens data for this file. */
          LensData_len = 0;
        }
      }
    } else if (tag == 0x00a0) {
      stmread(imgdata.shootinginfo.BodySerial, len, ifp);
    } else if (tag == 0x00a7) { // shutter count; also yields decryption key
      imn.NikonKey = fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp);
      if (custom_serial) {
        ci = xlat[0][custom_serial];
      } else {
        ci = xlat[0][serial & 0xff];
      }
      cj = xlat[1][imn.NikonKey];
      ck = 0x60;
      /* decrypt and decode the pending color balance block (v200..217) */
      if (((unsigned)(imn.ColorBalanceVersion - 200) < 18) && ColorBalanceData_ready) {
        for (i = 0; i < 324; i++)
          ColorBalanceData_buf[i] ^= (cj += ci * ck++);
        i = "66666>666;6A;:;555"[imn.ColorBalanceVersion - 200] - '0';
        FORC4 cam_mul[c ^ (c >> 1) ^ (i & 1)] = sget2(ColorBalanceData_buf + (i & -2) + c * 2);
      }
      /* decrypt (if needed) and process the pending lens data block */
      if (LensData_len) {
        if (imn.LensDataVersion > 200) {
          for (i = 0; i < LensData_len; i++) {
            LensData_buf[i] ^= (cj += ci * ck++);
          }
        }
        processNikonLensData(LensData_buf, LensData_len);
        LensData_len = 0;
        free(LensData_buf);
      }
    } else if (tag == 0x00a8) { // contains flash data
      FORC4 imn.FlashInfoVersion = imn.FlashInfoVersion * 10 + fgetc(ifp) - '0';
    } else if (tag == 0x00b0) {
      get4(); // ME tag version, 4 symbols
      imn.ExposureMode = get4();
      imn.nMEshots = get4();
      imn.MEgainOn = get4();
    } else if (tag == 0x00b9) {
      imn.AFFineTune = fgetc(ifp);
      imn.AFFineTuneIndex = fgetc(ifp);
      imn.AFFineTuneAdj = (int8_t)fgetc(ifp);
    } else if ((tag == 0x100) && (type == 7 )) {
      thumb_offset = ftell(ifp);
      thumb_length = len;
    } else if (tag == 0x0e01) { /* Nikon Software / in-camera edit Note */
      int loopc = 0;
      int WhiteBalanceAdj_active = 0;
      order = 0x4949;
      fseek(ifp, 22, SEEK_CUR);
      for (offset = 22; offset + 22 < len; offset += 22 + i) {
        if (loopc++ > 1024)
          throw LIBRAW_EXCEPTION_IO_CORRUPT;   /* guard against loops */
        tag = get4();
        fseek(ifp, 14, SEEK_CUR);
        i = get4() - 4;
        if (tag == 0x76a43204) {
          WhiteBalanceAdj_active = fgetc(ifp);
        } else if (tag == 0xbf3c6c20) {
          if (WhiteBalanceAdj_active) {
            cam_mul[0] = getreal(12);
            cam_mul[2] = getreal(12);
            cam_mul[1] = cam_mul[3] = 1.0;
            i -= 16;
          }
          fseek(ifp, i, SEEK_CUR);
        } else if (tag == 0x76a43207) {
          flip = get2();
        } else {
          fseek(ifp, i, SEEK_CUR);
        }
      }
    } else if (tag == 0x0e22) {
      FORC4 imn.NEFBitDepth[c] = get2();
    }
  next:
    fseek(ifp, save, SEEK_SET);
  }
quit:
  order = sorder;   /* restore global byte order */
#undef icWB
#undef ilm
#undef imn
}
| 0 |
[
"CWE-400"
] |
LibRaw
|
e67a9862d10ebaa97712f532eca1eb5e2e410a22
| 323,702,800,896,415,800,000,000,000,000,000,000,000 | 419 |
Fixed Secunia Advisory SA86384
- possible infinite loop in unpacked_load_raw()
- possible infinite loop in parse_rollei()
- possible infinite loop in parse_sinar_ia()
Credits: Laurent Delosieres, Secunia Research at Flexera
|
/* Convert an array of floats to TIFF RATIONAL values (numerator/
 * denominator pairs of uint32) and write them as a directory entry.
 * Non-positive and NaN inputs become 0/1; values are otherwise
 * approximated with the best fitting 32-bit fraction.
 * Returns 0 on failure (out of memory or write error). */
TIFFWriteDirectoryTagCheckedRationalArray(TIFF* tif, uint32* ndir, TIFFDirEntry* dir, uint16 tag, uint32 count, float* value)
{
	static const char module[] = "TIFFWriteDirectoryTagCheckedRationalArray";
	uint32* pairs;
	uint32 idx;
	int ok;
	assert(sizeof(uint32)==4);
	pairs=_TIFFmalloc(count*2*sizeof(uint32));
	if (pairs==NULL)
	{
		TIFFErrorExt(tif->tif_clientdata,module,"Out of memory");
		return(0);
	}
	for (idx=0; idx<count; idx++)
	{
		float v=value[idx];
		uint32* out=pairs+2*idx;
		if (v<=0.0 || v!=v)
		{
			/* non-positive or NaN: encode as 0/1 */
			out[0]=0;
			out[1]=1;
		}
		else if (v >= 0 && v <= (float)0xFFFFFFFFU &&
		         v==(float)(uint32)(v))
		{
			/* exact non-negative integer */
			out[0]=(uint32)(v);
			out[1]=1;
		}
		else if (v<1.0)
		{
			/* proper fraction: scale up the numerator */
			out[0]=(uint32)((double)(v)*0xFFFFFFFF);
			out[1]=0xFFFFFFFF;
		}
		else
		{
			/* >= 1: scale down the denominator */
			out[0]=0xFFFFFFFF;
			out[1]=(uint32)((double)0xFFFFFFFF/(v));
		}
	}
	if (tif->tif_flags&TIFF_SWAB)
		TIFFSwabArrayOfLong(pairs,count*2);
	ok=TIFFWriteDirectoryTagData(tif,ndir,dir,tag,TIFF_RATIONAL,count,count*8,&pairs[0]);
	_TIFFfree(pairs);
	return(ok);
}
| 0 |
[
"CWE-617"
] |
libtiff
|
de144fd228e4be8aa484c3caf3d814b6fa88c6d9
| 307,577,244,098,367,540,000,000,000,000,000,000,000 | 45 |
TIFFWriteDirectorySec: avoid assertion. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2795. CVE-2018-10963
|
/*
 * Validate one iptables rule entry: allocate its per-cpu counters, run
 * the checkentry hook of every match, then resolve and check the target.
 * Returns 0 on success or a negative errno.  On failure, every match
 * that was already checked is cleaned up (in order, stopping after j of
 * them) and the counters are released again.
 */
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size,
		 struct xt_percpu_counter_alloc_state *alloc_state)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;		/* count of successfully checked matches */
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	/* per-rule packet/byte counters */
	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
		return -ENOMEM;

	j = 0;
	/* common checkentry parameters shared by all matches of this rule */
	memset(&mtpar, 0, sizeof(mtpar));
	mtpar.net = net;
	mtpar.table = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	/* looks up (and takes a module reference on) the target */
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	/* drop the reference taken by xt_request_find_target() */
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* undo checkentry for the first j matches only */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(&e->counters);

	return ret;
}
| 0 |
[
"CWE-787"
] |
linux
|
b29c457a6511435960115c0f548c4360d5f4801d
| 305,118,562,189,642,440,000,000,000,000,000,000,000 | 55 |
netfilter: x_tables: fix compat match/target pad out-of-bound write
xt_compat_match/target_from_user doesn't check that zeroing the area
to start of next rule won't write past end of allocated ruleset blob.
Remove this code and zero the entire blob beforehand.
Reported-by: [email protected]
Reported-by: Andy Nguyen <[email protected]>
Fixes: 9fa492cdc160c ("[NETFILTER]: x_tables: simplify compat API")
Signed-off-by: Florian Westphal <[email protected]>
Signed-off-by: Pablo Neira Ayuso <[email protected]>
|
sub_points_edwards (mpi_point_t result,
                    mpi_point_t p1, mpi_point_t p2,
                    mpi_ec_t ctx)
{
  /* RESULT = P1 - P2, computed as P1 + (-P2).  On (twisted) Edwards
     curves the negation of (x, y) is (-x, y), so it suffices to replace
     the X coordinate of a copy of P2 by p - x before adding.  */
  mpi_point_t neg_p2 = _gcry_mpi_point_new (0);

  point_set (neg_p2, p2);
  mpi_sub (neg_p2->x, ctx->p, neg_p2->x);   /* x := p - x = -x (mod p) */
  add_points_edwards (result, p1, neg_p2, ctx);
  _gcry_mpi_point_release (neg_p2);
}
| 0 |
[
"CWE-200"
] |
libgcrypt
|
bf76acbf0da6b0f245e491bec12c0f0a1b5be7c9
| 157,809,037,509,498,100,000,000,000,000,000,000,000 | 10 |
ecc: Add input validation for X25519.
* cipher/ecc.c (ecc_decrypt_raw): Add input validation.
* mpi/ec.c (ec_p_init): Use scratch buffer for bad points.
(_gcry_mpi_ec_bad_point): New.
--
Following is the paper describing the attack:
May the Fourth Be With You: A Microarchitectural Side Channel Attack
on Real-World Applications of Curve25519
by Daniel Genkin, Luke Valenta, and Yuval Yarom
In the current implementation, we do output checking and it results an
error for those bad points. However, when attacked, the computation
will done with leak of private key, even it will results errors. To
mitigate leak, we added input validation.
Note that we only list bad points with MSB=0. By X25519, MSB is
always cleared.
In future, we should implement constant-time field computation. Then,
this input validation could be removed, if performance is important
and we are sure for no leak.
CVE-id: CVE-2017-0379
Signed-off-by: NIIBE Yutaka <[email protected]>
|
// Whether parsing has consumed the entire segment.  With a known segment
// size we compare the parse position against the segment's end; with an
// unknown size (m_size < 0) we fall back to the reader's total length,
// where a negative total is treated as a live stream that is never done
// and a reader error forces us to assume we are done.
bool Segment::DoneParsing() const {
  if (m_size >= 0) {
    const long long stop = m_start + m_size;
    return (m_pos >= stop);
  }

  long long total, avail;
  const int status = m_pReader->Length(&total, &avail);

  if (status < 0)  // error
    return true;   // must assume done

  if (total < 0)
    return false;  // assume live stream

  return (m_pos >= total);
}
| 0 |
[
"CWE-20"
] |
libvpx
|
34d54b04e98dd0bac32e9aab0fbda0bf501bc742
| 185,648,541,585,410,960,000,000,000,000,000,000,000 | 19 |
update libwebm to libwebm-1.0.0.27-358-gdbf1d10
changelog:
https://chromium.googlesource.com/webm/libwebm/+log/libwebm-1.0.0.27-351-g9f23fbc..libwebm-1.0.0.27-358-gdbf1d10
Change-Id: I28a6b3ae02a53fb1f2029eee11e9449afb94c8e3
|
/* Parse an ABC note-length specifier "<mult>[/<div>]" starting at p.
 * dur_u is the unit note length.  The computed length is stored in
 * *p_len (falling back to dur_u on malformed input, with a syntax
 * diagnostic).  Returns the position just past the specifier. */
static char *parse_len(char *p,
		       int dur_u,
		       int *p_len)
{
	char *endp;
	int length, bad;

	bad = 0;
	length = dur_u;

	/* optional multiplier */
	if (isdigit((unsigned char) *p)) {
		length *= strtol(p, &endp, 10);
		if (length <= 0 || length > 10000) {
			syntax("Bad length", p);
			length = dur_u;
		}
		p = endp;
	}

	if (*p != '/') {		/* no divisor part */
		*p_len = length;
		return p;
	}

	if (isdigit((unsigned char) p[1])) {
		/* explicit divisor: must be a non-zero power of two */
		int divisor = strtol(p + 1, &endp, 10);

		p = endp;
		if (divisor == 0 || (divisor & (divisor - 1)))
			bad = 1;
		else
			length /= divisor;
	} else {
		/* each bare '/' halves the length; halving an odd
		 * length would lose precision, so it is an error */
		while (*p == '/') {
			if (length & 1)
				bad = 1;
			length /= 2;
			p++;
		}
	}
	if (bad || !length) {
		syntax("Bad length divisor", p - 1);
		length = dur_u;
	}
	*p_len = length;
	return p;
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
abcm2ps
|
3169ace6d63f6f517a64e8df0298f44a490c4a15
| 25,863,051,012,955,903,000,000,000,000,000,000,000 | 43 |
fix: crash when accidental without a note at start of line after K:
Issue #84.
|
/**
 * Free a seccomp filter DB
 * @param db the filter DB, may be NULL (in which case this is a no-op)
 *
 * Clears the DB contents via _db_reset() and then frees the container.
 */
static void _db_release(struct db_filter *db)
{
	if (db != NULL) {
		_db_reset(db);
		free(db);
	}
}
| 0 |
[] |
libseccomp
|
c5bf78de480b32b324e0f511c88ce533ed280b37
| 291,079,944,181,159,770,000,000,000,000,000,000,000 | 9 |
db: fix 64-bit argument comparisons
Our approach to doing 64-bit comparisons using 32-bit operators was
just plain wrong, leading to a number of potential problems with
filters that used the LT, GT, LE, or GE operators. This patch fixes
this problem and a few other related issues that came to light in
the course of fixing the core problem.
A special thanks to Jann Horn for bringing this problem to our
attention.
Signed-off-by: Paul Moore <[email protected]>
|
/*
 * Send a compatibility-mode switch sequence to a Logitech wheel.
 * The sequence consists of s->cmd_count commands of 7 bytes each,
 * packed back to back in s->cmd; each one is copied into the output
 * report and submitted as a SET_REPORT request.
 * Returns 0 on success, -EINVAL if the driver's private data or device
 * properties are missing.
 */
static int lg4ff_switch_compatibility_mode(struct hid_device *hid, const struct lg4ff_compat_mode_switch *s)
{
	struct lg4ff_device_entry *entry;
	struct lg_drv_data *drv_data;
	unsigned long flags;
	s32 *value;
	u8 i;

	drv_data = hid_get_drvdata(hid);
	if (!drv_data) {
		hid_err(hid, "Private driver data not found!\n");
		return -EINVAL;
	}

	entry = drv_data->device_props;
	if (!entry) {
		hid_err(hid, "Device properties not found!\n");
		return -EINVAL;
	}
	/* NOTE(review): assumes entry->report has field[0] with at least 7
	 * value slots -- confirm this is guaranteed at probe time. */
	value = entry->report->field[0]->value;

	/* serialize access to the shared output report buffer */
	spin_lock_irqsave(&entry->report_lock, flags);
	for (i = 0; i < s->cmd_count; i++) {
		u8 j;

		/* each command occupies 7 consecutive bytes of s->cmd */
		for (j = 0; j < 7; j++)
			value[j] = s->cmd[j + (7*i)];

		hid_hw_request(hid, entry->report, HID_REQ_SET_REPORT);
	}
	spin_unlock_irqrestore(&entry->report_lock, flags);
	/* flush pending transfers before returning */
	hid_hw_wait(hid);
	return 0;
}
| 0 |
[
"CWE-787"
] |
linux
|
d9d4b1e46d9543a82c23f6df03f4ad697dab361b
| 29,923,371,217,299,944,000,000,000,000,000,000,000 | 34 |
HID: Fix assumption that devices have inputs
The syzbot fuzzer found a slab-out-of-bounds write bug in the hid-gaff
driver. The problem is caused by the driver's assumption that the
device must have an input report. While this will be true for all
normal HID input devices, a suitably malicious device can violate the
assumption.
The same assumption is present in over a dozen other HID drivers.
This patch fixes them by checking that the list of hid_inputs for the
hid_device is nonempty before allowing it to be used.
Reported-and-tested-by: [email protected]
Signed-off-by: Alan Stern <[email protected]>
CC: <[email protected]>
Signed-off-by: Benjamin Tissoires <[email protected]>
|
/*
 * Thread (event) handler for a socket error on a BGP peer connection.
 * The event code is carried in the thread's value and is forwarded to
 * the peer FSM via bgp_event_update() after recording why the session
 * went down, preserving graceful-restart (NSF) state when negotiated.
 */
void bgp_packet_process_error(struct thread *thread)
{
	struct peer *peer;
	int code;

	peer = THREAD_ARG(thread);
	code = THREAD_VAL(thread);

	if (bgp_debug_neighbor_events(peer))
		zlog_debug("%s [Event] BGP error %d on fd %d",
			   peer->host, code, peer->fd);

	/* Closed connection or error on the socket */
	if (peer_established(peer)) {
		/* With graceful restart (or helper mode) configured and NSF
		 * negotiated, flag the peer as waiting for NSF rather than
		 * recording a plain session close. */
		if ((CHECK_FLAG(peer->flags, PEER_FLAG_GRACEFUL_RESTART)
		     || CHECK_FLAG(peer->flags,
				   PEER_FLAG_GRACEFUL_RESTART_HELPER))
		    && CHECK_FLAG(peer->sflags, PEER_STATUS_NSF_MODE)) {
			peer->last_reset = PEER_DOWN_NSF_CLOSE_SESSION;
			SET_FLAG(peer->sflags, PEER_STATUS_NSF_WAIT);
		} else
			peer->last_reset = PEER_DOWN_CLOSE_SESSION;
	}

	/* hand the error code to the FSM */
	bgp_event_update(peer, code);
}
| 0 |
[
"CWE-125"
] |
frr
|
ff6db1027f8f36df657ff2e5ea167773752537ed
| 97,605,940,547,752,080,000,000,000,000,000,000,000 | 26 |
bgpd: Make sure hdr length is at a minimum of what is expected
Ensure that if the capability length specified is enough data.
Signed-off-by: Donald Sharp <[email protected]>
|
/* Next SELECT_LEX in the chain: downcasts the generic `next` link to
   st_select_lex (presumably NULL at the end of the list -- verify). */
st_select_lex* next_select() { return (st_select_lex*) next; }
| 0 |
[
"CWE-703"
] |
server
|
39feab3cd31b5414aa9b428eaba915c251ac34a2
| 159,915,932,284,248,180,000,000,000,000,000,000,000 | 1 |
MDEV-26412 Server crash in Item_field::fix_outer_field for INSERT SELECT
IF an INSERT/REPLACE SELECT statement contained an ON expression in the top
level select and this expression used a subquery with a column reference
that could not be resolved then an attempt to resolve this reference as
an outer reference caused a crash of the server. This happened because the
outer context field in the Name_resolution_context structure was not set
to NULL for such references. Rather it pointed to the first element in
the select_stack.
Note that starting from 10.4 we cannot use the SELECT_LEX::outer_select()
method when parsing a SELECT construct.
Approved by Oleksandr Byelkin <[email protected]>
|
// Flip the sign of this quantity by toggling the negative bit in flags.
void DecimalQuantity::negate() {
    flags ^= NEGATIVE_FLAG;
}
| 0 |
[
"CWE-190"
] |
icu
|
53d8c8f3d181d87a6aa925b449b51c4a2c922a51
| 132,882,564,540,879,610,000,000,000,000,000,000,000 | 3 |
ICU-20246 Fixing another integer overflow in number parsing.
|
/*
 * hrtimer callback for a qdisc watchdog: clear the qdisc's throttled
 * flag and reschedule the root qdisc so that dequeueing resumes.
 * The timer is one-shot (HRTIMER_NORESTART); it is not re-armed here.
 */
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	/* recover the watchdog from its embedded hrtimer */
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	wd->qdisc->flags &= ~TCQ_F_THROTTLED;	/* un-throttle */
	__netif_schedule(qdisc_root(wd->qdisc));	/* kick transmission */
	return HRTIMER_NORESTART;
}
| 0 |
[
"CWE-909"
] |
linux-2.6
|
16ebb5e0b36ceadc8186f71d68b0c4fa4b6e781b
| 111,984,305,308,909,900,000,000,000,000,000,000,000 | 10 |
tc: Fix unitialized kernel memory leak
Three bytes of uninitialized kernel memory are currently leaked to user
Signed-off-by: Eric Dumazet <[email protected]>
Reviewed-by: Jiri Pirko <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
/*
 * Read the channel's completion writeback word and convert it to the
 * physical address of the last completed descriptor.
 */
static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
{
	u64 phys_complete;
	u64 completion;

	/* hardware-updated completion status word */
	completion = *chan->completion;

	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}
| 0 |
[] |
linux
|
7bced397510ab569d31de4c70b39e13355046387
| 115,870,079,423,230,930,000,000,000,000,000,000,000 | 13 |
net_dma: simple removal
Per commit "77873803363c net_dma: mark broken" net_dma is no longer used
and there is no plan to fix it.
This is the mechanical removal of bits in CONFIG_NET_DMA ifdef guards.
Reverting the remainder of the net_dma induced changes is deferred to
subsequent patches.
Marked for stable due to Roman's report of a memory leak in
dma_pin_iovec_pages():
https://lkml.org/lkml/2014/9/3/177
Cc: Dave Jiang <[email protected]>
Cc: Vinod Koul <[email protected]>
Cc: David Whipple <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: <[email protected]>
Reported-by: Roman Gushchin <[email protected]>
Acked-by: David S. Miller <[email protected]>
Signed-off-by: Dan Williams <[email protected]>
|
// Open the remote file for block-wise access.  Resets the IO position,
// then (on first open) either downloads the whole body eagerly -- when
// the server does not report a content length -- and fills the block
// map from it, or merely allocates an empty block map sized from the
// reported length, leaving blocks to be fetched on demand.
// Returns 0 on success; throws Error when the remote file is empty.
int RemoteIo::open()
{
    close(); // reset the IO position
    bigBlock_ = nullptr;
    if (p_->isMalloced_ == false) {
        long length = p_->getFileLength();
        if (length < 0) { // unable to get the length of remote file, get the whole file content.
            std::string data;
            p_->getDataByRange(-1, -1, data);
            p_->size_ = data.length();
            // number of fixed-size blocks needed to cover size_ (rounded up)
            size_t nBlocks = (p_->size_ + p_->blockSize_ - 1) / p_->blockSize_;
            p_->blocksMap_ = new BlockMap[nBlocks];
            p_->isMalloced_ = true;
            byte* source = (byte*)data.c_str();
            size_t remain = p_->size_, iBlock = 0, totalRead = 0;
            // copy the downloaded body into the block map, one block at a time
            while (remain) {
                size_t allow = EXV_MIN(remain, p_->blockSize_);
                p_->blocksMap_[iBlock].populate(&source[totalRead], allow);
                remain -= allow;
                totalRead += allow;
                iBlock++;
            }
        } else if (length == 0) { // file is empty
            throw Error(kerErrorMessage, "the file length is 0");
        } else {
            p_->size_ = (size_t) length;
            size_t nBlocks = (p_->size_ + p_->blockSize_ - 1) / p_->blockSize_;
            p_->blocksMap_ = new BlockMap[nBlocks]; // blocks fetched lazily
            p_->isMalloced_ = true;
        }
    }
    return 0; // means OK
}
| 0 |
[
"CWE-190"
] |
exiv2
|
c73d1e27198a389ce7caf52ac30f8e2120acdafd
| 299,842,676,596,675,150,000,000,000,000,000,000,000 | 33 |
Avoid negative integer overflow when `filesize < io_->tell()`.
This fixes #791.
|
/*
 * Fill a bpf_link_info for BPF_OBJ_GET_INFO_BY_FD on a link fd.
 * Copies in the (possibly shorter) user-supplied struct, fills the
 * common fields, lets the link type add its own details through
 * fill_link_info, then copies the result and the written length back
 * to user space.  Returns 0 or a negative errno.
 */
static int bpf_link_get_info_by_fd(struct file *file,
				   struct bpf_link *link,
				   const union bpf_attr *attr,
				   union bpf_attr __user *uattr)
{
	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
	struct bpf_link_info info;
	u32 info_len = attr->info.info_len;
	int err;

	/* a user buffer larger than the kernel struct must be zero-padded */
	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
	if (err)
		return err;
	info_len = min_t(u32, sizeof(info), info_len);

	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_len))
		return -EFAULT;

	info.type = link->type;
	info.id = link->id;
	info.prog_id = link->prog->aux->id;

	/* link-type-specific fields */
	if (link->ops->fill_link_info) {
		err = link->ops->fill_link_info(link, &info);
		if (err)
			return err;
	}

	if (copy_to_user(uinfo, &info, info_len) ||
	    put_user(info_len, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}
| 0 |
[
"CWE-307"
] |
linux
|
350a5c4dd2452ea999cc5e1d4a8dbf12de2f97ef
| 259,977,006,152,993,120,000,000,000,000,000,000,000 | 35 |
bpf: Dont allow vmlinux BTF to be used in map_create and prog_load.
The syzbot got FD of vmlinux BTF and passed it into map_create which caused
crash in btf_type_id_size() when it tried to access resolved_ids. The vmlinux
BTF doesn't have 'resolved_ids' and 'resolved_sizes' initialized to save
memory. To avoid such issues disallow using vmlinux BTF in prog_load and
map_create commands.
Fixes: 5329722057d4 ("bpf: Assign ID to vmlinux BTF and return extra info for BTF in GET_OBJ_INFO")
Reported-by: [email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Yonghong Song <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
|
/*
 * Test driver for BN_mod_exp and its Montgomery variants.  Reads A, E,
 * M and the expected result ModExp from the test stanza, checks the
 * generic code path and (for odd moduli only, as required by Montgomery
 * arithmetic) the mont/mont_consttime paths, then runs a fixed
 * regression case for a carry-propagation bug in sqr8x_reduction.
 * Returns 1 on success, 0 on failure.
 */
static int file_modexp(STANZA *s)
{
    BIGNUM *a = NULL, *e = NULL, *m = NULL, *mod_exp = NULL, *ret = NULL;
    BIGNUM *b = NULL, *c = NULL, *d = NULL;
    int st = 0;

    if (!TEST_ptr(a = getBN(s, "A"))
            || !TEST_ptr(e = getBN(s, "E"))
            || !TEST_ptr(m = getBN(s, "M"))
            || !TEST_ptr(mod_exp = getBN(s, "ModExp"))
            || !TEST_ptr(ret = BN_new())
            || !TEST_ptr(d = BN_new()))
        goto err;

    if (!TEST_true(BN_mod_exp(ret, a, e, m, ctx))
            || !equalBN("A ^ E (mod M)", mod_exp, ret))
        goto err;

    /* Montgomery paths require an odd modulus. */
    if (BN_is_odd(m)) {
        if (!TEST_true(BN_mod_exp_mont(ret, a, e, m, ctx, NULL))
                || !equalBN("A ^ E (mod M) (mont)", mod_exp, ret)
                || !TEST_true(BN_mod_exp_mont_consttime(ret, a, e, m,
                                                        ctx, NULL))
                || !equalBN("A ^ E (mod M) (mont const", mod_exp, ret))
            goto err;
    }

    /* Regression test for carry propagation bug in sqr8x_reduction */
    BN_hex2bn(&a, "050505050505");
    BN_hex2bn(&b, "02");
    BN_hex2bn(&c,
        "4141414141414141414141274141414141414141414141414141414141414141"
        "4141414141414141414141414141414141414141414141414141414141414141"
        "4141414141414141414141800000000000000000000000000000000000000000"
        "0000000000000000000000000000000000000000000000000000000000000000"
        "0000000000000000000000000000000000000000000000000000000000000000"
        "0000000000000000000000000000000000000000000000000001");
    /* a^2 mod c must equal a*a since a*a < c; e is reused as scratch */
    if (!TEST_true(BN_mod_exp(d, a, b, c, ctx))
            || !TEST_true(BN_mul(e, a, a, ctx))
            || !TEST_BN_eq(d, e))
        goto err;

    st = 1;
 err:
    BN_free(a);
    BN_free(b);
    BN_free(c);
    BN_free(d);
    BN_free(e);
    BN_free(m);
    BN_free(mod_exp);
    BN_free(ret);
    return st;
}
| 0 |
[] |
openssl
|
336923c0c8d705cb8af5216b29a205662db0d590
| 184,789,079,071,561,900,000,000,000,000,000,000,000 | 54 |
Fix a carry overflow bug in bn_sqr_comba4/8 for mips 32-bit targets
bn_sqr_comba8 does for instance compute a wrong result for the value:
a=0x4aaac919 62056c84 fba7334e 1a6be678 022181ba fd3aa878 899b2346 ee210f45
The correct result is:
r=0x15c72e32 605a3061 d11b1012 3c187483 6df96999 bd0c22ba d3e7d437 4724a82f
912c5e61 6a187efe 8f7c47fc f6945fe5 75be8e3d 97ed17d4 7950b465 3cb32899
but the actual result was:
r=0x15c72e32 605a3061 d11b1012 3c187483 6df96999 bd0c22ba d3e7d437 4724a82f
912c5e61 6a187efe 8f7c47fc f6945fe5 75be8e3c 97ed17d4 7950b465 3cb32899
so the forth word of the result was 0x75be8e3c but should have been
0x75be8e3d instead.
Likewise bn_sqr_comba4 has an identical bug for the same value as well:
a=0x022181ba fd3aa878 899b2346 ee210f45
correct result:
r=0x00048a69 9fe82f8b 62bd2ed1 88781335 75be8e3d 97ed17d4 7950b465 3cb32899
wrong result:
r=0x00048a69 9fe82f8b 62bd2ed1 88781335 75be8e3c 97ed17d4 7950b465 3cb32899
Fortunately the bn_mul_comba4/8 code paths are not affected.
Also the mips64 target does in fact not handle the carry propagation
correctly.
Example:
a=0x4aaac91900000000 62056c8400000000 fba7334e00000000 1a6be67800000000
022181ba00000000 fd3aa87800000000 899b234635dad283 ee210f4500000001
correct result:
r=0x15c72e32272c4471 392debf018c679c8 b85496496bf8254c d0204f36611e2be1
0cdb3db8f3c081d8 c94ba0e1bacc5061 191b83d47ff929f6 5be0aebfc13ae68d
3eea7a7fdf2f5758 42f7ec656cab3cb5 6a28095be34756f2 64f24687bf37de06
2822309cd1d292f9 6fa698c972372f09 771e97d3a868cda0 dc421e8a00000001
wrong result:
r=0x15c72e32272c4471 392debf018c679c8 b85496496bf8254c d0204f36611e2be1
0cdb3db8f3c081d8 c94ba0e1bacc5061 191b83d47ff929f6 5be0aebfc13ae68d
3eea7a7fdf2f5758 42f7ec656cab3cb5 6a28095be34756f2 64f24687bf37de06
2822309cd1d292f8 6fa698c972372f09 771e97d3a868cda0 dc421e8a00000001
Reviewed-by: Paul Dale <[email protected]>
(Merged from https://github.com/openssl/openssl/pull/17258)
|
writeRandomBytes_arc4random(void *target, size_t count) {
  /* Fill `target` with `count` random bytes, drawing 32 bits at a time
     from arc4random() and emitting the least significant byte first. */
  uint8_t *out = (uint8_t *)target;
  size_t written = 0;

  while (written < count) {
    const uint32_t chunk = arc4random();
    size_t byteIndex;

    for (byteIndex = 0;
         (byteIndex < sizeof(chunk)) && (written < count);
         byteIndex++, written++) {
      out[written] = (uint8_t)(chunk >> (byteIndex * 8));
    }
  }
}
| 0 |
[
"CWE-611",
"CWE-776",
"CWE-415",
"CWE-125"
] |
libexpat
|
c20b758c332d9a13afbbb276d30db1d183a85d43
| 180,999,472,918,326,630,000,000,000,000,000,000,000 | 14 |
xmlparse.c: Deny internal entities closing the doctype
|
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
unsigned long text, lib, swap, anon, file, shmem;
unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
anon = get_mm_counter(mm, MM_ANONPAGES);
file = get_mm_counter(mm, MM_FILEPAGES);
shmem = get_mm_counter(mm, MM_SHMEMPAGES);
/*
* Note: to minimize their overhead, mm maintains hiwater_vm and
* hiwater_rss only when about to *lower* total_vm or rss. Any
* collector of these hiwater stats must therefore get total_vm
* and rss too, which will usually be the higher. Barriers? not
* worth the effort, such snapshots can always be inconsistent.
*/
hiwater_vm = total_vm = mm->total_vm;
if (hiwater_vm < mm->hiwater_vm)
hiwater_vm = mm->hiwater_vm;
hiwater_rss = total_rss = anon + file + shmem;
if (hiwater_rss < mm->hiwater_rss)
hiwater_rss = mm->hiwater_rss;
/* split executable areas between text and lib */
text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
text = min(text, mm->exec_vm << PAGE_SHIFT);
lib = (mm->exec_vm << PAGE_SHIFT) - text;
swap = get_mm_counter(mm, MM_SWAPENTS);
SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
SEQ_PUT_DEC(" kB\nRssFile:\t", file);
SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
seq_put_decimal_ull_width(m,
" kB\nVmExe:\t", text >> 10, 8);
seq_put_decimal_ull_width(m,
" kB\nVmLib:\t", lib >> 10, 8);
seq_put_decimal_ull_width(m,
" kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
seq_puts(m, " kB\n");
hugetlb_report_usage(m, mm);
}
| 0 |
[
"CWE-362",
"CWE-703",
"CWE-667"
] |
linux
|
04f5866e41fb70690e28397487d8bd8eea7d712a
| 110,200,816,328,267,680,000,000,000,000,000,000,000 | 50 |
coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
The core dumping code has always run without holding the mmap_sem for
writing, despite that is the only way to ensure that the entire vma
layout will not change from under it. Only using some signal
serialization on the processes belonging to the mm is not nearly enough.
This was pointed out earlier. For example in Hugh's post from Jul 2017:
https://lkml.kernel.org/r/[email protected]
"Not strictly relevant here, but a related note: I was very surprised
to discover, only quite recently, how handle_mm_fault() may be called
without down_read(mmap_sem) - when core dumping. That seems a
misguided optimization to me, which would also be nice to correct"
In particular because the growsdown and growsup can move the
vm_start/vm_end the various loops the core dump does around the vma will
not be consistent if page faults can happen concurrently.
Pretty much all users calling mmget_not_zero()/get_task_mm() and then
taking the mmap_sem had the potential to introduce unexpected side
effects in the core dumping code.
Adding mmap_sem for writing around the ->core_dump invocation is a
viable long term fix, but it requires removing all copy user and page
faults and to replace them with get_dump_page() for all binary formats
which is not suitable as a short term fix.
For the time being this solution manually covers the places that can
confuse the core dump either by altering the vma layout or the vma flags
while it runs. Once ->core_dump runs under mmap_sem for writing the
function mmget_still_valid() can be dropped.
Allowing mmap_sem protected sections to run in parallel with the
coredump provides some minor parallelism advantage to the swapoff code
(which seems to be safe enough by never mangling any vma field and can
keep doing swapins in parallel to the core dumping) and to some other
corner case.
In order to facilitate the backporting I added "Fixes: 86039bd3b4e6"
however the side effect of this same race condition in /proc/pid/mem
should be reproducible since before 2.6.12-rc2 so I couldn't add any
other "Fixes:" because there's no hash beyond the git genesis commit.
Because find_extend_vma() is the only location outside of the process
context that could modify the "mm" structures under mmap_sem for
reading, by adding the mmget_still_valid() check to it, all other cases
that take the mmap_sem for reading don't need the new check after
mmget_not_zero()/get_task_mm(). The expand_stack() in page fault
context also doesn't need the new check, because all tasks under core
dumping are frozen.
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization")
Signed-off-by: Andrea Arcangeli <[email protected]>
Reported-by: Jann Horn <[email protected]>
Suggested-by: Oleg Nesterov <[email protected]>
Acked-by: Peter Xu <[email protected]>
Reviewed-by: Mike Rapoport <[email protected]>
Reviewed-by: Oleg Nesterov <[email protected]>
Reviewed-by: Jann Horn <[email protected]>
Acked-by: Jason Gunthorpe <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
int sqlite3Fts3FirstFilter(
sqlite3_int64 iDelta, /* Varint that may be written to pOut */
char *pList, /* Position list (no 0x00 term) */
int nList, /* Size of pList in bytes */
char *pOut /* Write output here */
){
int nOut = 0;
int bWritten = 0; /* True once iDelta has been written */
char *p = pList;
char *pEnd = &pList[nList];
if( *p!=0x01 ){
if( *p==0x02 ){
nOut += sqlite3Fts3PutVarint(&pOut[nOut], iDelta);
pOut[nOut++] = 0x02;
bWritten = 1;
}
fts3ColumnlistCopy(0, &p);
}
while( p<pEnd ){
sqlite3_int64 iCol;
p++;
p += sqlite3Fts3GetVarint(p, &iCol);
if( *p==0x02 ){
if( bWritten==0 ){
nOut += sqlite3Fts3PutVarint(&pOut[nOut], iDelta);
bWritten = 1;
}
pOut[nOut++] = 0x01;
nOut += sqlite3Fts3PutVarint(&pOut[nOut], iCol);
pOut[nOut++] = 0x02;
}
fts3ColumnlistCopy(0, &p);
}
if( bWritten ){
pOut[nOut++] = 0x00;
}
return nOut;
}
| 0 |
[
"CWE-787"
] |
sqlite
|
c72f2fb7feff582444b8ffdc6c900c69847ce8a9
| 131,128,763,643,712,700,000,000,000,000,000,000,000 | 41 |
More improvements to shadow table corruption detection in FTS3.
FossilOrigin-Name: 51525f9c3235967bc00a090e84c70a6400698c897aa4742e817121c725b8c99d
|
query_entry_delete(void *k, void* ATTR_UNUSED(arg))
{
struct msgreply_entry* q = (struct msgreply_entry*)k;
lock_rw_destroy(&q->entry.lock);
query_info_clear(&q->key);
free(q);
}
| 0 |
[
"CWE-787"
] |
unbound
|
6c3a0b54ed8ace93d5b5ca7b8078dc87e75cd640
| 339,558,322,226,254,700,000,000,000,000,000,000,000 | 7 |
- Fix Out of Bound Write Compressed Names in rdata_copy(),
reported by X41 D-Sec.
|
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
struct nfs4_flexfile_layout *ffl;
ffl = kzalloc(sizeof(*ffl), gfp_flags);
if (ffl) {
pnfs_init_ds_commit_info(&ffl->commit_info);
INIT_LIST_HEAD(&ffl->error_list);
INIT_LIST_HEAD(&ffl->mirrors);
ffl->last_report_time = ktime_get();
ffl->commit_info.ops = &ff_layout_commit_ops;
return &ffl->generic_hdr;
} else
return NULL;
}
| 0 |
[
"CWE-787"
] |
linux
|
ed34695e15aba74f45247f1ee2cf7e09d449f925
| 207,155,034,196,377,240,000,000,000,000,000,000,000 | 15 |
pNFS/flexfiles: fix incorrect size check in decode_nfs_fh()
We (adam zabrocki, alexander matrosov, alexander tereshkin, maksym
bazalii) observed the check:
if (fh->size > sizeof(struct nfs_fh))
should not use the size of the nfs_fh struct which includes an extra two
bytes from the size field.
struct nfs_fh {
unsigned short size;
unsigned char data[NFS_MAXFHSIZE];
}
but should determine the size from data[NFS_MAXFHSIZE] so the memcpy
will not write 2 bytes beyond destination. The proposed fix is to
compare against the NFS_MAXFHSIZE directly, as is done elsewhere in fs
code base.
Fixes: d67ae825a59d ("pnfs/flexfiles: Add the FlexFile Layout Driver")
Signed-off-by: Nikola Livic <[email protected]>
Signed-off-by: Dan Carpenter <[email protected]>
Signed-off-by: Trond Myklebust <[email protected]>
|
struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
{
struct ctl_table *table;
table = kmemdup(ipv6_icmp_table_template,
sizeof(ipv6_icmp_table_template),
GFP_KERNEL);
if (table)
table[0].data = &net->ipv6.sysctl.icmpv6_time;
return table;
}
| 0 |
[
"CWE-20",
"CWE-200"
] |
linux
|
79dc7e3f1cd323be4c81aa1a94faa1b3ed987fb2
| 116,459,145,171,709,610,000,000,000,000,000,000,000 | 13 |
net: handle no dst on skb in icmp6_send
Andrey reported the following while fuzzing the kernel with syzkaller:
kasan: CONFIG_KASAN_INLINE enabled
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN
Modules linked in:
CPU: 0 PID: 3859 Comm: a.out Not tainted 4.9.0-rc6+ #429
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
task: ffff8800666d4200 task.stack: ffff880067348000
RIP: 0010:[<ffffffff833617ec>] [<ffffffff833617ec>]
icmp6_send+0x5fc/0x1e30 net/ipv6/icmp.c:451
RSP: 0018:ffff88006734f2c0 EFLAGS: 00010206
RAX: ffff8800666d4200 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: dffffc0000000000 RDI: 0000000000000018
RBP: ffff88006734f630 R08: ffff880064138418 R09: 0000000000000003
R10: dffffc0000000000 R11: 0000000000000005 R12: 0000000000000000
R13: ffffffff84e7e200 R14: ffff880064138484 R15: ffff8800641383c0
FS: 00007fb3887a07c0(0000) GS:ffff88006cc00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000020000000 CR3: 000000006b040000 CR4: 00000000000006f0
Stack:
ffff8800666d4200 ffff8800666d49f8 ffff8800666d4200 ffffffff84c02460
ffff8800666d4a1a 1ffff1000ccdaa2f ffff88006734f498 0000000000000046
ffff88006734f440 ffffffff832f4269 ffff880064ba7456 0000000000000000
Call Trace:
[<ffffffff83364ddc>] icmpv6_param_prob+0x2c/0x40 net/ipv6/icmp.c:557
[< inline >] ip6_tlvopt_unknown net/ipv6/exthdrs.c:88
[<ffffffff83394405>] ip6_parse_tlv+0x555/0x670 net/ipv6/exthdrs.c:157
[<ffffffff8339a759>] ipv6_parse_hopopts+0x199/0x460 net/ipv6/exthdrs.c:663
[<ffffffff832ee773>] ipv6_rcv+0xfa3/0x1dc0 net/ipv6/ip6_input.c:191
...
icmp6_send / icmpv6_send is invoked for both rx and tx paths. In both
cases the dst->dev should be preferred for determining the L3 domain
if the dst has been set on the skb. Fallback to the skb->dev if it has
not. This covers the case reported here where icmp6_send is invoked on
Rx before the route lookup.
Fixes: 5d41ce29e ("net: icmp6_send should use dst dev to determine L3 domain")
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David Ahern <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
{
/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
if (req->opcode == IORING_OP_POLL_ADD)
return req->async_data;
return req->apoll->double_poll;
| 0 |
[
"CWE-667"
] |
linux
|
3ebba796fa251d042be42b929a2d916ee5c34a49
| 199,526,926,140,541,700,000,000,000,000,000,000,000 | 7 |
io_uring: ensure that SQPOLL thread is started for exit
If we create it in a disabled state because IORING_SETUP_R_DISABLED is
set on ring creation, we need to ensure that we've kicked the thread if
we're exiting before it's been explicitly disabled. Otherwise we can run
into a deadlock where exit is waiting go park the SQPOLL thread, but the
SQPOLL thread itself is waiting to get a signal to start.
That results in the below trace of both tasks hung, waiting on each other:
INFO: task syz-executor458:8401 blocked for more than 143 seconds.
Not tainted 5.11.0-next-20210226-syzkaller #0
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
task:syz-executor458 state:D stack:27536 pid: 8401 ppid: 8400 flags:0x00004004
Call Trace:
context_switch kernel/sched/core.c:4324 [inline]
__schedule+0x90c/0x21a0 kernel/sched/core.c:5075
schedule+0xcf/0x270 kernel/sched/core.c:5154
schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868
do_wait_for_common kernel/sched/completion.c:85 [inline]
__wait_for_common kernel/sched/completion.c:106 [inline]
wait_for_common kernel/sched/completion.c:117 [inline]
wait_for_completion+0x168/0x270 kernel/sched/completion.c:138
io_sq_thread_park fs/io_uring.c:7115 [inline]
io_sq_thread_park+0xd5/0x130 fs/io_uring.c:7103
io_uring_cancel_task_requests+0x24c/0xd90 fs/io_uring.c:8745
__io_uring_files_cancel+0x110/0x230 fs/io_uring.c:8840
io_uring_files_cancel include/linux/io_uring.h:47 [inline]
do_exit+0x299/0x2a60 kernel/exit.c:780
do_group_exit+0x125/0x310 kernel/exit.c:922
__do_sys_exit_group kernel/exit.c:933 [inline]
__se_sys_exit_group kernel/exit.c:931 [inline]
__x64_sys_exit_group+0x3a/0x50 kernel/exit.c:931
do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x43e899
RSP: 002b:00007ffe89376d48 EFLAGS: 00000246 ORIG_RAX: 00000000000000e7
RAX: ffffffffffffffda RBX: 00000000004af2f0 RCX: 000000000043e899
RDX: 000000000000003c RSI: 00000000000000e7 RDI: 0000000000000000
RBP: 0000000000000000 R08: ffffffffffffffc0 R09: 0000000010000000
R10: 0000000000008011 R11: 0000000000000246 R12: 00000000004af2f0
R13: 0000000000000001 R14: 0000000000000000 R15: 0000000000000001
INFO: task iou-sqp-8401:8402 can't die for more than 143 seconds.
task:iou-sqp-8401 state:D stack:30272 pid: 8402 ppid: 8400 flags:0x00004004
Call Trace:
context_switch kernel/sched/core.c:4324 [inline]
__schedule+0x90c/0x21a0 kernel/sched/core.c:5075
schedule+0xcf/0x270 kernel/sched/core.c:5154
schedule_timeout+0x1db/0x250 kernel/time/timer.c:1868
do_wait_for_common kernel/sched/completion.c:85 [inline]
__wait_for_common kernel/sched/completion.c:106 [inline]
wait_for_common kernel/sched/completion.c:117 [inline]
wait_for_completion+0x168/0x270 kernel/sched/completion.c:138
io_sq_thread+0x27d/0x1ae0 fs/io_uring.c:6717
ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294
INFO: task iou-sqp-8401:8402 blocked for more than 143 seconds.
Reported-by: [email protected]
Signed-off-by: Jens Axboe <[email protected]>
|
static int nfs_umountall_reply(uchar *pkt, unsigned len)
{
struct rpc_t rpc_pkt;
debug("%s\n", __func__);
memcpy(&rpc_pkt.u.data[0], pkt, len);
if (ntohl(rpc_pkt.u.reply.id) > rpc_id)
return -NFS_RPC_ERR;
else if (ntohl(rpc_pkt.u.reply.id) < rpc_id)
return -NFS_RPC_DROP;
if (rpc_pkt.u.reply.rstatus ||
rpc_pkt.u.reply.verifier ||
rpc_pkt.u.reply.astatus)
return -1;
fs_mounted = 0;
memset(dirfh, 0, sizeof(dirfh));
return 0;
}
| 0 |
[
"CWE-120",
"CWE-703"
] |
u-boot
|
5d14ee4e53a81055d34ba280cb8fd90330f22a96
| 303,226,601,670,205,000,000,000,000,000,000,000,000 | 23 |
CVE-2019-14196: nfs: fix unbounded memcpy with a failed length check at nfs_lookup_reply
This patch adds a check to rpc_pkt.u.reply.data at nfs_lookup_reply.
Signed-off-by: Cheng Liu <[email protected]>
Reported-by: Fermín Serna <[email protected]>
Acked-by: Joe Hershberger <[email protected]>
|
cliprdr_server_format_list_response(CliprdrServerContext* context,
const CLIPRDR_FORMAT_LIST_RESPONSE* formatListResponse)
{
wStream* s;
CliprdrServerPrivate* cliprdr = (CliprdrServerPrivate*)context->handle;
if (formatListResponse->msgType != CB_FORMAT_LIST_RESPONSE)
WLog_WARN(TAG, "[%s] called with invalid type %08" PRIx32, __FUNCTION__,
formatListResponse->msgType);
s = cliprdr_packet_new(CB_FORMAT_LIST_RESPONSE, formatListResponse->msgFlags,
formatListResponse->dataLen);
if (!s)
{
WLog_ERR(TAG, "cliprdr_packet_new failed!");
return ERROR_INTERNAL_ERROR;
}
WLog_DBG(TAG, "ServerFormatListResponse");
return cliprdr_server_packet_send(cliprdr, s);
}
| 0 |
[] |
FreeRDP
|
8e1a1b407565eb0a48923c796f5b1f69167b3c48
| 262,166,537,627,242,900,000,000,000,000,000,000,000 | 21 |
Fixed cliprdr_server_receive_capabilities
Thanks to hac425 CVE-2020-11017, CVE-2020-11018
|
int wc_RsaSSL_Verify(const byte* in, word32 inLen, byte* out,
word32 outLen, RsaKey* key)
{
if (in == NULL || out == NULL || key == NULL || inLen == 0) {
return BAD_FUNC_ARG;
}
return RsaSSL_Verify_fips(in, inLen, out, outLen, key);
}
| 0 |
[
"CWE-310",
"CWE-787"
] |
wolfssl
|
fb2288c46dd4c864b78f00a47a364b96a09a5c0f
| 158,018,249,177,203,280,000,000,000,000,000,000,000 | 8 |
RSA-PSS: Handle edge case with encoding message to hash
When the key is small relative to the digest (1024-bit key, 64-byte
hash, 61-byte salt length), the internal message to hash is larger than
the output size.
Allocate a buffer for the message when this happens.
|
load_xwd_f2_d1_b1 (const gchar *filename,
FILE *ifp,
L_XWDFILEHEADER *xwdhdr,
L_XWDCOLOR *xwdcolmap)
{
register int pix8;
register guchar *dest, *src;
guchar c1, c2, c3, c4;
gint width, height, scan_lines, tile_height;
gint i, j, ncols;
gchar *temp;
guchar bit2byte[256 * 8];
guchar *data, *scanline;
gint err = 0;
gint32 layer_ID, image_ID;
GeglBuffer *buffer;
#ifdef XWD_DEBUG
printf ("load_xwd_f2_d1_b1 (%s)\n", filename);
#endif
width = xwdhdr->l_pixmap_width;
height = xwdhdr->l_pixmap_height;
image_ID = create_new_image (filename, width, height, GIMP_INDEXED,
GIMP_INDEXED_IMAGE, &layer_ID, &buffer);
tile_height = gimp_tile_height ();
data = g_malloc (tile_height * width);
scanline = g_new (guchar, xwdhdr->l_bytes_per_line + 8);
ncols = xwdhdr->l_colormap_entries;
if (xwdhdr->l_ncolors < ncols)
ncols = xwdhdr->l_ncolors;
if (ncols < 2)
set_bw_color_table (image_ID);
else
set_color_table (image_ID, xwdhdr, xwdcolmap);
temp = (gchar *) bit2byte;
/* Get an array for mapping 8 bits in a byte to 8 bytes */
if (!xwdhdr->l_bitmap_bit_order)
{
for (j = 0; j < 256; j++)
for (i = 0; i < 8; i++)
*(temp++) = ((j & (1 << i)) != 0);
}
else
{
for (j = 0; j < 256; j++)
for (i = 7; i >= 0; i--)
*(temp++) = ((j & (1 << i)) != 0);
}
dest = data;
scan_lines = 0;
for (i = 0; i < height; i++)
{
if (fread (scanline, xwdhdr->l_bytes_per_line, 1, ifp) != 1)
{
err = 1;
break;
}
/* Need to check byte order ? */
if (xwdhdr->l_bitmap_bit_order != xwdhdr->l_byte_order)
{
src = scanline;
switch (xwdhdr->l_bitmap_unit)
{
case 16:
j = xwdhdr->l_bytes_per_line;
while (j > 0)
{
c1 = src[0]; c2 = src[1];
*(src++) = c2; *(src++) = c1;
j -= 2;
}
break;
case 32:
j = xwdhdr->l_bytes_per_line;
while (j > 0)
{
c1 = src[0]; c2 = src[1]; c3 = src[2]; c4 = src[3];
*(src++) = c4; *(src++) = c3; *(src++) = c2; *(src++) = c1;
j -= 4;
}
break;
}
}
src = scanline;
j = width;
while (j >= 8)
{
pix8 = *(src++);
memcpy (dest, bit2byte + pix8*8, 8);
dest += 8;
j -= 8;
}
if (j > 0)
{
pix8 = *(src++);
memcpy (dest, bit2byte + pix8*8, j);
dest += j;
}
scan_lines++;
if ((i % 20) == 0)
gimp_progress_update ((double)(i+1) / (double)height);
if ((scan_lines == tile_height) || ((i+1) == height))
{
gegl_buffer_set (buffer, GEGL_RECTANGLE (0, i - scan_lines + 1,
width, scan_lines), 0,
NULL, data, GEGL_AUTO_ROWSTRIDE);
scan_lines = 0;
dest = data;
}
if (err) break;
}
g_free (data);
g_free (scanline);
if (err)
g_message (_("EOF encountered on reading"));
g_object_unref (buffer);
return err ? -1 : image_ID;
}
| 0 |
[
"CWE-190"
] |
gimp
|
32ae0f83e5748299641cceaabe3f80f1b3afd03e
| 305,418,391,477,802,180,000,000,000,000,000,000,000 | 138 |
file-xwd: sanity check colormap size (CVE-2013-1913)
|
struct lhash_st *SSL_CTX_sessions(SSL_CTX *ctx)
{
return ctx->sessions;
}
| 0 |
[
"CWE-310"
] |
openssl
|
c6a876473cbff0fd323c8abcaace98ee2d21863d
| 325,167,638,497,038,140,000,000,000,000,000,000,000 | 4 |
Support TLS_FALLBACK_SCSV.
Reviewed-by: Stephen Henson <[email protected]>
|
X509_CRL *d2i_X509_CRL_fp(FILE *fp, X509_CRL **crl)
{
return ASN1_item_d2i_fp(ASN1_ITEM_rptr(X509_CRL), fp, crl);
}
| 0 |
[
"CWE-310"
] |
openssl
|
684400ce192dac51df3d3e92b61830a6ef90be3e
| 33,247,606,690,924,110,000,000,000,000,000,000,000 | 4 |
Fix various certificate fingerprint issues.
By using non-DER or invalid encodings outside the signed portion of a
certificate the fingerprint can be changed without breaking the signature.
Although no details of the signed portion of the certificate can be changed
this can cause problems with some applications: e.g. those using the
certificate fingerprint for blacklists.
1. Reject signatures with non zero unused bits.
If the BIT STRING containing the signature has non zero unused bits reject
the signature. All current signature algorithms require zero unused bits.
2. Check certificate algorithm consistency.
Check the AlgorithmIdentifier inside TBS matches the one in the
certificate signature. NB: this will result in signature failure
errors for some broken certificates.
3. Check DSA/ECDSA signatures use DER.
Reencode DSA/ECDSA signatures and compare with the original received
signature. Return an error if there is a mismatch.
This will reject various cases including garbage after signature
(thanks to Antti Karjalainen and Tuomo Untinen from the Codenomicon CROSS
program for discovering this case) and use of BER or invalid ASN.1 INTEGERs
(negative or with leading zeroes).
CVE-2014-8275
Reviewed-by: Emilia Käsper <[email protected]>
|
init()
{
rsRetVal localRet;
int iNbrActions;
int bHadConfigErr = 0;
ruleset_t *pRuleset;
char cbuf[BUFSIZ];
char bufStartUpMsg[512];
struct sigaction sigAct;
DEFiRet;
thrdTerminateAll(); /* stop all running input threads - TODO: reconsider location! */
/* initialize some static variables */
pDfltHostnameCmp = NULL;
pDfltProgNameCmp = NULL;
eDfltHostnameCmpMode = HN_NO_COMP;
DBGPRINTF("rsyslog %s - called init()\n", VERSION);
/* delete the message queue, which also flushes all messages left over */
if(pMsgQueue != NULL) {
DBGPRINTF("deleting main message queue\n");
qqueueDestruct(&pMsgQueue); /* delete pThis here! */
pMsgQueue = NULL;
}
/* Close all open log files and free log descriptor array. This also frees
* all output-modules instance data.
*/
destructAllActions();
/* Unload all non-static modules */
DBGPRINTF("Unloading non-static modules.\n");
module.UnloadAndDestructAll(eMOD_LINK_DYNAMIC_LOADED);
DBGPRINTF("Clearing templates.\n");
tplDeleteNew();
/* re-setting values to defaults (where applicable) */
/* once we have loadable modules, we must re-visit this code. The reason is
* that config variables are not re-set, because the module is not yet loaded. On
* the other hand, that doesn't matter, because the module got unloaded and is then
* re-loaded, so the variables should be re-set via that way. And this is exactly how
* it works. Loadable module's variables are initialized on load, the rest here.
* rgerhards, 2008-04-28
*/
conf.cfsysline((uchar*)"ResetConfigVariables");
conf.ReInitConf();
/* construct the default ruleset */
ruleset.Construct(&pRuleset);
ruleset.SetName(pRuleset, UCHAR_CONSTANT("RSYSLOG_DefaultRuleset"));
ruleset.ConstructFinalize(pRuleset);
/* open the configuration file */
localRet = conf.processConfFile(ConfFile);
CHKiRet(conf.GetNbrActActions(&iNbrActions));
if(localRet != RS_RET_OK) {
errmsg.LogError(0, localRet, "CONFIG ERROR: could not interpret master config file '%s'.", ConfFile);
bHadConfigErr = 1;
} else if(iNbrActions == 0) {
errmsg.LogError(0, RS_RET_NO_ACTIONS, "CONFIG ERROR: there are no active actions configured. Inputs will "
"run, but no output whatsoever is created.");
bHadConfigErr = 1;
}
if((localRet != RS_RET_OK && localRet != RS_RET_NONFATAL_CONFIG_ERR) || iNbrActions == 0) {
/* rgerhards: this code is executed to set defaults when the
* config file could not be opened. We might think about
* abandoning the run in this case - but this, too, is not
* very clever... So we stick with what we have.
* We ignore any errors while doing this - we would be lost anyhow...
*/
errmsg.LogError(0, NO_ERRCODE, "EMERGENCY CONFIGURATION ACTIVATED - fix rsyslog config file!");
/* note: we previously used _POSIY_TTY_NAME_MAX+1, but this turned out to be
* too low on linux... :-S -- rgerhards, 2008-07-28
*/
char szTTYNameBuf[128];
rule_t *pRule = NULL; /* initialization to NULL is *vitally* important! */
conf.cfline(UCHAR_CONSTANT("*.ERR\t" _PATH_CONSOLE), &pRule);
conf.cfline(UCHAR_CONSTANT("syslog.*\t" _PATH_CONSOLE), &pRule);
conf.cfline(UCHAR_CONSTANT("*.PANIC\t*"), &pRule);
conf.cfline(UCHAR_CONSTANT("syslog.*\troot"), &pRule);
if(ttyname_r(0, szTTYNameBuf, sizeof(szTTYNameBuf)) == 0) {
snprintf(cbuf,sizeof(cbuf), "*.*\t%s", szTTYNameBuf);
conf.cfline((uchar*)cbuf, &pRule);
} else {
DBGPRINTF("error %d obtaining controlling terminal, not using that emergency rule\n", errno);
}
ruleset.AddRule(ruleset.GetCurrent(), &pRule);
}
legacyOptsHook();
/* we are now done with reading the configuration. This is the right time to
* free some objects that were just needed for loading it. rgerhards 2005-10-19
*/
if(pDfltHostnameCmp != NULL) {
rsCStrDestruct(&pDfltHostnameCmp);
}
if(pDfltProgNameCmp != NULL) {
rsCStrDestruct(&pDfltProgNameCmp);
}
/* some checks */
if(iMainMsgQueueNumWorkers < 1) {
errmsg.LogError(0, NO_ERRCODE, "$MainMsgQueueNumWorkers must be at least 1! Set to 1.\n");
iMainMsgQueueNumWorkers = 1;
}
if(MainMsgQueType == QUEUETYPE_DISK) {
errno = 0; /* for logerror! */
if(glbl.GetWorkDir() == NULL) {
errmsg.LogError(0, NO_ERRCODE, "No $WorkDirectory specified - can not run main message queue in 'disk' mode. "
"Using 'FixedArray' instead.\n");
MainMsgQueType = QUEUETYPE_FIXED_ARRAY;
}
if(pszMainMsgQFName == NULL) {
errmsg.LogError(0, NO_ERRCODE, "No $MainMsgQueueFileName specified - can not run main message queue in "
"'disk' mode. Using 'FixedArray' instead.\n");
MainMsgQueType = QUEUETYPE_FIXED_ARRAY;
}
}
/* check if we need to generate a config DAG and, if so, do that */
if(pszConfDAGFile != NULL)
generateConfigDAG(pszConfDAGFile);
/* we are done checking the config - now validate if we should actually run or not.
* If not, terminate. -- rgerhards, 2008-07-25
*/
if(iConfigVerify) {
if(bHadConfigErr) {
/* a bit dirty, but useful... */
exit(1);
}
ABORT_FINALIZE(RS_RET_VALIDATION_RUN);
}
/* switch the message object to threaded operation, if necessary */
if(MainMsgQueType == QUEUETYPE_DIRECT || iMainMsgQueueNumWorkers > 1) {
MsgEnableThreadSafety();
}
/* create message queue */
CHKiRet_Hdlr(qqueueConstruct(&pMsgQueue, MainMsgQueType, iMainMsgQueueNumWorkers, iMainMsgQueueSize, msgConsumer)) {
/* no queue is fatal, we need to give up in that case... */
fprintf(stderr, "fatal error %d: could not create message queue - rsyslogd can not run!\n", iRet);
exit(1);
}
/* name our main queue object (it's not fatal if it fails...) */
obj.SetName((obj_t*) pMsgQueue, (uchar*) "main Q");
/* ... set some properties ... */
# define setQPROP(func, directive, data) \
CHKiRet_Hdlr(func(pMsgQueue, data)) { \
errmsg.LogError(0, NO_ERRCODE, "Invalid " #directive ", error %d. Ignored, running with default setting", iRet); \
}
# define setQPROPstr(func, directive, data) \
CHKiRet_Hdlr(func(pMsgQueue, data, (data == NULL)? 0 : strlen((char*) data))) { \
errmsg.LogError(0, NO_ERRCODE, "Invalid " #directive ", error %d. Ignored, running with default setting", iRet); \
}
setQPROP(qqueueSetMaxFileSize, "$MainMsgQueueFileSize", iMainMsgQueMaxFileSize);
setQPROP(qqueueSetsizeOnDiskMax, "$MainMsgQueueMaxDiskSpace", iMainMsgQueMaxDiskSpace);
setQPROPstr(qqueueSetFilePrefix, "$MainMsgQueueFileName", pszMainMsgQFName);
setQPROP(qqueueSetiPersistUpdCnt, "$MainMsgQueueCheckpointInterval", iMainMsgQPersistUpdCnt);
setQPROP(qqueueSetbSyncQueueFiles, "$MainMsgQueueSyncQueueFiles", bMainMsgQSyncQeueFiles);
setQPROP(qqueueSettoQShutdown, "$MainMsgQueueTimeoutShutdown", iMainMsgQtoQShutdown );
setQPROP(qqueueSettoActShutdown, "$MainMsgQueueTimeoutActionCompletion", iMainMsgQtoActShutdown);
setQPROP(qqueueSettoWrkShutdown, "$MainMsgQueueWorkerTimeoutThreadShutdown", iMainMsgQtoWrkShutdown);
setQPROP(qqueueSettoEnq, "$MainMsgQueueTimeoutEnqueue", iMainMsgQtoEnq);
setQPROP(qqueueSetiHighWtrMrk, "$MainMsgQueueHighWaterMark", iMainMsgQHighWtrMark);
setQPROP(qqueueSetiLowWtrMrk, "$MainMsgQueueLowWaterMark", iMainMsgQLowWtrMark);
setQPROP(qqueueSetiDiscardMrk, "$MainMsgQueueDiscardMark", iMainMsgQDiscardMark);
setQPROP(qqueueSetiDiscardSeverity, "$MainMsgQueueDiscardSeverity", iMainMsgQDiscardSeverity);
setQPROP(qqueueSetiMinMsgsPerWrkr, "$MainMsgQueueWorkerThreadMinimumMessages", iMainMsgQWrkMinMsgs);
setQPROP(qqueueSetbSaveOnShutdown, "$MainMsgQueueSaveOnShutdown", bMainMsgQSaveOnShutdown);
setQPROP(qqueueSetiDeqSlowdown, "$MainMsgQueueDequeueSlowdown", iMainMsgQDeqSlowdown);
setQPROP(qqueueSetiDeqtWinFromHr, "$MainMsgQueueDequeueTimeBegin", iMainMsgQueueDeqtWinFromHr);
setQPROP(qqueueSetiDeqtWinToHr, "$MainMsgQueueDequeueTimeEnd", iMainMsgQueueDeqtWinToHr);
# undef setQPROP
# undef setQPROPstr
/* ... and finally start the queue! */
CHKiRet_Hdlr(qqueueStart(pMsgQueue)) {
/* no queue is fatal, we need to give up in that case... */
fprintf(stderr, "fatal error %d: could not start message queue - rsyslogd can not run!\n", iRet);
exit(1);
}
bHaveMainQueue = (MainMsgQueType == QUEUETYPE_DIRECT) ? 0 : 1;
DBGPRINTF("Main processing queue is initialized and running\n");
/* the output part and the queue is now ready to run. So it is a good time
* to initialize the inputs. Please note that the net code above should be
* shuffled to down here once we have everything in input modules.
* rgerhards, 2007-12-14
* NOTE: as of 2009-06-29, the input modules are initialized, but not yet run.
* Keep in mind. though, that the outputs already run if the queue was
* persisted to disk. -- rgerhards
*/
startInputModules();
if(Debug) {
dbgPrintInitInfo();
}
/* we now generate the startup message. It now includes everything to
* identify this instance. -- rgerhards, 2005-08-17
*/
snprintf(bufStartUpMsg, sizeof(bufStartUpMsg)/sizeof(char),
" [origin software=\"rsyslogd\" " "swVersion=\"" VERSION \
"\" x-pid=\"%d\" x-info=\"http://www.rsyslog.com\"] (re)start",
(int) myPid);
logmsgInternal(NO_ERRCODE, LOG_SYSLOG|LOG_INFO, (uchar*)bufStartUpMsg, 0);
memset(&sigAct, 0, sizeof (sigAct));
sigemptyset(&sigAct.sa_mask);
sigAct.sa_handler = sighup_handler;
sigaction(SIGHUP, &sigAct, NULL);
DBGPRINTF(" (re)started.\n");
finalize_it:
RETiRet;
}
| 0 |
[
"CWE-119"
] |
rsyslog
|
1ca6cc236d1dabf1633238b873fb1c057e52f95e
| 154,994,542,216,298,430,000,000,000,000,000,000,000 | 233 |
bugfix: off-by-one(two) bug in legacy syslog parser
|
void CairoOutputDev::beginTextObject(GfxState *state) {
if (!(state->getRender() & 4) && state->getFillColorSpace()->getMode() == csPattern) {
haveCSPattern = gTrue;
saveState(state);
}
}
| 0 |
[] |
poppler
|
abf167af8b15e5f3b510275ce619e6fdb42edd40
| 46,069,207,291,316,840,000,000,000,000,000,000,000 | 6 |
Implement tiling/patterns in SplashOutputDev
Fixes bug 13518
|
qemuProcessStartPRDaemonHook(void *opaque)
{
virDomainObjPtr vm = opaque;
size_t i, nfds = 0;
g_autofree int *fds = NULL;
int ret = -1;
if (qemuDomainNamespaceEnabled(vm, QEMU_DOMAIN_NS_MOUNT)) {
if (virProcessGetNamespaces(vm->pid, &nfds, &fds) < 0)
return ret;
if (nfds > 0 &&
virProcessSetNamespaces(nfds, fds) < 0)
goto cleanup;
}
ret = 0;
cleanup:
for (i = 0; i < nfds; i++)
VIR_FORCE_CLOSE(fds[i]);
return ret;
}
| 0 |
[
"CWE-416"
] |
libvirt
|
1ac703a7d0789e46833f4013a3876c2e3af18ec7
| 180,709,339,243,426,830,000,000,000,000,000,000,000 | 22 |
qemu: Add missing lock in qemuProcessHandleMonitorEOF
qemuMonitorUnregister will be called in multiple threads (e.g. threads
in rpc worker pool and the vm event thread). In some cases, it isn't
protected by the monitor lock, which may lead to call g_source_unref
more than one time and a use-after-free problem eventually.
Add the missing lock in qemuProcessHandleMonitorEOF (which is the only
position missing lock of monitor I found).
Suggested-by: Michal Privoznik <[email protected]>
Signed-off-by: Peng Liang <[email protected]>
Signed-off-by: Michal Privoznik <[email protected]>
Reviewed-by: Michal Privoznik <[email protected]>
|
static void io_req_task_queue(struct io_kiocb *req)
{
req->io_task_work.func = io_req_task_submit;
io_req_task_work_add(req);
}
| 0 |
[
"CWE-125"
] |
linux
|
89c2b3b74918200e46699338d7bcc19b1ea12110
| 71,571,420,614,794,740,000,000,000,000,000,000,000 | 5 |
io_uring: reexpand under-reexpanded iters
[ 74.211232] BUG: KASAN: stack-out-of-bounds in iov_iter_revert+0x809/0x900
[ 74.212778] Read of size 8 at addr ffff888025dc78b8 by task
syz-executor.0/828
[ 74.214756] CPU: 0 PID: 828 Comm: syz-executor.0 Not tainted
5.14.0-rc3-next-20210730 #1
[ 74.216525] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996),
BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
[ 74.219033] Call Trace:
[ 74.219683] dump_stack_lvl+0x8b/0xb3
[ 74.220706] print_address_description.constprop.0+0x1f/0x140
[ 74.224226] kasan_report.cold+0x7f/0x11b
[ 74.226085] iov_iter_revert+0x809/0x900
[ 74.227960] io_write+0x57d/0xe40
[ 74.232647] io_issue_sqe+0x4da/0x6a80
[ 74.242578] __io_queue_sqe+0x1ac/0xe60
[ 74.245358] io_submit_sqes+0x3f6e/0x76a0
[ 74.248207] __do_sys_io_uring_enter+0x90c/0x1a20
[ 74.257167] do_syscall_64+0x3b/0x90
[ 74.257984] entry_SYSCALL_64_after_hwframe+0x44/0xae
old_size = iov_iter_count();
...
iov_iter_revert(old_size - iov_iter_count());
If iov_iter_revert() is done base on the initial size as above, and the
iter is truncated and not reexpanded in the middle, it miscalculates
borders causing problems. This trace is due to no one reexpanding after
generic_write_checks().
Now iters store how many bytes has been truncated, so reexpand them to
the initial state right before reverting.
Cc: [email protected]
Reported-by: Palash Oswal <[email protected]>
Reported-by: Sudip Mukherjee <[email protected]>
Reported-and-tested-by: [email protected]
Signed-off-by: Pavel Begunkov <[email protected]>
Signed-off-by: Al Viro <[email protected]>
|
_asn1_get_indefinite_length_string (const unsigned char *der, int *len)
{
int len2, len3, counter, indefinite;
unsigned long tag;
unsigned char class;
counter = indefinite = 0;
while (1)
{
if ((*len) < counter)
return ASN1_DER_ERROR;
if ((der[counter] == 0) && (der[counter + 1] == 0))
{
counter += 2;
indefinite--;
if (indefinite <= 0)
break;
else
continue;
}
if (asn1_get_tag_der
(der + counter, *len - counter, &class, &len2,
&tag) != ASN1_SUCCESS)
return ASN1_DER_ERROR;
if (counter + len2 > *len)
return ASN1_DER_ERROR;
counter += len2;
len2 = asn1_get_length_der (der + counter, *len - counter, &len3);
if (len2 < -1)
return ASN1_DER_ERROR;
if (len2 == -1)
{
indefinite++;
counter += 1;
}
else
{
counter += len2 + len3;
}
}
*len = counter;
return ASN1_SUCCESS;
}
| 1 |
[] |
libtasn1
|
0e80d79db71747644394fe3472dad28cd3e7b00b
| 238,761,186,358,235,000,000,000,000,000,000,000,000 | 48 |
More precise length check in _asn1_get_indefinite_length_string().
|
TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
switch (input->type) { // Already know in/out types are same.
case kTfLiteFloat32:
MaxEvalFloat<kernel_type>(context, node, params, data, input, output);
break;
case kTfLiteUInt8:
MaxEvalQuantizedUInt8<kernel_type>(context, node, params, data, input,
output);
break;
case kTfLiteInt8:
MaxEvalQuantizedInt8<kernel_type>(context, node, params, data, input,
output);
break;
case kTfLiteInt16:
MaxEvalQuantizedInt16<kernel_type>(context, node, params, data, input,
output);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
return kTfLiteOk;
}
| 0 |
[
"CWE-703",
"CWE-835"
] |
tensorflow
|
dfa22b348b70bb89d6d6ec0ff53973bacb4f4695
| 329,876,413,039,320,980,000,000,000,000,000,000,000 | 31 |
Prevent a division by 0 in average ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3
|
void ServerConnectionImpl::maybeAddSentinelBufferFragment(Buffer::WatermarkBuffer& output_buffer) {
if (!flood_protection_) {
return;
}
// It's messy and complicated to try to tag the final write of an HTTP response for response
// tracking for flood protection. Instead, write an empty buffer fragment after the response,
// to allow for tracking.
// When the response is written out, the fragment will be deleted and the counter will be updated
// by ServerConnectionImpl::releaseOutboundResponse()
auto fragment =
Buffer::OwnedBufferFragmentImpl::create(absl::string_view("", 0), response_buffer_releasor_);
output_buffer.addBufferFragment(*fragment.release());
ASSERT(outbound_responses_ < max_outbound_responses_);
outbound_responses_++;
}
| 0 |
[
"CWE-770"
] |
envoy
|
7ca28ff7d46454ae930e193d97b7d08156b1ba59
| 304,730,468,948,433,200,000,000,000,000,000,000,000 | 15 |
[http1] Include request URL in request header size computation, and reject partial headers that exceed configured limits (#145)
Signed-off-by: antonio <[email protected]>
|
static int io_recvmsg_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
return -EOPNOTSUPP;
}
| 0 |
[] |
linux
|
0f2122045b946241a9e549c2a76cea54fa58a7ff
| 73,620,614,343,883,560,000,000,000,000,000,000,000 | 5 |
io_uring: don't rely on weak ->files references
Grab actual references to the files_struct. To avoid circular references
issues due to this, we add a per-task note that keeps track of what
io_uring contexts a task has used. When the tasks execs or exits its
assigned files, we cancel requests based on this tracking.
With that, we can grab proper references to the files table, and no
longer need to rely on stashing away ring_fd and ring_file to check
if the ring_fd may have been closed.
Cc: [email protected] # v5.5+
Reviewed-by: Pavel Begunkov <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
int main()
{
initialize_environment();
test_format_wrong_size();
test_blit_info_format_check();
test_blit_info_format_check_null_format();
test_format_is_plain_nullptr_deref_trigger();
test_format_util_format_is_rgb_nullptr_deref_trigger_illegal_resource();
test_format_util_format_is_rgb_nullptr_deref_trigger();
test_double_free_in_vrend_renderer_blit_int_trigger_invalid_formats();
test_double_free_in_vrend_renderer_blit_int_trigger();
test_format_is_has_alpha_nullptr_deref_trigger_original();
test_format_is_has_alpha_nullptr_deref_trigger_legal_resource();
test_heap_overflow_vrend_renderer_transfer_write_iov();
virgl_renderer_context_destroy(ctx_id);
virgl_renderer_cleanup(&cookie);
virgl_egl_destroy(test_egl);
return 0;
}
| 1 |
[] |
virglrenderer
|
8c9cfb4e425542e96f0717189fe4658555baaf08
| 243,314,667,655,714,000,000,000,000,000,000,000,000 | 23 |
tests: Add trigger for overflow in texture data upload
Related #140
Signed-off-by: Gert Wollny <[email protected]>
Acked-by: Emil Velikov <[email protected]>
|
Network::FilterStatus ConnectionManagerImpl::onData(Buffer::Instance& data, bool) {
if (!codec_) {
// Http3 codec should have been instantiated by now.
createCodec(data);
}
bool redispatch;
do {
redispatch = false;
const Status status = codec_->dispatch(data);
if (isBufferFloodError(status) || isInboundFramesWithEmptyPayloadError(status)) {
handleCodecError(status.message());
return Network::FilterStatus::StopIteration;
} else if (isCodecProtocolError(status)) {
stats_.named_.downstream_cx_protocol_error_.inc();
handleCodecError(status.message());
return Network::FilterStatus::StopIteration;
}
ASSERT(status.ok());
// Processing incoming data may release outbound data so check for closure here as well.
checkForDeferredClose(false);
// The HTTP/1 codec will pause dispatch after a single message is complete. We want to
// either redispatch if there are no streams and we have more data. If we have a single
// complete non-WebSocket stream but have not responded yet we will pause socket reads
// to apply back pressure.
if (codec_->protocol() < Protocol::Http2) {
if (read_callbacks_->connection().state() == Network::Connection::State::Open &&
data.length() > 0 && streams_.empty()) {
redispatch = true;
}
}
} while (redispatch);
if (!read_callbacks_->connection().streamInfo().protocol()) {
read_callbacks_->connection().streamInfo().protocol(codec_->protocol());
}
return Network::FilterStatus::StopIteration;
}
| 0 |
[
"CWE-416"
] |
envoy
|
148de954ed3585d8b4298b424aa24916d0de6136
| 250,943,366,504,935,200,000,000,000,000,000,000,000 | 43 |
CVE-2021-43825
Response filter manager crash
Signed-off-by: Yan Avlasov <[email protected]>
|
server_client_check_redraw(struct client *c)
{
struct session *s = c->session;
struct tty *tty = &c->tty;
struct window_pane *wp;
int needed, flags;
struct timeval tv = { .tv_usec = 1000 };
static struct event ev;
size_t left;
if (c->flags & (CLIENT_CONTROL|CLIENT_SUSPENDED))
return;
if (c->flags & CLIENT_ALLREDRAWFLAGS) {
log_debug("%s: redraw%s%s%s", c->name,
(c->flags & CLIENT_REDRAWWINDOW) ? " window" : "",
(c->flags & CLIENT_REDRAWSTATUS) ? " status" : "",
(c->flags & CLIENT_REDRAWBORDERS) ? " borders" : "");
}
/*
* If there is outstanding data, defer the redraw until it has been
* consumed. We can just add a timer to get out of the event loop and
* end up back here.
*/
needed = 0;
if (c->flags & CLIENT_ALLREDRAWFLAGS)
needed = 1;
else {
TAILQ_FOREACH(wp, &c->session->curw->window->panes, entry) {
if (wp->flags & PANE_REDRAW) {
needed = 1;
break;
}
}
}
if (needed && (left = EVBUFFER_LENGTH(tty->out)) != 0) {
log_debug("%s: redraw deferred (%zu left)", c->name, left);
if (!evtimer_initialized(&ev))
evtimer_set(&ev, server_client_redraw_timer, NULL);
if (!evtimer_pending(&ev, NULL)) {
log_debug("redraw timer started");
evtimer_add(&ev, &tv);
}
/*
* We may have got here for a single pane redraw, but force a
* full redraw next time in case other panes have been updated.
*/
c->flags |= CLIENT_ALLREDRAWFLAGS;
return;
} else if (needed)
log_debug("%s: redraw needed", c->name);
flags = tty->flags & (TTY_BLOCK|TTY_FREEZE|TTY_NOCURSOR);
tty->flags = (tty->flags & ~(TTY_BLOCK|TTY_FREEZE)) | TTY_NOCURSOR;
if (~c->flags & CLIENT_REDRAWWINDOW) {
/*
* If not redrawing the entire window, check whether each pane
* needs to be redrawn.
*/
TAILQ_FOREACH(wp, &c->session->curw->window->panes, entry) {
if (wp->flags & PANE_REDRAW) {
tty_update_mode(tty, tty->mode, NULL);
screen_redraw_pane(c, wp);
}
}
}
if (c->flags & CLIENT_ALLREDRAWFLAGS) {
if (options_get_number(s->options, "set-titles"))
server_client_set_title(c);
screen_redraw_screen(c);
}
tty->flags = (tty->flags & ~(TTY_FREEZE|TTY_NOCURSOR)) | flags;
tty_update_mode(tty, tty->mode, NULL);
c->flags &= ~(CLIENT_ALLREDRAWFLAGS|CLIENT_STATUSFORCE);
if (needed) {
/*
* We would have deferred the redraw unless the output buffer
* was empty, so we can record how many bytes the redraw
* generated.
*/
c->redraw = EVBUFFER_LENGTH(tty->out);
log_debug("%s: redraw added %zu bytes", c->name, c->redraw);
}
}
| 0 |
[] |
src
|
b32e1d34e10a0da806823f57f02a4ae6e93d756e
| 37,677,282,448,423,110,000,000,000,000,000,000,000 | 90 |
evbuffer_new and bufferevent_new can both fail (when malloc fails) and
return NULL. GitHub issue 1547.
|
proto_tree_add_ipv4(proto_tree *tree, int hfindex, tvbuff_t *tvb, gint start,
gint length, ws_in4_addr value)
{
proto_item *pi;
header_field_info *hfinfo;
CHECK_FOR_NULL_TREE(tree);
TRY_TO_FAKE_THIS_ITEM(tree, hfindex, hfinfo);
DISSECTOR_ASSERT_FIELD_TYPE(hfinfo, FT_IPv4);
pi = proto_tree_add_pi(tree, hfinfo, tvb, start, &length);
proto_tree_set_ipv4(PNODE_FINFO(pi), value);
return pi;
}
| 0 |
[
"CWE-401"
] |
wireshark
|
a9fc769d7bb4b491efb61c699d57c9f35269d871
| 36,795,843,115,759,577,000,000,000,000,000,000,000 | 17 |
epan: Fix a memory leak.
Make sure _proto_tree_add_bits_ret_val allocates a bits array using the
packet scope, otherwise we leak memory. Fixes #17032.
|
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
struct packet_sock *po = pkt_sk(sk);
struct packet_fanout *f, *match;
u8 type = type_flags & 0xff;
u8 flags = type_flags >> 8;
int err;
switch (type) {
case PACKET_FANOUT_ROLLOVER:
if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
return -EINVAL;
case PACKET_FANOUT_HASH:
case PACKET_FANOUT_LB:
case PACKET_FANOUT_CPU:
case PACKET_FANOUT_RND:
case PACKET_FANOUT_QM:
case PACKET_FANOUT_CBPF:
case PACKET_FANOUT_EBPF:
break;
default:
return -EINVAL;
}
if (!po->running)
return -EINVAL;
if (po->fanout)
return -EALREADY;
if (type == PACKET_FANOUT_ROLLOVER ||
(type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
if (!po->rollover)
return -ENOMEM;
atomic_long_set(&po->rollover->num, 0);
atomic_long_set(&po->rollover->num_huge, 0);
atomic_long_set(&po->rollover->num_failed, 0);
}
mutex_lock(&fanout_mutex);
match = NULL;
list_for_each_entry(f, &fanout_list, list) {
if (f->id == id &&
read_pnet(&f->net) == sock_net(sk)) {
match = f;
break;
}
}
err = -EINVAL;
if (match && match->flags != flags)
goto out;
if (!match) {
err = -ENOMEM;
match = kzalloc(sizeof(*match), GFP_KERNEL);
if (!match)
goto out;
write_pnet(&match->net, sock_net(sk));
match->id = id;
match->type = type;
match->flags = flags;
INIT_LIST_HEAD(&match->list);
spin_lock_init(&match->lock);
atomic_set(&match->sk_ref, 0);
fanout_init_data(match);
match->prot_hook.type = po->prot_hook.type;
match->prot_hook.dev = po->prot_hook.dev;
match->prot_hook.func = packet_rcv_fanout;
match->prot_hook.af_packet_priv = match;
match->prot_hook.id_match = match_fanout_group;
dev_add_pack(&match->prot_hook);
list_add(&match->list, &fanout_list);
}
err = -EINVAL;
if (match->type == type &&
match->prot_hook.type == po->prot_hook.type &&
match->prot_hook.dev == po->prot_hook.dev) {
err = -ENOSPC;
if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
__dev_remove_pack(&po->prot_hook);
po->fanout = match;
atomic_inc(&match->sk_ref);
__fanout_link(sk, po);
err = 0;
}
}
out:
mutex_unlock(&fanout_mutex);
if (err) {
kfree(po->rollover);
po->rollover = NULL;
}
return err;
}
| 1 |
[
"CWE-416",
"CWE-362"
] |
linux
|
d199fab63c11998a602205f7ee7ff7c05c97164b
| 209,544,008,583,980,080,000,000,000,000,000,000,000 | 94 |
packet: fix races in fanout_add()
Multiple threads can call fanout_add() at the same time.
We need to grab fanout_mutex earlier to avoid races that could
lead to one thread freeing po->rollover that was set by another thread.
Do the same in fanout_release(), for peace of mind, and to help us
finding lockdep issues earlier.
Fixes: dc99f600698d ("packet: Add fanout support.")
Fixes: 0648ab70afe6 ("packet: rollover prepare: per-socket state")
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Willem de Bruijn <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void sdma_start_hw_clean_up(struct sdma_engine *sde)
{
tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
}
| 0 |
[
"CWE-400",
"CWE-401"
] |
linux
|
34b3be18a04ecdc610aae4c48e5d1b799d8689f6
| 153,140,684,617,032,260,000,000,000,000,000,000,000 | 4 |
RDMA/hfi1: Prevent memory leak in sdma_init
In sdma_init if rhashtable_init fails the allocated memory for
tmp_sdma_rht should be released.
Fixes: 5a52a7acf7e2 ("IB/hfi1: NULL pointer dereference when freeing rhashtable")
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Navid Emamdoost <[email protected]>
Acked-by: Dennis Dalessandro <[email protected]>
Signed-off-by: Jason Gunthorpe <[email protected]>
|
xmlReallocLoc(void *ptr,size_t size, const char * file, int line)
{
MEMHDR *p, *tmp;
unsigned long number;
#ifdef DEBUG_MEMORY
size_t oldsize;
#endif
if (ptr == NULL)
return(xmlMallocLoc(size, file, line));
if (!xmlMemInitialized) xmlInitMemory();
TEST_POINT
p = CLIENT_2_HDR(ptr);
number = p->mh_number;
if (xmlMemStopAtBlock == number) xmlMallocBreakpoint();
if (p->mh_tag != MEMTAG) {
Mem_Tag_Err(p);
goto error;
}
p->mh_tag = ~MEMTAG;
xmlMutexLock(xmlMemMutex);
debugMemSize -= p->mh_size;
debugMemBlocks--;
#ifdef DEBUG_MEMORY
oldsize = p->mh_size;
#endif
#ifdef MEM_LIST
debugmem_list_delete(p);
#endif
xmlMutexUnlock(xmlMemMutex);
tmp = (MEMHDR *) realloc(p,RESERVE_SIZE+size);
if (!tmp) {
free(p);
goto error;
}
p = tmp;
if (xmlMemTraceBlockAt == ptr) {
xmlGenericError(xmlGenericErrorContext,
"%p : Realloced(%lu -> %lu) Ok\n",
xmlMemTraceBlockAt, (long unsigned)p->mh_size,
(long unsigned)size);
xmlMallocBreakpoint();
}
p->mh_tag = MEMTAG;
p->mh_number = number;
p->mh_type = REALLOC_TYPE;
p->mh_size = size;
p->mh_file = file;
p->mh_line = line;
xmlMutexLock(xmlMemMutex);
debugMemSize += size;
debugMemBlocks++;
if (debugMemSize > debugMaxMemSize) debugMaxMemSize = debugMemSize;
#ifdef MEM_LIST
debugmem_list_add(p);
#endif
xmlMutexUnlock(xmlMemMutex);
TEST_POINT
#ifdef DEBUG_MEMORY
xmlGenericError(xmlGenericErrorContext,
"Realloced(%d to %d) Ok\n", oldsize, size);
#endif
return(HDR_2_CLIENT(p));
error:
return(NULL);
}
| 1 |
[
"CWE-787"
] |
libxml2
|
897dffbae322b46b83f99a607d527058a72c51ed
| 164,003,070,910,184,140,000,000,000,000,000,000,000 | 72 |
Check for integer overflow in memory debug code
Fixes bug 783026.
Thanks to Pranjal Jumde for the report.
|
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
BoxBlurContext *s = ctx->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
int plane;
int cw = FF_CEIL_RSHIFT(inlink->w, s->hsub), ch = FF_CEIL_RSHIFT(in->height, s->vsub);
int w[4] = { inlink->w, cw, cw, inlink->w };
int h[4] = { in->height, ch, ch, in->height };
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
for (plane = 0; in->data[plane] && plane < 4; plane++)
hblur(out->data[plane], out->linesize[plane],
in ->data[plane], in ->linesize[plane],
w[plane], h[plane], s->radius[plane], s->power[plane],
s->temp);
for (plane = 0; in->data[plane] && plane < 4; plane++)
vblur(out->data[plane], out->linesize[plane],
out->data[plane], out->linesize[plane],
w[plane], h[plane], s->radius[plane], s->power[plane],
s->temp);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
| 1 |
[
"CWE-119",
"CWE-787"
] |
FFmpeg
|
e43a0a232dbf6d3c161823c2e07c52e76227a1bc
| 127,097,771,226,117,030,000,000,000,000,000,000,000 | 34 |
avfilter: fix plane validity checks
Fixes out of array accesses
Signed-off-by: Michael Niedermayer <[email protected]>
|
void SetParams(Membership* Memb)
{
memb = Memb;
PushParamRef(memb->chan->name);
}
| 0 |
[
"CWE-200",
"CWE-732"
] |
inspircd
|
4350a11c663b0d75f8119743bffb7736d87abd4d
| 273,015,913,359,568,800,000,000,000,000,000,000,000 | 5 |
Fix sending malformed pong messages in some cases.
|
i915_gem_execbuffer_relocate(struct drm_device *dev,
struct eb_objects *eb)
{
struct drm_i915_gem_object *obj;
int ret = 0;
/* This is the fast path and we cannot handle a pagefault whilst
* holding the struct mutex lest the user pass in the relocations
* contained within a mmaped bo. For in such a case we, the page
* fault handler would call i915_gem_fault() and we would try to
* acquire the struct mutex again. Obviously this is bad and so
* lockdep complains vehemently.
*/
pagefault_disable();
list_for_each_entry(obj, &eb->objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
break;
}
pagefault_enable();
return ret;
}
| 0 |
[] |
linux
|
3118a4f652c7b12c752f3222af0447008f9b2368
| 1,340,603,402,687,424,000,000,000,000,000,000,000 | 23 |
drm/i915: bounds check execbuffer relocation count
It is possible to wrap the counter used to allocate the buffer for
relocation copies. This could lead to heap writing overflows.
CVE-2013-0913
v3: collapse test, improve comment
v2: move check into validate_exec_list
Signed-off-by: Kees Cook <[email protected]>
Reported-by: Pinkie Pie
Cc: [email protected]
Reviewed-by: Chris Wilson <[email protected]>
Signed-off-by: Daniel Vetter <[email protected]>
|
goto_tabpage_tp(
tabpage_T *tp,
int trigger_enter_autocmds,
int trigger_leave_autocmds)
{
// Don't repeat a message in another tab page.
set_keep_msg(NULL, 0);
if (tp != curtab && leave_tabpage(tp->tp_curwin->w_buffer,
trigger_leave_autocmds) == OK)
{
if (valid_tabpage(tp))
enter_tabpage(tp, curbuf, trigger_enter_autocmds,
trigger_leave_autocmds);
else
enter_tabpage(curtab, curbuf, trigger_enter_autocmds,
trigger_leave_autocmds);
}
}
| 1 |
[
"CWE-476"
] |
vim
|
0f6e28f686dbb59ab3b562408ab9b2234797b9b1
| 298,925,399,444,516,940,000,000,000,000,000,000,000 | 19 |
patch 8.2.4428: crash when switching tabpage while in the cmdline window
Problem: Crash when switching tabpage while in the cmdline window.
Solution: Disallow switching tabpage when in the cmdline window.
|
xps_tifsWarningHandlerEx(thandle_t client_data, const char *module,
const char *fmt, va_list ap)
{
tifs_io_xps *tiffio = (tifs_io_xps *)client_data;
gx_device_xps *pdev = tiffio->pdev;
int count;
char buf[TIFF_PRINT_BUF_LENGTH];
count = vsnprintf(buf, sizeof(buf), fmt, ap);
if (count >= sizeof(buf) || count < 0) { /* C99 || MSVC */
dmlprintf1(pdev->memory, "%s", buf);
dmlprintf1(pdev->memory, "%s\n", tifs_msg_truncated);
}
else {
dmlprintf1(pdev->memory, "%s\n", buf);
}
}
| 0 |
[] |
ghostpdl
|
94d8955cb7725eb5f3557ddc02310c76124fdd1a
| 192,312,493,538,796,470,000,000,000,000,000,000,000 | 17 |
Bug 701818: better handling of error during PS/PDF image
In the xps device, if an error occurred after xps_begin_image() but before
xps_image_end_image(), *if* the Postscript had called 'restore' as part of the
error handling, the image enumerator would have been freed (by the restore)
despite the xps device still holding a reference to it.
Simply changing to an allocator unaffected save/restore doesn't work because
the enumerator holds references to other objects (graphics state, color space,
possibly others) whose lifespans are inherently controlled by save/restore.
So, add a finalize method for the XPS device's image enumerator
(xps_image_enum_finalize()) which takes over cleaning up the memory it allocates
and also deals with cleaning up references from the device to the enumerator
and from the enumerator to the device.
|
static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
{
int ret;
down_read(&rbd_dev->lock_rwsem);
dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
rbd_dev->lock_state);
if (__rbd_is_lock_owner(rbd_dev)) {
up_read(&rbd_dev->lock_rwsem);
return 0;
}
up_read(&rbd_dev->lock_rwsem);
down_write(&rbd_dev->lock_rwsem);
dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
rbd_dev->lock_state);
if (__rbd_is_lock_owner(rbd_dev)) {
up_write(&rbd_dev->lock_rwsem);
return 0;
}
ret = rbd_try_lock(rbd_dev);
if (ret < 0) {
rbd_warn(rbd_dev, "failed to lock header: %d", ret);
if (ret == -EBLACKLISTED)
goto out;
ret = 1; /* request lock anyway */
}
if (ret > 0) {
up_write(&rbd_dev->lock_rwsem);
return ret;
}
rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
rbd_assert(list_empty(&rbd_dev->running_list));
ret = rbd_post_acquire_action(rbd_dev);
if (ret) {
rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
/*
* Can't stay in RBD_LOCK_STATE_LOCKED because
* rbd_lock_add_request() would let the request through,
* assuming that e.g. object map is locked and loaded.
*/
rbd_unlock(rbd_dev);
}
out:
wake_lock_waiters(rbd_dev, ret);
up_write(&rbd_dev->lock_rwsem);
return ret;
}
| 0 |
[
"CWE-863"
] |
linux
|
f44d04e696feaf13d192d942c4f14ad2e117065a
| 162,835,132,482,169,970,000,000,000,000,000,000,000 | 53 |
rbd: require global CAP_SYS_ADMIN for mapping and unmapping
It turns out that currently we rely only on sysfs attribute
permissions:
$ ll /sys/bus/rbd/{add*,remove*}
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add_single_major
--w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/remove
--w------- 1 root root 4096 Sep 3 20:38 /sys/bus/rbd/remove_single_major
This means that images can be mapped and unmapped (i.e. block devices
can be created and deleted) by a UID 0 process even after it drops all
privileges or by any process with CAP_DAC_OVERRIDE in its user namespace
as long as UID 0 is mapped into that user namespace.
Be consistent with other virtual block devices (loop, nbd, dm, md, etc)
and require CAP_SYS_ADMIN in the initial user namespace for mapping and
unmapping, and also for dumping the configuration string and refreshing
the image header.
Cc: [email protected]
Signed-off-by: Ilya Dryomov <[email protected]>
Reviewed-by: Jeff Layton <[email protected]>
|
main()
{
union {
int l;
char c[sizeof(int)];
} u;
int x0, x1, x2, x3;
u.l = 0x012345678;
x0 = u.c[0];
x3 = u.c[sizeof (int) - 1];
printf ("x0 = 0x%x x3 = 0x%x (%s)\n", x0, x3, x3 == 0x78 ? "bigendian" : "littleendian");
x0 = (u.l >> 24) & 0xff;
x1 = (u.l >> 16) & 0xff;
x2 = (u.l >> 8) & 0xff;
x3 = u.l & 0xff;
printf ("big endian x0:x3: %x %x %x %x\n", x0, x1, x2, x3);
}
| 0 |
[] |
bash
|
863d31ae775d56b785dc5b0105b6d251515d81d5
| 254,971,518,625,055,440,000,000,000,000,000,000,000 | 19 |
commit bash-20120224 snapshot
|
static void cleanup_timers(struct list_head *head)
{
cleanup_timers_list(head);
cleanup_timers_list(++head);
cleanup_timers_list(++head);
}
| 0 |
[
"CWE-190"
] |
linux
|
78c9c4dfbf8c04883941445a195276bb4bb92c76
| 160,263,593,982,879,370,000,000,000,000,000,000,000 | 6 |
posix-timers: Sanitize overrun handling
The posix timer overrun handling is broken because the forwarding functions
can return a huge number of overruns which does not fit in an int. As a
consequence timer_getoverrun(2) and siginfo::si_overrun can turn into
random number generators.
The k_clock::timer_forward() callbacks return a 64 bit value now. Make
k_itimer::ti_overrun[_last] 64bit as well, so the kernel internal
accounting is correct. 3Remove the temporary (int) casts.
Add a helper function which clamps the overrun value returned to user space
via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value
between 0 and INT_MAX. INT_MAX is an indicator for user space that the
overrun value has been clamped.
Reported-by: Team OWL337 <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: John Stultz <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Michael Kerrisk <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
|
display_showcmd(void)
{
int len;
cursor_off();
len = (int)STRLEN(showcmd_buf);
if (len == 0)
showcmd_is_clear = TRUE;
else
{
screen_puts(showcmd_buf, (int)Rows - 1, sc_col, 0);
showcmd_is_clear = FALSE;
}
// clear the rest of an old message by outputting up to SHOWCMD_COLS
// spaces
screen_puts((char_u *)" " + len, (int)Rows - 1, sc_col + len, 0);
setcursor(); // put cursor back where it belongs
}
| 0 |
[
"CWE-416"
] |
vim
|
e2fa213cf571041dbd04ab0329303ffdc980678a
| 3,189,194,491,805,750,000,000,000,000,000,000,000 | 21 |
patch 8.2.5024: using freed memory with "]d"
Problem: Using freed memory with "]d".
Solution: Copy the pattern before searching.
|
static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
{
unsigned i;
u8 expected;
u8 *buf = urb->transfer_buffer;
unsigned len = urb->actual_length;
unsigned maxpacket = get_maxpacket(urb->dev, urb->pipe);
int ret = check_guard_bytes(tdev, urb);
if (ret)
return ret;
for (i = 0; i < len; i++, buf++) {
switch (pattern) {
/* all-zeroes has no synchronization issues */
case 0:
expected = 0;
break;
/* mod63 stays in sync with short-terminated transfers,
* or otherwise when host and gadget agree on how large
* each usb transfer request should be. resync is done
* with set_interface or set_config.
*/
case 1: /* mod63 */
expected = (i % maxpacket) % 63;
break;
/* always fail unsupported patterns */
default:
expected = !*buf;
break;
}
if (*buf == expected)
continue;
ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
return -EINVAL;
}
return 0;
}
| 0 |
[
"CWE-476"
] |
linux
|
7c80f9e4a588f1925b07134bb2e3689335f6c6d8
| 103,973,262,594,843,700,000,000,000,000,000,000,000 | 38 |
usb: usbtest: fix NULL pointer dereference
If the usbtest driver encounters a device with an IN bulk endpoint but
no OUT bulk endpoint, it will try to dereference a NULL pointer
(out->desc.bEndpointAddress). The problem can be solved by adding a
missing test.
Signed-off-by: Alan Stern <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Tested-by: Andrey Konovalov <[email protected]>
Signed-off-by: Felipe Balbi <[email protected]>
|
parser_parse_tagged_template_literal (parser_context_t *context_p) /**< context */
{
JERRY_ASSERT (context_p->token.type == LEXER_TEMPLATE_LITERAL);
uint32_t call_arguments = 0;
ecma_collection_t *collection_p;
if (context_p->tagged_template_literal_cp == JMEM_CP_NULL)
{
collection_p = ecma_new_collection ();
ECMA_SET_INTERNAL_VALUE_POINTER (context_p->tagged_template_literal_cp, collection_p);
}
else
{
collection_p = ECMA_GET_INTERNAL_VALUE_POINTER (ecma_collection_t, context_p->tagged_template_literal_cp);
if (collection_p->item_count > CBC_MAXIMUM_BYTE_VALUE)
{
parser_raise_error (context_p, PARSER_ERR_ARGUMENT_LIMIT_REACHED);
}
}
const uint32_t tagged_id = collection_p->item_count;
uint32_t prop_idx = 0;
ecma_object_t *raw_strings_p;
ecma_object_t *template_obj_p = parser_new_tagged_template_literal (&raw_strings_p);
ecma_collection_push_back (collection_p, ecma_make_object_value (template_obj_p));
parser_tagged_template_literal_append_strings (context_p, template_obj_p, raw_strings_p, prop_idx++);
call_arguments++;
parser_emit_cbc_ext_call (context_p, CBC_EXT_GET_TAGGED_TEMPLATE_LITERAL, tagged_id);
while (context_p->source_p[-1] != LIT_CHAR_GRAVE_ACCENT)
{
JERRY_ASSERT (context_p->source_p[-1] == LIT_CHAR_LEFT_BRACE);
lexer_next_token (context_p);
if (++call_arguments > CBC_MAXIMUM_BYTE_VALUE)
{
parser_raise_error (context_p, PARSER_ERR_ARGUMENT_LIMIT_REACHED);
}
parser_parse_expression (context_p, PARSE_EXPR);
if (context_p->token.type != LEXER_RIGHT_BRACE)
{
parser_raise_error (context_p, PARSER_ERR_RIGHT_BRACE_EXPECTED);
}
context_p->source_p--;
context_p->column--;
lexer_parse_string (context_p, LEXER_STRING_NO_OPTS);
parser_tagged_template_literal_append_strings (context_p, template_obj_p, raw_strings_p, prop_idx++);
}
parser_tagged_template_literal_finalize (template_obj_p, raw_strings_p);
return call_arguments;
} /* parser_parse_tagged_template_literal */
| 0 |
[
"CWE-416"
] |
jerryscript
|
3bcd48f72d4af01d1304b754ef19fe1a02c96049
| 61,718,706,489,498,450,000,000,000,000,000,000,000 | 60 |
Improve parse_identifier (#4691)
Ascii string length is no longer computed during string allocation.
JerryScript-DCO-1.0-Signed-off-by: Daniel Batiz [email protected]
|
QPDFFormFieldObjectHelper::isNull()
{
return this->oh.isNull();
}
| 0 |
[
"CWE-787"
] |
qpdf
|
d71f05ca07eb5c7cfa4d6d23e5c1f2a800f52e8e
| 221,202,031,531,645,220,000,000,000,000,000,000,000 | 4 |
Fix sign and conversion warnings (major)
This makes all integer type conversions that have potential data loss
explicit with calls that do range checks and raise an exception. After
this commit, qpdf builds with no warnings when -Wsign-conversion
-Wconversion is used with gcc or clang or when -W3 -Wd4800 is used
with MSVC. This significantly reduces the likelihood of potential
crashes from bogus integer values.
There are some parts of the code that take int when they should take
size_t or an offset. Such places would make qpdf not support files
with more than 2^31 of something that usually wouldn't be so large. In
the event that such a file shows up and is valid, at least qpdf would
raise an error in the right spot so the issue could be legitimately
addressed rather than failing in some weird way because of a silent
overflow condition.
|
//! Load image from a DLM file \newinstance.
static CImg<T> get_load_dlm(const char *const filename) {
return CImg<T>().load_dlm(filename);
| 0 |
[
"CWE-125"
] |
CImg
|
10af1e8c1ad2a58a0a3342a856bae63e8f257abb
| 51,450,468,887,911,530,000,000,000,000,000,000,000 | 3 |
Fix other issues in 'CImg<T>::load_bmp()'.
|
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
if (c->x86_vendor != X86_VENDOR_INTEL)
return 0;
if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
return 0;
return 1;
}
| 0 |
[] |
linux
|
dbbe2ad02e9df26e372f38cc3e70dab9222c832e
| 300,553,678,733,822,900,000,000,000,000,000,000,000 | 10 |
x86/speculation: Prevent rogue cross-process SSBD shutdown
On context switch the change of TIF_SSBD and TIF_SPEC_IB are evaluated
to adjust the mitigations accordingly. This is optimized to avoid the
expensive MSR write if not needed.
This optimization is buggy and allows an attacker to shutdown the SSBD
protection of a victim process.
The update logic reads the cached base value for the speculation control
MSR which has neither the SSBD nor the STIBP bit set. It then OR's the
SSBD bit only when TIF_SSBD is different and requests the MSR update.
That means if TIF_SSBD of the previous and next task are the same, then
the base value is not updated, even if TIF_SSBD is set. The MSR write is
not requested.
Subsequently if the TIF_STIBP bit differs then the STIBP bit is updated
in the base value and the MSR is written with a wrong SSBD value.
This was introduced when the per task/process conditional STIPB
switching was added on top of the existing SSBD switching.
It is exploitable if the attacker creates a process which enforces SSBD
and has the contrary value of STIBP than the victim process (i.e. if the
victim process enforces STIBP, the attacker process must not enforce it;
if the victim process does not enforce STIBP, the attacker process must
enforce it) and schedule it on the same core as the victim process. If
the victim runs after the attacker the victim becomes vulnerable to
Spectre V4.
To fix this, update the MSR value independent of the TIF_SSBD difference
and dependent on the SSBD mitigation method available. This ensures that
a subsequent STIPB initiated MSR write has the correct state of SSBD.
[ tglx: Handle X86_FEATURE_VIRT_SSBD & X86_FEATURE_VIRT_SSBD correctly
and massaged changelog ]
Fixes: 5bfbe3ad5840 ("x86/speculation: Prepare for per task indirect branch speculation control")
Signed-off-by: Anthony Steinhauser <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: [email protected]
|
static bool is_sector_request_lun_aligned(int64_t sector_num, int nb_sectors,
IscsiLun *iscsilun)
{
assert(nb_sectors <= BDRV_REQUEST_MAX_SECTORS);
return is_byte_request_lun_aligned(sector_num << BDRV_SECTOR_BITS,
nb_sectors << BDRV_SECTOR_BITS,
iscsilun);
}
| 0 |
[
"CWE-125"
] |
qemu
|
ff0507c239a246fd7215b31c5658fc6a3ee1e4c5
| 262,682,472,201,653,120,000,000,000,000,000,000,000 | 8 |
block/iscsi:fix heap-buffer-overflow in iscsi_aio_ioctl_cb
There is an overflow, the source 'datain.data[2]' is 100 bytes,
but the 'ss' is 252 bytes.This may cause a security issue because
we can access a lot of unrelated memory data.
The len for sbp copy data should take the minimum of mx_sb_len and
sb_len_wr, not the maximum.
If we use iscsi device for VM backend storage, ASAN show stack:
READ of size 252 at 0xfffd149dcfc4 thread T0
#0 0xaaad433d0d34 in __asan_memcpy (aarch64-softmmu/qemu-system-aarch64+0x2cb0d34)
#1 0xaaad45f9d6d0 in iscsi_aio_ioctl_cb /qemu/block/iscsi.c:996:9
#2 0xfffd1af0e2dc (/usr/lib64/iscsi/libiscsi.so.8+0xe2dc)
#3 0xfffd1af0d174 (/usr/lib64/iscsi/libiscsi.so.8+0xd174)
#4 0xfffd1af19fac (/usr/lib64/iscsi/libiscsi.so.8+0x19fac)
#5 0xaaad45f9acc8 in iscsi_process_read /qemu/block/iscsi.c:403:5
#6 0xaaad4623733c in aio_dispatch_handler /qemu/util/aio-posix.c:467:9
#7 0xaaad4622f350 in aio_dispatch_handlers /qemu/util/aio-posix.c:510:20
#8 0xaaad4622f350 in aio_dispatch /qemu/util/aio-posix.c:520
#9 0xaaad46215944 in aio_ctx_dispatch /qemu/util/async.c:298:5
#10 0xfffd1bed12f4 in g_main_context_dispatch (/lib64/libglib-2.0.so.0+0x512f4)
#11 0xaaad46227de0 in glib_pollfds_poll /qemu/util/main-loop.c:219:9
#12 0xaaad46227de0 in os_host_main_loop_wait /qemu/util/main-loop.c:242
#13 0xaaad46227de0 in main_loop_wait /qemu/util/main-loop.c:518
#14 0xaaad43d9d60c in qemu_main_loop /qemu/softmmu/vl.c:1662:9
#15 0xaaad4607a5b0 in main /qemu/softmmu/main.c:49:5
#16 0xfffd1a460b9c in __libc_start_main (/lib64/libc.so.6+0x20b9c)
#17 0xaaad43320740 in _start (aarch64-softmmu/qemu-system-aarch64+0x2c00740)
0xfffd149dcfc4 is located 0 bytes to the right of 100-byte region [0xfffd149dcf60,0xfffd149dcfc4)
allocated by thread T0 here:
#0 0xaaad433d1e70 in __interceptor_malloc (aarch64-softmmu/qemu-system-aarch64+0x2cb1e70)
#1 0xfffd1af0e254 (/usr/lib64/iscsi/libiscsi.so.8+0xe254)
#2 0xfffd1af0d174 (/usr/lib64/iscsi/libiscsi.so.8+0xd174)
#3 0xfffd1af19fac (/usr/lib64/iscsi/libiscsi.so.8+0x19fac)
#4 0xaaad45f9acc8 in iscsi_process_read /qemu/block/iscsi.c:403:5
#5 0xaaad4623733c in aio_dispatch_handler /qemu/util/aio-posix.c:467:9
#6 0xaaad4622f350 in aio_dispatch_handlers /qemu/util/aio-posix.c:510:20
#7 0xaaad4622f350 in aio_dispatch /qemu/util/aio-posix.c:520
#8 0xaaad46215944 in aio_ctx_dispatch /qemu/util/async.c:298:5
#9 0xfffd1bed12f4 in g_main_context_dispatch (/lib64/libglib-2.0.so.0+0x512f4)
#10 0xaaad46227de0 in glib_pollfds_poll /qemu/util/main-loop.c:219:9
#11 0xaaad46227de0 in os_host_main_loop_wait /qemu/util/main-loop.c:242
#12 0xaaad46227de0 in main_loop_wait /qemu/util/main-loop.c:518
#13 0xaaad43d9d60c in qemu_main_loop /qemu/softmmu/vl.c:1662:9
#14 0xaaad4607a5b0 in main /qemu/softmmu/main.c:49:5
#15 0xfffd1a460b9c in __libc_start_main (/lib64/libc.so.6+0x20b9c)
#16 0xaaad43320740 in _start (aarch64-softmmu/qemu-system-aarch64+0x2c00740)
Reported-by: Euler Robot <[email protected]>
Signed-off-by: Chen Qun <[email protected]>
Reviewed-by: Stefan Hajnoczi <[email protected]>
Message-id: [email protected]
Reviewed-by: Daniel P. Berrangé <[email protected]>
Signed-off-by: Peter Maydell <[email protected]>
|
static int close_connect_only(struct Curl_easy *data,
struct connectdata *conn, void *param)
{
(void)param;
if(data->state.lastconnect_id != conn->connection_id)
return 0;
if(!conn->bits.connect_only)
return 1;
connclose(conn, "Removing connect-only easy handle");
conn->bits.connect_only = FALSE;
return 1;
}
| 0 |
[
"CWE-416",
"CWE-295"
] |
curl
|
7f4a9a9b2a49547eae24d2e19bc5c346e9026479
| 37,042,879,446,772,070,000,000,000,000,000,000,000 | 15 |
openssl: associate/detach the transfer from connection
CVE-2021-22901
Bug: https://curl.se/docs/CVE-2021-22901.html
|
TEST_F(OwnedImplTest, LinearizeDrainTracking) {
constexpr uint32_t SmallChunk = 200;
constexpr uint32_t LargeChunk = 16384 - SmallChunk;
constexpr uint32_t LinearizeSize = SmallChunk + LargeChunk;
// Create a buffer with a eclectic combination of buffer OwnedSlice and UnownedSlices that will
// help us explore the properties of linearize.
Buffer::OwnedImpl buffer;
// Large add below the target linearize size.
testing::MockFunction<void()> tracker1;
buffer.add(std::string(LargeChunk, 'a'));
buffer.addDrainTracker(tracker1.AsStdFunction());
// Unowned slice which causes some fragmentation.
testing::MockFunction<void()> tracker2;
testing::MockFunction<void(const void*, size_t, const BufferFragmentImpl*)>
release_callback_tracker;
std::string frag_input(2 * SmallChunk, 'b');
BufferFragmentImpl frag(frag_input.c_str(), frag_input.size(),
release_callback_tracker.AsStdFunction());
buffer.addBufferFragment(frag);
buffer.addDrainTracker(tracker2.AsStdFunction());
// And an unowned slice with 0 size, because.
testing::MockFunction<void()> tracker3;
testing::MockFunction<void(const void*, size_t, const BufferFragmentImpl*)>
release_callback_tracker2;
BufferFragmentImpl frag2(nullptr, 0, release_callback_tracker2.AsStdFunction());
buffer.addBufferFragment(frag2);
buffer.addDrainTracker(tracker3.AsStdFunction());
// Add a very large chunk
testing::MockFunction<void()> tracker4;
buffer.add(std::string(LargeChunk + LinearizeSize, 'c'));
buffer.addDrainTracker(tracker4.AsStdFunction());
// Small adds that create no gaps.
testing::MockFunction<void()> tracker5;
for (int i = 0; i < 105; ++i) {
buffer.add(std::string(SmallChunk, 'd'));
}
buffer.addDrainTracker(tracker5.AsStdFunction());
expectSlices({{16184, 136, 16320},
{400, 0, 400},
{0, 0, 0},
{32704, 0, 32704},
{4032, 0, 4032},
{4032, 0, 4032},
{4032, 0, 4032},
{4032, 0, 4032},
{4032, 0, 4032},
{704, 3328, 4032}},
buffer);
testing::InSequence s;
testing::MockFunction<void(int, int)> drain_tracker;
testing::MockFunction<void()> done_tracker;
EXPECT_CALL(tracker1, Call());
EXPECT_CALL(release_callback_tracker, Call(_, _, _));
EXPECT_CALL(tracker2, Call());
EXPECT_CALL(drain_tracker, Call(3 * LargeChunk + 108 * SmallChunk, 16384));
EXPECT_CALL(release_callback_tracker2, Call(_, _, _));
EXPECT_CALL(tracker3, Call());
EXPECT_CALL(tracker4, Call());
EXPECT_CALL(drain_tracker, Call(2 * LargeChunk + 107 * SmallChunk, 16384));
EXPECT_CALL(drain_tracker, Call(LargeChunk + 106 * SmallChunk, 16384));
EXPECT_CALL(drain_tracker, Call(105 * SmallChunk, 16384));
EXPECT_CALL(tracker5, Call());
EXPECT_CALL(drain_tracker, Call(4616, 4616));
EXPECT_CALL(done_tracker, Call());
for (auto& expected_first_slice : std::vector<std::vector<int>>{{16584, 3832, 20416},
{32904, 3896, 36800},
{16520, 3896, 36800},
{20296, 120, 20416},
{4616, 3512, 8128}}) {
const uint32_t write_size = std::min<uint32_t>(LinearizeSize, buffer.length());
buffer.linearize(write_size);
expectFirstSlice(expected_first_slice, buffer);
drain_tracker.Call(buffer.length(), write_size);
buffer.drain(write_size);
}
done_tracker.Call();
expectSlices({}, buffer);
}
| 0 |
[
"CWE-401"
] |
envoy
|
5eba69a1f375413fb93fab4173f9c393ac8c2818
| 181,982,844,158,852,870,000,000,000,000,000,000,000 | 87 |
[buffer] Add on-drain hook to buffer API and use it to avoid fragmentation due to tracking of H2 data and control frames in the output buffer (#144)
Signed-off-by: antonio <[email protected]>
|
LONG ValidateSignature(HWND hDlg, const char* path)
{
LONG r = TRUST_E_SYSTEM_ERROR;
WINTRUST_DATA trust_data = { 0 };
WINTRUST_FILE_INFO trust_file = { 0 };
PF_TYPE_DECL(WINAPI, long, WinVerifyTrustEx, (HWND, GUID*, WINTRUST_DATA*));
PF_INIT(WinVerifyTrustEx, WinTrust);
GUID guid_generic_verify = // WINTRUST_ACTION_GENERIC_VERIFY_V2
{ 0xaac56b, 0xcd44, 0x11d0,{ 0x8c, 0xc2, 0x0, 0xc0, 0x4f, 0xc2, 0x95, 0xee } };
char *signature_name;
size_t i;
uint64_t current_ts, update_ts;
// Check the signature name. Make it specific enough (i.e. don't simply check for "Akeo")
// so that, besides hacking our server, it'll place an extra hurdle on any malicious entity
// into also fooling a C.A. to issue a certificate that passes our test.
signature_name = GetSignatureName(path, cert_country, (hDlg == INVALID_HANDLE_VALUE));
if (signature_name == NULL) {
uprintf("PKI: Could not get signature name");
if (hDlg != INVALID_HANDLE_VALUE)
MessageBoxExU(hDlg, lmprintf(MSG_284), lmprintf(MSG_283), MB_OK | MB_ICONERROR | MB_IS_RTL, selected_langid);
return TRUST_E_NOSIGNATURE;
}
for (i = 0; i < ARRAYSIZE(cert_name); i++) {
if (strcmp(signature_name, cert_name[i]) == 0)
break;
}
if (i >= ARRAYSIZE(cert_name)) {
uprintf("PKI: Signature '%s' is unexpected...", signature_name);
if ((hDlg == INVALID_HANDLE_VALUE) || (MessageBoxExU(hDlg,
lmprintf(MSG_285, signature_name), lmprintf(MSG_283),
MB_YESNO | MB_ICONWARNING | MB_IS_RTL, selected_langid) != IDYES))
return TRUST_E_EXPLICIT_DISTRUST;
}
trust_file.cbStruct = sizeof(trust_file);
trust_file.pcwszFilePath = utf8_to_wchar(path);
if (trust_file.pcwszFilePath == NULL) {
uprintf("PKI: Unable to convert '%s' to UTF16", path);
return ERROR_SEVERITY_ERROR | FAC(FACILITY_CERT) | ERROR_NOT_ENOUGH_MEMORY;
}
trust_data.cbStruct = sizeof(trust_data);
// NB: WTD_UI_ALL can result in ERROR_SUCCESS even if the signature validation fails,
// because it still prompts the user to run untrusted software, even after explicitly
// notifying them that the signature invalid (and of course Microsoft had to make
// that UI prompt a bit too similar to the other benign prompt you get when running
// trusted software, which, as per cert.org's assessment, may confuse non-security
// conscious-users who decide to gloss over these kind of notifications).
trust_data.dwUIChoice = WTD_UI_NONE;
// We just downloaded from the Internet, so we should be able to check revocation
trust_data.fdwRevocationChecks = WTD_REVOKE_WHOLECHAIN;
// 0x400 = WTD_MOTW for Windows 8.1 or later
trust_data.dwProvFlags = WTD_REVOCATION_CHECK_CHAIN | 0x400;
trust_data.dwUnionChoice = WTD_CHOICE_FILE;
trust_data.pFile = &trust_file;
if (pfWinVerifyTrustEx != NULL)
r = pfWinVerifyTrustEx(INVALID_HANDLE_VALUE, &guid_generic_verify, &trust_data);
safe_free(trust_file.pcwszFilePath);
switch (r) {
case ERROR_SUCCESS:
// hDlg = INVALID_HANDLE_VALUE is used when validating the Fido PS1 script
if (hDlg == INVALID_HANDLE_VALUE)
break;
// Verify that the timestamp of the downloaded update is in the future of our current one.
// This is done to prevent the use of an officially signed, but older binary, as potential attack vector.
current_ts = GetSignatureTimeStamp(NULL);
if (current_ts == 0ULL) {
uprintf("PKI: Cannot retrieve the current binary's timestamp - Aborting update");
r = TRUST_E_TIME_STAMP;
} else {
update_ts = GetSignatureTimeStamp(path);
if (update_ts < current_ts) {
uprintf("PKI: Update timestamp (%" PRIi64 ") is younger than ours (%" PRIi64 ") - Aborting update", update_ts, current_ts);
r = TRUST_E_TIME_STAMP;
}
}
if ((r != ERROR_SUCCESS) && (force_update < 2))
MessageBoxExU(hDlg, lmprintf(MSG_300), lmprintf(MSG_299), MB_OK | MB_ICONERROR | MB_IS_RTL, selected_langid);
break;
case TRUST_E_NOSIGNATURE:
// Should already have been reported, but since we have a custom message for it...
uprintf("PKI: File does not appear to be signed: %s", WinPKIErrorString());
if (hDlg != INVALID_HANDLE_VALUE)
MessageBoxExU(hDlg, lmprintf(MSG_284), lmprintf(MSG_283), MB_OK | MB_ICONERROR | MB_IS_RTL, selected_langid);
break;
default:
uprintf("PKI: Failed to validate signature: %s", WinPKIErrorString());
if (hDlg != INVALID_HANDLE_VALUE)
MessageBoxExU(hDlg, lmprintf(MSG_240), lmprintf(MSG_283), MB_OK | MB_ICONERROR | MB_IS_RTL, selected_langid);
break;
}
return r;
}
| 0 |
[
"CWE-94"
] |
rufus
|
19472668370aacec0dba9dda306601cfc4a4ed7e
| 277,363,723,060,616,900,000,000,000,000,000,000,000 | 96 |
[pki] don't link with wintrust.lib
* WinTrust.lib is responsible for the MSASN1.dll sideloading issue described in #1877,
so, since we only use it for WinVerifyTrustEx(), hook into that function manually.
* Closes #1877 for the MinGW side.
* Note that we will probably try to use the method suggested by @assarbad and documented at
https://stackoverflow.com/questions/1851267/mingw-gcc-delay-loaded-dll-equivalent/70416894#70416894
to try to put an end to the problem of DLL side loading.
|
void hashTypeConvert(robj *o, int enc) {
if (o->encoding == OBJ_ENCODING_ZIPLIST) {
hashTypeConvertZiplist(o, enc);
} else if (o->encoding == OBJ_ENCODING_HT) {
serverPanic("Not implemented");
} else {
serverPanic("Unknown hash encoding");
}
}
| 0 |
[
"CWE-190"
] |
redis
|
f6a40570fa63d5afdd596c78083d754081d80ae3
| 287,387,615,259,521,300,000,000,000,000,000,000,000 | 9 |
Fix ziplist and listpack overflows and truncations (CVE-2021-32627, CVE-2021-32628)
- fix possible heap corruption in ziplist and listpack resulting by trying to
allocate more than the maximum size of 4GB.
- prevent ziplist (hash and zset) from reaching size of above 1GB, will be
converted to HT encoding, that's not a useful size.
- prevent listpack (stream) from reaching size of above 1GB.
- XADD will start a new listpack if the new record may cause the previous
listpack to grow over 1GB.
- XADD will respond with an error if a single stream record is over 1GB
- List type (ziplist in quicklist) was truncating strings that were over 4GB,
now it'll respond with an error.
|
static int f_midi_out_open(struct snd_rawmidi_substream *substream)
{
struct f_midi *midi = substream->rmidi->private_data;
if (substream->number >= MAX_PORTS)
return -EINVAL;
VDBG(midi, "%s()\n", __func__);
midi->out_substream[substream->number] = substream;
return 0;
}
| 0 |
[
"CWE-415"
] |
linux
|
7fafcfdf6377b18b2a726ea554d6e593ba44349f
| 81,547,302,734,880,300,000,000,000,000,000,000,000 | 11 |
USB: gadget: f_midi: fixing a possible double-free in f_midi
It looks like there is a possibility of a double-free vulnerability on an
error path of the f_midi_set_alt function in the f_midi driver. If the
path is feasible then free_ep_req gets called twice:
req->complete = f_midi_complete;
err = usb_ep_queue(midi->out_ep, req, GFP_ATOMIC);
=> ...
usb_gadget_giveback_request
=>
f_midi_complete (CALLBACK)
(inside f_midi_complete, for various cases of status)
free_ep_req(ep, req); // first kfree
if (err) {
ERROR(midi, "%s: couldn't enqueue request: %d\n",
midi->out_ep->name, err);
free_ep_req(midi->out_ep, req); // second kfree
return err;
}
The double-free possibility was introduced with commit ad0d1a058eac
("usb: gadget: f_midi: fix leak on failed to enqueue out requests").
Found by MOXCAFE tool.
Signed-off-by: Tuba Yavuz <[email protected]>
Fixes: ad0d1a058eac ("usb: gadget: f_midi: fix leak on failed to enqueue out requests")
Acked-by: Felipe Balbi <[email protected]>
Cc: stable <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
explicit DelayedDelivery(Pipe *p)
: pipe(p),
delay_lock("Pipe::DelayedDelivery::delay_lock"), flush_count(0),
active_flush(false),
stop_delayed_delivery(false),
delay_dispatching(false),
stop_fast_dispatching_flag(false) { }
| 0 |
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
| 5,047,567,395,218,260,500,000,000,000,000,000,000 | 7 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
|
static void add_sit_entry(unsigned int segno, struct list_head *head)
{
struct sit_entry_set *ses;
unsigned int start_segno = START_SEGNO(segno);
list_for_each_entry(ses, head, set_list) {
if (ses->start_segno == start_segno) {
ses->entry_cnt++;
adjust_sit_entry_set(ses, head);
return;
}
}
ses = grab_sit_entry_set();
ses->start_segno = start_segno;
ses->entry_cnt++;
list_add(&ses->set_list, head);
}
| 0 |
[
"CWE-20"
] |
linux
|
638164a2718f337ea224b747cf5977ef143166a4
| 264,286,903,320,069,860,000,000,000,000,000,000,000 | 19 |
f2fs: fix potential panic during fstrim
As Ju Hyung Park reported:
"When 'fstrim' is called for manual trim, a BUG() can be triggered
randomly with this patch.
I'm seeing this issue on both x86 Desktop and arm64 Android phone.
On x86 Desktop, this was caused during Ubuntu boot-up. I have a
cronjob installed which calls 'fstrim -v /' during boot. On arm64
Android, this was caused during GC looping with 1ms gc_min_sleep_time
& gc_max_sleep_time."
Root cause of this issue is that f2fs_wait_discard_bios can only be
used by f2fs_put_super, because during put_super there must be no
other referrers, so it can ignore discard entry's reference count
when removing the entry, otherwise in other caller we will hit bug_on
in __remove_discard_cmd as there may be other issuer added reference
count in discard entry.
Thread A Thread B
- issue_discard_thread
- f2fs_ioc_fitrim
- f2fs_trim_fs
- f2fs_wait_discard_bios
- __issue_discard_cmd
- __submit_discard_cmd
- __wait_discard_cmd
- dc->ref++
- __wait_one_discard_bio
- __wait_discard_cmd
- __remove_discard_cmd
- f2fs_bug_on(sbi, dc->ref)
Fixes: 969d1b180d987c2be02de890d0fff0f66a0e80de
Reported-by: Ju Hyung Park <[email protected]>
Signed-off-by: Chao Yu <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
|
raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned int n;
int err;
err = kstrtouint(buf, 10, &n);
if (err < 0)
return err;
err = mddev_lock(mddev);
if (err)
return err;
if (mddev->pers)
err = update_raid_disks(mddev, n);
else if (mddev->reshape_position != MaxSector) {
struct md_rdev *rdev;
int olddisks = mddev->raid_disks - mddev->delta_disks;
err = -EINVAL;
rdev_for_each(rdev, mddev) {
if (olddisks < n &&
rdev->data_offset < rdev->new_data_offset)
goto out_unlock;
if (olddisks > n &&
rdev->data_offset > rdev->new_data_offset)
goto out_unlock;
}
err = 0;
mddev->delta_disks = n - olddisks;
mddev->raid_disks = n;
mddev->reshape_backwards = (mddev->delta_disks < 0);
} else
mddev->raid_disks = n;
out_unlock:
mddev_unlock(mddev);
return err ? err : len;
}
| 0 |
[
"CWE-200"
] |
linux
|
b6878d9e03043695dbf3fa1caa6dfc09db225b16
| 294,133,463,353,868,170,000,000,000,000,000,000,000 | 37 |
md: use kzalloc() when bitmap is disabled
In drivers/md/md.c get_bitmap_file() uses kmalloc() for creating a
mdu_bitmap_file_t called "file".
5769 file = kmalloc(sizeof(*file), GFP_NOIO);
5770 if (!file)
5771 return -ENOMEM;
This structure is copied to user space at the end of the function.
5786 if (err == 0 &&
5787 copy_to_user(arg, file, sizeof(*file)))
5788 err = -EFAULT
But if bitmap is disabled only the first byte of "file" is initialized
with zero, so it's possible to read some bytes (up to 4095) of kernel
space memory from user space. This is an information leak.
5775 /* bitmap disabled, zero the first byte and copy out */
5776 if (!mddev->bitmap_info.file)
5777 file->pathname[0] = '\0';
Signed-off-by: Benjamin Randazzo <[email protected]>
Signed-off-by: NeilBrown <[email protected]>
|
static struct bitmap *find_objects(struct rev_info *revs,
struct object_list *roots,
struct bitmap *seen)
{
struct bitmap *base = NULL;
int needs_walk = 0;
struct object_list *not_mapped = NULL;
/*
* Go through all the roots for the walk. The ones that have bitmaps
* on the bitmap index will be `or`ed together to form an initial
* global reachability analysis.
*
* The ones without bitmaps in the index will be stored in the
* `not_mapped_list` for further processing.
*/
while (roots) {
struct object *object = roots->item;
roots = roots->next;
if (object->type == OBJ_COMMIT) {
khiter_t pos = kh_get_sha1(bitmap_git.bitmaps, object->oid.hash);
if (pos < kh_end(bitmap_git.bitmaps)) {
struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos);
struct ewah_bitmap *or_with = lookup_stored_bitmap(st);
if (base == NULL)
base = ewah_to_bitmap(or_with);
else
bitmap_or_ewah(base, or_with);
object->flags |= SEEN;
continue;
}
}
object_list_insert(object, ¬_mapped);
}
/*
* Best case scenario: We found bitmaps for all the roots,
* so the resulting `or` bitmap has the full reachability analysis
*/
if (not_mapped == NULL)
return base;
roots = not_mapped;
/*
* Let's iterate through all the roots that don't have bitmaps to
* check if we can determine them to be reachable from the existing
* global bitmap.
*
* If we cannot find them in the existing global bitmap, we'll need
* to push them to an actual walk and run it until we can confirm
* they are reachable
*/
while (roots) {
struct object *object = roots->item;
int pos;
roots = roots->next;
pos = bitmap_position(object->oid.hash);
if (pos < 0 || base == NULL || !bitmap_get(base, pos)) {
object->flags &= ~UNINTERESTING;
add_pending_object(revs, object, "");
needs_walk = 1;
} else {
object->flags |= SEEN;
}
}
if (needs_walk) {
struct include_data incdata;
if (base == NULL)
base = bitmap_new();
incdata.base = base;
incdata.seen = seen;
revs->include_check = should_include;
revs->include_check_data = &incdata;
if (prepare_revision_walk(revs))
die("revision walk setup failed");
traverse_commit_list(revs, show_commit, show_object, base);
}
return base;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
git
|
de1e67d0703894cb6ea782e36abb63976ab07e60
| 139,978,082,105,934,940,000,000,000,000,000,000,000 | 95 |
list-objects: pass full pathname to callbacks
When we find a blob at "a/b/c", we currently pass this to
our show_object_fn callbacks as two components: "a/b/" and
"c". Callbacks which want the full value then call
path_name(), which concatenates the two. But this is an
inefficient interface; the path is a strbuf, and we could
simply append "c" to it temporarily, then roll back the
length, without creating a new copy.
So we could improve this by teaching the callsites of
path_name() this trick (and there are only 3). But we can
also notice that no callback actually cares about the
broken-down representation, and simply pass each callback
the full path "a/b/c" as a string. The callback code becomes
even simpler, then, as we do not have to worry about freeing
an allocated buffer, nor rolling back our modification to
the strbuf.
This is theoretically less efficient, as some callbacks
would not bother to format the final path component. But in
practice this is not measurable. Since we use the same
strbuf over and over, our work to grow it is amortized, and
we really only pay to memcpy a few bytes.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
|
_XimProtoIMFree(
Xim im)
{
/* XIMPrivateRec */
if (im->private.proto.im_onkeylist) {
Xfree(im->private.proto.im_onkeylist);
im->private.proto.im_onkeylist = NULL;
}
if (im->private.proto.im_offkeylist) {
Xfree(im->private.proto.im_offkeylist);
im->private.proto.im_offkeylist = NULL;
}
if (im->private.proto.intrproto) {
_XimFreeProtoIntrCallback(im);
im->private.proto.intrproto = NULL;
}
if (im->private.proto.im_inner_resources) {
Xfree(im->private.proto.im_inner_resources);
im->private.proto.im_inner_resources = NULL;
}
if (im->private.proto.ic_inner_resources) {
Xfree(im->private.proto.ic_inner_resources);
im->private.proto.ic_inner_resources = NULL;
}
if (im->private.proto.hold_data) {
Xfree(im->private.proto.hold_data);
im->private.proto.hold_data = NULL;
}
if (im->private.proto.locale_name) {
Xfree(im->private.proto.locale_name);
im->private.proto.locale_name = NULL;
}
if (im->private.proto.ctom_conv) {
_XlcCloseConverter(im->private.proto.ctom_conv);
im->private.proto.ctom_conv = NULL;
}
if (im->private.proto.ctow_conv) {
_XlcCloseConverter(im->private.proto.ctow_conv);
im->private.proto.ctow_conv = NULL;
}
if (im->private.proto.ctoutf8_conv) {
_XlcCloseConverter(im->private.proto.ctoutf8_conv);
im->private.proto.ctoutf8_conv = NULL;
}
if (im->private.proto.cstomb_conv) {
_XlcCloseConverter(im->private.proto.cstomb_conv);
im->private.proto.cstomb_conv = NULL;
}
if (im->private.proto.cstowc_conv) {
_XlcCloseConverter(im->private.proto.cstowc_conv);
im->private.proto.cstowc_conv = NULL;
}
if (im->private.proto.cstoutf8_conv) {
_XlcCloseConverter(im->private.proto.cstoutf8_conv);
im->private.proto.cstoutf8_conv = NULL;
}
if (im->private.proto.ucstoc_conv) {
_XlcCloseConverter(im->private.proto.ucstoc_conv);
im->private.proto.ucstoc_conv = NULL;
}
if (im->private.proto.ucstoutf8_conv) {
_XlcCloseConverter(im->private.proto.ucstoutf8_conv);
im->private.proto.ucstoutf8_conv = NULL;
}
#ifdef XIM_CONNECTABLE
if (!IS_SERVER_CONNECTED(im) && IS_RECONNECTABLE(im)) {
return;
}
#endif /* XIM_CONNECTABLE */
if (im->private.proto.saved_imvalues) {
Xfree(im->private.proto.saved_imvalues);
im->private.proto.saved_imvalues = NULL;
}
if (im->private.proto.default_styles) {
Xfree(im->private.proto.default_styles);
im->private.proto.default_styles = NULL;
}
/* core */
if (im->core.res_name) {
Xfree(im->core.res_name);
im->core.res_name = NULL;
}
if (im->core.res_class) {
Xfree(im->core.res_class);
im->core.res_class = NULL;
}
if (im->core.im_values_list) {
Xfree(im->core.im_values_list);
im->core.im_values_list = NULL;
}
if (im->core.ic_values_list) {
Xfree(im->core.ic_values_list);
im->core.ic_values_list = NULL;
}
if (im->core.im_name) {
Xfree(im->core.im_name);
im->core.im_name = NULL;
}
if (im->core.styles) {
Xfree(im->core.styles);
im->core.styles = NULL;
}
if (im->core.im_resources) {
Xfree(im->core.im_resources);
im->core.im_resources = NULL;
}
if (im->core.ic_resources) {
Xfree(im->core.ic_resources);
im->core.ic_resources = NULL;
}
return;
}
| 0 |
[
"CWE-190"
] |
libx11
|
1a566c9e00e5f35c1f9e7f3d741a02e5170852b2
| 291,820,474,759,634,400,000,000,000,000,000,000,000 | 116 |
Zero out buffers in functions
It looks like uninitialized stack or heap memory can leak
out via padding bytes.
Signed-off-by: Matthieu Herrb <[email protected]>
Reviewed-by: Matthieu Herrb <[email protected]>
|
static struct dirent *vfswrap_readdir(vfs_handle_struct *handle,
DIR *dirp,
SMB_STRUCT_STAT *sbuf)
{
struct dirent *result;
START_PROFILE(syscall_readdir);
result = readdir(dirp);
END_PROFILE(syscall_readdir);
if (sbuf) {
/* Default Posix readdir() does not give us stat info.
* Set to invalid to indicate we didn't return this info. */
SET_STAT_INVALID(*sbuf);
#if defined(HAVE_DIRFD) && defined(HAVE_FSTATAT)
if (result != NULL) {
/* See if we can efficiently return this. */
struct stat st;
int flags = (lp_posix_pathnames() ?
AT_SYMLINK_NOFOLLOW : 0);
int ret = fstatat(dirfd(dirp),
result->d_name,
&st,
flags);
if (ret == 0) {
init_stat_ex_from_stat(sbuf,
&st,
lp_fake_dir_create_times(
SNUM(handle->conn)));
}
}
#endif
}
return result;
}
| 0 |
[
"CWE-665"
] |
samba
|
30e724cbff1ecd90e5a676831902d1e41ec1b347
| 86,392,708,160,166,810,000,000,000,000,000,000,000 | 34 |
FSCTL_GET_SHADOW_COPY_DATA: Initialize output array to zero
Otherwise num_volumes and the end marker can return uninitialized data
to the client.
Signed-off-by: Christof Schmitt <[email protected]>
Reviewed-by: Jeremy Allison <[email protected]>
Reviewed-by: Simo Sorce <[email protected]>
|
Controller::constructHeaderForSessionProtocol(Request *req, char * restrict buffer,
unsigned int &size, const SessionProtocolWorkingState &state, string delta_monotonic)
{
char *pos = buffer;
const char *end = buffer + size;
pos += sizeof(boost::uint32_t);
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("REQUEST_URI"));
pos = appendData(pos, end, req->path.start->data, req->path.size);
pos = appendData(pos, end, "", 1);
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("PATH_INFO"));
pos = appendData(pos, end, state.path.data(), state.path.size());
pos = appendData(pos, end, "", 1);
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("SCRIPT_NAME"));
if (state.hasBaseURI) {
pos = appendData(pos, end, req->options.baseURI);
pos = appendData(pos, end, "", 1);
} else {
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL(""));
}
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("QUERY_STRING"));
pos = appendData(pos, end, state.queryString.data(), state.queryString.size());
pos = appendData(pos, end, "", 1);
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("REQUEST_METHOD"));
pos = appendData(pos, end, state.methodStr);
pos = appendData(pos, end, "", 1);
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("SERVER_NAME"));
pos = appendData(pos, end, state.serverName);
pos = appendData(pos, end, "", 1);
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("SERVER_PORT"));
pos = appendData(pos, end, state.serverPort);
pos = appendData(pos, end, "", 1);
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("SERVER_SOFTWARE"));
pos = appendData(pos, end, serverSoftware);
pos = appendData(pos, end, "", 1);
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("SERVER_PROTOCOL"));
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("HTTP/1.1"));
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("REMOTE_ADDR"));
if (state.remoteAddr != NULL) {
pos = appendData(pos, end, state.remoteAddr);
pos = appendData(pos, end, "", 1);
} else {
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("127.0.0.1"));
}
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("REMOTE_PORT"));
if (state.remotePort != NULL) {
pos = appendData(pos, end, state.remotePort);
pos = appendData(pos, end, "", 1);
} else {
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("0"));
}
if (state.remoteUser != NULL) {
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("REMOTE_USER"));
pos = appendData(pos, end, state.remoteUser);
pos = appendData(pos, end, "", 1);
}
if (state.contentType != NULL) {
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("CONTENT_TYPE"));
pos = appendData(pos, end, state.contentType);
pos = appendData(pos, end, "", 1);
}
if (state.contentLength != NULL) {
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("CONTENT_LENGTH"));
pos = appendData(pos, end, state.contentLength);
pos = appendData(pos, end, "", 1);
}
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("PASSENGER_CONNECT_PASSWORD"));
pos = appendData(pos, end, req->session->getApiKey().toStaticString());
pos = appendData(pos, end, "", 1);
if (req->https) {
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("HTTPS"));
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("on"));
}
if (req->options.analytics) {
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("PASSENGER_TXN_ID"));
pos = appendData(pos, end, req->options.transaction->getTxnId());
pos = appendData(pos, end, "", 1);
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("PASSENGER_DELTA_MONOTONIC"));
pos = appendData(pos, end, delta_monotonic);
pos = appendData(pos, end, "", 1);
}
if (req->upgraded()) {
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("HTTP_CONNECTION"));
pos = appendData(pos, end, P_STATIC_STRING_WITH_NULL("upgrade"));
}
ServerKit::HeaderTable::Iterator it(req->headers);
while (*it != NULL) {
// This header-skipping is not accounted for in determineHeaderSizeForSessionProtocol(), but
// since we are only reducing the size it just wastes some mem bytes.
if ((
(it->header->hash == HTTP_CONTENT_LENGTH.hash()
|| it->header->hash == HTTP_CONTENT_TYPE.hash()
|| it->header->hash == HTTP_CONNECTION.hash()
) && (psg_lstr_cmp(&it->header->key, P_STATIC_STRING("content-type"))
|| psg_lstr_cmp(&it->header->key, P_STATIC_STRING("content-length"))
|| psg_lstr_cmp(&it->header->key, P_STATIC_STRING("connection"))
)
) || containsNonAlphaNumDash(it->header->key)
)
{
it.next();
continue;
}
pos = appendData(pos, end, P_STATIC_STRING("HTTP_"));
const LString::Part *part = it->header->key.start;
while (part != NULL) {
char *start = pos;
pos = appendData(pos, end, part->data, part->size);
httpHeaderToScgiUpperCase((unsigned char *) start, pos - start);
part = part->next;
}
pos = appendData(pos, end, "", 1);
part = it->header->val.start;
while (part != NULL) {
pos = appendData(pos, end, part->data, part->size);
part = part->next;
}
pos = appendData(pos, end, "", 1);
it.next();
}
if (state.environmentVariablesData != NULL) {
pos = appendData(pos, end, state.environmentVariablesData, state.environmentVariablesSize);
}
Uint32Message::generate(buffer, pos - buffer - sizeof(boost::uint32_t));
size = pos - buffer;
return pos < end;
}
| 0 |
[
"CWE-20",
"CWE-476"
] |
passenger
|
ddb8ecc4ebf260e4967f57f271d4f5761abeac3e
| 158,234,307,156,545,910,000,000,000,000,000,000,000 | 153 |
Fix CVE-2015-7519 header collision vulnerability
|
static void mode_init_ov_sensor_regs(struct sd *sd)
{
struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
int qvga, xstart, xend, ystart, yend;
u8 v;
qvga = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv & 1;
/******** Mode (VGA/QVGA) and sensor specific regs ********/
switch (sd->sensor) {
case SEN_OV2610:
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a);
i2c_w(sd, 0x25, qvga ? 0x30 : 0x60);
i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40);
i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0);
i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20);
return;
case SEN_OV2610AE: {
u8 v;
/* frame rates:
* 10fps / 5 fps for 1600x1200
* 40fps / 20fps for 800x600
*/
v = 80;
if (qvga) {
if (sd->frame_rate < 25)
v = 0x81;
} else {
if (sd->frame_rate < 10)
v = 0x81;
}
i2c_w(sd, 0x11, v);
i2c_w(sd, 0x12, qvga ? 0x60 : 0x20);
return;
}
case SEN_OV3610:
if (qvga) {
xstart = (1040 - gspca_dev->pixfmt.width) / 2 +
(0x1f << 4);
ystart = (776 - gspca_dev->pixfmt.height) / 2;
} else {
xstart = (2076 - gspca_dev->pixfmt.width) / 2 +
(0x10 << 4);
ystart = (1544 - gspca_dev->pixfmt.height) / 2;
}
xend = xstart + gspca_dev->pixfmt.width;
yend = ystart + gspca_dev->pixfmt.height;
/* Writing to the COMH register resets the other windowing regs
to their default values, so we must do this first. */
i2c_w_mask(sd, 0x12, qvga ? 0x40 : 0x00, 0xf0);
i2c_w_mask(sd, 0x32,
(((xend >> 1) & 7) << 3) | ((xstart >> 1) & 7),
0x3f);
i2c_w_mask(sd, 0x03,
(((yend >> 1) & 3) << 2) | ((ystart >> 1) & 3),
0x0f);
i2c_w(sd, 0x17, xstart >> 4);
i2c_w(sd, 0x18, xend >> 4);
i2c_w(sd, 0x19, ystart >> 3);
i2c_w(sd, 0x1a, yend >> 3);
return;
case SEN_OV8610:
/* For OV8610 qvga means qsvga */
i2c_w_mask(sd, OV7610_REG_COM_C, qvga ? (1 << 5) : 0, 1 << 5);
i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */
i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
i2c_w_mask(sd, 0x2d, 0x00, 0x40); /* from windrv 090403 */
i2c_w_mask(sd, 0x28, 0x20, 0x20); /* progressive mode on */
break;
case SEN_OV7610:
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
i2c_w(sd, 0x35, qvga ? 0x1e : 0x9e);
i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */
i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
break;
case SEN_OV7620:
case SEN_OV7620AE:
case SEN_OV76BE:
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a);
i2c_w(sd, 0x25, qvga ? 0x30 : 0x60);
i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40);
i2c_w_mask(sd, 0x67, qvga ? 0xb0 : 0x90, 0xf0);
i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */
i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
if (sd->sensor == SEN_OV76BE)
i2c_w(sd, 0x35, qvga ? 0x1e : 0x9e);
break;
case SEN_OV7640:
case SEN_OV7648:
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
/* Setting this undocumented bit in qvga mode removes a very
annoying vertical shaking of the image */
i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40);
/* Unknown */
i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0);
/* Allow higher automatic gain (to allow higher framerates) */
i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x12, 0x04, 0x04); /* AWB: 1 */
break;
case SEN_OV7670:
/* set COM7_FMT_VGA or COM7_FMT_QVGA
* do we need to set anything else?
* HSTART etc are set in set_ov_sensor_window itself */
i2c_w_mask(sd, OV7670_R12_COM7,
qvga ? OV7670_COM7_FMT_QVGA : OV7670_COM7_FMT_VGA,
OV7670_COM7_FMT_MASK);
i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */
i2c_w_mask(sd, OV7670_R13_COM8, OV7670_COM8_AWB,
OV7670_COM8_AWB);
if (qvga) { /* QVGA from ov7670.c by
* Jonathan Corbet */
xstart = 164;
xend = 28;
ystart = 14;
yend = 494;
} else { /* VGA */
xstart = 158;
xend = 14;
ystart = 10;
yend = 490;
}
/* OV7670 hardware window registers are split across
* multiple locations */
i2c_w(sd, OV7670_R17_HSTART, xstart >> 3);
i2c_w(sd, OV7670_R18_HSTOP, xend >> 3);
v = i2c_r(sd, OV7670_R32_HREF);
v = (v & 0xc0) | ((xend & 0x7) << 3) | (xstart & 0x07);
msleep(10); /* need to sleep between read and write to
* same reg! */
i2c_w(sd, OV7670_R32_HREF, v);
i2c_w(sd, OV7670_R19_VSTART, ystart >> 2);
i2c_w(sd, OV7670_R1A_VSTOP, yend >> 2);
v = i2c_r(sd, OV7670_R03_VREF);
v = (v & 0xc0) | ((yend & 0x3) << 2) | (ystart & 0x03);
msleep(10); /* need to sleep between read and write to
* same reg! */
i2c_w(sd, OV7670_R03_VREF, v);
break;
case SEN_OV6620:
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */
i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
break;
case SEN_OV6630:
case SEN_OV66308AF:
i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
break;
case SEN_OV9600: {
const struct ov_i2c_regvals *vals;
static const struct ov_i2c_regvals sxga_15[] = {
{0x11, 0x80}, {0x14, 0x3e}, {0x24, 0x85}, {0x25, 0x75}
};
static const struct ov_i2c_regvals sxga_7_5[] = {
{0x11, 0x81}, {0x14, 0x3e}, {0x24, 0x85}, {0x25, 0x75}
};
static const struct ov_i2c_regvals vga_30[] = {
{0x11, 0x81}, {0x14, 0x7e}, {0x24, 0x70}, {0x25, 0x60}
};
static const struct ov_i2c_regvals vga_15[] = {
{0x11, 0x83}, {0x14, 0x3e}, {0x24, 0x80}, {0x25, 0x70}
};
/* frame rates:
* 15fps / 7.5 fps for 1280x1024
* 30fps / 15fps for 640x480
*/
i2c_w_mask(sd, 0x12, qvga ? 0x40 : 0x00, 0x40);
if (qvga)
vals = sd->frame_rate < 30 ? vga_15 : vga_30;
else
vals = sd->frame_rate < 15 ? sxga_7_5 : sxga_15;
write_i2c_regvals(sd, vals, ARRAY_SIZE(sxga_15));
return;
}
default:
return;
}
/******** Clock programming ********/
i2c_w(sd, 0x11, sd->clockdiv);
}
| 0 |
[
"CWE-476"
] |
linux
|
998912346c0da53a6dbb71fab3a138586b596b30
| 50,676,981,149,842,080,000,000,000,000,000,000,000 | 190 |
media: ov519: add missing endpoint sanity checks
Make sure to check that we have at least one endpoint before accessing
the endpoint array to avoid dereferencing a NULL-pointer on stream
start.
Note that these sanity checks are not redundant as the driver is mixing
looking up altsettings by index and by number, which need not coincide.
Fixes: 1876bb923c98 ("V4L/DVB (12079): gspca_ov519: add support for the ov511 bridge")
Fixes: b282d87332f5 ("V4L/DVB (12080): gspca_ov519: Fix ov518+ with OV7620AE (Trust spacecam 320)")
Cc: stable <[email protected]> # 2.6.31
Cc: Hans de Goede <[email protected]>
Signed-off-by: Johan Hovold <[email protected]>
Signed-off-by: Hans Verkuil <[email protected]>
Signed-off-by: Mauro Carvalho Chehab <[email protected]>
|
XML_SetDoctypeDeclHandler(XML_Parser parser, XML_StartDoctypeDeclHandler start,
XML_EndDoctypeDeclHandler end) {
if (parser == NULL)
return;
parser->m_startDoctypeDeclHandler = start;
parser->m_endDoctypeDeclHandler = end;
}
| 0 |
[
"CWE-611",
"CWE-776",
"CWE-415",
"CWE-125"
] |
libexpat
|
c20b758c332d9a13afbbb276d30db1d183a85d43
| 222,031,077,707,102,700,000,000,000,000,000,000,000 | 7 |
xmlparse.c: Deny internal entities closing the doctype
|
debugPrintAll(void)
{
DEFiRet;
dbgprintf("All Rulesets:\n");
llExecFunc(&llRulesets, doDebugPrintAll, NULL);
dbgprintf("End of Rulesets.\n");
RETiRet;
}
| 0 |
[
"CWE-772",
"CWE-401"
] |
rsyslog
|
1ef709cc97d54f74d3fdeb83788cc4b01f4c6a2a
| 91,355,168,263,177,050,000,000,000,000,000,000,000 | 8 |
bugfix: fixed a memory leak and potential abort condition
this could happen if multiple rulesets were used and some output batches
contained messages belonging to more than one ruleset.
fixes: http://bugzilla.adiscon.com/show_bug.cgi?id=226
fixes: http://bugzilla.adiscon.com/show_bug.cgi?id=218
|
int mgr_log_level(p_fm_config_conx_hdlt hdl, fm_mgr_type_t mgr, int argc, char *argv[]) {
fm_mgr_config_errno_t res;
fm_msg_ret_code_t ret_code;
uint32_t loglevel=0;
if (mgr == FM_MGR_PM) {
fprintf(stderr, "pmLogLevel:\n");
fprintf(stderr, "\tThis command is not supported any more. The logging of the\n");
fprintf(stderr, "\tPerformance Manager(PM) is now\n");
fprintf(stderr, "\tbased on the logging of the Subnet manager(SM). Use the\n");
fprintf(stderr, "\tsmLogLevel command for changing the logging level of the\n");
fprintf(stderr, "\tSM and PM\n");
} else if (argc == 1) {
loglevel = atol(argv[0]);
if((res = fm_mgr_simple_query(hdl, FM_ACT_GET, FM_DT_LOG_LEVEL, mgr, sizeof(loglevel), (void *)&loglevel, &ret_code)) != FM_CONF_OK)
{
fprintf(stderr, "mgr_log_level: Failed to retrieve data: \n"
"\tError:(%d) %s \n\tRet code:(%d) %s\n",
res, fm_mgr_get_error_str(res),ret_code,
fm_mgr_get_resp_error_str(ret_code));
} else {
printf("mgr_log_level: Successfully sent Log Level control to local mgr instance\n");
}
} else {
fprintf(stderr, "mgr_log_level: must specify the log level parameter (1 > 5): \n");
}
return 0;
}
| 0 |
[
"CWE-362"
] |
opa-fm
|
c5759e7b76f5bf844be6c6641cc1b356bbc83869
| 338,229,217,118,082,260,000,000,000,000,000,000,000 | 29 |
Fix scripts and code that use well-known tmp files.
|
deallocateCharacterClasses(TranslationTableHeader *table) {
CharacterClass **classes = &table->characterClasses;
while (*classes) {
CharacterClass *class = *classes;
*classes = (*classes)->next;
if (class) free(class);
}
}
| 0 |
[
"CWE-787"
] |
liblouis
|
2e4772befb2b1c37cb4b9d6572945115ee28630a
| 35,605,386,192,648,096,000,000,000,000,000,000,000 | 8 |
Prevent an invalid memory writes in compileRule
Thanks to Han Zheng for reporting it
Fixes #1214
|
static void sigalrm_handler(int sig __attribute__((unused)))
{
gotsigalrm = 1;
}
| 0 |
[] |
cyrus-imapd
|
602f12ed2af0a49ac4a58affbfea57d0fc23dea5
| 89,638,892,655,770,130,000,000,000,000,000,000,000 | 4 |
httpd.c: only allow reuse of auth creds on a persistent connection against a backend server in a Murder
|
const char *FS_LoadedPakPureChecksums( void ) {
static char info[BIG_INFO_STRING];
searchpath_t *search;
info[0] = 0;
for ( search = fs_searchpaths ; search ; search = search->next ) {
// is the element a pak file?
if ( !search->pack ) {
continue;
}
Q_strcat( info, sizeof( info ), va("%i ", search->pack->pure_checksum ) );
}
return info;
}
| 0 |
[
"CWE-269"
] |
ioq3
|
376267d534476a875d8b9228149c4ee18b74a4fd
| 319,707,917,807,574,400,000,000,000,000,000,000,000 | 17 |
Don't load .pk3s as .dlls, and don't load user config files from .pk3s.
|
PJ_DEF(pj_status_t) pjsip_rx_data_free_cloned(pjsip_rx_data *rdata)
{
PJ_ASSERT_RETURN(rdata, PJ_EINVAL);
pjsip_transport_dec_ref(rdata->tp_info.transport);
pj_pool_release(rdata->tp_info.pool);
return PJ_SUCCESS;
}
| 0 |
[
"CWE-297",
"CWE-295"
] |
pjproject
|
67e46c1ac45ad784db5b9080f5ed8b133c122872
| 252,705,311,642,688,860,000,000,000,000,000,000,000 | 9 |
Merge pull request from GHSA-8hcp-hm38-mfph
* Check hostname during TLS transport selection
* revision based on feedback
* remove the code in create_request that has been moved
|
replace_pop_ins(void)
{
int cc;
int oldState = State;
State = MODE_NORMAL; // don't want MODE_REPLACE here
while ((cc = replace_pop()) > 0)
{
mb_replace_pop_ins(cc);
dec_cursor();
}
State = oldState;
}
| 0 |
[
"CWE-120"
] |
vim
|
7ce5b2b590256ce53d6af28c1d203fb3bc1d2d97
| 232,034,479,231,987,640,000,000,000,000,000,000,000 | 13 |
patch 8.2.4969: changing text in Visual mode may cause invalid memory access
Problem: Changing text in Visual mode may cause invalid memory access.
Solution: Check the Visual position after making a change.
|
set_ip_options(int sock_fd, int family, int flags)
{
#if defined(FEAT_IPV6) && defined(IPV6_V6ONLY)
/* Receive only IPv6 packets on an IPv6 socket */
if (family == IPADDR_INET6 && !SCK_SetIntOption(sock_fd, IPPROTO_IPV6, IPV6_V6ONLY, 1))
return 0;
#endif
/* Provide destination address of received packets if requested */
if (flags & SCK_FLAG_RX_DEST_ADDR) {
if (family == IPADDR_INET4) {
#ifdef HAVE_IN_PKTINFO
if (!SCK_SetIntOption(sock_fd, IPPROTO_IP, IP_PKTINFO, 1))
;
#elif defined(IP_RECVDSTADDR)
if (!SCK_SetIntOption(sock_fd, IPPROTO_IP, IP_RECVDSTADDR, 1))
;
#endif
}
#ifdef FEAT_IPV6
else if (family == IPADDR_INET6) {
#ifdef HAVE_IN6_PKTINFO
#ifdef IPV6_RECVPKTINFO
if (!SCK_SetIntOption(sock_fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, 1))
;
#else
if (!SCK_SetIntOption(sock_fd, IPPROTO_IPV6, IPV6_PKTINFO, 1))
;
#endif
#endif
}
#endif
}
return 1;
}
| 0 |
[
"CWE-59"
] |
chrony
|
e18903a6b56341481a2e08469c0602010bf7bfe3
| 115,961,058,712,093,450,000,000,000,000,000,000,000 | 36 |
switch to new util file functions
Replace all fopen(), rename(), and unlink() calls with the new util
functions.
|
PlayerBase::PlayerTypes PlayerGeneric::getPreferredPlayerType(XModule* module)
{
if (module == NULL)
return PlayerBase::PlayerType_INVALID;
switch (module->getType())
{
case XModule::ModuleType_669:
case XModule::ModuleType_FAR:
#ifndef MILKYTRACKER
return PlayerBase::PlayerType_FAR;
break;
#endif
case XModule::ModuleType_IT:
#ifndef MILKYTRACKER
return PlayerBase::PlayerType_IT;
break;
#endif
case XModule::ModuleType_UNKNOWN: // just assume our standard player can handle this
//case XModule::ModuleType_669:
case XModule::ModuleType_AMF:
case XModule::ModuleType_AMS:
case XModule::ModuleType_CBA:
case XModule::ModuleType_DBM:
case XModule::ModuleType_DIGI:
case XModule::ModuleType_DSM:
case XModule::ModuleType_DSm:
case XModule::ModuleType_DTM_1:
case XModule::ModuleType_DTM_2:
case XModule::ModuleType_GDM:
case XModule::ModuleType_GMC:
case XModule::ModuleType_IMF:
case XModule::ModuleType_MDL:
case XModule::ModuleType_MOD:
case XModule::ModuleType_MTM:
case XModule::ModuleType_MXM:
case XModule::ModuleType_OKT:
case XModule::ModuleType_PLM:
case XModule::ModuleType_PSM:
case XModule::ModuleType_PTM:
case XModule::ModuleType_S3M:
case XModule::ModuleType_STM:
case XModule::ModuleType_SFX:
case XModule::ModuleType_UNI:
case XModule::ModuleType_ULT:
case XModule::ModuleType_XM:
case XModule::ModuleType_NONE:
return PlayerBase::PlayerType_Generic;
break;
default:
return PlayerBase::PlayerType_INVALID;
}
}
| 0 |
[
"CWE-416"
] |
MilkyTracker
|
7afd55c42ad80d01a339197a2d8b5461d214edaf
| 307,710,794,873,357,350,000,000,000,000,000,000,000 | 54 |
Fix use-after-free in PlayerGeneric destructor
|
static void ipip6_tunnel_bind_dev(struct net_device *dev)
{
struct net_device *tdev = NULL;
struct ip_tunnel *tunnel;
const struct iphdr *iph;
struct flowi4 fl4;
tunnel = netdev_priv(dev);
iph = &tunnel->parms.iph;
if (iph->daddr) {
struct rtable *rt = ip_route_output_ports(tunnel->net, &fl4,
NULL,
iph->daddr, iph->saddr,
0, 0,
IPPROTO_IPV6,
RT_TOS(iph->tos),
tunnel->parms.link);
if (!IS_ERR(rt)) {
tdev = rt->dst.dev;
ip_rt_put(rt);
}
dev->flags |= IFF_POINTOPOINT;
}
if (!tdev && tunnel->parms.link)
tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
if (tdev) {
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
dev->mtu = tdev->mtu - t_hlen;
if (dev->mtu < IPV6_MIN_MTU)
dev->mtu = IPV6_MIN_MTU;
}
}
| 0 |
[
"CWE-703",
"CWE-772",
"CWE-401"
] |
linux
|
07f12b26e21ab359261bf75cfcb424fdc7daeb6d
| 73,880,693,193,661,130,000,000,000,000,000,000,000 | 38 |
net: sit: fix memory leak in sit_init_net()
If register_netdev() is failed to register sitn->fb_tunnel_dev,
it will go to err_reg_dev and forget to free netdev(sitn->fb_tunnel_dev).
BUG: memory leak
unreferenced object 0xffff888378daad00 (size 512):
comm "syz-executor.1", pid 4006, jiffies 4295121142 (age 16.115s)
hex dump (first 32 bytes):
00 e6 ed c0 83 88 ff ff 00 00 00 00 00 00 00 00 ................
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
backtrace:
[<00000000d6dcb63e>] kvmalloc include/linux/mm.h:577 [inline]
[<00000000d6dcb63e>] kvzalloc include/linux/mm.h:585 [inline]
[<00000000d6dcb63e>] netif_alloc_netdev_queues net/core/dev.c:8380 [inline]
[<00000000d6dcb63e>] alloc_netdev_mqs+0x600/0xcc0 net/core/dev.c:8970
[<00000000867e172f>] sit_init_net+0x295/0xa40 net/ipv6/sit.c:1848
[<00000000871019fa>] ops_init+0xad/0x3e0 net/core/net_namespace.c:129
[<00000000319507f6>] setup_net+0x2ba/0x690 net/core/net_namespace.c:314
[<0000000087db4f96>] copy_net_ns+0x1dc/0x330 net/core/net_namespace.c:437
[<0000000057efc651>] create_new_namespaces+0x382/0x730 kernel/nsproxy.c:107
[<00000000676f83de>] copy_namespaces+0x2ed/0x3d0 kernel/nsproxy.c:165
[<0000000030b74bac>] copy_process.part.27+0x231e/0x6db0 kernel/fork.c:1919
[<00000000fff78746>] copy_process kernel/fork.c:1713 [inline]
[<00000000fff78746>] _do_fork+0x1bc/0xe90 kernel/fork.c:2224
[<000000001c2e0d1c>] do_syscall_64+0xc8/0x580 arch/x86/entry/common.c:290
[<00000000ec48bd44>] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[<0000000039acff8a>] 0xffffffffffffffff
Signed-off-by: Mao Wenan <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
/* simple case: we have completely filled the current buffer. */
if (offset == MAX_BUFFER_OFFSET)
return true;
/*
* complex case: start a fresh buffer if the current frag
* would overflow the current buffer but only if:
* (i) this frag would fit completely in the next buffer
* and (ii) there is already some data in the current buffer
* and (iii) this is not the head buffer.
*
* Where:
* - (i) stops us splitting a frag into two copies
* unless the frag is too large for a single buffer.
* - (ii) stops us from leaving a buffer pointlessly empty.
* - (iii) stops us leaving the first buffer
* empty. Strictly speaking this is already covered
* by (ii) but is explicitly checked because
* netfront relies on the first buffer being
* non-empty and can crash otherwise.
*
* This means we will effectively linearise small
* frags but do not needlessly split large buffers
* into multiple copies tend to give large frags their
* own buffers as before.
*/
if ((offset + size > MAX_BUFFER_OFFSET) &&
(size <= MAX_BUFFER_OFFSET) && offset && !head)
return true;
return false;
}
| 0 |
[
"CWE-399"
] |
linux
|
7d5145d8eb2b9791533ffe4dc003b129b9696c48
| 56,849,519,628,620,670,000,000,000,000,000,000,000 | 34 |
xen/netback: don't leak pages on failure in xen_netbk_tx_check_gop.
Signed-off-by: Matthew Daley <[email protected]>
Reviewed-by: Konrad Rzeszutek Wilk <[email protected]>
Acked-by: Ian Campbell <[email protected]>
Acked-by: Jan Beulich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int phar_zip_applysignature(phar_archive_data *phar, struct _phar_zip_pass *pass,
smart_str *metadata) /* {{{ */
{
/* add signature for executable tars or tars explicitly set with setSignatureAlgorithm */
if (!phar->is_data || phar->sig_flags) {
int signature_length;
char *signature, sigbuf[8];
phar_entry_info entry = {0};
php_stream *newfile;
zend_off_t tell, st;
newfile = php_stream_fopen_tmpfile();
if (newfile == NULL) {
spprintf(pass->error, 0, "phar error: unable to create temporary file for the signature file");
return FAILURE;
}
st = tell = php_stream_tell(pass->filefp);
/* copy the local files, central directory, and the zip comment to generate the hash */
php_stream_seek(pass->filefp, 0, SEEK_SET);
php_stream_copy_to_stream_ex(pass->filefp, newfile, tell, NULL);
tell = php_stream_tell(pass->centralfp);
php_stream_seek(pass->centralfp, 0, SEEK_SET);
php_stream_copy_to_stream_ex(pass->centralfp, newfile, tell, NULL);
if (metadata->s) {
php_stream_write(newfile, ZSTR_VAL(metadata->s), ZSTR_LEN(metadata->s));
}
if (FAILURE == phar_create_signature(phar, newfile, &signature, &signature_length, pass->error)) {
if (pass->error) {
char *save = *(pass->error);
spprintf(pass->error, 0, "phar error: unable to write signature to zip-based phar: %s", save);
efree(save);
}
php_stream_close(newfile);
return FAILURE;
}
entry.filename = ".phar/signature.bin";
entry.filename_len = sizeof(".phar/signature.bin")-1;
entry.fp = php_stream_fopen_tmpfile();
entry.fp_type = PHAR_MOD;
entry.is_modified = 1;
if (entry.fp == NULL) {
spprintf(pass->error, 0, "phar error: unable to create temporary file for signature");
return FAILURE;
}
PHAR_SET_32(sigbuf, phar->sig_flags);
PHAR_SET_32(sigbuf + 4, signature_length);
if (8 != (int)php_stream_write(entry.fp, sigbuf, 8) || signature_length != (int)php_stream_write(entry.fp, signature, signature_length)) {
efree(signature);
if (pass->error) {
spprintf(pass->error, 0, "phar error: unable to write signature to zip-based phar %s", phar->fname);
}
php_stream_close(newfile);
return FAILURE;
}
efree(signature);
entry.uncompressed_filesize = entry.compressed_filesize = signature_length + 8;
entry.phar = phar;
/* throw out return value and write the signature */
phar_zip_changed_apply_int(&entry, (void *)pass);
php_stream_close(newfile);
if (pass->error && *(pass->error)) {
/* error is set by writeheaders */
php_stream_close(newfile);
return FAILURE;
}
} /* signature */
return SUCCESS;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
php-src
|
0bfb970f43acd1e81d11be1154805f86655f15d5
| 34,655,018,977,749,650,000,000,000,000,000,000,000 | 76 |
Fix bug #72928 - Out of bound when verify signature of zip phar in phar_parse_zipfile
(cherry picked from commit 19484ab77466f99c78fc0e677f7e03da0584d6a2)
|
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
int err;
u8 *data;
struct xfrm_mgr *km;
struct xfrm_policy *pol = NULL;
if (in_compat_syscall())
return -EOPNOTSUPP;
if (!optval && !optlen) {
xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
__sk_dst_reset(sk);
return 0;
}
if (optlen <= 0 || optlen > PAGE_SIZE)
return -EMSGSIZE;
data = memdup_user(optval, optlen);
if (IS_ERR(data))
return PTR_ERR(data);
err = -EINVAL;
rcu_read_lock();
list_for_each_entry_rcu(km, &xfrm_km_list, list) {
pol = km->compile_policy(sk, optname, data,
optlen, &err);
if (err >= 0)
break;
}
rcu_read_unlock();
if (err >= 0) {
xfrm_sk_policy_insert(sk, err, pol);
xfrm_pol_put(pol);
__sk_dst_reset(sk);
err = 0;
}
kfree(data);
return err;
}
| 0 |
[
"CWE-416"
] |
linux
|
dbb2483b2a46fbaf833cfb5deb5ed9cace9c7399
| 159,365,578,814,359,430,000,000,000,000,000,000,000 | 44 |
xfrm: clean up xfrm protocol checks
In commit 6a53b7593233 ("xfrm: check id proto in validate_tmpl()")
I introduced a check for xfrm protocol, but according to Herbert
IPSEC_PROTO_ANY should only be used as a wildcard for lookup, so
it should be removed from validate_tmpl().
And, IPSEC_PROTO_ANY is expected to only match 3 IPSec-specific
protocols, this is why xfrm_state_flush() could still miss
IPPROTO_ROUTING, which leads that those entries are left in
net->xfrm.state_all before exit net. Fix this by replacing
IPSEC_PROTO_ANY with zero.
This patch also extracts the check from validate_tmpl() to
xfrm_id_proto_valid() and uses it in parse_ipsecrequest().
With this, no other protocols should be added into xfrm.
Fixes: 6a53b7593233 ("xfrm: check id proto in validate_tmpl()")
Reported-by: [email protected]
Cc: Steffen Klassert <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: Cong Wang <[email protected]>
Acked-by: Herbert Xu <[email protected]>
Signed-off-by: Steffen Klassert <[email protected]>
|
MagickExport ResampleFilter *DestroyResampleFilter(
ResampleFilter *resample_filter)
{
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
assert(resample_filter->image != (Image *) NULL);
if (resample_filter->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
resample_filter->image->filename);
resample_filter->view=DestroyCacheView(resample_filter->view);
resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
resample_filter->signature=(~MagickCoreSignature);
resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
return(resample_filter);
}
| 0 |
[
"CWE-369"
] |
ImageMagick
|
8d25d94a363b104acd6ff23df7470aeedb806c51
| 289,676,394,133,202,430,000,000,000,000,000,000,000 | 18 |
https://github.com/ImageMagick/ImageMagick/issues/3195
|
slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value)
{
#ifdef PBLOCK_ANALYTICS
pblock_analytics_record(pblock, arg);
#endif
char *authtype;
Slapi_Backend *be;
PR_ASSERT(NULL != pblock);
PR_ASSERT(NULL != value);
be = pblock->pb_backend;
switch (arg) {
#ifdef PBLOCK_ANALYTICS
case SLAPI_HINT:
break;
#endif
case SLAPI_BACKEND:
(*(Slapi_Backend **)value) = be;
break;
case SLAPI_BACKEND_COUNT:
if (pblock->pb_misc != NULL) {
(*(int *)value) = pblock->pb_misc->pb_backend_count;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_BE_TYPE:
if (NULL == be) {
return (-1);
}
(*(char **)value) = be->be_type;
break;
case SLAPI_BE_READONLY:
if (NULL == be) {
(*(int *)value) = 0; /* default value */
} else {
(*(int *)value) = be->be_readonly;
}
break;
case SLAPI_BE_LASTMOD:
if (NULL == be) {
(*(int *)value) = (g_get_global_lastmod() == LDAP_ON);
} else {
(*(int *)value) = (be->be_lastmod == LDAP_ON || (be->be_lastmod == LDAP_UNDEFINED && g_get_global_lastmod() == LDAP_ON));
}
break;
case SLAPI_CONNECTION:
(*(Connection **)value) = pblock->pb_conn;
break;
case SLAPI_CONN_ID:
if (pblock->pb_conn == NULL) {
slapi_log_err(SLAPI_LOG_TRACE,
"slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_ID \n");
return (-1);
}
(*(uint64_t *)value) = pblock->pb_conn->c_connid;
break;
case SLAPI_CONN_DN:
/*
* NOTE: we have to make a copy of this that the caller
* is responsible for freeing. otherwise, they would get
* a pointer that could be freed out from under them.
*/
if (pblock->pb_conn == NULL) {
slapi_log_err(SLAPI_LOG_ERR,
"slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_DN \n");
return (-1);
}
pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
(*(char **)value) = (NULL == pblock->pb_conn->c_dn ? NULL : slapi_ch_strdup(pblock->pb_conn->c_dn));
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
break;
case SLAPI_CONN_AUTHTYPE: /* deprecated */
if (pblock->pb_conn == NULL) {
slapi_log_err(SLAPI_LOG_ERR,
"slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_AUTHTYPE \n");
return (-1);
}
pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
authtype = pblock->pb_conn->c_authtype;
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
if (authtype == NULL) {
(*(char **)value) = NULL;
} else if (strcasecmp(authtype, SLAPD_AUTH_NONE) == 0) {
(*(char **)value) = SLAPD_AUTH_NONE;
} else if (strcasecmp(authtype, SLAPD_AUTH_SIMPLE) == 0) {
(*(char **)value) = SLAPD_AUTH_SIMPLE;
} else if (strcasecmp(authtype, SLAPD_AUTH_SSL) == 0) {
(*(char **)value) = SLAPD_AUTH_SSL;
} else if (strcasecmp(authtype, SLAPD_AUTH_OS) == 0) {
(*(char **)value) = SLAPD_AUTH_OS;
} else if (strncasecmp(authtype, SLAPD_AUTH_SASL,
strlen(SLAPD_AUTH_SASL)) == 0) {
(*(char **)value) = SLAPD_AUTH_SASL;
} else {
(*(char **)value) = "unknown";
}
break;
case SLAPI_CONN_AUTHMETHOD:
/* returns a copy */
if (pblock->pb_conn == NULL) {
slapi_log_err(SLAPI_LOG_ERR,
"slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_AUTHMETHOD \n");
return (-1);
}
pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
(*(char **)value) = pblock->pb_conn->c_authtype ? slapi_ch_strdup(pblock->pb_conn->c_authtype) : NULL;
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
break;
case SLAPI_CONN_CLIENTNETADDR:
if (pblock->pb_conn == NULL) {
memset(value, 0, sizeof(PRNetAddr));
break;
}
pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
if (pblock->pb_conn->cin_addr == NULL) {
memset(value, 0, sizeof(PRNetAddr));
} else {
(*(PRNetAddr *)value) =
*(pblock->pb_conn->cin_addr);
}
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
break;
case SLAPI_CONN_CLIENTNETADDR_ACLIP:
if (pblock->pb_conn == NULL) {
break;
}
pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
(*(PRNetAddr **) value) = pblock->pb_conn->cin_addr_aclip;
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
break;
case SLAPI_CONN_SERVERNETADDR:
if (pblock->pb_conn == NULL) {
memset(value, 0, sizeof(PRNetAddr));
break;
}
pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
if (pblock->pb_conn->cin_destaddr == NULL) {
memset(value, 0, sizeof(PRNetAddr));
} else {
(*(PRNetAddr *)value) =
*(pblock->pb_conn->cin_destaddr);
}
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
break;
case SLAPI_CONN_CLIENTIP:
if (pblock->pb_conn == NULL) {
memset(value, 0, sizeof(struct in_addr));
break;
}
pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
if (pblock->pb_conn->cin_addr == NULL) {
memset(value, 0, sizeof(struct in_addr));
} else {
if (PR_IsNetAddrType(pblock->pb_conn->cin_addr,
PR_IpAddrV4Mapped)) {
(*(struct in_addr *)value).s_addr =
(*(pblock->pb_conn->cin_addr)).ipv6.ip.pr_s6_addr32[3];
} else {
memset(value, 0, sizeof(struct in_addr));
}
}
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
break;
case SLAPI_CONN_SERVERIP:
if (pblock->pb_conn == NULL) {
memset(value, 0, sizeof(struct in_addr));
break;
}
pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
if (pblock->pb_conn->cin_destaddr == NULL) {
memset(value, 0, sizeof(PRNetAddr));
} else {
if (PR_IsNetAddrType(pblock->pb_conn->cin_destaddr,
PR_IpAddrV4Mapped)) {
(*(struct in_addr *)value).s_addr =
(*(pblock->pb_conn->cin_destaddr)).ipv6.ip.pr_s6_addr32[3];
} else {
memset(value, 0, sizeof(struct in_addr));
}
}
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
break;
case SLAPI_CONN_IS_REPLICATION_SESSION:
if (pblock->pb_conn == NULL) {
slapi_log_err(SLAPI_LOG_ERR,
"slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_IS_REPLICATION_SESSION \n");
return (-1);
}
pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
(*(int *)value) = pblock->pb_conn->c_isreplication_session;
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
break;
case SLAPI_CONN_IS_SSL_SESSION:
if (pblock->pb_conn == NULL) {
slapi_log_err(SLAPI_LOG_ERR,
"slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_IS_SSL_SESSION \n");
return (-1);
}
pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
(*(int *)value) = pblock->pb_conn->c_flags & CONN_FLAG_SSL;
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
break;
case SLAPI_CONN_SASL_SSF:
if (pblock->pb_conn == NULL) {
slapi_log_err(SLAPI_LOG_ERR,
"slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_SASL_SSF \n");
return (-1);
}
pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
(*(int *)value) = pblock->pb_conn->c_sasl_ssf;
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
break;
case SLAPI_CONN_SSL_SSF:
if (pblock->pb_conn == NULL) {
slapi_log_err(SLAPI_LOG_ERR,
"slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_SSL_SSF \n");
return (-1);
}
pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
(*(int *)value) = pblock->pb_conn->c_ssl_ssf;
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
break;
case SLAPI_CONN_LOCAL_SSF:
if (pblock->pb_conn == NULL) {
slapi_log_err(SLAPI_LOG_ERR,
"slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_LOCAL_SSF \n");
return (-1);
}
pthread_mutex_lock(&(pblock->pb_conn->c_mutex));
(*(int *)value) = pblock->pb_conn->c_local_ssf;
pthread_mutex_unlock(&(pblock->pb_conn->c_mutex));
break;
case SLAPI_CONN_CERT:
if (pblock->pb_conn == NULL) {
slapi_log_err(SLAPI_LOG_ERR,
"slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_CERT \n");
return (-1);
}
(*(CERTCertificate **)value) = pblock->pb_conn->c_client_cert;
break;
case SLAPI_OPERATION:
(*(Operation **)value) = pblock->pb_op;
break;
case SLAPI_OPERATION_TYPE:
if (pblock->pb_op == NULL) {
slapi_log_err(SLAPI_LOG_ERR,
"slapi_pblock_get", "Operation is NULL and hence cannot access SLAPI_OPERATION_TYPE \n");
return (-1);
}
(*(int *)value) = pblock->pb_op->o_params.operation_type;
break;
case SLAPI_OPINITIATED_TIME:
if (pblock->pb_op == NULL) {
slapi_log_err(SLAPI_LOG_ERR,
"slapi_pblock_get", "Operation is NULL and hence cannot access SLAPI_OPINITIATED_TIME \n");
return (-1);
}
(*(time_t *)value) = pblock->pb_op->o_hr_time_utc.tv_sec;
break;
case SLAPI_REQUESTOR_ISROOT:
if (pblock->pb_intop != NULL) {
(*(int *)value) = pblock->pb_intop->pb_requestor_isroot;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_SKIP_MODIFIED_ATTRS:
if (pblock->pb_op == NULL) {
(*(int *)value) = 0; /* No Operation -> No skip */
} else {
(*(int *)value) = (pblock->pb_op->o_flags & OP_FLAG_SKIP_MODIFIED_ATTRS);
}
break;
case SLAPI_IS_REPLICATED_OPERATION:
if (pblock->pb_op == NULL) {
(*(int *)value) = 0; /* No Operation -> Not Replicated */
} else {
(*(int *)value) = (pblock->pb_op->o_flags & OP_FLAG_REPLICATED);
}
break;
case SLAPI_IS_MMR_REPLICATED_OPERATION:
if (pblock->pb_op == NULL) {
(*(int *)value) = 0; /* No Operation -> Not Replicated */
} else {
(*(int *)value) = (pblock->pb_op->o_flags & OP_FLAG_REPLICATED);
}
break;
case SLAPI_OPERATION_PARAMETERS:
if (pblock->pb_op != NULL) {
(*(struct slapi_operation_parameters **)value) = &pblock->pb_op->o_params;
}
break;
/* stuff related to config file processing */
case SLAPI_CONFIG_FILENAME:
case SLAPI_CONFIG_LINENO:
case SLAPI_CONFIG_ARGC:
case SLAPI_CONFIG_ARGV:
return (-1); /* deprecated since DS 5.0 (no longer useful) */
/* pblock memory management */
case SLAPI_DESTROY_CONTENT:
if (pblock->pb_deprecated != NULL) {
(*(int *)value) = pblock->pb_deprecated->pb_destroy_content;
} else {
(*(int *)value) = 0;
}
break;
/* stuff related to the current plugin */
case SLAPI_PLUGIN:
(*(struct slapdplugin **)value) = pblock->pb_plugin;
break;
case SLAPI_PLUGIN_PRIVATE:
(*(void **)value) = pblock->pb_plugin->plg_private;
break;
case SLAPI_PLUGIN_TYPE:
(*(int *)value) = pblock->pb_plugin->plg_type;
break;
case SLAPI_PLUGIN_ARGV:
(*(char ***)value) = pblock->pb_plugin->plg_argv;
break;
case SLAPI_PLUGIN_ARGC:
(*(int *)value) = pblock->pb_plugin->plg_argc;
break;
case SLAPI_PLUGIN_VERSION:
(*(char **)value) = pblock->pb_plugin->plg_version;
break;
case SLAPI_PLUGIN_PRECEDENCE:
(*(int *)value) = pblock->pb_plugin->plg_precedence;
break;
case SLAPI_PLUGIN_OPRETURN:
if (pblock->pb_intop != NULL) {
(*(int *)value) = pblock->pb_intop->pb_opreturn;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_PLUGIN_OBJECT:
if (pblock->pb_intplugin != NULL) {
(*(void **)value) = pblock->pb_intplugin->pb_object;
} else {
(*(void **)value) = NULL;
}
break;
case SLAPI_PLUGIN_DESTROY_FN:
if (pblock->pb_intplugin != NULL) {
(*(IFP *)value) = pblock->pb_intplugin->pb_destroy_fn;
} else {
(*(IFP *)value) = NULL;
}
break;
case SLAPI_PLUGIN_DESCRIPTION:
(*(Slapi_PluginDesc *)value) = pblock->pb_plugin->plg_desc;
break;
case SLAPI_PLUGIN_IDENTITY:
if (pblock->pb_intplugin != NULL) {
(*(void **)value) = pblock->pb_intplugin->pb_plugin_identity;
} else {
(*(void **)value) = NULL;
}
break;
case SLAPI_PLUGIN_CONFIG_AREA:
if (pblock->pb_intplugin != NULL) {
(*(char **)value) = pblock->pb_intplugin->pb_plugin_config_area;
} else {
(*(char **)value) = 0;
}
break;
case SLAPI_PLUGIN_CONFIG_DN:
if (pblock->pb_plugin != NULL) {
(*(char **)value) = pblock->pb_plugin->plg_dn;
}
break;
case SLAPI_PLUGIN_INTOP_RESULT:
if (pblock->pb_intop != NULL) {
(*(int *)value) = pblock->pb_intop->pb_internal_op_result;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES:
if (pblock->pb_intop != NULL) {
(*(Slapi_Entry ***)value) = pblock->pb_intop->pb_plugin_internal_search_op_entries;
} else {
(*(Slapi_Entry ***)value) = NULL;
}
break;
case SLAPI_PLUGIN_INTOP_SEARCH_REFERRALS:
if (pblock->pb_intop != NULL) {
(*(char ***)value) = pblock->pb_intop->pb_plugin_internal_search_op_referrals;
} else {
(*(char ***)value) = NULL;
}
break;
/* database plugin functions */
case SLAPI_PLUGIN_DB_BIND_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_bind;
break;
case SLAPI_PLUGIN_DB_UNBIND_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_unbind;
break;
case SLAPI_PLUGIN_DB_SEARCH_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_search;
break;
case SLAPI_PLUGIN_DB_NEXT_SEARCH_ENTRY_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_next_search_entry;
break;
case SLAPI_PLUGIN_DB_NEXT_SEARCH_ENTRY_EXT_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_next_search_entry_ext;
break;
case SLAPI_PLUGIN_DB_SEARCH_RESULTS_RELEASE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(VFPP *)value) = pblock->pb_plugin->plg_search_results_release;
break;
case SLAPI_PLUGIN_DB_PREV_SEARCH_RESULTS_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(VFP *)value) = pblock->pb_plugin->plg_prev_search_results;
break;
case SLAPI_PLUGIN_DB_COMPARE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_compare;
break;
case SLAPI_PLUGIN_DB_MODIFY_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_modify;
break;
case SLAPI_PLUGIN_DB_MODRDN_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_modrdn;
break;
case SLAPI_PLUGIN_DB_ADD_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_add;
break;
case SLAPI_PLUGIN_DB_DELETE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_delete;
break;
case SLAPI_PLUGIN_DB_ABANDON_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_abandon;
break;
case SLAPI_PLUGIN_DB_CONFIG_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_config;
break;
case SLAPI_PLUGIN_CLOSE_FN:
(*(IFP *)value) = pblock->pb_plugin->plg_close;
break;
case SLAPI_PLUGIN_CLEANUP_FN:
(*(IFP *)value) = pblock->pb_plugin->plg_cleanup;
break;
case SLAPI_PLUGIN_START_FN:
(*(IFP *)value) = pblock->pb_plugin->plg_start;
break;
case SLAPI_PLUGIN_POSTSTART_FN:
(*(IFP *)value) = pblock->pb_plugin->plg_poststart;
break;
case SLAPI_PLUGIN_DB_WIRE_IMPORT_FN:
(*(IFP *)value) = pblock->pb_plugin->plg_wire_import;
break;
case SLAPI_PLUGIN_DB_GET_INFO_FN:
(*(IFP *)value) = pblock->pb_plugin->plg_get_info;
break;
case SLAPI_PLUGIN_DB_SET_INFO_FN:
(*(IFP *)value) = pblock->pb_plugin->plg_set_info;
break;
case SLAPI_PLUGIN_DB_CTRL_INFO_FN:
(*(IFP *)value) = pblock->pb_plugin->plg_ctrl_info;
break;
case SLAPI_PLUGIN_DB_SEQ_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_seq;
break;
case SLAPI_PLUGIN_DB_ENTRY_FN:
(*(IFP *)value) = SLAPI_PBLOCK_GET_PLUGIN_RELATED_POINTER(pblock,
plg_entry);
break;
case SLAPI_PLUGIN_DB_REFERRAL_FN:
(*(IFP *)value) = SLAPI_PBLOCK_GET_PLUGIN_RELATED_POINTER(pblock,
plg_referral);
break;
case SLAPI_PLUGIN_DB_RESULT_FN:
(*(IFP *)value) = SLAPI_PBLOCK_GET_PLUGIN_RELATED_POINTER(pblock,
plg_result);
break;
case SLAPI_PLUGIN_DB_RMDB_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_rmdb;
break;
case SLAPI_PLUGIN_DB_LDIF2DB_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_ldif2db;
break;
case SLAPI_PLUGIN_DB_DB2LDIF_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_db2ldif;
break;
case SLAPI_PLUGIN_DB_COMPACT_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_dbcompact;
break;
case SLAPI_PLUGIN_DB_DB2INDEX_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_db2index;
break;
case SLAPI_PLUGIN_DB_ARCHIVE2DB_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_archive2db;
break;
case SLAPI_PLUGIN_DB_DB2ARCHIVE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_db2archive;
break;
case SLAPI_PLUGIN_DB_UPGRADEDB_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_upgradedb;
break;
case SLAPI_PLUGIN_DB_UPGRADEDNFORMAT_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_upgradednformat;
break;
case SLAPI_PLUGIN_DB_DBVERIFY_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_dbverify;
break;
case SLAPI_PLUGIN_DB_BEGIN_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_un.plg_un_db.plg_un_db_begin;
break;
case SLAPI_PLUGIN_DB_COMMIT_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_un.plg_un_db.plg_un_db_commit;
break;
case SLAPI_PLUGIN_DB_ABORT_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_un.plg_un_db.plg_un_db_abort;
break;
case SLAPI_PLUGIN_DB_TEST_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_un.plg_un_db.plg_un_db_dbtest;
break;
/* database plugin-specific parameters */
case SLAPI_PLUGIN_DB_NO_ACL:
if (pblock->pb_plugin && pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE) {
return (-1);
}
if (NULL == be) {
(*(int *)value) = 0; /* default value */
} else {
(*(int *)value) = be->be_noacl;
}
break;
/* extendedop plugin functions */
case SLAPI_PLUGIN_EXT_OP_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP &&
pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNEXTENDEDOP) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_exhandler;
break;
case SLAPI_PLUGIN_EXT_OP_OIDLIST:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP &&
pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNEXTENDEDOP) {
return (-1);
}
(*(char ***)value) = pblock->pb_plugin->plg_exoids;
break;
case SLAPI_PLUGIN_EXT_OP_NAMELIST:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP &&
pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNEXTENDEDOP) {
return (-1);
}
(*(char ***)value) = pblock->pb_plugin->plg_exnames;
break;
case SLAPI_PLUGIN_EXT_OP_BACKEND_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_EXTENDEDOP &&
pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNEXTENDEDOP) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_be_exhandler;
break;
/* preoperation plugin functions */
case SLAPI_PLUGIN_PRE_BIND_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_prebind;
break;
case SLAPI_PLUGIN_PRE_UNBIND_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_preunbind;
break;
case SLAPI_PLUGIN_PRE_SEARCH_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_presearch;
break;
case SLAPI_PLUGIN_PRE_COMPARE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_precompare;
break;
case SLAPI_PLUGIN_PRE_MODIFY_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_premodify;
break;
case SLAPI_PLUGIN_PRE_MODRDN_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_premodrdn;
break;
case SLAPI_PLUGIN_PRE_ADD_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_preadd;
break;
case SLAPI_PLUGIN_PRE_DELETE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_predelete;
break;
case SLAPI_PLUGIN_PRE_ABANDON_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_preabandon;
break;
case SLAPI_PLUGIN_PRE_ENTRY_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_preentry;
break;
case SLAPI_PLUGIN_PRE_REFERRAL_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_prereferral;
break;
case SLAPI_PLUGIN_PRE_RESULT_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_preresult;
break;
case SLAPI_PLUGIN_PRE_EXTOP_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREEXTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_preextop;
break;
/* postoperation plugin functions */
case SLAPI_PLUGIN_POST_BIND_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_postbind;
break;
case SLAPI_PLUGIN_POST_UNBIND_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_postunbind;
break;
case SLAPI_PLUGIN_POST_SEARCH_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_postsearch;
break;
case SLAPI_PLUGIN_POST_SEARCH_FAIL_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_postsearchfail;
break;
case SLAPI_PLUGIN_POST_COMPARE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_postcompare;
break;
case SLAPI_PLUGIN_POST_MODIFY_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_postmodify;
break;
case SLAPI_PLUGIN_POST_MODRDN_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_postmodrdn;
break;
case SLAPI_PLUGIN_POST_ADD_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_postadd;
break;
case SLAPI_PLUGIN_POST_DELETE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_postdelete;
break;
case SLAPI_PLUGIN_POST_ABANDON_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_postabandon;
break;
case SLAPI_PLUGIN_POST_ENTRY_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_postentry;
break;
case SLAPI_PLUGIN_POST_REFERRAL_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_postreferral;
break;
case SLAPI_PLUGIN_POST_RESULT_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_postresult;
break;
case SLAPI_PLUGIN_POST_EXTOP_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTEXTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_postextop;
break;
case SLAPI_ENTRY_PRE_OP:
if (pblock->pb_intop != NULL) {
(*(Slapi_Entry **)value) = pblock->pb_intop->pb_pre_op_entry;
} else {
(*(Slapi_Entry **)value) = NULL;
}
break;
case SLAPI_ENTRY_POST_OP:
if (pblock->pb_intop != NULL) {
(*(Slapi_Entry **)value) = pblock->pb_intop->pb_post_op_entry;
} else {
(*(Slapi_Entry **)value) = NULL;
}
break;
/* backend preoperation plugin */
case SLAPI_PLUGIN_BE_PRE_MODIFY_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BEPREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_bepremodify;
break;
case SLAPI_PLUGIN_BE_PRE_MODRDN_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BEPREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_bepremodrdn;
break;
case SLAPI_PLUGIN_BE_PRE_ADD_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BEPREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_bepreadd;
break;
case SLAPI_PLUGIN_BE_PRE_DELETE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BEPREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_bepredelete;
break;
case SLAPI_PLUGIN_BE_PRE_CLOSE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BEPREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_bepreclose;
break;
/* backend postoperation plugin */
case SLAPI_PLUGIN_BE_POST_MODIFY_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BEPOSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_bepostmodify;
break;
case SLAPI_PLUGIN_BE_POST_MODRDN_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BEPOSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_bepostmodrdn;
break;
case SLAPI_PLUGIN_BE_POST_ADD_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BEPOSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_bepostadd;
break;
case SLAPI_PLUGIN_BE_POST_DELETE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BEPOSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_bepostdelete;
break;
case SLAPI_PLUGIN_BE_POST_OPEN_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BEPOSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_bepostopen;
break;
case SLAPI_PLUGIN_BE_POST_EXPORT_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BEPOSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_bepostexport;
break;
case SLAPI_PLUGIN_BE_POST_IMPORT_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BEPOSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_bepostimport;
break;
/* internal preoperation plugin */
case SLAPI_PLUGIN_INTERNAL_PRE_MODIFY_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_INTERNAL_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_internal_pre_modify;
break;
case SLAPI_PLUGIN_INTERNAL_PRE_MODRDN_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_INTERNAL_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_internal_pre_modrdn;
break;
case SLAPI_PLUGIN_INTERNAL_PRE_ADD_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_INTERNAL_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_internal_pre_add;
break;
case SLAPI_PLUGIN_INTERNAL_PRE_DELETE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_INTERNAL_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_internal_pre_delete;
break;
/* internal postoperation plugin */
case SLAPI_PLUGIN_INTERNAL_POST_MODIFY_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_INTERNAL_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_internal_post_modify;
break;
case SLAPI_PLUGIN_INTERNAL_POST_MODRDN_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_INTERNAL_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_internal_post_modrdn;
break;
case SLAPI_PLUGIN_INTERNAL_POST_ADD_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_INTERNAL_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_internal_post_add;
break;
case SLAPI_PLUGIN_INTERNAL_POST_DELETE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_INTERNAL_POSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_internal_post_delete;
break;
/* rootDN pre bind operation plugin */
case SLAPI_PLUGIN_INTERNAL_PRE_BIND_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_INTERNAL_PREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_internal_pre_bind;
break;
/* backend pre txn operation plugin */
case SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_betxnpremodify;
break;
case SLAPI_PLUGIN_BE_TXN_PRE_MODRDN_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_betxnpremodrdn;
break;
case SLAPI_PLUGIN_BE_TXN_PRE_ADD_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_betxnpreadd;
break;
case SLAPI_PLUGIN_BE_TXN_PRE_DELETE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_betxnpredelete;
break;
case SLAPI_PLUGIN_BE_TXN_PRE_DELETE_TOMBSTONE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPREOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_betxnpredeletetombstone;
break;
/* backend post txn operation plugin */
case SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPOSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_betxnpostmodify;
break;
case SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPOSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_betxnpostmodrdn;
break;
case SLAPI_PLUGIN_BE_TXN_POST_ADD_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPOSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_betxnpostadd;
break;
case SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPOSTOPERATION) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_betxnpostdelete;
break;
/* target address & controls for all operations should be normalized */
case SLAPI_TARGET_ADDRESS:
if (pblock->pb_op != NULL) {
(*(entry_address **)value) = &(pblock->pb_op->o_params.target_address);
}
break;
case SLAPI_TARGET_DN: /* DEPRECATED */
/* The returned value refers SLAPI_TARGET_SDN.
* It should not be freed.*/
if (pblock->pb_op != NULL) {
Slapi_DN *sdn = pblock->pb_op->o_params.target_address.sdn;
if (sdn) {
(*(char **)value) = (char *)slapi_sdn_get_dn(sdn);
} else {
(*(char **)value) = NULL;
}
} else {
return (-1);
}
break;
case SLAPI_TARGET_SDN: /* Alias from SLAPI_ADD_TARGET_SDN */
if (pblock->pb_op != NULL) {
(*(Slapi_DN **)value) = pblock->pb_op->o_params.target_address.sdn;
} else {
return (-1);
}
break;
case SLAPI_ORIGINAL_TARGET_DN:
if (pblock->pb_op != NULL) {
(*(char **)value) = pblock->pb_op->o_params.target_address.udn;
}
break;
case SLAPI_TARGET_UNIQUEID:
if (pblock->pb_op != NULL) {
(*(char **)value) = pblock->pb_op->o_params.target_address.uniqueid;
}
break;
case SLAPI_REQCONTROLS:
if (pblock->pb_op != NULL) {
(*(LDAPControl ***)value) = pblock->pb_op->o_params.request_controls;
}
break;
case SLAPI_RESCONTROLS:
if (pblock->pb_op != NULL) {
(*(LDAPControl ***)value) = pblock->pb_op->o_results.result_controls;
}
break;
case SLAPI_CONTROLS_ARG: /* used to pass control argument before operation is created */
if (pblock->pb_intop != NULL) {
(*(LDAPControl ***)value) = pblock->pb_intop->pb_ctrls_arg;
} else {
(*(LDAPControl ***)value) = NULL;
}
break;
/* notes to be added to the access log RESULT line for this op. */
case SLAPI_OPERATION_NOTES:
if (pblock->pb_intop != NULL) {
(*(unsigned int *)value) = pblock->pb_intop->pb_operation_notes;
} else {
(*(unsigned int *)value) = 0;
}
break;
/* syntax plugin functions */
case SLAPI_PLUGIN_SYNTAX_FILTER_AVA:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_syntax_filter_ava;
break;
case SLAPI_PLUGIN_SYNTAX_FILTER_SUB:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_syntax_filter_sub;
break;
case SLAPI_PLUGIN_SYNTAX_VALUES2KEYS:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_syntax_values2keys;
break;
case SLAPI_PLUGIN_SYNTAX_ASSERTION2KEYS_AVA:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_syntax_assertion2keys_ava;
break;
case SLAPI_PLUGIN_SYNTAX_ASSERTION2KEYS_SUB:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_syntax_assertion2keys_sub;
break;
case SLAPI_PLUGIN_SYNTAX_NAMES:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
(*(char ***)value) = pblock->pb_plugin->plg_syntax_names;
break;
case SLAPI_PLUGIN_SYNTAX_OID:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
(*(char **)value) = pblock->pb_plugin->plg_syntax_oid;
break;
case SLAPI_PLUGIN_SYNTAX_FLAGS:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
(*(int *)value) = pblock->pb_plugin->plg_syntax_flags;
break;
case SLAPI_PLUGIN_SYNTAX_COMPARE:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_syntax_compare;
break;
case SLAPI_SYNTAX_SUBSTRLENS: /* aka SLAPI_MR_SUBSTRLENS */
if (pblock->pb_intplugin != NULL) {
(*(int **)value) = pblock->pb_intplugin->pb_substrlens;
} else {
(*(int **)value) = NULL;
}
break;
case SLAPI_PLUGIN_SYNTAX_VALIDATE:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_syntax_validate;
break;
case SLAPI_PLUGIN_SYNTAX_NORMALIZE:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX) {
return (-1);
}
(*(VFPV *)value) = pblock->pb_plugin->plg_syntax_normalize;
break;
/* controls we know about */
case SLAPI_MANAGEDSAIT:
if (pblock->pb_intop != NULL) {
(*(int *)value) = pblock->pb_intop->pb_managedsait;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_PWPOLICY:
if (pblock->pb_intop != NULL) {
(*(int *)value) = pblock->pb_intop->pb_pwpolicy_ctrl;
} else {
(*(int *)value) = 0;
}
break;
/* add arguments */
case SLAPI_ADD_ENTRY:
if (pblock->pb_op != NULL) {
(*(Slapi_Entry **)value) = pblock->pb_op->o_params.p.p_add.target_entry;
}
break;
case SLAPI_ADD_EXISTING_DN_ENTRY:
if (pblock->pb_intop != NULL) {
(*(Slapi_Entry **)value) = pblock->pb_intop->pb_existing_dn_entry;
} else {
(*(Slapi_Entry **)value) = NULL;
}
break;
case SLAPI_ADD_EXISTING_UNIQUEID_ENTRY:
if (pblock->pb_intop != NULL) {
(*(Slapi_Entry **)value) = pblock->pb_intop->pb_existing_uniqueid_entry;
} else {
(*(Slapi_Entry **)value) = NULL;
}
break;
case SLAPI_ADD_PARENT_ENTRY:
if (pblock->pb_intop != NULL) {
(*(Slapi_Entry **)value) = pblock->pb_intop->pb_parent_entry;
}
break;
case SLAPI_ADD_PARENT_UNIQUEID:
if (pblock->pb_op != NULL) {
(*(char **)value) = pblock->pb_op->o_params.p.p_add.parentuniqueid;
} else {
(*(char **)value) = NULL;
}
break;
/* bind arguments */
case SLAPI_BIND_METHOD:
if (pblock->pb_op != NULL) {
(*(ber_tag_t *)value) = pblock->pb_op->o_params.p.p_bind.bind_method;
}
break;
case SLAPI_BIND_CREDENTIALS:
if (pblock->pb_op != NULL) {
(*(struct berval **)value) = pblock->pb_op->o_params.p.p_bind.bind_creds;
}
break;
case SLAPI_BIND_SASLMECHANISM:
if (pblock->pb_op != NULL) {
(*(char **)value) = pblock->pb_op->o_params.p.p_bind.bind_saslmechanism;
}
break;
/* bind return values */
case SLAPI_BIND_RET_SASLCREDS:
if (pblock->pb_op != NULL) {
(*(struct berval **)value) = pblock->pb_op->o_results.r.r_bind.bind_ret_saslcreds;
}
break;
/* compare arguments */
case SLAPI_COMPARE_TYPE:
if (pblock->pb_op != NULL) {
(*(char **)value) = pblock->pb_op->o_params.p.p_compare.compare_ava.ava_type;
}
break;
case SLAPI_COMPARE_VALUE:
if (pblock->pb_op != NULL) {
(*(struct berval **)value) = &pblock->pb_op->o_params.p.p_compare.compare_ava.ava_value;
}
break;
/* modify arguments */
case SLAPI_MODIFY_MODS:
PR_ASSERT(pblock->pb_op);
if (pblock->pb_op != NULL) {
if (pblock->pb_op->o_params.operation_type == SLAPI_OPERATION_MODIFY) {
(*(LDAPMod ***)value) = pblock->pb_op->o_params.p.p_modify.modify_mods;
} else if (pblock->pb_op->o_params.operation_type == SLAPI_OPERATION_MODRDN) {
(*(LDAPMod ***)value) = pblock->pb_op->o_params.p.p_modrdn.modrdn_mods;
} else {
PR_ASSERT(0); /* JCM */
}
}
break;
/* modrdn arguments */
case SLAPI_MODRDN_NEWRDN:
if (pblock->pb_op != NULL) {
(*(char **)value) = pblock->pb_op->o_params.p.p_modrdn.modrdn_newrdn;
}
break;
case SLAPI_MODRDN_DELOLDRDN:
if (pblock->pb_op != NULL) {
(*(int *)value) = pblock->pb_op->o_params.p.p_modrdn.modrdn_deloldrdn;
}
break;
case SLAPI_MODRDN_NEWSUPERIOR: /* DEPRECATED */
if (pblock->pb_op != NULL) {
Slapi_DN *sdn =
pblock->pb_op->o_params.p.p_modrdn.modrdn_newsuperior_address.sdn;
if (sdn) {
(*(char **)value) = (char *)slapi_sdn_get_dn(sdn);
} else {
(*(char **)value) = NULL;
}
} else {
return -1;
}
break;
case SLAPI_MODRDN_NEWSUPERIOR_SDN:
if (pblock->pb_op != NULL) {
(*(Slapi_DN **)value) =
pblock->pb_op->o_params.p.p_modrdn.modrdn_newsuperior_address.sdn;
} else {
return -1;
}
break;
case SLAPI_MODRDN_PARENT_ENTRY:
if (pblock->pb_intop != NULL) {
(*(Slapi_Entry **)value) = pblock->pb_intop->pb_parent_entry;
} else {
(*(Slapi_Entry **)value) = NULL;
}
break;
case SLAPI_MODRDN_NEWPARENT_ENTRY:
if (pblock->pb_intop != NULL) {
(*(Slapi_Entry **)value) = pblock->pb_intop->pb_newparent_entry;
} else {
(*(Slapi_Entry **)value) = NULL;
}
break;
case SLAPI_MODRDN_TARGET_ENTRY:
if (pblock->pb_intop != NULL) {
(*(Slapi_Entry **)value) = pblock->pb_intop->pb_target_entry;
} else {
(*(Slapi_Entry **)value) = NULL;
}
break;
case SLAPI_MODRDN_NEWSUPERIOR_ADDRESS:
if (pblock->pb_op != NULL) {
(*(entry_address **)value) = &(pblock->pb_op->o_params.p.p_modrdn.modrdn_newsuperior_address);
}
break;
/* search arguments */
case SLAPI_SEARCH_SCOPE:
if (pblock->pb_op != NULL) {
(*(int *)value) = pblock->pb_op->o_params.p.p_search.search_scope;
}
break;
case SLAPI_SEARCH_DEREF:
if (pblock->pb_op != NULL) {
(*(int *)value) = pblock->pb_op->o_params.p.p_search.search_deref;
}
break;
case SLAPI_SEARCH_SIZELIMIT:
if (pblock->pb_op != NULL) {
(*(int *)value) = pblock->pb_op->o_params.p.p_search.search_sizelimit;
}
break;
case SLAPI_SEARCH_TIMELIMIT:
if (pblock->pb_op != NULL) {
(*(int *)value) = pblock->pb_op->o_params.p.p_search.search_timelimit;
}
break;
case SLAPI_SEARCH_FILTER:
if (pblock->pb_op != NULL) {
(*(struct slapi_filter **)value) = pblock->pb_op->o_params.p.p_search.search_filter;
}
break;
case SLAPI_SEARCH_STRFILTER:
if (pblock->pb_op != NULL) {
(*(char **)value) = pblock->pb_op->o_params.p.p_search.search_strfilter;
}
break;
case SLAPI_SEARCH_ATTRS:
if (pblock->pb_op != NULL) {
(*(char ***)value) = pblock->pb_op->o_params.p.p_search.search_attrs;
}
break;
case SLAPI_SEARCH_GERATTRS:
if (pblock->pb_op != NULL) {
(*(char ***)value) = pblock->pb_op->o_params.p.p_search.search_gerattrs;
}
break;
case SLAPI_SEARCH_REQATTRS:
if (pblock->pb_op != NULL) {
(*(char ***)value) = pblock->pb_op->o_searchattrs;
}
break;
case SLAPI_SEARCH_ATTRSONLY:
if (pblock->pb_op != NULL) {
(*(int *)value) = pblock->pb_op->o_params.p.p_search.search_attrsonly;
}
break;
case SLAPI_SEARCH_IS_AND:
if (pblock->pb_op != NULL) {
(*(int *)value) = pblock->pb_op->o_params.p.p_search.search_is_and;
}
break;
case SLAPI_ABANDON_MSGID:
if (pblock->pb_op != NULL) {
(*(int *)value) = pblock->pb_op->o_params.p.p_abandon.abandon_targetmsgid;
}
break;
/* extended operation arguments */
case SLAPI_EXT_OP_REQ_OID:
if (pblock->pb_op != NULL) {
(*(char **)value) = pblock->pb_op->o_params.p.p_extended.exop_oid;
}
break;
case SLAPI_EXT_OP_REQ_VALUE:
if (pblock->pb_op != NULL) {
(*(struct berval **)value) = pblock->pb_op->o_params.p.p_extended.exop_value;
}
break;
/* extended operation return values */
case SLAPI_EXT_OP_RET_OID:
if (pblock->pb_op != NULL) {
(*(char **)value) = pblock->pb_op->o_results.r.r_extended.exop_ret_oid;
}
break;
case SLAPI_EXT_OP_RET_VALUE:
if (pblock->pb_op != NULL) {
(*(struct berval **)value) = pblock->pb_op->o_results.r.r_extended.exop_ret_value;
}
break;
/* matching rule plugin functions */
case SLAPI_PLUGIN_MR_FILTER_CREATE_FN:
SLAPI_PLUGIN_TYPE_CHECK(pblock, SLAPI_PLUGIN_MATCHINGRULE);
(*(IFP *)value) = pblock->pb_plugin->plg_mr_filter_create;
break;
case SLAPI_PLUGIN_MR_INDEXER_CREATE_FN:
SLAPI_PLUGIN_TYPE_CHECK(pblock, SLAPI_PLUGIN_MATCHINGRULE);
(*(IFP *)value) = pblock->pb_plugin->plg_mr_indexer_create;
break;
case SLAPI_PLUGIN_MR_FILTER_MATCH_FN:
if (pblock->pb_mr != NULL) {
(*(mrFilterMatchFn *)value) = pblock->pb_mr->filter_match_fn;
} else {
(*(mrFilterMatchFn *)value) = NULL;
}
break;
case SLAPI_PLUGIN_MR_FILTER_INDEX_FN:
if (pblock->pb_mr != NULL) {
(*(IFP *)value) = pblock->pb_mr->filter_index_fn;
} else {
(*(IFP *)value) = NULL;
}
break;
case SLAPI_PLUGIN_MR_FILTER_RESET_FN:
if (pblock->pb_mr != NULL) {
(*(IFP *)value) = pblock->pb_mr->filter_reset_fn;
} else {
(*(IFP *)value) = NULL;
}
break;
case SLAPI_PLUGIN_MR_INDEX_FN:
if (pblock->pb_mr != NULL) {
(*(IFP *)value) = pblock->pb_mr->index_fn;
} else {
(*(IFP *)value) = NULL;
}
break;
case SLAPI_PLUGIN_MR_INDEX_SV_FN:
if (pblock->pb_mr != NULL) {
(*(IFP *)value) = pblock->pb_mr->index_sv_fn;
} else {
(*(IFP *)value) = NULL;
}
break;
/* matching rule plugin arguments */
case SLAPI_PLUGIN_MR_OID:
if (pblock->pb_mr != NULL) {
(*(char **)value) = pblock->pb_mr->oid;
} else {
(*(char **)value) = NULL;
}
break;
case SLAPI_PLUGIN_MR_TYPE:
if (pblock->pb_mr != NULL) {
(*(char **)value) = pblock->pb_mr->type;
} else {
(*(char **)value) = NULL;
}
break;
case SLAPI_PLUGIN_MR_VALUE:
if (pblock->pb_mr != NULL) {
(*(struct berval **)value) = pblock->pb_mr->value;
} else {
(*(struct berval **)value) = NULL;
}
break;
case SLAPI_PLUGIN_MR_VALUES:
if (pblock->pb_mr != NULL) {
(*(struct berval ***)value) = pblock->pb_mr->values;
} else {
(*(struct berval ***)value) = NULL;
}
break;
case SLAPI_PLUGIN_MR_KEYS:
if (pblock->pb_mr != NULL) {
(*(struct berval ***)value) = pblock->pb_mr->keys;
} else {
(*(struct berval ***)value) = NULL;
}
break;
case SLAPI_PLUGIN_MR_FILTER_REUSABLE:
if (pblock->pb_mr != NULL) {
(*(unsigned int *)value) = pblock->pb_mr->filter_reusable;
} else {
(*(unsigned int *)value) = 0;
}
break;
case SLAPI_PLUGIN_MR_QUERY_OPERATOR:
if (pblock->pb_mr != NULL) {
(*(int *)value) = pblock->pb_mr->query_operator;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_PLUGIN_MR_USAGE:
if (pblock->pb_mr != NULL) {
(*(unsigned int *)value) = pblock->pb_mr->usage;
} else {
(*(unsigned int *)value) = 0;
}
break;
/* new style matching rule syntax plugin functions */
case SLAPI_PLUGIN_MR_FILTER_AVA:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_mr_filter_ava;
break;
case SLAPI_PLUGIN_MR_FILTER_SUB:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_mr_filter_sub;
break;
case SLAPI_PLUGIN_MR_VALUES2KEYS:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_mr_values2keys;
break;
case SLAPI_PLUGIN_MR_ASSERTION2KEYS_AVA:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_mr_assertion2keys_ava;
break;
case SLAPI_PLUGIN_MR_ASSERTION2KEYS_SUB:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_mr_assertion2keys_sub;
break;
case SLAPI_PLUGIN_MR_FLAGS:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
return (-1);
}
(*(int *)value) = pblock->pb_plugin->plg_mr_flags;
break;
case SLAPI_PLUGIN_MR_NAMES:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
return (-1);
}
(*(char ***)value) = pblock->pb_plugin->plg_mr_names;
break;
case SLAPI_PLUGIN_MR_COMPARE:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
return (-1);
}
(*(IFP *)value) = pblock->pb_plugin->plg_mr_compare;
break;
case SLAPI_PLUGIN_MR_NORMALIZE:
if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_MATCHINGRULE) {
return (-1);
}
(*(VFPV *)value) = pblock->pb_plugin->plg_mr_normalize;
break;
/* seq arguments */
case SLAPI_SEQ_TYPE:
if (pblock->pb_task != NULL) {
(*(int *)value) = pblock->pb_task->seq_type;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_SEQ_ATTRNAME:
if (pblock->pb_task != NULL) {
(*(char **)value) = pblock->pb_task->seq_attrname;
} else {
(*(char **)value) = NULL;
}
break;
case SLAPI_SEQ_VAL:
if (pblock->pb_task != NULL) {
(*(char **)value) = pblock->pb_task->seq_val;
} else {
(*(char **)value) = NULL;
}
break;
/* ldif2db arguments */
case SLAPI_LDIF2DB_FILE:
if (pblock->pb_task != NULL) {
(*(char ***)value) = pblock->pb_task->ldif_files;
} else {
(*(char ***)value) = NULL;
}
break;
case SLAPI_LDIF2DB_REMOVEDUPVALS:
if (pblock->pb_task != NULL) {
(*(int *)value) = pblock->pb_task->removedupvals;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_DB2INDEX_ATTRS:
if (pblock->pb_task != NULL) {
(*(char ***)value) = pblock->pb_task->db2index_attrs;
} else {
(*(char ***)value) = NULL;
}
break;
case SLAPI_LDIF2DB_NOATTRINDEXES:
if (pblock->pb_task != NULL) {
(*(int *)value) = pblock->pb_task->ldif2db_noattrindexes;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_LDIF2DB_INCLUDE:
if (pblock->pb_task != NULL) {
(*(char ***)value) = pblock->pb_task->ldif_include;
} else {
(*(char ***)value) = NULL;
}
break;
case SLAPI_LDIF2DB_EXCLUDE:
if (pblock->pb_task != NULL) {
(*(char ***)value) = pblock->pb_task->ldif_exclude;
} else {
(*(char ***)value) = NULL;
}
break;
case SLAPI_LDIF2DB_GENERATE_UNIQUEID:
if (pblock->pb_task != NULL) {
(*(int *)value) = pblock->pb_task->ldif_generate_uniqueid;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_LDIF2DB_ENCRYPT:
case SLAPI_DB2LDIF_DECRYPT:
if (pblock->pb_task != NULL) {
(*(int *)value) = pblock->pb_task->ldif_encrypt;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_LDIF2DB_NAMESPACEID:
if (pblock->pb_task != NULL) {
(*(char **)value) = pblock->pb_task->ldif_namespaceid;
} else {
(*(char **)value) = NULL;
}
break;
/* db2ldif arguments */
case SLAPI_DB2LDIF_PRINTKEY:
if (pblock->pb_task != NULL) {
(*(int *)value) = pblock->pb_task->ldif_printkey;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_DB2LDIF_DUMP_UNIQUEID:
if (pblock->pb_task != NULL) {
(*(int *)value) = pblock->pb_task->ldif_dump_uniqueid;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_DB2LDIF_FILE:
if (pblock->pb_task != NULL) {
(*(char **)value) = pblock->pb_task->ldif_file;
} else {
(*(char **)value) = NULL;
}
break;
/* db2ldif/ldif2db/db2bak/bak2db arguments */
case SLAPI_BACKEND_INSTANCE_NAME:
if (pblock->pb_task != NULL) {
(*(char **)value) = pblock->pb_task->instance_name;
} else {
(*(char **)value) = NULL;
}
break;
case SLAPI_BACKEND_TASK:
if (pblock->pb_task != NULL) {
(*(Slapi_Task **)value) = pblock->pb_task->task;
} else {
(*(Slapi_Task **)value) = NULL;
}
break;
case SLAPI_TASK_FLAGS:
if (pblock->pb_task != NULL) {
(*(int *)value) = pblock->pb_task->task_flags;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_DB2LDIF_SERVER_RUNNING:
if (pblock->pb_task != NULL) {
(*(int *)value) = pblock->pb_task->server_running;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_BULK_IMPORT_ENTRY:
if (pblock->pb_task != NULL) {
(*(Slapi_Entry **)value) = pblock->pb_task->import_entry;
} else {
(*(Slapi_Entry **)value) = NULL;
}
break;
case SLAPI_BULK_IMPORT_STATE:
if (pblock->pb_task != NULL) {
(*(int *)value) = pblock->pb_task->import_state;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_LDIF_CHANGELOG:
if (pblock->pb_task != NULL) {
(*(int *)value) = pblock->pb_task->ldif_include_changelog;
} else {
(*(int *)value) = 0;
}
break;
/* dbverify */
case SLAPI_DBVERIFY_DBDIR:
if (pblock->pb_task != NULL) {
(*(char **)value) = pblock->pb_task->dbverify_dbdir;
} else {
(*(char **)value) = NULL;
}
break;
/* transaction arguments */
case SLAPI_PARENT_TXN:
if (pblock->pb_deprecated != NULL) {
(*(void **)value) = pblock->pb_deprecated->pb_parent_txn;
} else {
(*(void **)value) = NULL;
}
break;
case SLAPI_TXN:
if (pblock->pb_intop != NULL) {
(*(void **)value) = pblock->pb_intop->pb_txn;
} else {
(*(void **)value) = NULL;
}
break;
case SLAPI_TXN_RUV_MODS_FN:
if (pblock->pb_intop != NULL) {
(*(IFP *)value) = pblock->pb_intop->pb_txn_ruv_mods_fn;
} else {
(*(IFP *)value) = NULL;
}
break;
/* Search results set */
case SLAPI_SEARCH_RESULT_SET:
if (pblock->pb_op != NULL) {
(*(void **)value) = pblock->pb_op->o_results.r.r_search.search_result_set;
}
break;
/* estimated search result set size */
case SLAPI_SEARCH_RESULT_SET_SIZE_ESTIMATE:
if (pblock->pb_op != NULL) {
(*(int *)value) = pblock->pb_op->o_results.r.r_search.estimate;
}
break;
/* Entry returned from iterating over results set */
case SLAPI_SEARCH_RESULT_ENTRY:
if (pblock->pb_op != NULL) {
(*(void **)value) = pblock->pb_op->o_results.r.r_search.search_result_entry;
}
break;
case SLAPI_SEARCH_RESULT_ENTRY_EXT:
if (pblock->pb_op != NULL) {
(*(void **)value) = pblock->pb_op->o_results.r.r_search.opaque_backend_ptr;
}
break;
/* Number of entries returned from search */
case SLAPI_NENTRIES:
if (pblock->pb_op != NULL) {
(*(int *)value) = pblock->pb_op->o_results.r.r_search.nentries;
}
break;
/* Referrals encountered while iterating over result set */
case SLAPI_SEARCH_REFERRALS:
if (pblock->pb_op != NULL) {
(*(struct berval ***)value) = pblock->pb_op->o_results.r.r_search.search_referrals;
}
break;
case SLAPI_RESULT_CODE:
if (pblock->pb_op != NULL)
*((int *)value) = pblock->pb_op->o_results.result_code;
break;
case SLAPI_RESULT_MATCHED:
if (pblock->pb_op != NULL)
*((char **)value) = pblock->pb_op->o_results.result_matched;
break;
case SLAPI_RESULT_TEXT:
if (pblock->pb_op != NULL)
*((char **)value) = pblock->pb_op->o_results.result_text;
break;
case SLAPI_PB_RESULT_TEXT:
if (pblock->pb_intop != NULL) {
*((char **)value) = pblock->pb_intop->pb_result_text;
} else {
*((char **)value) = NULL;
}
break;
/* Size of the database, in kb */
case SLAPI_DBSIZE:
if (pblock->pb_misc != NULL) {
(*(unsigned int *)value) = pblock->pb_misc->pb_dbsize;
} else {
(*(unsigned int *)value) = 0;
}
break;
/* ACL Plugin */
case SLAPI_PLUGIN_ACL_INIT:
(*(IFP *)value) = pblock->pb_plugin->plg_acl_init;
break;
case SLAPI_PLUGIN_ACL_SYNTAX_CHECK:
(*(IFP *)value) = pblock->pb_plugin->plg_acl_syntax_check;
break;
case SLAPI_PLUGIN_ACL_ALLOW_ACCESS:
(*(IFP *)value) = pblock->pb_plugin->plg_acl_access_allowed;
break;
case SLAPI_PLUGIN_ACL_MODS_ALLOWED:
(*(IFP *)value) = pblock->pb_plugin->plg_acl_mods_allowed;
break;
case SLAPI_PLUGIN_ACL_MODS_UPDATE:
(*(IFP *)value) = pblock->pb_plugin->plg_acl_mods_update;
break;
/* MMR Plugin */
case SLAPI_PLUGIN_MMR_BETXN_PREOP:
(*(IFP *)value) = pblock->pb_plugin->plg_mmr_betxn_preop;
break;
case SLAPI_PLUGIN_MMR_BETXN_POSTOP:
(*(IFP *)value) = pblock->pb_plugin->plg_mmr_betxn_postop;
break;
case SLAPI_REQUESTOR_DN:
/* NOTE: It's not a copy of the DN */
if (pblock->pb_op != NULL) {
char *dn = (char *)slapi_sdn_get_dn(&pblock->pb_op->o_sdn);
if (dn == NULL)
(*(char **)value) = "";
else
(*(char **)value) = dn;
}
break;
case SLAPI_REQUESTOR_SDN:
if (pblock->pb_op != NULL) {
(*(Slapi_DN **)value) = &pblock->pb_op->o_sdn;
}
break;
case SLAPI_REQUESTOR_NDN:
/* NOTE: It's not a copy of the DN */
if (pblock->pb_op != NULL) {
char *ndn = (char *)slapi_sdn_get_ndn(&pblock->pb_op->o_sdn);
if (ndn == NULL)
(*(char **)value) = "";
else
(*(char **)value) = ndn;
}
break;
case SLAPI_OPERATION_AUTHTYPE:
if (pblock->pb_op != NULL) {
if (pblock->pb_op->o_authtype == NULL)
(*(char **)value) = "";
else
(*(char **)value) = pblock->pb_op->o_authtype;
}
break;
case SLAPI_OPERATION_SSF:
if (pblock->pb_op != NULL) {
*((int *)value) = pblock->pb_op->o_ssf;
}
break;
case SLAPI_CLIENT_DNS:
if (pblock->pb_conn == NULL) {
slapi_log_err(SLAPI_LOG_ERR,
"slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CLIENT_DNS \n");
return (-1);
}
(*(struct berval ***)value) = pblock->pb_conn->c_domain;
break;
case SLAPI_BE_MAXNESTLEVEL:
if (NULL == be) {
return (-1);
}
(*(int *)value) = be->be_maxnestlevel;
break;
case SLAPI_OPERATION_ID:
if (pblock->pb_op != NULL) {
(*(int *)value) = pblock->pb_op->o_opid;
}
break;
/* Command line arguments */
case SLAPI_ARGC:
if (pblock->pb_misc != NULL) {
(*(int *)value) = pblock->pb_misc->pb_slapd_argc;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_ARGV:
if (pblock->pb_misc != NULL) {
(*(char ***)value) = pblock->pb_misc->pb_slapd_argv;
} else {
(*(char ***)value) = NULL;
}
break;
/* Config file directory */
case SLAPI_CONFIG_DIRECTORY:
if (pblock->pb_intplugin != NULL) {
(*(char **)value) = pblock->pb_intplugin->pb_slapd_configdir;
} else {
(*(char **)value) = NULL;
}
break;
/* password storage scheme (kexcoff */
case SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME:
(*(char **)value) = pblock->pb_plugin->plg_pwdstorageschemename;
break;
case SLAPI_PLUGIN_PWD_STORAGE_SCHEME_USER_PWD:
if (pblock->pb_deprecated != NULL) {
(*(char **)value) = pblock->pb_deprecated->pb_pwd_storage_scheme_user_passwd;
} else {
(*(char **)value) = NULL;
}
break;
case SLAPI_PLUGIN_PWD_STORAGE_SCHEME_DB_PWD:
if (pblock->pb_deprecated != NULL) {
(*(char **)value) = pblock->pb_deprecated->pb_pwd_storage_scheme_db_passwd;
} else {
(*(char **)value) = NULL;
}
break;
case SLAPI_PLUGIN_PWD_STORAGE_SCHEME_ENC_FN:
(*(CFP *)value) = pblock->pb_plugin->plg_pwdstorageschemeenc;
break;
case SLAPI_PLUGIN_PWD_STORAGE_SCHEME_DEC_FN:
(*(IFP *)value) = pblock->pb_plugin->plg_pwdstorageschemedec;
break;
case SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN:
(*(IFP *)value) = pblock->pb_plugin->plg_pwdstorageschemecmp;
break;
/* entry fetch/store plugin */
case SLAPI_PLUGIN_ENTRY_FETCH_FUNC:
(*(IFP *)value) = pblock->pb_plugin->plg_entryfetchfunc;
break;
case SLAPI_PLUGIN_ENTRY_STORE_FUNC:
(*(IFP *)value) = pblock->pb_plugin->plg_entrystorefunc;
break;
case SLAPI_PLUGIN_ENABLED:
if (pblock->pb_intplugin != NULL) {
*((int *)value) = pblock->pb_intplugin->pb_plugin_enabled;
} else {
*((int *)value) = 0;
}
break;
/* DSE add parameters */
case SLAPI_DSE_DONT_WRITE_WHEN_ADDING:
if (pblock->pb_dse != NULL) {
(*(int *)value) = pblock->pb_dse->dont_add_write;
} else {
(*(int *)value) = 0;
}
break;
/* DSE add parameters */
case SLAPI_DSE_MERGE_WHEN_ADDING:
if (pblock->pb_dse != NULL) {
(*(int *)value) = pblock->pb_dse->add_merge;
} else {
(*(int *)value) = 0;
}
break;
/* DSE add parameters */
case SLAPI_DSE_DONT_CHECK_DUPS:
if (pblock->pb_dse != NULL) {
(*(int *)value) = pblock->pb_dse->dont_check_dups;
} else {
(*(int *)value) = 0;
}
break;
/* DSE modify parameters */
case SLAPI_DSE_REAPPLY_MODS:
if (pblock->pb_dse != NULL) {
(*(int *)value) = pblock->pb_dse->reapply_mods;
} else {
(*(int *)value) = 0;
}
break;
/* DSE read parameters */
case SLAPI_DSE_IS_PRIMARY_FILE:
if (pblock->pb_dse != NULL) {
(*(int *)value) = pblock->pb_dse->is_primary_file;
} else {
(*(int *)value) = 0;
}
break;
/* used internally by schema code (schema.c) */
case SLAPI_SCHEMA_FLAGS:
if (pblock->pb_dse != NULL) {
(*(int *)value) = pblock->pb_dse->schema_flags;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_URP_NAMING_COLLISION_DN:
if (pblock->pb_intop != NULL) {
(*(char **)value) = pblock->pb_intop->pb_urp_naming_collision_dn;
} else {
(*(char **)value) = NULL;
}
break;
case SLAPI_URP_TOMBSTONE_UNIQUEID:
if (pblock->pb_intop != NULL) {
(*(char **)value) = pblock->pb_intop->pb_urp_tombstone_uniqueid;
} else {
(*(char **)value) = NULL;
}
break;
case SLAPI_URP_TOMBSTONE_CONFLICT_DN:
if (pblock->pb_intop != NULL) {
(*(char **)value) = pblock->pb_intop->pb_urp_tombstone_conflict_dn;
} else {
(*(char **)value) = NULL;
}
break;
case SLAPI_SEARCH_CTRLS:
if (pblock->pb_intop != NULL) {
(*(LDAPControl ***)value) = pblock->pb_intop->pb_search_ctrls;
} else {
(*(LDAPControl ***)value) = NULL;
}
break;
case SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED:
if (pblock->pb_intplugin != NULL) {
(*(int *)value) = pblock->pb_intplugin->pb_syntax_filter_normalized;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_PLUGIN_SYNTAX_FILTER_DATA:
if (pblock->pb_intplugin != NULL) {
(*(void **)value) = pblock->pb_intplugin->pb_syntax_filter_data;
} else {
(*(void **)value) = NULL;
}
break;
case SLAPI_PAGED_RESULTS_INDEX:
if (op_is_pagedresults(pblock->pb_op) && pblock->pb_intop != NULL) {
/* search req is simple paged results */
(*(int *)value) = pblock->pb_intop->pb_paged_results_index;
} else {
(*(int *)value) = -1;
}
break;
case SLAPI_PAGED_RESULTS_COOKIE:
if (op_is_pagedresults(pblock->pb_op) && pblock->pb_intop != NULL) {
/* search req is simple paged results */
(*(int *)value) = pblock->pb_intop->pb_paged_results_cookie;
} else {
(*(int *)value) = 0;
}
break;
case SLAPI_USN_INCREMENT_FOR_TOMBSTONE:
if (pblock->pb_intop != NULL) {
(*(int32_t *)value) = pblock->pb_intop->pb_usn_tombstone_incremented;
} else {
(*(int32_t *)value) = 0;
}
break;
/* ACI Target Check */
case SLAPI_ACI_TARGET_CHECK:
if (pblock->pb_misc != NULL) {
(*(int *)value) = pblock->pb_misc->pb_aci_target_check;
} else {
(*(int *)value) = 0;
}
break;
default:
slapi_log_err(SLAPI_LOG_ERR, "slapi_pblock_get", "Unknown parameter block argument %d\n", arg);
PR_ASSERT(0);
return (-1);
}
return (0);
}
| 0 |
[
"CWE-415"
] |
389-ds-base
|
a3c298f8140d3e4fa1bd5a670f1bb965a21a9b7b
| 297,673,435,553,831,370,000,000,000,000,000,000,000 | 2,142 |
Issue 5218 - double-free of the virtual attribute context in persistent search (#5219)
description:
A search is processed by a worker using a private pblock.
If the search is persistent, the worker spawns a thread
and effectively duplicates its private pblock so that the spawned
thread can continue to process the persistent search.
Then the worker ends the initial search, reinits (frees) its private pblock,
and returns monitoring the wait_queue.
When the persistent search completes, it frees the duplicated
pblock.
The problem is that private pblock and duplicated pblock
are referring to a same structure (pb_vattr_context).
That can lead to a double free
Fix:
When cloning the pblock (slapi_pblock_clone) make sure
to transfer the references inside the original (private)
pblock to the target (cloned) one
That includes pb_vattr_context pointer.
Reviewed by: Mark Reynolds, James Chapman, Pierre Rogier (Thanks !)
Co-authored-by: Mark Reynolds <[email protected]>
|
/*
 * Answer the USBDEVFS_GET_CAPABILITIES ioctl: report the usbfs feature
 * flags available for this device's bus to userspace.
 *
 * Returns 0 on success, or -EFAULT if the flag word cannot be copied
 * to the user buffer at @arg.
 */
static int proc_get_capabilities(struct usb_dev_state *ps, void __user *arg)
{
	/* Capabilities every usbfs instance supports unconditionally. */
	__u32 flags = USBDEVFS_CAP_ZERO_PACKET |
		      USBDEVFS_CAP_NO_PACKET_SIZE_LIM |
		      USBDEVFS_CAP_REAP_AFTER_DISCONNECT |
		      USBDEVFS_CAP_MMAP |
		      USBDEVFS_CAP_DROP_PRIVILEGES;

	/* Bulk continuation is only offered when the bus stops on short reads. */
	if (!ps->dev->bus->no_stop_on_short)
		flags |= USBDEVFS_CAP_BULK_CONTINUATION;

	/* Scatter-gather is advertised only if the HCD set a table size. */
	if (ps->dev->bus->sg_tablesize)
		flags |= USBDEVFS_CAP_BULK_SCATTER_GATHER;

	return put_user(flags, (__u32 __user *)arg) ? -EFAULT : 0;
}
[
"CWE-200"
] |
linux
|
681fef8380eb818c0b845fca5d2ab1dcbab114ee
| 119,264,482,028,818,590,000,000,000,000,000,000,000 | 17 |
USB: usbfs: fix potential infoleak in devio
The stack object “ci” has a total size of 8 bytes. Its last 3 bytes
are padding bytes which are not initialized and leaked to userland
via “copy_to_user”.
Signed-off-by: Kangjie Lu <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
|
/*
 * Decode a buffer of LZW-compressed data.
 *
 * tif  - TIFF descriptor; compressed input is read from tif->tif_rawcp /
 *        tif->tif_rawcc, which are advanced past the bytes consumed.
 * op0  - destination buffer for decoded bytes.
 * occ0 - number of decoded bytes requested.
 * s    - sample index; unused here (see the (void) s cast).
 *
 * Returns 1 on success, 0 on failure (corrupt code stream, or input
 * exhausted before occ0 bytes could be produced).  When a decoded
 * string would overflow the output buffer, the unconsumed tail is
 * remembered in sp->dec_restart / sp->dec_codep and emitted by the
 * next call.
 */
LZWDecode(TIFF* tif, uint8* op0, tmsize_t occ0, uint16 s)
{
    static const char module[] = "LZWDecode";
    LZWCodecState *sp = DecoderState(tif);
    char *op = (char*) op0;        /* current output position */
    long occ = (long) occ0;        /* output bytes still wanted */
    char *tp;
    unsigned char *bp;             /* raw (compressed) input cursor */
    hcode_t code;
    int len;
    long nbits, nextbits, nbitsmask;
    unsigned long nextdata;
    code_t *codep, *free_entp, *maxcodep, *oldcodep;

    (void) s;
    assert(sp != NULL);
    assert(sp->dec_codetab != NULL);

    /*
      Fail if value does not fit in long.
    */
    if ((tmsize_t) occ != occ0)
        return (0);
    /*
     * Restart interrupted output operation.
     */
    if (sp->dec_restart) {
        long residue;

        codep = sp->dec_codep;
        residue = codep->length - sp->dec_restart;
        if (residue > occ) {
            /*
             * Residue from previous decode is sufficient
             * to satisfy decode request.  Skip to the
             * start of the decoded string, place decoded
             * values in the output buffer, and return.
             */
            sp->dec_restart += occ;
            do {
                codep = codep->next;
            } while (--residue > occ && codep);
            if (codep) {
                tp = op + occ;
                do {
                    *--tp = codep->value;
                    codep = codep->next;
                } while (--occ && codep);
            }
            return (1);
        }
        /*
         * Residue satisfies only part of the decode request.
         */
        op += residue;
        occ -= residue;
        tp = op;
        do {
            int t;
            --tp;
            t = codep->value;
            codep = codep->next;
            *tp = (char)t;
        } while (--residue && codep);
        sp->dec_restart = 0;
    }

    /* Load the bit-reader and code-table state saved by the last call. */
    bp = (unsigned char *)tif->tif_rawcp;
#ifdef LZW_CHECKEOS
    sp->dec_bitsleft = (((uint64)tif->tif_rawcc) << 3);
#endif
    nbits = sp->lzw_nbits;
    nextdata = sp->lzw_nextdata;
    nextbits = sp->lzw_nextbits;
    nbitsmask = sp->dec_nbitsmask;
    oldcodep = sp->dec_oldcodep;
    free_entp = sp->dec_free_entp;
    maxcodep = sp->dec_maxcodep;

    /* Main loop: emit decoded bytes until the request is satisfied. */
    while (occ > 0) {
        NextCode(tif, sp, bp, code, GetNextCode);
        if (code == CODE_EOI)
            break;
        if (code == CODE_CLEAR) {
            /* Reset the string table and code width to initial state. */
            do {
                free_entp = sp->dec_codetab + CODE_FIRST;
                _TIFFmemset(free_entp, 0,
                        (CSIZE - CODE_FIRST) * sizeof (code_t));
                nbits = BITS_MIN;
                nbitsmask = MAXCODE(BITS_MIN);
                maxcodep = sp->dec_codetab + nbitsmask-1;
                NextCode(tif, sp, bp, code, GetNextCode);
            } while (code == CODE_CLEAR); /* consecutive CODE_CLEAR codes */
            if (code == CODE_EOI)
                break;
            if (code > CODE_CLEAR) {
                /* The first code after a CLEAR must be a literal. */
                TIFFErrorExt(tif->tif_clientdata, tif->tif_name,
                    "LZWDecode: Corrupted LZW table at scanline %d",
                    tif->tif_row);
                return (0);
            }
            *op++ = (char)code;
            occ--;
            oldcodep = sp->dec_codetab + code;
            continue;
        }
        codep = sp->dec_codetab + code;

        /*
         * Add the new entry to the code table.
         */
        if (free_entp < &sp->dec_codetab[0] ||
            free_entp >= &sp->dec_codetab[CSIZE]) {
            TIFFErrorExt(tif->tif_clientdata, module,
                "Corrupted LZW table at scanline %d",
                tif->tif_row);
            return (0);
        }

        free_entp->next = oldcodep;
        if (free_entp->next < &sp->dec_codetab[0] ||
            free_entp->next >= &sp->dec_codetab[CSIZE]) {
            TIFFErrorExt(tif->tif_clientdata, module,
                "Corrupted LZW table at scanline %d",
                tif->tif_row);
            return (0);
        }
        free_entp->firstchar = free_entp->next->firstchar;
        free_entp->length = free_entp->next->length+1;
        /* First char of the referenced string; if the code is not yet
         * in the table (codep >= free_entp), use the new entry's own
         * firstchar instead. */
        free_entp->value = (codep < free_entp) ?
            codep->firstchar : free_entp->firstchar;
        if (++free_entp > maxcodep) {
            /* Table grew past the current code range: widen the codes. */
            if (++nbits > BITS_MAX)     /* should not happen */
                nbits = BITS_MAX;
            nbitsmask = MAXCODE(nbits);
            maxcodep = sp->dec_codetab + nbitsmask-1;
        }
        oldcodep = codep;
        if (code >= 256) {
            /*
             * Code maps to a string, copy string
             * value to output (written in reverse).
             */
            if(codep->length == 0) {
                TIFFErrorExt(tif->tif_clientdata, module,
                    "Wrong length of decoded string: "
                    "data probably corrupted at scanline %d",
                    tif->tif_row);
                return (0);
            }
            if (codep->length > occ) {
                /*
                 * String is too long for decode buffer,
                 * locate portion that will fit, copy to
                 * the decode buffer, and setup restart
                 * logic for the next decoding call.
                 */
                sp->dec_codep = codep;
                do {
                    codep = codep->next;
                } while (codep && codep->length > occ);
                if (codep) {
                    sp->dec_restart = (long)occ;
                    tp = op + occ;
                    do {
                        *--tp = codep->value;
                        codep = codep->next;
                    } while (--occ && codep);
                    if (codep)
                        codeLoop(tif, module);
                }
                break;
            }
            len = codep->length;
            tp = op + len;
            do {
                int t;
                --tp;
                t = codep->value;
                codep = codep->next;
                *tp = (char)t;
            } while (codep && tp > op);
            if (codep) {
                /* Chain longer than the recorded length: corrupt table. */
                codeLoop(tif, module);
                break;
            }
            assert(occ >= len);
            op += len;
            occ -= len;
        } else {
            /* Codes below 256 are literal bytes. */
            *op++ = (char)code;
            occ--;
        }
    }

    /* Save bit-reader and table state for a possible follow-up call. */
    tif->tif_rawcc -= (tmsize_t)( (uint8*) bp - tif->tif_rawcp );
    tif->tif_rawcp = (uint8*) bp;
    sp->lzw_nbits = (unsigned short) nbits;
    sp->lzw_nextdata = nextdata;
    sp->lzw_nextbits = nextbits;
    sp->dec_nbitsmask = nbitsmask;
    sp->dec_oldcodep = oldcodep;
    sp->dec_free_entp = free_entp;
    sp->dec_maxcodep = maxcodep;

    if (occ > 0) {
#if defined(__WIN32__) && (defined(_MSC_VER) || defined(__MINGW32__))
        TIFFErrorExt(tif->tif_clientdata, module,
            "Not enough data at scanline %d (short %I64d bytes)",
            tif->tif_row, (unsigned __int64) occ);
#else
        TIFFErrorExt(tif->tif_clientdata, module,
            "Not enough data at scanline %d (short %llu bytes)",
            tif->tif_row, (unsigned long long) occ);
#endif
        return (0);
    }
    return (1);
}
| 0 |
[
"CWE-787"
] |
libtiff
|
58a898cb4459055bb488ca815c23b880c242a27d
| 198,674,023,166,641,060,000,000,000,000,000,000,000 | 219 |
LZWDecodeCompat(): fix potential index-out-of-bounds write. Fixes http://bugzilla.maptools.org/show_bug.cgi?id=2780 / CVE-2018-8905
The fix consists in using the similar code LZWDecode() to validate we
don't write outside of the output buffer.
|
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
int ret = -EINVAL;
vcpu_load(vcpu);
if (!lapic_in_kernel(vcpu) &&
mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
goto out;
/*
* KVM_MP_STATE_INIT_RECEIVED means the processor is in
* INIT state; latched init should be reported using
* KVM_SET_VCPU_EVENTS, so reject it here.
*/
if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) &&
(mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
goto out;
if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
} else
vcpu->arch.mp_state = mp_state->mp_state;
kvm_make_request(KVM_REQ_EVENT, vcpu);
ret = 0;
out:
vcpu_put(vcpu);
return ret;
}
| 0 |
[
"CWE-476"
] |
linux
|
55749769fe608fa3f4a075e42e89d237c8e37637
| 197,974,390,643,399,470,000,000,000,000,000,000,000 | 33 |
KVM: x86: Fix wall clock writes in Xen shared_info not to mark page dirty
When dirty ring logging is enabled, any dirty logging without an active
vCPU context will cause a kernel oops. But we've already declared that
the shared_info page doesn't get dirty tracking anyway, since it would
be kind of insane to mark it dirty every time we deliver an event channel
interrupt. Userspace is supposed to just assume it's always dirty any
time a vCPU can run or event channels are routed.
So stop using the generic kvm_write_wall_clock() and just write directly
through the gfn_to_pfn_cache that we already have set up.
We can make kvm_write_wall_clock() static in x86.c again now, but let's
not remove the 'sec_hi_ofs' argument even though it's not used yet. At
some point we *will* want to use that for KVM guests too.
Fixes: 629b5348841a ("KVM: x86/xen: update wallclock region")
Reported-by: butt3rflyh4ck <[email protected]>
Signed-off-by: David Woodhouse <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
|
do_ecmd(
int fnum,
char_u *ffname,
char_u *sfname,
exarg_T *eap, // can be NULL!
linenr_T newlnum,
int flags,
win_T *oldwin)
{
int other_file; // TRUE if editing another file
int oldbuf; // TRUE if using existing buffer
int auto_buf = FALSE; // TRUE if autocommands brought us
// into the buffer unexpectedly
char_u *new_name = NULL;
#if defined(FEAT_EVAL)
int did_set_swapcommand = FALSE;
#endif
buf_T *buf;
bufref_T bufref;
bufref_T old_curbuf;
char_u *free_fname = NULL;
#ifdef FEAT_BROWSE
char_u dot_path[] = ".";
char_u *browse_file = NULL;
#endif
int retval = FAIL;
long n;
pos_T orig_pos;
linenr_T topline = 0;
int newcol = -1;
int solcol = -1;
pos_T *pos;
char_u *command = NULL;
#ifdef FEAT_SPELL
int did_get_winopts = FALSE;
#endif
int readfile_flags = 0;
int did_inc_redrawing_disabled = FALSE;
long *so_ptr = curwin->w_p_so >= 0 ? &curwin->w_p_so : &p_so;
#ifdef FEAT_PROP_POPUP
if (ERROR_IF_TERM_POPUP_WINDOW)
return FAIL;
#endif
if (eap != NULL)
command = eap->do_ecmd_cmd;
set_bufref(&old_curbuf, curbuf);
if (fnum != 0)
{
if (fnum == curbuf->b_fnum) // file is already being edited
return OK; // nothing to do
other_file = TRUE;
}
else
{
#ifdef FEAT_BROWSE
if ((cmdmod.cmod_flags & CMOD_BROWSE) && !exiting)
{
if (
# ifdef FEAT_GUI
!gui.in_use &&
# endif
au_has_group((char_u *)"FileExplorer"))
{
// No browsing supported but we do have the file explorer:
// Edit the directory.
if (ffname == NULL || !mch_isdir(ffname))
ffname = dot_path;
}
else
{
browse_file = do_browse(0, (char_u *)_("Edit File"), ffname,
NULL, NULL, NULL, curbuf);
if (browse_file == NULL)
goto theend;
ffname = browse_file;
}
}
#endif
// if no short name given, use ffname for short name
if (sfname == NULL)
sfname = ffname;
#ifdef USE_FNAME_CASE
if (sfname != NULL)
fname_case(sfname, 0); // set correct case for sfname
#endif
if ((flags & (ECMD_ADDBUF | ECMD_ALTBUF))
&& (ffname == NULL || *ffname == NUL))
goto theend;
if (ffname == NULL)
other_file = TRUE;
// there is no file name
else if (*ffname == NUL && curbuf->b_ffname == NULL)
other_file = FALSE;
else
{
if (*ffname == NUL) // re-edit with same file name
{
ffname = curbuf->b_ffname;
sfname = curbuf->b_fname;
}
free_fname = fix_fname(ffname); // may expand to full path name
if (free_fname != NULL)
ffname = free_fname;
other_file = otherfile(ffname);
}
}
/*
* If the file was changed we may not be allowed to abandon it:
* - if we are going to re-edit the same file
* - or if we are the only window on this file and if ECMD_HIDE is FALSE
*/
if ( ((!other_file && !(flags & ECMD_OLDBUF))
|| (curbuf->b_nwindows == 1
&& !(flags & (ECMD_HIDE | ECMD_ADDBUF | ECMD_ALTBUF))))
&& check_changed(curbuf, (p_awa ? CCGD_AW : 0)
| (other_file ? 0 : CCGD_MULTWIN)
| ((flags & ECMD_FORCEIT) ? CCGD_FORCEIT : 0)
| (eap == NULL ? 0 : CCGD_EXCMD)))
{
if (fnum == 0 && other_file && ffname != NULL)
(void)setaltfname(ffname, sfname, newlnum < 0 ? 0 : newlnum);
goto theend;
}
/*
* End Visual mode before switching to another buffer, so the text can be
* copied into the GUI selection buffer.
*/
reset_VIsual();
#if defined(FEAT_EVAL)
if ((command != NULL || newlnum > (linenr_T)0)
&& *get_vim_var_str(VV_SWAPCOMMAND) == NUL)
{
int len;
char_u *p;
// Set v:swapcommand for the SwapExists autocommands.
if (command != NULL)
len = (int)STRLEN(command) + 3;
else
len = 30;
p = alloc(len);
if (p != NULL)
{
if (command != NULL)
vim_snprintf((char *)p, len, ":%s\r", command);
else
vim_snprintf((char *)p, len, "%ldG", (long)newlnum);
set_vim_var_string(VV_SWAPCOMMAND, p, -1);
did_set_swapcommand = TRUE;
vim_free(p);
}
}
#endif
/*
* If we are starting to edit another file, open a (new) buffer.
* Otherwise we re-use the current buffer.
*/
if (other_file)
{
int prev_alt_fnum = curwin->w_alt_fnum;
if (!(flags & (ECMD_ADDBUF | ECMD_ALTBUF)))
{
if ((cmdmod.cmod_flags & CMOD_KEEPALT) == 0)
curwin->w_alt_fnum = curbuf->b_fnum;
if (oldwin != NULL)
buflist_altfpos(oldwin);
}
if (fnum)
buf = buflist_findnr(fnum);
else
{
if (flags & (ECMD_ADDBUF | ECMD_ALTBUF))
{
// Default the line number to zero to avoid that a wininfo item
// is added for the current window.
linenr_T tlnum = 0;
buf_T *newbuf;
if (command != NULL)
{
tlnum = atol((char *)command);
if (tlnum <= 0)
tlnum = 1L;
}
// Add BLN_NOCURWIN to avoid a new wininfo items are associated
// with the current window.
newbuf = buflist_new(ffname, sfname, tlnum,
BLN_LISTED | BLN_NOCURWIN);
if (newbuf != NULL && (flags & ECMD_ALTBUF))
curwin->w_alt_fnum = newbuf->b_fnum;
goto theend;
}
buf = buflist_new(ffname, sfname, 0L,
BLN_CURBUF | ((flags & ECMD_SET_HELP) ? 0 : BLN_LISTED));
// autocommands may change curwin and curbuf
if (oldwin != NULL)
oldwin = curwin;
set_bufref(&old_curbuf, curbuf);
}
if (buf == NULL)
goto theend;
if (curwin->w_alt_fnum == buf->b_fnum && prev_alt_fnum != 0)
// reusing the buffer, keep the old alternate file
curwin->w_alt_fnum = prev_alt_fnum;
if (buf->b_ml.ml_mfp == NULL) // no memfile yet
{
oldbuf = FALSE;
}
else // existing memfile
{
oldbuf = TRUE;
set_bufref(&bufref, buf);
(void)buf_check_timestamp(buf, FALSE);
// Check if autocommands made the buffer invalid or changed the
// current buffer.
if (!bufref_valid(&bufref) || curbuf != old_curbuf.br_buf)
goto theend;
#ifdef FEAT_EVAL
if (aborting()) // autocmds may abort script processing
goto theend;
#endif
}
// May jump to last used line number for a loaded buffer or when asked
// for explicitly
if ((oldbuf && newlnum == ECMD_LASTL) || newlnum == ECMD_LAST)
{
pos = buflist_findfpos(buf);
newlnum = pos->lnum;
solcol = pos->col;
}
/*
* Make the (new) buffer the one used by the current window.
* If the old buffer becomes unused, free it if ECMD_HIDE is FALSE.
* If the current buffer was empty and has no file name, curbuf
* is returned by buflist_new(), nothing to do here.
*/
if (buf != curbuf)
{
bufref_T save_au_new_curbuf;
#ifdef FEAT_CMDWIN
int save_cmdwin_type = cmdwin_type;
// BufLeave applies to the old buffer.
cmdwin_type = 0;
#endif
/*
* Be careful: The autocommands may delete any buffer and change
* the current buffer.
* - If the buffer we are going to edit is deleted, give up.
* - If the current buffer is deleted, prefer to load the new
* buffer when loading a buffer is required. This avoids
* loading another buffer which then must be closed again.
* - If we ended up in the new buffer already, need to skip a few
* things, set auto_buf.
*/
if (buf->b_fname != NULL)
new_name = vim_strsave(buf->b_fname);
save_au_new_curbuf = au_new_curbuf;
set_bufref(&au_new_curbuf, buf);
apply_autocmds(EVENT_BUFLEAVE, NULL, NULL, FALSE, curbuf);
#ifdef FEAT_CMDWIN
cmdwin_type = save_cmdwin_type;
#endif
if (!bufref_valid(&au_new_curbuf))
{
// new buffer has been deleted
delbuf_msg(new_name); // frees new_name
au_new_curbuf = save_au_new_curbuf;
goto theend;
}
#ifdef FEAT_EVAL
if (aborting()) // autocmds may abort script processing
{
vim_free(new_name);
au_new_curbuf = save_au_new_curbuf;
goto theend;
}
#endif
if (buf == curbuf) // already in new buffer
auto_buf = TRUE;
else
{
win_T *the_curwin = curwin;
int did_decrement;
buf_T *was_curbuf = curbuf;
// Set the w_closing flag to avoid that autocommands close the
// window. And set b_locked for the same reason.
the_curwin->w_closing = TRUE;
++buf->b_locked;
if (curbuf == old_curbuf.br_buf)
buf_copy_options(buf, BCO_ENTER);
// Close the link to the current buffer. This will set
// oldwin->w_buffer to NULL.
u_sync(FALSE);
did_decrement = close_buffer(oldwin, curbuf,
(flags & ECMD_HIDE) ? 0 : DOBUF_UNLOAD, FALSE, FALSE);
// Autocommands may have closed the window.
if (win_valid(the_curwin))
the_curwin->w_closing = FALSE;
--buf->b_locked;
#ifdef FEAT_EVAL
// autocmds may abort script processing
if (aborting() && curwin->w_buffer != NULL)
{
vim_free(new_name);
au_new_curbuf = save_au_new_curbuf;
goto theend;
}
#endif
// Be careful again, like above.
if (!bufref_valid(&au_new_curbuf))
{
// new buffer has been deleted
delbuf_msg(new_name); // frees new_name
au_new_curbuf = save_au_new_curbuf;
goto theend;
}
if (buf == curbuf) // already in new buffer
{
// close_buffer() has decremented the window count,
// increment it again here and restore w_buffer.
if (did_decrement && buf_valid(was_curbuf))
++was_curbuf->b_nwindows;
if (win_valid_any_tab(oldwin) && oldwin->w_buffer == NULL)
oldwin->w_buffer = was_curbuf;
auto_buf = TRUE;
}
else
{
#ifdef FEAT_SYN_HL
/*
* <VN> We could instead free the synblock
* and re-attach to buffer, perhaps.
*/
if (curwin->w_buffer == NULL
|| curwin->w_s == &(curwin->w_buffer->b_s))
curwin->w_s = &(buf->b_s);
#endif
curwin->w_buffer = buf;
curbuf = buf;
++curbuf->b_nwindows;
// Set 'fileformat', 'binary' and 'fenc' when forced.
if (!oldbuf && eap != NULL)
{
set_file_options(TRUE, eap);
set_forced_fenc(eap);
}
}
// May get the window options from the last time this buffer
// was in this window (or another window). If not used
// before, reset the local window options to the global
// values. Also restores old folding stuff.
get_winopts(curbuf);
#ifdef FEAT_SPELL
did_get_winopts = TRUE;
#endif
}
vim_free(new_name);
au_new_curbuf = save_au_new_curbuf;
}
curwin->w_pcmark.lnum = 1;
curwin->w_pcmark.col = 0;
}
else // !other_file
{
if ((flags & (ECMD_ADDBUF | ECMD_ALTBUF)) || check_fname() == FAIL)
goto theend;
oldbuf = (flags & ECMD_OLDBUF);
}
// Don't redraw until the cursor is in the right line, otherwise
// autocommands may cause ml_get errors.
++RedrawingDisabled;
did_inc_redrawing_disabled = TRUE;
buf = curbuf;
if ((flags & ECMD_SET_HELP) || keep_help_flag)
{
prepare_help_buffer();
}
else
{
// Don't make a buffer listed if it's a help buffer. Useful when
// using CTRL-O to go back to a help file.
if (!curbuf->b_help)
set_buflisted(TRUE);
}
// If autocommands change buffers under our fingers, forget about
// editing the file.
if (buf != curbuf)
goto theend;
#ifdef FEAT_EVAL
if (aborting()) // autocmds may abort script processing
goto theend;
#endif
// Since we are starting to edit a file, consider the filetype to be
// unset. Helps for when an autocommand changes files and expects syntax
// highlighting to work in the other file.
did_filetype = FALSE;
/*
* other_file oldbuf
* FALSE FALSE re-edit same file, buffer is re-used
* FALSE TRUE re-edit same file, nothing changes
* TRUE FALSE start editing new file, new buffer
* TRUE TRUE start editing in existing buffer (nothing to do)
*/
if (!other_file && !oldbuf) // re-use the buffer
{
set_last_cursor(curwin); // may set b_last_cursor
if (newlnum == ECMD_LAST || newlnum == ECMD_LASTL)
{
newlnum = curwin->w_cursor.lnum;
solcol = curwin->w_cursor.col;
}
buf = curbuf;
if (buf->b_fname != NULL)
new_name = vim_strsave(buf->b_fname);
else
new_name = NULL;
set_bufref(&bufref, buf);
// If the buffer was used before, store the current contents so that
// the reload can be undone. Do not do this if the (empty) buffer is
// being re-used for another file.
if (!(curbuf->b_flags & BF_NEVERLOADED)
&& (p_ur < 0 || curbuf->b_ml.ml_line_count <= p_ur))
{
// Sync first so that this is a separate undo-able action.
u_sync(FALSE);
if (u_savecommon(0, curbuf->b_ml.ml_line_count + 1, 0, TRUE)
== FAIL)
{
vim_free(new_name);
goto theend;
}
u_unchanged(curbuf);
buf_freeall(curbuf, BFA_KEEP_UNDO);
// tell readfile() not to clear or reload undo info
readfile_flags = READ_KEEP_UNDO;
}
else
buf_freeall(curbuf, 0); // free all things for buffer
// If autocommands deleted the buffer we were going to re-edit, give
// up and jump to the end.
if (!bufref_valid(&bufref))
{
delbuf_msg(new_name); // frees new_name
goto theend;
}
vim_free(new_name);
// If autocommands change buffers under our fingers, forget about
// re-editing the file. Should do the buf_clear_file(), but perhaps
// the autocommands changed the buffer...
if (buf != curbuf)
goto theend;
#ifdef FEAT_EVAL
if (aborting()) // autocmds may abort script processing
goto theend;
#endif
buf_clear_file(curbuf);
curbuf->b_op_start.lnum = 0; // clear '[ and '] marks
curbuf->b_op_end.lnum = 0;
}
/*
* If we get here we are sure to start editing
*/
// Assume success now
retval = OK;
/*
* Check if we are editing the w_arg_idx file in the argument list.
*/
check_arg_idx(curwin);
if (!auto_buf)
{
/*
* Set cursor and init window before reading the file and executing
* autocommands. This allows for the autocommands to position the
* cursor.
*/
curwin_init();
#ifdef FEAT_FOLDING
// It's possible that all lines in the buffer changed. Need to update
// automatic folding for all windows where it's used.
{
win_T *win;
tabpage_T *tp;
FOR_ALL_TAB_WINDOWS(tp, win)
if (win->w_buffer == curbuf)
foldUpdateAll(win);
}
#endif
// Change directories when the 'acd' option is set.
DO_AUTOCHDIR;
/*
* Careful: open_buffer() and apply_autocmds() may change the current
* buffer and window.
*/
orig_pos = curwin->w_cursor;
topline = curwin->w_topline;
if (!oldbuf) // need to read the file
{
#ifdef FEAT_PROP_POPUP
// Don't use the swap-exists dialog for a popup window, can't edit
// the buffer.
if (WIN_IS_POPUP(curwin))
curbuf->b_flags |= BF_NO_SEA;
#endif
swap_exists_action = SEA_DIALOG;
curbuf->b_flags |= BF_CHECK_RO; // set/reset 'ro' flag
/*
* Open the buffer and read the file.
*/
if (flags & ECMD_NOWINENTER)
readfile_flags |= READ_NOWINENTER;
#if defined(FEAT_EVAL)
if (should_abort(open_buffer(FALSE, eap, readfile_flags)))
retval = FAIL;
#else
(void)open_buffer(FALSE, eap, readfile_flags);
#endif
#ifdef FEAT_PROP_POPUP
curbuf->b_flags &= ~BF_NO_SEA;
#endif
if (swap_exists_action == SEA_QUIT)
retval = FAIL;
handle_swap_exists(&old_curbuf);
}
else
{
// Read the modelines, but only to set window-local options. Any
// buffer-local options have already been set and may have been
// changed by the user.
do_modelines(OPT_WINONLY);
apply_autocmds_retval(EVENT_BUFENTER, NULL, NULL, FALSE,
curbuf, &retval);
if ((flags & ECMD_NOWINENTER) == 0)
apply_autocmds_retval(EVENT_BUFWINENTER, NULL, NULL, FALSE,
curbuf, &retval);
}
check_arg_idx(curwin);
// If autocommands change the cursor position or topline, we should
// keep it. Also when it moves within a line. But not when it moves
// to the first non-blank.
if (!EQUAL_POS(curwin->w_cursor, orig_pos))
{
char_u *text = ml_get_curline();
if (curwin->w_cursor.lnum != orig_pos.lnum
|| curwin->w_cursor.col != (int)(skipwhite(text) - text))
{
newlnum = curwin->w_cursor.lnum;
newcol = curwin->w_cursor.col;
}
}
if (curwin->w_topline == topline)
topline = 0;
// Even when cursor didn't move we need to recompute topline.
changed_line_abv_curs();
maketitle();
#if defined(FEAT_PROP_POPUP) && defined(FEAT_QUICKFIX)
if (WIN_IS_POPUP(curwin) && curwin->w_p_pvw && retval != FAIL)
popup_set_title(curwin);
#endif
}
#ifdef FEAT_DIFF
// Tell the diff stuff that this buffer is new and/or needs updating.
// Also needed when re-editing the same buffer, because unloading will
// have removed it as a diff buffer.
if (curwin->w_p_diff)
{
diff_buf_add(curbuf);
diff_invalidate(curbuf);
}
#endif
#ifdef FEAT_SPELL
// If the window options were changed may need to set the spell language.
// Can only do this after the buffer has been properly setup.
if (did_get_winopts && curwin->w_p_spell && *curwin->w_s->b_p_spl != NUL)
(void)did_set_spelllang(curwin);
#endif
if (command == NULL)
{
if (newcol >= 0) // position set by autocommands
{
curwin->w_cursor.lnum = newlnum;
curwin->w_cursor.col = newcol;
check_cursor();
}
else if (newlnum > 0) // line number from caller or old position
{
curwin->w_cursor.lnum = newlnum;
check_cursor_lnum();
if (solcol >= 0 && !p_sol)
{
// 'sol' is off: Use last known column.
curwin->w_cursor.col = solcol;
check_cursor_col();
curwin->w_cursor.coladd = 0;
curwin->w_set_curswant = TRUE;
}
else
beginline(BL_SOL | BL_FIX);
}
else // no line number, go to last line in Ex mode
{
if (exmode_active)
curwin->w_cursor.lnum = curbuf->b_ml.ml_line_count;
beginline(BL_WHITE | BL_FIX);
}
}
// Check if cursors in other windows on the same buffer are still valid
check_lnums(FALSE);
/*
* Did not read the file, need to show some info about the file.
* Do this after setting the cursor.
*/
if (oldbuf && !auto_buf)
{
int msg_scroll_save = msg_scroll;
// Obey the 'O' flag in 'cpoptions': overwrite any previous file
// message.
if (shortmess(SHM_OVERALL) && !exiting && p_verbose == 0)
msg_scroll = FALSE;
if (!msg_scroll) // wait a bit when overwriting an error msg
check_for_delay(FALSE);
msg_start();
msg_scroll = msg_scroll_save;
msg_scrolled_ign = TRUE;
if (!shortmess(SHM_FILEINFO))
fileinfo(FALSE, TRUE, FALSE);
msg_scrolled_ign = FALSE;
}
#ifdef FEAT_VIMINFO
curbuf->b_last_used = vim_time();
#endif
if (command != NULL)
do_cmdline(command, NULL, NULL, DOCMD_VERBOSE|DOCMD_RANGEOK);
#ifdef FEAT_KEYMAP
if (curbuf->b_kmap_state & KEYMAP_INIT)
(void)keymap_init();
#endif
--RedrawingDisabled;
did_inc_redrawing_disabled = FALSE;
if (!skip_redraw)
{
n = *so_ptr;
if (topline == 0 && command == NULL)
*so_ptr = 9999; // force cursor halfway the window
update_topline();
curwin->w_scbind_pos = curwin->w_topline;
*so_ptr = n;
redraw_curbuf_later(NOT_VALID); // redraw this buffer later
}
if (p_im && (State & MODE_INSERT) == 0)
need_start_insertmode = TRUE;
#ifdef FEAT_AUTOCHDIR
// Change directories when the 'acd' option is set and we aren't already in
// that directory (should already be done above). Expect getcwd() to be
// faster than calling shorten_fnames() unnecessarily.
if (p_acd && curbuf->b_ffname != NULL)
{
char_u curdir[MAXPATHL];
char_u filedir[MAXPATHL];
vim_strncpy(filedir, curbuf->b_ffname, MAXPATHL - 1);
*gettail_sep(filedir) = NUL;
if (mch_dirname(curdir, MAXPATHL) != FAIL
&& vim_fnamecmp(curdir, filedir) != 0)
do_autochdir();
}
#endif
#if defined(FEAT_NETBEANS_INTG)
if (curbuf->b_ffname != NULL)
{
# ifdef FEAT_NETBEANS_INTG
if ((flags & ECMD_SET_HELP) != ECMD_SET_HELP)
netbeans_file_opened(curbuf);
# endif
}
#endif
theend:
if (did_inc_redrawing_disabled)
--RedrawingDisabled;
#if defined(FEAT_EVAL)
if (did_set_swapcommand)
set_vim_var_string(VV_SWAPCOMMAND, NULL, -1);
#endif
#ifdef FEAT_BROWSE
vim_free(browse_file);
#endif
vim_free(free_fname);
return retval;
}
| 0 |
[
"CWE-787"
] |
vim
|
e2bd8600b873d2cd1f9d667c28cba8b1dba18839
| 10,638,816,403,684,585,000,000,000,000,000,000,000 | 752 |
patch 8.2.4977: memory access error when substitute expression changes window
Problem: Memory access error when substitute expression changes window.
Solution: Disallow changing window in substitute expression.
|
static void smb_vfs_call_pread_done(struct tevent_req *subreq)
{
struct tevent_req *req = tevent_req_callback_data(
subreq, struct tevent_req);
struct smb_vfs_call_pread_state *state = tevent_req_data(
req, struct smb_vfs_call_pread_state);
int err;
state->retval = state->recv_fn(subreq, &err);
TALLOC_FREE(subreq);
if (state->retval == -1) {
tevent_req_error(req, err);
return;
}
tevent_req_done(req);
}
| 0 |
[
"CWE-264"
] |
samba
|
4278ef25f64d5fdbf432ff1534e275416ec9561e
| 24,634,265,220,566,767,000,000,000,000,000,000,000 | 16 |
CVE-2015-5252: s3: smbd: Fix symlink verification (file access outside the share).
Ensure matching component ends in '/' or '\0'.
BUG: https://bugzilla.samba.org/show_bug.cgi?id=11395
Signed-off-by: Jeremy Allison <[email protected]>
Reviewed-by: Volker Lendecke <[email protected]>
|
DECLAREContigPutFunc(putcontig8bitYCbCr11tile)
{
(void) y;
fromskew = (fromskew / 1) * (1 * 1 + 2);
do {
x = w; /* was x = w>>1; patched 2000/09/25 [email protected] */
do {
int32 Cb = pp[1];
int32 Cr = pp[2];
YCbCrtoRGB(*cp++, pp[0]);
pp += 3;
} while (--x);
cp += toskew;
pp += fromskew;
} while (--h);
}
| 0 |
[
"CWE-787"
] |
libtiff
|
4bb584a35f87af42d6cf09d15e9ce8909a839145
| 131,995,839,457,214,150,000,000,000,000,000,000,000 | 18 |
RGBA interface: fix integer overflow potentially causing write heap buffer overflow, especially on 32 bit builds. Fixes https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=16443. Credit to OSS Fuzz
|
static CURLcode pop3_list(struct connectdata *conn)
{
CURLcode result = CURLE_OK;
struct pop3_conn *pop3c = &conn->proto.pop3c;
if(pop3c->mailbox[0] != '\0')
result = Curl_pp_sendf(&conn->proto.pop3c.pp, "LIST %s", pop3c->mailbox);
else
result = Curl_pp_sendf(&conn->proto.pop3c.pp, "LIST");
if(result)
return result;
if(pop3c->mailbox[0] != '\0')
state(conn, POP3_LIST_SINGLE);
else
state(conn, POP3_LIST);
return result;
}
| 0 |
[
"CWE-89"
] |
curl
|
75ca568fa1c19de4c5358fed246686de8467c238
| 167,092,992,046,531,050,000,000,000,000,000,000,000 | 18 |
URL sanitize: reject URLs containing bad data
Protocols (IMAP, POP3 and SMTP) that use the path part of a URL in a
decoded manner now use the new Curl_urldecode() function to reject URLs
with embedded control codes (anything that is or decodes to a byte value
less than 32).
URLs containing such codes could easily otherwise be used to do harm and
allow users to do unintended actions with otherwise innocent tools and
applications. Like for example using a URL like
pop3://pop3.example.com/1%0d%0aDELE%201 when the app wants a URL to get
a mail and instead this would delete one.
This flaw is considered a security vulnerability: CVE-2012-0036
Security advisory at: http://curl.haxx.se/docs/adv_20120124.html
Reported by: Dan Fandrich
|
keepalived_main(int argc, char **argv)
{
bool report_stopped = true;
struct utsname uname_buf;
char *end;
/* Ensure time_now is set. We then don't have to check anywhere
* else if it is set. */
set_time_now();
/* Save command line options in case need to log them later */
save_cmd_line_options(argc, argv);
/* Init debugging level */
debug = 0;
/* We are the parent process */
#ifndef _DEBUG_
prog_type = PROG_TYPE_PARENT;
#endif
/* Initialise daemon_mode */
#ifdef _WITH_VRRP_
__set_bit(DAEMON_VRRP, &daemon_mode);
#endif
#ifdef _WITH_LVS_
__set_bit(DAEMON_CHECKERS, &daemon_mode);
#endif
#ifdef _WITH_BFD_
__set_bit(DAEMON_BFD, &daemon_mode);
#endif
/* Open log with default settings so we can log initially */
openlog(PACKAGE_NAME, LOG_PID, log_facility);
#ifdef _MEM_CHECK_
mem_log_init(PACKAGE_NAME, "Parent process");
#endif
/* Some functionality depends on kernel version, so get the version here */
if (uname(&uname_buf))
log_message(LOG_INFO, "Unable to get uname() information - error %d", errno);
else {
os_major = (unsigned)strtoul(uname_buf.release, &end, 10);
if (*end != '.')
os_major = 0;
else {
os_minor = (unsigned)strtoul(end + 1, &end, 10);
if (*end != '.')
os_major = 0;
else {
if (!isdigit(end[1]))
os_major = 0;
else
os_release = (unsigned)strtoul(end + 1, &end, 10);
}
}
if (!os_major)
log_message(LOG_INFO, "Unable to parse kernel version %s", uname_buf.release);
/* config_id defaults to hostname */
if (!config_id) {
end = strchrnul(uname_buf.nodename, '.');
config_id = MALLOC((size_t)(end - uname_buf.nodename) + 1);
strncpy(config_id, uname_buf.nodename, (size_t)(end - uname_buf.nodename));
config_id[end - uname_buf.nodename] = '\0';
}
}
/*
* Parse command line and set debug level.
* bits 0..7 reserved by main.c
*/
if (parse_cmdline(argc, argv)) {
closelog();
if (!__test_bit(NO_SYSLOG_BIT, &debug))
openlog(PACKAGE_NAME, LOG_PID | ((__test_bit(LOG_CONSOLE_BIT, &debug)) ? LOG_CONS : 0) , log_facility);
}
if (__test_bit(LOG_CONSOLE_BIT, &debug))
enable_console_log();
#ifdef GIT_COMMIT
log_message(LOG_INFO, "Starting %s, git commit %s", version_string, GIT_COMMIT);
#else
log_message(LOG_INFO, "Starting %s", version_string);
#endif
/* Handle any core file requirements */
core_dump_init();
if (os_major) {
if (KERNEL_VERSION(os_major, os_minor, os_release) < LINUX_VERSION_CODE) {
/* keepalived was build for a later kernel version */
log_message(LOG_INFO, "WARNING - keepalived was build for newer Linux %d.%d.%d, running on %s %s %s",
(LINUX_VERSION_CODE >> 16) & 0xff,
(LINUX_VERSION_CODE >> 8) & 0xff,
(LINUX_VERSION_CODE ) & 0xff,
uname_buf.sysname, uname_buf.release, uname_buf.version);
} else {
/* keepalived was build for a later kernel version */
log_message(LOG_INFO, "Running on %s %s %s (built for Linux %d.%d.%d)",
uname_buf.sysname, uname_buf.release, uname_buf.version,
(LINUX_VERSION_CODE >> 16) & 0xff,
(LINUX_VERSION_CODE >> 8) & 0xff,
(LINUX_VERSION_CODE ) & 0xff);
}
}
#ifndef _DEBUG_
log_command_line(0);
#endif
/* Check we can read the configuration file(s).
NOTE: the working directory will be / if we
forked, but will be the current working directory
when keepalived was run if we haven't forked.
This means that if any config file names are not
absolute file names, the behaviour will be different
depending on whether we forked or not. */
if (!check_conf_file(conf_file)) {
if (__test_bit(CONFIG_TEST_BIT, &debug))
config_test_exit();
goto end;
}
global_data = alloc_global_data();
read_config_file();
init_global_data(global_data, NULL);
#if HAVE_DECL_CLONE_NEWNET
if (override_namespace) {
if (global_data->network_namespace) {
log_message(LOG_INFO, "Overriding config net_namespace '%s' with command line namespace '%s'", global_data->network_namespace, override_namespace);
FREE(global_data->network_namespace);
}
global_data->network_namespace = override_namespace;
override_namespace = NULL;
}
#endif
if (!__test_bit(CONFIG_TEST_BIT, &debug) &&
(global_data->instance_name
#if HAVE_DECL_CLONE_NEWNET
|| global_data->network_namespace
#endif
)) {
if ((syslog_ident = make_syslog_ident(PACKAGE_NAME))) {
log_message(LOG_INFO, "Changing syslog ident to %s", syslog_ident);
closelog();
openlog(syslog_ident, LOG_PID | ((__test_bit(LOG_CONSOLE_BIT, &debug)) ? LOG_CONS : 0), log_facility);
}
else
log_message(LOG_INFO, "Unable to change syslog ident");
use_pid_dir = true;
open_log_file(log_file_name,
NULL,
#if HAVE_DECL_CLONE_NEWNET
global_data->network_namespace,
#else
NULL,
#endif
global_data->instance_name);
}
/* Initialise pointer to child finding function */
set_child_finder_name(find_keepalived_child_name);
if (!__test_bit(CONFIG_TEST_BIT, &debug)) {
if (use_pid_dir) {
/* Create the directory for pid files */
create_pid_dir();
}
}
#if HAVE_DECL_CLONE_NEWNET
if (global_data->network_namespace) {
if (global_data->network_namespace && !set_namespaces(global_data->network_namespace)) {
log_message(LOG_ERR, "Unable to set network namespace %s - exiting", global_data->network_namespace);
goto end;
}
}
#endif
if (!__test_bit(CONFIG_TEST_BIT, &debug)) {
if (global_data->instance_name) {
if (!main_pidfile && (main_pidfile = make_pidfile_name(KEEPALIVED_PID_DIR KEEPALIVED_PID_FILE, global_data->instance_name, PID_EXTENSION)))
free_main_pidfile = true;
#ifdef _WITH_LVS_
if (!checkers_pidfile && (checkers_pidfile = make_pidfile_name(KEEPALIVED_PID_DIR CHECKERS_PID_FILE, global_data->instance_name, PID_EXTENSION)))
free_checkers_pidfile = true;
#endif
#ifdef _WITH_VRRP_
if (!vrrp_pidfile && (vrrp_pidfile = make_pidfile_name(KEEPALIVED_PID_DIR VRRP_PID_FILE, global_data->instance_name, PID_EXTENSION)))
free_vrrp_pidfile = true;
#endif
#ifdef _WITH_BFD_
if (!bfd_pidfile && (bfd_pidfile = make_pidfile_name(KEEPALIVED_PID_DIR VRRP_PID_FILE, global_data->instance_name, PID_EXTENSION)))
free_bfd_pidfile = true;
#endif
}
if (use_pid_dir) {
if (!main_pidfile)
main_pidfile = KEEPALIVED_PID_DIR KEEPALIVED_PID_FILE PID_EXTENSION;
#ifdef _WITH_LVS_
if (!checkers_pidfile)
checkers_pidfile = KEEPALIVED_PID_DIR CHECKERS_PID_FILE PID_EXTENSION;
#endif
#ifdef _WITH_VRRP_
if (!vrrp_pidfile)
vrrp_pidfile = KEEPALIVED_PID_DIR VRRP_PID_FILE PID_EXTENSION;
#endif
#ifdef _WITH_BFD_
if (!bfd_pidfile)
bfd_pidfile = KEEPALIVED_PID_DIR BFD_PID_FILE PID_EXTENSION;
#endif
}
else
{
if (!main_pidfile)
main_pidfile = PID_DIR KEEPALIVED_PID_FILE PID_EXTENSION;
#ifdef _WITH_LVS_
if (!checkers_pidfile)
checkers_pidfile = PID_DIR CHECKERS_PID_FILE PID_EXTENSION;
#endif
#ifdef _WITH_VRRP_
if (!vrrp_pidfile)
vrrp_pidfile = PID_DIR VRRP_PID_FILE PID_EXTENSION;
#endif
#ifdef _WITH_BFD_
if (!bfd_pidfile)
bfd_pidfile = PID_DIR BFD_PID_FILE PID_EXTENSION;
#endif
}
/* Check if keepalived is already running */
if (keepalived_running(daemon_mode)) {
log_message(LOG_INFO, "daemon is already running");
report_stopped = false;
goto end;
}
}
/* daemonize process */
if (!__test_bit(DONT_FORK_BIT, &debug) &&
xdaemon(false, false, true) > 0) {
closelog();
FREE_PTR(config_id);
FREE_PTR(orig_core_dump_pattern);
close_std_fd();
exit(0);
}
/* Set file creation mask */
umask(0);
#ifdef _MEM_CHECK_
enable_mem_log_termination();
#endif
if (__test_bit(CONFIG_TEST_BIT, &debug)) {
validate_config();
config_test_exit();
}
/* write the father's pidfile */
if (!pidfile_write(main_pidfile, getpid()))
goto end;
/* Create the master thread */
master = thread_make_master();
/* Signal handling initialization */
signal_init();
/* Init daemon */
if (!start_keepalived())
log_message(LOG_INFO, "Warning - keepalived has no configuration to run");
initialise_debug_options();
#ifdef THREAD_DUMP
register_parent_thread_addresses();
#endif
/* Launch the scheduling I/O multiplexer */
launch_thread_scheduler(master);
/* Finish daemon process */
stop_keepalived();
#ifdef THREAD_DUMP
deregister_thread_addresses();
#endif
/*
* Reached when terminate signal catched.
* finally return from system
*/
end:
if (report_stopped) {
#ifdef GIT_COMMIT
log_message(LOG_INFO, "Stopped %s, git commit %s", version_string, GIT_COMMIT);
#else
log_message(LOG_INFO, "Stopped %s", version_string);
#endif
}
#if HAVE_DECL_CLONE_NEWNET
if (global_data && global_data->network_namespace)
clear_namespaces();
#endif
if (use_pid_dir)
remove_pid_dir();
/* Restore original core_pattern if necessary */
if (orig_core_dump_pattern)
update_core_dump_pattern(orig_core_dump_pattern);
free_parent_mallocs_startup(false);
free_parent_mallocs_exit();
free_global_data(global_data);
closelog();
#ifndef _MEM_CHECK_LOG_
FREE_PTR(syslog_ident);
#else
if (syslog_ident)
free(syslog_ident);
#endif
close_std_fd();
exit(KEEPALIVED_EXIT_OK);
}
| 1 |
[
"CWE-200"
] |
keepalived
|
c6247a9ef2c7b33244ab1d3aa5d629ec49f0a067
| 191,621,240,332,145,250,000,000,000,000,000,000,000 | 343 |
Add command line and configuration option to set umask
Issue #1048 identified that files created by keepalived are created
with mode 0666. This commit changes the default to 0644, and also
allows the umask to be specified in the configuration or as a command
line option.
Signed-off-by: Quentin Armitage <[email protected]>
|
int crypto_ahash_digest(struct ahash_request *req)
{
return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
| 0 |
[
"CWE-835"
] |
linux
|
ef0579b64e93188710d48667cb5e014926af9f1b
| 275,533,719,308,030,350,000,000,000,000,000,000,000 | 4 |
crypto: ahash - Fix EINPROGRESS notification callback
The ahash API modifies the request's callback function in order
to clean up after itself in some corner cases (unaligned final
and missing finup).
When the request is complete ahash will restore the original
callback and everything is fine. However, when the request gets
an EBUSY on a full queue, an EINPROGRESS callback is made while
the request is still ongoing.
In this case the ahash API will incorrectly call its own callback.
This patch fixes the problem by creating a temporary request
object on the stack which is used to relay EINPROGRESS back to
the original completion function.
This patch also adds code to preserve the original flags value.
Fixes: ab6bf4e5e5e4 ("crypto: hash - Fix the pointer voodoo in...")
Cc: <[email protected]>
Reported-by: Sabrina Dubroca <[email protected]>
Tested-by: Sabrina Dubroca <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
|
/*
  SVGError: libxml2 SAX error callback used while parsing SVG input.
  Logs the parser's message, renders it into a local buffer, and records
  it as a CoderError on the SVGInfo's exception together with the
  strerror-style text for the current errno.
*/
static void SVGError(void *context,const char *format,...)
{
  char
    *message,
    reason[MaxTextExtent];

  SVGInfo
    *svg_info;

  va_list
    operands;

  /*
    Display and format an error message; the SAX layer supplies file,
    line, position and extra parameters through the varargs.
  */
  va_start(operands,format);
  svg_info=(SVGInfo *) context;  /* user-data pointer registered with the SAX handler */
  (void) LogMagickEvent(CoderEvent,GetMagickModule()," SAX.error: ");
  /* NOTE(review): `operands` is passed to LogMagickEvent here and then
     reused by v*printf below with no intervening va_end/va_start; this
     relies on LogMagickEvent not consuming the va_list — confirm on the
     target ABI (va_list may be an array type whose state is shared). */
  (void) LogMagickEvent(CoderEvent,GetMagickModule(),format,operands);
#if !defined(MAGICKCORE_HAVE_VSNPRINTF)
  (void) vsprintf(reason,format,operands);  /* unbounded fallback; only when vsnprintf is unavailable */
#else
  (void) vsnprintf(reason,MaxTextExtent,format,operands);  /* bounded; truncates to MaxTextExtent */
#endif
  message=GetExceptionMessage(errno);  /* system error text for whatever errno currently holds */
  (void) ThrowMagickException(svg_info->exception,GetMagickModule(),CoderError,
    reason,"`%s`",message);
  message=DestroyString(message);  /* GetExceptionMessage allocates; release it */
  va_end(operands);
}
| 0 |
[
"CWE-125"
] |
ImageMagick6
|
a5db4873626f702d2ddd8bc293573493e0a412c0
| 187,896,316,540,905,900,000,000,000,000,000,000,000 | 31 |
https://github.com/ImageMagick/ImageMagick/issues/1336
|
/*
 * cuse_gendev_release - release callback for a CUSE device structure.
 * @dev: the kmalloc'd struct device being dropped.
 *
 * Frees the device allocation. Presumably installed as the struct
 * device ->release hook so the driver core invokes it when the last
 * reference is put — confirm at the registration site.
 */
static void cuse_gendev_release(struct device *dev)
{
	kfree(dev);
}
| 0 |
[
"CWE-399"
] |
linux
|
2c5816b4beccc8ba709144539f6fdd764f8fa49c
| 266,066,978,299,462,370,000,000,000,000,000,000,000 | 4 |
cuse: fix memory leak
The problem is that fuse_dev_alloc() acquires an extra reference to cc.fc,
and the original ref count is never dropped.
Reported-by: Colin Ian King <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
Fixes: cc080e9e9be1 ("fuse: introduce per-instance fuse_dev structure")
Cc: <[email protected]> # v4.2+
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.