func
stringlengths 0
484k
| target
int64 0
1
| cwe
listlengths 0
4
| project
stringclasses 799
values | commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
/*
  Bug#12337762: mysql_list_fields() must report the same character set
  number for a view's columns as for the underlying table's columns.

  Creates a table whose three columns use distinct charsets/collations,
  creates a view selecting all of them, then fetches field metadata for
  both and asserts that field->charsetnr matches column-by-column.
*/
static void test_bug12337762()
{
  int rc, i= 0;
  MYSQL_RES *result;
  MYSQL_FIELD *field;
  /* One slot per column of charset_tab; writes are bounds-checked below. */
  unsigned int tab_charsetnr[3]= {0};
  DBUG_ENTER("test_bug12337762");
  myheader("test_bug12337762");

  /*
    Creating table with specific charset.
  */
  rc= mysql_query(mysql, "drop table if exists charset_tab");
  rc= mysql_query(mysql, "create table charset_tab("\
                         "txt1 varchar(32) character set Latin1,"\
                         "txt2 varchar(32) character set Latin1 collate latin1_bin,"\
                         "txt3 varchar(32) character set utf8 collate utf8_bin"\
                         ")");
  DIE_UNLESS(rc == 0);
  DIE_IF(mysql_errno(mysql));

  /*
    Creating view from table created earlier.
  */
  rc= mysql_query(mysql, "drop view if exists charset_view");
  rc= mysql_query(mysql, "create view charset_view as "\
                         "select * from charset_tab;");
  DIE_UNLESS(rc == 0);
  DIE_IF(mysql_errno(mysql));

  /*
    Checking field information for table: record each column's charsetnr.
  */
  result= mysql_list_fields(mysql, "charset_tab", NULL);
  DIE_IF(mysql_errno(mysql));
  i= 0;
  while ((field= mysql_fetch_field(result)))
  {
    printf("field name %s\n", field->name);
    printf("field table %s\n", field->table);
    printf("field type %d\n", field->type);
    printf("field charset %d\n", field->charsetnr);
    /*
      Fix: guard the index so an unexpected extra column can never
      overflow tab_charsetnr[] (previously an unchecked i++ write).
    */
    DIE_UNLESS(i < (int) (sizeof(tab_charsetnr) / sizeof(tab_charsetnr[0])));
    tab_charsetnr[i++]= field->charsetnr;
    printf("\n");
  }
  mysql_free_result(result);

  /*
    Checking field information for view: must match the table's values.
  */
  result= mysql_list_fields(mysql, "charset_view", NULL);
  DIE_IF(mysql_errno(mysql));
  i= 0;
  while ((field= mysql_fetch_field(result)))
  {
    printf("field name %s\n", field->name);
    printf("field table %s\n", field->table);
    printf("field type %d\n", field->type);
    printf("field charset %d\n", field->charsetnr);
    printf("\n");
    /* Same guard on the read side before comparing. */
    DIE_UNLESS(i < (int) (sizeof(tab_charsetnr) / sizeof(tab_charsetnr[0])));
    /*
      charset value for field must be same for both, view and table.
    */
    DIE_UNLESS(field->charsetnr == tab_charsetnr[i++]);
  }
  mysql_free_result(result);
  DBUG_VOID_RETURN;
}
| 0 |
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
| 336,115,324,734,593,850,000,000,000,000,000,000,000 | 71 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to unencrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
|
/*
 * Mark `commit` as included in the reachability bitmap `base`.
 *
 * Returns 1 if the commit's bit was newly set (caller should continue
 * traversing from this commit). Returns 0 if the bit was already set,
 * or if writer.bitmaps already holds a computed bitmap for this commit
 * — in that case the stored EWAH bitmap is OR-ed into `base` instead
 * of walking the commit again.
 */
add_to_include_set(struct bitmap *base, struct commit *commit)
{
khiter_t hash_pos;
/* Position of this commit's object within the pack's object order. */
uint32_t bitmap_pos = find_object_pos(commit->object.oid.hash);
if (bitmap_get(base, bitmap_pos))
return 0;
/* Look up a previously computed bitmap keyed by the commit's SHA-1. */
hash_pos = kh_get_sha1(writer.bitmaps, commit->object.oid.hash);
if (hash_pos < kh_end(writer.bitmaps)) {
struct bitmapped_commit *bc = kh_value(writer.bitmaps, hash_pos);
bitmap_or_ewah(base, bc->bitmap);
return 0;
}
bitmap_set(base, bitmap_pos);
return 1;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
git
|
de1e67d0703894cb6ea782e36abb63976ab07e60
| 299,969,786,782,496,800,000,000,000,000,000,000,000 | 18 |
list-objects: pass full pathname to callbacks
When we find a blob at "a/b/c", we currently pass this to
our show_object_fn callbacks as two components: "a/b/" and
"c". Callbacks which want the full value then call
path_name(), which concatenates the two. But this is an
inefficient interface; the path is a strbuf, and we could
simply append "c" to it temporarily, then roll back the
length, without creating a new copy.
So we could improve this by teaching the callsites of
path_name() this trick (and there are only 3). But we can
also notice that no callback actually cares about the
broken-down representation, and simply pass each callback
the full path "a/b/c" as a string. The callback code becomes
even simpler, then, as we do not have to worry about freeing
an allocated buffer, nor rolling back our modification to
the strbuf.
This is theoretically less efficient, as some callbacks
would not bother to format the final path component. But in
practice this is not measurable. Since we use the same
strbuf over and over, our work to grow it is amortized, and
we really only pay to memcpy a few bytes.
Signed-off-by: Jeff King <[email protected]>
Signed-off-by: Junio C Hamano <[email protected]>
|
void CLASS parse_makernote(int base, int uptag)
{
unsigned offset = 0, entries, tag, type, len, save, c;
unsigned ver97 = 0, serial = 0, i, wbi = 0, wb[4] = {0, 0, 0, 0};
uchar buf97[324], ci, cj, ck;
short morder, sorder = order;
char buf[10];
unsigned SamsungKey[11];
uchar NikonKey;
#ifdef LIBRAW_LIBRARY_BUILD
unsigned custom_serial = 0;
unsigned NikonLensDataVersion = 0;
unsigned lenNikonLensData = 0;
unsigned NikonFlashInfoVersion = 0;
uchar *CanonCameraInfo;
unsigned lenCanonCameraInfo = 0;
unsigned typeCanonCameraInfo = 0;
uchar *table_buf;
uchar *table_buf_0x0116;
ushort table_buf_0x0116_len = 0;
uchar *table_buf_0x2010;
ushort table_buf_0x2010_len = 0;
uchar *table_buf_0x9050;
ushort table_buf_0x9050_len = 0;
uchar *table_buf_0x9400;
ushort table_buf_0x9400_len = 0;
uchar *table_buf_0x9402;
ushort table_buf_0x9402_len = 0;
uchar *table_buf_0x9403;
ushort table_buf_0x9403_len = 0;
uchar *table_buf_0x9406;
ushort table_buf_0x9406_len = 0;
uchar *table_buf_0x940c;
ushort table_buf_0x940c_len = 0;
uchar *table_buf_0x940e;
ushort table_buf_0x940e_len = 0;
INT64 fsize = ifp->size();
#endif
/*
The MakerNote might have its own TIFF header (possibly with
its own byte-order!), or it might just be a table.
*/
if (!strncmp(make, "Nokia", 5))
return;
fread(buf, 1, 10, ifp);
/*
printf("===>>buf: 0x");
for (int i = 0; i < sizeof buf; i ++) {
printf("%02x", buf[i]);
}
putchar('\n');
*/
if (!strncmp(buf, "KDK", 3) || /* these aren't TIFF tables */
!strncmp(buf, "VER", 3) || !strncmp(buf, "IIII", 4) || !strncmp(buf, "MMMM", 4))
return;
if (!strncmp(buf, "KC", 2) || /* Konica KD-400Z, KD-510Z */
!strncmp(buf, "MLY", 3))
{ /* Minolta DiMAGE G series */
order = 0x4d4d;
while ((i = ftell(ifp)) < data_offset && i < 16384)
{
wb[0] = wb[2];
wb[2] = wb[1];
wb[1] = wb[3];
wb[3] = get2();
if (wb[1] == 256 && wb[3] == 256 && wb[0] > 256 && wb[0] < 640 && wb[2] > 256 && wb[2] < 640)
FORC4 cam_mul[c] = wb[c];
}
goto quit;
}
if (!strcmp(buf, "Nikon"))
{
base = ftell(ifp);
order = get2();
if (get2() != 42)
goto quit;
offset = get4();
fseek(ifp, offset - 8, SEEK_CUR);
}
else if (!strcmp(buf, "OLYMPUS") || !strcmp(buf, "PENTAX "))
{
base = ftell(ifp) - 10;
fseek(ifp, -2, SEEK_CUR);
order = get2();
if (buf[0] == 'O')
get2();
}
else if (!strncmp(buf, "SONY", 4) || !strcmp(buf, "Panasonic"))
{
goto nf;
}
else if (!strncmp(buf, "FUJIFILM", 8))
{
base = ftell(ifp) - 10;
nf:
order = 0x4949;
fseek(ifp, 2, SEEK_CUR);
}
else if (!strcmp(buf, "OLYMP") || !strcmp(buf, "LEICA") || !strcmp(buf, "Ricoh") || !strcmp(buf, "EPSON"))
fseek(ifp, -2, SEEK_CUR);
else if (!strcmp(buf, "AOC") || !strcmp(buf, "QVC"))
fseek(ifp, -4, SEEK_CUR);
else
{
fseek(ifp, -10, SEEK_CUR);
if (!strncmp(make, "SAMSUNG", 7))
base = ftell(ifp);
}
// adjust pos & base for Leica M8/M9/M Mono tags and dir in tag 0x3400
if (!strncasecmp(make, "LEICA", 5))
{
if (!strncmp(model, "M8", 2) || !strncasecmp(model, "Leica M8", 8) || !strncasecmp(model, "LEICA X", 7))
{
base = ftell(ifp) - 8;
}
else if (!strncasecmp(model, "LEICA M (Typ 240)", 17))
{
base = 0;
}
else if (!strncmp(model, "M9", 2) || !strncasecmp(model, "Leica M9", 8) || !strncasecmp(model, "M Monochrom", 11) ||
!strncasecmp(model, "Leica M Monochrom", 11))
{
if (!uptag)
{
base = ftell(ifp) - 10;
fseek(ifp, 8, SEEK_CUR);
}
else if (uptag == 0x3400)
{
fseek(ifp, 10, SEEK_CUR);
base += 10;
}
}
else if (!strncasecmp(model, "LEICA T", 7))
{
base = ftell(ifp) - 8;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_T;
#endif
}
#ifdef LIBRAW_LIBRARY_BUILD
else if (!strncasecmp(model, "LEICA SL", 8))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_SL;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
}
#endif
}
entries = get2();
if (entries > 1000)
return;
morder = order;
while (entries--)
{
order = morder;
tiff_get(base, &tag, &type, &len, &save);
tag |= uptag << 16;
#ifdef LIBRAW_LIBRARY_BUILD
INT64 _pos = ftell(ifp);
if (len > 8 && _pos + len > 2 * fsize)
{
fseek(ifp, save, SEEK_SET); // Recover tiff-read position!!
continue;
}
if (!strncasecmp(model, "KODAK P880", 10) || !strncasecmp(model, "KODAK P850", 10) ||
!strncasecmp(model, "KODAK P712", 10))
{
if (tag == 0xf90b)
{
imgdata.makernotes.kodak.clipBlack = get2();
}
else if (tag == 0xf90c)
{
imgdata.makernotes.kodak.clipWhite = get2();
}
}
if (!strncmp(make, "Canon", 5))
{
if (tag == 0x000d && len < 256000) // camera info
{
if (type != 4)
{
CanonCameraInfo = (uchar *)malloc(MAX(16, len));
fread(CanonCameraInfo, len, 1, ifp);
}
else
{
CanonCameraInfo = (uchar *)malloc(MAX(16, len * 4));
fread(CanonCameraInfo, len, 4, ifp);
}
lenCanonCameraInfo = len;
typeCanonCameraInfo = type;
}
else if (tag == 0x10) // Canon ModelID
{
unique_id = get4();
unique_id = setCanonBodyFeatures(unique_id);
if (lenCanonCameraInfo)
{
processCanonCameraInfo(unique_id, CanonCameraInfo, lenCanonCameraInfo, typeCanonCameraInfo);
free(CanonCameraInfo);
CanonCameraInfo = 0;
lenCanonCameraInfo = 0;
}
}
else
parseCanonMakernotes(tag, type, len);
}
else if (!strncmp(make, "FUJI", 4))
{
if (tag == 0x0010)
{
char FujiSerial[sizeof(imgdata.shootinginfo.InternalBodySerial)];
char *words[4];
char yy[2], mm[3], dd[3], ystr[16], ynum[16];
int year, nwords, ynum_len;
unsigned c;
stmread(FujiSerial, len, ifp);
nwords = getwords(FujiSerial, words, 4, sizeof(imgdata.shootinginfo.InternalBodySerial));
for (int i = 0; i < nwords; i++)
{
mm[2] = dd[2] = 0;
if (strnlen(words[i], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) < 18)
if (i == 0)
strncpy(imgdata.shootinginfo.InternalBodySerial, words[0],
sizeof(imgdata.shootinginfo.InternalBodySerial) - 1);
else
{
char tbuf[sizeof(imgdata.shootinginfo.InternalBodySerial)];
snprintf(tbuf, sizeof(tbuf), "%s %s", imgdata.shootinginfo.InternalBodySerial, words[i]);
strncpy(imgdata.shootinginfo.InternalBodySerial, tbuf,
sizeof(imgdata.shootinginfo.InternalBodySerial) - 1);
}
else
{
strncpy(dd, words[i] + strnlen(words[i], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) - 14, 2);
strncpy(mm, words[i] + strnlen(words[i], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) - 16, 2);
strncpy(yy, words[i] + strnlen(words[i], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) - 18, 2);
year = (yy[0] - '0') * 10 + (yy[1] - '0');
if (year < 70)
year += 2000;
else
year += 1900;
ynum_len = (int)strnlen(words[i], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) - 18;
strncpy(ynum, words[i], ynum_len);
ynum[ynum_len] = 0;
for (int j = 0; ynum[j] && ynum[j + 1] && sscanf(ynum + j, "%2x", &c); j += 2)
ystr[j / 2] = c;
ystr[ynum_len / 2 + 1] = 0;
strcpy(model2, ystr);
if (i == 0)
{
char tbuf[sizeof(imgdata.shootinginfo.InternalBodySerial)];
if (nwords == 1)
snprintf(tbuf, sizeof(tbuf), "%s %s %d:%s:%s",
words[0] + strnlen(words[0], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) - 12, ystr,
year, mm, dd);
else
snprintf(tbuf, sizeof(tbuf), "%s %d:%s:%s %s", ystr, year, mm, dd,
words[0] + strnlen(words[0], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) - 12);
strncpy(imgdata.shootinginfo.InternalBodySerial, tbuf,
sizeof(imgdata.shootinginfo.InternalBodySerial) - 1);
}
else
{
char tbuf[sizeof(imgdata.shootinginfo.InternalBodySerial)];
snprintf(tbuf, sizeof(tbuf), "%s %s %d:%s:%s %s", imgdata.shootinginfo.InternalBodySerial, ystr, year, mm,
dd, words[i] + strnlen(words[i], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) - 12);
strncpy(imgdata.shootinginfo.InternalBodySerial, tbuf,
sizeof(imgdata.shootinginfo.InternalBodySerial) - 1);
}
}
}
}
else
parseFujiMakernotes(tag, type);
}
else if (!strncasecmp(model, "Hasselblad X1D", 14) || !strncasecmp(model, "Hasselblad H6D", 14) ||
!strncasecmp(model, "Hasselblad A6D", 14))
{
if (tag == 0x0045)
{
imgdata.makernotes.hasselblad.BaseISO = get4();
}
else if (tag == 0x0046)
{
imgdata.makernotes.hasselblad.Gain = getreal(type);
}
}
else if (!strncasecmp(make, "LEICA", 5))
{
if (((tag == 0x035e) || (tag == 0x035f)) && (type == 10) && (len == 9))
{
int ind = tag == 0x035e ? 0 : 1;
for (int j = 0; j < 3; j++)
FORCC imgdata.color.dng_color[ind].forwardmatrix[j][c] = getreal(type);
imgdata.color.dng_color[ind].parsedfields |= LIBRAW_DNGFM_FORWARDMATRIX;
}
if (tag == 0x34003402)
imgdata.other.CameraTemperature = getreal(type);
if ((tag == 0x0320) && (type == 9) && (len == 1) && !strncasecmp(make, "Leica Camera AG", 15) &&
!strncmp(buf, "LEICA", 5) && (buf[5] == 0) && (buf[6] == 0) && (buf[7] == 0))
imgdata.other.CameraTemperature = getreal(type);
if ((tag == 0x0303) && (type != 4))
{
stmread(imgdata.lens.makernotes.Lens, len, ifp);
}
if ((tag == 0x3405) || (tag == 0x0310) || (tag == 0x34003405))
{
imgdata.lens.makernotes.LensID = get4();
imgdata.lens.makernotes.LensID =
((imgdata.lens.makernotes.LensID >> 2) << 8) | (imgdata.lens.makernotes.LensID & 0x3);
if (imgdata.lens.makernotes.LensID != -1)
{
if ((model[0] == 'M') || !strncasecmp(model, "LEICA M", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_M;
}
else if ((model[0] == 'S') || !strncasecmp(model, "LEICA S", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_S;
if (imgdata.lens.makernotes.Lens[0])
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_S;
}
}
}
else if (((tag == 0x0313) || (tag == 0x34003406)) && (fabs(imgdata.lens.makernotes.CurAp) < 0.17f) &&
((type == 10) || (type == 5)))
{
imgdata.lens.makernotes.CurAp = getreal(type);
if (imgdata.lens.makernotes.CurAp > 126.3)
imgdata.lens.makernotes.CurAp = 0.0f;
}
else if (tag == 0x3400)
{
parse_makernote(base, 0x3400);
}
}
else if (!strncmp(make, "NIKON", 5))
{
if (tag == 0x000a)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
else if (tag == 0x0012)
{
char a, b, c;
a = fgetc(ifp);
b = fgetc(ifp);
c = fgetc(ifp);
if (c)
imgdata.other.FlashEC = (float)(a * b) / (float)c;
}
else if (tag == 0x003b) // all 1s for regular exposures
{
imgdata.makernotes.nikon.ME_WB[0] = getreal(type);
imgdata.makernotes.nikon.ME_WB[2] = getreal(type);
imgdata.makernotes.nikon.ME_WB[1] = getreal(type);
imgdata.makernotes.nikon.ME_WB[3] = getreal(type);
}
else if (tag == 0x0045)
{
imgdata.sizes.raw_crop.cleft = get2();
imgdata.sizes.raw_crop.ctop = get2();
imgdata.sizes.raw_crop.cwidth = get2();
imgdata.sizes.raw_crop.cheight = get2();
}
else if (tag == 0x0082) // lens attachment
{
stmread(imgdata.lens.makernotes.Attachment, len, ifp);
}
else if (tag == 0x0083) // lens type
{
imgdata.lens.nikon.NikonLensType = fgetc(ifp);
}
else if (tag == 0x0084) // lens
{
imgdata.lens.makernotes.MinFocal = getreal(type);
imgdata.lens.makernotes.MaxFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type);
}
else if (tag == 0x008b) // lens f-stops
{
uchar a, b, c;
a = fgetc(ifp);
b = fgetc(ifp);
c = fgetc(ifp);
if (c)
{
imgdata.lens.nikon.NikonLensFStops = a * b * (12 / c);
imgdata.lens.makernotes.LensFStops = (float)imgdata.lens.nikon.NikonLensFStops / 12.0f;
}
}
else if (tag == 0x0093) // Nikon compression
{
imgdata.makernotes.nikon.NEFCompression = i = get2();
if ((i == 7) || (i == 9))
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0098) // contains lens data
{
for (i = 0; i < 4; i++)
{
NikonLensDataVersion = NikonLensDataVersion * 10 + fgetc(ifp) - '0';
}
switch (NikonLensDataVersion)
{
case 100:
lenNikonLensData = 9;
break;
case 101:
case 201: // encrypted, starting from v.201
case 202:
case 203:
lenNikonLensData = 15;
break;
case 204:
lenNikonLensData = 16;
break;
case 400:
lenNikonLensData = 459;
break;
case 401:
lenNikonLensData = 590;
break;
case 402:
lenNikonLensData = 509;
break;
case 403:
lenNikonLensData = 879;
break;
}
if (lenNikonLensData > 0)
{
table_buf = (uchar *)malloc(lenNikonLensData);
fread(table_buf, lenNikonLensData, 1, ifp);
if ((NikonLensDataVersion < 201) && lenNikonLensData)
{
processNikonLensData(table_buf, lenNikonLensData);
free(table_buf);
lenNikonLensData = 0;
}
}
}
else if (tag == 0x00a0)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
}
else if (tag == 0x00a8) // contains flash data
{
for (i = 0; i < 4; i++)
{
NikonFlashInfoVersion = NikonFlashInfoVersion * 10 + fgetc(ifp) - '0';
}
}
else if (tag == 0x00b0)
{
get4(); // ME tag version, 4 symbols
imgdata.makernotes.nikon.ExposureMode = get4();
imgdata.makernotes.nikon.nMEshots = get4();
imgdata.makernotes.nikon.MEgainOn = get4();
}
else if (tag == 0x00b9)
{
uchar uc;
int8_t sc;
fread(&uc, 1, 1, ifp);
imgdata.makernotes.nikon.AFFineTune = uc;
fread(&uc, 1, 1, ifp);
imgdata.makernotes.nikon.AFFineTuneIndex = uc;
fread(&sc, 1, 1, ifp);
imgdata.makernotes.nikon.AFFineTuneAdj = sc;
}
}
else if (!strncmp(make, "OLYMPUS", 7))
{
switch (tag)
{
case 0x0404:
case 0x101a:
case 0x20100101:
if (!imgdata.shootinginfo.BodySerial[0])
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
break;
case 0x20100102:
if (!imgdata.shootinginfo.InternalBodySerial[0])
stmread(imgdata.shootinginfo.InternalBodySerial, len, ifp);
break;
case 0x0207:
case 0x20100100:
{
uchar sOlyID[8];
fread(sOlyID, MIN(len, 7), 1, ifp);
sOlyID[7] = 0;
OlyID = sOlyID[0];
i = 1;
while (i < 7 && sOlyID[i])
{
OlyID = OlyID << 8 | sOlyID[i];
i++;
}
setOlympusBodyFeatures(OlyID);
}
break;
case 0x1002:
imgdata.lens.makernotes.CurAp = libraw_powf64l(2.0f, getreal(type) / 2);
break;
case 0x20400612:
case 0x30000612:
imgdata.sizes.raw_crop.cleft = get2();
break;
case 0x20400613:
case 0x30000613:
imgdata.sizes.raw_crop.ctop = get2();
break;
case 0x20400614:
case 0x30000614:
imgdata.sizes.raw_crop.cwidth = get2();
break;
case 0x20400615:
case 0x30000615:
imgdata.sizes.raw_crop.cheight = get2();
break;
case 0x20401112:
imgdata.makernotes.olympus.OlympusCropID = get2();
break;
case 0x20401113:
FORC4 imgdata.makernotes.olympus.OlympusFrame[c] = get2();
break;
case 0x20100201:
{
unsigned long long oly_lensid[3];
oly_lensid[0] = fgetc(ifp);
fgetc(ifp);
oly_lensid[1] = fgetc(ifp);
oly_lensid[2] = fgetc(ifp);
imgdata.lens.makernotes.LensID = (oly_lensid[0] << 16) | (oly_lensid[1] << 8) | oly_lensid[2];
}
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FT;
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FT;
if (((imgdata.lens.makernotes.LensID < 0x20000) || (imgdata.lens.makernotes.LensID > 0x4ffff)) &&
(imgdata.lens.makernotes.LensID & 0x10))
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_mFT;
}
break;
case 0x20100202:
stmread(imgdata.lens.LensSerial, len, ifp);
break;
case 0x20100203:
stmread(imgdata.lens.makernotes.Lens, len, ifp);
break;
case 0x20100205:
imgdata.lens.makernotes.MaxAp4MinFocal = libraw_powf64l(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100206:
imgdata.lens.makernotes.MaxAp4MaxFocal = libraw_powf64l(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100207:
imgdata.lens.makernotes.MinFocal = (float)get2();
break;
case 0x20100208:
imgdata.lens.makernotes.MaxFocal = (float)get2();
if (imgdata.lens.makernotes.MaxFocal > 1000.0f)
imgdata.lens.makernotes.MaxFocal = imgdata.lens.makernotes.MinFocal;
break;
case 0x2010020a:
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64l(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100301:
imgdata.lens.makernotes.TeleconverterID = fgetc(ifp) << 8;
fgetc(ifp);
imgdata.lens.makernotes.TeleconverterID = imgdata.lens.makernotes.TeleconverterID | fgetc(ifp);
break;
case 0x20100303:
stmread(imgdata.lens.makernotes.Teleconverter, len, ifp);
break;
case 0x20100403:
stmread(imgdata.lens.makernotes.Attachment, len, ifp);
break;
case 0x1007:
imgdata.other.SensorTemperature = (float)get2();
break;
case 0x1008:
imgdata.other.LensTemperature = (float)get2();
break;
case 0x20401306:
{
int temp = get2();
if ((temp != 0) && (temp != 100))
{
if (temp < 61)
imgdata.other.CameraTemperature = (float)temp;
else
imgdata.other.CameraTemperature = (float)(temp - 32) / 1.8f;
if ((OlyID == 0x4434353933ULL) && // TG-5
(imgdata.other.exifAmbientTemperature > -273.15f))
imgdata.other.CameraTemperature += imgdata.other.exifAmbientTemperature;
}
}
break;
case 0x20501500:
if (OlyID != 0x0ULL)
{
short temp = get2();
if ((OlyID == 0x4434303430ULL) || // E-1
(OlyID == 0x5330303336ULL) || // E-M5
(len != 1))
imgdata.other.SensorTemperature = (float)temp;
else if ((temp != -32768) && (temp != 0))
{
if (temp > 199)
imgdata.other.SensorTemperature = 86.474958f - 0.120228f * (float)temp;
else
imgdata.other.SensorTemperature = (float)temp;
}
}
break;
}
}
else if ((!strncmp(make, "PENTAX", 6) || !strncmp(make, "RICOH", 5)) && !strncmp(model, "GR", 2))
{
if (tag == 0x0005)
{
char buffer[17];
int count = 0;
fread(buffer, 16, 1, ifp);
buffer[16] = 0;
for (int i = 0; i < 16; i++)
{
// sprintf(imgdata.shootinginfo.InternalBodySerial+2*i, "%02x", buffer[i]);
if ((isspace(buffer[i])) || (buffer[i] == 0x2D) || (isalnum(buffer[i])))
count++;
}
if (count == 16)
{
sprintf(imgdata.shootinginfo.BodySerial, "%8s", buffer + 8);
buffer[8] = 0;
sprintf(imgdata.shootinginfo.InternalBodySerial, "%8s", buffer);
}
else
{
sprintf(imgdata.shootinginfo.BodySerial, "%02x%02x%02x%02x", buffer[4], buffer[5], buffer[6], buffer[7]);
sprintf(imgdata.shootinginfo.InternalBodySerial, "%02x%02x%02x%02x", buffer[8], buffer[9], buffer[10],
buffer[11]);
}
}
else if ((tag == 0x1001) && (type == 3))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.LensID = -1;
imgdata.lens.makernotes.FocalType = 1;
}
else if ((tag == 0x100b) && (type == 10))
{
imgdata.other.FlashEC = getreal(type);
}
else if ((tag == 0x1017) && (get2() == 2))
{
strcpy(imgdata.lens.makernotes.Attachment, "Wide-Angle Adapter");
}
else if (tag == 0x1500)
{
imgdata.lens.makernotes.CurFocal = getreal(type);
}
}
else if (!strncmp(make, "RICOH", 5) && strncmp(model, "PENTAX", 6))
{
if ((tag == 0x0005) && !strncmp(model, "GXR", 3))
{
char buffer[9];
buffer[8] = 0;
fread(buffer, 8, 1, ifp);
sprintf(imgdata.shootinginfo.InternalBodySerial, "%8s", buffer);
}
else if ((tag == 0x100b) && (type == 10))
{
imgdata.other.FlashEC = getreal(type);
}
else if ((tag == 0x1017) && (get2() == 2))
{
strcpy(imgdata.lens.makernotes.Attachment, "Wide-Angle Adapter");
}
else if (tag == 0x1500)
{
imgdata.lens.makernotes.CurFocal = getreal(type);
}
else if ((tag == 0x2001) && !strncmp(model, "GXR", 3))
{
short ntags, cur_tag;
fseek(ifp, 20, SEEK_CUR);
ntags = get2();
cur_tag = get2();
while (cur_tag != 0x002c)
{
fseek(ifp, 10, SEEK_CUR);
cur_tag = get2();
}
fseek(ifp, 6, SEEK_CUR);
fseek(ifp, get4() + 20, SEEK_SET);
stread(imgdata.shootinginfo.BodySerial, 12, ifp);
get2();
imgdata.lens.makernotes.LensID = getc(ifp) - '0';
switch (imgdata.lens.makernotes.LensID)
{
case 1:
case 2:
case 3:
case 5:
case 6:
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_RicohModule;
break;
case 8:
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.LensID = -1;
break;
default:
imgdata.lens.makernotes.LensID = -1;
}
fseek(ifp, 17, SEEK_CUR);
stread(imgdata.lens.LensSerial, 12, ifp);
}
}
else if ((!strncmp(make, "PENTAX", 6) || !strncmp(model, "PENTAX", 6) ||
(!strncmp(make, "SAMSUNG", 7) && dng_version)) &&
strncmp(model, "GR", 2))
{
if (tag == 0x0005)
{
unique_id = get4();
setPentaxBodyFeatures(unique_id);
}
else if (tag == 0x000d)
{
imgdata.makernotes.pentax.FocusMode = get2();
}
else if (tag == 0x000e)
{
imgdata.makernotes.pentax.AFPointSelected = get2();
}
else if (tag == 0x000f)
{
imgdata.makernotes.pentax.AFPointsInFocus = getint(type);
}
else if (tag == 0x0010)
{
imgdata.makernotes.pentax.FocusPosition = get2();
}
else if (tag == 0x0013)
{
imgdata.lens.makernotes.CurAp = (float)get2() / 10.0f;
}
else if (tag == 0x0014)
{
PentaxISO(get2());
}
else if (tag == 0x001d)
{
imgdata.lens.makernotes.CurFocal = (float)get4() / 100.0f;
}
else if (tag == 0x0034)
{
uchar uc;
FORC4
{
fread(&uc, 1, 1, ifp);
imgdata.makernotes.pentax.DriveMode[c] = uc;
}
}
else if (tag == 0x0038)
{
imgdata.sizes.raw_crop.cleft = get2();
imgdata.sizes.raw_crop.ctop = get2();
}
else if (tag == 0x0039)
{
imgdata.sizes.raw_crop.cwidth = get2();
imgdata.sizes.raw_crop.cheight = get2();
}
else if (tag == 0x003f)
{
imgdata.lens.makernotes.LensID = fgetc(ifp) << 8 | fgetc(ifp);
}
else if (tag == 0x0047)
{
imgdata.other.CameraTemperature = (float)fgetc(ifp);
}
else if (tag == 0x004d)
{
if (type == 9)
imgdata.other.FlashEC = getreal(type) / 256.0f;
else
imgdata.other.FlashEC = (float)((signed short)fgetc(ifp)) / 6.0f;
}
else if (tag == 0x0072)
{
imgdata.makernotes.pentax.AFAdjustment = get2();
}
else if (tag == 0x007e)
{
imgdata.color.linear_max[0] = imgdata.color.linear_max[1] = imgdata.color.linear_max[2] =
imgdata.color.linear_max[3] = (long)(-1) * get4();
}
else if (tag == 0x0207)
{
if (len < 65535) // Safety belt
PentaxLensInfo(imgdata.lens.makernotes.CamID, len);
}
else if ((tag >= 0x020d) && (tag <= 0x0214))
{
FORC4 imgdata.color.WB_Coeffs[Pentax_wb_list1[tag - 0x020d]][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0221)
{
int nWB = get2();
if (nWB <= sizeof(imgdata.color.WBCT_Coeffs) / sizeof(imgdata.color.WBCT_Coeffs[0]))
for (int i = 0; i < nWB; i++)
{
imgdata.color.WBCT_Coeffs[i][0] = (unsigned)0xcfc6 - get2();
fseek(ifp, 2, SEEK_CUR);
imgdata.color.WBCT_Coeffs[i][1] = get2();
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 0x2000;
imgdata.color.WBCT_Coeffs[i][3] = get2();
}
}
else if (tag == 0x0215)
{
fseek(ifp, 16, SEEK_CUR);
sprintf(imgdata.shootinginfo.InternalBodySerial, "%d", get4());
}
else if (tag == 0x0229)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
}
else if (tag == 0x022d)
{
int wb_ind;
getc(ifp);
for (int wb_cnt = 0; wb_cnt < nPentax_wb_list2; wb_cnt++)
{
wb_ind = getc(ifp);
if (wb_ind < nPentax_wb_list2)
FORC4 imgdata.color.WB_Coeffs[Pentax_wb_list2[wb_ind]][c ^ (c >> 1)] = get2();
}
}
else if (tag == 0x0239) // Q-series lens info (LensInfoQ)
{
char LensInfo[20];
fseek(ifp, 2, SEEK_CUR);
stread(imgdata.lens.makernotes.Lens, 30, ifp);
strcat(imgdata.lens.makernotes.Lens, " ");
stread(LensInfo, 20, ifp);
strcat(imgdata.lens.makernotes.Lens, LensInfo);
}
}
else if (!strncmp(make, "SAMSUNG", 7))
{
if (tag == 0x0002)
{
if (get4() == 0x2000)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (!strncmp(model, "NX mini", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX_M;
}
else
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0003)
{
unique_id = imgdata.lens.makernotes.CamID = get4();
}
else if (tag == 0x0043)
{
int temp = get4();
if (temp)
{
imgdata.other.CameraTemperature = (float)temp;
if (get4() == 10)
imgdata.other.CameraTemperature /= 10.0f;
}
}
else if (tag == 0xa002)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
}
else if (tag == 0xa003)
{
imgdata.lens.makernotes.LensID = get2();
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (tag == 0xa005)
{
stmread(imgdata.lens.InternalLensSerial, len, ifp);
}
else if (tag == 0xa019)
{
imgdata.lens.makernotes.CurAp = getreal(type);
}
else if (tag == 0xa01a)
{
imgdata.lens.makernotes.FocalLengthIn35mmFormat = get4() / 10.0f;
if (imgdata.lens.makernotes.FocalLengthIn35mmFormat < 10.0f)
imgdata.lens.makernotes.FocalLengthIn35mmFormat *= 10.0f;
}
}
else if (!strncasecmp(make, "SONY", 4) || !strncasecmp(make, "Konica", 6) || !strncasecmp(make, "Minolta", 7) ||
(!strncasecmp(make, "Hasselblad", 10) &&
(!strncasecmp(model, "Stellar", 7) || !strncasecmp(model, "Lunar", 5) ||
!strncasecmp(model, "Lusso", 5) || !strncasecmp(model, "HV", 2))))
{
parseSonyMakernotes(tag, type, len, nonDNG, table_buf_0x0116, table_buf_0x0116_len, table_buf_0x2010,
table_buf_0x2010_len, table_buf_0x9050, table_buf_0x9050_len, table_buf_0x9400,
table_buf_0x9400_len, table_buf_0x9402, table_buf_0x9402_len, table_buf_0x9403,
table_buf_0x9403_len, table_buf_0x9406, table_buf_0x9406_len, table_buf_0x940c,
table_buf_0x940c_len, table_buf_0x940e, table_buf_0x940e_len);
}
fseek(ifp, _pos, SEEK_SET);
#endif
if (tag == 2 && strstr(make, "NIKON") && !iso_speed)
iso_speed = (get2(), get2());
if (tag == 37 && strstr(make, "NIKON") && (!iso_speed || iso_speed == 65535))
{
unsigned char cc;
fread(&cc, 1, 1, ifp);
iso_speed = int(100.0 * libraw_powf64l(2.0f, float(cc) / 12.0 - 5.0));
}
if (tag == 4 && len > 26 && len < 35)
{
if ((i = (get4(), get2())) != 0x7fff && (!iso_speed || iso_speed == 65535))
iso_speed = 50 * libraw_powf64l(2.0, i / 32.0 - 4);
#ifdef LIBRAW_LIBRARY_BUILD
get4();
#else
if ((i = (get2(), get2())) != 0x7fff && !aperture)
aperture = libraw_powf64l(2.0, i / 64.0);
#endif
if ((i = get2()) != 0xffff && !shutter)
shutter = libraw_powf64l(2.0, (short)i / -32.0);
wbi = (get2(), get2());
shot_order = (get2(), get2());
}
if ((tag == 4 || tag == 0x114) && !strncmp(make, "KONICA", 6))
{
fseek(ifp, tag == 4 ? 140 : 160, SEEK_CUR);
switch (get2())
{
case 72:
flip = 0;
break;
case 76:
flip = 6;
break;
case 82:
flip = 5;
break;
}
}
if (tag == 7 && type == 2 && len > 20)
fgets(model2, 64, ifp);
if (tag == 8 && type == 4)
shot_order = get4();
if (tag == 9 && !strncmp(make, "Canon", 5))
fread(artist, 64, 1, ifp);
if (tag == 0xc && len == 4)
FORC3 cam_mul[(c << 1 | c >> 1) & 3] = getreal(type);
if (tag == 0xd && type == 7 && get2() == 0xaaaa)
{
#if 0 /* Canon rotation data is handled by EXIF.Orientation */
for (c = i = 2; (ushort)c != 0xbbbb && i < len; i++)
c = c << 8 | fgetc(ifp);
while ((i += 4) < len - 5)
if (get4() == 257 && (i = len) && (c = (get4(), fgetc(ifp))) < 3)
flip = "065"[c] - '0';
#endif
}
#ifndef LIBRAW_LIBRARY_BUILD
if (tag == 0x10 && type == 4)
unique_id = get4();
#endif
#ifdef LIBRAW_LIBRARY_BUILD
INT64 _pos2 = ftell(ifp);
if (!strncasecmp(make, "Olympus", 7))
{
short nWB, tWB;
if ((tag == 0x20300108) || (tag == 0x20310109))
imgdata.makernotes.olympus.ColorSpace = get2();
if ((tag == 0x20400101) && (len == 2) && (!strncasecmp(model, "E-410", 5) || !strncasecmp(model, "E-510", 5)))
{
int i;
for (i = 0; i < 64; i++)
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = imgdata.color.WB_Coeffs[i][1] =
imgdata.color.WB_Coeffs[i][3] = 0x100;
for (i = 64; i < 256; i++)
imgdata.color.WB_Coeffs[i][1] = imgdata.color.WB_Coeffs[i][3] = 0x100;
}
if ((tag >= 0x20400101) && (tag <= 0x20400111))
{
nWB = tag - 0x20400101;
tWB = Oly_wb_list2[nWB << 1];
ushort CT = Oly_wb_list2[(nWB << 1) | 1];
int wb[4];
wb[0] = get2();
wb[2] = get2();
if (tWB != 0x100)
{
imgdata.color.WB_Coeffs[tWB][0] = wb[0];
imgdata.color.WB_Coeffs[tWB][2] = wb[2];
}
if (CT)
{
imgdata.color.WBCT_Coeffs[nWB - 1][0] = CT;
imgdata.color.WBCT_Coeffs[nWB - 1][1] = wb[0];
imgdata.color.WBCT_Coeffs[nWB - 1][3] = wb[2];
}
if (len == 4)
{
wb[1] = get2();
wb[3] = get2();
if (tWB != 0x100)
{
imgdata.color.WB_Coeffs[tWB][1] = wb[1];
imgdata.color.WB_Coeffs[tWB][3] = wb[3];
}
if (CT)
{
imgdata.color.WBCT_Coeffs[nWB - 1][2] = wb[1];
imgdata.color.WBCT_Coeffs[nWB - 1][4] = wb[3];
}
}
}
if ((tag >= 0x20400112) && (tag <= 0x2040011e))
{
nWB = tag - 0x20400112;
int wbG = get2();
tWB = Oly_wb_list2[nWB << 1];
if (nWB)
imgdata.color.WBCT_Coeffs[nWB - 1][2] = imgdata.color.WBCT_Coeffs[nWB - 1][4] = wbG;
if (tWB != 0x100)
imgdata.color.WB_Coeffs[tWB][1] = imgdata.color.WB_Coeffs[tWB][3] = wbG;
}
if (tag == 0x20400121)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][2] = get2();
if (len == 4)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] = get2();
}
}
if (tag == 0x2040011f)
{
int wbG = get2();
if (imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][0])
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] = wbG;
FORC4 if (imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom1 + c][0])
imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom1 + c][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom1 + c][3] =
wbG;
}
if ((tag == 0x30000110) && strcmp(software, "v757-71"))
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][2] = get2();
if (len == 2)
{
for (int i = 0; i < 256; i++)
imgdata.color.WB_Coeffs[i][1] = imgdata.color.WB_Coeffs[i][3] = 0x100;
}
}
if ((((tag >= 0x30000120) && (tag <= 0x30000124)) || ((tag >= 0x30000130) && (tag <= 0x30000133))) &&
strcmp(software, "v757-71"))
{
int wb_ind;
if (tag <= 0x30000124)
wb_ind = tag - 0x30000120;
else
wb_ind = tag - 0x30000130 + 5;
imgdata.color.WB_Coeffs[Oly_wb_list1[wb_ind]][0] = get2();
imgdata.color.WB_Coeffs[Oly_wb_list1[wb_ind]][2] = get2();
}
if ((tag == 0x20400805) && (len == 2))
{
imgdata.makernotes.olympus.OlympusSensorCalibration[0] = getreal(type);
imgdata.makernotes.olympus.OlympusSensorCalibration[1] = getreal(type);
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.olympus.OlympusSensorCalibration[0];
}
if (tag == 0x20200306)
{
uchar uc;
fread(&uc, 1, 1, ifp);
imgdata.makernotes.olympus.AFFineTune = uc;
}
if (tag == 0x20200307)
{
FORC3 imgdata.makernotes.olympus.AFFineTuneAdj[c] = get2();
}
if (tag == 0x20200401)
{
imgdata.other.FlashEC = getreal(type);
}
}
fseek(ifp, _pos2, SEEK_SET);
#endif
if (tag == 0x11 && is_raw && !strncmp(make, "NIKON", 5))
{
fseek(ifp, get4() + base, SEEK_SET);
parse_tiff_ifd(base);
}
if (tag == 0x14 && type == 7)
{
if (len == 2560)
{
fseek(ifp, 1248, SEEK_CUR);
goto get2_256;
}
fread(buf, 1, 10, ifp);
if (!strncmp(buf, "NRW ", 4))
{
fseek(ifp, strcmp(buf + 4, "0100") ? 46 : 1546, SEEK_CUR);
cam_mul[0] = get4() << 2;
cam_mul[1] = get4() + get4();
cam_mul[2] = get4() << 2;
}
}
if (tag == 0x15 && type == 2 && is_raw)
fread(model, 64, 1, ifp);
if (strstr(make, "PENTAX"))
{
if (tag == 0x1b)
tag = 0x1018;
if (tag == 0x1c)
tag = 0x1017;
}
if (tag == 0x1d)
{
while ((c = fgetc(ifp)) && c != EOF)
#ifdef LIBRAW_LIBRARY_BUILD
{
if ((!custom_serial) && (!isdigit(c)))
{
if ((strbuflen(model) == 3) && (!strcmp(model, "D50")))
{
custom_serial = 34;
}
else
{
custom_serial = 96;
}
}
#endif
serial = serial * 10 + (isdigit(c) ? c - '0' : c % 10);
#ifdef LIBRAW_LIBRARY_BUILD
}
if (!imgdata.shootinginfo.BodySerial[0])
sprintf(imgdata.shootinginfo.BodySerial, "%d", serial);
#endif
}
if (tag == 0x29 && type == 1)
{ // Canon PowerShot G9
c = wbi < 18 ? "012347800000005896"[wbi] - '0' : 0;
fseek(ifp, 8 + c * 32, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get4();
}
#ifndef LIBRAW_LIBRARY_BUILD
if (tag == 0x3d && type == 3 && len == 4)
FORC4 cblack[c ^ c >> 1] = get2() >> (14 - tiff_bps);
#endif
if (tag == 0x81 && type == 4)
{
data_offset = get4();
fseek(ifp, data_offset + 41, SEEK_SET);
raw_height = get2() * 2;
raw_width = get2();
filters = 0x61616161;
}
if ((tag == 0x81 && type == 7) || (tag == 0x100 && type == 7) || (tag == 0x280 && type == 1))
{
thumb_offset = ftell(ifp);
thumb_length = len;
}
if (tag == 0x88 && type == 4 && (thumb_offset = get4()))
thumb_offset += base;
if (tag == 0x89 && type == 4)
thumb_length = get4();
if (tag == 0x8c || tag == 0x96)
meta_offset = ftell(ifp);
if (tag == 0x97)
{
for (i = 0; i < 4; i++)
ver97 = ver97 * 10 + fgetc(ifp) - '0';
switch (ver97)
{
case 100:
fseek(ifp, 68, SEEK_CUR);
FORC4 cam_mul[(c >> 1) | ((c & 1) << 1)] = get2();
break;
case 102:
fseek(ifp, 6, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
break;
case 103:
fseek(ifp, 16, SEEK_CUR);
FORC4 cam_mul[c] = get2();
}
if (ver97 >= 200)
{
if (ver97 != 205)
fseek(ifp, 280, SEEK_CUR);
fread(buf97, 324, 1, ifp);
}
}
if ((tag == 0xa1) && (type == 7) && strncasecmp(make, "Samsung", 7))
{
order = 0x4949;
fseek(ifp, 140, SEEK_CUR);
FORC3 cam_mul[c] = get4();
}
if (tag == 0xa4 && type == 3)
{
fseek(ifp, wbi * 48, SEEK_CUR);
FORC3 cam_mul[c] = get2();
}
if (tag == 0xa7)
{ // shutter count
NikonKey = fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp);
if ((unsigned)(ver97 - 200) < 17)
{
ci = xlat[0][serial & 0xff];
cj = xlat[1][NikonKey];
ck = 0x60;
for (i = 0; i < 324; i++)
buf97[i] ^= (cj += ci * ck++);
i = "66666>666;6A;:;55"[ver97 - 200] - '0';
FORC4 cam_mul[c ^ (c >> 1) ^ (i & 1)] = sget2(buf97 + (i & -2) + c * 2);
}
#ifdef LIBRAW_LIBRARY_BUILD
if ((NikonLensDataVersion > 200) && lenNikonLensData)
{
if (custom_serial)
{
ci = xlat[0][custom_serial];
}
else
{
ci = xlat[0][serial & 0xff];
}
cj = xlat[1][NikonKey];
ck = 0x60;
for (i = 0; i < lenNikonLensData; i++)
table_buf[i] ^= (cj += ci * ck++);
processNikonLensData(table_buf, lenNikonLensData);
lenNikonLensData = 0;
free(table_buf);
}
if (ver97 == 601) // Coolpix A
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
#endif
}
if (tag == 0xb001 && type == 3) // Sony ModelID
{
unique_id = get2();
}
if (tag == 0x200 && len == 3)
shot_order = (get4(), get4());
if (tag == 0x200 && len == 4) // Pentax black level
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x201 && len == 4) // Pentax As Shot WB
FORC4 cam_mul[c ^ (c >> 1)] = get2();
if (tag == 0x220 && type == 7)
meta_offset = ftell(ifp);
if (tag == 0x401 && type == 4 && len == 4)
FORC4 cblack[c ^ c >> 1] = get4();
#ifdef LIBRAW_LIBRARY_BUILD
// not corrected for file bitcount, to be patched in open_datastream
if (tag == 0x03d && strstr(make, "NIKON") && len == 4)
{
FORC4 cblack[c ^ c >> 1] = get2();
i = cblack[3];
FORC3 if (i > cblack[c]) i = cblack[c];
FORC4 cblack[c] -= i;
black += i;
}
#endif
if (tag == 0xe01)
{ /* Nikon Capture Note */
#ifdef LIBRAW_LIBRARY_BUILD
int loopc = 0;
#endif
order = 0x4949;
fseek(ifp, 22, SEEK_CUR);
for (offset = 22; offset + 22 < len; offset += 22 + i)
{
#ifdef LIBRAW_LIBRARY_BUILD
if (loopc++ > 1024)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
tag = get4();
fseek(ifp, 14, SEEK_CUR);
i = get4() - 4;
if (tag == 0x76a43207)
flip = get2();
else
fseek(ifp, i, SEEK_CUR);
}
}
if (tag == 0xe80 && len == 256 && type == 7)
{
fseek(ifp, 48, SEEK_CUR);
cam_mul[0] = get2() * 508 * 1.078 / 0x10000;
cam_mul[2] = get2() * 382 * 1.173 / 0x10000;
}
if (tag == 0xf00 && type == 7)
{
if (len == 614)
fseek(ifp, 176, SEEK_CUR);
else if (len == 734 || len == 1502)
fseek(ifp, 148, SEEK_CUR);
else
goto next;
goto get2_256;
}
if (((tag == 0x1011 && len == 9) || tag == 0x20400200) && strcmp(software, "v757-71"))
for (i = 0; i < 3; i++)
{
#ifdef LIBRAW_LIBRARY_BUILD
if (!imgdata.makernotes.olympus.ColorSpace)
{
FORC3 cmatrix[i][c] = ((short)get2()) / 256.0;
}
else
{
FORC3 imgdata.color.ccm[i][c] = ((short)get2()) / 256.0;
}
#else
FORC3 cmatrix[i][c] = ((short)get2()) / 256.0;
#endif
}
if ((tag == 0x1012 || tag == 0x20400600) && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x1017 || tag == 0x20400100)
cam_mul[0] = get2() / 256.0;
if (tag == 0x1018 || tag == 0x20400100)
cam_mul[2] = get2() / 256.0;
if (tag == 0x2011 && len == 2)
{
get2_256:
order = 0x4d4d;
cam_mul[0] = get2() / 256.0;
cam_mul[2] = get2() / 256.0;
}
if ((tag | 0x70) == 0x2070 && (type == 4 || type == 13))
fseek(ifp, get4() + base, SEEK_SET);
#ifdef LIBRAW_LIBRARY_BUILD
// IB start
if (tag == 0x2010)
{
INT64 _pos3 = ftell(ifp);
parse_makernote(base, 0x2010);
fseek(ifp, _pos3, SEEK_SET);
}
if (((tag == 0x2020) || (tag == 0x3000) || (tag == 0x2030) || (tag == 0x2031) || (tag == 0x2050)) &&
((type == 7) || (type == 13)) && !strncasecmp(make, "Olympus", 7))
{
INT64 _pos3 = ftell(ifp);
parse_makernote(base, tag);
fseek(ifp, _pos3, SEEK_SET);
}
// IB end
#endif
if ((tag == 0x2020) && ((type == 7) || (type == 13)) && !strncmp(buf, "OLYMP", 5))
parse_thumb_note(base, 257, 258);
if (tag == 0x2040)
parse_makernote(base, 0x2040);
if (tag == 0xb028)
{
fseek(ifp, get4() + base, SEEK_SET);
parse_thumb_note(base, 136, 137);
}
if (tag == 0x4001 && len > 500 && len < 100000)
{
i = len == 582 ? 50 : len == 653 ? 68 : len == 5120 ? 142 : 126;
fseek(ifp, i, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
for (i += 18; i <= len; i += 10)
{
get2();
FORC4 sraw_mul[c ^ (c >> 1)] = get2();
if (sraw_mul[1] == 1170)
break;
}
}
if (!strncasecmp(make, "Samsung", 7))
{
if (tag == 0xa020) // get the full Samsung encryption key
for (i = 0; i < 11; i++)
SamsungKey[i] = get4();
if (tag == 0xa021) // get and decode Samsung cam_mul array
FORC4 cam_mul[c ^ (c >> 1)] = get4() - SamsungKey[c];
#ifdef LIBRAW_LIBRARY_BUILD
if (tag == 0xa022)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c >> 1)] = get4() - SamsungKey[c + 4];
if (imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][0] < (imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][1] >> 1))
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][1] >> 4;
imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][3] >> 4;
}
}
if (tag == 0xa023)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][0] = get4() - SamsungKey[8];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1] = get4() - SamsungKey[9];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][3] = get4() - SamsungKey[10];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][2] = get4() - SamsungKey[0];
if (imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][0] < (imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1] >> 1))
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1] >> 4;
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][3] >> 4;
}
}
if (tag == 0xa024)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][c ^ (c >> 1)] = get4() - SamsungKey[c + 1];
if (imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][0] < (imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][1] >> 1))
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][1] >> 4;
imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][3] >> 4;
}
}
/*
if (tag == 0xa025) {
i = get4();
imgdata.color.linear_max[0] = imgdata.color.linear_max[1] = imgdata.color.linear_max[2] =
imgdata.color.linear_max[3] = i - SamsungKey[0]; printf ("Samsung 0xa025 %d\n", i); }
*/
if (tag == 0xa030 && len == 9)
for (i = 0; i < 3; i++)
FORC3 imgdata.color.ccm[i][c] = (float)((short)((get4() + SamsungKey[i * 3 + c]))) / 256.0;
#endif
if (tag == 0xa031 && len == 9) // get and decode Samsung color matrix
for (i = 0; i < 3; i++)
FORC3 cmatrix[i][c] = (float)((short)((get4() + SamsungKey[i * 3 + c]))) / 256.0;
if (tag == 0xa028)
FORC4 cblack[c ^ (c >> 1)] = get4() - SamsungKey[c];
}
else
{
// Somebody else use 0xa021 and 0xa028?
if (tag == 0xa021)
FORC4 cam_mul[c ^ (c >> 1)] = get4();
if (tag == 0xa028)
FORC4 cam_mul[c ^ (c >> 1)] -= get4();
}
#ifdef LIBRAW_LIBRARY_BUILD
if (tag == 0x4021 && (imgdata.makernotes.canon.multishot[0] = get4()) &&
(imgdata.makernotes.canon.multishot[1] = get4()))
{
if (len >= 4)
{
imgdata.makernotes.canon.multishot[2] = get4();
imgdata.makernotes.canon.multishot[3] = get4();
}
FORC4 cam_mul[c] = 1024;
}
#else
if (tag == 0x4021 && get4() && get4())
FORC4 cam_mul[c] = 1024;
#endif
next:
fseek(ifp, save, SEEK_SET);
}
quit:
order = sorder;
}
| 1 |
[
"CWE-787"
] |
LibRaw
|
fbf60377c006eaea8d3eca3f5e4c654909dcdfd2
| 95,405,058,195,596,630,000,000,000,000,000,000,000 | 1,548 |
possible buffer overrun in Fuji makernotes parser
|
DSA_PrivateKey::DSA_PrivateKey(const AlgorithmIdentifier& alg_id,
const secure_vector<uint8_t>& key_bits) :
DL_Scheme_PrivateKey(alg_id, key_bits, DL_Group::ANSI_X9_57)
{
m_y = m_group.power_g_p(m_x);
}
| 0 |
[
"CWE-200"
] |
botan
|
48fc8df51d99f9d8ba251219367b3d629cc848e3
| 84,809,281,758,245,960,000,000,000,000,000,000,000 | 6 |
Address DSA/ECDSA side channel
|
WandExport double DrawGetStrokeWidth(const DrawingWand *wand)
{
assert(wand != (const DrawingWand *) NULL);
assert(wand->signature == MagickWandSignature);
if (wand->debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand->name);
return(CurrentContext->stroke_width);
}
| 0 |
[
"CWE-476"
] |
ImageMagick
|
6ad5fc3c9b652eec27fc0b1a0817159f8547d5d9
| 35,293,529,384,868,960,000,000,000,000,000,000,000 | 8 |
https://github.com/ImageMagick/ImageMagick/issues/716
|
static void register_stuff(AvahiServer *s) {
assert(s);
server_set_state(s, AVAHI_SERVER_REGISTERING);
s->n_host_rr_pending ++; /** Make sure that the state isn't changed tp AVAHI_SERVER_RUNNING too early */
register_hinfo(s);
register_browse_domain(s);
avahi_interface_monitor_update_rrs(s->monitor, 0);
assert(s->n_host_rr_pending > 0);
s->n_host_rr_pending --;
if (s->n_host_rr_pending == 0)
server_set_state(s, AVAHI_SERVER_RUNNING);
}
| 0 |
[
"CWE-346"
] |
avahi
|
e111def44a7df4624a4aa3f85fe98054bffb6b4f
| 287,719,149,953,555,200,000,000,000,000,000,000,000 | 16 |
Drop legacy unicast queries from address not on local link
When handling legacy unicast queries, ensure that the source IP is
inside a subnet on the local link, otherwise drop the packet.
Fixes #145
Fixes #203
CVE-2017-6519
CVE-2018-100084
|
inline uint32_t Unref() {
if (_refcount == 0) {
return 0;
}
return --_refcount;
}
| 0 |
[
"CWE-191"
] |
node
|
656260b4b65fec3b10f6da3fdc9f11fb941aafb5
| 291,750,375,978,039,880,000,000,000,000,000,000,000 | 6 |
napi: fix memory corruption vulnerability
Fixes: https://hackerone.com/reports/784186
CVE-ID: CVE-2020-8174
PR-URL: https://github.com/nodejs-private/node-private/pull/195
Reviewed-By: Anna Henningsen <[email protected]>
Reviewed-By: Gabriel Schulhof <[email protected]>
Reviewed-By: Michael Dawson <[email protected]>
Reviewed-By: Colin Ihrig <[email protected]>
Reviewed-By: Rich Trott <[email protected]>
|
Status ConvertNodeDefsToGraph(const GraphConstructorOptions& opts,
gtl::ArraySlice<NodeDef> nodes, Graph* g) {
ShapeRefiner refiner(TF_GRAPH_DEF_VERSION, g->op_registry());
// TODO(irving): Copy will go away once NodeInfo exists
std::vector<const NodeDef*> node_defs;
node_defs.reserve(nodes.size());
for (const auto& n : nodes) {
node_defs.push_back(&n);
}
return GraphConstructor::Construct(opts, node_defs, nullptr, nullptr, g,
&refiner, /*return_tensors=*/nullptr,
/*return_nodes=*/nullptr,
/*missing_unused_input_map_keys=*/nullptr);
}
| 0 |
[
"CWE-125",
"CWE-369",
"CWE-908"
] |
tensorflow
|
0cc38aaa4064fd9e79101994ce9872c6d91f816b
| 172,454,641,249,108,630,000,000,000,000,000,000,000 | 14 |
Prevent unitialized memory access in `GraphConstructor::MakeEdge`
The `MakeEdge` implementation assumes that there exists an output at `output_index` of `src` node and an input at `input_index` of `dst` node. However, if this is not the case this results in accessing data out of bounds. Because we are accessing an array that is a private member of a class and only in read only mode, this usually results only in unitialized memory access. However, it is reasonable to think that malicious users could manipulate these indexes to actually read data outside the class, thus resulting in information leakage and further exploits.
PiperOrigin-RevId: 346343288
Change-Id: I2127da27c2023d27f26efd39afa6c853385cab6f
|
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *vmcb = svm->vmcb;
int ret;
if (!gif_set(svm) ||
(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
return 0;
ret = !!(vmcb->save.rflags & X86_EFLAGS_IF);
if (is_nested(svm))
return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
return ret;
}
| 0 |
[
"CWE-400"
] |
linux-2.6
|
9581d442b9058d3699b4be568b6e5eae38a41493
| 238,100,652,964,098,100,000,000,000,000,000,000,000 | 17 |
KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
|
inline void AveragePool(const PoolParams& params,
const RuntimeShape& input_shape,
const float* input_data,
const RuntimeShape& output_shape, float* output_data) {
ruy::profiler::ScopeLabel label("AveragePool");
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const int stride_height = params.stride_height;
const int stride_width = params.stride_width;
// TODO(benoitjacob) make this a proper reference impl without Eigen!
const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape);
// TODO(benoitjacob) get rid of the dynamic memory allocation here!
Eigen::VectorXf out_count(out_mat.cols());
out_count.setZero();
// Prefill the output to 0.
out_mat.setZero();
for (int b = 0; b < batches; ++b) {
for (int h = 0; h < input_height; ++h) {
for (int w = 0; w < input_width; ++w) {
// (h_start, h_end) * (w_start, w_end) is the range that the input
// vector projects to.
int hpad = h + params.padding_values.height;
int wpad = w + params.padding_values.width;
int h_start = (hpad < params.filter_height)
? 0
: (hpad - params.filter_height) / stride_height + 1;
int h_end = std::min(hpad / stride_height + 1, output_height);
int w_start = (wpad < params.filter_width)
? 0
: (wpad - params.filter_width) / stride_width + 1;
int w_end = std::min(wpad / stride_width + 1, output_width);
// compute elementwise sum
for (int ph = h_start; ph < h_end; ++ph) {
for (int pw = w_start; pw < w_end; ++pw) {
int out_offset = NodeOffset(b, ph, pw, output_height, output_width);
out_mat.col(out_offset) +=
in_mat.col(NodeOffset(b, h, w, input_height, input_width));
out_count(out_offset)++;
}
}
}
}
}
// Divide the output by the actual number of elements being averaged over
TFLITE_DCHECK_GT(out_count.minCoeff(), 0);
out_mat.array().rowwise() /= out_count.transpose().array();
const int flat_size = output_shape.FlatSize();
for (int i = 0; i < flat_size; ++i) {
output_data[i] = ActivationFunctionWithMinMax(output_data[i],
params.float_activation_min,
params.float_activation_max);
}
}
| 1 |
[
"CWE-703",
"CWE-835"
] |
tensorflow
|
dfa22b348b70bb89d6d6ec0ff53973bacb4f4695
| 325,526,093,205,511,140,000,000,000,000,000,000,000 | 61 |
Prevent a division by 0 in average ops.
PiperOrigin-RevId: 385184660
Change-Id: I7affd4554f9b336fca29ac68f633232c094d0bd3
|
void handleInvalidSpawnResponseType(const string &line, NegotiationDetails &details) {
throwAppSpawnException("An error occurred while starting "
"the web application. It sent an unknown response type \"" +
cEscapeString(line) + "\".",
SpawnException::APP_STARTUP_PROTOCOL_ERROR,
details);
}
| 0 |
[] |
passenger
|
8c6693e0818772c345c979840d28312c2edd4ba4
| 182,175,903,514,643,070,000,000,000,000,000,000,000 | 7 |
Security check socket filenames reported by spawned application processes.
|
unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
struct fuse_poll_out outarg;
FUSE_ARGS(args);
int err;
if (fc->no_poll)
return DEFAULT_POLLMASK;
poll_wait(file, &ff->poll_wait, wait);
inarg.events = (__u32)poll_requested_events(wait);
/*
* Ask for notification iff there's someone waiting for it.
* The client may ignore the flag and always notify.
*/
if (waitqueue_active(&ff->poll_wait)) {
inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
fuse_register_polled_file(fc, ff);
}
args.in.h.opcode = FUSE_POLL;
args.in.h.nodeid = ff->nodeid;
args.in.numargs = 1;
args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
args.out.numargs = 1;
args.out.args[0].size = sizeof(outarg);
args.out.args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err)
return outarg.revents;
if (err == -ENOSYS) {
fc->no_poll = 1;
return DEFAULT_POLLMASK;
}
return POLLERR;
}
| 0 |
[
"CWE-399",
"CWE-835"
] |
linux
|
3ca8138f014a913f98e6ef40e939868e1e9ea876
| 259,853,358,959,467,370,000,000,000,000,000,000,000 | 42 |
fuse: break infinite loop in fuse_fill_write_pages()
I got a report about unkillable task eating CPU. Further
investigation shows, that the problem is in the fuse_fill_write_pages()
function. If iov's first segment has zero length, we get an infinite
loop, because we never reach iov_iter_advance() call.
Fix this by calling iov_iter_advance() before repeating an attempt to
copy data from userspace.
A similar problem is described in 124d3b7041f ("fix writev regression:
pan hanging unkillable and un-straceable"). If zero-length segmend
is followed by segment with invalid address,
iov_iter_fault_in_readable() checks only first segment (zero-length),
iov_iter_copy_from_user_atomic() skips it, fails at second and
returns zero -> goto again without skipping zero-length segment.
Patch calls iov_iter_advance() before goto again: we'll skip zero-length
segment at second iteraction and iov_iter_fault_in_readable() will detect
invalid address.
Special thanks to Konstantin Khlebnikov, who helped a lot with the commit
description.
Cc: Andrew Morton <[email protected]>
Cc: Maxim Patlasov <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Signed-off-by: Roman Gushchin <[email protected]>
Signed-off-by: Miklos Szeredi <[email protected]>
Fixes: ea9b9907b82a ("fuse: implement perform_write")
Cc: <[email protected]>
|
static void __user *get_addr_ref_64(struct insn *insn, struct pt_regs *regs)
{
return (void __user *)-1L;
}
| 0 |
[
"CWE-416",
"CWE-362"
] |
linux
|
de9f869616dd95e95c00bdd6b0fcd3421e8a4323
| 273,036,129,213,631,760,000,000,000,000,000,000,000 | 4 |
x86/insn-eval: Fix use-after-free access to LDT entry
get_desc() computes a pointer into the LDT while holding a lock that
protects the LDT from being freed, but then drops the lock and returns the
(now potentially dangling) pointer to its caller.
Fix it by giving the caller a copy of the LDT entry instead.
Fixes: 670f928ba09b ("x86/insn-eval: Add utility function to get segment descriptor")
Cc: [email protected]
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
u8 phy_port;
u32 i = 0;
if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM))
return 0;
for (i = 0; i < MAC_ADDR_OCTET_NUM; i++)
hr_dev->dev_addr[port][i] = addr[i];
phy_port = hr_dev->iboe.phy_port[port];
return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}
| 0 |
[
"CWE-665"
] |
kernel
|
72be029e947510dd6cbbbaf51879622af26e4200
| 98,057,797,779,978,440,000,000,000,000,000,000,000 | 14 |
RDMA/hns: Fix init resp when alloc ucontext (bsc#1104427
FATE#326416).
suse-commit: 8e5436bc2806cbe952f043cc995804c188ce047a
|
int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned open_flags,
umode_t mode)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
struct nfs_open_context *ctx;
struct dentry *res;
struct iattr attr = { .ia_valid = ATTR_OPEN };
struct inode *inode;
unsigned int lookup_flags = 0;
unsigned long dir_verifier;
bool switched = false;
int created = 0;
int err;
/* Expect a negative dentry */
BUG_ON(d_inode(dentry));
dfprintk(VFS, "NFS: atomic_open(%s/%lu), %pd\n",
dir->i_sb->s_id, dir->i_ino, dentry);
err = nfs_check_flags(open_flags);
if (err)
return err;
/* NFS only supports OPEN on regular files */
if ((open_flags & O_DIRECTORY)) {
if (!d_in_lookup(dentry)) {
/*
* Hashed negative dentry with O_DIRECTORY: dentry was
* revalidated and is fine, no need to perform lookup
* again
*/
return -ENOENT;
}
lookup_flags = LOOKUP_OPEN|LOOKUP_DIRECTORY;
goto no_open;
}
if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
return -ENAMETOOLONG;
if (open_flags & O_CREAT) {
struct nfs_server *server = NFS_SERVER(dir);
if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
mode &= ~current_umask();
attr.ia_valid |= ATTR_MODE;
attr.ia_mode = mode;
}
if (open_flags & O_TRUNC) {
attr.ia_valid |= ATTR_SIZE;
attr.ia_size = 0;
}
if (!(open_flags & O_CREAT) && !d_in_lookup(dentry)) {
d_drop(dentry);
switched = true;
dentry = d_alloc_parallel(dentry->d_parent,
&dentry->d_name, &wq);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (unlikely(!d_in_lookup(dentry)))
return finish_no_open(file, dentry);
}
ctx = create_nfs_open_context(dentry, open_flags, file);
err = PTR_ERR(ctx);
if (IS_ERR(ctx))
goto out;
trace_nfs_atomic_open_enter(dir, ctx, open_flags);
inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, &created);
if (created)
file->f_mode |= FMODE_CREATED;
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
put_nfs_open_context(ctx);
d_drop(dentry);
switch (err) {
case -ENOENT:
d_splice_alias(NULL, dentry);
if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
dir_verifier = inode_peek_iversion_raw(dir);
else
dir_verifier = nfs_save_change_attribute(dir);
nfs_set_verifier(dentry, dir_verifier);
break;
case -EISDIR:
case -ENOTDIR:
goto no_open;
case -ELOOP:
if (!(open_flags & O_NOFOLLOW))
goto no_open;
break;
/* case -EINVAL: */
default:
break;
}
goto out;
}
err = nfs_finish_open(ctx, ctx->dentry, file, open_flags);
trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
put_nfs_open_context(ctx);
out:
if (unlikely(switched)) {
d_lookup_done(dentry);
dput(dentry);
}
return err;
no_open:
res = nfs_lookup(dir, dentry, lookup_flags);
if (!res) {
inode = d_inode(dentry);
if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
!S_ISDIR(inode->i_mode))
res = ERR_PTR(-ENOTDIR);
} else if (!IS_ERR(res)) {
inode = d_inode(res);
if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
!S_ISDIR(inode->i_mode)) {
dput(res);
res = ERR_PTR(-ENOTDIR);
}
}
if (switched) {
d_lookup_done(dentry);
if (!res)
res = dentry;
else
dput(dentry);
}
if (IS_ERR(res))
return PTR_ERR(res);
return finish_no_open(file, res);
}
| 0 |
[
"CWE-909"
] |
linux
|
ac795161c93699d600db16c1a8cc23a65a1eceaf
| 242,922,560,145,518,200,000,000,000,000,000,000,000 | 140 |
NFSv4: Handle case where the lookup of a directory fails
If the application sets the O_DIRECTORY flag, and tries to open a
regular file, nfs_atomic_open() will punt to doing a regular lookup.
If the server then returns a regular file, we will happily return a
file descriptor with uninitialised open state.
The fix is to return the expected ENOTDIR error in these cases.
Reported-by: Lyu Tao <[email protected]>
Fixes: 0dd2b474d0b6 ("nfs: implement i_op->atomic_open()")
Signed-off-by: Trond Myklebust <[email protected]>
Signed-off-by: Anna Schumaker <[email protected]>
|
do_wait_for_common(struct completion *x, long timeout, int state)
{
if (!x->done) {
DECLARE_WAITQUEUE(wait, current);
wait.flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue_tail(&x->wait, &wait);
do {
if ((state == TASK_INTERRUPTIBLE &&
signal_pending(current)) ||
(state == TASK_KILLABLE &&
fatal_signal_pending(current))) {
__remove_wait_queue(&x->wait, &wait);
return -ERESTARTSYS;
}
__set_current_state(state);
spin_unlock_irq(&x->wait.lock);
timeout = schedule_timeout(timeout);
spin_lock_irq(&x->wait.lock);
if (!timeout) {
__remove_wait_queue(&x->wait, &wait);
return timeout;
}
} while (!x->done);
__remove_wait_queue(&x->wait, &wait);
}
x->done--;
return timeout;
}
| 0 |
[] |
linux-2.6
|
8f1bc385cfbab474db6c27b5af1e439614f3025c
| 274,697,841,924,585,050,000,000,000,000,000,000,000 | 29 |
sched: fair: weight calculations
In order to level the hierarchy, we need to calculate load based on the
root view. That is, each task's load is in the same unit.
A
/ \
B 1
/ \
2 3
To compute 1's load we do:
weight(1)
--------------
rq_weight(A)
To compute 2's load we do:
weight(2) weight(B)
------------ * -----------
rq_weight(B) rw_weight(A)
This yields load fractions in comparable units.
The consequence is that it changes virtual time. We used to have:
time_{i}
vtime_{i} = ------------
weight_{i}
vtime = \Sum vtime_{i} = time / rq_weight.
But with the new way of load calculation we get that vtime equals time.
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
|
spell_load_cb(char_u *fname, void *cookie)
{
spelload_T *slp = (spelload_T *)cookie;
slang_T *slang;
slang = spell_load_file(fname, slp->sl_lang, NULL, FALSE);
if (slang != NULL)
{
// When a previously loaded file has NOBREAK also use it for the
// ".add" files.
if (slp->sl_nobreak && slang->sl_add)
slang->sl_nobreak = TRUE;
else if (slang->sl_nobreak)
slp->sl_nobreak = TRUE;
slp->sl_slang = slang;
}
}
| 0 |
[
"CWE-416"
] |
vim
|
2813f38e021c6e6581c0c88fcf107e41788bc835
| 309,134,877,870,114,600,000,000,000,000,000,000,000 | 18 |
patch 8.2.5072: using uninitialized value and freed memory in spell command
Problem: Using uninitialized value and freed memory in spell command.
Solution: Initialize "attr". Check for empty line early.
|
bool remoteDecodeComplete() const {
return stream_info_.downstreamTiming() &&
stream_info_.downstreamTiming()->lastDownstreamRxByteReceived().has_value();
}
| 0 |
[
"CWE-416"
] |
envoy
|
fe7c69c248f4fe5a9080c7ccb35275b5218bb5ab
| 61,594,886,654,638,790,000,000,000,000,000,000,000 | 4 |
internal redirect: fix a lifetime bug (#785)
Signed-off-by: Alyssa Wilk <[email protected]>
Signed-off-by: Matt Klein <[email protected]>
Signed-off-by: Pradeep Rao <[email protected]>
|
static int multipath_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
int sz = 0;
unsigned long flags;
struct multipath *m = (struct multipath *) ti->private;
struct priority_group *pg;
struct pgpath *p;
unsigned pg_num;
char state;
spin_lock_irqsave(&m->lock, flags);
/* Features */
if (type == STATUSTYPE_INFO)
DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
else {
DMEMIT("%u ", m->queue_if_no_path +
(m->pg_init_retries > 0) * 2 +
(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
if (m->queue_if_no_path)
DMEMIT("queue_if_no_path ");
if (m->pg_init_retries)
DMEMIT("pg_init_retries %u ", m->pg_init_retries);
if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
}
if (!m->hw_handler_name || type == STATUSTYPE_INFO)
DMEMIT("0 ");
else
DMEMIT("1 %s ", m->hw_handler_name);
DMEMIT("%u ", m->nr_priority_groups);
if (m->next_pg)
pg_num = m->next_pg->pg_num;
else if (m->current_pg)
pg_num = m->current_pg->pg_num;
else
pg_num = (m->nr_priority_groups ? 1 : 0);
DMEMIT("%u ", pg_num);
switch (type) {
case STATUSTYPE_INFO:
list_for_each_entry(pg, &m->priority_groups, list) {
if (pg->bypassed)
state = 'D'; /* Disabled */
else if (pg == m->current_pg)
state = 'A'; /* Currently Active */
else
state = 'E'; /* Enabled */
DMEMIT("%c ", state);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps, NULL, type,
result + sz,
maxlen - sz);
else
DMEMIT("0 ");
DMEMIT("%u %u ", pg->nr_pgpaths,
pg->ps.type->info_args);
list_for_each_entry(p, &pg->pgpaths, list) {
DMEMIT("%s %s %u ", p->path.dev->name,
p->is_active ? "A" : "F",
p->fail_count);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps,
&p->path, type, result + sz,
maxlen - sz);
}
}
break;
case STATUSTYPE_TABLE:
list_for_each_entry(pg, &m->priority_groups, list) {
DMEMIT("%s ", pg->ps.type->name);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps, NULL, type,
result + sz,
maxlen - sz);
else
DMEMIT("0 ");
DMEMIT("%u %u ", pg->nr_pgpaths,
pg->ps.type->table_args);
list_for_each_entry(p, &pg->pgpaths, list) {
DMEMIT("%s ", p->path.dev->name);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps,
&p->path, type, result + sz,
maxlen - sz);
}
}
break;
}
spin_unlock_irqrestore(&m->lock, flags);
return 0;
}
| 0 |
[
"CWE-284",
"CWE-264"
] |
linux
|
ec8013beddd717d1740cfefb1a9b900deef85462
| 13,161,415,457,445,187,000,000,000,000,000,000,000 | 107 |
dm: do not forward ioctls from logical volumes to the underlying device
A logical volume can map to just part of underlying physical volume.
In this case, it must be treated like a partition.
Based on a patch from Alasdair G Kergon.
Cc: Alasdair G Kergon <[email protected]>
Cc: [email protected]
Signed-off-by: Paolo Bonzini <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
Word wasi_unstable_fd_writeHandler(void* raw_context, Word fd, Word iovs, Word iovs_len,
Word nwritten_ptr) {
auto context = WASM_CONTEXT(raw_context);
Word nwritten(0);
auto result = writevImpl(raw_context, fd, iovs, iovs_len, &nwritten);
if (result.u64_ != 0) { // __WASI_ESUCCESS
return result;
}
if (!context->wasmVm()->setWord(nwritten_ptr.u64_, Word(nwritten))) {
return 21; // __WASI_EFAULT
}
return 0; // __WASI_ESUCCESS
}
| 0 |
[
"CWE-476"
] |
envoy
|
8788a3cf255b647fd14e6b5e2585abaaedb28153
| 131,428,986,782,311,910,000,000,000,000,000,000,000 | 14 |
1.4 - Do not call into the VM unless the VM Context has been created. (#24)
* Ensure that the in VM Context is created before onDone is called.
Signed-off-by: John Plevyak <[email protected]>
* Update as per offline discussion.
Signed-off-by: John Plevyak <[email protected]>
* Set in_vm_context_created_ in onNetworkNewConnection.
Signed-off-by: John Plevyak <[email protected]>
* Add guards to other network calls.
Signed-off-by: John Plevyak <[email protected]>
* Fix common/wasm tests.
Signed-off-by: John Plevyak <[email protected]>
* Patch tests.
Signed-off-by: John Plevyak <[email protected]>
* Remove unecessary file from cherry-pick.
Signed-off-by: John Plevyak <[email protected]>
|
static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
{
struct ipq *qp;
struct ip4_create_arg *arg = a;
qp = container_of(q, struct ipq, q);
return qp->id == arg->iph->id &&
qp->saddr == arg->iph->saddr &&
qp->daddr == arg->iph->daddr &&
qp->protocol == arg->iph->protocol &&
qp->user == arg->user;
}
| 0 |
[] |
linux
|
3ef0eb0db4bf92c6d2510fe5c4dc51852746f206
| 244,704,418,841,748,030,000,000,000,000,000,000,000 | 12 |
net: frag, move LRU list maintenance outside of rwlock
Updating the fragmentation queues LRU (Least-Recently-Used) list,
required taking the hash writer lock. However, the LRU list isn't
tied to the hash at all, so we can use a separate lock for it.
Original-idea-by: Florian Westphal <[email protected]>
Signed-off-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static void pthreads_thread_id(CRYPTO_THREADID *tid)
{
CRYPTO_THREADID_set_numeric(tid, (unsigned long)pthread_self());
}
| 0 |
[] |
spice
|
ca5bbc5692e052159bce1a75f55dc60b36078749
| 185,672,992,520,897,800,000,000,000,000,000,000,000 | 4 |
With OpenSSL 1.1: Disable client-initiated renegotiation.
Fixes issue #49
Fixes BZ#1904459
Signed-off-by: Julien Ropé <[email protected]>
Reported-by: BlackKD
Acked-by: Frediano Ziglio <[email protected]>
|
static void inject_emulated_exception(struct kvm_vcpu *vcpu)
{
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
if (ctxt->exception.vector == PF_VECTOR)
kvm_propagate_fault(vcpu, &ctxt->exception);
else if (ctxt->exception.error_code_valid)
kvm_queue_exception_e(vcpu, ctxt->exception.vector,
ctxt->exception.error_code);
else
kvm_queue_exception(vcpu, ctxt->exception.vector);
}
| 0 |
[] |
kvm
|
0769c5de24621141c953fbe1f943582d37cb4244
| 199,515,248,110,079,860,000,000,000,000,000,000,000 | 11 |
KVM: x86: extend "struct x86_emulate_ops" with "get_cpuid"
In order to be able to proceed checks on CPU-specific properties
within the emulator, function "get_cpuid" is introduced.
With "get_cpuid" it is possible to virtually call the guests
"cpuid"-opcode without changing the VM's context.
[mtosatti: cleanup/beautify code]
Signed-off-by: Stephan Baerwolf <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
|
pdf_filter_gs_UseBlackPtComp(fz_context *ctx, pdf_processor *proc, pdf_obj *name)
{
pdf_filter_processor *p = (pdf_filter_processor*)proc;
filter_flush(ctx, p, 0);
if (p->chain->op_gs_UseBlackPtComp)
p->chain->op_gs_UseBlackPtComp(ctx, p->chain, name);
}
| 0 |
[
"CWE-125"
] |
mupdf
|
97096297d409ec6f206298444ba00719607e8ba8
| 63,036,079,307,752,290,000,000,000,000,000,000,000 | 7 |
Bug 701292: Fix test for missing/empty string.
|
static void test_bug54041_impl()
{
int rc;
MYSQL_STMT *stmt;
MYSQL_BIND bind;
DBUG_ENTER("test_bug54041");
myheader("test_bug54041");
rc= mysql_query(mysql, "DROP TABLE IF EXISTS t1");
myquery(rc);
rc= mysql_query(mysql, "CREATE TABLE t1 (a INT)");
myquery(rc);
stmt= mysql_simple_prepare(mysql, "SELECT a FROM t1 WHERE a > ?");
check_stmt(stmt);
verify_param_count(stmt, 1);
memset(&bind, 0, sizeof(bind));
/* Any type that does not support long data handling. */
bind.buffer_type= MYSQL_TYPE_LONG;
rc= mysql_stmt_bind_param(stmt, &bind);
check_execute(stmt, rc);
/*
Trick the client API into sending a long data packet for
the parameter. Long data is only supported for string and
binary types.
*/
stmt->params[0].buffer_type= MYSQL_TYPE_STRING;
rc= mysql_stmt_send_long_data(stmt, 0, "data", 5);
check_execute(stmt, rc);
/* Undo API violation. */
stmt->params[0].buffer_type= MYSQL_TYPE_LONG;
rc= mysql_stmt_execute(stmt);
/* Incorrect arguments. */
check_execute_r(stmt, rc);
mysql_stmt_close(stmt);
rc= mysql_query(mysql, "DROP TABLE IF EXISTS t1");
myquery(rc);
DBUG_VOID_RETURN;
}
| 0 |
[
"CWE-284",
"CWE-295"
] |
mysql-server
|
3bd5589e1a5a93f9c224badf983cd65c45215390
| 80,493,282,506,727,730,000,000,000,000,000,000,000 | 51 |
WL#6791 : Redefine client --ssl option to imply enforced encryption
# Changed the meaning of the --ssl=1 option of all client binaries
to mean force ssl, not try ssl and fail over to eunecrypted
# Added a new MYSQL_OPT_SSL_ENFORCE mysql_options()
option to specify that an ssl connection is required.
# Added a new macro SSL_SET_OPTIONS() to the client
SSL handling headers that sets all the relevant SSL options at
once.
# Revamped all of the current native clients to use the new macro
# Removed some Windows line endings.
# Added proper handling of the new option into the ssl helper
headers.
# If SSL is mandatory assume that the media is secure enough
for the sha256 plugin to do unencrypted password exchange even
before establishing a connection.
# Set the default ssl cipher to DHE-RSA-AES256-SHA if none is
specified.
# updated test cases that require a non-default cipher to spawn
a mysql command line tool binary since mysqltest has no support
for specifying ciphers.
# updated the replication slave connection code to always enforce
SSL if any of the SSL config options is present.
# test cases added and updated.
# added a mysql_get_option() API to return mysql_options()
values. Used the new API inside the sha256 plugin.
# Fixed compilation warnings because of unused variables.
# Fixed test failures (mysql_ssl and bug13115401)
# Fixed whitespace issues.
# Fully implemented the mysql_get_option() function.
# Added a test case for mysql_get_option()
# fixed some trailing whitespace issues
# fixed some uint/int warnings in mysql_client_test.c
# removed shared memory option from non-windows get_options
tests
# moved MYSQL_OPT_LOCAL_INFILE to the uint options
|
bool Item_sum_bit::add_as_window(ulonglong value)
{
DBUG_ASSERT(as_window_function);
for (int i= 0; i < NUM_BIT_COUNTERS; i++)
{
bit_counters[i]+= (value & (1ULL << i)) ? 1 : 0;
}
// Prevent overflow;
num_values_added = MY_MAX(num_values_added, num_values_added + 1);
set_bits_from_counters();
return 0;
}
| 0 |
[
"CWE-120"
] |
server
|
eca207c46293bc72dd8d0d5622153fab4d3fccf1
| 78,285,738,019,338,460,000,000,000,000,000,000,000 | 12 |
MDEV-25317 Assertion `scale <= precision' failed in decimal_bin_size And Assertion `scale >= 0 && precision > 0 && scale <= precision' failed in decimal_bin_size_inline/decimal_bin_size.
Precision should be kept below DECIMAL_MAX_SCALE for computations.
It can be bigger in Item_decimal. I'd fix this too but it changes the
existing behaviour so problemmatic to ix.
|
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
return NULL;
}
| 0 |
[] |
linux-2.6
|
6a6029b8cefe0ca7e82f27f3904dbedba3de4e06
| 59,381,100,822,851,090,000,000,000,000,000,000,000 | 4 |
sched: simplify sched_slice()
Use the existing calc_delta_mine() calculation for sched_slice(). This
saves a divide and simplifies the code because we share it with the
other /cfs_rq->load users.
It also improves code size:
text data bss dec hex filename
42659 2740 144 45543 b1e7 sched.o.before
42093 2740 144 44977 afb1 sched.o.after
Signed-off-by: Ingo Molnar <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
|
status_begin(stream * s, pcl_state_t * pcs)
{
byte *buffer = pcs->status.buffer;
if (pcs->status.read_pos > 0) {
memmove(buffer, buffer + pcs->status.read_pos,
pcs->status.write_pos - pcs->status.read_pos);
pcs->status.write_pos -= pcs->status.read_pos;
pcs->status.read_pos = 0;
}
if (buffer == 0) {
buffer = gs_alloc_bytes(pcs->memory, STATUS_BUFFER_SIZE,
"status buffer");
pcs->status.buffer = buffer;
}
if (buffer == 0)
swrite_string(s, pcs->status.internal_buffer,
sizeof(pcs->status.internal_buffer));
else
swrite_string(s, buffer, gs_object_size(pcs->memory, buffer));
sseek(s, pcs->status.write_pos);
stputs(s, "PCL\r\n");
}
| 0 |
[
"CWE-787"
] |
ghostpdl
|
e1134d375e2ca176068e19a2aa9b040baffe1c22
| 114,900,279,199,117,600,000,000,000,000,000,000,000 | 23 |
Bug 705156(1): Avoid double free of PCL error buffer
|
void defer_recovery(float defer_for) {
defer_recovery_until = ceph_clock_now();
defer_recovery_until += defer_for;
}
| 0 |
[
"CWE-287",
"CWE-284"
] |
ceph
|
5ead97120e07054d80623dada90a5cc764c28468
| 267,223,499,858,982,030,000,000,000,000,000,000,000 | 4 |
auth/cephx: add authorizer challenge
Allow the accepting side of a connection to reject an initial authorizer
with a random challenge. The connecting side then has to respond with an
updated authorizer proving they are able to decrypt the service's challenge
and that the new authorizer was produced for this specific connection
instance.
The accepting side requires this challenge and response unconditionally
if the client side advertises they have the feature bit. Servers wishing
to require this improved level of authentication simply have to require
the appropriate feature.
Signed-off-by: Sage Weil <[email protected]>
(cherry picked from commit f80b848d3f830eb6dba50123e04385173fa4540b)
# Conflicts:
# src/auth/Auth.h
# src/auth/cephx/CephxProtocol.cc
# src/auth/cephx/CephxProtocol.h
# src/auth/none/AuthNoneProtocol.h
# src/msg/Dispatcher.h
# src/msg/async/AsyncConnection.cc
- const_iterator
- ::decode vs decode
- AsyncConnection ctor arg noise
- get_random_bytes(), not cct->random()
|
class HuffmanStatistics *Scan::DCHuffmanStatisticsOf(UBYTE idx) const
{
class HuffmanTemplate *t;
ScanType sc = m_pFrame->ScanTypeOf();
assert(idx < 4);
t = m_pHuffman->DCTemplateOf(m_ucDCTable[idx],sc,m_pFrame->PrecisionOf(),
m_pFrame->HiddenPrecisionOf(),m_ucScanIndex);
if (t == NULL)
JPG_THROW(OBJECT_DOESNT_EXIST,"Scan::DCHuffmanStatisticsOf","requested DC Huffman coding table not defined");
return t->StatisticsOf(true);
}
| 0 |
[
"CWE-476"
] |
libjpeg
|
ea6315164b1649ff932a396b7600eac4bffcfaba
| 199,192,464,931,643,070,000,000,000,000,000,000,000 | 14 |
Added a check whether all components in a scan are actually present.
|
static inline int cmp_type_and_name(const struct ATTRIB *a1,
const struct ATTRIB *a2)
{
return a1->type != a2->type || a1->name_len != a2->name_len ||
(a1->name_len && memcmp(attr_name(a1), attr_name(a2),
a1->name_len * sizeof(short)));
}
| 0 |
[
"CWE-416"
] |
linux
|
f26967b9f7a830e228bb13fb41bd516ddd9d789d
| 292,539,975,344,773,330,000,000,000,000,000,000,000 | 7 |
fs/ntfs3: Fix invalid free in log_replay
log_read_rst() returns ENOMEM error when there is not enough memory.
In this case, if info is returned without initialization,
it attempts to kfree the uninitialized info->r_page pointer. This patch
moves the memset initialization code to before log_read_rst() is called.
Reported-by: Gerald Lee <[email protected]>
Signed-off-by: Namjae Jeon <[email protected]>
Signed-off-by: Konstantin Komarov <[email protected]>
|
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
struct f2fs_journal *journal = curseg->journal;
nid_t start_nid = START_NID(nid);
struct f2fs_nat_block *nat_blk;
struct page *page = NULL;
struct f2fs_nat_entry ne;
struct nat_entry *e;
int i;
ni->nid = nid;
/* Check nat cache */
down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (e) {
ni->ino = nat_get_ino(e);
ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e);
up_read(&nm_i->nat_tree_lock);
return;
}
memset(&ne, 0, sizeof(struct f2fs_nat_entry));
/* Check current segment summary */
down_read(&curseg->journal_rwsem);
i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
if (i >= 0) {
ne = nat_in_journal(journal, i);
node_info_from_raw_nat(ni, &ne);
}
up_read(&curseg->journal_rwsem);
if (i >= 0)
goto cache;
/* Fill node_info from nat page */
page = get_current_nat_page(sbi, start_nid);
nat_blk = (struct f2fs_nat_block *)page_address(page);
ne = nat_blk->entries[nid - start_nid];
node_info_from_raw_nat(ni, &ne);
f2fs_put_page(page, 1);
cache:
up_read(&nm_i->nat_tree_lock);
/* cache nat entry */
down_write(&nm_i->nat_tree_lock);
cache_nat_entry(sbi, nid, &ne);
up_write(&nm_i->nat_tree_lock);
}
| 0 |
[
"CWE-200",
"CWE-362"
] |
linux
|
30a61ddf8117c26ac5b295e1233eaa9629a94ca3
| 29,540,755,634,296,312,000,000,000,000,000,000,000 | 51 |
f2fs: fix race condition in between free nid allocator/initializer
In below concurrent case, allocated nid can be loaded into free nid cache
and be allocated again.
Thread A Thread B
- f2fs_create
- f2fs_new_inode
- alloc_nid
- __insert_nid_to_list(ALLOC_NID_LIST)
- f2fs_balance_fs_bg
- build_free_nids
- __build_free_nids
- scan_nat_page
- add_free_nid
- __lookup_nat_cache
- f2fs_add_link
- init_inode_metadata
- new_inode_page
- new_node_page
- set_node_addr
- alloc_nid_done
- __remove_nid_from_list(ALLOC_NID_LIST)
- __insert_nid_to_list(FREE_NID_LIST)
This patch makes nat cache lookup and free nid list operation being atomical
to avoid this race condition.
Signed-off-by: Jaegeuk Kim <[email protected]>
Signed-off-by: Chao Yu <[email protected]>
Signed-off-by: Jaegeuk Kim <[email protected]>
|
TfLiteStatus NonMaxSuppressionMultiClassRegularHelper(TfLiteContext* context,
TfLiteNode* node,
OpData* op_data,
const float* scores) {
const TfLiteTensor* input_box_encodings;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorBoxEncodings,
&input_box_encodings));
const TfLiteTensor* input_class_predictions;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensorClassPredictions,
&input_class_predictions));
const TfLiteTensor* decoded_boxes =
&context->tensors[op_data->decoded_boxes_index];
TfLiteTensor* detection_boxes;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionBoxes,
&detection_boxes));
TfLiteTensor* detection_classes;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionClasses,
&detection_classes));
TfLiteTensor* detection_scores;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorDetectionScores,
&detection_scores));
TfLiteTensor* num_detections;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensorNumDetections,
&num_detections));
const int num_boxes = input_box_encodings->dims->data[1];
const int num_classes = op_data->num_classes;
const int num_detections_per_class = op_data->detections_per_class;
const int max_detections = op_data->max_detections;
const int num_classes_with_background =
input_class_predictions->dims->data[2];
// The row index offset is 1 if background class is included and 0 otherwise.
int label_offset = num_classes_with_background - num_classes;
TF_LITE_ENSURE(context, num_detections_per_class > 0);
// For each class, perform non-max suppression.
std::vector<float> class_scores(num_boxes);
std::vector<int> box_indices_after_regular_non_max_suppression(
num_boxes + max_detections);
std::vector<float> scores_after_regular_non_max_suppression(num_boxes +
max_detections);
int size_of_sorted_indices = 0;
std::vector<int> sorted_indices;
sorted_indices.resize(num_boxes + max_detections);
std::vector<float> sorted_values;
sorted_values.resize(max_detections);
for (int col = 0; col < num_classes; col++) {
for (int row = 0; row < num_boxes; row++) {
// Get scores of boxes corresponding to all anchors for single class
class_scores[row] =
*(scores + row * num_classes_with_background + col + label_offset);
}
// Perform non-maximal suppression on single class
std::vector<int> selected;
TF_LITE_ENSURE_STATUS(NonMaxSuppressionSingleClassHelper(
context, node, op_data, class_scores, &selected,
num_detections_per_class));
// Add selected indices from non-max suppression of boxes in this class
int output_index = size_of_sorted_indices;
for (const auto& selected_index : selected) {
box_indices_after_regular_non_max_suppression[output_index] =
(selected_index * num_classes_with_background + col + label_offset);
scores_after_regular_non_max_suppression[output_index] =
class_scores[selected_index];
output_index++;
}
// Sort the max scores among the selected indices
// Get the indices for top scores
int num_indices_to_sort = std::min(output_index, max_detections);
DecreasingPartialArgSort(scores_after_regular_non_max_suppression.data(),
output_index, num_indices_to_sort,
sorted_indices.data());
// Copy values to temporary vectors
for (int row = 0; row < num_indices_to_sort; row++) {
int temp = sorted_indices[row];
sorted_indices[row] = box_indices_after_regular_non_max_suppression[temp];
sorted_values[row] = scores_after_regular_non_max_suppression[temp];
}
// Copy scores and indices from temporary vectors
for (int row = 0; row < num_indices_to_sort; row++) {
box_indices_after_regular_non_max_suppression[row] = sorted_indices[row];
scores_after_regular_non_max_suppression[row] = sorted_values[row];
}
size_of_sorted_indices = num_indices_to_sort;
}
// Allocate output tensors
for (int output_box_index = 0; output_box_index < max_detections;
output_box_index++) {
if (output_box_index < size_of_sorted_indices) {
const int anchor_index = floor(
box_indices_after_regular_non_max_suppression[output_box_index] /
num_classes_with_background);
const int class_index =
box_indices_after_regular_non_max_suppression[output_box_index] -
anchor_index * num_classes_with_background - label_offset;
const float selected_score =
scores_after_regular_non_max_suppression[output_box_index];
// detection_boxes
ReInterpretTensor<BoxCornerEncoding*>(detection_boxes)[output_box_index] =
ReInterpretTensor<const BoxCornerEncoding*>(
decoded_boxes)[anchor_index];
// detection_classes
GetTensorData<float>(detection_classes)[output_box_index] = class_index;
// detection_scores
GetTensorData<float>(detection_scores)[output_box_index] = selected_score;
} else {
ReInterpretTensor<BoxCornerEncoding*>(
detection_boxes)[output_box_index] = {0.0f, 0.0f, 0.0f, 0.0f};
// detection_classes
GetTensorData<float>(detection_classes)[output_box_index] = 0.0f;
// detection_scores
GetTensorData<float>(detection_scores)[output_box_index] = 0.0f;
}
}
GetTensorData<float>(num_detections)[0] = size_of_sorted_indices;
box_indices_after_regular_non_max_suppression.clear();
scores_after_regular_non_max_suppression.clear();
return kTfLiteOk;
}
| 0 |
[
"CWE-125",
"CWE-787"
] |
tensorflow
|
1970c2158b1ffa416d159d03c3370b9a462aee35
| 22,235,811,954,621,150,000,000,000,000,000,000,000 | 131 |
[tflite]: Insert `nullptr` checks when obtaining tensors.
As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages.
We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`).
PiperOrigin-RevId: 332521299
Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
|
static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
unsigned int idx)
{
hlist_add_head(&timer->entry, base->vectors + idx);
__set_bit(idx, base->pending_map);
timer_set_idx(timer, idx);
trace_timer_start(timer, timer->expires, timer->flags);
}
| 0 |
[
"CWE-200",
"CWE-330"
] |
linux
|
f227e3ec3b5cad859ad15666874405e8c1bbc1d4
| 259,665,884,664,898,820,000,000,000,000,000,000,000 | 9 |
random32: update the net random state on interrupt and activity
This modifies the first 32 bits out of the 128 bits of a random CPU's
net_rand_state on interrupt or CPU activity to complicate remote
observations that could lead to guessing the network RNG's internal
state.
Note that depending on some network devices' interrupt rate moderation
or binding, this re-seeding might happen on every packet or even almost
never.
In addition, with NOHZ some CPUs might not even get timer interrupts,
leaving their local state rarely updated, while they are running
networked processes making use of the random state. For this reason, we
also perform this update in update_process_times() in order to at least
update the state when there is user or system activity, since it's the
only case we care about.
Reported-by: Amit Klein <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: "Jason A. Donenfeld" <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: <[email protected]>
Signed-off-by: Willy Tarreau <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
|
static void complete_nread_binary(conn *c) {
assert(c != NULL);
assert(c->cmd >= 0);
switch(c->substate) {
case bin_reading_set_header:
if (c->cmd == PROTOCOL_BINARY_CMD_APPEND ||
c->cmd == PROTOCOL_BINARY_CMD_PREPEND) {
process_bin_append_prepend(c);
} else {
process_bin_update(c);
}
break;
case bin_read_set_value:
complete_update_bin(c);
break;
case bin_reading_get_key:
process_bin_get(c);
break;
case bin_reading_stat:
process_bin_stat(c);
break;
case bin_reading_del_header:
process_bin_delete(c);
break;
case bin_reading_incr_header:
complete_incr_bin(c);
break;
case bin_read_flush_exptime:
process_bin_flush(c);
break;
case bin_reading_sasl_auth:
process_bin_sasl_auth(c);
break;
case bin_reading_sasl_auth_data:
process_bin_complete_sasl_auth(c);
break;
default:
fprintf(stderr, "Not handling substate %d\n", c->substate);
assert(0);
}
}
| 0 |
[
"CWE-20"
] |
memcached
|
d9cd01ede97f4145af9781d448c62a3318952719
| 46,615,681,758,519,590,000,000,000,000,000,000,000 | 42 |
Use strncmp when checking for large ascii multigets.
|
get_name_end_code_point(OnigCodePoint start)
{
switch (start) {
case '<': return (OnigCodePoint )'>'; break;
case '\'': return (OnigCodePoint )'\''; break;
case '(': return (OnigCodePoint )')'; break;
case '{': return (OnigCodePoint )'}'; break;
default:
break;
}
return (OnigCodePoint )0;
}
| 0 |
[
"CWE-476"
] |
Onigmo
|
00cc7e28a3ed54b3b512ef3b58ea737a57acf1f9
| 286,025,768,661,520,740,000,000,000,000,000,000,000 | 13 |
Fix SEGV in onig_error_code_to_str() (Fix #132)
When onig_new(ONIG_SYNTAX_PERL) fails with ONIGERR_INVALID_GROUP_NAME,
onig_error_code_to_str() crashes.
onig_scan_env_set_error_string() should have been used when returning
ONIGERR_INVALID_GROUP_NAME.
|
void MainWindow::on_actionTutorials_triggered()
{
QDesktopServices::openUrl(QUrl("https://www.shotcut.org/tutorials/"));
}
| 0 |
[
"CWE-89",
"CWE-327",
"CWE-295"
] |
shotcut
|
f008adc039642307f6ee3378d378cdb842e52c1d
| 276,311,226,005,064,000,000,000,000,000,000,000,000 | 4 |
fix upgrade check is not using TLS correctly
|
static JSON_INLINE int bucket_is_empty(hashtable_t *hashtable, bucket_t *bucket)
{
return bucket->first == &hashtable->list && bucket->first == bucket->last;
}
| 0 |
[
"CWE-310"
] |
jansson
|
8f80c2d83808150724d31793e6ade92749b1faa4
| 155,668,151,341,536,310,000,000,000,000,000,000,000 | 4 |
CVE-2013-6401: Change hash function, randomize hashes
Thanks to Florian Weimer and Eric Sesterhenn for reporting, reviewing
and testing.
|
extern "C" void *SUB_REALLOC(void *ptr, size_t size)
{
void *new_ptr = NULL;
#ifdef MBED_MEM_TRACING_ENABLED
mbed_mem_trace_lock();
#endif
#ifdef MBED_HEAP_STATS_ENABLED
// Note - no lock needed since malloc and free are thread safe
// Get old size
uint32_t old_size = 0;
if (ptr != NULL) {
alloc_info_t *alloc_info = ((alloc_info_t *)ptr) - 1;
old_size = alloc_info->size;
}
// Allocate space
if (size != 0) {
new_ptr = malloc(size);
}
// If the new buffer has been allocated copy the data to it
// and free the old buffer
if ((new_ptr != NULL) && (ptr != NULL)) {
uint32_t copy_size = (old_size < size) ? old_size : size;
memcpy(new_ptr, (void *)ptr, copy_size);
free(ptr);
}
#else // #ifdef MBED_HEAP_STATS_ENABLED
new_ptr = SUPER_REALLOC(ptr, size);
#endif // #ifdef MBED_HEAP_STATS_ENABLED
#ifdef MBED_MEM_TRACING_ENABLED
mbed_mem_trace_realloc(new_ptr, ptr, size, MBED_CALLER_ADDR());
mbed_mem_trace_unlock();
#endif // #ifdef MBED_MEM_TRACING_ENABLED
return new_ptr;
}
| 0 |
[
"CWE-190"
] |
mbed-os
|
151ebfcfc9f2383ee11ce3c771c3bf92900d6b43
| 278,997,080,826,780,700,000,000,000,000,000,000,000 | 37 |
Add integer overflow check to the malloc wrappers
Add a check that the combined size of the buffer to allocate and
alloc_info_t does not exceed the maximum integer value representable
by size_t.
|
void *js_touserdata(js_State *J, int idx, const char *tag)
{
js_Value *v = stackidx(J, idx);
if (v->type == JS_TOBJECT && v->u.object->type == JS_CUSERDATA)
if (!strcmp(tag, v->u.object->u.user.tag))
return v->u.object->u.user.data;
js_typeerror(J, "not a %s", tag);
}
| 0 |
[
"CWE-476"
] |
mujs
|
77ab465f1c394bb77f00966cd950650f3f53cb24
| 255,027,788,095,449,420,000,000,000,000,000,000,000 | 8 |
Fix 697401: Error when dropping extra arguments to lightweight functions.
|
CreateFileListInfo(FileListInfoPtr pFileListInfo, char* path, int flag)
{
DIR* pDir = NULL;
struct dirent* pDirent = NULL;
if(path == NULL) {
return FAILURE;
}
if(strlen(path) == 0) {
/* In this case we will send the list of entries in ftp root*/
sprintf(path, "%s%s", GetFtpRoot(), "/");
}
if((pDir = opendir(path)) == NULL) {
rfbLog("File [%s]: Method [%s]: not able to open the dir\n",
__FILE__, __FUNCTION__);
return FAILURE;
}
while((pDirent = readdir(pDir))) {
if(strcmp(pDirent->d_name, ".") && strcmp(pDirent->d_name, "..")) {
struct stat stat_buf;
/*
int fpLen = sizeof(char)*(strlen(pDirent->d_name)+strlen(path)+2);
*/
char fullpath[PATH_MAX];
memset(fullpath, 0, PATH_MAX);
strcpy(fullpath, path);
if(path[strlen(path)-1] != '/')
strcat(fullpath, "/");
strcat(fullpath, pDirent->d_name);
if(stat(fullpath, &stat_buf) < 0) {
rfbLog("File [%s]: Method [%s]: Reading stat for file %s failed\n",
__FILE__, __FUNCTION__, fullpath);
continue;
}
if(S_ISDIR(stat_buf.st_mode)) {
if(AddFileListItemInfo(pFileListInfo, pDirent->d_name, -1, 0) == 0) {
rfbLog("File [%s]: Method [%s]: Add directory %s in the"
" list failed\n", __FILE__, __FUNCTION__, fullpath);
continue;
}
}
else {
if(flag) {
if(AddFileListItemInfo(pFileListInfo, pDirent->d_name,
stat_buf.st_size,
stat_buf.st_mtime) == 0) {
rfbLog("File [%s]: Method [%s]: Add file %s in the "
"list failed\n", __FILE__, __FUNCTION__, fullpath);
continue;
}
}
}
}
}
if(closedir(pDir) < 0) {
rfbLog("File [%s]: Method [%s]: ERROR Couldn't close dir\n",
__FILE__, __FUNCTION__);
}
return SUCCESS;
}
| 0 |
[
"CWE-416"
] |
libvncserver
|
73cb96fec028a576a5a24417b57723b55854ad7b
| 80,603,598,581,700,460,000,000,000,000,000,000,000 | 68 |
tightvnc-filetransfer: wait for download thread end in CloseUndoneFileDownload()
...and use it when deregistering the file transfer extension.
Closes #242
|
static int ext4_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
handle_t *handle;
int retries = 0;
struct page *page;
pgoff_t index;
unsigned from, to;
trace_mark(ext4_write_begin,
"dev %s ino %lu pos %llu len %u flags %u",
inode->i_sb->s_id, inode->i_ino,
(unsigned long long) pos, len, flags);
index = pos >> PAGE_CACHE_SHIFT;
from = pos & (PAGE_CACHE_SIZE - 1);
to = from + len;
retry:
handle = ext4_journal_start(inode, needed_blocks);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
goto out;
}
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page) {
ext4_journal_stop(handle);
ret = -ENOMEM;
goto out;
}
*pagep = page;
ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
ext4_get_block);
if (!ret && ext4_should_journal_data(inode)) {
ret = walk_page_buffers(handle, page_buffers(page),
from, to, NULL, do_journal_get_write_access);
}
if (ret) {
unlock_page(page);
ext4_journal_stop(handle);
page_cache_release(page);
/*
* block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
* i_size_read because we hold i_mutex.
*/
if (pos + len > inode->i_size)
vmtruncate(inode, inode->i_size);
}
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
out:
return ret;
}
| 0 |
[
"CWE-399"
] |
linux-2.6
|
06a279d636734da32bb62dd2f7b0ade666f65d7c
| 164,725,113,270,173,010,000,000,000,000,000,000,000 | 61 |
ext4: only use i_size_high for regular files
Directories are not allowed to be bigger than 2GB, so don't use
i_size_high for anything other than regular files. E2fsck should
complain about these inodes, but the simplest thing to do for the
kernel is to only use i_size_high for regular files.
This prevents an intentially corrupted filesystem from causing the
kernel to burn a huge amount of CPU and issuing error messages such
as:
EXT4-fs warning (device loop0): ext4_block_to_path: block 135090028 > max
Thanks to David Maciejak from Fortinet's FortiGuard Global Security
Research Team for reporting this issue.
http://bugzilla.kernel.org/show_bug.cgi?id=12375
Signed-off-by: "Theodore Ts'o" <[email protected]>
Cc: [email protected]
|
chkExternalURIBuffer(Buffer *buf)
{
	struct table2 *method;
	int idx;

	/* Turn every "<scheme>:<URI_PATTERN>" occurrence in the buffer into
	 * an anchor: first for each registered URI-method table, then for
	 * the built-in default table. */
	for (idx = 0; (method = urimethods[idx]) != NULL; idx++)
		for (; method->item1 != NULL; method++)
			reAnchor(buf, Sprintf("%s:%s", method->item1, URI_PATTERN)->ptr);

	for (method = default_urimethods; method->item1 != NULL; method++)
		reAnchor(buf, Sprintf("%s:%s", method->item1, URI_PATTERN)->ptr);
}
| 0 |
[
"CWE-119"
] |
w3m
|
ba9d78faeba9024c3e8840579c3b0e959ae2cb0f
| 48,845,879,331,211,650,000,000,000,000,000,000,000 | 14 |
Prevent global-buffer-overflow in parseURL()
Bug-Debian: https://github.com/tats/w3m/issues/41
|
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* Opened for write with O_TRUNC: wipe the trace buffer contents,
	 * either for one CPU or for all of them. */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		struct trace_buffer *buf = &tr->trace_buffer;
		int cpu = tracing_get_cpu(inode);

#ifdef CONFIG_TRACER_MAX_TRACE
		/* Latency tracers keep their snapshot in max_buffer. */
		if (tr->current_trace->print_max)
			buf = &tr->max_buffer;
#endif
		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(buf);
		else
			tracing_reset(buf, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		struct trace_iterator *iter = __tracing_open(inode, file, false);

		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	/* Drop the reference taken above if the open ultimately failed. */
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
| 0 |
[
"CWE-415"
] |
linux
|
4397f04575c44e1440ec2e49b6302785c95fd2f8
| 65,792,993,757,146,400,000,000,000,000,000,000,000 | 38 |
tracing: Fix possible double free on failure of allocating trace buffer
Jing Xia and Chunyan Zhang reported that on failing to allocate part of the
tracing buffer, memory is freed, but the pointers that point to them are not
initialized back to NULL, and later paths may try to free the freed memory
again. Jing and Chunyan fixed one of the locations that does this, but
missed a spot.
Link: http://lkml.kernel.org/r/[email protected]
Cc: [email protected]
Fixes: 737223fbca3b1 ("tracing: Consolidate buffer allocation code")
Reported-by: Jing Xia <[email protected]>
Reported-by: Chunyan Zhang <[email protected]>
Signed-off-by: Steven Rostedt (VMware) <[email protected]>
|
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
struct sk_buff *skb)
{
struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
struct br_ip ip;
if (br->multicast_disabled)
return NULL;
if (BR_INPUT_SKB_CB(skb)->igmp)
return NULL;
ip.proto = skb->protocol;
switch (skb->protocol) {
case htons(ETH_P_IP):
ip.u.ip4 = ip_hdr(skb)->daddr;
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case htons(ETH_P_IPV6):
ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr);
break;
#endif
default:
return NULL;
}
return br_mdb_ip_get(mdb, &ip);
}
| 0 |
[
"CWE-399"
] |
linux
|
6b0d6a9b4296fa16a28d10d416db7a770fc03287
| 153,770,663,402,869,860,000,000,000,000,000,000,000 | 29 |
bridge: Fix mglist corruption that leads to memory corruption
The list mp->mglist is used to indicate whether a multicast group
is active on the bridge interface itself as opposed to one of the
constituent interfaces in the bridge.
Unfortunately the operation that adds the mp->mglist node to the
list neglected to check whether it has already been added. This
leads to list corruption in the form of nodes pointing to itself.
Normally this would be quite obvious as it would cause an infinite
loop when walking the list. However, as this list is never actually
walked (which means that we don't really need it, I'll get rid of
it in a subsequent patch), this instead is hidden until we perform
a delete operation on the affected nodes.
As the same node may now be pointed to by more than one node, the
delete operations can then cause modification of freed memory.
This was observed in practice to cause corruption in 512-byte slabs,
most commonly leading to crashes in jbd2.
Thanks to Josef Bacik for pointing me in the right direction.
Reported-by: Ian Page Hands <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
PJ_DEF(pj_status_t) pjsip_tpmgr_register_tpfactory( pjsip_tpmgr *mgr,
						    pjsip_tpfactory *tpf)
{
    pjsip_tpfactory *it;

    pj_lock_acquire(mgr->lock);

    /* Reject a second registration of the very same factory instance. */
    for (it = mgr->factory_list.next; it != &mgr->factory_list; it = it->next) {
	if (it == tpf) {
	    pj_lock_release(mgr->lock);
	    return PJ_EEXISTS;
	}
    }

    pj_list_insert_before(&mgr->factory_list, tpf);

    pj_lock_release(mgr->lock);

    return PJ_SUCCESS;
}
| 0 |
[
"CWE-297",
"CWE-295"
] |
pjproject
|
67e46c1ac45ad784db5b9080f5ed8b133c122872
| 137,676,480,279,251,880,000,000,000,000,000,000,000 | 28 |
Merge pull request from GHSA-8hcp-hm38-mfph
* Check hostname during TLS transport selection
* revision based on feedback
* remove the code in create_request that has been moved
|
/*
 * Bind protocol @id to @hu and register the HCI device.
 *
 * Returns 0 on success, -EPROTONOSUPPORT if the protocol is unknown, or
 * the error from hci_uart_register_dev().
 *
 * Fix (CWE-416): the HCI_UART_PROTO_READY bit must only be published
 * AFTER hci_uart_register_dev() succeeds.  Setting it beforehand opened
 * a race window in which the tty receive path (hci_uart_tty_receive)
 * could observe the bit, start using hu->proto, and then collide with
 * the error path clearing the bit and freeing the protocol state --
 * a use-after-free.
 */
static int hci_uart_set_proto(struct hci_uart *hu, int id)
{
	const struct hci_uart_proto *p;
	int err;

	p = hci_uart_get_proto(id);
	if (!p)
		return -EPROTONOSUPPORT;

	hu->proto = p;

	err = hci_uart_register_dev(hu);
	if (err)
		return err;

	/* Registration complete: the receive path may now use hu->proto. */
	set_bit(HCI_UART_PROTO_READY, &hu->flags);
	return 0;
}
| 1 |
[
"CWE-416"
] |
linux
|
56897b217a1d0a91c9920cb418d6b3fe922f590a
| 324,377,223,174,079,700,000,000,000,000,000,000,000 | 20 |
Bluetooth: hci_ldisc: Postpone HCI_UART_PROTO_READY bit set in hci_uart_set_proto()
task A: task B:
hci_uart_set_proto flush_to_ldisc
- p->open(hu) -> h5_open //alloc h5 - receive_buf
- set_bit HCI_UART_PROTO_READY - tty_port_default_receive_buf
- hci_uart_register_dev - tty_ldisc_receive_buf
- hci_uart_tty_receive
- test_bit HCI_UART_PROTO_READY
- h5_recv
- clear_bit HCI_UART_PROTO_READY while() {
- p->open(hu) -> h5_close //free h5
- h5_rx_3wire_hdr
- h5_reset() //use-after-free
}
It could use ioctl to set hci uart proto, but there is
a use-after-free issue when hci_uart_register_dev() fail in
hci_uart_set_proto(), see stack above, fix this by setting
HCI_UART_PROTO_READY bit only when hci_uart_register_dev()
return success.
Reported-by: [email protected]
Signed-off-by: Kefeng Wang <[email protected]>
Reviewed-by: Jeremy Cline <[email protected]>
Signed-off-by: Marcel Holtmann <[email protected]>
|
// Kernel constructor: reads the num-features attribute once at
// construction time and caches it in num_features_.
// NOTE(review): kNumFeaturesName presumably expands to "num_features";
// confirm against the op registration.
explicit BoostedTreesQuantileStreamResourceGetBucketBoundariesOp(
OpKernelConstruction* const context)
: OpKernel(context) {
// OP_REQUIRES_OK aborts kernel construction with the failing Status if
// the attribute is missing or has the wrong type.
OP_REQUIRES_OK(context, context->GetAttr(kNumFeaturesName, &num_features_));
}
| 0 |
[
"CWE-703",
"CWE-681"
] |
tensorflow
|
8a84f7a2b5a2b27ecf88d25bad9ac777cd2f7992
| 220,572,269,980,903,620,000,000,000,000,000,000,000 | 5 |
Ensure num_streams >= 0 in tf.raw_ops.BoostedTreesCreateQuantileStreamResource
PiperOrigin-RevId: 387452765
Change-Id: I9990c760e177fabca6a3b9b4612ceeaeeba51495
|
compileRule(FileInfo *nested, CharacterClass **characterClasses,
TranslationTableCharacterAttributes *characterClassAttribute,
short opcodeLengths[], TranslationTableOffset *newRuleOffset,
TranslationTableRule **newRule, RuleName **ruleNames,
TranslationTableHeader **table) {
int lastToken = 0;
int ok = 1;
CharsString token;
TranslationTableOpcode opcode;
CharsString ruleChars;
CharsString ruleDots;
CharsString cells;
CharsString scratchPad;
CharsString emphClass;
TranslationTableCharacterAttributes after = 0;
TranslationTableCharacterAttributes before = 0;
TranslationTableCharacter *c = NULL;
widechar *patterns = NULL;
int k, i;
int noback, nofor;
noback = nofor = 0;
TranslationTableOffset tmp_offset;
doOpcode:
if (!getToken(nested, &token, NULL, &lastToken)) return 1; /* blank line */
if (token.chars[0] == '#' || token.chars[0] == '<') return 1; /* comment */
if (nested->lineNumber == 1 &&
(eqasc2uni((unsigned char *)"ISO", token.chars, 3) ||
eqasc2uni((unsigned char *)"UTF-8", token.chars, 5))) {
compileHyphenation(nested, &token, &lastToken, table);
return 1;
}
opcode = getOpcode(nested, &token, opcodeLengths);
switch (opcode) { /* Carry out operations */
case CTO_None:
break;
case CTO_IncludeFile: {
CharsString includedFile;
if (getToken(nested, &token, "include file name", &lastToken))
if (parseChars(nested, &includedFile, &token))
if (!includeFile(nested, &includedFile, characterClasses,
characterClassAttribute, opcodeLengths, newRuleOffset,
newRule, ruleNames, table))
ok = 0;
break;
}
case CTO_Locale:
break;
case CTO_Undefined:
tmp_offset = (*table)->undefined;
ok = compileBrailleIndicator(nested, "undefined character opcode", CTO_Undefined,
&tmp_offset, &lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->undefined = tmp_offset;
break;
case CTO_Match: {
CharsString ptn_before, ptn_after;
TranslationTableOffset offset;
int len, mrk;
size_t patternsByteSize = sizeof(*patterns) * 27720;
patterns = (widechar *)malloc(patternsByteSize);
if (!patterns) _lou_outOfMemory();
memset(patterns, 0xffff, patternsByteSize);
noback = 1;
getCharacters(nested, &ptn_before, &lastToken);
getRuleCharsText(nested, &ruleChars, &lastToken);
getCharacters(nested, &ptn_after, &lastToken);
getRuleDotsPattern(nested, &ruleDots, &lastToken);
if (!addRule(nested, opcode, &ruleChars, &ruleDots, after, before, newRuleOffset,
newRule, noback, nofor, table))
ok = 0;
if (ptn_before.chars[0] == '-' && ptn_before.length == 1)
len = _lou_pattern_compile(
&ptn_before.chars[0], 0, &patterns[1], 13841, *table);
else
len = _lou_pattern_compile(
&ptn_before.chars[0], ptn_before.length, &patterns[1], 13841, *table);
if (!len) {
ok = 0;
break;
}
mrk = patterns[0] = len + 1;
_lou_pattern_reverse(&patterns[1]);
if (ptn_after.chars[0] == '-' && ptn_after.length == 1)
len = _lou_pattern_compile(
&ptn_after.chars[0], 0, &patterns[mrk], 13841, *table);
else
len = _lou_pattern_compile(
&ptn_after.chars[0], ptn_after.length, &patterns[mrk], 13841, *table);
if (!len) {
ok = 0;
break;
}
len += mrk;
if (!allocateSpaceInTable(nested, &offset, len * sizeof(widechar), table)) {
ok = 0;
break;
}
/* realloc may have moved table, so make sure newRule is still valid */
*newRule = (TranslationTableRule *)&(*table)->ruleArea[*newRuleOffset];
memcpy(&(*table)->ruleArea[offset], patterns, len * sizeof(widechar));
(*newRule)->patterns = offset;
break;
}
case CTO_BackMatch: {
CharsString ptn_before, ptn_after;
TranslationTableOffset offset;
int len, mrk;
size_t patternsByteSize = sizeof(*patterns) * 27720;
patterns = (widechar *)malloc(patternsByteSize);
if (!patterns) _lou_outOfMemory();
memset(patterns, 0xffff, patternsByteSize);
nofor = 1;
getCharacters(nested, &ptn_before, &lastToken);
getRuleCharsText(nested, &ruleChars, &lastToken);
getCharacters(nested, &ptn_after, &lastToken);
getRuleDotsPattern(nested, &ruleDots, &lastToken);
if (!addRule(nested, opcode, &ruleChars, &ruleDots, 0, 0, newRuleOffset, newRule,
noback, nofor, table))
ok = 0;
if (ptn_before.chars[0] == '-' && ptn_before.length == 1)
len = _lou_pattern_compile(
&ptn_before.chars[0], 0, &patterns[1], 13841, *table);
else
len = _lou_pattern_compile(
&ptn_before.chars[0], ptn_before.length, &patterns[1], 13841, *table);
if (!len) {
ok = 0;
break;
}
mrk = patterns[0] = len + 1;
_lou_pattern_reverse(&patterns[1]);
if (ptn_after.chars[0] == '-' && ptn_after.length == 1)
len = _lou_pattern_compile(
&ptn_after.chars[0], 0, &patterns[mrk], 13841, *table);
else
len = _lou_pattern_compile(
&ptn_after.chars[0], ptn_after.length, &patterns[mrk], 13841, *table);
if (!len) {
ok = 0;
break;
}
len += mrk;
if (!allocateSpaceInTable(nested, &offset, len * sizeof(widechar), table)) {
ok = 0;
break;
}
/* realloc may have moved table, so make sure newRule is still valid */
*newRule = (TranslationTableRule *)&(*table)->ruleArea[*newRuleOffset];
memcpy(&(*table)->ruleArea[offset], patterns, len * sizeof(widechar));
(*newRule)->patterns = offset;
break;
}
case CTO_BegCapsPhrase:
tmp_offset = (*table)->emphRules[capsRule][begPhraseOffset];
ok = compileBrailleIndicator(nested, "first word capital sign",
CTO_BegCapsPhraseRule, &tmp_offset, &lastToken, newRuleOffset, newRule,
noback, nofor, table);
(*table)->emphRules[capsRule][begPhraseOffset] = tmp_offset;
break;
case CTO_EndCapsPhrase:
switch (compileBeforeAfter(nested, &lastToken)) {
case 1: // before
if ((*table)->emphRules[capsRule][endPhraseAfterOffset]) {
compileError(nested, "Capital sign after last word already defined.");
ok = 0;
break;
}
tmp_offset = (*table)->emphRules[capsRule][endPhraseBeforeOffset];
ok = compileBrailleIndicator(nested, "capital sign before last word",
CTO_EndCapsPhraseBeforeRule, &tmp_offset, &lastToken, newRuleOffset,
newRule, noback, nofor, table);
(*table)->emphRules[capsRule][endPhraseBeforeOffset] = tmp_offset;
break;
case 2: // after
if ((*table)->emphRules[capsRule][endPhraseBeforeOffset]) {
compileError(nested, "Capital sign before last word already defined.");
ok = 0;
break;
}
tmp_offset = (*table)->emphRules[capsRule][endPhraseAfterOffset];
ok = compileBrailleIndicator(nested, "capital sign after last word",
CTO_EndCapsPhraseAfterRule, &tmp_offset, &lastToken, newRuleOffset,
newRule, noback, nofor, table);
(*table)->emphRules[capsRule][endPhraseAfterOffset] = tmp_offset;
break;
default: // error
compileError(nested, "Invalid lastword indicator location.");
ok = 0;
break;
}
break;
case CTO_BegCaps:
tmp_offset = (*table)->emphRules[capsRule][begOffset];
ok = compileBrailleIndicator(nested, "first letter capital sign", CTO_BegCapsRule,
&tmp_offset, &lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->emphRules[capsRule][begOffset] = tmp_offset;
break;
case CTO_EndCaps:
tmp_offset = (*table)->emphRules[capsRule][endOffset];
ok = compileBrailleIndicator(nested, "last letter capital sign", CTO_EndCapsRule,
&tmp_offset, &lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->emphRules[capsRule][endOffset] = tmp_offset;
break;
case CTO_CapsLetter:
tmp_offset = (*table)->emphRules[capsRule][letterOffset];
ok = compileBrailleIndicator(nested, "single letter capital sign",
CTO_CapsLetterRule, &tmp_offset, &lastToken, newRuleOffset, newRule,
noback, nofor, table);
(*table)->emphRules[capsRule][letterOffset] = tmp_offset;
break;
case CTO_BegCapsWord:
tmp_offset = (*table)->emphRules[capsRule][begWordOffset];
ok = compileBrailleIndicator(nested, "capital word", CTO_BegCapsWordRule,
&tmp_offset, &lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->emphRules[capsRule][begWordOffset] = tmp_offset;
break;
case CTO_EndCapsWord:
tmp_offset = (*table)->emphRules[capsRule][endWordOffset];
ok = compileBrailleIndicator(nested, "capital word stop", CTO_EndCapsWordRule,
&tmp_offset, &lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->emphRules[capsRule][endWordOffset] = tmp_offset;
break;
case CTO_LenCapsPhrase:
ok = (*table)->emphRules[capsRule][lenPhraseOffset] =
compileNumber(nested, &lastToken);
break;
/* these 9 general purpose emphasis opcodes are compiled further down to more specific
* internal opcodes:
* - emphletter
* - begemphword
* - endemphword
* - begemph
* - endemph
* - begemphphrase
* - endemphphrase
* - lenemphphrase
*/
case CTO_EmphClass:
if (getToken(nested, &token, "emphasis class", &lastToken))
if (parseChars(nested, &emphClass, &token)) {
char *s = malloc(sizeof(char) * (emphClass.length + 1));
for (k = 0; k < emphClass.length; k++) s[k] = (char)emphClass.chars[k];
s[k++] = '\0';
for (i = 0; (*table)->emphClasses[i]; i++)
if (strcmp(s, (*table)->emphClasses[i]) == 0) {
_lou_logMessage(LOG_WARN, "Duplicate emphasis class: %s", s);
warningCount++;
free(s);
return 1;
}
if (i < MAX_EMPH_CLASSES) {
switch (i) {
/* For backwards compatibility (i.e. because programs will assume the
* first 3
* typeform bits are `italic', `underline' and `bold') we require that
* the first
* 3 emphclass definitions are (in that order):
*
* emphclass italic
* emphclass underline
* emphclass bold
*
* While it would be possible to use the emphclass opcode only for
* defining
* _additional_ classes (not allowing for them to be called italic,
* underline or
* bold), thereby reducing the amount of boilerplate, we deliberately
* choose not
* to do that in order to not give italic, underline and bold any
* special
* status. The hope is that eventually all programs will use liblouis
* for
* emphasis the recommended way (i.e. by looking up the supported
* typeforms in
* the documentation or API) so that we can drop this restriction.
*/
case 0:
if (strcmp(s, "italic") != 0) {
_lou_logMessage(LOG_ERROR,
"First emphasis class must be \"italic\" but got %s",
s);
errorCount++;
free(s);
return 0;
}
break;
case 1:
if (strcmp(s, "underline") != 0) {
_lou_logMessage(LOG_ERROR,
"Second emphasis class must be \"underline\" but got "
"%s",
s);
errorCount++;
free(s);
return 0;
}
break;
case 2:
if (strcmp(s, "bold") != 0) {
_lou_logMessage(LOG_ERROR,
"Third emphasis class must be \"bold\" but got %s",
s);
errorCount++;
free(s);
return 0;
}
break;
}
(*table)->emphClasses[i] = s;
(*table)->emphClasses[i + 1] = NULL;
ok = 1;
break;
} else {
_lou_logMessage(LOG_ERROR,
"Max number of emphasis classes (%i) reached",
MAX_EMPH_CLASSES);
errorCount++;
free(s);
ok = 0;
break;
}
}
compileError(nested, "emphclass must be followed by a valid class name.");
ok = 0;
break;
case CTO_EmphLetter:
case CTO_BegEmphWord:
case CTO_EndEmphWord:
case CTO_BegEmph:
case CTO_EndEmph:
case CTO_BegEmphPhrase:
case CTO_EndEmphPhrase:
case CTO_LenEmphPhrase:
ok = 0;
if (getToken(nested, &token, "emphasis class", &lastToken))
if (parseChars(nested, &emphClass, &token)) {
char *s = malloc(sizeof(char) * (emphClass.length + 1));
for (k = 0; k < emphClass.length; k++) s[k] = (char)emphClass.chars[k];
s[k++] = '\0';
for (i = 0; (*table)->emphClasses[i]; i++)
if (strcmp(s, (*table)->emphClasses[i]) == 0) break;
if (!(*table)->emphClasses[i]) {
_lou_logMessage(LOG_ERROR, "Emphasis class %s not declared", s);
errorCount++;
free(s);
break;
}
i++; // in table->emphRules the first index is used for caps
if (opcode == CTO_EmphLetter) {
tmp_offset = (*table)->emphRules[i][letterOffset];
ok = compileBrailleIndicator(nested, "single letter",
CTO_Emph1LetterRule + letterOffset + (8 * i), &tmp_offset,
&lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->emphRules[i][letterOffset] = tmp_offset;
} else if (opcode == CTO_BegEmphWord) {
tmp_offset = (*table)->emphRules[i][begWordOffset];
ok = compileBrailleIndicator(nested, "word",
CTO_Emph1LetterRule + begWordOffset + (8 * i), &tmp_offset,
&lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->emphRules[i][begWordOffset] = tmp_offset;
} else if (opcode == CTO_EndEmphWord) {
tmp_offset = (*table)->emphRules[i][endWordOffset];
ok = compileBrailleIndicator(nested, "word stop",
CTO_Emph1LetterRule + endWordOffset + (8 * i), &tmp_offset,
&lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->emphRules[i][endWordOffset] = tmp_offset;
} else if (opcode == CTO_BegEmph) {
/* fail if both begemph and any of begemphphrase or begemphword are
* defined */
if ((*table)->emphRules[i][begWordOffset] ||
(*table)->emphRules[i][begPhraseOffset]) {
compileError(nested,
"Cannot define emphasis for both no context and word or "
"phrase context, i.e. cannot have both begemph and "
"begemphword or begemphphrase.");
ok = 0;
break;
}
tmp_offset = (*table)->emphRules[i][begOffset];
ok = compileBrailleIndicator(nested, "first letter",
CTO_Emph1LetterRule + begOffset + (8 * i), &tmp_offset,
&lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->emphRules[i][begOffset] = tmp_offset;
} else if (opcode == CTO_EndEmph) {
if ((*table)->emphRules[i][endWordOffset] ||
(*table)->emphRules[i][endPhraseBeforeOffset] ||
(*table)->emphRules[i][endPhraseAfterOffset]) {
compileError(nested,
"Cannot define emphasis for both no context and word or "
"phrase context, i.e. cannot have both endemph and "
"endemphword or endemphphrase.");
ok = 0;
break;
}
tmp_offset = (*table)->emphRules[i][endOffset];
ok = compileBrailleIndicator(nested, "last letter",
CTO_Emph1LetterRule + endOffset + (8 * i), &tmp_offset,
&lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->emphRules[i][endOffset] = tmp_offset;
} else if (opcode == CTO_BegEmphPhrase) {
tmp_offset = (*table)->emphRules[i][begPhraseOffset];
ok = compileBrailleIndicator(nested, "first word",
CTO_Emph1LetterRule + begPhraseOffset + (8 * i), &tmp_offset,
&lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->emphRules[i][begPhraseOffset] = tmp_offset;
} else if (opcode == CTO_EndEmphPhrase)
switch (compileBeforeAfter(nested, &lastToken)) {
case 1: // before
if ((*table)->emphRules[i][endPhraseAfterOffset]) {
compileError(nested, "last word after already defined.");
ok = 0;
break;
}
tmp_offset = (*table)->emphRules[i][endPhraseBeforeOffset];
ok = compileBrailleIndicator(nested, "last word before",
CTO_Emph1LetterRule + endPhraseBeforeOffset + (8 * i),
&tmp_offset, &lastToken, newRuleOffset, newRule, noback,
nofor, table);
(*table)->emphRules[i][endPhraseBeforeOffset] = tmp_offset;
break;
case 2: // after
if ((*table)->emphRules[i][endPhraseBeforeOffset]) {
compileError(nested, "last word before already defined.");
ok = 0;
break;
}
tmp_offset = (*table)->emphRules[i][endPhraseAfterOffset];
ok = compileBrailleIndicator(nested, "last word after",
CTO_Emph1LetterRule + endPhraseAfterOffset + (8 * i),
&tmp_offset, &lastToken, newRuleOffset, newRule, noback,
nofor, table);
(*table)->emphRules[i][endPhraseAfterOffset] = tmp_offset;
break;
default: // error
compileError(nested, "Invalid lastword indicator location.");
ok = 0;
break;
}
else if (opcode == CTO_LenEmphPhrase)
ok = (*table)->emphRules[i][lenPhraseOffset] =
compileNumber(nested, &lastToken);
free(s);
}
break;
case CTO_LetterSign:
tmp_offset = (*table)->letterSign;
ok = compileBrailleIndicator(nested, "letter sign", CTO_LetterRule, &tmp_offset,
&lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->letterSign = tmp_offset;
break;
case CTO_NoLetsignBefore:
if (getRuleCharsText(nested, &ruleChars, &lastToken)) {
if (((*table)->noLetsignBeforeCount + ruleChars.length) > LETSIGNSIZE) {
compileError(nested, "More than %d characters", LETSIGNSIZE);
ok = 0;
break;
}
for (k = 0; k < ruleChars.length; k++)
(*table)->noLetsignBefore[(*table)->noLetsignBeforeCount++] =
ruleChars.chars[k];
}
break;
case CTO_NoLetsign:
if (getRuleCharsText(nested, &ruleChars, &lastToken)) {
if (((*table)->noLetsignCount + ruleChars.length) > LETSIGNSIZE) {
compileError(nested, "More than %d characters", LETSIGNSIZE);
ok = 0;
break;
}
for (k = 0; k < ruleChars.length; k++)
(*table)->noLetsign[(*table)->noLetsignCount++] = ruleChars.chars[k];
}
break;
case CTO_NoLetsignAfter:
if (getRuleCharsText(nested, &ruleChars, &lastToken)) {
if (((*table)->noLetsignAfterCount + ruleChars.length) > LETSIGNSIZE) {
compileError(nested, "More than %d characters", LETSIGNSIZE);
ok = 0;
break;
}
for (k = 0; k < ruleChars.length; k++)
(*table)->noLetsignAfter[(*table)->noLetsignAfterCount++] =
ruleChars.chars[k];
}
break;
case CTO_NumberSign:
tmp_offset = (*table)->numberSign;
ok = compileBrailleIndicator(nested, "number sign", CTO_NumberRule, &tmp_offset,
&lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->numberSign = tmp_offset;
break;
case CTO_Attribute:
c = NULL;
ok = 1;
if (!getToken(nested, &ruleChars, "attribute number", &lastToken)) {
compileError(nested, "Expected attribute number.");
ok = 0;
break;
}
k = -1;
switch (ruleChars.chars[0]) {
case '0':
k = 0;
break;
case '1':
k = 1;
break;
case '2':
k = 2;
break;
case '3':
k = 3;
break;
case '4':
k = 4;
break;
case '5':
k = 5;
break;
case '6':
k = 6;
break;
case '7':
k = 7;
break;
}
if (k == -1) {
compileError(nested, "Invalid attribute number.");
ok = 0;
break;
}
if (getRuleCharsText(nested, &ruleChars, &lastToken)) {
for (i = 0; i < ruleChars.length; i++) {
c = compile_findCharOrDots(ruleChars.chars[i], 0, *table);
if (c)
c->attributes |= (CTC_UserDefined0 << k);
else {
compileError(nested, "Attribute character undefined");
ok = 0;
break;
}
}
}
break;
case CTO_NumericModeChars:
c = NULL;
ok = 1;
if (getRuleCharsText(nested, &ruleChars, &lastToken)) {
for (k = 0; k < ruleChars.length; k++) {
c = compile_findCharOrDots(ruleChars.chars[k], 0, *table);
if (c)
c->attributes |= CTC_NumericMode;
else {
compileError(nested, "Numeric mode character undefined");
ok = 0;
break;
}
}
(*table)->usesNumericMode = 1;
}
break;
case CTO_NumericNoContractChars:
c = NULL;
ok = 1;
if (getRuleCharsText(nested, &ruleChars, &lastToken)) {
for (k = 0; k < ruleChars.length; k++) {
c = compile_findCharOrDots(ruleChars.chars[k], 0, *table);
if (c)
c->attributes |= CTC_NumericNoContract;
else {
compileError(nested, "Numeric no contraction character undefined");
ok = 0;
break;
}
}
(*table)->usesNumericMode = 1;
}
break;
case CTO_NoContractSign:
tmp_offset = (*table)->noContractSign;
ok = compileBrailleIndicator(nested, "no contractions sign", CTO_NoContractRule,
&tmp_offset, &lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->noContractSign = tmp_offset;
break;
case CTO_SeqDelimiter:
c = NULL;
ok = 1;
if (getRuleCharsText(nested, &ruleChars, &lastToken)) {
for (k = 0; k < ruleChars.length; k++) {
c = compile_findCharOrDots(ruleChars.chars[k], 0, *table);
if (c)
c->attributes |= CTC_SeqDelimiter;
else {
compileError(nested, "Sequence delimiter character undefined");
ok = 0;
break;
}
}
(*table)->usesSequences = 1;
}
break;
case CTO_SeqBeforeChars:
c = NULL;
ok = 1;
if (getRuleCharsText(nested, &ruleChars, &lastToken)) {
for (k = 0; k < ruleChars.length; k++) {
c = compile_findCharOrDots(ruleChars.chars[k], 0, *table);
if (c)
c->attributes |= CTC_SeqBefore;
else {
compileError(nested, "Sequence before character undefined");
ok = 0;
break;
}
}
}
break;
case CTO_SeqAfterChars:
c = NULL;
ok = 1;
if (getRuleCharsText(nested, &ruleChars, &lastToken)) {
for (k = 0; k < ruleChars.length; k++) {
c = compile_findCharOrDots(ruleChars.chars[k], 0, *table);
if (c)
c->attributes |= CTC_SeqAfter;
else {
compileError(nested, "Sequence after character undefined");
ok = 0;
break;
}
}
}
break;
case CTO_SeqAfterPattern:
if (getRuleCharsText(nested, &ruleChars, &lastToken)) {
if (((*table)->seqPatternsCount + ruleChars.length + 1) > SEQPATTERNSIZE) {
compileError(nested, "More than %d characters", SEQPATTERNSIZE);
ok = 0;
break;
}
for (k = 0; k < ruleChars.length; k++)
(*table)->seqPatterns[(*table)->seqPatternsCount++] = ruleChars.chars[k];
(*table)->seqPatterns[(*table)->seqPatternsCount++] = 0;
}
break;
case CTO_SeqAfterExpression:
if (getRuleCharsText(nested, &ruleChars, &lastToken)) {
for ((*table)->seqAfterExpressionLength = 0;
(*table)->seqAfterExpressionLength < ruleChars.length;
(*table)->seqAfterExpressionLength++)
(*table)->seqAfterExpression[(*table)->seqAfterExpressionLength] =
ruleChars.chars[(*table)->seqAfterExpressionLength];
(*table)->seqAfterExpression[(*table)->seqAfterExpressionLength] = 0;
}
break;
case CTO_CapsModeChars:
c = NULL;
ok = 1;
if (getRuleCharsText(nested, &ruleChars, &lastToken)) {
for (k = 0; k < ruleChars.length; k++) {
c = compile_findCharOrDots(ruleChars.chars[k], 0, *table);
if (c)
c->attributes |= CTC_CapsMode;
else {
compileError(nested, "Capital mode character undefined");
ok = 0;
break;
}
}
}
break;
case CTO_BegComp:
tmp_offset = (*table)->begComp;
ok = compileBrailleIndicator(nested, "begin computer braille", CTO_BegCompRule,
&tmp_offset, &lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->begComp = tmp_offset;
break;
case CTO_EndComp:
tmp_offset = (*table)->endComp;
ok = compileBrailleIndicator(nested, "end computer braslle", CTO_EndCompRule,
&tmp_offset, &lastToken, newRuleOffset, newRule, noback, nofor, table);
(*table)->endComp = tmp_offset;
break;
case CTO_Syllable:
(*table)->syllables = 1;
case CTO_Always:
case CTO_NoCross:
case CTO_LargeSign:
case CTO_WholeWord:
case CTO_PartWord:
case CTO_JoinNum:
case CTO_JoinableWord:
case CTO_LowWord:
case CTO_SuffixableWord:
case CTO_PrefixableWord:
case CTO_BegWord:
case CTO_BegMidWord:
case CTO_MidWord:
case CTO_MidEndWord:
case CTO_EndWord:
case CTO_PrePunc:
case CTO_PostPunc:
case CTO_BegNum:
case CTO_MidNum:
case CTO_EndNum:
case CTO_Repeated:
case CTO_RepWord:
if (getRuleCharsText(nested, &ruleChars, &lastToken))
if (getRuleDotsPattern(nested, &ruleDots, &lastToken))
if (!addRule(nested, opcode, &ruleChars, &ruleDots, after, before,
newRuleOffset, newRule, noback, nofor, table))
ok = 0;
// if (opcode == CTO_MidNum)
// {
// TranslationTableCharacter *c = compile_findCharOrDots(ruleChars.chars[0], 0);
// if(c)
// c->attributes |= CTC_NumericMode;
// }
break;
case CTO_CompDots:
case CTO_Comp6:
if (!getRuleCharsText(nested, &ruleChars, &lastToken)) return 0;
if (ruleChars.length != 1 || ruleChars.chars[0] > 255) {
compileError(nested, "first operand must be 1 character and < 256");
return 0;
}
if (!getRuleDotsPattern(nested, &ruleDots, &lastToken)) return 0;
if (!addRule(nested, opcode, &ruleChars, &ruleDots, after, before, newRuleOffset,
newRule, noback, nofor, table))
ok = 0;
(*table)->compdotsPattern[ruleChars.chars[0]] = *newRuleOffset;
break;
case CTO_ExactDots:
if (!getRuleCharsText(nested, &ruleChars, &lastToken)) return 0;
if (ruleChars.chars[0] != '@') {
compileError(nested, "The operand must begin with an at sign (@)");
return 0;
}
for (k = 1; k < ruleChars.length; k++)
scratchPad.chars[k - 1] = ruleChars.chars[k];
scratchPad.length = ruleChars.length - 1;
if (!parseDots(nested, &ruleDots, &scratchPad)) return 0;
if (!addRule(nested, opcode, &ruleChars, &ruleDots, before, after, newRuleOffset,
newRule, noback, nofor, table))
ok = 0;
break;
case CTO_CapsNoCont:
ruleChars.length = 1;
ruleChars.chars[0] = 'a';
if (!addRule(nested, CTO_CapsNoContRule, &ruleChars, NULL, after, before,
newRuleOffset, newRule, noback, nofor, table))
ok = 0;
(*table)->capsNoCont = *newRuleOffset;
break;
case CTO_Replace:
if (getRuleCharsText(nested, &ruleChars, &lastToken)) {
if (lastToken)
ruleDots.length = ruleDots.chars[0] = 0;
else {
getRuleDotsText(nested, &ruleDots, &lastToken);
if (ruleDots.chars[0] == '#')
ruleDots.length = ruleDots.chars[0] = 0;
else if (ruleDots.chars[0] == '\\' && ruleDots.chars[1] == '#')
memcpy(&ruleDots.chars[0], &ruleDots.chars[1],
ruleDots.length-- * CHARSIZE);
}
}
for (k = 0; k < ruleChars.length; k++)
addCharOrDots(nested, ruleChars.chars[k], 0, table);
for (k = 0; k < ruleDots.length; k++)
addCharOrDots(nested, ruleDots.chars[k], 0, table);
if (!addRule(nested, opcode, &ruleChars, &ruleDots, after, before, newRuleOffset,
newRule, noback, nofor, table))
ok = 0;
break;
case CTO_Correct:
(*table)->corrections = 1;
goto doPass;
case CTO_Pass2:
if ((*table)->numPasses < 2) (*table)->numPasses = 2;
goto doPass;
case CTO_Pass3:
if ((*table)->numPasses < 3) (*table)->numPasses = 3;
goto doPass;
case CTO_Pass4:
if ((*table)->numPasses < 4) (*table)->numPasses = 4;
doPass:
case CTO_Context:
if (!(nofor || noback)) {
compileError(nested, "%s or %s must be specified.",
_lou_findOpcodeName(CTO_NoFor), _lou_findOpcodeName(CTO_NoBack));
ok = 0;
break;
}
if (!compilePassOpcode(nested, opcode, *characterClasses, newRuleOffset, newRule,
noback, nofor, *ruleNames, table))
ok = 0;
break;
case CTO_Contraction:
case CTO_NoCont:
case CTO_CompBrl:
case CTO_Literal:
if (getRuleCharsText(nested, &ruleChars, &lastToken))
if (!addRule(nested, opcode, &ruleChars, NULL, after, before, newRuleOffset,
newRule, noback, nofor, table))
ok = 0;
break;
case CTO_MultInd: {
int t;
ruleChars.length = 0;
if (getToken(nested, &token, "multiple braille indicators", &lastToken) &&
parseDots(nested, &cells, &token)) {
while ((t = getToken(nested, &token, "multind opcodes", &lastToken))) {
opcode = getOpcode(nested, &token, opcodeLengths);
if (opcode >= CTO_CapsLetter && opcode < CTO_MultInd)
ruleChars.chars[ruleChars.length++] = (widechar)opcode;
else {
compileError(nested, "Not a braille indicator opcode.");
ok = 0;
}
if (t == 2) break;
}
} else
ok = 0;
if (!addRule(nested, CTO_MultInd, &ruleChars, &cells, after, before,
newRuleOffset, newRule, noback, nofor, table))
ok = 0;
break;
}
case CTO_Class: {
CharsString characters;
const CharacterClass *class;
if (!*characterClasses) {
if (!allocateCharacterClasses(characterClasses, characterClassAttribute))
ok = 0;
}
if (getToken(nested, &token, "character class name", &lastToken)) {
class = findCharacterClass(&token, *characterClasses);
if (!class)
// no class with that name: create one
class = addCharacterClass(nested, &token.chars[0], token.length,
characterClasses, characterClassAttribute);
if (class) {
// there is a class with that name or a new class was successfully created
if (getCharacters(nested, &characters, &lastToken)) {
int index;
for (index = 0; index < characters.length; ++index) {
TranslationTableRule *defRule;
TranslationTableCharacter *character = definedCharOrDots(
nested, characters.chars[index], 0, *table);
character->attributes |= class->attribute;
defRule = (TranslationTableRule *)&(
*table)->ruleArea[character->definitionRule];
if (defRule->dotslen == 1) {
character = definedCharOrDots(nested,
defRule->charsdots[defRule->charslen], 1, *table);
character->attributes |= class->attribute;
}
}
}
}
}
break;
}
{
TranslationTableCharacterAttributes *attributes;
const CharacterClass *class;
case CTO_After:
attributes = &after;
goto doClass;
case CTO_Before:
attributes = &before;
doClass:
if (!*characterClasses) {
if (!allocateCharacterClasses(characterClasses, characterClassAttribute))
ok = 0;
}
if (getCharacterClass(nested, &class, *characterClasses, &lastToken)) {
*attributes |= class->attribute;
goto doOpcode;
}
break;
}
case CTO_NoBack:
if (nofor) {
compileError(nested, "%s already specified.", _lou_findOpcodeName(CTO_NoFor));
ok = 0;
break;
}
noback = 1;
goto doOpcode;
case CTO_NoFor:
if (noback) {
compileError(
nested, "%s already specified.", _lou_findOpcodeName(CTO_NoBack));
ok = 0;
break;
}
nofor = 1;
goto doOpcode;
case CTO_EmpMatchBefore:
before |= CTC_EmpMatch;
goto doOpcode;
case CTO_EmpMatchAfter:
after |= CTC_EmpMatch;
goto doOpcode;
case CTO_SwapCc:
case CTO_SwapCd:
case CTO_SwapDd:
if (!compileSwap(nested, opcode, &lastToken, newRuleOffset, newRule, noback,
nofor, ruleNames, table))
ok = 0;
break;
case CTO_Hyphen:
case CTO_DecPoint:
// case CTO_Apostrophe:
// case CTO_Initial:
if (getRuleCharsText(nested, &ruleChars, &lastToken))
if (getRuleDotsPattern(nested, &ruleDots, &lastToken)) {
if (ruleChars.length != 1 || ruleDots.length < 1) {
compileError(nested,
"One Unicode character and at least one cell are required.");
ok = 0;
}
if (!addRule(nested, opcode, &ruleChars, &ruleDots, after, before,
newRuleOffset, newRule, noback, nofor, table))
ok = 0;
// if (opcode == CTO_DecPoint)
// {
// TranslationTableCharacter *c =
// compile_findCharOrDots(ruleChars.chars[0], 0);
// if(c)
// c->attributes |= CTC_NumericMode;
// }
}
break;
case CTO_Space:
compileCharDef(nested, opcode, CTC_Space, &lastToken, newRuleOffset, newRule,
noback, nofor, table);
break;
case CTO_Digit:
compileCharDef(nested, opcode, CTC_Digit, &lastToken, newRuleOffset, newRule,
noback, nofor, table);
break;
case CTO_LitDigit:
compileCharDef(nested, opcode, CTC_LitDigit, &lastToken, newRuleOffset, newRule,
noback, nofor, table);
break;
case CTO_Punctuation:
compileCharDef(nested, opcode, CTC_Punctuation, &lastToken, newRuleOffset,
newRule, noback, nofor, table);
break;
case CTO_Math:
compileCharDef(nested, opcode, CTC_Math, &lastToken, newRuleOffset, newRule,
noback, nofor, table);
break;
case CTO_Sign:
compileCharDef(nested, opcode, CTC_Sign, &lastToken, newRuleOffset, newRule,
noback, nofor, table);
break;
case CTO_Letter:
compileCharDef(nested, opcode, CTC_Letter, &lastToken, newRuleOffset, newRule,
noback, nofor, table);
break;
case CTO_UpperCase:
compileCharDef(nested, opcode, CTC_UpperCase, &lastToken, newRuleOffset, newRule,
noback, nofor, table);
break;
case CTO_LowerCase:
compileCharDef(nested, opcode, CTC_LowerCase, &lastToken, newRuleOffset, newRule,
noback, nofor, table);
break;
case CTO_Grouping:
ok = compileGrouping(nested, &lastToken, newRuleOffset, newRule, noback, nofor,
ruleNames, table);
break;
case CTO_UpLow:
ok = compileUplow(
nested, &lastToken, newRuleOffset, newRule, noback, nofor, table);
break;
case CTO_Display:
if (getRuleCharsText(nested, &ruleChars, &lastToken))
if (getRuleDotsPattern(nested, &ruleDots, &lastToken)) {
if (ruleChars.length != 1 || ruleDots.length != 1) {
compileError(
nested, "Exactly one character and one cell are required.");
ok = 0;
}
putCharAndDots(nested, ruleChars.chars[0], ruleDots.chars[0], table);
}
break;
default:
compileError(nested, "unimplemented opcode.");
ok = 0;
break;
}
if (patterns != NULL) free(patterns);
return ok;
}
| 0 |
[
"CWE-787"
] |
liblouis
|
fb2bfce4ed49ac4656a8f7e5b5526e4838da1dde
| 3,496,946,251,092,518,000,000,000,000,000,000,000 | 1,052 |
Fix yet another buffer overflow in the braille table parser
Reported by Henri Salo
Fixes #592
|
static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
{
if (seq_cft(seq)->seq_stop)
seq_cft(seq)->seq_stop(seq, v);
}
| 0 |
[
"CWE-416"
] |
linux
|
a06247c6804f1a7c86a2e5398a4c1f1db1471848
| 328,330,878,288,272,800,000,000,000,000,000,000,000 | 5 |
psi: Fix uaf issue when psi trigger is destroyed while being polled
With write operation on psi files replacing old trigger with a new one,
the lifetime of its waitqueue is totally arbitrary. Overwriting an
existing trigger causes its waitqueue to be freed and pending poll()
will stumble on trigger->event_wait which was destroyed.
Fix this by disallowing to redefine an existing psi trigger. If a write
operation is used on a file descriptor with an already existing psi
trigger, the operation will fail with EBUSY error.
Also bypass a check for psi_disabled in the psi_trigger_destroy as the
flag can be flipped after the trigger is created, leading to a memory
leak.
Fixes: 0e94682b73bf ("psi: introduce psi monitor")
Reported-by: [email protected]
Suggested-by: Linus Torvalds <[email protected]>
Analyzed-by: Eric Biggers <[email protected]>
Signed-off-by: Suren Baghdasaryan <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Eric Biggers <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: [email protected]
Link: https://lore.kernel.org/r/[email protected]
|
__releases(aarp_lock)
{
read_unlock_bh(&aarp_lock);
}
| 0 |
[
"CWE-476"
] |
linux
|
9804501fa1228048857910a6bf23e085aade37cc
| 278,148,882,309,209,680,000,000,000,000,000,000,000 | 4 |
appletalk: Fix potential NULL pointer dereference in unregister_snap_client
register_snap_client may return NULL, all the callers
check it, but only print a warning. This will result in
NULL pointer dereference in unregister_snap_client and other
places.
It has always been used like this since v2.6
Reported-by: Dan Carpenter <[email protected]>
Signed-off-by: YueHaibing <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
void Vers_history_point::print(String *str, enum_query_type query_type,
const char *prefix, size_t plen) const
{
const static LEX_CSTRING unit_type[]=
{
{ STRING_WITH_LEN("") },
{ STRING_WITH_LEN("TIMESTAMP ") },
{ STRING_WITH_LEN("TRANSACTION ") }
};
str->append(prefix, plen);
str->append(unit_type + unit);
item->print(str, query_type);
}
| 0 |
[
"CWE-416"
] |
server
|
c02ebf3510850ba78a106be9974c94c3b97d8585
| 310,262,714,556,048,200,000,000,000,000,000,000,000 | 13 |
MDEV-24176 Preparations
1. moved fix_vcol_exprs() call to open_table()
mysql_alter_table() doesn't do lock_tables() so it cannot win from
fix_vcol_exprs() from there. Tests affected: main.default_session
2. Vanilla cleanups and comments.
|
static void ImportBlueQuantum(const Image *image,QuantumInfo *quantum_info,
const MagickSizeType number_pixels,const unsigned char *magick_restrict p,
Quantum *magick_restrict q,ExceptionInfo *exception)
{
QuantumAny
range;
register ssize_t
x;
unsigned int
pixel;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
switch (quantum_info->depth)
{
case 8:
{
unsigned char
pixel;
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushCharPixel(p,&pixel);
SetPixelBlue(image,ScaleCharToQuantum(pixel),q);
p+=quantum_info->pad;
q+=GetPixelChannels(image);
}
break;
}
case 16:
{
unsigned short
pixel;
if (quantum_info->format == FloatingPointQuantumFormat)
{
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushShortPixel(quantum_info->endian,p,&pixel);
SetPixelBlue(image,ClampToQuantum(QuantumRange*
HalfToSinglePrecision(pixel)),q);
p+=quantum_info->pad;
q+=GetPixelChannels(image);
}
break;
}
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushShortPixel(quantum_info->endian,p,&pixel);
SetPixelBlue(image,ScaleShortToQuantum(pixel),q);
p+=quantum_info->pad;
q+=GetPixelChannels(image);
}
break;
}
case 32:
{
unsigned int
pixel;
if (quantum_info->format == FloatingPointQuantumFormat)
{
float
pixel;
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushFloatPixel(quantum_info,p,&pixel);
SetPixelBlue(image,ClampToQuantum(pixel),q);
p+=quantum_info->pad;
q+=GetPixelChannels(image);
}
break;
}
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushLongPixel(quantum_info->endian,p,&pixel);
SetPixelBlue(image,ScaleLongToQuantum(pixel),q);
p+=quantum_info->pad;
q+=GetPixelChannels(image);
}
break;
}
case 64:
{
if (quantum_info->format == FloatingPointQuantumFormat)
{
double
pixel;
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushDoublePixel(quantum_info,p,&pixel);
SetPixelBlue(image,ClampToQuantum(pixel),q);
p+=quantum_info->pad;
q+=GetPixelChannels(image);
}
break;
}
}
default:
{
range=GetQuantumRange(quantum_info->depth);
for (x=0; x < (ssize_t) number_pixels; x++)
{
p=PushQuantumPixel(quantum_info,p,&pixel);
SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q);
p+=quantum_info->pad;
q+=GetPixelChannels(image);
}
break;
}
}
}
| 0 |
[
"CWE-125"
] |
ImageMagick
|
430403b0029b37decf216d57f810899cab2317dd
| 12,677,968,315,342,022,000,000,000,000,000,000,000 | 116 |
https://github.com/ImageMagick/ImageMagick/issues/126
|
static int install_permanent_handler(int num_cpus, uintptr_t smbase,
size_t smsize, size_t save_state_size)
{
/* There are num_cpus concurrent stacks and num_cpus concurrent save
* state areas. Lastly, set the stack size to 1KiB. */
struct smm_loader_params smm_params = {
.per_cpu_stack_size = CONFIG_SMM_MODULE_STACK_SIZE,
.num_concurrent_stacks = num_cpus,
.per_cpu_save_state_size = save_state_size,
.num_concurrent_save_states = num_cpus,
};
/* Allow callback to override parameters. */
if (mp_state.ops.adjust_smm_params != NULL)
mp_state.ops.adjust_smm_params(&smm_params, 1);
printk(BIOS_DEBUG, "Installing SMM handler to 0x%08lx\n", smbase);
if (smm_load_module((void *)smbase, smsize, &smm_params))
return -1;
adjust_smm_apic_id_map(&smm_params);
return 0;
}
| 1 |
[
"CWE-269"
] |
coreboot
|
afb7a814783cda12f5b72167163b9109ee1d15a7
| 284,267,377,863,943,500,000,000,000,000,000,000,000 | 25 |
cpu/x86/smm: Introduce SMM module loader version 2
Xeon-SP Skylake Scalable Processor can have 36 CPU threads (18 cores).
Current coreboot SMM is unable to handle more than ~32 CPU threads.
This patch introduces a version 2 of the SMM module loader which
addresses this problem. Having two versions of the SMM module loader
prevents any issues to current projects. Future Xeon-SP products will
be using this version of the SMM loader. Subsequent patches will
enable board specific functionality for Xeon-SP.
The reason for moving to version 2 is the state save area begins to
encroach upon the SMI handling code when more than 32 CPU threads are
in the system. This can cause system hangs, reboots, etc. The second
change is related to staggered entry points with simple near jumps. In
the current loader, near jumps will not work because the CPU is jumping
within the same code segment. In version 2, "far" address jumps are
necessary therefore protected mode must be enabled first. The SMM
layout and how the CPUs are staggered are documented in the code.
By making the modifications above, this allows the smm module loader to
expand easily as more CPU threads are added.
TEST=build for Tiogapass platform under OCP mainboard. Enable the
following in Kconfig.
select CPU_INTEL_COMMON_SMM
select SOC_INTEL_COMMON_BLOCK_SMM
select SMM_TSEG
select HAVE_SMI_HANDLER
select ACPI_INTEL_HARDWARE_SLEEP_VALUES
Debug console will show all 36 cores relocated. Further tested by
generating SMI's to port 0xb2 using XDP/ITP HW debugger and ensured all
cores entering and exiting SMM properly. In addition, booted to Linux
5.4 kernel and observed no issues during mp init.
Change-Id: I00a23a5f2a46110536c344254868390dbb71854c
Signed-off-by: Rocky Phagura <[email protected]>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/43684
Tested-by: build bot (Jenkins) <[email protected]>
Reviewed-by: Angel Pons <[email protected]>
|
static ssize_t cdc_ncm_show_tx_max(struct device *d, struct device_attribute *attr, char *buf)
{
struct usbnet *dev = netdev_priv(to_net_dev(d));
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
return sprintf(buf, "%u\n", ctx->tx_max);
}
| 0 |
[
"CWE-703"
] |
linux
|
4d06dd537f95683aba3651098ae288b7cbff8274
| 35,132,872,408,860,014,000,000,000,000,000,000,000 | 7 |
cdc_ncm: do not call usbnet_link_change from cdc_ncm_bind
usbnet_link_change will call schedule_work and should be
avoided if bind is failing. Otherwise we will end up with
scheduled work referring to a netdev which has gone away.
Instead of making the call conditional, we can just defer
it to usbnet_probe, using the driver_info flag made for
this purpose.
Fixes: 8a34b0ae8778 ("usbnet: cdc_ncm: apply usbnet_link_change")
Reported-by: Andrey Konovalov <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Signed-off-by: Bjørn Mork <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
__releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
}
| 0 |
[
"CWE-416"
] |
net
|
36d5fe6a000790f56039afe26834265db0a3ad4c
| 199,501,001,105,914,900,000,000,000,000,000,000,000 | 4 |
core, nfqueue, openvswitch: Orphan frags in skb_zerocopy and handle errors
skb_zerocopy can copy elements of the frags array between skbs, but it doesn't
orphan them. Also, it doesn't handle errors, so this patch takes care of that
as well, and modify the callers accordingly. skb_tx_error() is also added to
the callers so they will signal the failed delivery towards the creator of the
skb.
Signed-off-by: Zoltan Kiss <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
struct sctp_association *asoc,
struct sctp_chunk *chunk)
{
int err = 0;
if (sctp_outq_sack(&asoc->outqueue, chunk)) {
struct net *net = sock_net(asoc->base.sk);
/* There are no more TSNs awaiting SACK. */
err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
asoc->state, asoc->ep, asoc, NULL,
GFP_ATOMIC);
}
return err;
}
| 0 |
[] |
linux
|
196d67593439b03088913227093e374235596e33
| 43,269,639,023,376,365,000,000,000,000,000,000,000 | 18 |
sctp: Add support to per-association statistics via a new SCTP_GET_ASSOC_STATS call
The current SCTP stack is lacking a mechanism to have per association
statistics. This is an implementation modeled after OpenSolaris'
SCTP_GET_ASSOC_STATS.
Userspace part will follow on lksctp if/when there is a general ACK on
this.
V4:
- Move ipackets++ before q->immediate.func() for consistency reasons
- Move sctp_max_rto() at the end of sctp_transport_update_rto() to avoid
returning bogus RTO values
- return asoc->rto_min when max_obs_rto value has not changed
V3:
- Increase ictrlchunks in sctp_assoc_bh_rcv() as well
- Move ipackets++ to sctp_inq_push()
- return 0 when no rto updates took place since the last call
V2:
- Implement partial retrieval of stat struct to cope for future expansion
- Kill the rtxpackets counter as it cannot be precise anyway
- Rename outseqtsns to outofseqtsns to make it clearer that these are out
of sequence unexpected TSNs
- Move asoc->ipackets++ under a lock to avoid potential miscounts
- Fold asoc->opackets++ into the already existing asoc check
- Kill unneeded (q->asoc) test when increasing rtxchunks
- Do not count octrlchunks if sending failed (SCTP_XMIT_OK != 0)
- Don't count SHUTDOWNs as SACKs
- Move SCTP_GET_ASSOC_STATS to the private space API
- Adjust the len check in sctp_getsockopt_assoc_stats() to allow for
future struct growth
- Move association statistics in their own struct
- Update idupchunks when we send a SACK with dup TSNs
- return min_rto in max_rto when RTO has not changed. Also return the
transport when max_rto last changed.
Signed-off: Michele Baldessari <[email protected]>
Acked-by: Vlad Yasevich <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
GC_debug_malloc_atomic_ignore_off_page(size_t lb, GC_EXTRA_PARAMS)
{
void * result = GC_malloc_atomic_ignore_off_page(lb + DEBUG_BYTES);
if (result == 0) {
GC_err_printf("GC_debug_malloc_atomic_ignore_off_page(%lu)"
" returning NULL (%s:%d)\n", (unsigned long)lb, s, i);
return(0);
}
if (!GC_debugging_started) {
GC_start_debugging();
}
ADD_CALL_CHAIN(result, ra);
return (GC_store_debug_info(result, (word)lb, s, i));
}
| 1 |
[
"CWE-119"
] |
bdwgc
|
7292c02fac2066d39dd1bcc37d1a7054fd1e32ee
| 153,281,133,996,333,800,000,000,000,000,000,000,000 | 15 |
Fix malloc routines to prevent size value wrap-around
See issue #135 on Github.
* allchblk.c (GC_allochblk, GC_allochblk_nth): Use
OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS.
* malloc.c (GC_alloc_large): Likewise.
* alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed
from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent
overflow when computing GC_heapsize+bytes > GC_max_heapsize.
* dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page,
GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc,
GC_debug_generic_malloc_inner,
GC_debug_generic_malloc_inner_ignore_off_page,
GC_debug_malloc_stubborn, GC_debug_malloc_atomic,
GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable):
Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb
value.
* fnlz_mlc.c (GC_finalized_malloc): Likewise.
* gcj_mlc.c (GC_debug_gcj_malloc): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Likewise.
* include/private/gcconfig.h (GET_MEM): Likewise.
* mallocx.c (GC_malloc_many, GC_memalign): Likewise.
* os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise.
* typd_mlc.c (GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page,
GC_calloc_explicitly_typed): Likewise.
* headers.c (GC_scratch_alloc): Change type of bytes_to_get from word
to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed).
* include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already
defined).
* include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from
malloc.c file.
* include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before
include gcconfig.h).
* include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type
to size_t.
* os_dep.c (GC_page_size): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument.
* include/private/gcconfig.h (GET_MEM): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE,
ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb".
* include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro.
* include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem,
GC_unix_get_mem): Change argument type from word to int.
* os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem,
GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise.
* malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only
if no value wrap around is guaranteed.
* malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case
(because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value
wrap around).
* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
* misc.c (GC_init_size_map): Change "i" local variable type from int
to size_t.
* os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise.
* misc.c (GC_envfile_init): Cast len to size_t when passed to
ROUNDUP_PAGESIZE_IF_MMAP.
* os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and
GETPAGESIZE() to size_t (when setting GC_page_size).
* os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection):
Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking
(the argument is of word type).
* os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with
~GC_page_size+1 (because GC_page_size is unsigned); remove redundant
cast to size_t.
* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size
to SBRK_ARG_T.
* os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable
to size_t.
* typd_mlc.c: Do not include limits.h.
* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in
gc_priv.h now).
|
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
{
IF_CANCEL(int cancel_state;)
DCL_LOCK_STATE;
LOCK();
DISABLE_CANCEL(cancel_state); /* May be unnecessary? */
# ifdef STACK_GROWS_DOWN
b -> mem_base = GC_find_limit(GC_approx_sp(), TRUE);
# ifdef IA64
b -> reg_base = GC_find_limit(GC_save_regs_in_stack(), FALSE);
# endif
# else
b -> mem_base = GC_find_limit(GC_approx_sp(), FALSE);
# endif
RESTORE_CANCEL(cancel_state);
UNLOCK();
return GC_SUCCESS;
}
| 0 |
[
"CWE-119"
] |
bdwgc
|
7292c02fac2066d39dd1bcc37d1a7054fd1e32ee
| 104,522,162,346,010,820,000,000,000,000,000,000,000 | 19 |
Fix malloc routines to prevent size value wrap-around
See issue #135 on Github.
* allchblk.c (GC_allochblk, GC_allochblk_nth): Use
OBJ_SZ_TO_BLOCKS_CHECKED instead of OBJ_SZ_TO_BLOCKS.
* malloc.c (GC_alloc_large): Likewise.
* alloc.c (GC_expand_hp_inner): Type of "bytes" local variable changed
from word to size_t; cast ROUNDUP_PAGESIZE argument to size_t; prevent
overflow when computing GC_heapsize+bytes > GC_max_heapsize.
* dbg_mlc.c (GC_debug_malloc, GC_debug_malloc_ignore_off_page,
GC_debug_malloc_atomic_ignore_off_page, GC_debug_generic_malloc,
GC_debug_generic_malloc_inner,
GC_debug_generic_malloc_inner_ignore_off_page,
GC_debug_malloc_stubborn, GC_debug_malloc_atomic,
GC_debug_malloc_uncollectable, GC_debug_malloc_atomic_uncollectable):
Use SIZET_SAT_ADD (instead of "+" operator) to add extra bytes to lb
value.
* fnlz_mlc.c (GC_finalized_malloc): Likewise.
* gcj_mlc.c (GC_debug_gcj_malloc): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Likewise.
* include/private/gcconfig.h (GET_MEM): Likewise.
* mallocx.c (GC_malloc_many, GC_memalign): Likewise.
* os_dep.c (GC_wince_get_mem, GC_win32_get_mem): Likewise.
* typd_mlc.c (GC_malloc_explicitly_typed,
GC_malloc_explicitly_typed_ignore_off_page,
GC_calloc_explicitly_typed): Likewise.
* headers.c (GC_scratch_alloc): Change type of bytes_to_get from word
to size_t (because ROUNDUP_PAGESIZE_IF_MMAP result type changed).
* include/private/gc_priv.h: Include limits.h (unless SIZE_MAX already
defined).
* include/private/gc_priv.h (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Move from
malloc.c file.
* include/private/gc_priv.h (SIZET_SAT_ADD): New macro (defined before
include gcconfig.h).
* include/private/gc_priv.h (EXTRA_BYTES, GC_page_size): Change type
to size_t.
* os_dep.c (GC_page_size): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, ROUNDUP_PAGESIZE): Add comment about the argument.
* include/private/gcconfig.h (GET_MEM): Likewise.
* include/private/gc_priv.h (ROUNDUP_GRANULE_SIZE, ROUNDED_UP_GRANULES,
ADD_SLOP, OBJ_SZ_TO_BLOCKS, ROUNDUP_PAGESIZE,
ROUNDUP_PAGESIZE_IF_MMAP): Rename argument to "lb".
* include/private/gc_priv.h (OBJ_SZ_TO_BLOCKS_CHECKED): New macro.
* include/private/gcconfig.h (GC_win32_get_mem, GC_wince_get_mem,
GC_unix_get_mem): Change argument type from word to int.
* os_dep.c (GC_unix_mmap_get_mem, GC_unix_get_mem,
GC_unix_sbrk_get_mem, GC_wince_get_mem, GC_win32_get_mem): Likewise.
* malloc.c (GC_alloc_large_and_clear): Call OBJ_SZ_TO_BLOCKS only
if no value wrap around is guaranteed.
* malloc.c (GC_generic_malloc): Do not check for lb_rounded < lb case
(because ROUNDED_UP_GRANULES and GRANULES_TO_BYTES guarantees no value
wrap around).
* mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.
* misc.c (GC_init_size_map): Change "i" local variable type from int
to size_t.
* os_dep.c (GC_write_fault_handler, catch_exception_raise): Likewise.
* misc.c (GC_envfile_init): Cast len to size_t when passed to
ROUNDUP_PAGESIZE_IF_MMAP.
* os_dep.c (GC_setpagesize): Cast GC_sysinfo.dwPageSize and
GETPAGESIZE() to size_t (when setting GC_page_size).
* os_dep.c (GC_unix_mmap_get_mem, GC_unmap_start, GC_remove_protection):
Expand ROUNDUP_PAGESIZE macro but without value wrap-around checking
(the argument is of word type).
* os_dep.c (GC_unix_mmap_get_mem): Replace -GC_page_size with
~GC_page_size+1 (because GC_page_size is unsigned); remove redundant
cast to size_t.
* os_dep.c (GC_unix_sbrk_get_mem): Add explicit cast of GC_page_size
to SBRK_ARG_T.
* os_dep.c (GC_wince_get_mem): Change type of res_bytes local variable
to size_t.
* typd_mlc.c: Do not include limits.h.
* typd_mlc.c (GC_SIZE_MAX, GC_SQRT_SIZE_MAX): Remove (as defined in
gc_priv.h now).
|
gdk_pixbuf_new (GdkColorspace colorspace,
gboolean has_alpha,
int bits_per_sample,
int width,
int height)
{
guchar *buf;
int channels;
int rowstride;
gsize bytes;
g_return_val_if_fail (colorspace == GDK_COLORSPACE_RGB, NULL);
g_return_val_if_fail (bits_per_sample == 8, NULL);
g_return_val_if_fail (width > 0, NULL);
g_return_val_if_fail (height > 0, NULL);
channels = has_alpha ? 4 : 3;
rowstride = width * channels;
if (rowstride / channels != width || rowstride + 3 < 0) /* overflow */
return NULL;
/* Always align rows to 32-bit boundaries */
rowstride = (rowstride + 3) & ~3;
bytes = height * rowstride;
if (bytes / rowstride != height) /* overflow */
return NULL;
buf = g_try_malloc (bytes);
if (!buf)
return NULL;
return gdk_pixbuf_new_from_data (buf, colorspace, has_alpha, bits_per_sample,
width, height, rowstride,
free_buffer, NULL);
}
| 1 |
[] |
gdk-pixbuf
|
deb78d971c4bcb9e3ccbb71e7925bc6baa707188
| 117,464,290,274,774,670,000,000,000,000,000,000,000 | 36 |
Use g_try_malloc_n where it makes sense
This lets us avoid some manual overflow checks.
|
initoptions_finish()
{
#ifndef MAC
char *opts = getenv("NETHACKOPTIONS");
if (!opts) opts = getenv("HACKOPTIONS");
if (opts) {
if (*opts == '/' || *opts == '\\' || *opts == '@') {
if (*opts == '@') opts++; /* @filename */
/* looks like a filename */
if (strlen(opts) < BUFSZ/2)
read_config_file(opts, SET_IN_FILE);
} else {
read_config_file((char *)0, SET_IN_FILE);
/* let the total length of options be long;
* parseoptions() will check each individually
*/
parseoptions(opts, TRUE, FALSE);
}
} else
#endif
read_config_file((char *)0, SET_IN_FILE);
(void)fruitadd(pl_fruit);
/* Remove "slime mold" from list of object names; this will */
/* prevent it from being wished unless it's actually present */
/* as a named (or default) fruit. Wishing for "fruit" will */
/* result in the player's preferred fruit [better than "\033"]. */
obj_descr[SLIME_MOLD].oc_name = "fruit";
return;
}
| 0 |
[
"CWE-269"
] |
NetHack
|
612755bfb5c412079795c68ba392df5d93874ed8
| 262,457,328,978,488,800,000,000,000,000,000,000,000 | 31 |
escapes() revamp
Partial rewrite of escapes(), mostly changing its if-then-else
logic so that end-of-string can be checked once instead for each case.
The previous version had a bug if the input string ended with backslash
and one decimal digit (due to being lumped together with the handling
for trailing \X or \O).
|
REF_EnableLocal(int stratum, double distance, int orphan)
{
enable_local_stratum = 1;
local_stratum = CLAMP(1, stratum, NTP_MAX_STRATUM - 1);
local_distance = distance;
local_orphan = !!orphan;
}
| 0 |
[
"CWE-59"
] |
chrony
|
e18903a6b56341481a2e08469c0602010bf7bfe3
| 147,671,445,555,935,060,000,000,000,000,000,000,000 | 7 |
switch to new util file functions
Replace all fopen(), rename(), and unlink() calls with the new util
functions.
|
TEST(ModMatchExpression, ZeroDivisor) {
ModMatchExpression mod;
ASSERT(!mod.init("", 0, 1).isOK());
}
| 0 |
[] |
mongo
|
b0ef26c639112b50648a02d969298650fbd402a4
| 87,009,424,391,699,790,000,000,000,000,000,000,000 | 4 |
SERVER-51083 Reject invalid UTF-8 from $regex match expressions
|
ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long ret;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
cpu_buffer = buffer->buffers[cpu];
ret = local_read(&cpu_buffer->commit_overrun);
return ret;
}
| 0 |
[
"CWE-190"
] |
linux-stable
|
59643d1535eb220668692a5359de22545af579f6
| 255,874,449,885,104,080,000,000,000,000,000,000,000 | 13 |
ring-buffer: Prevent overflow of size in ring_buffer_resize()
If the size passed to ring_buffer_resize() is greater than MAX_LONG - BUF_PAGE_SIZE
then the DIV_ROUND_UP() will return zero.
Here's the details:
# echo 18014398509481980 > /sys/kernel/debug/tracing/buffer_size_kb
tracing_entries_write() processes this and converts kb to bytes.
18014398509481980 << 10 = 18446744073709547520
and this is passed to ring_buffer_resize() as unsigned long size.
size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
Where DIV_ROUND_UP(a, b) is (a + b - 1)/b
BUF_PAGE_SIZE is 4080 and here
18446744073709547520 + 4080 - 1 = 18446744073709551599
where 18446744073709551599 is still smaller than 2^64
2^64 - 18446744073709551599 = 17
But now 18446744073709551599 / 4080 = 4521260802379792
and size = size * 4080 = 18446744073709551360
This is checked to make sure its still greater than 2 * 4080,
which it is.
Then we convert to the number of buffer pages needed.
nr_page = DIV_ROUND_UP(size, BUF_PAGE_SIZE)
but this time size is 18446744073709551360 and
2^64 - (18446744073709551360 + 4080 - 1) = -3823
Thus it overflows and the resulting number is less than 4080, which makes
3823 / 4080 = 0
an nr_pages is set to this. As we already checked against the minimum that
nr_pages may be, this causes the logic to fail as well, and we crash the
kernel.
There's no reason to have the two DIV_ROUND_UP() (that's just result of
historical code changes), clean up the code and fix this bug.
Cc: [email protected] # 3.5+
Fixes: 83f40318dab00 ("ring-buffer: Make removal of ring buffer pages atomic")
Signed-off-by: Steven Rostedt <[email protected]>
|
lyd_parse_path(struct ly_ctx *ctx, const char *path, LYD_FORMAT format, int options, ...)
{
int fd;
struct lyd_node *ret;
va_list ap;
if (!ctx || !path) {
LOGARG;
return NULL;
}
fd = open(path, O_RDONLY);
if (fd == -1) {
LOGERR(ctx, LY_ESYS, "Failed to open data file \"%s\" (%s).", path, strerror(errno));
return NULL;
}
va_start(ap, options);
ret = lyd_parse_fd_(ctx, fd, format, options, ap);
va_end(ap);
close(fd);
return ret;
}
| 0 |
[
"CWE-119"
] |
libyang
|
32fb4993bc8bb49e93e84016af3c10ea53964be5
| 238,866,282,652,849,450,000,000,000,000,000,000,000 | 25 |
schema tree BUGFIX do not check features while still resolving schema
Fixes #723
|
/*
 * Convert a client's raw cipher-suite byte string (p, num bytes) into a
 * STACK_OF(SSL_CIPHER).  Each suite occupies n bytes, where n is obtained
 * from ssl_put_cipher_by_char(s, NULL, NULL); num must be an exact
 * multiple of n.  The SCSV pseudo-suite is not added to the stack: it is
 * fatal during renegotiation and otherwise just sets
 * s->s3->send_connection_binding.  On success returns the stack (also
 * stored through *skp when skp is non-NULL); on error returns NULL and
 * frees the stack only if it was allocated here.
 */
STACK_OF(SSL_CIPHER) *ssl_bytes_to_cipher_list(SSL *s,unsigned char *p,int num,
STACK_OF(SSL_CIPHER) **skp)
{
const SSL_CIPHER *c;
STACK_OF(SSL_CIPHER) *sk;
int i,n;
/* Reset the renegotiation-indication flag before scanning for SCSV. */
if (s->s3)
s->s3->send_connection_binding = 0;
/* n = width in bytes of one cipher-suite id for this protocol. */
n=ssl_put_cipher_by_char(s,NULL,NULL);
if ((num%n) != 0)
{
SSLerr(SSL_F_SSL_BYTES_TO_CIPHER_LIST,SSL_R_ERROR_IN_RECEIVED_CIPHER_LIST);
return(NULL);
}
/* Reuse the caller's stack if provided, otherwise allocate one. */
if ((skp == NULL) || (*skp == NULL))
sk=sk_SSL_CIPHER_new_null(); /* change perhaps later */
else
{
sk= *skp;
sk_SSL_CIPHER_zero(sk);
}
for (i=0; i<num; i+=n)
{
/* Check for SCSV */
if (s->s3 && (n != 3 || !p[0]) &&
(p[n-2] == ((SSL3_CK_SCSV >> 8) & 0xff)) &&
(p[n-1] == (SSL3_CK_SCSV & 0xff)))
{
/* SCSV fatal if renegotiating */
if (s->renegotiate)
{
SSLerr(SSL_F_SSL_BYTES_TO_CIPHER_LIST,SSL_R_SCSV_RECEIVED_WHEN_RENEGOTIATING);
ssl3_send_alert(s,SSL3_AL_FATAL,SSL_AD_HANDSHAKE_FAILURE);
goto err;
}
s->s3->send_connection_binding = 1;
p += n;
#ifdef OPENSSL_RI_DEBUG
fprintf(stderr, "SCSV received by server\n");
#endif
continue;
}
/* Unknown suites yield NULL and are silently skipped. */
c=ssl_get_cipher_by_char(s,p);
p+=n;
if (c != NULL)
{
if (!sk_SSL_CIPHER_push(sk,c))
{
SSLerr(SSL_F_SSL_BYTES_TO_CIPHER_LIST,ERR_R_MALLOC_FAILURE);
goto err;
}
}
}
if (skp != NULL)
*skp=sk;
return(sk);
err:
/* Free the stack only if it was allocated by this function. */
if ((skp == NULL) || (*skp == NULL))
sk_SSL_CIPHER_free(sk);
return(NULL);
}
| 1 |
[
"CWE-310"
] |
openssl
|
6bfe55380abbf7528e04e59f18921bd6c896af1c
| 150,845,871,496,867,880,000,000,000,000,000,000,000 | 65 |
Support TLS_FALLBACK_SCSV.
Reviewed-by: Rich Salz <[email protected]>
|
_copyAIndices(const A_Indices *from)
{
/*
 * Deep-copy an A_Indices parse node (array subscript bounds).
 * COPY_NODE_FIELD copies the named field from "from" into "newnode";
 * the macro and the return-type line live outside this view
 * (PostgreSQL copyfuncs.c convention).
 */
A_Indices *newnode = makeNode(A_Indices);
/* lower- and upper-bound index expressions */
COPY_NODE_FIELD(lidx);
COPY_NODE_FIELD(uidx);
return newnode;
}
| 0 |
[
"CWE-362"
] |
postgres
|
5f173040e324f6c2eebb90d86cf1b0cdb5890f0a
| 308,008,505,307,655,860,000,000,000,000,000,000,000 | 9 |
Avoid repeated name lookups during table and index DDL.
If the name lookups come to different conclusions due to concurrent
activity, we might perform some parts of the DDL on a different table
than other parts. At least in the case of CREATE INDEX, this can be
used to cause the permissions checks to be performed against a
different table than the index creation, allowing for a privilege
escalation attack.
This changes the calling convention for DefineIndex, CreateTrigger,
transformIndexStmt, transformAlterTableStmt, CheckIndexCompatible
(in 9.2 and newer), and AlterTable (in 9.1 and older). In addition,
CheckRelationOwnership is removed in 9.2 and newer and the calling
convention is changed in older branches. A field has also been added
to the Constraint node (FkConstraint in 8.4). Third-party code calling
these functions or using the Constraint node will require updating.
Report by Andres Freund. Patch by Robert Haas and Andres Freund,
reviewed by Tom Lane.
Security: CVE-2014-0062
|
/*
 * Drain the input queue of received TIPC buffers and deliver each batch
 * to its destination socket.  Messages are peeled off per destination
 * port (tipc_skb_peek_port); replies/rejects produced along the way are
 * collected in a local xmit queue and flushed after each socket is
 * handled.  Messages with no matching socket get a secondary name-table
 * lookup and are otherwise reversed back to the sender with
 * TIPC_ERR_NO_PORT.
 */
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
struct sk_buff_head xmitq;
u32 dnode, dport = 0;
int err;
struct tipc_sock *tsk;
struct sock *sk;
struct sk_buff *skb;
__skb_queue_head_init(&xmitq);
while (skb_queue_len(inputq)) {
/* Next destination port present in the queue (resumes after dport). */
dport = tipc_skb_peek_port(inputq, dport);
tsk = tipc_sk_lookup(net, dport);
if (likely(tsk)) {
sk = &tsk->sk;
/* Only enqueue if the socket lock is immediately available;
 * otherwise the messages stay on inputq for a later pass. */
if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
tipc_sk_enqueue(inputq, sk, dport, &xmitq);
spin_unlock_bh(&sk->sk_lock.slock);
}
/* Send pending response/rejected messages, if any */
tipc_node_distr_xmit(sock_net(sk), &xmitq);
/* Drop the reference taken by tipc_sk_lookup(). */
sock_put(sk);
continue;
}
/* No destination socket => dequeue skb if still there */
skb = tipc_skb_dequeue(inputq, dport);
if (!skb)
return;
/* Try secondary lookup if unresolved named message */
err = TIPC_ERR_NO_PORT;
if (tipc_msg_lookup_dest(net, skb, &err))
goto xmit;
/* Prepare for message rejection */
if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
continue;
trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
xmit:
dnode = msg_destnode(buf_msg(skb));
tipc_node_xmit_skb(net, skb, dnode, dport);
}
}
| 0 |
[
"CWE-200",
"CWE-909"
] |
linux
|
d6d86830705f173fca6087a3e67ceaf68db80523
| 4,459,939,942,210,170,300,000,000,000,000,000,000 | 45 |
net ticp:fix a kernel-infoleak in __tipc_sendmsg()
struct tipc_socket_addr.ref has a 4-byte hole,and __tipc_getname() currently
copying it to user space,causing kernel-infoleak.
BUG: KMSAN: kernel-infoleak in instrument_copy_to_user include/linux/instrumented.h:121 [inline]
BUG: KMSAN: kernel-infoleak in instrument_copy_to_user include/linux/instrumented.h:121 [inline] lib/usercopy.c:33
BUG: KMSAN: kernel-infoleak in _copy_to_user+0x1c9/0x270 lib/usercopy.c:33 lib/usercopy.c:33
instrument_copy_to_user include/linux/instrumented.h:121 [inline]
instrument_copy_to_user include/linux/instrumented.h:121 [inline] lib/usercopy.c:33
_copy_to_user+0x1c9/0x270 lib/usercopy.c:33 lib/usercopy.c:33
copy_to_user include/linux/uaccess.h:209 [inline]
copy_to_user include/linux/uaccess.h:209 [inline] net/socket.c:287
move_addr_to_user+0x3f6/0x600 net/socket.c:287 net/socket.c:287
__sys_getpeername+0x470/0x6b0 net/socket.c:1987 net/socket.c:1987
__do_sys_getpeername net/socket.c:1997 [inline]
__se_sys_getpeername net/socket.c:1994 [inline]
__do_sys_getpeername net/socket.c:1997 [inline] net/socket.c:1994
__se_sys_getpeername net/socket.c:1994 [inline] net/socket.c:1994
__x64_sys_getpeername+0xda/0x120 net/socket.c:1994 net/socket.c:1994
do_syscall_x64 arch/x86/entry/common.c:51 [inline]
do_syscall_x64 arch/x86/entry/common.c:51 [inline] arch/x86/entry/common.c:82
do_syscall_64+0x54/0xd0 arch/x86/entry/common.c:82 arch/x86/entry/common.c:82
entry_SYSCALL_64_after_hwframe+0x44/0xae
Uninit was stored to memory at:
tipc_getname+0x575/0x5e0 net/tipc/socket.c:757 net/tipc/socket.c:757
__sys_getpeername+0x3b3/0x6b0 net/socket.c:1984 net/socket.c:1984
__do_sys_getpeername net/socket.c:1997 [inline]
__se_sys_getpeername net/socket.c:1994 [inline]
__do_sys_getpeername net/socket.c:1997 [inline] net/socket.c:1994
__se_sys_getpeername net/socket.c:1994 [inline] net/socket.c:1994
__x64_sys_getpeername+0xda/0x120 net/socket.c:1994 net/socket.c:1994
do_syscall_x64 arch/x86/entry/common.c:51 [inline]
do_syscall_x64 arch/x86/entry/common.c:51 [inline] arch/x86/entry/common.c:82
do_syscall_64+0x54/0xd0 arch/x86/entry/common.c:82 arch/x86/entry/common.c:82
entry_SYSCALL_64_after_hwframe+0x44/0xae
Uninit was stored to memory at:
msg_set_word net/tipc/msg.h:212 [inline]
msg_set_destport net/tipc/msg.h:619 [inline]
msg_set_word net/tipc/msg.h:212 [inline] net/tipc/socket.c:1486
msg_set_destport net/tipc/msg.h:619 [inline] net/tipc/socket.c:1486
__tipc_sendmsg+0x44fa/0x5890 net/tipc/socket.c:1486 net/tipc/socket.c:1486
tipc_sendmsg+0xeb/0x140 net/tipc/socket.c:1402 net/tipc/socket.c:1402
sock_sendmsg_nosec net/socket.c:704 [inline]
sock_sendmsg net/socket.c:724 [inline]
sock_sendmsg_nosec net/socket.c:704 [inline] net/socket.c:2409
sock_sendmsg net/socket.c:724 [inline] net/socket.c:2409
____sys_sendmsg+0xe11/0x12c0 net/socket.c:2409 net/socket.c:2409
___sys_sendmsg net/socket.c:2463 [inline]
___sys_sendmsg net/socket.c:2463 [inline] net/socket.c:2492
__sys_sendmsg+0x704/0x840 net/socket.c:2492 net/socket.c:2492
__do_sys_sendmsg net/socket.c:2501 [inline]
__se_sys_sendmsg net/socket.c:2499 [inline]
__do_sys_sendmsg net/socket.c:2501 [inline] net/socket.c:2499
__se_sys_sendmsg net/socket.c:2499 [inline] net/socket.c:2499
__x64_sys_sendmsg+0xe2/0x120 net/socket.c:2499 net/socket.c:2499
do_syscall_x64 arch/x86/entry/common.c:51 [inline]
do_syscall_x64 arch/x86/entry/common.c:51 [inline] arch/x86/entry/common.c:82
do_syscall_64+0x54/0xd0 arch/x86/entry/common.c:82 arch/x86/entry/common.c:82
entry_SYSCALL_64_after_hwframe+0x44/0xae
Local variable skaddr created at:
__tipc_sendmsg+0x2d0/0x5890 net/tipc/socket.c:1419 net/tipc/socket.c:1419
tipc_sendmsg+0xeb/0x140 net/tipc/socket.c:1402 net/tipc/socket.c:1402
Bytes 4-7 of 16 are uninitialized
Memory access of size 16 starts at ffff888113753e00
Data copied to user address 0000000020000280
Reported-by: [email protected]
Signed-off-by: Haimin Zhang <[email protected]>
Acked-by: Jon Maloy <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
|
libxlDomainShutdownThread(void *opaque)
{
/*
 * Thread entry point that reacts to a libxl domain-shutdown event.
 * Dispatches on the shutdown reason (poweroff / crash / reboot /
 * soft reset) and applies the domain's configured lifecycle action
 * (destroy, restart, coredump variants, preserve).  Owns and frees
 * the libxlShutdownThreadInfo passed in "opaque" and the embedded
 * libxl_event.  (Return type is on the line preceding this view.)
 */
struct libxlShutdownThreadInfo *shutdown_info = opaque;
virDomainObj *vm = shutdown_info->vm;
libxl_event *ev = shutdown_info->event;
libxlDriverPrivate *driver = shutdown_info->driver;
virObjectEvent *dom_event = NULL;
libxl_shutdown_reason xl_reason = ev->u.domain_shutdown.shutdown_reason;
g_autoptr(libxlDriverConfig) cfg = libxlDriverConfigGet(driver);
libxl_domain_config d_config;
libxl_domain_config_init(&d_config);
/* Serialize against other modifications of this domain. */
if (libxlDomainObjBeginJob(driver, vm, LIBXL_JOB_MODIFY) < 0)
goto cleanup;
if (xl_reason == LIBXL_SHUTDOWN_REASON_POWEROFF) {
virDomainObjSetState(vm, VIR_DOMAIN_SHUTOFF,
VIR_DOMAIN_SHUTOFF_SHUTDOWN);
dom_event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_SHUTDOWN);
switch ((virDomainLifecycleAction) vm->def->onPoweroff) {
case VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY:
libxlDomainShutdownHandleDestroy(driver, vm);
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_RESTART:
case VIR_DOMAIN_LIFECYCLE_ACTION_RESTART_RENAME:
libxlDomainShutdownHandleRestart(driver, vm);
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_PRESERVE:
case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_DESTROY:
case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_RESTART:
case VIR_DOMAIN_LIFECYCLE_ACTION_LAST:
goto endjob;
}
} else if (xl_reason == LIBXL_SHUTDOWN_REASON_CRASH) {
virDomainObjSetState(vm, VIR_DOMAIN_SHUTOFF,
VIR_DOMAIN_SHUTOFF_CRASHED);
dom_event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_CRASHED);
switch ((virDomainLifecycleAction) vm->def->onCrash) {
case VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY:
libxlDomainShutdownHandleDestroy(driver, vm);
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_RESTART:
case VIR_DOMAIN_LIFECYCLE_ACTION_RESTART_RENAME:
libxlDomainShutdownHandleRestart(driver, vm);
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_PRESERVE:
case VIR_DOMAIN_LIFECYCLE_ACTION_LAST:
goto endjob;
/* Coredump variants dump first, then destroy or restart. */
case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_DESTROY:
libxlDomainAutoCoreDump(driver, vm);
libxlDomainShutdownHandleDestroy(driver, vm);
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_RESTART:
libxlDomainAutoCoreDump(driver, vm);
libxlDomainShutdownHandleRestart(driver, vm);
goto endjob;
}
} else if (xl_reason == LIBXL_SHUTDOWN_REASON_REBOOT) {
virDomainObjSetState(vm, VIR_DOMAIN_SHUTOFF,
VIR_DOMAIN_SHUTOFF_SHUTDOWN);
dom_event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_SHUTDOWN);
switch ((virDomainLifecycleAction) vm->def->onReboot) {
case VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY:
libxlDomainShutdownHandleDestroy(driver, vm);
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_RESTART:
case VIR_DOMAIN_LIFECYCLE_ACTION_RESTART_RENAME:
libxlDomainShutdownHandleRestart(driver, vm);
goto endjob;
case VIR_DOMAIN_LIFECYCLE_ACTION_PRESERVE:
case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_DESTROY:
case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_RESTART:
case VIR_DOMAIN_LIFECYCLE_ACTION_LAST:
goto endjob;
}
} else if (xl_reason == LIBXL_SHUTDOWN_REASON_SOFT_RESET) {
libxlDomainObjPrivate *priv = vm->privateData;
/* Soft reset needs the current domain config; fall back to
 * destroying the domain if it cannot be retrieved. */
if (libxlRetrieveDomainConfigurationWrapper(cfg->ctx, vm->def->id,
&d_config) != 0) {
VIR_ERROR(_("Failed to retrieve config for VM '%s'. "
"Unable to perform soft reset. Destroying VM"),
vm->def->name);
libxlDomainShutdownHandleDestroy(driver, vm);
goto endjob;
}
/* Disarm the old death watcher; it is re-enabled after the reset. */
if (priv->deathW) {
libxl_evdisable_domain_death(cfg->ctx, priv->deathW);
priv->deathW = NULL;
}
if (libxl_domain_soft_reset(cfg->ctx, &d_config, vm->def->id,
NULL, NULL) != 0) {
VIR_ERROR(_("Failed to soft reset VM '%s'. Destroying VM"),
vm->def->name);
libxlDomainShutdownHandleDestroy(driver, vm);
goto endjob;
}
libxl_evenable_domain_death(cfg->ctx, vm->def->id, 0, &priv->deathW);
libxlDomainUnpauseWrapper(cfg->ctx, vm->def->id);
} else {
VIR_INFO("Unhandled shutdown_reason %d", xl_reason);
}
endjob:
libxlDomainObjEndJob(driver, vm);
cleanup:
/* Release references and the heap-allocated thread-info struct. */
virDomainObjEndAPI(&vm);
virObjectEventStateQueue(driver->domainEventState, dom_event);
libxl_event_free(cfg->ctx, ev);
VIR_FREE(shutdown_info);
libxl_domain_config_dispose(&d_config);
}
| 1 |
[
"CWE-667"
] |
libvirt
|
a4e6fba069c0809b8b5dde5e9db62d2efd91b4a0
| 71,813,687,864,462,610,000,000,000,000,000,000,000 | 125 |
libxl: Rename libxlShutdownThreadInfo struct
An upcoming change will use the struct in a thread created to process
death events. Rename libxlShutdownThreadInfo to libxlEventHandlerThreadInfo
to reflect the more generic usage.
Signed-off-by: Jim Fehlig <[email protected]>
Reviewed-by: Daniel P. Berrangé <[email protected]>
Reviewed-by: Ján Tomko <[email protected]>
|
/*
 * Handle the NVMe Identify (CNS: namespace, CSI-specific) admin command.
 * Validates the NSID (broadcast is rejected), looks the namespace up on
 * the controller and -- when "active" is false -- falls back to the
 * subsystem's namespace list, returning an all-zero identify structure
 * for nonexistent/inactive namespaces.  For CSI_NVM an empty structure
 * is returned; for a zoned namespace the zoned identify data is copied
 * to the host.  Any other CSI is an invalid-field error.
 */
static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req,
bool active)
{
NvmeNamespace *ns;
NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
uint32_t nsid = le32_to_cpu(c->nsid);
trace_pci_nvme_identify_ns_csi(nsid, c->csi);
if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
return NVME_INVALID_NSID | NVME_DNR;
}
ns = nvme_ns(n, nsid);
if (unlikely(!ns)) {
if (!active) {
/* Not attached to this controller: consult the subsystem. */
ns = nvme_subsys_ns(n->subsys, nsid);
if (!ns) {
return nvme_rpt_empty_id_struct(n, req);
}
} else {
return nvme_rpt_empty_id_struct(n, req);
}
}
if (c->csi == NVME_CSI_NVM) {
return nvme_rpt_empty_id_struct(n, req);
} else if (c->csi == NVME_CSI_ZONED && ns->csi == NVME_CSI_ZONED) {
/* Copy the zoned command-set identify structure to the host. */
return nvme_c2h(n, (uint8_t *)ns->id_ns_zoned, sizeof(NvmeIdNsZoned),
req);
}
return NVME_INVALID_FIELD | NVME_DNR;
}
| 0 |
[] |
qemu
|
736b01642d85be832385063f278fe7cd4ffb5221
| 138,318,871,750,740,540,000,000,000,000,000,000,000 | 34 |
hw/nvme: fix CVE-2021-3929
This fixes CVE-2021-3929 "locally" by denying DMA to the iomem of the
device itself. This still allows DMA to MMIO regions of other devices
(e.g. doing P2P DMA to the controller memory buffer of another NVMe
device).
Fixes: CVE-2021-3929
Reported-by: Qiuhao Li <[email protected]>
Reviewed-by: Keith Busch <[email protected]>
Reviewed-by: Philippe Mathieu-Daudé <[email protected]>
Signed-off-by: Klaus Jensen <[email protected]>
|
/*
 * Iterator callback used by Phar::buildFromIterator()/buildFromDirectory().
 * Each iterator value may be a string path, an open stream resource, or an
 * SplFileInfo object; the referenced data is copied into the phar archive
 * held in "puser" (a struct _phar_t).  Returns ZEND_HASH_APPLY_KEEP to
 * continue iteration, ZEND_HASH_APPLY_STOP on error (after throwing an
 * SPL/Phar exception).  Entry keys beginning with ".phar" are silently
 * skipped, as is anything rejected by open_basedir.
 */
static int phar_build(zend_object_iterator *iter, void *puser) /* {{{ */
{
zval *value;
zend_bool close_fp = 1;
struct _phar_t *p_obj = (struct _phar_t*) puser;
uint32_t str_key_len, base_len = p_obj->l;
phar_entry_data *data;
php_stream *fp;
size_t fname_len;
size_t contents_len;
char *fname, *error = NULL, *base = p_obj->b, *save = NULL, *temp = NULL;
zend_string *opened;
char *str_key;
zend_class_entry *ce = p_obj->c;
phar_archive_object *phar_obj = p_obj->p;
value = iter->funcs->get_current_data(iter);
if (EG(exception)) {
return ZEND_HASH_APPLY_STOP;
}
if (!value) {
/* failure in get_current_data */
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0, "Iterator %s returned no value", ZSTR_VAL(ce->name));
return ZEND_HASH_APPLY_STOP;
}
/* Dispatch on the kind of value the iterator produced. */
switch (Z_TYPE_P(value)) {
case IS_STRING:
break;
case IS_RESOURCE:
/* A stream resource: the iterator key supplies the entry name. */
php_stream_from_zval_no_verify(fp, value);
if (!fp) {
zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "Iterator %s returned an invalid stream handle", ZSTR_VAL(ce->name));
return ZEND_HASH_APPLY_STOP;
}
if (iter->funcs->get_current_key) {
zval key;
iter->funcs->get_current_key(iter, &key);
if (EG(exception)) {
return ZEND_HASH_APPLY_STOP;
}
if (Z_TYPE(key) != IS_STRING) {
zval_dtor(&key);
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0, "Iterator %s returned an invalid key (must return a string)", ZSTR_VAL(ce->name));
return ZEND_HASH_APPLY_STOP;
}
if (ZEND_SIZE_T_INT_OVFL(Z_STRLEN(key))) {
zval_dtor(&key);
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0, "Iterator %s returned an invalid key (too long)", ZSTR_VAL(ce->name));
return ZEND_HASH_APPLY_STOP;
}
str_key_len = (int)Z_STRLEN(key);
str_key = estrndup(Z_STRVAL(key), str_key_len);
save = str_key;
zval_dtor(&key);
} else {
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0, "Iterator %s returned an invalid key (must return a string)", ZSTR_VAL(ce->name));
return ZEND_HASH_APPLY_STOP;
}
/* The caller owns the resource stream; do not close it below. */
close_fp = 0;
opened = zend_string_init("[stream]", sizeof("[stream]") - 1, 0);
goto after_open_fp;
case IS_OBJECT:
if (instanceof_function(Z_OBJCE_P(value), spl_ce_SplFileInfo)) {
char *test = NULL;
zval dummy;
spl_filesystem_object *intern = (spl_filesystem_object*)((char*)Z_OBJ_P(value) - Z_OBJ_P(value)->handlers->offset);
if (!base_len) {
zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "Iterator %s returns an SplFileInfo object, so base directory must be specified", ZSTR_VAL(ce->name));
return ZEND_HASH_APPLY_STOP;
}
switch (intern->type) {
case SPL_FS_DIR:
/* Directory iterator entry: build the full path. */
test = spl_filesystem_object_get_path(intern, NULL);
fname_len = spprintf(&fname, 0, "%s%c%s", test, DEFAULT_SLASH, intern->u.dir.entry.d_name);
php_stat(fname, fname_len, FS_IS_DIR, &dummy);
if (Z_TYPE(dummy) == IS_TRUE) {
/* ignore directories */
efree(fname);
return ZEND_HASH_APPLY_KEEP;
}
test = expand_filepath(fname, NULL);
efree(fname);
if (test) {
fname = test;
fname_len = strlen(fname);
} else {
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0, "Could not resolve file path");
return ZEND_HASH_APPLY_STOP;
}
save = fname;
goto phar_spl_fileinfo;
case SPL_FS_INFO:
case SPL_FS_FILE:
fname = expand_filepath(intern->file_name, NULL);
if (!fname) {
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0, "Could not resolve file path");
return ZEND_HASH_APPLY_STOP;
}
fname_len = strlen(fname);
save = fname;
goto phar_spl_fileinfo;
}
}
/* fall-through */
default:
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0, "Iterator %s returned an invalid value (must return a string)", ZSTR_VAL(ce->name));
return ZEND_HASH_APPLY_STOP;
}
fname = Z_STRVAL_P(value);
fname_len = Z_STRLEN_P(value);
phar_spl_fileinfo:
/* With a base directory, the entry name is the path relative to it;
 * without one, the iterator key names the entry. */
if (base_len) {
temp = expand_filepath(base, NULL);
if (!temp) {
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0, "Could not resolve file path");
if (save) {
efree(save);
}
return ZEND_HASH_APPLY_STOP;
}
base = temp;
base_len = (int)strlen(base);
/* NOTE(review): strstr() only checks containment, not that fname
 * starts with base -- presumably fname always has base as a prefix
 * here; verify against callers. */
if (strstr(fname, base)) {
str_key_len = fname_len - base_len;
if (str_key_len <= 0) {
if (save) {
efree(save);
efree(temp);
}
return ZEND_HASH_APPLY_KEEP;
}
str_key = fname + base_len;
/* Strip a single leading directory separator. */
if (*str_key == '/' || *str_key == '\\') {
str_key++;
str_key_len--;
}
} else {
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0, "Iterator %s returned a path \"%s\" that is not in the base directory \"%s\"", ZSTR_VAL(ce->name), fname, base);
if (save) {
efree(save);
efree(temp);
}
return ZEND_HASH_APPLY_STOP;
}
} else {
if (iter->funcs->get_current_key) {
zval key;
iter->funcs->get_current_key(iter, &key);
if (EG(exception)) {
return ZEND_HASH_APPLY_STOP;
}
if (Z_TYPE(key) != IS_STRING) {
zval_dtor(&key);
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0, "Iterator %s returned an invalid key (must return a string)", ZSTR_VAL(ce->name));
return ZEND_HASH_APPLY_STOP;
}
if (ZEND_SIZE_T_INT_OVFL(Z_STRLEN(key))) {
zval_dtor(&key);
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0, "Iterator %s returned an invalid key (too long)", ZSTR_VAL(ce->name));
return ZEND_HASH_APPLY_STOP;
}
str_key_len = (int)Z_STRLEN(key);
str_key = estrndup(Z_STRVAL(key), str_key_len);
save = str_key;
zval_dtor(&key);
} else {
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0, "Iterator %s returned an invalid key (must return a string)", ZSTR_VAL(ce->name));
return ZEND_HASH_APPLY_STOP;
}
}
if (php_check_open_basedir(fname)) {
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0, "Iterator %s returned a path \"%s\" that open_basedir prevents opening", ZSTR_VAL(ce->name), fname);
if (save) {
efree(save);
}
if (temp) {
efree(temp);
}
return ZEND_HASH_APPLY_STOP;
}
/* try to open source file, then create internal phar file and copy contents */
fp = php_stream_open_wrapper(fname, "rb", STREAM_MUST_SEEK|0, &opened);
if (!fp) {
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0, "Iterator %s returned a file that could not be opened \"%s\"", ZSTR_VAL(ce->name), fname);
if (save) {
efree(save);
}
if (temp) {
efree(temp);
}
return ZEND_HASH_APPLY_STOP;
}
after_open_fp:
if (str_key_len >= sizeof(".phar")-1 && !memcmp(str_key, ".phar", sizeof(".phar")-1)) {
/* silently skip any files that would be added to the magic .phar directory */
if (save) {
efree(save);
}
if (temp) {
efree(temp);
}
if (opened) {
zend_string_release(opened);
}
if (close_fp) {
php_stream_close(fp);
}
return ZEND_HASH_APPLY_KEEP;
}
if (!(data = phar_get_or_create_entry_data(phar_obj->archive->fname, phar_obj->archive->fname_len, str_key, str_key_len, "w+b", 0, &error, 1))) {
zend_throw_exception_ex(spl_ce_BadMethodCallException, 0, "Entry %s cannot be created: %s", str_key, error);
efree(error);
if (save) {
efree(save);
}
if (opened) {
zend_string_release(opened);
}
if (temp) {
efree(temp);
}
if (close_fp) {
php_stream_close(fp);
}
return ZEND_HASH_APPLY_STOP;
} else {
if (error) {
efree(error);
}
/* convert to PHAR_UFP */
if (data->internal_file->fp_type == PHAR_MOD) {
php_stream_close(data->internal_file->fp);
}
data->internal_file->fp = NULL;
data->internal_file->fp_type = PHAR_UFP;
data->internal_file->offset_abs = data->internal_file->offset = php_stream_tell(p_obj->fp);
data->fp = NULL;
/* Copy the source stream into the archive's uncompressed fp. */
php_stream_copy_to_stream_ex(fp, p_obj->fp, PHP_STREAM_COPY_ALL, &contents_len);
data->internal_file->uncompressed_filesize = data->internal_file->compressed_filesize =
php_stream_tell(p_obj->fp) - data->internal_file->offset;
}
if (close_fp) {
php_stream_close(fp);
}
/* Report the mapping entry-name => opened-path back to the caller. */
add_assoc_str(p_obj->ret, str_key, opened);
if (save) {
efree(save);
}
if (temp) {
efree(temp);
}
/* NOTE(review): sizes are overwritten here with contents_len after
 * being computed from stream offsets above -- confirm both agree. */
data->internal_file->compressed_filesize = data->internal_file->uncompressed_filesize = contents_len;
phar_entry_delref(data);
return ZEND_HASH_APPLY_KEEP;
}
| 1 |
[
"CWE-281"
] |
php-src
|
e5c95234d87fcb8f6b7569a96a89d1e1544749a6
| 291,454,051,633,721,800,000,000,000,000,000,000,000 | 315 |
Fix bug #79082 - Files added to tar with Phar::buildFromIterator have all-access permissions
|
/* Return the egg's buffer rendered as a newly allocated C string
 * (thin wrapper over r_buf_to_string; caller owns the result,
 * per the usual r_buf_to_string contract -- verify in r_util). */
R_API char *r_egg_to_string(REgg *egg) {
return r_buf_to_string (egg->buf);
}
| 0 |
[
"CWE-125"
] |
radare2
|
e710401ebb4a892a87b0c709d709af8b5dcbbb01
| 294,011,400,619,038,830,000,000,000,000,000,000,000 | 3 |
patch #14211 heap buffer overflow in large ragg2
inputs. this should be refactored to use an RBuffer to enable dynamic
resizing, but for now just patching it to bail out if we are about to
overwrite the allocated statically sized buffer
|
/*
 * Phar::copy(string $oldfile, string $newfile): bool
 * Duplicate an existing archive entry under a new name within the same
 * phar.  Rejects read-only phars, ".phar"-prefixed (meta) names on
 * either side, missing sources, pre-existing destinations, and invalid
 * destination paths; copies the entry's metadata zval and, for
 * non-PHAR_FP entries, its file pointer, then flushes the archive.
 */
PHP_METHOD(Phar, copy)
{
char *oldfile, *newfile, *error;
const char *pcr_error;
int oldfile_len, newfile_len;
phar_entry_info *oldentry, newentry = {0}, *temp;
PHAR_ARCHIVE_OBJECT();
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "ss", &oldfile, &oldfile_len, &newfile, &newfile_len) == FAILURE) {
return;
}
if (PHAR_G(readonly) && !phar_obj->arc.archive->is_data) {
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC,
"Cannot copy \"%s\" to \"%s\", phar is read-only", oldfile, newfile);
RETURN_FALSE;
}
if (oldfile_len >= sizeof(".phar")-1 && !memcmp(oldfile, ".phar", sizeof(".phar")-1)) {
/* can't copy a meta file */
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC,
"file \"%s\" cannot be copied to file \"%s\", cannot copy Phar meta-file in %s", oldfile, newfile, phar_obj->arc.archive->fname);
RETURN_FALSE;
}
if (newfile_len >= sizeof(".phar")-1 && !memcmp(newfile, ".phar", sizeof(".phar")-1)) {
/* can't copy a meta file */
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC,
"file \"%s\" cannot be copied to file \"%s\", cannot copy to Phar meta-file in %s", oldfile, newfile, phar_obj->arc.archive->fname);
RETURN_FALSE;
}
/* The source entry must exist in the manifest and not be deleted. */
if (!zend_hash_exists(&phar_obj->arc.archive->manifest, oldfile, (uint) oldfile_len) || SUCCESS != zend_hash_find(&phar_obj->arc.archive->manifest, oldfile, (uint) oldfile_len, (void**)&oldentry) || oldentry->is_deleted) {
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC,
"file \"%s\" cannot be copied to file \"%s\", file does not exist in %s", oldfile, newfile, phar_obj->arc.archive->fname);
RETURN_FALSE;
}
if (zend_hash_exists(&phar_obj->arc.archive->manifest, newfile, (uint) newfile_len)) {
/* NOTE(review): the "||" makes this throw whenever the entry is
 * found, even if it is marked deleted -- looks like "&&" was
 * intended to allow reusing deleted slots; confirm upstream. */
if (SUCCESS == zend_hash_find(&phar_obj->arc.archive->manifest, newfile, (uint) newfile_len, (void**)&temp) || !temp->is_deleted) {
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC,
"file \"%s\" cannot be copied to file \"%s\", file must not already exist in phar %s", oldfile, newfile, phar_obj->arc.archive->fname);
RETURN_FALSE;
}
}
if (phar_path_check(&newfile, &newfile_len, &pcr_error) > pcr_is_ok) {
zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC,
"file \"%s\" contains invalid characters %s, cannot be copied from \"%s\" in phar %s", newfile, pcr_error, oldfile, phar_obj->arc.archive->fname);
RETURN_FALSE;
}
if (phar_obj->arc.archive->is_persistent) {
if (FAILURE == phar_copy_on_write(&(phar_obj->arc.archive) TSRMLS_CC)) {
zend_throw_exception_ex(phar_ce_PharException, 0 TSRMLS_CC, "phar \"%s\" is persistent, unable to copy on write", phar_obj->arc.archive->fname);
return;
}
/* re-populate with copied-on-write entry */
zend_hash_find(&phar_obj->arc.archive->manifest, oldfile, (uint) oldfile_len, (void**)&oldentry);
}
/* Shallow-copy the entry, then deep-copy the metadata zval so the
 * new entry owns its own refcounted copy. */
memcpy((void *) &newentry, oldentry, sizeof(phar_entry_info));
if (newentry.metadata) {
zval *t;
t = newentry.metadata;
ALLOC_ZVAL(newentry.metadata);
*newentry.metadata = *t;
zval_copy_ctor(newentry.metadata);
Z_SET_REFCOUNT_P(newentry.metadata, 1);
newentry.metadata_str.c = NULL;
newentry.metadata_str.len = 0;
}
newentry.filename = estrndup(newfile, newfile_len);
newentry.filename_len = newfile_len;
newentry.fp_refcount = 0;
if (oldentry->fp_type != PHAR_FP) {
if (FAILURE == phar_copy_entry_fp(oldentry, &newentry, &error TSRMLS_CC)) {
efree(newentry.filename);
php_stream_close(newentry.fp);
zend_throw_exception_ex(phar_ce_PharException, 0 TSRMLS_CC, "%s", error);
efree(error);
return;
}
}
zend_hash_add(&oldentry->phar->manifest, newfile, newfile_len, (void*)&newentry, sizeof(phar_entry_info), NULL);
phar_obj->arc.archive->is_modified = 1;
/* Persist the updated manifest to disk. */
phar_flush(phar_obj->arc.archive, 0, 0, 0, &error TSRMLS_CC);
if (error) {
zend_throw_exception_ex(phar_ce_PharException, 0 TSRMLS_CC, "%s", error);
efree(error);
}
RETURN_TRUE;
}
| 0 |
[
"CWE-416"
] |
php-src
|
b2cf3f064b8f5efef89bb084521b61318c71781b
| 201,455,425,313,882,500,000,000,000,000,000,000,000 | 102 |
Fixed bug #68901 (use after free)
|
/*
 * Phar::isValidPharFilename(string $filename, bool $executable = true): bool
 * Returns whether $filename carries a valid phar extension, delegating
 * to phar_detect_phar_fname_ext().  $executable selects which extension
 * set is accepted (executable .phar vs. data archives).
 */
PHP_METHOD(Phar, isValidPharFilename)
{
char *fname;
const char *ext_str;
int fname_len, ext_len, is_executable;
zend_bool executable = 1;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|b", &fname, &fname_len, &executable) == FAILURE) {
return;
}
/* Widen the zend_bool flag to the int the detector expects. */
is_executable = executable;
RETVAL_BOOL(phar_detect_phar_fname_ext(fname, fname_len, &ext_str, &ext_len, is_executable, 2, 1 TSRMLS_CC) == SUCCESS);
}
| 0 |
[
"CWE-416"
] |
php-src
|
b2cf3f064b8f5efef89bb084521b61318c71781b
| 322,223,242,466,590,700,000,000,000,000,000,000,000 | 14 |
Fixed bug #68901 (use after free)
|
mp_sint32 LoaderXM::load(XMFileBase& f, XModule* module)
{
mp_ubyte insData[230];
mp_sint32 smpReloc[MP_MAXINSSAMPS];
mp_ubyte nbu[MP_MAXINSSAMPS];
mp_uint32 fileSize = 0;
module->cleanUp();
// this will make code much easier to read
TXMHeader* header = &module->header;
TXMInstrument* instr = module->instr;
TXMSample* smp = module->smp;
TXMPattern* phead = module->phead;
// we're already out of memory here
if (!phead || !instr || !smp)
return MP_OUT_OF_MEMORY;
fileSize = f.sizeWithBaseOffset();
f.read(&header->sig,1,17);
f.read(&header->name,1,20);
f.read(&header->whythis1a,1,1);
header->whythis1a=0;
f.read(&header->tracker,1,20);
f.readWords(&header->ver,1);
if (header->ver != 0x102 &&
header->ver != 0x103 && // untested
header->ver != 0x104)
return MP_LOADER_FAILED;
f.readDwords(&header->hdrsize,1);
header->hdrsize-=4;
mp_uint32 hdrSize = 0x110;
if (header->hdrsize > hdrSize)
hdrSize = header->hdrsize;
mp_ubyte* hdrBuff = new mp_ubyte[hdrSize];
memset(hdrBuff, 0, hdrSize);
f.read(hdrBuff, 1, header->hdrsize);
header->ordnum = LittleEndian::GET_WORD(hdrBuff);
header->restart = LittleEndian::GET_WORD(hdrBuff+2);
header->channum = LittleEndian::GET_WORD(hdrBuff+4);
header->patnum = LittleEndian::GET_WORD(hdrBuff+6);
header->insnum = LittleEndian::GET_WORD(hdrBuff+8);
header->freqtab = LittleEndian::GET_WORD(hdrBuff+10);
header->tempo = LittleEndian::GET_WORD(hdrBuff+12);
header->speed = LittleEndian::GET_WORD(hdrBuff+14);
memcpy(header->ord, hdrBuff+16, 256);
if(header->ordnum > MP_MAXORDERS)
header->ordnum = MP_MAXORDERS;
if(header->insnum > MP_MAXINS)
return MP_LOADER_FAILED;
delete[] hdrBuff;
header->mainvol=255;
header->flags = XModule::MODULE_XMNOTECLIPPING |
XModule::MODULE_XMARPEGGIO |
XModule::MODULE_XMPORTANOTEBUFFER |
XModule::MODULE_XMVOLCOLUMNVIBRATO;
header->uppernotebound = 119;
mp_sint32 i,y,sc;
for (i=0;i<32;i++) header->pan[i]=0x80;
// old version?
if (header->ver == 0x102 || header->ver == 0x103)
{
mp_sint32 s = 0;
mp_sint32 e = 0;
for (y=0;y<header->insnum;y++) {
f.readDwords(&instr[y].size,1);
f.read(&instr[y].name,1,22);
f.read(&instr[y].type,1,1);
mp_uword numSamples = 0;
f.readWords(&numSamples,1);
if(numSamples > MP_MAXINSSAMPS)
return MP_LOADER_FAILED;
instr[y].samp = numSamples;
if (instr[y].size == 29)
{
#ifdef MILKYTRACKER
s+=16;
#endif
for (mp_sint32 i = 0; i < 120; i++)
instr[y].snum[i] = -1;
continue;
}
f.readDwords(&instr[y].shsize,1);
memset(insData, 0, 230);
if (instr[y].size - 33 > 230)
return MP_OUT_OF_MEMORY;
f.read(insData, 1, instr[y].size - 33);
if (instr[y].samp) {
mp_ubyte* insDataPtr = insData;
memcpy(nbu, insDataPtr, MP_MAXINSSAMPS);
insDataPtr+=MP_MAXINSSAMPS;
TEnvelope venv;
TEnvelope penv;
memset(&venv,0,sizeof(venv));
memset(&penv,0,sizeof(penv));
mp_sint32 k;
for (k = 0; k < XM_ENVELOPENUMPOINTS; k++)
{
venv.env[k][0] = LittleEndian::GET_WORD(insDataPtr);
venv.env[k][1] = LittleEndian::GET_WORD(insDataPtr+2);
insDataPtr+=4;
}
for (k = 0; k < XM_ENVELOPENUMPOINTS; k++)
{
penv.env[k][0] = LittleEndian::GET_WORD(insDataPtr);
penv.env[k][1] = LittleEndian::GET_WORD(insDataPtr+2);
insDataPtr+=4;
}
venv.num = *insDataPtr++;
if (venv.num > XM_ENVELOPENUMPOINTS) venv.num = XM_ENVELOPENUMPOINTS;
penv.num = *insDataPtr++;
if (penv.num > XM_ENVELOPENUMPOINTS) penv.num = XM_ENVELOPENUMPOINTS;
venv.sustain = *insDataPtr++;
venv.loops = *insDataPtr++;
venv.loope = *insDataPtr++;
penv.sustain = *insDataPtr++;
penv.loops = *insDataPtr++;
penv.loope = *insDataPtr++;
venv.type = *insDataPtr++;
penv.type = *insDataPtr++;
mp_ubyte vibtype, vibsweep, vibdepth, vibrate;
mp_uword volfade;
vibtype = *insDataPtr++;
vibsweep = *insDataPtr++;
vibdepth = *insDataPtr++;
vibrate = *insDataPtr++;
vibdepth<<=1;
volfade = LittleEndian::GET_WORD(insDataPtr);
insDataPtr+=2;
volfade<<=1;
//instr[y].res = LittleEndian::GET_WORD(insDataPtr);
insDataPtr+=2;
for (mp_sint32 l=0;l<XM_ENVELOPENUMPOINTS;l++) {
venv.env[l][1]<<=2;
penv.env[l][1]<<=2;
}
if (!module->addVolumeEnvelope(venv))
return MP_OUT_OF_MEMORY;
if (!module->addPanningEnvelope(penv))
return MP_OUT_OF_MEMORY;
mp_sint32 g=0, sc;
for (sc=0;sc<instr[y].samp;sc++) {
smp[g+s].flags=3;
smp[g+s].venvnum=e+1;
smp[g+s].penvnum=e+1;
smp[g+s].vibtype=vibtype;
smp[g+s].vibsweep=vibsweep;
smp[g+s].vibdepth=vibdepth;
smp[g+s].vibrate=vibrate;
smp[g+s].volfade=volfade;
// not sure why I did that, actually doesn't make sense
//if (!(venv.type&1)) smp[g+s].volfade=0;
f.readDwords(&smp[g+s].samplen,1);
f.readDwords(&smp[g+s].loopstart,1);
f.readDwords(&smp[g+s].looplen,1);
smp[g+s].vol=XModule::vol64to255(f.readByte());
//f.read(&smp[g+s].vol,1,1);
f.read(&smp[g+s].finetune,1,1);
f.read(&smp[g+s].type,1,1);
#ifdef VERBOSE
printf("Before: %i, After: %i\n", smp[g+s].type, smp[g+s].type & (3+16));
#endif
f.read(&smp[g+s].pan,1,1);
f.read(&smp[g+s].relnote,1,1);
f.read(&smp[g+s].res,1,1);
f.read(&smp[g+s].name,1,22);
char line[30];
memset(line, 0, sizeof(line));
XModule::convertStr(line, smp[g+s].name, 23, false);
if (line[0])
module->addSongMessageLine(line);
// ignore empty samples
#ifndef MILKYTRACKER
// ignore empty samples when not being a tracker
if (smp[g+s].samplen) {
smpReloc[sc] = g;
g++;
}
else
smpReloc[sc] = -1;
#else
smpReloc[sc] = g;
g++;
#endif
}
instr[y].samp = g;
for (sc = 0; sc < MP_MAXINSSAMPS; sc++) {
if (smpReloc[nbu[sc]] == -1)
instr[y].snum[sc] = -1;
else
instr[y].snum[sc] = smpReloc[nbu[sc]]+s;
}
e++;
}
else
{
for (mp_sint32 i = 0; i < 120; i++)
instr[y].snum[i] = -1;
}
#ifdef MILKYTRACKER
s+=16;
#else
s+=instr[y].samp;
#endif
}
header->smpnum=s;
header->volenvnum=e;
header->panenvnum=e;
}
for (y=0;y<header->patnum;y++) {
if (header->ver == 0x104 || header->ver == 0x103)
{
f.readDwords(&phead[y].len,1);
f.read(&phead[y].ptype,1,1);
f.readWords(&phead[y].rows,1);
f.readWords(&phead[y].patdata,1);
}
else
{
f.readDwords(&phead[y].len,1);
f.read(&phead[y].ptype,1,1);
phead[y].rows = (mp_uword)f.readByte()+1;
f.readWords(&phead[y].patdata,1);
}
phead[y].effnum=2;
phead[y].channum=(mp_ubyte)header->channum;
phead[y].patternData = new mp_ubyte[phead[y].rows*header->channum*6];
// out of memory?
if (phead[y].patternData == NULL)
{
return MP_OUT_OF_MEMORY;
}
memset(phead[y].patternData,0,phead[y].rows*header->channum*6);
if (phead[y].patdata) {
mp_ubyte *buffer = new mp_ubyte[phead[y].patdata];
// out of memory?
if (buffer == NULL)
{
return MP_OUT_OF_MEMORY;
}
f.read(buffer,1,phead[y].patdata);
//printf("%i\n", phead[y].patdata);
mp_sint32 pc = 0, bc = 0;
for (mp_sint32 r=0;r<phead[y].rows;r++) {
for (mp_sint32 c=0;c<header->channum;c++) {
mp_ubyte slot[5];
memset(slot,0,5);
if ((buffer[pc]&128)) {
mp_ubyte pb = buffer[pc];
pc++;
if ((pb&1)) {
//phead[y].patternData[bc]=buffer[pc];
slot[0]=buffer[pc];
pc++;
}
if ((pb&2)) {
//phead[y].patternData[bc+1]=buffer[pc];
slot[1]=buffer[pc];
pc++;
}
if ((pb&4)) {
//phead[y].patternData[bc+2]=buffer[pc];
slot[2]=buffer[pc];
pc++;
}
if ((pb&8)) {
//phead[y].patternData[bc+3]=buffer[pc];
slot[3]=buffer[pc];
pc++;
}
if ((pb&16)) {
//phead[y].patternData[bc+4]=buffer[pc];
slot[4]=buffer[pc];
pc++;
}
}
else {
//memcpy(phead[y].patternData+bc,buffer+pc,5);
memcpy(slot,buffer+pc,5);
pc+=5;
}
char gl=0;
for (mp_sint32 i=0;i<XModule::numValidXMEffects;i++)
if (slot[3]==XModule::validXMEffects[i]) gl=1;
if (!gl) slot[3]=slot[4]=0;
if ((slot[3]==0xC)||(slot[3]==0x10)) {
slot[4] = XModule::vol64to255(slot[4]);
/*mp_sint32 bl = slot[4];
if (bl>64) bl=64;
slot[4]=(bl*261120)>>16;*/
}
if ((!slot[3])&&(slot[4])) slot[3]=0x20;
if (slot[3]==0xE) {
slot[3]=(slot[4]>>4)+0x30;
slot[4]=slot[4]&0xf;
}
if (slot[3]==0x21) {
slot[3]=(slot[4]>>4)+0x40;
slot[4]=slot[4]&0xf;
}
if (slot[0]==97) slot[0]=XModule::NOTE_OFF;
phead[y].patternData[bc]=slot[0];
phead[y].patternData[bc+1]=slot[1];
XModule::convertXMVolumeEffects(slot[2], phead[y].patternData[bc+2], phead[y].patternData[bc+3]);
phead[y].patternData[bc+4]=slot[3];
phead[y].patternData[bc+5]=slot[4];
/*if ((y==3)&&(c==2)) {
for (mp_sint32 bl=0;bl<6;bl++) cprintf("%x ",phead[y].patternData[bc+bl]);
cprintf("\r\n");
getch();
};*/
/*printf("Note : %i\r\n",phead[y].patternData[bc]);
printf("Ins : %i\r\n",phead[y].patternData[bc+1]);
printf("Vol : %i\r\n",phead[y].patternData[bc+2]);
printf("Eff : %i\r\n",phead[y].patternData[bc+3]);
printf("Effop: %i\r\n",phead[y].patternData[bc+4]);
getch();*/
bc+=6;
} // for c
} // for r
delete[] buffer;
}
}
if (header->ver == 0x104)
{
mp_sint32 s = 0;
mp_sint32 e = 0;
for (y=0;y<header->insnum;y++) {
// fixes MOOH.XM loading problems
// seems to store more instruments in the header than in the actual file
if (f.posWithBaseOffset() >= fileSize)
break;
//TXMInstrument* ins = &instr[y];
f.readDwords(&instr[y].size,1);
if (instr[y].size >= 4 && instr[y].size < 29)
{
mp_ubyte buffer[29];
memset(buffer, 0, sizeof(buffer));
f.read(buffer, 1, instr[y].size - 4);
memcpy(instr[y].name, buffer, 22);
instr[y].type = buffer[22];
instr[y].samp = LittleEndian::GET_WORD(buffer + 23);
}
else
{
f.read(&instr[y].name,1,22);
f.read(&instr[y].type,1,1);
f.readWords(&instr[y].samp,1);
}
if (instr[y].samp > MP_MAXINSSAMPS)
return MP_LOADER_FAILED;
//printf("%i, %i\n", instr[y].size, instr[y].samp);
if (instr[y].size <= 29)
{
#ifdef MILKYTRACKER
s+=16;
#endif
for (mp_sint32 i = 0; i < 120; i++)
instr[y].snum[i] = -1;
continue;
}
f.readDwords(&instr[y].shsize,1);
#ifdef VERBOSE
printf("%i/%i: %i, %i, %i, %s\n",y,header->insnum-1,instr[y].size,instr[y].shsize,instr[y].samp,instr[y].name);
#endif
memset(insData, 0, 230);
if (instr[y].size - 33 > 230)
{
//return -7;
break;
}
f.read(insData, 1, instr[y].size - 33);
/*printf("%i\r\n",instr[y].size);
printf("%s\r\n",instr[y].name);
printf("%i\r\n",instr[y].type);
printf("%i\r\n",instr[y].samp);
printf("%i\r\n",instr[y].shsize);*/
//getch();
memset(smpReloc, 0, sizeof(smpReloc));
if (instr[y].samp) {
mp_ubyte* insDataPtr = insData;
//f.read(&nbu,1,96);
memcpy(nbu, insDataPtr, MP_MAXINSSAMPS);
insDataPtr+=MP_MAXINSSAMPS;
TEnvelope venv;
TEnvelope penv;
memset(&venv,0,sizeof(venv));
memset(&penv,0,sizeof(penv));
mp_sint32 k;
for (k = 0; k < XM_ENVELOPENUMPOINTS; k++)
{
venv.env[k][0] = LittleEndian::GET_WORD(insDataPtr);
venv.env[k][1] = LittleEndian::GET_WORD(insDataPtr+2);
insDataPtr+=4;
}
for (k = 0; k < XM_ENVELOPENUMPOINTS; k++)
{
penv.env[k][0] = LittleEndian::GET_WORD(insDataPtr);
penv.env[k][1] = LittleEndian::GET_WORD(insDataPtr+2);
insDataPtr+=4;
}
venv.num = *insDataPtr++;
if (venv.num > XM_ENVELOPENUMPOINTS) venv.num = XM_ENVELOPENUMPOINTS;
penv.num = *insDataPtr++;
if (penv.num > XM_ENVELOPENUMPOINTS) penv.num = XM_ENVELOPENUMPOINTS;
venv.sustain = *insDataPtr++;
venv.loops = *insDataPtr++;
venv.loope = *insDataPtr++;
penv.sustain = *insDataPtr++;
penv.loops = *insDataPtr++;
penv.loope = *insDataPtr++;
venv.type = *insDataPtr++;
penv.type = *insDataPtr++;
mp_ubyte vibtype, vibsweep, vibdepth, vibrate;
mp_uword volfade;
vibtype = *insDataPtr++;
vibsweep = *insDataPtr++;
vibdepth = *insDataPtr++;
vibrate = *insDataPtr++;
vibdepth<<=1;
//f.readWords(&volfade,1);
volfade = LittleEndian::GET_WORD(insDataPtr);
insDataPtr+=2;
volfade<<=1;
//instr[y].res = LittleEndian::GET_WORD(insDataPtr);
insDataPtr+=2;
for (mp_sint32 l=0;l<XM_ENVELOPENUMPOINTS;l++) {
venv.env[l][1]<<=2;
penv.env[l][1]<<=2;
}
if (!module->addVolumeEnvelope(venv))
return MP_OUT_OF_MEMORY;
if (!module->addPanningEnvelope(penv))
return MP_OUT_OF_MEMORY;
mp_sint32 g=0, sc;
for (sc=0;sc<instr[y].samp;sc++) {
//TXMSample* smpl = &smp[g+s];
smp[g+s].flags=3;
smp[g+s].venvnum=e+1;
smp[g+s].penvnum=e+1;
smp[g+s].vibtype=vibtype;
smp[g+s].vibsweep=vibsweep;
smp[g+s].vibdepth=vibdepth;
smp[g+s].vibrate=vibrate;
smp[g+s].volfade=volfade;
// not sure why I did that, actually doesn't make sense
//if (!(venv.type&1)) smp[g+s].volfade=0;
f.readDwords(&smp[g+s].samplen,1);
f.readDwords(&smp[g+s].loopstart,1);
f.readDwords(&smp[g+s].looplen,1);
smp[g+s].vol=XModule::vol64to255(f.readByte());
//f.read(&smp[g+s].vol,1,1);
f.read(&smp[g+s].finetune,1,1);
f.read(&smp[g+s].type,1,1);
#ifdef VERBOSE
printf("Before: %i, After: %i\n", smp[g+s].type, smp[g+s].type & (3+16));
#endif
f.read(&smp[g+s].pan,1,1);
f.read(&smp[g+s].relnote,1,1);
f.read(&smp[g+s].res,1,1);
f.read(&smp[g+s].name,1,22);
char line[30];
memset(line, 0, sizeof(line));
XModule::convertStr(line, smp[g+s].name, 23, false);
if (line[0])
module->addSongMessageLine(line);
#ifndef MILKYTRACKER
// ignore empty samples when not being a tracker
if (smp[g+s].samplen) {
smpReloc[sc] = g;
g++;
}
else
smpReloc[sc] = -1;
#else
smpReloc[sc] = g;
g++;
#endif
}
instr[y].samp = g;
for (sc = 0; sc < MP_MAXINSSAMPS; sc++) {
if (smpReloc[nbu[sc]] == -1)
instr[y].snum[sc] = -1;
else
instr[y].snum[sc] = smpReloc[nbu[sc]]+s;
}
for (sc=0;sc<instr[y].samp;sc++) {
if (smp[s].samplen)
{
bool adpcm = (smp[s].res == 0xAD);
mp_uint32 oldSize = smp[s].samplen;
if (smp[s].type&16)
{
smp[s].samplen>>=1;
smp[s].loopstart>>=1;
smp[s].looplen>>=1;
}
mp_sint32 result = module->loadModuleSample(f, s,
adpcm ? XModule::ST_PACKING_ADPCM : XModule::ST_DELTA,
adpcm ? (XModule::ST_PACKING_ADPCM | XModule::ST_16BIT) : (XModule::ST_DELTA | XModule::ST_16BIT),
oldSize);
if (result != MP_OK)
return result;
if (adpcm)
smp[s].res = 0;
}
s++;
if (s>=MP_MAXSAMPLES)
return MP_OUT_OF_MEMORY;
}
e++;
}
else
{
for (mp_sint32 i = 0; i < 120; i++)
instr[y].snum[i] = -1;
}
#ifdef MILKYTRACKER
s+=16 - instr[y].samp;
#endif
}
header->smpnum=s;
header->volenvnum=e;
header->panenvnum=e;
}
else
{
mp_sint32 s = 0;
for (y=0;y<header->insnum;y++) {
for (sc=0;sc<instr[y].samp;sc++) {
if (smp[s].samplen)
{
mp_uint32 oldSize = smp[s].samplen;
if (smp[s].type&16)
{
smp[s].samplen>>=1;
smp[s].loopstart>>=1;
smp[s].looplen>>=1;
}
mp_sint32 result = module->loadModuleSample(f, s, XModule::ST_DELTA, XModule::ST_DELTA | XModule::ST_16BIT, oldSize);
if (result != MP_OK)
return result;
}
s++;
if (s>=MP_MAXSAMPLES)
return MP_OUT_OF_MEMORY;
}
#ifdef MILKYTRACKER
s+=16 - instr[y].samp;
#endif
}
}
// convert modplug stereo samples
for (mp_sint32 s = 0; s < header->smpnum; s++)
{
if (smp[s].type & 32)
{
// that's what's allowed, stupid modplug tracker
smp[s].type &= 3+16;
if (smp[s].sample == NULL)
continue;
if (!(smp[s].type&16)) {
smp[s].samplen>>=1;
smp[s].loopstart>>=1;
smp[s].looplen>>=1;
mp_sbyte* sample = (mp_sbyte*)smp[s].sample;
mp_sint32 samplen = smp[s].samplen;
for (mp_sint32 i = 0; i < samplen; i++)
{
mp_sint32 s = ((mp_sint32)sample[i] + (mp_sint32)sample[i + samplen]) >> 1;
if (s < -128) s = -128;
if (s > 127) s = 127;
sample[i] = (mp_sbyte)s;
}
}
else
{
smp[s].samplen>>=1;
smp[s].loopstart>>=1;
smp[s].looplen>>=1;
mp_sword* sample = (mp_sword*)smp[s].sample;
mp_sint32 samplen = smp[s].samplen;
for (mp_sint32 i = 0; i < samplen; i++)
{
mp_sint32 s = ((mp_sint32)sample[i] + (mp_sint32)sample[i + samplen]) >> 1;
if (s < -32768) s = -32768;
if (s > 32767) s = 32767;
sample[i] = (mp_sword)s;
}
}
}
// correct loop type 0x03 (undefined)
// will become ping pong loop
// note that FT2 will refuse to load XM files with such a loop type
if ((smp[s].type & 0x3) == 0x3)
smp[s].type&=~1;
}
// correct number of patterns if necessary, otherwise the post processing will remove
// the "invalid" patterns from the order list
bool addPatterns = false;
for (i = 0; i < header->ordnum; i++)
if (header->ord[i]+1 > header->patnum)
{
header->patnum = header->ord[i]+1;
addPatterns = true;
}
// if the pattern number has been adjusted, add some empty patterns
if (addPatterns)
{
for (i = 0; i < header->patnum; i++)
if (phead[i].patternData == NULL)
{
phead[i].rows = 64;
phead[i].effnum = 2;
phead[i].channum = (mp_ubyte)header->channum;
phead[i].patternData = new mp_ubyte[phead[i].rows*header->channum*6];
// out of memory?
if (phead[i].patternData == NULL)
{
return MP_OUT_OF_MEMORY;
}
memset(phead[i].patternData,0,phead[i].rows*header->channum*6);
}
}
// check for MODPLUG extensions
if (f.posWithBaseOffset() + 8 <= fileSize)
{
char buffer[4];
f.read(buffer, 1, 4);
if (memcmp(buffer, "text", 4) == 0)
{
mp_uint32 len = f.readDword();
module->allocateSongMessage(len+1);
memset(module->message, 0, len+1);
f.read(module->message, 1, len);
}
}
module->postProcessSamples();
return MP_OK;
}
| 0 |
[
"CWE-787"
] |
MilkyTracker
|
3a5474f9102cbdc10fbd9e7b1b2c8d3f3f45d91b
| 117,553,339,823,195,960,000,000,000,000,000,000,000 | 790 |
Fix possible stack corruption with XM instrument headers claiming a size of less than 4
Closes #275
|
struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
{
return head->next;
}
| 1 |
[
"CWE-665"
] |
tip
|
511885d7061eda3eb1faf3f57dcc936ff75863f1
| 192,534,851,203,297,680,000,000,000,000,000,000,000 | 4 |
lib/timerqueue: Rely on rbtree semantics for next timer
Simplify the timerqueue code by using cached rbtrees and rely on the tree
leftmost node semantics to get the timer with earliest expiration time.
This is a drop in conversion, and therefore semantics remain untouched.
The runtime overhead of cached rbtrees is be pretty much the same as the
current head->next method, noting that when removing the leftmost node,
a common operation for the timerqueue, the rb_next(leftmost) is O(1) as
well, so the next timer will either be the right node or its parent.
Therefore no extra pointer chasing. Finally, the size of the struct
timerqueue_head remains the same.
Passes several hours of rcutorture.
Signed-off-by: Davidlohr Bueso <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Link: https://lkml.kernel.org/r/20190724152323.bojciei3muvfxalm@linux-r8p5
|
static int expand_occurrences(icalcomponent *ical, icalcomponent_kind kind,
struct calquery_filter *calfilter)
{
struct freebusy_array *freebusy = &calfilter->freebusy;
icalcomponent *comp;
icaltimezone *utc = icaltimezone_get_utc_timezone();
icaltime_span rangespan;
unsigned firstr, lastr;
/* If not saving busytime, reset our array */
if (!(calfilter->flags & BUSYTIME_QUERY)) freebusy->len = 0;
/* Create a span for the given time-range */
rangespan.start =
icaltime_as_timet_with_zone(calfilter->start, utc);
rangespan.end =
icaltime_as_timet_with_zone(calfilter->end, utc);
/* Mark start of where recurrences will be added */
firstr = freebusy->len;
/* Find the master component */
for (comp = icalcomponent_get_first_component(ical, kind);
comp &&
icalcomponent_get_first_property(comp, ICAL_RECURRENCEID_PROPERTY);
comp = icalcomponent_get_next_component(ical, kind));
if (is_busytime(calfilter, comp)) {
/* Add all recurring busytime in specified time-range */
icalcomponent_foreach_recurrence(comp,
calfilter->start,
calfilter->end,
add_freebusy_comp, calfilter);
}
/* Mark end of where recurrences were added */
lastr = freebusy->len;
/* Sort freebusy periods by start time */
qsort(freebusy->fb + firstr, freebusy->len - firstr,
sizeof(struct freebusy), compare_freebusy);
/* Handle overridden recurrences */
for (comp = icalcomponent_get_first_component(ical, kind);
comp; comp = icalcomponent_get_next_component(ical, kind)) {
icalproperty *prop;
struct icaltimetype recurid;
icalparameter *param;
struct freebusy *overridden;
icaltime_span recurspan;
/* The *_get_recurrenceid() functions don't appear
to deal with timezones properly, so we do it ourselves */
prop =
icalcomponent_get_first_property(comp, ICAL_RECURRENCEID_PROPERTY);
if (!prop) continue;
recurid = icalproperty_get_recurrenceid(prop);
param = icalproperty_get_first_parameter(prop, ICAL_TZID_PARAMETER);
if (param) {
const char *tzid = icalparameter_get_tzid(param);
icaltimezone *tz = NULL;
tz = icalcomponent_get_timezone(ical, tzid);
if (!tz) {
tz = icaltimezone_get_builtin_timezone_from_tzid(tzid);
}
if (tz) icaltime_set_timezone(&recurid, tz);
}
recurid =
icaltime_convert_to_zone(recurid,
icaltimezone_get_utc_timezone());
recurid.is_date = 0; /* make DATE-TIME for comparison */
/* Check if this overridden instance is in our array */
overridden = bsearch(&recurid, freebusy->fb + firstr, lastr - firstr,
sizeof(struct freebusy), compare_recurid);
if (overridden) {
/* "Remove" the instance
by setting fbtype to NONE (we ignore these later)
NOTE: MUST keep period.start otherwise bsearch() breaks */
/* XXX Doesn't handle the RANGE=THISANDFUTURE param */
overridden->type = ICAL_FBTYPE_NONE;
}
/* If overriding component isn't busytime, skip it */
if (!is_busytime(calfilter, comp)) continue;
/* Check if the new instance is in our time-range */
recurspan = icaltime_span_new(icalcomponent_get_dtstart(comp),
icalcomponent_get_dtend(comp), 1);
if (icaltime_span_overlaps(&recurspan, &rangespan)) {
/* Add this instance to the array */
add_freebusy_comp(comp, &recurspan, calfilter);
}
}
return (freebusy->len - firstr);
}
| 0 |
[
"CWE-787"
] |
cyrus-imapd
|
a5779db8163b99463e25e7c476f9cbba438b65f3
| 226,998,661,185,131,100,000,000,000,000,000,000,000 | 102 |
HTTP: don't overrun buffer when parsing strings with sscanf()
|
static void ip6gre_tunnel_uninit(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
ip6gre_tunnel_unlink(ign, t);
dst_cache_reset(&t->dst_cache);
dev_put(dev);
}
| 0 |
[
"CWE-125"
] |
net
|
7892032cfe67f4bde6fc2ee967e45a8fbaf33756
| 235,251,112,494,540,900,000,000,000,000,000,000,000 | 9 |
ip6_gre: fix ip6gre_err() invalid reads
Andrey Konovalov reported out of bound accesses in ip6gre_err()
If GRE flags contains GRE_KEY, the following expression
*(((__be32 *)p) + (grehlen / 4) - 1)
accesses data ~40 bytes after the expected point, since
grehlen includes the size of IPv6 headers.
Let's use a "struct gre_base_hdr *greh" pointer to make this
code more readable.
p[1] becomes greh->protocol.
grhlen is the GRE header length.
Fixes: c12b395a4664 ("gre: Support GRE over IPv6")
Signed-off-by: Eric Dumazet <[email protected]>
Reported-by: Andrey Konovalov <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
mm_answer_pam_init_ctx(int sock, Buffer *m)
{
debug3("%s", __func__);
sshpam_ctxt = (sshpam_device.init_ctx)(authctxt);
sshpam_authok = NULL;
buffer_clear(m);
if (sshpam_ctxt != NULL) {
monitor_permit(mon_dispatch, MONITOR_REQ_PAM_FREE_CTX, 1);
buffer_put_int(m, 1);
} else {
buffer_put_int(m, 0);
}
mm_request_send(sock, MONITOR_ANS_PAM_INIT_CTX, m);
return (0);
}
| 0 |
[
"CWE-20",
"CWE-200"
] |
openssh-portable
|
d4697fe9a28dab7255c60433e4dd23cf7fce8a8b
| 322,463,115,523,036,470,000,000,000,000,000,000,000 | 15 |
Don't resend username to PAM; it already has it.
Pointed out by Moritz Jodeit; ok dtucker@
|
NTLMSSPOWFencrypt (uschar passwd[8], uschar * ntlmchalresp, uschar p24[24])
{
uschar p21[21];
memset (p21, '\0', 21);
memcpy (p21, passwd, 8);
memset (p21 + 8, 0xbd, 8);
E_P24 (p21, ntlmchalresp, p24);
#ifdef DEBUG_PASSWORD
DEBUG_X (100, ("NTLMSSPOWFencrypt: p21, c8, p24\n"));
dump_data (100, CS p21, 21);
dump_data (100, CS ntlmchalresp, 8);
dump_data (100, CS p24, 24);
#endif
}
| 0 |
[
"CWE-125"
] |
exim
|
57aa14b216432be381b6295c312065b2fd034f86
| 184,900,131,140,792,030,000,000,000,000,000,000,000 | 16 |
Fix SPA authenticator, checking client-supplied data before using it. Bug 2571
|
void MainWindow::onKeyerTriggered(QAction *action)
{
LOG_DEBUG() << action->data().toString();
MLT.videoWidget()->setProperty("keyer", action->data());
MLT.consumerChanged();
Settings.setPlayerKeyerMode(action->data().toInt());
}
| 0 |
[
"CWE-89",
"CWE-327",
"CWE-295"
] |
shotcut
|
f008adc039642307f6ee3378d378cdb842e52c1d
| 219,391,733,322,680,720,000,000,000,000,000,000,000 | 7 |
fix upgrade check is not using TLS correctly
|
mobility_opt_print(netdissect_options *ndo,
const u_char *bp, const unsigned len)
{
unsigned i, optlen;
for (i = 0; i < len; i += optlen) {
ND_TCHECK(bp[i]);
if (bp[i] == IP6MOPT_PAD1)
optlen = 1;
else {
if (i + 1 < len) {
ND_TCHECK(bp[i + 1]);
optlen = bp[i + 1] + 2;
}
else
goto trunc;
}
if (i + optlen > len)
goto trunc;
ND_TCHECK(bp[i + optlen]);
switch (bp[i]) {
case IP6MOPT_PAD1:
ND_PRINT((ndo, "(pad1)"));
break;
case IP6MOPT_PADN:
if (len - i < IP6MOPT_MINLEN) {
ND_PRINT((ndo, "(padn: trunc)"));
goto trunc;
}
ND_PRINT((ndo, "(padn)"));
break;
case IP6MOPT_REFRESH:
if (len - i < IP6MOPT_REFRESH_MINLEN) {
ND_PRINT((ndo, "(refresh: trunc)"));
goto trunc;
}
/* units of 4 secs */
ND_TCHECK_16BITS(&bp[i+2]);
ND_PRINT((ndo, "(refresh: %u)",
EXTRACT_16BITS(&bp[i+2]) << 2));
break;
case IP6MOPT_ALTCOA:
if (len - i < IP6MOPT_ALTCOA_MINLEN) {
ND_PRINT((ndo, "(altcoa: trunc)"));
goto trunc;
}
ND_PRINT((ndo, "(alt-CoA: %s)", ip6addr_string(ndo, &bp[i+2])));
break;
case IP6MOPT_NONCEID:
if (len - i < IP6MOPT_NONCEID_MINLEN) {
ND_PRINT((ndo, "(ni: trunc)"));
goto trunc;
}
ND_PRINT((ndo, "(ni: ho=0x%04x co=0x%04x)",
EXTRACT_16BITS(&bp[i+2]),
EXTRACT_16BITS(&bp[i+4])));
break;
case IP6MOPT_AUTH:
if (len - i < IP6MOPT_AUTH_MINLEN) {
ND_PRINT((ndo, "(auth: trunc)"));
goto trunc;
}
ND_PRINT((ndo, "(auth)"));
break;
default:
if (len - i < IP6MOPT_MINLEN) {
ND_PRINT((ndo, "(sopt_type %u: trunc)", bp[i]));
goto trunc;
}
ND_PRINT((ndo, "(type-0x%02x: len=%u)", bp[i], bp[i + 1]));
break;
}
}
return 0;
trunc:
return 1;
}
| 1 |
[
"CWE-20",
"CWE-125"
] |
tcpdump
|
7d3aba9f06899d0128ef46e8a2fa143c6fad8f62
| 317,153,277,885,002,570,000,000,000,000,000,000,000 | 79 |
CVE-2017-13024/IPv6 mobility: Add a bounds check before fetching data
This fixes a buffer over-read discovered by Bhargava Shastry,
SecT/TU Berlin.
Add a test using the capture file supplied by the reporter(s), modified
so the capture file won't cause 'tcpdump: pcap_loop: truncated dump file'
|
void ok_inflater_reset(ok_inflater *inflater) {
if (inflater) {
inflater->input = NULL;
inflater->input_end = NULL;
inflater->input_buffer = 0;
inflater->input_buffer_bits = 0;
inflater->buffer_start_pos = 0;
inflater->buffer_end_pos = 0;
inflater->final_block = false;
inflater->state = (inflater->nowrap ? OK_INFLATER_STATE_READY_FOR_NEXT_BLOCK :
OK_INFLATER_STATE_READY_FOR_HEAD);
}
}
| 0 |
[
"CWE-787"
] |
ok-file-formats
|
e49cdfb84fb5eca2a6261f3c51a3c793fab9f62e
| 210,196,484,176,639,200,000,000,000,000,000,000,000 | 14 |
ok_png: Disallow multiple IHDR chunks (#15)
|
int skip_comments(FILE * file) {
int ch;
int n = 0;
while (EOF != (ch = get_char(file))) {
/* ch is now the first character of a line.
*/
if (++n > MAX_GARBAGE)
return FALSE;
while (ch == ' ' || ch == '\t') {
ch = get_char(file);
if (++n > MAX_GARBAGE)
return FALSE;
}
if (ch == EOF)
break;
/* ch is now the first non-blank character of a line.
*/
if (ch != '\n' && ch != '#')
break;
/* ch must be a newline or comment as first non-blank
* character on a line.
*/
while (ch != '\n' && ch != EOF) {
ch = get_char(file);
if (++n > MAX_GARBAGE)
return FALSE;
}
/* ch is now the newline of a line which we're going to
* ignore.
*/
}
if (ch != EOF)
unget_char(ch, file);
return TRUE;
}
| 0 |
[
"CWE-476"
] |
cronie
|
a6576769f01325303b11edc3e0cfb05ef382ce56
| 133,612,102,133,256,400,000,000,000,000,000,000,000 | 41 |
Fix CVE-2019-9704 and CVE-2019-9705
The users can cause DoS of the crond by loading huge crontab files.
We now allow maximum 1000 environment variables and 1000 crontab entries.
Also the comments and whitespace between the entries and variables
are now limited to 32768 characters.
|
set_data_file_type(SORT_INFO *sort_info, MYISAM_SHARE *share)
{
if ((sort_info->new_data_file_type=share->data_file_type) ==
COMPRESSED_RECORD && sort_info->param->testflag & T_UNPACK)
{
MYISAM_SHARE tmp;
if (share->options & HA_OPTION_PACK_RECORD)
sort_info->new_data_file_type = DYNAMIC_RECORD;
else
sort_info->new_data_file_type = STATIC_RECORD;
/* Set delete_function for sort_delete_record() */
memcpy((char*) &tmp, share, sizeof(*share));
tmp.options= ~HA_OPTION_COMPRESS_RECORD;
mi_setup_functions(&tmp);
share->delete_record=tmp.delete_record;
}
}
| 0 |
[
"CWE-362"
] |
mysql-server
|
4e5473862e6852b0f3802b0cd0c6fa10b5253291
| 96,558,972,944,994,700,000,000,000,000,000,000,000 | 19 |
Bug#24388746: PRIVILEGE ESCALATION AND RACE CONDITION USING CREATE TABLE
During REPAIR TABLE of a MyISAM table, a temporary data file (.TMD)
is created. When repair finishes, this file is renamed to the original
.MYD file. The problem was that during this rename, we copied the
stats from the old file to the new file with chmod/chown. If a user
managed to replace the temporary file before chmod/chown was executed,
it was possible to get an arbitrary file with the privileges of the
mysql user.
This patch fixes the problem by not copying stats from the old
file to the new file. This is not needed as the new file was
created with the correct stats. This fix only changes server
behavior - external utilities such as myisamchk still does
chmod/chown.
No test case provided since the problem involves synchronization
with file system operations.
|
char *__get_modname_by_ord(r_bin_le_obj_t *bin, ut32 ordinal) {
char *modname = NULL;
ut64 off = (ut64)bin->header->impmod + bin->headerOff;
while (ordinal > 0) {
free (modname);
modname = __read_nonnull_str_at (bin->buf, &off);
ordinal--;
}
return modname;
}
| 0 |
[
"CWE-252"
] |
radare2
|
d7ea20fb2e1433ebece9f004d87ad8f2377af23d
| 303,513,359,113,478,250,000,000,000,000,000,000,000 | 10 |
Fix #18923 - Fix resource exhaustion bug in LE binary (#18926)
|
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
struct inet_sock *inet = inet_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);
if (rc != 0)
return rc;
sk->sk_max_ack_backlog = 0;
sk->sk_ack_backlog = 0;
inet_csk_delack_init(sk);
/* There is race window here: we announce ourselves listening,
* but this transition is still not validated by get_port().
* It is OK, because this socket enters to hash table only
* after validation is complete.
*/
sk->sk_state = TCP_LISTEN;
if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
inet->inet_sport = htons(inet->inet_num);
sk_dst_reset(sk);
sk->sk_prot->hash(sk);
return 0;
}
sk->sk_state = TCP_CLOSE;
__reqsk_queue_destroy(&icsk->icsk_accept_queue);
return -EADDRINUSE;
}
| 0 |
[
"CWE-362"
] |
linux-2.6
|
f6d8bd051c391c1c0458a30b2a7abcd939329259
| 233,681,863,787,308,100,000,000,000,000,000,000,000 | 32 |
inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <[email protected]>
Cc: Herbert Xu <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
|
launch_router_descriptor_downloads(smartlist_t *downloadable,
routerstatus_t *source, time_t now)
{
int should_delay = 0, n_downloadable;
or_options_t *options = get_options();
n_downloadable = smartlist_len(downloadable);
if (!directory_fetches_dir_info_early(options)) {
if (n_downloadable >= MAX_DL_TO_DELAY) {
log_debug(LD_DIR,
"There are enough downloadable routerdescs to launch requests.");
should_delay = 0;
} else {
should_delay = (last_routerdesc_download_attempted +
MAX_CLIENT_INTERVAL_WITHOUT_REQUEST) > now;
if (!should_delay && n_downloadable) {
if (last_routerdesc_download_attempted) {
log_info(LD_DIR,
"There are not many downloadable routerdescs, but we've "
"been waiting long enough (%d seconds). Downloading.",
(int)(now-last_routerdesc_download_attempted));
} else {
log_info(LD_DIR,
"There are not many downloadable routerdescs, but we haven't "
"tried downloading descriptors recently. Downloading.");
}
}
}
}
/* XXX should we consider having even the dir mirrors delay
* a little bit, so we don't load the authorities as much? -RD
* I don't think so. If we do, clients that want those descriptors may
* not actually find them if the caches haven't got them yet. -NM
*/
if (! should_delay && n_downloadable) {
int i, n_per_request;
const char *req_plural = "", *rtr_plural = "";
int pds_flags = PDS_RETRY_IF_NO_SERVERS;
if (! authdir_mode_any_nonhidserv(options)) {
/* If we wind up going to the authorities, we want to only open one
* connection to each authority at a time, so that we don't overload
* them. We do this by setting PDS_NO_EXISTING_SERVERDESC_FETCH
* regardless of whether we're a cache or not; it gets ignored if we're
* not calling router_pick_trusteddirserver.
*
* Setting this flag can make initiate_descriptor_downloads() ignore
* requests. We need to make sure that we do in fact call
* update_router_descriptor_downloads() later on, once the connections
* have succeeded or failed.
*/
pds_flags |= PDS_NO_EXISTING_SERVERDESC_FETCH;
}
n_per_request = CEIL_DIV(n_downloadable, MIN_REQUESTS);
if (n_per_request > MAX_DL_PER_REQUEST)
n_per_request = MAX_DL_PER_REQUEST;
if (n_per_request < MIN_DL_PER_REQUEST)
n_per_request = MIN_DL_PER_REQUEST;
if (n_downloadable > n_per_request)
req_plural = rtr_plural = "s";
else if (n_downloadable > 1)
rtr_plural = "s";
log_info(LD_DIR,
"Launching %d request%s for %d router%s, %d at a time",
CEIL_DIV(n_downloadable, n_per_request),
req_plural, n_downloadable, rtr_plural, n_per_request);
smartlist_sort_digests(downloadable);
for (i=0; i < n_downloadable; i += n_per_request) {
initiate_descriptor_downloads(source, DIR_PURPOSE_FETCH_SERVERDESC,
downloadable, i, i+n_per_request,
pds_flags);
}
last_routerdesc_download_attempted = now;
}
}
| 0 |
[
"CWE-399"
] |
tor
|
308f6dad20675c42b29862f4269ad1fbfb00dc9a
| 297,245,239,394,399,370,000,000,000,000,000,000,000 | 78 |
Mitigate a side-channel leak of which relays Tor chooses for a circuit
Tor's and OpenSSL's current design guarantee that there are other leaks,
but this one is likely to be more easily exploitable, and is easy to fix.
|
buflist_match(
regmatch_T *rmp,
buf_T *buf,
int ignore_case) // when TRUE ignore case, when FALSE use 'fic'
{
char_u *match;
// First try the short file name, then the long file name.
match = fname_match(rmp, buf->b_sfname, ignore_case);
if (match == NULL)
match = fname_match(rmp, buf->b_ffname, ignore_case);
return match;
}
| 1 |
[
"CWE-476"
] |
vim
|
a59f2dfd0cf9ee1a584d3de5b7c2d47648e79060
| 83,973,833,373,093,060,000,000,000,000,000,000,000 | 14 |
patch 8.2.4938: crash when matching buffer with invalid pattern
Problem: Crash when matching buffer with invalid pattern.
Solution: Check for NULL regprog.
|
koi8_mbc_case_fold(OnigCaseFoldType flag ARG_UNUSED,
const UChar** pp, const UChar* end ARG_UNUSED, UChar* lower)
{
const UChar* p = *pp;
*lower = ENC_KOI8_TO_LOWER_CASE(*p);
(*pp)++;
return 1;
}
| 0 |
[
"CWE-125"
] |
oniguruma
|
65a9b1aa03c9bc2dc01b074295b9603232cb3b78
| 156,707,039,514,201,550,000,000,000,000,000,000,000 | 9 |
onig-5.9.2
|
static int on_frame_recv_cb(nghttp2_session *ng2s,
const nghttp2_frame *frame,
void *userp)
{
h2_session *session = (h2_session *)userp;
h2_stream *stream;
apr_status_t rv = APR_SUCCESS;
if (APLOGcdebug(session->c)) {
char buffer[256];
h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
H2_SSSN_LOG(APLOGNO(03066), session,
"recv FRAME[%s], frames=%ld/%ld (r/s)"),
buffer, (long)session->frames_received,
(long)session->frames_sent);
}
++session->frames_received;
switch (frame->hd.type) {
case NGHTTP2_HEADERS:
/* This can be HEADERS for a new stream, defining the request,
* or HEADER may come after DATA at the end of a stream as in
* trailers */
stream = get_stream(session, frame->hd.stream_id);
if (stream) {
rv = h2_stream_recv_frame(stream, NGHTTP2_HEADERS, frame->hd.flags,
frame->hd.length + H2_FRAME_HDR_LEN);
}
break;
case NGHTTP2_DATA:
stream = get_stream(session, frame->hd.stream_id);
if (stream) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
H2_STRM_LOG(APLOGNO(02923), stream,
"DATA, len=%ld, flags=%d"),
(long)frame->hd.length, frame->hd.flags);
rv = h2_stream_recv_frame(stream, NGHTTP2_DATA, frame->hd.flags,
frame->hd.length + H2_FRAME_HDR_LEN);
}
break;
case NGHTTP2_PRIORITY:
session->reprioritize = 1;
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
"h2_stream(%ld-%d): PRIORITY frame "
" weight=%d, dependsOn=%d, exclusive=%d",
session->id, (int)frame->hd.stream_id,
frame->priority.pri_spec.weight,
frame->priority.pri_spec.stream_id,
frame->priority.pri_spec.exclusive);
break;
case NGHTTP2_WINDOW_UPDATE:
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
"h2_stream(%ld-%d): WINDOW_UPDATE incr=%d",
session->id, (int)frame->hd.stream_id,
frame->window_update.window_size_increment);
if (nghttp2_session_want_write(session->ngh2)) {
dispatch_event(session, H2_SESSION_EV_FRAME_RCVD, 0, "window update");
}
break;
case NGHTTP2_RST_STREAM:
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03067)
"h2_stream(%ld-%d): RST_STREAM by client, errror=%d",
session->id, (int)frame->hd.stream_id,
(int)frame->rst_stream.error_code);
stream = get_stream(session, frame->hd.stream_id);
if (stream && stream->initiated_on) {
++session->pushes_reset;
}
else {
++session->streams_reset;
}
break;
case NGHTTP2_GOAWAY:
if (frame->goaway.error_code == 0
&& frame->goaway.last_stream_id == ((1u << 31) - 1)) {
/* shutdown notice. Should not come from a client... */
session->remote.accepting = 0;
}
else {
session->remote.accepted_max = frame->goaway.last_stream_id;
dispatch_event(session, H2_SESSION_EV_REMOTE_GOAWAY,
frame->goaway.error_code, NULL);
}
break;
case NGHTTP2_SETTINGS:
if (APLOGctrace2(session->c)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
H2_SSSN_MSG(session, "SETTINGS, len=%ld"), (long)frame->hd.length);
}
break;
default:
if (APLOGctrace2(session->c)) {
char buffer[256];
h2_util_frame_print(frame, buffer,
sizeof(buffer)/sizeof(buffer[0]));
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
H2_SSSN_MSG(session, "on_frame_rcv %s"), buffer);
}
break;
}
if (session->state == H2_SESSION_ST_IDLE) {
/* We received a frame, but session is in state IDLE. That means the frame
* did not really progress any of the (possibly) open streams. It was a meta
* frame, e.g. SETTINGS/WINDOW_UPDATE/unknown/etc.
* Remember: IDLE means we cannot send because either there are no streams open or
* all open streams are blocked on exhausted WINDOWs for outgoing data.
* The more frames we receive that do not change this, the less interested we
* become in serving this connection. This is expressed in increasing "idle_delays".
* Eventually, the connection will timeout and we'll close it. */
session->idle_frames = H2MIN(session->idle_frames + 1, session->frames_received);
ap_log_cerror( APLOG_MARK, APLOG_TRACE2, 0, session->c,
H2_SSSN_MSG(session, "session has %ld idle frames"),
(long)session->idle_frames);
if (session->idle_frames > 10) {
apr_size_t busy_frames = H2MAX(session->frames_received - session->idle_frames, 1);
int idle_ratio = (int)(session->idle_frames / busy_frames);
if (idle_ratio > 100) {
session->idle_delay = apr_time_from_msec(H2MIN(1000, idle_ratio));
}
else if (idle_ratio > 10) {
session->idle_delay = apr_time_from_msec(10);
}
else if (idle_ratio > 1) {
session->idle_delay = apr_time_from_msec(1);
}
else {
session->idle_delay = 0;
}
}
}
if (APR_SUCCESS != rv) return NGHTTP2_ERR_PROTO;
return 0;
}
| 1 |
[
"CWE-770"
] |
mod_h2
|
dd05d49abe0f67512ce9ed5ba422d7711effecfb
| 197,754,370,408,027,600,000,000,000,000,000,000,000 | 138 |
* fixes Timeout vs. KeepAliveTimeout behaviour, see PR 63534 (for trunk now,
mpm event backport to 2.4.x up for vote).
* Fixes stream cleanup when connection throttling is in place.
* Counts stream resets by client on streams initiated by client as cause
for connection throttling.
* Header length checks are now logged similar to HTTP/1.1 protocol handler (thanks @mkaufmann)
* Header length is checked also on the merged value from several header instances
and results in a 431 response.
|
/*
 * Locate the hardware-queue bitmap word that covers the given software
 * queue context: index_hw selects the bit, bits_per_word fixes which
 * blk_align_bitmap it lives in.
 */
static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
					      struct blk_mq_ctx *ctx)
{
	unsigned int word = ctx->index_hw / hctx->ctx_map.bits_per_word;

	return &hctx->ctx_map.map[word];
}
| 0 |
[
"CWE-362",
"CWE-264"
] |
linux
|
0048b4837affd153897ed1222283492070027aa9
| 256,687,183,409,471,460,000,000,000,000,000,000,000 | 5 |
blk-mq: fix race between timeout and freeing request
Inside timeout handler, blk_mq_tag_to_rq() is called
to retrieve the request from one tag. This way is obviously
wrong because the request can be freed any time and some
fiedds of the request can't be trusted, then kernel oops
might be triggered[1].
Currently wrt. blk_mq_tag_to_rq(), the only special case is
that the flush request can share same tag with the request
cloned from, and the two requests can't be active at the same
time, so this patch fixes the above issue by updating tags->rqs[tag]
with the active request(either flush rq or the request cloned
from) of the tag.
Also blk_mq_tag_to_rq() gets much simplified with this patch.
Given blk_mq_tag_to_rq() is mainly for drivers and the caller must
make sure the request can't be freed, so in bt_for_each() this
helper is replaced with tags->rqs[tag].
[1] kernel oops log
[ 439.696220] BUG: unable to handle kernel NULL pointer dereference at 0000000000000158^M
[ 439.697162] IP: [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.700653] PGD 7ef765067 PUD 7ef764067 PMD 0 ^M
[ 439.700653] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC ^M
[ 439.700653] Dumping ftrace buffer:^M
[ 439.700653] (ftrace buffer empty)^M
[ 439.700653] Modules linked in: nbd ipv6 kvm_intel kvm serio_raw^M
[ 439.700653] CPU: 6 PID: 2779 Comm: stress-ng-sigfd Not tainted 4.2.0-rc5-next-20150805+ #265^M
[ 439.730500] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011^M
[ 439.730500] task: ffff880605308000 ti: ffff88060530c000 task.ti: ffff88060530c000^M
[ 439.730500] RIP: 0010:[<ffffffff812d89ba>] [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.730500] RSP: 0018:ffff880819203da0 EFLAGS: 00010283^M
[ 439.730500] RAX: ffff880811b0e000 RBX: ffff8800bb465f00 RCX: 0000000000000002^M
[ 439.730500] RDX: 0000000000000000 RSI: 0000000000000202 RDI: 0000000000000000^M
[ 439.730500] RBP: ffff880819203db0 R08: 0000000000000002 R09: 0000000000000000^M
[ 439.730500] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000202^M
[ 439.730500] R13: ffff880814104800 R14: 0000000000000002 R15: ffff880811a2ea00^M
[ 439.730500] FS: 00007f165b3f5740(0000) GS:ffff880819200000(0000) knlGS:0000000000000000^M
[ 439.730500] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b^M
[ 439.730500] CR2: 0000000000000158 CR3: 00000007ef766000 CR4: 00000000000006e0^M
[ 439.730500] Stack:^M
[ 439.730500] 0000000000000008 ffff8808114eed90 ffff880819203e00 ffffffff812dc104^M
[ 439.755663] ffff880819203e40 ffffffff812d9f5e 0000020000000000 ffff8808114eed80^M
[ 439.755663] Call Trace:^M
[ 439.755663] <IRQ> ^M
[ 439.755663] [<ffffffff812dc104>] bt_for_each+0x6e/0xc8^M
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a^M
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a^M
[ 439.755663] [<ffffffff812dc1b3>] blk_mq_tag_busy_iter+0x55/0x5e^M
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38^M
[ 439.755663] [<ffffffff812d8911>] blk_mq_rq_timer+0x5d/0xd4^M
[ 439.755663] [<ffffffff810a3e10>] call_timer_fn+0xf7/0x284^M
[ 439.755663] [<ffffffff810a3d1e>] ? call_timer_fn+0x5/0x284^M
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38^M
[ 439.755663] [<ffffffff810a46d6>] run_timer_softirq+0x1ce/0x1f8^M
[ 439.755663] [<ffffffff8104c367>] __do_softirq+0x181/0x3a4^M
[ 439.755663] [<ffffffff8104c76e>] irq_exit+0x40/0x94^M
[ 439.755663] [<ffffffff81031482>] smp_apic_timer_interrupt+0x33/0x3e^M
[ 439.755663] [<ffffffff815559a4>] apic_timer_interrupt+0x84/0x90^M
[ 439.755663] <EOI> ^M
[ 439.755663] [<ffffffff81554350>] ? _raw_spin_unlock_irq+0x32/0x4a^M
[ 439.755663] [<ffffffff8106a98b>] finish_task_switch+0xe0/0x163^M
[ 439.755663] [<ffffffff8106a94d>] ? finish_task_switch+0xa2/0x163^M
[ 439.755663] [<ffffffff81550066>] __schedule+0x469/0x6cd^M
[ 439.755663] [<ffffffff8155039b>] schedule+0x82/0x9a^M
[ 439.789267] [<ffffffff8119b28b>] signalfd_read+0x186/0x49a^M
[ 439.790911] [<ffffffff8106d86a>] ? wake_up_q+0x47/0x47^M
[ 439.790911] [<ffffffff811618c2>] __vfs_read+0x28/0x9f^M
[ 439.790911] [<ffffffff8117a289>] ? __fget_light+0x4d/0x74^M
[ 439.790911] [<ffffffff811620a7>] vfs_read+0x7a/0xc6^M
[ 439.790911] [<ffffffff8116292b>] SyS_read+0x49/0x7f^M
[ 439.790911] [<ffffffff81554c17>] entry_SYSCALL_64_fastpath+0x12/0x6f^M
[ 439.790911] Code: 48 89 e5 e8 a9 b8 e7 ff 5d c3 0f 1f 44 00 00 55 89
f2 48 89 e5 41 54 41 89 f4 53 48 8b 47 60 48 8b 1c d0 48 8b 7b 30 48 8b
53 38 <48> 8b 87 58 01 00 00 48 85 c0 75 09 48 8b 97 88 0c 00 00 eb 10
^M
[ 439.790911] RIP [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.790911] RSP <ffff880819203da0>^M
[ 439.790911] CR2: 0000000000000158^M
[ 439.790911] ---[ end trace d40af58949325661 ]---^M
Cc: <[email protected]>
Signed-off-by: Ming Lei <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
|
/* Shared slow-path helper for the bitwise-AND opcode: computes
 * "op_1 & op_2" into the opline's result variable.  OP1_TYPE/OP2_TYPE
 * are specialized per generated VM variant. */
ZEND_VM_HELPER(zend_bw_and_helper, ANY, ANY, zval *op_1, zval *op_2)
{
	USE_OPLINE

	/* Persist the current opline before anything that can emit an error. */
	SAVE_OPLINE();
	/* Undefined operands are routed through ZVAL_UNDEFINED_OP*() first
	 * (presumably emitting the usual undefined-variable diagnostic and
	 * substituting NULL -- see those macros); op_1 is checked before
	 * op_2 so diagnostics appear in operand order. */
	if (UNEXPECTED(Z_TYPE_INFO_P(op_1) == IS_UNDEF)) {
		op_1 = ZVAL_UNDEFINED_OP1();
	}
	if (UNEXPECTED(Z_TYPE_INFO_P(op_2) == IS_UNDEF)) {
		op_2 = ZVAL_UNDEFINED_OP2();
	}
	bitwise_and_function(EX_VAR(opline->result.var), op_1, op_2);
	/* Release operands this opcode owns (temporaries / VM variables). */
	if (OP1_TYPE & (IS_TMP_VAR|IS_VAR)) {
		zval_ptr_dtor_nogc(op_1);
	}
	if (OP2_TYPE & (IS_TMP_VAR|IS_VAR)) {
		zval_ptr_dtor_nogc(op_2);
	}
	ZEND_VM_NEXT_OPCODE_CHECK_EXCEPTION();
}
| 0 |
[
"CWE-787"
] |
php-src
|
f1ce8d5f5839cb2069ea37ff424fb96b8cd6932d
| 217,608,036,569,211,030,000,000,000,000,000,000,000 | 20 |
Fix #73122: Integer Overflow when concatenating strings
We must avoid integer overflows in memory allocations, so we introduce
an additional check in the VM, and bail out in the rare case of an
overflow. Since the recent fix for bug #74960 still doesn't catch all
possible overflows, we fix that right away.
|
/* libcurl CURLOPT_SOCKOPTFUNCTION callback invoked right after socket
 * creation; passes the new fd to keep_sockalive() and returns its
 * result unchanged.  userdata and purpose are unused. */
int json_rpc_call_sockopt_cb(void __maybe_unused *userdata, curl_socket_t fd,
			     curlsocktype __maybe_unused purpose)
{
	const int rc = keep_sockalive(fd);

	return rc;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
bfgminer
|
c80ad8548251eb0e15329fc240c89070640c9d79
| 119,091,474,391,380,210,000,000,000,000,000,000,000 | 5 |
Stratum: extract_sockaddr: Truncate overlong addresses rather than stack overflow
Thanks to Mick Ayzenberg <[email protected]> for finding this!
|
SES_Wait(struct sess *sp, const struct transport *xp)
{
struct pool *pp;
struct waited *wp;
CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
CHECK_OBJ_NOTNULL(xp, TRANSPORT_MAGIC);
pp = sp->pool;
CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
assert(sp->fd > 0);
/*
* XXX: waiter_epoll prevents us from zeroing the struct because
* XXX: it keeps state across calls.
*/
VTCP_nonblocking(sp->fd);
/*
* put struct waited on the workspace
*/
if (WS_ReserveSize(sp->ws, sizeof(struct waited))
< sizeof(struct waited)) {
SES_Delete(sp, SC_OVERLOAD, NAN);
return;
}
wp = (void*)sp->ws->f;
INIT_OBJ(wp, WAITED_MAGIC);
wp->fd = sp->fd;
wp->priv1 = sp;
wp->priv2 = (uintptr_t)xp;
wp->idle = sp->t_idle;
wp->func = ses_handle;
wp->tmo = SESS_TMO(sp, timeout_idle);
if (Wait_Enter(pp->waiter, wp))
SES_Delete(sp, SC_PIPE_OVERFLOW, NAN);
}
| 1 |
[
"CWE-617"
] |
varnish-cache
|
2d8fc1a784a1e26d78c30174923a2b14ee2ebf62
| 30,431,409,820,464,630,000,000,000,000,000,000,000 | 35 |
Take sizeof pool_task into account when reserving WS in SES_Wait
The assert on WS_ReserveSize() in ses_handle() can not trip because
sizeof (struct pool_task) is less than sizeof (struct waited). But to safe
guard against future problems if that were to change, this patch makes
sure that the session workspace can hold the largest of them before
entering the waiter, erroring out if not.
|
/* Return 1 when @tcon is usable and its tree name matches @unc
 * (bounded compare over MAX_TREE_SIZE), 0 otherwise.  A tcon that is
 * tearing down (CifsExiting) never matches. */
static int match_tcon(struct cifs_tcon *tcon, const char *unc)
{
	return tcon->tidStatus != CifsExiting &&
	       strncmp(tcon->treeName, unc, MAX_TREE_SIZE) == 0;
}
| 0 |
[
"CWE-703",
"CWE-189"
] |
linux
|
1fc29bacedeabb278080e31bb9c1ecb49f143c3b
| 69,505,187,101,330,480,000,000,000,000,000,000,000 | 8 |
cifs: fix off-by-one bug in build_unc_path_to_root
commit 839db3d10a (cifs: fix up handling of prefixpath= option) changed
the code such that the vol->prepath no longer contained a leading
delimiter and then fixed up the places that accessed that field to
account for that change.
One spot in build_unc_path_to_root was missed however. When doing the
pointer addition on pos, that patch failed to account for the fact that
we had already incremented "pos" by one when adding the length of the
prepath. This caused a buffer overrun by one byte.
This patch fixes the problem by correcting the handling of "pos".
Cc: <[email protected]> # v3.8+
Reported-by: Marcus Moeller <[email protected]>
Reported-by: Ken Fallon <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]>
|
/*
 * do_rmdir - common implementation behind rmdir(2) / unlinkat(2) with
 * AT_REMOVEDIR.
 * @dfd:      directory fd relative paths are resolved against
 * @pathname: userspace path of the directory to remove
 *
 * Resolves the parent directory, rejects the special final components
 * ("..", ".", and the root), then looks up and removes the last
 * component under the parent inode's mutex.  On an ESTALE failure the
 * whole operation is retried once with LOOKUP_REVAL.  Returns 0 or a
 * negative errno.
 */
static long do_rmdir(int dfd, const char __user *pathname)
{
	int error = 0;
	struct filename *name;
	struct dentry *dentry;
	struct path path;
	struct qstr last;
	int type;
	unsigned int lookup_flags = 0;
retry:
	name = filename_parentat(dfd, getname(pathname), lookup_flags,
				&path, &last, &type);
	if (IS_ERR(name))
		return PTR_ERR(name);

	switch (type) {
	case LAST_DOTDOT:	/* "rmdir foo/.." */
		error = -ENOTEMPTY;
		goto exit1;
	case LAST_DOT:		/* "rmdir foo/." */
		error = -EINVAL;
		goto exit1;
	case LAST_ROOT:		/* "rmdir /" */
		error = -EBUSY;
		goto exit1;
	}

	/* Need write access to the mount before modifying it. */
	error = mnt_want_write(path.mnt);
	if (error)
		goto exit1;

	/* Parent i_mutex serializes the final lookup with the removal. */
	inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
	dentry = __lookup_hash(&last, path.dentry, lookup_flags);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto exit2;
	if (!dentry->d_inode) {
		/* Negative dentry: nothing to remove. */
		error = -ENOENT;
		goto exit3;
	}
	error = security_path_rmdir(&path, dentry);
	if (error)
		goto exit3;
	error = vfs_rmdir(path.dentry->d_inode, dentry);
exit3:
	dput(dentry);
exit2:
	inode_unlock(path.dentry->d_inode);
	mnt_drop_write(path.mnt);
exit1:
	path_put(&path);
	putname(name);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}
| 0 |
[
"CWE-362",
"CWE-399"
] |
linux
|
49d31c2f389acfe83417083e1208422b4091cd9e
| 317,087,463,864,121,170,000,000,000,000,000,000,000 | 58 |
dentry name snapshots
take_dentry_name_snapshot() takes a safe snapshot of dentry name;
if the name is a short one, it gets copied into caller-supplied
structure, otherwise an extra reference to external name is grabbed
(those are never modified). In either case the pointer to stable
string is stored into the same structure.
dentry must be held by the caller of take_dentry_name_snapshot(),
but may be freely dropped afterwards - the snapshot will stay
until destroyed by release_dentry_name_snapshot().
Intended use:
struct name_snapshot s;
take_dentry_name_snapshot(&s, dentry);
...
access s.name
...
release_dentry_name_snapshot(&s);
Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name
to pass down with event.
Signed-off-by: Al Viro <[email protected]>
|
/*
 * Parse an Adobe Font Metrics (AFM) file.
 *
 * fp    - open AFM file, positioned at the start.
 * fi    - out: receives a freshly calloc'd FontInfo; the caller owns it
 *         and its sub-structures.
 * flags - P_* bit mask selecting which sections are stored (global
 *         info, char widths/metrics, track/pair kerning, composites);
 *         unselected sections are still scanned past.
 *
 * Returns ok (0) or the first error code seen; parsing continues past
 * recoverable errors so later sections are still consumed.
 *
 * NOTE(review): section counts (numOfChars, numOfTracks, numOfPairs,
 * numOfComps) come straight from atoi() on file input and go to
 * calloc() unvalidated -- a malformed file can supply a negative or
 * absurd count; confirm the per-section parsers tolerate that.
 * NOTE(review): the storageProblem early returns leak `ident` and any
 * sub-structures already allocated into *fi -- acceptable only if the
 * caller treats OOM as fatal.
 */
extern int afm_parse_file(FILE *fp, FontInfo **fi, FLAGS flags)
{
    int code = ok; /* return code from each of the parsing routines */
    int error = ok; /* used as the return code from this function */
    register char *keyword; /* used to store a token */

    /* storage data for the global variable ident */
    ident = (char *) calloc(MAX_NAME, sizeof(char));
    if (ident == NULL) {error = storageProblem; return(error);}

    (*fi) = (FontInfo *) calloc(1, sizeof(FontInfo));
    if ((*fi) == NULL) {error = storageProblem; return(error);}

    if (flags & P_G)
    {
        (*fi)->gfi = (GlobalFontInfo *) calloc(1, sizeof(GlobalFontInfo));
        if ((*fi)->gfi == NULL) {error = storageProblem; return(error);}
    }

    /* The AFM File begins with Global Font Information. This section */
    /* will be parsed whether or not information should be saved. */
    code = parseGlobals(fp, (*fi)->gfi);

    if (code < 0) error = code;

    /* The Global Font Information is followed by the Character Metrics */
    /* section. Which procedure is used to parse this section depends on */
    /* how much information should be saved. If all of the metrics info */
    /* is wanted, parseCharMetrics is called. If only the character widths */
    /* is wanted, parseCharWidths is called. parseCharWidths will also */
    /* be called in the case that no character data is to be saved, just */
    /* to parse through the section. */

    if ((code != normalEOF) && (code != earlyEOF))
    {
        /* NOTE(review): unchecked atoi() of file data feeds calloc below. */
        (*fi)->numOfChars = atoi(token(fp));
        /* NOTE(review): P_M ^ P_W looks intentional -- presumably P_M
         * contains the P_W bit and the XOR isolates the "full metrics"
         * bit; confirm against the P_* flag definitions. */
        if (flags & (P_M ^ P_W))
        {
            (*fi)->cmi = (CharMetricInfo *)
                calloc((*fi)->numOfChars, sizeof(CharMetricInfo));
            if ((*fi)->cmi == NULL) {error = storageProblem; return(error);}
            code = parseCharMetrics(fp, *fi);
        }
        else
        {
            if (flags & P_W)
            {
                (*fi)->cwi = (int *) calloc(256, sizeof(int));
                if ((*fi)->cwi == NULL)
                {
                    error = storageProblem;
                    return(error);
                }
            }
            /* parse section regardless */
            code = parseCharWidths(fp, (*fi)->cwi);
        } /* else */
    } /* if */

    if ((error != earlyEOF) && (code < 0)) error = code;

    /* The remaining sections of the AFM are optional. This code will */
    /* look at the next keyword in the file to determine what section */
    /* is next, and then allocate the appropriate amount of storage */
    /* for the data (if the data is to be saved) and call the */
    /* appropriate parsing routine to parse the section. */

    while ((code != normalEOF) && (code != earlyEOF))
    {
        keyword = token(fp);
        if (keyword == NULL)
            /* Have reached an early and unexpected EOF. */
            /* Set flag and stop parsing */
        {
            code = earlyEOF;
            break; /* get out of loop */
        }
        switch(recognize(keyword))
        {
            case STARTKERNDATA:
                break;
            case ENDKERNDATA:
                break;
            case STARTTRACKKERN:
                keyword = token(fp);
                if (flags & P_T)
                {
                    /* NOTE(review): unchecked atoi() count (see header). */
                    (*fi)->numOfTracks = atoi(keyword);
                    (*fi)->tkd = (TrackKernData *)
                        calloc((*fi)->numOfTracks, sizeof(TrackKernData));
                    if ((*fi)->tkd == NULL)
                    {
                        error = storageProblem;
                        return(error);
                    }
                } /* if */
                code = parseTrackKernData(fp, *fi);
                break;
            case STARTKERNPAIRS:
                keyword = token(fp);
                if (flags & P_P)
                {
                    /* NOTE(review): unchecked atoi() count (see header). */
                    (*fi)->numOfPairs = atoi(keyword);
                    (*fi)->pkd = (PairKernData *)
                        calloc((*fi)->numOfPairs, sizeof(PairKernData));
                    if ((*fi)->pkd == NULL)
                    {
                        error = storageProblem;
                        return(error);
                    }
                } /* if */
                code = parsePairKernData(fp, *fi);
                break;
            case STARTCOMPOSITES:
                keyword = token(fp);
                if (flags & P_C)
                {
                    /* NOTE(review): unchecked atoi() count (see header). */
                    (*fi)->numOfComps = atoi(keyword);
                    (*fi)->ccd = (CompCharData *)
                        calloc((*fi)->numOfComps, sizeof(CompCharData));
                    if ((*fi)->ccd == NULL)
                    {
                        error = storageProblem;
                        return(error);
                    }
                } /* if */
                code = parseCompCharData(fp, *fi);
                break;
            case ENDFONTMETRICS:
                code = normalEOF;
                break;
            case NOPE:
            default:
                code = parseError;
                break;
        } /* switch */

        if ((error != earlyEOF) && (code < 0)) error = code;
    } /* while */

    if ((error != earlyEOF) && (code < 0)) error = code;

    if (ident != NULL) { free(ident); ident = NULL; }

    return(error);
} /* parseFile */
| 0 |
[
"CWE-20"
] |
evince
|
d4139205b010ed06310d14284e63114e88ec6de2
| 26,667,110,893,319,715,000,000,000,000,000,000,000 | 151 |
backends: Fix several security issues in the dvi-backend.
See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643.
|
// Destructor: delegates all teardown to clear() -- presumably releasing
// cipher contexts and key material owned by this instance; see clear()
// for the authoritative list of resources freed.
~GenericDecipher() {
  clear();
}
| 0 |
[
"CWE-78"
] |
ssh2
|
f763271f41320e71d5cbee02ea5bc6a2ded3ca21
| 300,878,372,505,225,880,000,000,000,000,000,000,000 | 3 |
examples,lib,test: switch to code rewrite
For more information see: https://github.com/mscdex/ssh2/issues/935
|
/*
 * Open the input stream backing an HTTP client connection.
 *
 * Picks the file to read (the feed file for feed-backed streams,
 * otherwise the stream's own input file), derives the starting position
 * from the request's "date"/"buffer" info tags, opens the demuxer and
 * seeks there.
 *
 * Returns 0 on success or a negative AVERROR code.  On success
 * c->fmt_in holds the opened context and the timing fields
 * (c->start_time, c->first_pts, c->pts_stream_index) are initialized.
 */
static int open_input_stream(HTTPContext *c, const char *info)
{
    char buf[128];
    char input_filename[1024];
    AVFormatContext *s = NULL;
    int buf_size, i, ret;
    int64_t stream_pos;

    /* find file name */
    if (c->stream->feed) {
        /* Bounded copy: the configured feed filename may exceed the
         * 1024-byte buffer; strcpy() here could overflow the stack. */
        snprintf(input_filename, sizeof(input_filename), "%s",
                 c->stream->feed->feed_filename);
        buf_size = FFM_PACKET_SIZE;
        /* compute position (absolute time) */
        if (av_find_info_tag(buf, sizeof(buf), "date", info)) {
            if ((ret = av_parse_time(&stream_pos, buf, 0)) < 0) {
                http_log("Invalid date specification '%s' for stream\n", buf);
                return ret;
            }
        } else if (av_find_info_tag(buf, sizeof(buf), "buffer", info)) {
            int prebuffer = strtol(buf, 0, 10);
            stream_pos = av_gettime() - prebuffer * (int64_t)1000000;
        } else
            stream_pos = av_gettime() - c->stream->prebuffer * (int64_t)1000;
    } else {
        /* Bounded copy for the same reason as above. */
        snprintf(input_filename, sizeof(input_filename), "%s",
                 c->stream->feed_filename);
        buf_size = 0;
        /* compute position (relative time) */
        if (av_find_info_tag(buf, sizeof(buf), "date", info)) {
            if ((ret = av_parse_time(&stream_pos, buf, 1)) < 0) {
                http_log("Invalid date specification '%s' for stream\n", buf);
                return ret;
            }
        } else
            stream_pos = 0;
    }
    if (!input_filename[0]) {
        http_log("No filename was specified for stream\n");
        return AVERROR(EINVAL);
    }

    /* open stream */
    ret = avformat_open_input(&s, input_filename, c->stream->ifmt,
                              &c->stream->in_opts);
    if (ret < 0) {
        http_log("Could not open input '%s': %s\n",
                 input_filename, av_err2str(ret));
        return ret;
    }

    /* set buffer size */
    if (buf_size > 0) {
        ret = ffio_set_buf_size(s->pb, buf_size);
        if (ret < 0) {
            http_log("Failed to set buffer size\n");
            /* Close the just-opened context; returning without doing so
             * leaked it (c->fmt_in has not been set yet). */
            avformat_close_input(&s);
            return ret;
        }
    }

    s->flags |= AVFMT_FLAG_GENPTS;
    c->fmt_in = s;
    if (strcmp(s->iformat->name, "ffm") &&
        (ret = avformat_find_stream_info(c->fmt_in, NULL)) < 0) {
        http_log("Could not find stream info for input '%s'\n", input_filename);
        avformat_close_input(&s);
        return ret;
    }

    /* choose stream as clock source (we favor the video stream if
     * present) for packet sending */
    c->pts_stream_index = 0;
    for(i=0;i<c->stream->nb_streams;i++) {
        if (c->pts_stream_index == 0 &&
            c->stream->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            c->pts_stream_index = i;
        }
    }

    if (c->fmt_in->iformat->read_seek)
        av_seek_frame(c->fmt_in, -1, stream_pos, 0);
    /* set the start time (needed for maxtime and RTP packet timing) */
    c->start_time = cur_time;
    c->first_pts = AV_NOPTS_VALUE;
    return 0;
}
| 0 |
[
"CWE-119",
"CWE-787"
] |
FFmpeg
|
a5d25faa3f4b18dac737fdb35d0dd68eb0dc2156
| 29,851,974,505,965,910,000,000,000,000,000,000,000 | 84 |
ffserver: Check chunk size
Fixes out of array access
Fixes: poc_ffserver.py
Found-by: Paul Cher <[email protected]>
Signed-off-by: Michael Niedermayer <[email protected]>
|
Subsets and Splits
CWE 416 & 19
The query filters records related to specific CWEs (Common Weakness Enumerations), providing a basic overview of entries with these vulnerabilities but without deeper analysis.
CWE Frequency in Train Set
Counts the occurrences of each CWE (Common Weakness Enumeration) in the dataset, providing a basic distribution but limited insight.