id (int32, 0-27.3k) | func (string, 26-142k chars) | target (bool, 2 classes) | project (string, 2 values) | commit_id (string, 40 chars) | func_clean (string, 26-131k chars) | vul_lines (dict) | normalized_func (string, 24-132k chars) | lines (sequence, 1-2.8k items) | label (sequence, 1-2.8k items) | line_no (sequence, 1-2.8k items) |
---|---|---|---|---|---|---|---|---|---|---|
21,241 | int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
Coroutine *co;
DiscardCo rwco = {
.bs = bs,
.sector_num = sector_num,
.nb_sectors = nb_sectors,
.ret = NOT_DONE,
};
if (qemu_in_coroutine()) {
/* Fast-path if already in coroutine context */
bdrv_discard_co_entry(&rwco);
} else {
AioContext *aio_context = bdrv_get_aio_context(bs);
co = qemu_coroutine_create(bdrv_discard_co_entry);
qemu_coroutine_enter(co, &rwco);
while (rwco.ret == NOT_DONE) {
aio_poll(aio_context, true);
}
}
return rwco.ret;
}
| true | qemu | 0b8b8753e4d94901627b3e86431230f2319215c4 | int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
{
Coroutine *co;
DiscardCo rwco = {
.bs = bs,
.sector_num = sector_num,
.nb_sectors = nb_sectors,
.ret = NOT_DONE,
};
if (qemu_in_coroutine()) {
bdrv_discard_co_entry(&rwco);
} else {
AioContext *aio_context = bdrv_get_aio_context(bs);
co = qemu_coroutine_create(bdrv_discard_co_entry);
qemu_coroutine_enter(co, &rwco);
while (rwco.ret == NOT_DONE) {
aio_poll(aio_context, true);
}
}
return rwco.ret;
}
| {
"code": [
" qemu_coroutine_enter(co, &rwco);",
" co = qemu_coroutine_create(bdrv_discard_co_entry);",
" qemu_coroutine_enter(co, &rwco);"
],
"line_no": [
35,
33,
35
]
} | int FUNC_0(BlockDriverState *VAR_0, int64_t VAR_1, int VAR_2)
{
Coroutine *co;
DiscardCo rwco = {
.VAR_0 = VAR_0,
.VAR_1 = VAR_1,
.VAR_2 = VAR_2,
.ret = NOT_DONE,
};
if (qemu_in_coroutine()) {
bdrv_discard_co_entry(&rwco);
} else {
AioContext *aio_context = bdrv_get_aio_context(VAR_0);
co = qemu_coroutine_create(bdrv_discard_co_entry);
qemu_coroutine_enter(co, &rwco);
while (rwco.ret == NOT_DONE) {
aio_poll(aio_context, true);
}
}
return rwco.ret;
}
| [
"int FUNC_0(BlockDriverState *VAR_0, int64_t VAR_1, int VAR_2)\n{",
"Coroutine *co;",
"DiscardCo rwco = {",
".VAR_0 = VAR_0,\n.VAR_1 = VAR_1,\n.VAR_2 = VAR_2,\n.ret = NOT_DONE,\n};",
"if (qemu_in_coroutine()) {",
"bdrv_discard_co_entry(&rwco);",
"} else {",
"AioContext *aio_context = bdrv_get_aio_context(VAR_0);",
"co = qemu_coroutine_create(bdrv_discard_co_entry);",
"qemu_coroutine_enter(co, &rwco);",
"while (rwco.ret == NOT_DONE) {",
"aio_poll(aio_context, true);",
"}",
"}",
"return rwco.ret;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9,
11,
13,
15,
17
],
[
21
],
[
25
],
[
27
],
[
29
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
47
],
[
49
]
] |
21,242 | static void cg3_realizefn(DeviceState *dev, Error **errp)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
CG3State *s = CG3(dev);
int ret;
char *fcode_filename;
/* FCode ROM */
vmstate_register_ram_global(&s->rom);
fcode_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, CG3_ROM_FILE);
if (fcode_filename) {
ret = load_image_targphys(fcode_filename, s->prom_addr,
FCODE_MAX_ROM_SIZE);
g_free(fcode_filename);
if (ret < 0 || ret > FCODE_MAX_ROM_SIZE) {
error_report("cg3: could not load prom '%s'", CG3_ROM_FILE);
}
}
memory_region_init_ram(&s->vram_mem, NULL, "cg3.vram", s->vram_size,
&error_abort);
memory_region_set_log(&s->vram_mem, true, DIRTY_MEMORY_VGA);
vmstate_register_ram_global(&s->vram_mem);
sysbus_init_mmio(sbd, &s->vram_mem);
sysbus_init_irq(sbd, &s->irq);
s->con = graphic_console_init(DEVICE(dev), 0, &cg3_ops, s);
qemu_console_resize(s->con, s->width, s->height);
}
| true | qemu | f8ed85ac992c48814d916d5df4d44f9a971c5de4 | static void cg3_realizefn(DeviceState *dev, Error **errp)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
CG3State *s = CG3(dev);
int ret;
char *fcode_filename;
vmstate_register_ram_global(&s->rom);
fcode_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, CG3_ROM_FILE);
if (fcode_filename) {
ret = load_image_targphys(fcode_filename, s->prom_addr,
FCODE_MAX_ROM_SIZE);
g_free(fcode_filename);
if (ret < 0 || ret > FCODE_MAX_ROM_SIZE) {
error_report("cg3: could not load prom '%s'", CG3_ROM_FILE);
}
}
memory_region_init_ram(&s->vram_mem, NULL, "cg3.vram", s->vram_size,
&error_abort);
memory_region_set_log(&s->vram_mem, true, DIRTY_MEMORY_VGA);
vmstate_register_ram_global(&s->vram_mem);
sysbus_init_mmio(sbd, &s->vram_mem);
sysbus_init_irq(sbd, &s->irq);
s->con = graphic_console_init(DEVICE(dev), 0, &cg3_ops, s);
qemu_console_resize(s->con, s->width, s->height);
}
| {
"code": [
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);",
" &error_abort);"
],
"line_no": [
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41,
41
]
} | static void FUNC_0(DeviceState *VAR_0, Error **VAR_1)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(VAR_0);
CG3State *s = CG3(VAR_0);
int VAR_2;
char *VAR_3;
vmstate_register_ram_global(&s->rom);
VAR_3 = qemu_find_file(QEMU_FILE_TYPE_BIOS, CG3_ROM_FILE);
if (VAR_3) {
VAR_2 = load_image_targphys(VAR_3, s->prom_addr,
FCODE_MAX_ROM_SIZE);
g_free(VAR_3);
if (VAR_2 < 0 || VAR_2 > FCODE_MAX_ROM_SIZE) {
error_report("cg3: could not load prom '%s'", CG3_ROM_FILE);
}
}
memory_region_init_ram(&s->vram_mem, NULL, "cg3.vram", s->vram_size,
&error_abort);
memory_region_set_log(&s->vram_mem, true, DIRTY_MEMORY_VGA);
vmstate_register_ram_global(&s->vram_mem);
sysbus_init_mmio(sbd, &s->vram_mem);
sysbus_init_irq(sbd, &s->irq);
s->con = graphic_console_init(DEVICE(VAR_0), 0, &cg3_ops, s);
qemu_console_resize(s->con, s->width, s->height);
}
| [
"static void FUNC_0(DeviceState *VAR_0, Error **VAR_1)\n{",
"SysBusDevice *sbd = SYS_BUS_DEVICE(VAR_0);",
"CG3State *s = CG3(VAR_0);",
"int VAR_2;",
"char *VAR_3;",
"vmstate_register_ram_global(&s->rom);",
"VAR_3 = qemu_find_file(QEMU_FILE_TYPE_BIOS, CG3_ROM_FILE);",
"if (VAR_3) {",
"VAR_2 = load_image_targphys(VAR_3, s->prom_addr,\nFCODE_MAX_ROM_SIZE);",
"g_free(VAR_3);",
"if (VAR_2 < 0 || VAR_2 > FCODE_MAX_ROM_SIZE) {",
"error_report(\"cg3: could not load prom '%s'\", CG3_ROM_FILE);",
"}",
"}",
"memory_region_init_ram(&s->vram_mem, NULL, \"cg3.vram\", s->vram_size,\n&error_abort);",
"memory_region_set_log(&s->vram_mem, true, DIRTY_MEMORY_VGA);",
"vmstate_register_ram_global(&s->vram_mem);",
"sysbus_init_mmio(sbd, &s->vram_mem);",
"sysbus_init_irq(sbd, &s->irq);",
"s->con = graphic_console_init(DEVICE(VAR_0), 0, &cg3_ops, s);",
"qemu_console_resize(s->con, s->width, s->height);",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
17
],
[
19
],
[
21
],
[
23,
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
39,
41
],
[
43
],
[
45
],
[
47
],
[
51
],
[
55
],
[
57
],
[
59
]
] |
21,243 | static void virtio_gpu_set_scanout(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
struct virtio_gpu_simple_resource *res;
struct virtio_gpu_scanout *scanout;
pixman_format_code_t format;
uint32_t offset;
int bpp;
struct virtio_gpu_set_scanout ss;
VIRTIO_GPU_FILL_CMD(ss);
trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
ss.r.width, ss.r.height, ss.r.x, ss.r.y);
if (ss.scanout_id >= g->conf.max_outputs) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
__func__, ss.scanout_id);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
return;
}
g->enable = 1;
if (ss.resource_id == 0) {
scanout = &g->scanout[ss.scanout_id];
if (scanout->resource_id) {
res = virtio_gpu_find_resource(g, scanout->resource_id);
if (res) {
res->scanout_bitmask &= ~(1 << ss.scanout_id);
}
}
if (ss.scanout_id == 0) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: illegal scanout id specified %d",
__func__, ss.scanout_id);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
return;
}
dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
scanout->ds = NULL;
scanout->width = 0;
scanout->height = 0;
return;
}
/* create a surface for this scanout */
res = virtio_gpu_find_resource(g, ss.resource_id);
if (!res) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
__func__, ss.resource_id);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
return;
}
if (ss.r.x > res->width ||
ss.r.y > res->height ||
ss.r.width > res->width ||
ss.r.height > res->height ||
ss.r.x + ss.r.width > res->width ||
ss.r.y + ss.r.height > res->height) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
" resource %d, (%d,%d)+%d,%d vs %d %d\n",
__func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
ss.r.width, ss.r.height, res->width, res->height);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
return;
}
scanout = &g->scanout[ss.scanout_id];
format = pixman_image_get_format(res->image);
bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
if (!scanout->ds || surface_data(scanout->ds)
!= ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
scanout->width != ss.r.width ||
scanout->height != ss.r.height) {
pixman_image_t *rect;
void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
pixman_image_get_stride(res->image));
pixman_image_ref(res->image);
pixman_image_set_destroy_function(rect, virtio_unref_resource,
res->image);
/* realloc the surface ptr */
scanout->ds = qemu_create_displaysurface_pixman(rect);
if (!scanout->ds) {
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
return;
}
dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
}
res->scanout_bitmask |= (1 << ss.scanout_id);
scanout->resource_id = ss.resource_id;
scanout->x = ss.r.x;
scanout->y = ss.r.y;
scanout->width = ss.r.width;
scanout->height = ss.r.height;
} | true | qemu | dd248ed7e204ee8a1873914e02b8b526e8f1b80d | static void virtio_gpu_set_scanout(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
struct virtio_gpu_simple_resource *res;
struct virtio_gpu_scanout *scanout;
pixman_format_code_t format;
uint32_t offset;
int bpp;
struct virtio_gpu_set_scanout ss;
VIRTIO_GPU_FILL_CMD(ss);
trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
ss.r.width, ss.r.height, ss.r.x, ss.r.y);
if (ss.scanout_id >= g->conf.max_outputs) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
__func__, ss.scanout_id);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
return;
}
g->enable = 1;
if (ss.resource_id == 0) {
scanout = &g->scanout[ss.scanout_id];
if (scanout->resource_id) {
res = virtio_gpu_find_resource(g, scanout->resource_id);
if (res) {
res->scanout_bitmask &= ~(1 << ss.scanout_id);
}
}
if (ss.scanout_id == 0) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: illegal scanout id specified %d",
__func__, ss.scanout_id);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
return;
}
dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
scanout->ds = NULL;
scanout->width = 0;
scanout->height = 0;
return;
}
res = virtio_gpu_find_resource(g, ss.resource_id);
if (!res) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
__func__, ss.resource_id);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
return;
}
if (ss.r.x > res->width ||
ss.r.y > res->height ||
ss.r.width > res->width ||
ss.r.height > res->height ||
ss.r.x + ss.r.width > res->width ||
ss.r.y + ss.r.height > res->height) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
" resource %d, (%d,%d)+%d,%d vs %d %d\n",
__func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
ss.r.width, ss.r.height, res->width, res->height);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
return;
}
scanout = &g->scanout[ss.scanout_id];
format = pixman_image_get_format(res->image);
bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
if (!scanout->ds || surface_data(scanout->ds)
!= ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
scanout->width != ss.r.width ||
scanout->height != ss.r.height) {
pixman_image_t *rect;
void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
pixman_image_get_stride(res->image));
pixman_image_ref(res->image);
pixman_image_set_destroy_function(rect, virtio_unref_resource,
res->image);
scanout->ds = qemu_create_displaysurface_pixman(rect);
if (!scanout->ds) {
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
return;
}
dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
}
res->scanout_bitmask |= (1 << ss.scanout_id);
scanout->resource_id = ss.resource_id;
scanout->x = ss.r.x;
scanout->y = ss.r.y;
scanout->width = ss.r.width;
scanout->height = ss.r.height;
} | {
"code": [],
"line_no": []
} | static void FUNC_0(VirtIOGPU *VAR_0,
struct virtio_gpu_ctrl_command *VAR_1)
{
struct virtio_gpu_simple_resource *VAR_2;
struct virtio_gpu_scanout *VAR_3;
pixman_format_code_t format;
uint32_t offset;
int VAR_4;
struct FUNC_0 VAR_5;
VIRTIO_GPU_FILL_CMD(VAR_5);
trace_virtio_gpu_cmd_set_scanout(VAR_5.scanout_id, VAR_5.resource_id,
VAR_5.r.width, VAR_5.r.height, VAR_5.r.x, VAR_5.r.y);
if (VAR_5.scanout_id >= VAR_0->conf.max_outputs) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal VAR_3 id specified %d",
__func__, VAR_5.scanout_id);
VAR_1->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
return;
}
VAR_0->enable = 1;
if (VAR_5.resource_id == 0) {
VAR_3 = &VAR_0->VAR_3[VAR_5.scanout_id];
if (VAR_3->resource_id) {
VAR_2 = virtio_gpu_find_resource(VAR_0, VAR_3->resource_id);
if (VAR_2) {
VAR_2->scanout_bitmask &= ~(1 << VAR_5.scanout_id);
}
}
if (VAR_5.scanout_id == 0) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: illegal VAR_3 id specified %d",
__func__, VAR_5.scanout_id);
VAR_1->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
return;
}
dpy_gfx_replace_surface(VAR_0->VAR_3[VAR_5.scanout_id].con, NULL);
VAR_3->ds = NULL;
VAR_3->width = 0;
VAR_3->height = 0;
return;
}
VAR_2 = virtio_gpu_find_resource(VAR_0, VAR_5.resource_id);
if (!VAR_2) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
__func__, VAR_5.resource_id);
VAR_1->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
return;
}
if (VAR_5.r.x > VAR_2->width ||
VAR_5.r.y > VAR_2->height ||
VAR_5.r.width > VAR_2->width ||
VAR_5.r.height > VAR_2->height ||
VAR_5.r.x + VAR_5.r.width > VAR_2->width ||
VAR_5.r.y + VAR_5.r.height > VAR_2->height) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal VAR_3 %d bounds for"
" resource %d, (%d,%d)+%d,%d vs %d %d\n",
__func__, VAR_5.scanout_id, VAR_5.resource_id, VAR_5.r.x, VAR_5.r.y,
VAR_5.r.width, VAR_5.r.height, VAR_2->width, VAR_2->height);
VAR_1->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
return;
}
VAR_3 = &VAR_0->VAR_3[VAR_5.scanout_id];
format = pixman_image_get_format(VAR_2->image);
VAR_4 = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
offset = (VAR_5.r.x * VAR_4) + VAR_5.r.y * pixman_image_get_stride(VAR_2->image);
if (!VAR_3->ds || surface_data(VAR_3->ds)
!= ((uint8_t *)pixman_image_get_data(VAR_2->image) + offset) ||
VAR_3->width != VAR_5.r.width ||
VAR_3->height != VAR_5.r.height) {
pixman_image_t *rect;
void *VAR_6 = (uint8_t *)pixman_image_get_data(VAR_2->image) + offset;
rect = pixman_image_create_bits(format, VAR_5.r.width, VAR_5.r.height, VAR_6,
pixman_image_get_stride(VAR_2->image));
pixman_image_ref(VAR_2->image);
pixman_image_set_destroy_function(rect, virtio_unref_resource,
VAR_2->image);
VAR_3->ds = qemu_create_displaysurface_pixman(rect);
if (!VAR_3->ds) {
VAR_1->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
return;
}
dpy_gfx_replace_surface(VAR_0->VAR_3[VAR_5.scanout_id].con, VAR_3->ds);
}
VAR_2->scanout_bitmask |= (1 << VAR_5.scanout_id);
VAR_3->resource_id = VAR_5.resource_id;
VAR_3->x = VAR_5.r.x;
VAR_3->y = VAR_5.r.y;
VAR_3->width = VAR_5.r.width;
VAR_3->height = VAR_5.r.height;
} | [
"static void FUNC_0(VirtIOGPU *VAR_0,\nstruct virtio_gpu_ctrl_command *VAR_1)\n{",
"struct virtio_gpu_simple_resource *VAR_2;",
"struct virtio_gpu_scanout *VAR_3;",
"pixman_format_code_t format;",
"uint32_t offset;",
"int VAR_4;",
"struct FUNC_0 VAR_5;",
"VIRTIO_GPU_FILL_CMD(VAR_5);",
"trace_virtio_gpu_cmd_set_scanout(VAR_5.scanout_id, VAR_5.resource_id,\nVAR_5.r.width, VAR_5.r.height, VAR_5.r.x, VAR_5.r.y);",
"if (VAR_5.scanout_id >= VAR_0->conf.max_outputs) {",
"qemu_log_mask(LOG_GUEST_ERROR, \"%s: illegal VAR_3 id specified %d\",\n__func__, VAR_5.scanout_id);",
"VAR_1->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;",
"return;",
"}",
"VAR_0->enable = 1;",
"if (VAR_5.resource_id == 0) {",
"VAR_3 = &VAR_0->VAR_3[VAR_5.scanout_id];",
"if (VAR_3->resource_id) {",
"VAR_2 = virtio_gpu_find_resource(VAR_0, VAR_3->resource_id);",
"if (VAR_2) {",
"VAR_2->scanout_bitmask &= ~(1 << VAR_5.scanout_id);",
"}",
"}",
"if (VAR_5.scanout_id == 0) {",
"qemu_log_mask(LOG_GUEST_ERROR,\n\"%s: illegal VAR_3 id specified %d\",\n__func__, VAR_5.scanout_id);",
"VAR_1->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;",
"return;",
"}",
"dpy_gfx_replace_surface(VAR_0->VAR_3[VAR_5.scanout_id].con, NULL);",
"VAR_3->ds = NULL;",
"VAR_3->width = 0;",
"VAR_3->height = 0;",
"return;",
"}",
"VAR_2 = virtio_gpu_find_resource(VAR_0, VAR_5.resource_id);",
"if (!VAR_2) {",
"qemu_log_mask(LOG_GUEST_ERROR, \"%s: illegal resource specified %d\\n\",\n__func__, VAR_5.resource_id);",
"VAR_1->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;",
"return;",
"}",
"if (VAR_5.r.x > VAR_2->width ||\nVAR_5.r.y > VAR_2->height ||\nVAR_5.r.width > VAR_2->width ||\nVAR_5.r.height > VAR_2->height ||\nVAR_5.r.x + VAR_5.r.width > VAR_2->width ||\nVAR_5.r.y + VAR_5.r.height > VAR_2->height) {",
"qemu_log_mask(LOG_GUEST_ERROR, \"%s: illegal VAR_3 %d bounds for\"\n\" resource %d, (%d,%d)+%d,%d vs %d %d\\n\",\n__func__, VAR_5.scanout_id, VAR_5.resource_id, VAR_5.r.x, VAR_5.r.y,\nVAR_5.r.width, VAR_5.r.height, VAR_2->width, VAR_2->height);",
"VAR_1->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;",
"return;",
"}",
"VAR_3 = &VAR_0->VAR_3[VAR_5.scanout_id];",
"format = pixman_image_get_format(VAR_2->image);",
"VAR_4 = (PIXMAN_FORMAT_BPP(format) + 7) / 8;",
"offset = (VAR_5.r.x * VAR_4) + VAR_5.r.y * pixman_image_get_stride(VAR_2->image);",
"if (!VAR_3->ds || surface_data(VAR_3->ds)\n!= ((uint8_t *)pixman_image_get_data(VAR_2->image) + offset) ||\nVAR_3->width != VAR_5.r.width ||\nVAR_3->height != VAR_5.r.height) {",
"pixman_image_t *rect;",
"void *VAR_6 = (uint8_t *)pixman_image_get_data(VAR_2->image) + offset;",
"rect = pixman_image_create_bits(format, VAR_5.r.width, VAR_5.r.height, VAR_6,\npixman_image_get_stride(VAR_2->image));",
"pixman_image_ref(VAR_2->image);",
"pixman_image_set_destroy_function(rect, virtio_unref_resource,\nVAR_2->image);",
"VAR_3->ds = qemu_create_displaysurface_pixman(rect);",
"if (!VAR_3->ds) {",
"VAR_1->error = VIRTIO_GPU_RESP_ERR_UNSPEC;",
"return;",
"}",
"dpy_gfx_replace_surface(VAR_0->VAR_3[VAR_5.scanout_id].con, VAR_3->ds);",
"}",
"VAR_2->scanout_bitmask |= (1 << VAR_5.scanout_id);",
"VAR_3->resource_id = VAR_5.resource_id;",
"VAR_3->x = VAR_5.r.x;",
"VAR_3->y = VAR_5.r.y;",
"VAR_3->width = VAR_5.r.width;",
"VAR_3->height = VAR_5.r.height;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
21
],
[
23,
25
],
[
29
],
[
31,
33
],
[
35
],
[
37
],
[
39
],
[
43
],
[
45
],
[
47
],
[
49
],
[
51
],
[
53
],
[
55
],
[
57
],
[
59
],
[
61
],
[
63,
65,
67
],
[
69
],
[
71
],
[
73
],
[
75
],
[
77
],
[
79
],
[
81
],
[
83
],
[
85
],
[
91
],
[
93
],
[
95,
97
],
[
99
],
[
101
],
[
103
],
[
107,
109,
111,
113,
115,
117
],
[
119,
121,
123,
125
],
[
127
],
[
129
],
[
131
],
[
135
],
[
139
],
[
141
],
[
143
],
[
145,
147,
149,
151
],
[
153
],
[
155
],
[
157,
159
],
[
161
],
[
163,
165
],
[
169
],
[
171
],
[
173
],
[
175
],
[
177
],
[
180
],
[
182
],
[
186
],
[
188
],
[
190
],
[
192
],
[
194
],
[
196
],
[
198
]
] |
21,244 | void kbd_put_keycode(int keycode)
{
QEMUPutKbdEntry *entry = QTAILQ_FIRST(&kbd_handlers);
if (!runstate_is_running() && !runstate_check(RUN_STATE_SUSPENDED)) {
return;
}
if (entry) {
entry->put_kbd(entry->opaque, keycode);
}
}
| true | qemu | 4282c8277013dc5613b8f27845f6121b66b7cbff | void kbd_put_keycode(int keycode)
{
QEMUPutKbdEntry *entry = QTAILQ_FIRST(&kbd_handlers);
if (!runstate_is_running() && !runstate_check(RUN_STATE_SUSPENDED)) {
return;
}
if (entry) {
entry->put_kbd(entry->opaque, keycode);
}
}
| {
"code": [
" if (entry) {"
],
"line_no": [
15
]
} | void FUNC_0(int VAR_0)
{
QEMUPutKbdEntry *entry = QTAILQ_FIRST(&kbd_handlers);
if (!runstate_is_running() && !runstate_check(RUN_STATE_SUSPENDED)) {
return;
}
if (entry) {
entry->put_kbd(entry->opaque, VAR_0);
}
}
| [
"void FUNC_0(int VAR_0)\n{",
"QEMUPutKbdEntry *entry = QTAILQ_FIRST(&kbd_handlers);",
"if (!runstate_is_running() && !runstate_check(RUN_STATE_SUSPENDED)) {",
"return;",
"}",
"if (entry) {",
"entry->put_kbd(entry->opaque, VAR_0);",
"}",
"}"
] | [
0,
0,
0,
0,
0,
1,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
]
] |
21,245 | static av_cold int libx265_encode_init(AVCodecContext *avctx)
{
libx265Context *ctx = avctx->priv_data;
ctx->api = x265_api_get(av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth);
if (!ctx->api)
ctx->api = x265_api_get(0);
ctx->params = ctx->api->param_alloc();
if (!ctx->params) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate x265 param structure.\n");
return AVERROR(ENOMEM);
if (ctx->api->param_default_preset(ctx->params, ctx->preset, ctx->tune) < 0) {
int i;
av_log(avctx, AV_LOG_ERROR, "Error setting preset/tune %s/%s.\n", ctx->preset, ctx->tune);
av_log(avctx, AV_LOG_INFO, "Possible presets:");
for (i = 0; x265_preset_names[i]; i++)
av_log(avctx, AV_LOG_INFO, " %s", x265_preset_names[i]);
av_log(avctx, AV_LOG_INFO, "\n");
av_log(avctx, AV_LOG_INFO, "Possible tunes:");
for (i = 0; x265_tune_names[i]; i++)
av_log(avctx, AV_LOG_INFO, " %s", x265_tune_names[i]);
av_log(avctx, AV_LOG_INFO, "\n");
return AVERROR(EINVAL);
ctx->params->frameNumThreads = avctx->thread_count;
ctx->params->fpsNum = avctx->time_base.den;
ctx->params->fpsDenom = avctx->time_base.num * avctx->ticks_per_frame;
ctx->params->sourceWidth = avctx->width;
ctx->params->sourceHeight = avctx->height;
ctx->params->bEnablePsnr = !!(avctx->flags & AV_CODEC_FLAG_PSNR);
if ((avctx->color_primaries <= AVCOL_PRI_BT2020 &&
avctx->color_primaries != AVCOL_PRI_UNSPECIFIED) ||
(avctx->color_trc <= AVCOL_TRC_BT2020_12 &&
avctx->color_trc != AVCOL_TRC_UNSPECIFIED) ||
(avctx->colorspace <= AVCOL_SPC_BT2020_CL &&
avctx->colorspace != AVCOL_SPC_UNSPECIFIED)) {
ctx->params->vui.bEnableVideoSignalTypePresentFlag = 1;
ctx->params->vui.bEnableColorDescriptionPresentFlag = 1;
// x265 validates the parameters internally
ctx->params->vui.colorPrimaries = avctx->color_primaries;
ctx->params->vui.transferCharacteristics = avctx->color_trc;
ctx->params->vui.matrixCoeffs = avctx->colorspace;
if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0) {
char sar[12];
int sar_num, sar_den;
av_reduce(&sar_num, &sar_den,
avctx->sample_aspect_ratio.num,
avctx->sample_aspect_ratio.den, 65535);
snprintf(sar, sizeof(sar), "%d:%d", sar_num, sar_den);
if (ctx->api->param_parse(ctx->params, "sar", sar) == X265_PARAM_BAD_VALUE) {
av_log(avctx, AV_LOG_ERROR, "Invalid SAR: %d:%d.\n", sar_num, sar_den);
switch (avctx->pix_fmt) {
case AV_PIX_FMT_YUV420P:
case AV_PIX_FMT_YUV420P10:
case AV_PIX_FMT_YUV420P12:
ctx->params->internalCsp = X265_CSP_I420;
case AV_PIX_FMT_YUV422P:
case AV_PIX_FMT_YUV422P10:
case AV_PIX_FMT_YUV422P12:
ctx->params->internalCsp = X265_CSP_I422;
case AV_PIX_FMT_GBRP:
case AV_PIX_FMT_GBRP10:
case AV_PIX_FMT_GBRP12:
ctx->params->vui.matrixCoeffs = AVCOL_SPC_RGB;
ctx->params->vui.bEnableVideoSignalTypePresentFlag = 1;
ctx->params->vui.bEnableColorDescriptionPresentFlag = 1;
case AV_PIX_FMT_YUV444P:
case AV_PIX_FMT_YUV444P10:
case AV_PIX_FMT_YUV444P12:
ctx->params->internalCsp = X265_CSP_I444;
if (ctx->crf >= 0) {
char crf[6];
snprintf(crf, sizeof(crf), "%2.2f", ctx->crf);
if (ctx->api->param_parse(ctx->params, "crf", crf) == X265_PARAM_BAD_VALUE) {
av_log(avctx, AV_LOG_ERROR, "Invalid crf: %2.2f.\n", ctx->crf);
return AVERROR(EINVAL);
} else if (avctx->bit_rate > 0) {
ctx->params->rc.bitrate = avctx->bit_rate / 1000;
ctx->params->rc.rateControlMode = X265_RC_ABR;
if (!(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER))
ctx->params->bRepeatHeaders = 1;
if (ctx->x265_opts) {
AVDictionary *dict = NULL;
AVDictionaryEntry *en = NULL;
if (!av_dict_parse_string(&dict, ctx->x265_opts, "=", ":", 0)) {
while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
int parse_ret = ctx->api->param_parse(ctx->params, en->key, en->value);
switch (parse_ret) {
case X265_PARAM_BAD_NAME:
av_log(avctx, AV_LOG_WARNING,
"Unknown option: %s.\n", en->key);
case X265_PARAM_BAD_VALUE:
av_log(avctx, AV_LOG_WARNING,
"Invalid value for %s: %s.\n", en->key, en->value);
default:
av_dict_free(&dict);
ctx->encoder = ctx->api->encoder_open(ctx->params);
if (!ctx->encoder) {
av_log(avctx, AV_LOG_ERROR, "Cannot open libx265 encoder.\n");
libx265_encode_close(avctx);
if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
x265_nal *nal;
int nnal;
avctx->extradata_size = ctx->api->encoder_headers(ctx->encoder, &nal, &nnal);
if (avctx->extradata_size <= 0) {
av_log(avctx, AV_LOG_ERROR, "Cannot encode headers.\n");
libx265_encode_close(avctx);
avctx->extradata = av_malloc(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
if (!avctx->extradata) {
"Cannot allocate HEVC header of size %d.\n", avctx->extradata_size);
libx265_encode_close(avctx);
return AVERROR(ENOMEM);
memcpy(avctx->extradata, nal[0].payload, avctx->extradata_size);
return 0;
| true | FFmpeg | 3e4357eb822c8bcaf9743dde008f5774d1356e74 | static av_cold int libx265_encode_init(AVCodecContext *avctx)
{
libx265Context *ctx = avctx->priv_data;
ctx->api = x265_api_get(av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth);
if (!ctx->api)
ctx->api = x265_api_get(0);
ctx->params = ctx->api->param_alloc();
if (!ctx->params) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate x265 param structure.\n");
return AVERROR(ENOMEM);
if (ctx->api->param_default_preset(ctx->params, ctx->preset, ctx->tune) < 0) {
int i;
av_log(avctx, AV_LOG_ERROR, "Error setting preset/tune %s/%s.\n", ctx->preset, ctx->tune);
av_log(avctx, AV_LOG_INFO, "Possible presets:");
for (i = 0; x265_preset_names[i]; i++)
av_log(avctx, AV_LOG_INFO, " %s", x265_preset_names[i]);
av_log(avctx, AV_LOG_INFO, "\n");
av_log(avctx, AV_LOG_INFO, "Possible tunes:");
for (i = 0; x265_tune_names[i]; i++)
av_log(avctx, AV_LOG_INFO, " %s", x265_tune_names[i]);
av_log(avctx, AV_LOG_INFO, "\n");
return AVERROR(EINVAL);
ctx->params->frameNumThreads = avctx->thread_count;
ctx->params->fpsNum = avctx->time_base.den;
ctx->params->fpsDenom = avctx->time_base.num * avctx->ticks_per_frame;
ctx->params->sourceWidth = avctx->width;
ctx->params->sourceHeight = avctx->height;
ctx->params->bEnablePsnr = !!(avctx->flags & AV_CODEC_FLAG_PSNR);
if ((avctx->color_primaries <= AVCOL_PRI_BT2020 &&
avctx->color_primaries != AVCOL_PRI_UNSPECIFIED) ||
(avctx->color_trc <= AVCOL_TRC_BT2020_12 &&
avctx->color_trc != AVCOL_TRC_UNSPECIFIED) ||
(avctx->colorspace <= AVCOL_SPC_BT2020_CL &&
avctx->colorspace != AVCOL_SPC_UNSPECIFIED)) {
ctx->params->vui.bEnableVideoSignalTypePresentFlag = 1;
ctx->params->vui.bEnableColorDescriptionPresentFlag = 1;
ctx->params->vui.colorPrimaries = avctx->color_primaries;
ctx->params->vui.transferCharacteristics = avctx->color_trc;
ctx->params->vui.matrixCoeffs = avctx->colorspace;
if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0) {
char sar[12];
int sar_num, sar_den;
av_reduce(&sar_num, &sar_den,
avctx->sample_aspect_ratio.num,
avctx->sample_aspect_ratio.den, 65535);
snprintf(sar, sizeof(sar), "%d:%d", sar_num, sar_den);
if (ctx->api->param_parse(ctx->params, "sar", sar) == X265_PARAM_BAD_VALUE) {
av_log(avctx, AV_LOG_ERROR, "Invalid SAR: %d:%d.\n", sar_num, sar_den);
switch (avctx->pix_fmt) {
case AV_PIX_FMT_YUV420P:
case AV_PIX_FMT_YUV420P10:
case AV_PIX_FMT_YUV420P12:
ctx->params->internalCsp = X265_CSP_I420;
case AV_PIX_FMT_YUV422P:
case AV_PIX_FMT_YUV422P10:
case AV_PIX_FMT_YUV422P12:
ctx->params->internalCsp = X265_CSP_I422;
case AV_PIX_FMT_GBRP:
case AV_PIX_FMT_GBRP10:
case AV_PIX_FMT_GBRP12:
ctx->params->vui.matrixCoeffs = AVCOL_SPC_RGB;
ctx->params->vui.bEnableVideoSignalTypePresentFlag = 1;
ctx->params->vui.bEnableColorDescriptionPresentFlag = 1;
case AV_PIX_FMT_YUV444P:
case AV_PIX_FMT_YUV444P10:
case AV_PIX_FMT_YUV444P12:
ctx->params->internalCsp = X265_CSP_I444;
if (ctx->crf >= 0) {
char crf[6];
snprintf(crf, sizeof(crf), "%2.2f", ctx->crf);
if (ctx->api->param_parse(ctx->params, "crf", crf) == X265_PARAM_BAD_VALUE) {
av_log(avctx, AV_LOG_ERROR, "Invalid crf: %2.2f.\n", ctx->crf);
return AVERROR(EINVAL);
} else if (avctx->bit_rate > 0) {
ctx->params->rc.bitrate = avctx->bit_rate / 1000;
ctx->params->rc.rateControlMode = X265_RC_ABR;
if (!(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER))
ctx->params->bRepeatHeaders = 1;
if (ctx->x265_opts) {
AVDictionary *dict = NULL;
AVDictionaryEntry *en = NULL;
if (!av_dict_parse_string(&dict, ctx->x265_opts, "=", ":", 0)) {
while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
int parse_ret = ctx->api->param_parse(ctx->params, en->key, en->value);
switch (parse_ret) {
case X265_PARAM_BAD_NAME:
av_log(avctx, AV_LOG_WARNING,
"Unknown option: %s.\n", en->key);
case X265_PARAM_BAD_VALUE:
av_log(avctx, AV_LOG_WARNING,
"Invalid value for %s: %s.\n", en->key, en->value);
default:
av_dict_free(&dict);
ctx->encoder = ctx->api->encoder_open(ctx->params);
if (!ctx->encoder) {
av_log(avctx, AV_LOG_ERROR, "Cannot open libx265 encoder.\n");
libx265_encode_close(avctx);
if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
x265_nal *nal;
int nnal;
avctx->extradata_size = ctx->api->encoder_headers(ctx->encoder, &nal, &nnal);
if (avctx->extradata_size <= 0) {
av_log(avctx, AV_LOG_ERROR, "Cannot encode headers.\n");
libx265_encode_close(avctx);
avctx->extradata = av_malloc(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
if (!avctx->extradata) {
"Cannot allocate HEVC header of size %d.\n", avctx->extradata_size);
libx265_encode_close(avctx);
return AVERROR(ENOMEM);
memcpy(avctx->extradata, nal[0].payload, avctx->extradata_size);
return 0;
| {
"code": [],
"line_no": []
} | static av_cold int FUNC_0(AVCodecContext *avctx)
{
libx265Context *ctx = avctx->priv_data;
ctx->api = x265_api_get(av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth);
if (!ctx->api)
ctx->api = x265_api_get(0);
ctx->params = ctx->api->param_alloc();
if (!ctx->params) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate x265 param structure.\n");
return AVERROR(ENOMEM);
if (ctx->api->param_default_preset(ctx->params, ctx->preset, ctx->tune) < 0) {
int VAR_0;
av_log(avctx, AV_LOG_ERROR, "Error setting preset/tune %s/%s.\n", ctx->preset, ctx->tune);
av_log(avctx, AV_LOG_INFO, "Possible presets:");
for (VAR_0 = 0; x265_preset_names[VAR_0]; VAR_0++)
av_log(avctx, AV_LOG_INFO, " %s", x265_preset_names[VAR_0]);
av_log(avctx, AV_LOG_INFO, "\n");
av_log(avctx, AV_LOG_INFO, "Possible tunes:");
for (VAR_0 = 0; x265_tune_names[VAR_0]; VAR_0++)
av_log(avctx, AV_LOG_INFO, " %s", x265_tune_names[VAR_0]);
av_log(avctx, AV_LOG_INFO, "\n");
return AVERROR(EINVAL);
ctx->params->frameNumThreads = avctx->thread_count;
ctx->params->fpsNum = avctx->time_base.den;
ctx->params->fpsDenom = avctx->time_base.num * avctx->ticks_per_frame;
ctx->params->sourceWidth = avctx->width;
ctx->params->sourceHeight = avctx->height;
ctx->params->bEnablePsnr = !!(avctx->flags & AV_CODEC_FLAG_PSNR);
if ((avctx->color_primaries <= AVCOL_PRI_BT2020 &&
avctx->color_primaries != AVCOL_PRI_UNSPECIFIED) ||
(avctx->color_trc <= AVCOL_TRC_BT2020_12 &&
avctx->color_trc != AVCOL_TRC_UNSPECIFIED) ||
(avctx->colorspace <= AVCOL_SPC_BT2020_CL &&
avctx->colorspace != AVCOL_SPC_UNSPECIFIED)) {
ctx->params->vui.bEnableVideoSignalTypePresentFlag = 1;
ctx->params->vui.bEnableColorDescriptionPresentFlag = 1;
ctx->params->vui.colorPrimaries = avctx->color_primaries;
ctx->params->vui.transferCharacteristics = avctx->color_trc;
ctx->params->vui.matrixCoeffs = avctx->colorspace;
if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0) {
char VAR_1[12];
int VAR_2, VAR_3;
av_reduce(&VAR_2, &VAR_3,
avctx->sample_aspect_ratio.num,
avctx->sample_aspect_ratio.den, 65535);
snprintf(VAR_1, sizeof(VAR_1), "%d:%d", VAR_2, VAR_3);
if (ctx->api->param_parse(ctx->params, "VAR_1", VAR_1) == X265_PARAM_BAD_VALUE) {
av_log(avctx, AV_LOG_ERROR, "Invalid SAR: %d:%d.\n", VAR_2, VAR_3);
switch (avctx->pix_fmt) {
case AV_PIX_FMT_YUV420P:
case AV_PIX_FMT_YUV420P10:
case AV_PIX_FMT_YUV420P12:
ctx->params->internalCsp = X265_CSP_I420;
case AV_PIX_FMT_YUV422P:
case AV_PIX_FMT_YUV422P10:
case AV_PIX_FMT_YUV422P12:
ctx->params->internalCsp = X265_CSP_I422;
case AV_PIX_FMT_GBRP:
case AV_PIX_FMT_GBRP10:
case AV_PIX_FMT_GBRP12:
ctx->params->vui.matrixCoeffs = AVCOL_SPC_RGB;
ctx->params->vui.bEnableVideoSignalTypePresentFlag = 1;
ctx->params->vui.bEnableColorDescriptionPresentFlag = 1;
case AV_PIX_FMT_YUV444P:
case AV_PIX_FMT_YUV444P10:
case AV_PIX_FMT_YUV444P12:
ctx->params->internalCsp = X265_CSP_I444;
if (ctx->VAR_4 >= 0) {
char VAR_4[6];
snprintf(VAR_4, sizeof(VAR_4), "%2.2f", ctx->VAR_4);
if (ctx->api->param_parse(ctx->params, "VAR_4", VAR_4) == X265_PARAM_BAD_VALUE) {
av_log(avctx, AV_LOG_ERROR, "Invalid VAR_4: %2.2f.\n", ctx->VAR_4);
return AVERROR(EINVAL);
} else if (avctx->bit_rate > 0) {
ctx->params->rc.bitrate = avctx->bit_rate / 1000;
ctx->params->rc.rateControlMode = X265_RC_ABR;
if (!(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER))
ctx->params->bRepeatHeaders = 1;
if (ctx->x265_opts) {
AVDictionary *dict = NULL;
AVDictionaryEntry *en = NULL;
if (!av_dict_parse_string(&dict, ctx->x265_opts, "=", ":", 0)) {
while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
int VAR_5 = ctx->api->param_parse(ctx->params, en->key, en->value);
switch (VAR_5) {
case X265_PARAM_BAD_NAME:
av_log(avctx, AV_LOG_WARNING,
"Unknown option: %s.\n", en->key);
case X265_PARAM_BAD_VALUE:
av_log(avctx, AV_LOG_WARNING,
"Invalid value for %s: %s.\n", en->key, en->value);
default:
av_dict_free(&dict);
ctx->encoder = ctx->api->encoder_open(ctx->params);
if (!ctx->encoder) {
av_log(avctx, AV_LOG_ERROR, "Cannot open libx265 encoder.\n");
libx265_encode_close(avctx);
if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
x265_nal *nal;
int VAR_6;
avctx->extradata_size = ctx->api->encoder_headers(ctx->encoder, &nal, &VAR_6);
if (avctx->extradata_size <= 0) {
av_log(avctx, AV_LOG_ERROR, "Cannot encode headers.\n");
libx265_encode_close(avctx);
avctx->extradata = av_malloc(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
if (!avctx->extradata) {
"Cannot allocate HEVC header of size %d.\n", avctx->extradata_size);
libx265_encode_close(avctx);
return AVERROR(ENOMEM);
memcpy(avctx->extradata, nal[0].payload, avctx->extradata_size);
return 0;
| [
"static av_cold int FUNC_0(AVCodecContext *avctx)\n{",
"libx265Context *ctx = avctx->priv_data;",
"ctx->api = x265_api_get(av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth);",
"if (!ctx->api)\nctx->api = x265_api_get(0);",
"ctx->params = ctx->api->param_alloc();",
"if (!ctx->params) {",
"av_log(avctx, AV_LOG_ERROR, \"Could not allocate x265 param structure.\\n\");",
"return AVERROR(ENOMEM);",
"if (ctx->api->param_default_preset(ctx->params, ctx->preset, ctx->tune) < 0) {",
"int VAR_0;",
"av_log(avctx, AV_LOG_ERROR, \"Error setting preset/tune %s/%s.\\n\", ctx->preset, ctx->tune);",
"av_log(avctx, AV_LOG_INFO, \"Possible presets:\");",
"for (VAR_0 = 0; x265_preset_names[VAR_0]; VAR_0++)",
"av_log(avctx, AV_LOG_INFO, \" %s\", x265_preset_names[VAR_0]);",
"av_log(avctx, AV_LOG_INFO, \"\\n\");",
"av_log(avctx, AV_LOG_INFO, \"Possible tunes:\");",
"for (VAR_0 = 0; x265_tune_names[VAR_0]; VAR_0++)",
"av_log(avctx, AV_LOG_INFO, \" %s\", x265_tune_names[VAR_0]);",
"av_log(avctx, AV_LOG_INFO, \"\\n\");",
"return AVERROR(EINVAL);",
"ctx->params->frameNumThreads = avctx->thread_count;",
"ctx->params->fpsNum = avctx->time_base.den;",
"ctx->params->fpsDenom = avctx->time_base.num * avctx->ticks_per_frame;",
"ctx->params->sourceWidth = avctx->width;",
"ctx->params->sourceHeight = avctx->height;",
"ctx->params->bEnablePsnr = !!(avctx->flags & AV_CODEC_FLAG_PSNR);",
"if ((avctx->color_primaries <= AVCOL_PRI_BT2020 &&\navctx->color_primaries != AVCOL_PRI_UNSPECIFIED) ||\n(avctx->color_trc <= AVCOL_TRC_BT2020_12 &&\navctx->color_trc != AVCOL_TRC_UNSPECIFIED) ||\n(avctx->colorspace <= AVCOL_SPC_BT2020_CL &&\navctx->colorspace != AVCOL_SPC_UNSPECIFIED)) {",
"ctx->params->vui.bEnableVideoSignalTypePresentFlag = 1;",
"ctx->params->vui.bEnableColorDescriptionPresentFlag = 1;",
"ctx->params->vui.colorPrimaries = avctx->color_primaries;",
"ctx->params->vui.transferCharacteristics = avctx->color_trc;",
"ctx->params->vui.matrixCoeffs = avctx->colorspace;",
"if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0) {",
"char VAR_1[12];",
"int VAR_2, VAR_3;",
"av_reduce(&VAR_2, &VAR_3,\navctx->sample_aspect_ratio.num,\navctx->sample_aspect_ratio.den, 65535);",
"snprintf(VAR_1, sizeof(VAR_1), \"%d:%d\", VAR_2, VAR_3);",
"if (ctx->api->param_parse(ctx->params, \"VAR_1\", VAR_1) == X265_PARAM_BAD_VALUE) {",
"av_log(avctx, AV_LOG_ERROR, \"Invalid SAR: %d:%d.\\n\", VAR_2, VAR_3);",
"switch (avctx->pix_fmt) {",
"case AV_PIX_FMT_YUV420P:\ncase AV_PIX_FMT_YUV420P10:\ncase AV_PIX_FMT_YUV420P12:\nctx->params->internalCsp = X265_CSP_I420;",
"case AV_PIX_FMT_YUV422P:\ncase AV_PIX_FMT_YUV422P10:\ncase AV_PIX_FMT_YUV422P12:\nctx->params->internalCsp = X265_CSP_I422;",
"case AV_PIX_FMT_GBRP:\ncase AV_PIX_FMT_GBRP10:\ncase AV_PIX_FMT_GBRP12:\nctx->params->vui.matrixCoeffs = AVCOL_SPC_RGB;",
"ctx->params->vui.bEnableVideoSignalTypePresentFlag = 1;",
"ctx->params->vui.bEnableColorDescriptionPresentFlag = 1;",
"case AV_PIX_FMT_YUV444P:\ncase AV_PIX_FMT_YUV444P10:\ncase AV_PIX_FMT_YUV444P12:\nctx->params->internalCsp = X265_CSP_I444;",
"if (ctx->VAR_4 >= 0) {",
"char VAR_4[6];",
"snprintf(VAR_4, sizeof(VAR_4), \"%2.2f\", ctx->VAR_4);",
"if (ctx->api->param_parse(ctx->params, \"VAR_4\", VAR_4) == X265_PARAM_BAD_VALUE) {",
"av_log(avctx, AV_LOG_ERROR, \"Invalid VAR_4: %2.2f.\\n\", ctx->VAR_4);",
"return AVERROR(EINVAL);",
"} else if (avctx->bit_rate > 0) {",
"ctx->params->rc.bitrate = avctx->bit_rate / 1000;",
"ctx->params->rc.rateControlMode = X265_RC_ABR;",
"if (!(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER))\nctx->params->bRepeatHeaders = 1;",
"if (ctx->x265_opts) {",
"AVDictionary *dict = NULL;",
"AVDictionaryEntry *en = NULL;",
"if (!av_dict_parse_string(&dict, ctx->x265_opts, \"=\", \":\", 0)) {",
"while ((en = av_dict_get(dict, \"\", en, AV_DICT_IGNORE_SUFFIX))) {",
"int VAR_5 = ctx->api->param_parse(ctx->params, en->key, en->value);",
"switch (VAR_5) {",
"case X265_PARAM_BAD_NAME:\nav_log(avctx, AV_LOG_WARNING,\n\"Unknown option: %s.\\n\", en->key);",
"case X265_PARAM_BAD_VALUE:\nav_log(avctx, AV_LOG_WARNING,\n\"Invalid value for %s: %s.\\n\", en->key, en->value);",
"default:\nav_dict_free(&dict);",
"ctx->encoder = ctx->api->encoder_open(ctx->params);",
"if (!ctx->encoder) {",
"av_log(avctx, AV_LOG_ERROR, \"Cannot open libx265 encoder.\\n\");",
"libx265_encode_close(avctx);",
"if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {",
"x265_nal *nal;",
"int VAR_6;",
"avctx->extradata_size = ctx->api->encoder_headers(ctx->encoder, &nal, &VAR_6);",
"if (avctx->extradata_size <= 0) {",
"av_log(avctx, AV_LOG_ERROR, \"Cannot encode headers.\\n\");",
"libx265_encode_close(avctx);",
"avctx->extradata = av_malloc(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);",
"if (!avctx->extradata) {",
"\"Cannot allocate HEVC header of size %d.\\n\", avctx->extradata_size);",
"libx265_encode_close(avctx);",
"return AVERROR(ENOMEM);",
"memcpy(avctx->extradata, nal[0].payload, avctx->extradata_size);",
"return 0;"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11,
13
],
[
17
],
[
19
],
[
21
],
[
23
],
[
28
],
[
30
],
[
34
],
[
36
],
[
38
],
[
40
],
[
44
],
[
46
],
[
48
],
[
50
],
[
54
],
[
58
],
[
63
],
[
65
],
[
67
],
[
69
],
[
71
],
[
73
],
[
77,
79,
81,
83,
85,
87
],
[
91
],
[
93
],
[
99
],
[
101
],
[
103
],
[
108
],
[
110
],
[
112
],
[
116,
118,
120
],
[
122
],
[
124
],
[
126
],
[
133
],
[
135,
137,
139,
141
],
[
144,
146,
148,
150
],
[
153,
155,
157,
159
],
[
161
],
[
163
],
[
165,
167,
169,
171
],
[
186
],
[
188
],
[
192
],
[
194
],
[
196
],
[
198
],
[
201
],
[
203
],
[
205
],
[
210,
212
],
[
216
],
[
218
],
[
220
],
[
224
],
[
226
],
[
228
],
[
232
],
[
234,
236,
238
],
[
241,
243,
245
],
[
248,
253
],
[
259
],
[
261
],
[
263
],
[
265
],
[
271
],
[
273
],
[
275
],
[
279
],
[
281
],
[
283
],
[
285
],
[
291
],
[
293
],
[
296
],
[
298
],
[
300
],
[
305
],
[
310
]
] |
21,246 | static void nbd_coroutine_end(BlockDriverState *bs,
NBDRequest *request)
{
NBDClientSession *s = nbd_get_client_session(bs);
int i = HANDLE_TO_INDEX(s, request->handle);
s->recv_coroutine[i] = NULL;
s->in_flight--;
qemu_co_queue_next(&s->free_sema);
/* Kick the read_reply_co to get the next reply. */
if (s->read_reply_co) {
aio_co_wake(s->read_reply_co);
}
}
| true | qemu | 6bdcc018a6ed760b9dfe43539124e420aed83092 | static void nbd_coroutine_end(BlockDriverState *bs,
NBDRequest *request)
{
NBDClientSession *s = nbd_get_client_session(bs);
int i = HANDLE_TO_INDEX(s, request->handle);
s->recv_coroutine[i] = NULL;
s->in_flight--;
qemu_co_queue_next(&s->free_sema);
if (s->read_reply_co) {
aio_co_wake(s->read_reply_co);
}
}
| {
"code": [
" s->in_flight--;",
" qemu_co_queue_next(&s->free_sema);"
],
"line_no": [
15,
17
]
} | static void FUNC_0(BlockDriverState *VAR_0,
NBDRequest *VAR_1)
{
NBDClientSession *s = nbd_get_client_session(VAR_0);
int VAR_2 = HANDLE_TO_INDEX(s, VAR_1->handle);
s->recv_coroutine[VAR_2] = NULL;
s->in_flight--;
qemu_co_queue_next(&s->free_sema);
if (s->read_reply_co) {
aio_co_wake(s->read_reply_co);
}
}
| [
"static void FUNC_0(BlockDriverState *VAR_0,\nNBDRequest *VAR_1)\n{",
"NBDClientSession *s = nbd_get_client_session(VAR_0);",
"int VAR_2 = HANDLE_TO_INDEX(s, VAR_1->handle);",
"s->recv_coroutine[VAR_2] = NULL;",
"s->in_flight--;",
"qemu_co_queue_next(&s->free_sema);",
"if (s->read_reply_co) {",
"aio_co_wake(s->read_reply_co);",
"}",
"}"
] | [
0,
0,
0,
0,
1,
1,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
],
[
23
],
[
25
],
[
27
],
[
29
]
] |
21,247 | static int dump_init(DumpState *s, int fd, bool has_format,
DumpGuestMemoryFormat format, bool paging, bool has_filter,
int64_t begin, int64_t length, Error **errp)
{
CPUState *cpu;
int nr_cpus;
Error *err = NULL;
int ret;
/* kdump-compressed is conflict with paging and filter */
if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
assert(!paging && !has_filter);
}
if (runstate_is_running()) {
vm_stop(RUN_STATE_SAVE_VM);
s->resume = true;
} else {
s->resume = false;
}
/* If we use KVM, we should synchronize the registers before we get dump
* info or physmap info.
*/
cpu_synchronize_all_states();
nr_cpus = 0;
CPU_FOREACH(cpu) {
nr_cpus++;
}
s->fd = fd;
s->has_filter = has_filter;
s->begin = begin;
s->length = length;
guest_phys_blocks_init(&s->guest_phys_blocks);
guest_phys_blocks_append(&s->guest_phys_blocks);
s->start = get_start_block(s);
if (s->start == -1) {
error_set(errp, QERR_INVALID_PARAMETER, "begin");
goto cleanup;
}
/* get dump info: endian, class and architecture.
* If the target architecture is not supported, cpu_get_dump_info() will
* return -1.
*/
ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
if (ret < 0) {
error_set(errp, QERR_UNSUPPORTED);
goto cleanup;
}
s->note_size = cpu_get_note_size(s->dump_info.d_class,
s->dump_info.d_machine, nr_cpus);
if (s->note_size < 0) {
error_set(errp, QERR_UNSUPPORTED);
goto cleanup;
}
/* get memory mapping */
memory_mapping_list_init(&s->list);
if (paging) {
qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
if (err != NULL) {
error_propagate(errp, err);
goto cleanup;
}
} else {
qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
}
s->nr_cpus = nr_cpus;
get_max_mapnr(s);
uint64_t tmp;
tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), TARGET_PAGE_SIZE);
s->len_dump_bitmap = tmp * TARGET_PAGE_SIZE;
/* init for kdump-compressed format */
if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
switch (format) {
case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
break;
case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
if (lzo_init() != LZO_E_OK) {
error_setg(errp, "failed to initialize the LZO library");
goto cleanup;
}
#endif
s->flag_compress = DUMP_DH_COMPRESSED_LZO;
break;
case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
break;
default:
s->flag_compress = 0;
}
return 0;
}
if (s->has_filter) {
memory_mapping_filter(&s->list, s->begin, s->length);
}
/*
* calculate phdr_num
*
* the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
*/
s->phdr_num = 1; /* PT_NOTE */
if (s->list.num < UINT16_MAX - 2) {
s->phdr_num += s->list.num;
s->have_section = false;
} else {
s->have_section = true;
s->phdr_num = PN_XNUM;
s->sh_info = 1; /* PT_NOTE */
/* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
if (s->list.num <= UINT32_MAX - 1) {
s->sh_info += s->list.num;
} else {
s->sh_info = UINT32_MAX;
}
}
if (s->dump_info.d_class == ELFCLASS64) {
if (s->have_section) {
s->memory_offset = sizeof(Elf64_Ehdr) +
sizeof(Elf64_Phdr) * s->sh_info +
sizeof(Elf64_Shdr) + s->note_size;
} else {
s->memory_offset = sizeof(Elf64_Ehdr) +
sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
}
} else {
if (s->have_section) {
s->memory_offset = sizeof(Elf32_Ehdr) +
sizeof(Elf32_Phdr) * s->sh_info +
sizeof(Elf32_Shdr) + s->note_size;
} else {
s->memory_offset = sizeof(Elf32_Ehdr) +
sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
}
}
return 0;
cleanup:
guest_phys_blocks_free(&s->guest_phys_blocks);
if (s->resume) {
vm_start();
}
return -1;
}
| true | qemu | 2928207ac1bb2751a1554ea0f9a9641179f51488 | static int dump_init(DumpState *s, int fd, bool has_format,
DumpGuestMemoryFormat format, bool paging, bool has_filter,
int64_t begin, int64_t length, Error **errp)
{
CPUState *cpu;
int nr_cpus;
Error *err = NULL;
int ret;
if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
assert(!paging && !has_filter);
}
if (runstate_is_running()) {
vm_stop(RUN_STATE_SAVE_VM);
s->resume = true;
} else {
s->resume = false;
}
cpu_synchronize_all_states();
nr_cpus = 0;
CPU_FOREACH(cpu) {
nr_cpus++;
}
s->fd = fd;
s->has_filter = has_filter;
s->begin = begin;
s->length = length;
guest_phys_blocks_init(&s->guest_phys_blocks);
guest_phys_blocks_append(&s->guest_phys_blocks);
s->start = get_start_block(s);
if (s->start == -1) {
error_set(errp, QERR_INVALID_PARAMETER, "begin");
goto cleanup;
}
ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
if (ret < 0) {
error_set(errp, QERR_UNSUPPORTED);
goto cleanup;
}
s->note_size = cpu_get_note_size(s->dump_info.d_class,
s->dump_info.d_machine, nr_cpus);
if (s->note_size < 0) {
error_set(errp, QERR_UNSUPPORTED);
goto cleanup;
}
memory_mapping_list_init(&s->list);
if (paging) {
qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
if (err != NULL) {
error_propagate(errp, err);
goto cleanup;
}
} else {
qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
}
s->nr_cpus = nr_cpus;
get_max_mapnr(s);
uint64_t tmp;
tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), TARGET_PAGE_SIZE);
s->len_dump_bitmap = tmp * TARGET_PAGE_SIZE;
if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
switch (format) {
case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
break;
case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
if (lzo_init() != LZO_E_OK) {
error_setg(errp, "failed to initialize the LZO library");
goto cleanup;
}
#endif
s->flag_compress = DUMP_DH_COMPRESSED_LZO;
break;
case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
break;
default:
s->flag_compress = 0;
}
return 0;
}
if (s->has_filter) {
memory_mapping_filter(&s->list, s->begin, s->length);
}
s->phdr_num = 1;
if (s->list.num < UINT16_MAX - 2) {
s->phdr_num += s->list.num;
s->have_section = false;
} else {
s->have_section = true;
s->phdr_num = PN_XNUM;
s->sh_info = 1;
if (s->list.num <= UINT32_MAX - 1) {
s->sh_info += s->list.num;
} else {
s->sh_info = UINT32_MAX;
}
}
if (s->dump_info.d_class == ELFCLASS64) {
if (s->have_section) {
s->memory_offset = sizeof(Elf64_Ehdr) +
sizeof(Elf64_Phdr) * s->sh_info +
sizeof(Elf64_Shdr) + s->note_size;
} else {
s->memory_offset = sizeof(Elf64_Ehdr) +
sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
}
} else {
if (s->have_section) {
s->memory_offset = sizeof(Elf32_Ehdr) +
sizeof(Elf32_Phdr) * s->sh_info +
sizeof(Elf32_Shdr) + s->note_size;
} else {
s->memory_offset = sizeof(Elf32_Ehdr) +
sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
}
}
return 0;
cleanup:
guest_phys_blocks_free(&s->guest_phys_blocks);
if (s->resume) {
vm_start();
}
return -1;
}
| {
"code": [
" memory_mapping_list_init(&s->list);",
" guest_phys_blocks_free(&s->guest_phys_blocks);",
" if (s->resume) {",
" vm_start();"
],
"line_no": [
125,
317,
321,
323
]
} | static int FUNC_0(DumpState *VAR_0, int VAR_1, bool VAR_2,
DumpGuestMemoryFormat VAR_3, bool VAR_4, bool VAR_5,
int64_t VAR_6, int64_t VAR_7, Error **VAR_8)
{
CPUState *cpu;
int VAR_9;
Error *err = NULL;
int VAR_10;
if (VAR_2 && VAR_3 != DUMP_GUEST_MEMORY_FORMAT_ELF) {
assert(!VAR_4 && !VAR_5);
}
if (runstate_is_running()) {
vm_stop(RUN_STATE_SAVE_VM);
VAR_0->resume = true;
} else {
VAR_0->resume = false;
}
cpu_synchronize_all_states();
VAR_9 = 0;
CPU_FOREACH(cpu) {
VAR_9++;
}
VAR_0->VAR_1 = VAR_1;
VAR_0->VAR_5 = VAR_5;
VAR_0->VAR_6 = VAR_6;
VAR_0->VAR_7 = VAR_7;
guest_phys_blocks_init(&VAR_0->guest_phys_blocks);
guest_phys_blocks_append(&VAR_0->guest_phys_blocks);
VAR_0->start = get_start_block(VAR_0);
if (VAR_0->start == -1) {
error_set(VAR_8, QERR_INVALID_PARAMETER, "VAR_6");
goto cleanup;
}
VAR_10 = cpu_get_dump_info(&VAR_0->dump_info, &VAR_0->guest_phys_blocks);
if (VAR_10 < 0) {
error_set(VAR_8, QERR_UNSUPPORTED);
goto cleanup;
}
VAR_0->note_size = cpu_get_note_size(VAR_0->dump_info.d_class,
VAR_0->dump_info.d_machine, VAR_9);
if (VAR_0->note_size < 0) {
error_set(VAR_8, QERR_UNSUPPORTED);
goto cleanup;
}
memory_mapping_list_init(&VAR_0->list);
if (VAR_4) {
qemu_get_guest_memory_mapping(&VAR_0->list, &VAR_0->guest_phys_blocks, &err);
if (err != NULL) {
error_propagate(VAR_8, err);
goto cleanup;
}
} else {
qemu_get_guest_simple_memory_mapping(&VAR_0->list, &VAR_0->guest_phys_blocks);
}
VAR_0->VAR_9 = VAR_9;
get_max_mapnr(VAR_0);
uint64_t tmp;
tmp = DIV_ROUND_UP(DIV_ROUND_UP(VAR_0->max_mapnr, CHAR_BIT), TARGET_PAGE_SIZE);
VAR_0->len_dump_bitmap = tmp * TARGET_PAGE_SIZE;
if (VAR_2 && VAR_3 != DUMP_GUEST_MEMORY_FORMAT_ELF) {
switch (VAR_3) {
case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
VAR_0->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
break;
case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
if (lzo_init() != LZO_E_OK) {
error_setg(VAR_8, "failed to initialize the LZO library");
goto cleanup;
}
#endif
VAR_0->flag_compress = DUMP_DH_COMPRESSED_LZO;
break;
case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
VAR_0->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
break;
default:
VAR_0->flag_compress = 0;
}
return 0;
}
if (VAR_0->VAR_5) {
memory_mapping_filter(&VAR_0->list, VAR_0->VAR_6, VAR_0->VAR_7);
}
VAR_0->phdr_num = 1;
if (VAR_0->list.num < UINT16_MAX - 2) {
VAR_0->phdr_num += VAR_0->list.num;
VAR_0->have_section = false;
} else {
VAR_0->have_section = true;
VAR_0->phdr_num = PN_XNUM;
VAR_0->sh_info = 1;
if (VAR_0->list.num <= UINT32_MAX - 1) {
VAR_0->sh_info += VAR_0->list.num;
} else {
VAR_0->sh_info = UINT32_MAX;
}
}
if (VAR_0->dump_info.d_class == ELFCLASS64) {
if (VAR_0->have_section) {
VAR_0->memory_offset = sizeof(Elf64_Ehdr) +
sizeof(Elf64_Phdr) * VAR_0->sh_info +
sizeof(Elf64_Shdr) + VAR_0->note_size;
} else {
VAR_0->memory_offset = sizeof(Elf64_Ehdr) +
sizeof(Elf64_Phdr) * VAR_0->phdr_num + VAR_0->note_size;
}
} else {
if (VAR_0->have_section) {
VAR_0->memory_offset = sizeof(Elf32_Ehdr) +
sizeof(Elf32_Phdr) * VAR_0->sh_info +
sizeof(Elf32_Shdr) + VAR_0->note_size;
} else {
VAR_0->memory_offset = sizeof(Elf32_Ehdr) +
sizeof(Elf32_Phdr) * VAR_0->phdr_num + VAR_0->note_size;
}
}
return 0;
cleanup:
guest_phys_blocks_free(&VAR_0->guest_phys_blocks);
if (VAR_0->resume) {
vm_start();
}
return -1;
}
| [
"static int FUNC_0(DumpState *VAR_0, int VAR_1, bool VAR_2,\nDumpGuestMemoryFormat VAR_3, bool VAR_4, bool VAR_5,\nint64_t VAR_6, int64_t VAR_7, Error **VAR_8)\n{",
"CPUState *cpu;",
"int VAR_9;",
"Error *err = NULL;",
"int VAR_10;",
"if (VAR_2 && VAR_3 != DUMP_GUEST_MEMORY_FORMAT_ELF) {",
"assert(!VAR_4 && !VAR_5);",
"}",
"if (runstate_is_running()) {",
"vm_stop(RUN_STATE_SAVE_VM);",
"VAR_0->resume = true;",
"} else {",
"VAR_0->resume = false;",
"}",
"cpu_synchronize_all_states();",
"VAR_9 = 0;",
"CPU_FOREACH(cpu) {",
"VAR_9++;",
"}",
"VAR_0->VAR_1 = VAR_1;",
"VAR_0->VAR_5 = VAR_5;",
"VAR_0->VAR_6 = VAR_6;",
"VAR_0->VAR_7 = VAR_7;",
"guest_phys_blocks_init(&VAR_0->guest_phys_blocks);",
"guest_phys_blocks_append(&VAR_0->guest_phys_blocks);",
"VAR_0->start = get_start_block(VAR_0);",
"if (VAR_0->start == -1) {",
"error_set(VAR_8, QERR_INVALID_PARAMETER, \"VAR_6\");",
"goto cleanup;",
"}",
"VAR_10 = cpu_get_dump_info(&VAR_0->dump_info, &VAR_0->guest_phys_blocks);",
"if (VAR_10 < 0) {",
"error_set(VAR_8, QERR_UNSUPPORTED);",
"goto cleanup;",
"}",
"VAR_0->note_size = cpu_get_note_size(VAR_0->dump_info.d_class,\nVAR_0->dump_info.d_machine, VAR_9);",
"if (VAR_0->note_size < 0) {",
"error_set(VAR_8, QERR_UNSUPPORTED);",
"goto cleanup;",
"}",
"memory_mapping_list_init(&VAR_0->list);",
"if (VAR_4) {",
"qemu_get_guest_memory_mapping(&VAR_0->list, &VAR_0->guest_phys_blocks, &err);",
"if (err != NULL) {",
"error_propagate(VAR_8, err);",
"goto cleanup;",
"}",
"} else {",
"qemu_get_guest_simple_memory_mapping(&VAR_0->list, &VAR_0->guest_phys_blocks);",
"}",
"VAR_0->VAR_9 = VAR_9;",
"get_max_mapnr(VAR_0);",
"uint64_t tmp;",
"tmp = DIV_ROUND_UP(DIV_ROUND_UP(VAR_0->max_mapnr, CHAR_BIT), TARGET_PAGE_SIZE);",
"VAR_0->len_dump_bitmap = tmp * TARGET_PAGE_SIZE;",
"if (VAR_2 && VAR_3 != DUMP_GUEST_MEMORY_FORMAT_ELF) {",
"switch (VAR_3) {",
"case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:\nVAR_0->flag_compress = DUMP_DH_COMPRESSED_ZLIB;",
"break;",
"case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:\n#ifdef CONFIG_LZO\nif (lzo_init() != LZO_E_OK) {",
"error_setg(VAR_8, \"failed to initialize the LZO library\");",
"goto cleanup;",
"}",
"#endif\nVAR_0->flag_compress = DUMP_DH_COMPRESSED_LZO;",
"break;",
"case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:\nVAR_0->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;",
"break;",
"default:\nVAR_0->flag_compress = 0;",
"}",
"return 0;",
"}",
"if (VAR_0->VAR_5) {",
"memory_mapping_filter(&VAR_0->list, VAR_0->VAR_6, VAR_0->VAR_7);",
"}",
"VAR_0->phdr_num = 1;",
"if (VAR_0->list.num < UINT16_MAX - 2) {",
"VAR_0->phdr_num += VAR_0->list.num;",
"VAR_0->have_section = false;",
"} else {",
"VAR_0->have_section = true;",
"VAR_0->phdr_num = PN_XNUM;",
"VAR_0->sh_info = 1;",
"if (VAR_0->list.num <= UINT32_MAX - 1) {",
"VAR_0->sh_info += VAR_0->list.num;",
"} else {",
"VAR_0->sh_info = UINT32_MAX;",
"}",
"}",
"if (VAR_0->dump_info.d_class == ELFCLASS64) {",
"if (VAR_0->have_section) {",
"VAR_0->memory_offset = sizeof(Elf64_Ehdr) +\nsizeof(Elf64_Phdr) * VAR_0->sh_info +\nsizeof(Elf64_Shdr) + VAR_0->note_size;",
"} else {",
"VAR_0->memory_offset = sizeof(Elf64_Ehdr) +\nsizeof(Elf64_Phdr) * VAR_0->phdr_num + VAR_0->note_size;",
"}",
"} else {",
"if (VAR_0->have_section) {",
"VAR_0->memory_offset = sizeof(Elf32_Ehdr) +\nsizeof(Elf32_Phdr) * VAR_0->sh_info +\nsizeof(Elf32_Shdr) + VAR_0->note_size;",
"} else {",
"VAR_0->memory_offset = sizeof(Elf32_Ehdr) +\nsizeof(Elf32_Phdr) * VAR_0->phdr_num + VAR_0->note_size;",
"}",
"}",
"return 0;",
"cleanup:\nguest_phys_blocks_free(&VAR_0->guest_phys_blocks);",
"if (VAR_0->resume) {",
"vm_start();",
"}",
"return -1;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0
] | [
[
1,
3,
5,
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
21
],
[
23
],
[
25
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
49
],
[
51
],
[
53
],
[
55
],
[
57
],
[
61
],
[
63
],
[
65
],
[
67
],
[
71
],
[
73
],
[
77
],
[
79
],
[
81
],
[
83
],
[
85
],
[
97
],
[
99
],
[
101
],
[
103
],
[
105
],
[
109,
111
],
[
113
],
[
115
],
[
117
],
[
119
],
[
125
],
[
127
],
[
129
],
[
131
],
[
133
],
[
135
],
[
137
],
[
139
],
[
141
],
[
143
],
[
147
],
[
151
],
[
155
],
[
157
],
[
159
],
[
165
],
[
167
],
[
169,
171
],
[
173
],
[
177,
179,
181
],
[
183
],
[
185
],
[
187
],
[
189,
191
],
[
193
],
[
197,
199
],
[
201
],
[
205,
207
],
[
209
],
[
213
],
[
215
],
[
219
],
[
221
],
[
223
],
[
237
],
[
239
],
[
241
],
[
243
],
[
245
],
[
247
],
[
249
],
[
251
],
[
257
],
[
259
],
[
261
],
[
263
],
[
265
],
[
267
],
[
271
],
[
273
],
[
275,
277,
279
],
[
281
],
[
283,
285
],
[
287
],
[
289
],
[
291
],
[
293,
295,
297
],
[
299
],
[
301,
303
],
[
305
],
[
307
],
[
311
],
[
315,
317
],
[
321
],
[
323
],
[
325
],
[
329
],
[
331
]
] |
21,248 | static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
{
return wrap_timestamp(s->streams[stream_index], read_timestamp(s, stream_index, ppos, pos_limit));
}
| false | FFmpeg | 695a766bff4cd8414a84e58159506d72b4e44892 | static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
{
return wrap_timestamp(s->streams[stream_index], read_timestamp(s, stream_index, ppos, pos_limit));
}
| {
"code": [],
"line_no": []
} | static int64_t FUNC_0(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
{
return wrap_timestamp(s->streams[stream_index], read_timestamp(s, stream_index, ppos, pos_limit));
}
| [
"static int64_t FUNC_0(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,\nint64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))\n{",
"return wrap_timestamp(s->streams[stream_index], read_timestamp(s, stream_index, ppos, pos_limit));",
"}"
] | [
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
]
] |
21,249 | gboolean vnc_client_io(QIOChannel *ioc G_GNUC_UNUSED,
GIOCondition condition, void *opaque)
{
VncState *vs = opaque;
if (condition & G_IO_IN) {
vnc_client_read(vs);
}
if (condition & G_IO_OUT) {
vnc_client_write(vs);
}
return TRUE;
}
| true | qemu | ea697449884d83b83fefbc9cd87bdde0c94b49d6 | gboolean vnc_client_io(QIOChannel *ioc G_GNUC_UNUSED,
GIOCondition condition, void *opaque)
{
VncState *vs = opaque;
if (condition & G_IO_IN) {
vnc_client_read(vs);
}
if (condition & G_IO_OUT) {
vnc_client_write(vs);
}
return TRUE;
}
| {
"code": [
" vnc_client_read(vs);"
],
"line_no": [
11
]
} | gboolean FUNC_0(QIOChannel *ioc G_GNUC_UNUSED,
GIOCondition condition, void *opaque)
{
VncState *vs = opaque;
if (condition & G_IO_IN) {
vnc_client_read(vs);
}
if (condition & G_IO_OUT) {
vnc_client_write(vs);
}
return TRUE;
}
| [
"gboolean FUNC_0(QIOChannel *ioc G_GNUC_UNUSED,\nGIOCondition condition, void *opaque)\n{",
"VncState *vs = opaque;",
"if (condition & G_IO_IN) {",
"vnc_client_read(vs);",
"}",
"if (condition & G_IO_OUT) {",
"vnc_client_write(vs);",
"}",
"return TRUE;",
"}"
] | [
0,
0,
0,
1,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
]
] |
21,252 | float128 float128_scalbn( float128 a, int n STATUS_PARAM )
{
flag aSign;
int32 aExp;
uint64_t aSig0, aSig1;
aSig1 = extractFloat128Frac1( a );
aSig0 = extractFloat128Frac0( a );
aExp = extractFloat128Exp( a );
aSign = extractFloat128Sign( a );
if ( aExp == 0x7FFF ) {
return a;
}
if ( aExp != 0 )
aSig0 |= LIT64( 0x0001000000000000 );
else if ( aSig0 == 0 && aSig1 == 0 )
return a;
aExp += n - 1;
return normalizeRoundAndPackFloat128( aSign, aExp, aSig0, aSig1
STATUS_VAR );
}
| true | qemu | 326b9e98a391d542cc33c4c91782ff4ba51edfc5 | float128 float128_scalbn( float128 a, int n STATUS_PARAM )
{
flag aSign;
int32 aExp;
uint64_t aSig0, aSig1;
aSig1 = extractFloat128Frac1( a );
aSig0 = extractFloat128Frac0( a );
aExp = extractFloat128Exp( a );
aSign = extractFloat128Sign( a );
if ( aExp == 0x7FFF ) {
return a;
}
if ( aExp != 0 )
aSig0 |= LIT64( 0x0001000000000000 );
else if ( aSig0 == 0 && aSig1 == 0 )
return a;
aExp += n - 1;
return normalizeRoundAndPackFloat128( aSign, aExp, aSig0, aSig1
STATUS_VAR );
}
| {
"code": [
" int32 aExp;"
],
"line_no": [
7
]
} | float128 FUNC_0( float128 a, int n STATUS_PARAM )
{
flag aSign;
int32 aExp;
uint64_t aSig0, aSig1;
aSig1 = extractFloat128Frac1( a );
aSig0 = extractFloat128Frac0( a );
aExp = extractFloat128Exp( a );
aSign = extractFloat128Sign( a );
if ( aExp == 0x7FFF ) {
return a;
}
if ( aExp != 0 )
aSig0 |= LIT64( 0x0001000000000000 );
else if ( aSig0 == 0 && aSig1 == 0 )
return a;
aExp += n - 1;
return normalizeRoundAndPackFloat128( aSign, aExp, aSig0, aSig1
STATUS_VAR );
}
| [
"float128 FUNC_0( float128 a, int n STATUS_PARAM )\n{",
"flag aSign;",
"int32 aExp;",
"uint64_t aSig0, aSig1;",
"aSig1 = extractFloat128Frac1( a );",
"aSig0 = extractFloat128Frac0( a );",
"aExp = extractFloat128Exp( a );",
"aSign = extractFloat128Sign( a );",
"if ( aExp == 0x7FFF ) {",
"return a;",
"}",
"if ( aExp != 0 )\naSig0 |= LIT64( 0x0001000000000000 );",
"else if ( aSig0 == 0 && aSig1 == 0 )\nreturn a;",
"aExp += n - 1;",
"return normalizeRoundAndPackFloat128( aSign, aExp, aSig0, aSig1\nSTATUS_VAR );",
"}"
] | [
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27,
29
],
[
31,
33
],
[
37
],
[
39,
41
],
[
45
]
] |
21,253 | static void process_synthesis_subpackets(QDM2Context *q, QDM2SubPNode *list)
{
QDM2SubPNode *nodes[4];
nodes[0] = qdm2_search_subpacket_type_in_list(list, 9);
if (nodes[0] != NULL)
process_subpacket_9(q, nodes[0]);
nodes[1] = qdm2_search_subpacket_type_in_list(list, 10);
if (nodes[1] != NULL)
process_subpacket_10(q, nodes[1]);
else
process_subpacket_10(q, NULL);
nodes[2] = qdm2_search_subpacket_type_in_list(list, 11);
if (nodes[0] != NULL && nodes[1] != NULL && nodes[2] != NULL)
process_subpacket_11(q, nodes[2]);
else
process_subpacket_11(q, NULL);
nodes[3] = qdm2_search_subpacket_type_in_list(list, 12);
if (nodes[0] != NULL && nodes[1] != NULL && nodes[3] != NULL)
process_subpacket_12(q, nodes[3]);
else
process_subpacket_12(q, NULL);
}
| false | FFmpeg | 4b1f5e5090abed6c618c8ba380cd7d28d140f867 | static void process_synthesis_subpackets(QDM2Context *q, QDM2SubPNode *list)
{
QDM2SubPNode *nodes[4];
nodes[0] = qdm2_search_subpacket_type_in_list(list, 9);
if (nodes[0] != NULL)
process_subpacket_9(q, nodes[0]);
nodes[1] = qdm2_search_subpacket_type_in_list(list, 10);
if (nodes[1] != NULL)
process_subpacket_10(q, nodes[1]);
else
process_subpacket_10(q, NULL);
nodes[2] = qdm2_search_subpacket_type_in_list(list, 11);
if (nodes[0] != NULL && nodes[1] != NULL && nodes[2] != NULL)
process_subpacket_11(q, nodes[2]);
else
process_subpacket_11(q, NULL);
nodes[3] = qdm2_search_subpacket_type_in_list(list, 12);
if (nodes[0] != NULL && nodes[1] != NULL && nodes[3] != NULL)
process_subpacket_12(q, nodes[3]);
else
process_subpacket_12(q, NULL);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(QDM2Context *VAR_0, QDM2SubPNode *VAR_1)
{
QDM2SubPNode *nodes[4];
nodes[0] = qdm2_search_subpacket_type_in_list(VAR_1, 9);
if (nodes[0] != NULL)
process_subpacket_9(VAR_0, nodes[0]);
nodes[1] = qdm2_search_subpacket_type_in_list(VAR_1, 10);
if (nodes[1] != NULL)
process_subpacket_10(VAR_0, nodes[1]);
else
process_subpacket_10(VAR_0, NULL);
nodes[2] = qdm2_search_subpacket_type_in_list(VAR_1, 11);
if (nodes[0] != NULL && nodes[1] != NULL && nodes[2] != NULL)
process_subpacket_11(VAR_0, nodes[2]);
else
process_subpacket_11(VAR_0, NULL);
nodes[3] = qdm2_search_subpacket_type_in_list(VAR_1, 12);
if (nodes[0] != NULL && nodes[1] != NULL && nodes[3] != NULL)
process_subpacket_12(VAR_0, nodes[3]);
else
process_subpacket_12(VAR_0, NULL);
}
| [
"static void FUNC_0(QDM2Context *VAR_0, QDM2SubPNode *VAR_1)\n{",
"QDM2SubPNode *nodes[4];",
"nodes[0] = qdm2_search_subpacket_type_in_list(VAR_1, 9);",
"if (nodes[0] != NULL)\nprocess_subpacket_9(VAR_0, nodes[0]);",
"nodes[1] = qdm2_search_subpacket_type_in_list(VAR_1, 10);",
"if (nodes[1] != NULL)\nprocess_subpacket_10(VAR_0, nodes[1]);",
"else\nprocess_subpacket_10(VAR_0, NULL);",
"nodes[2] = qdm2_search_subpacket_type_in_list(VAR_1, 11);",
"if (nodes[0] != NULL && nodes[1] != NULL && nodes[2] != NULL)\nprocess_subpacket_11(VAR_0, nodes[2]);",
"else\nprocess_subpacket_11(VAR_0, NULL);",
"nodes[3] = qdm2_search_subpacket_type_in_list(VAR_1, 12);",
"if (nodes[0] != NULL && nodes[1] != NULL && nodes[3] != NULL)\nprocess_subpacket_12(VAR_0, nodes[3]);",
"else\nprocess_subpacket_12(VAR_0, NULL);",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11,
13
],
[
17
],
[
19,
21
],
[
23,
25
],
[
29
],
[
31,
33
],
[
35,
37
],
[
41
],
[
43,
45
],
[
47,
49
],
[
51
]
] |
21,254 | static int ffm_read_data(AVFormatContext *s,
uint8_t *buf, int size, int header)
{
FFMContext *ffm = s->priv_data;
AVIOContext *pb = s->pb;
int len, fill_size, size1, frame_offset, id;
int64_t last_pos = -1;
size1 = size;
while (size > 0) {
redo:
len = ffm->packet_end - ffm->packet_ptr;
if (len < 0)
return -1;
if (len > size)
len = size;
if (len == 0) {
if (avio_tell(pb) == ffm->file_size)
avio_seek(pb, ffm->packet_size, SEEK_SET);
retry_read:
if (pb->buffer_size != ffm->packet_size) {
int64_t tell = avio_tell(pb);
ffio_set_buf_size(pb, ffm->packet_size);
avio_seek(pb, tell, SEEK_SET);
}
id = avio_rb16(pb); /* PACKET_ID */
if (id != PACKET_ID) {
if (ffm_resync(s, id) < 0)
return -1;
last_pos = avio_tell(pb);
}
fill_size = avio_rb16(pb);
ffm->dts = avio_rb64(pb);
frame_offset = avio_rb16(pb);
avio_read(pb, ffm->packet, ffm->packet_size - FFM_HEADER_SIZE);
ffm->packet_end = ffm->packet + (ffm->packet_size - FFM_HEADER_SIZE - fill_size);
if (ffm->packet_end < ffm->packet || frame_offset < 0)
return -1;
/* if first packet or resynchronization packet, we must
handle it specifically */
if (ffm->first_packet || (frame_offset & 0x8000)) {
if (!frame_offset) {
/* This packet has no frame headers in it */
if (avio_tell(pb) >= ffm->packet_size * 3LL) {
int64_t seekback = FFMIN(ffm->packet_size * 2LL, avio_tell(pb) - last_pos);
seekback = FFMAX(seekback, 0);
avio_seek(pb, -seekback, SEEK_CUR);
goto retry_read;
}
/* This is bad, we cannot find a valid frame header */
return 0;
}
ffm->first_packet = 0;
if ((frame_offset & 0x7fff) < FFM_HEADER_SIZE)
return -1;
ffm->packet_ptr = ffm->packet + (frame_offset & 0x7fff) - FFM_HEADER_SIZE;
if (!header)
break;
} else {
ffm->packet_ptr = ffm->packet;
}
goto redo;
}
memcpy(buf, ffm->packet_ptr, len);
buf += len;
ffm->packet_ptr += len;
size -= len;
header = 0;
}
return size1 - size;
}
| false | FFmpeg | dc55477a64cefebf8dcc611f026be71382814ae2 | static int ffm_read_data(AVFormatContext *s,
uint8_t *buf, int size, int header)
{
FFMContext *ffm = s->priv_data;
AVIOContext *pb = s->pb;
int len, fill_size, size1, frame_offset, id;
int64_t last_pos = -1;
size1 = size;
while (size > 0) {
redo:
len = ffm->packet_end - ffm->packet_ptr;
if (len < 0)
return -1;
if (len > size)
len = size;
if (len == 0) {
if (avio_tell(pb) == ffm->file_size)
avio_seek(pb, ffm->packet_size, SEEK_SET);
retry_read:
if (pb->buffer_size != ffm->packet_size) {
int64_t tell = avio_tell(pb);
ffio_set_buf_size(pb, ffm->packet_size);
avio_seek(pb, tell, SEEK_SET);
}
id = avio_rb16(pb);
if (id != PACKET_ID) {
if (ffm_resync(s, id) < 0)
return -1;
last_pos = avio_tell(pb);
}
fill_size = avio_rb16(pb);
ffm->dts = avio_rb64(pb);
frame_offset = avio_rb16(pb);
avio_read(pb, ffm->packet, ffm->packet_size - FFM_HEADER_SIZE);
ffm->packet_end = ffm->packet + (ffm->packet_size - FFM_HEADER_SIZE - fill_size);
if (ffm->packet_end < ffm->packet || frame_offset < 0)
return -1;
if (ffm->first_packet || (frame_offset & 0x8000)) {
if (!frame_offset) {
if (avio_tell(pb) >= ffm->packet_size * 3LL) {
int64_t seekback = FFMIN(ffm->packet_size * 2LL, avio_tell(pb) - last_pos);
seekback = FFMAX(seekback, 0);
avio_seek(pb, -seekback, SEEK_CUR);
goto retry_read;
}
return 0;
}
ffm->first_packet = 0;
if ((frame_offset & 0x7fff) < FFM_HEADER_SIZE)
return -1;
ffm->packet_ptr = ffm->packet + (frame_offset & 0x7fff) - FFM_HEADER_SIZE;
if (!header)
break;
} else {
ffm->packet_ptr = ffm->packet;
}
goto redo;
}
memcpy(buf, ffm->packet_ptr, len);
buf += len;
ffm->packet_ptr += len;
size -= len;
header = 0;
}
return size1 - size;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(AVFormatContext *VAR_0,
uint8_t *VAR_1, int VAR_2, int VAR_3)
{
FFMContext *ffm = VAR_0->priv_data;
AVIOContext *pb = VAR_0->pb;
int VAR_4, VAR_5, VAR_6, VAR_7, VAR_8;
int64_t last_pos = -1;
VAR_6 = VAR_2;
while (VAR_2 > 0) {
redo:
VAR_4 = ffm->packet_end - ffm->packet_ptr;
if (VAR_4 < 0)
return -1;
if (VAR_4 > VAR_2)
VAR_4 = VAR_2;
if (VAR_4 == 0) {
if (avio_tell(pb) == ffm->file_size)
avio_seek(pb, ffm->packet_size, SEEK_SET);
retry_read:
if (pb->buffer_size != ffm->packet_size) {
int64_t tell = avio_tell(pb);
ffio_set_buf_size(pb, ffm->packet_size);
avio_seek(pb, tell, SEEK_SET);
}
VAR_8 = avio_rb16(pb);
if (VAR_8 != PACKET_ID) {
if (ffm_resync(VAR_0, VAR_8) < 0)
return -1;
last_pos = avio_tell(pb);
}
VAR_5 = avio_rb16(pb);
ffm->dts = avio_rb64(pb);
VAR_7 = avio_rb16(pb);
avio_read(pb, ffm->packet, ffm->packet_size - FFM_HEADER_SIZE);
ffm->packet_end = ffm->packet + (ffm->packet_size - FFM_HEADER_SIZE - VAR_5);
if (ffm->packet_end < ffm->packet || VAR_7 < 0)
return -1;
if (ffm->first_packet || (VAR_7 & 0x8000)) {
if (!VAR_7) {
if (avio_tell(pb) >= ffm->packet_size * 3LL) {
int64_t seekback = FFMIN(ffm->packet_size * 2LL, avio_tell(pb) - last_pos);
seekback = FFMAX(seekback, 0);
avio_seek(pb, -seekback, SEEK_CUR);
goto retry_read;
}
return 0;
}
ffm->first_packet = 0;
if ((VAR_7 & 0x7fff) < FFM_HEADER_SIZE)
return -1;
ffm->packet_ptr = ffm->packet + (VAR_7 & 0x7fff) - FFM_HEADER_SIZE;
if (!VAR_3)
break;
} else {
ffm->packet_ptr = ffm->packet;
}
goto redo;
}
memcpy(VAR_1, ffm->packet_ptr, VAR_4);
VAR_1 += VAR_4;
ffm->packet_ptr += VAR_4;
VAR_2 -= VAR_4;
VAR_3 = 0;
}
return VAR_6 - VAR_2;
}
| [
"static int FUNC_0(AVFormatContext *VAR_0,\nuint8_t *VAR_1, int VAR_2, int VAR_3)\n{",
"FFMContext *ffm = VAR_0->priv_data;",
"AVIOContext *pb = VAR_0->pb;",
"int VAR_4, VAR_5, VAR_6, VAR_7, VAR_8;",
"int64_t last_pos = -1;",
"VAR_6 = VAR_2;",
"while (VAR_2 > 0) {",
"redo:\nVAR_4 = ffm->packet_end - ffm->packet_ptr;",
"if (VAR_4 < 0)\nreturn -1;",
"if (VAR_4 > VAR_2)\nVAR_4 = VAR_2;",
"if (VAR_4 == 0) {",
"if (avio_tell(pb) == ffm->file_size)\navio_seek(pb, ffm->packet_size, SEEK_SET);",
"retry_read:\nif (pb->buffer_size != ffm->packet_size) {",
"int64_t tell = avio_tell(pb);",
"ffio_set_buf_size(pb, ffm->packet_size);",
"avio_seek(pb, tell, SEEK_SET);",
"}",
"VAR_8 = avio_rb16(pb);",
"if (VAR_8 != PACKET_ID) {",
"if (ffm_resync(VAR_0, VAR_8) < 0)\nreturn -1;",
"last_pos = avio_tell(pb);",
"}",
"VAR_5 = avio_rb16(pb);",
"ffm->dts = avio_rb64(pb);",
"VAR_7 = avio_rb16(pb);",
"avio_read(pb, ffm->packet, ffm->packet_size - FFM_HEADER_SIZE);",
"ffm->packet_end = ffm->packet + (ffm->packet_size - FFM_HEADER_SIZE - VAR_5);",
"if (ffm->packet_end < ffm->packet || VAR_7 < 0)\nreturn -1;",
"if (ffm->first_packet || (VAR_7 & 0x8000)) {",
"if (!VAR_7) {",
"if (avio_tell(pb) >= ffm->packet_size * 3LL) {",
"int64_t seekback = FFMIN(ffm->packet_size * 2LL, avio_tell(pb) - last_pos);",
"seekback = FFMAX(seekback, 0);",
"avio_seek(pb, -seekback, SEEK_CUR);",
"goto retry_read;",
"}",
"return 0;",
"}",
"ffm->first_packet = 0;",
"if ((VAR_7 & 0x7fff) < FFM_HEADER_SIZE)\nreturn -1;",
"ffm->packet_ptr = ffm->packet + (VAR_7 & 0x7fff) - FFM_HEADER_SIZE;",
"if (!VAR_3)\nbreak;",
"} else {",
"ffm->packet_ptr = ffm->packet;",
"}",
"goto redo;",
"}",
"memcpy(VAR_1, ffm->packet_ptr, VAR_4);",
"VAR_1 += VAR_4;",
"ffm->packet_ptr += VAR_4;",
"VAR_2 -= VAR_4;",
"VAR_3 = 0;",
"}",
"return VAR_6 - VAR_2;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
17
],
[
19
],
[
21,
23
],
[
25,
27
],
[
29,
31
],
[
33
],
[
35,
37
],
[
39,
41
],
[
43
],
[
45
],
[
47
],
[
49
],
[
51
],
[
53
],
[
55,
57
],
[
59
],
[
61
],
[
63
],
[
65
],
[
67
],
[
69
],
[
71
],
[
73,
75
],
[
81
],
[
83
],
[
87
],
[
89
],
[
91
],
[
93
],
[
95
],
[
97
],
[
101
],
[
103
],
[
105
],
[
107,
109
],
[
111
],
[
113,
115
],
[
117
],
[
119
],
[
121
],
[
123
],
[
125
],
[
127
],
[
129
],
[
131
],
[
133
],
[
135
],
[
137
],
[
139
],
[
141
]
] |
21,255 | static int udp_close(URLContext *h)
{
UDPContext *s = h->priv_data;
int ret;
if (s->is_multicast && (h->flags & AVIO_FLAG_READ))
udp_leave_multicast_group(s->udp_fd, (struct sockaddr *)&s->dest_addr);
closesocket(s->udp_fd);
av_fifo_free(s->fifo);
#if HAVE_PTHREADS
if (s->thread_started) {
pthread_cancel(s->circular_buffer_thread);
ret = pthread_join(s->circular_buffer_thread, NULL);
if (ret != 0)
av_log(h, AV_LOG_ERROR, "pthread_join(): %s\n", strerror(ret));
}
pthread_mutex_destroy(&s->mutex);
pthread_cond_destroy(&s->cond);
#endif
return 0;
}
| false | FFmpeg | 281bde27894f994d0982ab9283f15d6073ae352c | static int udp_close(URLContext *h)
{
UDPContext *s = h->priv_data;
int ret;
if (s->is_multicast && (h->flags & AVIO_FLAG_READ))
udp_leave_multicast_group(s->udp_fd, (struct sockaddr *)&s->dest_addr);
closesocket(s->udp_fd);
av_fifo_free(s->fifo);
#if HAVE_PTHREADS
if (s->thread_started) {
pthread_cancel(s->circular_buffer_thread);
ret = pthread_join(s->circular_buffer_thread, NULL);
if (ret != 0)
av_log(h, AV_LOG_ERROR, "pthread_join(): %s\n", strerror(ret));
}
pthread_mutex_destroy(&s->mutex);
pthread_cond_destroy(&s->cond);
#endif
return 0;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(URLContext *VAR_0)
{
UDPContext *s = VAR_0->priv_data;
int VAR_1;
if (s->is_multicast && (VAR_0->flags & AVIO_FLAG_READ))
udp_leave_multicast_group(s->udp_fd, (struct sockaddr *)&s->dest_addr);
closesocket(s->udp_fd);
av_fifo_free(s->fifo);
#if HAVE_PTHREADS
if (s->thread_started) {
pthread_cancel(s->circular_buffer_thread);
VAR_1 = pthread_join(s->circular_buffer_thread, NULL);
if (VAR_1 != 0)
av_log(VAR_0, AV_LOG_ERROR, "pthread_join(): %s\n", strerror(VAR_1));
}
pthread_mutex_destroy(&s->mutex);
pthread_cond_destroy(&s->cond);
#endif
return 0;
}
| [
"static int FUNC_0(URLContext *VAR_0)\n{",
"UDPContext *s = VAR_0->priv_data;",
"int VAR_1;",
"if (s->is_multicast && (VAR_0->flags & AVIO_FLAG_READ))\nudp_leave_multicast_group(s->udp_fd, (struct sockaddr *)&s->dest_addr);",
"closesocket(s->udp_fd);",
"av_fifo_free(s->fifo);",
"#if HAVE_PTHREADS\nif (s->thread_started) {",
"pthread_cancel(s->circular_buffer_thread);",
"VAR_1 = pthread_join(s->circular_buffer_thread, NULL);",
"if (VAR_1 != 0)\nav_log(VAR_0, AV_LOG_ERROR, \"pthread_join(): %s\\n\", strerror(VAR_1));",
"}",
"pthread_mutex_destroy(&s->mutex);",
"pthread_cond_destroy(&s->cond);",
"#endif\nreturn 0;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
11,
13
],
[
15
],
[
17
],
[
19,
21
],
[
23
],
[
25
],
[
27,
29
],
[
31
],
[
35
],
[
37
],
[
39,
41
],
[
43
]
] |
21,257 | static void gen_sse(CPUX86State *env, DisasContext *s, int b,
target_ulong pc_start, int rex_r)
{
int b1, op1_offset, op2_offset, is_xmm, val;
int modrm, mod, rm, reg;
SSEFunc_0_epp sse_fn_epp;
SSEFunc_0_eppi sse_fn_eppi;
SSEFunc_0_ppi sse_fn_ppi;
SSEFunc_0_eppt sse_fn_eppt;
TCGMemOp ot;
b &= 0xff;
if (s->prefix & PREFIX_DATA)
b1 = 1;
else if (s->prefix & PREFIX_REPZ)
b1 = 2;
else if (s->prefix & PREFIX_REPNZ)
b1 = 3;
else
b1 = 0;
sse_fn_epp = sse_op_table1[b][b1];
if (!sse_fn_epp) {
goto unknown_op;
}
if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
is_xmm = 1;
} else {
if (b1 == 0) {
/* MMX case */
is_xmm = 0;
} else {
is_xmm = 1;
}
}
/* simple MMX/SSE operation */
if (s->flags & HF_TS_MASK) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
return;
}
if (s->flags & HF_EM_MASK) {
illegal_op:
gen_illegal_opcode(s);
return;
}
if (is_xmm
&& !(s->flags & HF_OSFXSR_MASK)
&& ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) {
goto unknown_op;
}
if (b == 0x0e) {
if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
/* If we were fully decoding this we might use illegal_op. */
goto unknown_op;
}
/* femms */
gen_helper_emms(cpu_env);
return;
}
if (b == 0x77) {
/* emms */
gen_helper_emms(cpu_env);
return;
}
/* prepare MMX state (XXX: optimize by storing fptt and fptags in
the static cpu state) */
if (!is_xmm) {
gen_helper_enter_mmx(cpu_env);
}
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7);
if (is_xmm)
reg |= rex_r;
mod = (modrm >> 6) & 3;
if (sse_fn_epp == SSE_SPECIAL) {
b |= (b1 << 8);
switch(b) {
case 0x0e7: /* movntq */
if (mod == 3) {
goto illegal_op;
}
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
break;
case 0x1e7: /* movntdq */
case 0x02b: /* movntps */
case 0x12b: /* movntps */
if (mod == 3)
goto illegal_op;
gen_lea_modrm(env, s, modrm);
gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
break;
case 0x3f0: /* lddqu */
if (mod == 3)
goto illegal_op;
gen_lea_modrm(env, s, modrm);
gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
break;
case 0x22b: /* movntss */
case 0x32b: /* movntsd */
if (mod == 3)
goto illegal_op;
gen_lea_modrm(env, s, modrm);
if (b1 & 1) {
gen_stq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(0)));
gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
}
break;
case 0x6e: /* movd mm, ea */
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
} else
#endif
{
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
}
break;
case 0x16e: /* movd xmm, ea */
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);
} else
#endif
{
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
}
break;
case 0x6f: /* movq mm, ea */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
} else {
rm = (modrm & 7);
tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
offsetof(CPUX86State,fpregs[rm].mmx));
tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
}
break;
case 0x010: /* movups */
case 0x110: /* movupd */
case 0x028: /* movaps */
case 0x128: /* movapd */
case 0x16f: /* movdqa xmm, ea */
case 0x26f: /* movdqu xmm, ea */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
offsetof(CPUX86State,xmm_regs[rm]));
}
break;
case 0x210: /* movss xmm, ea */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
tcg_gen_movi_tl(cpu_T0, 0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
}
break;
case 0x310: /* movsd xmm, ea */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
tcg_gen_movi_tl(cpu_T0, 0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
}
break;
case 0x012: /* movlps */
case 0x112: /* movlpd */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
/* movhlps */
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
}
break;
case 0x212: /* movsldup */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
}
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
break;
case 0x312: /* movddup */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
}
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
break;
case 0x016: /* movhps */
case 0x116: /* movhpd */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(1)));
} else {
/* movlhps */
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
}
break;
case 0x216: /* movshdup */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
}
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
break;
case 0x178:
case 0x378:
{
int bit_index, field_length;
if (b1 == 1 && reg != 0)
goto illegal_op;
field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
if (b1 == 1)
gen_helper_extrq_i(cpu_env, cpu_ptr0,
tcg_const_i32(bit_index),
tcg_const_i32(field_length));
else
gen_helper_insertq_i(cpu_env, cpu_ptr0,
tcg_const_i32(bit_index),
tcg_const_i32(field_length));
}
break;
case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
tcg_gen_ld_i64(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
} else
#endif
{
tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
}
break;
case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
tcg_gen_ld_i64(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
} else
#endif
{
tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
}
break;
case 0x27e: /* movq xmm, ea */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
}
gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
break;
case 0x7f: /* movq ea, mm */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
} else {
rm = (modrm & 7);
gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
offsetof(CPUX86State,fpregs[reg].mmx));
}
break;
case 0x011: /* movups */
case 0x111: /* movupd */
case 0x029: /* movaps */
case 0x129: /* movapd */
case 0x17f: /* movdqa ea, xmm */
case 0x27f: /* movdqu ea, xmm */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
offsetof(CPUX86State,xmm_regs[reg]));
}
break;
case 0x211: /* movss ea, xmm */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
}
break;
case 0x311: /* movsd ea, xmm */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
}
break;
case 0x013: /* movlps */
case 0x113: /* movlpd */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
goto illegal_op;
}
break;
case 0x017: /* movhps */
case 0x117: /* movhpd */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(1)));
} else {
goto illegal_op;
}
break;
case 0x71: /* shift mm, im */
case 0x72:
case 0x73:
case 0x171: /* shift xmm, im */
case 0x172:
case 0x173:
if (b1 >= 2) {
goto unknown_op;
}
val = cpu_ldub_code(env, s->pc++);
if (is_xmm) {
tcg_gen_movi_tl(cpu_T0, val);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
tcg_gen_movi_tl(cpu_T0, 0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
op1_offset = offsetof(CPUX86State,xmm_t0);
} else {
tcg_gen_movi_tl(cpu_T0, val);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
tcg_gen_movi_tl(cpu_T0, 0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
op1_offset = offsetof(CPUX86State,mmx_t0);
}
sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
(((modrm >> 3)) & 7)][b1];
if (!sse_fn_epp) {
goto unknown_op;
}
if (is_xmm) {
rm = (modrm & 7) | REX_B(s);
op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
} else {
rm = (modrm & 7);
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x050: /* movmskps */
rm = (modrm & 7) | REX_B(s);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[rm]));
gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
break;
case 0x150: /* movmskpd */
rm = (modrm & 7) | REX_B(s);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[rm]));
gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
break;
case 0x02a: /* cvtpi2ps */
case 0x12a: /* cvtpi2pd */
gen_helper_enter_mmx(cpu_env);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
op2_offset = offsetof(CPUX86State,mmx_t0);
gen_ldq_env_A0(s, op2_offset);
} else {
rm = (modrm & 7);
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
}
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
switch(b >> 8) {
case 0x0:
gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
break;
default:
case 0x1:
gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
break;
}
break;
case 0x22a: /* cvtsi2ss */
case 0x32a: /* cvtsi2sd */
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
if (ot == MO_32) {
SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
} else {
#ifdef TARGET_X86_64
SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);
#else
goto illegal_op;
#endif
}
break;
case 0x02c: /* cvttps2pi */
case 0x12c: /* cvttpd2pi */
case 0x02d: /* cvtps2pi */
case 0x12d: /* cvtpd2pi */
gen_helper_enter_mmx(cpu_env);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
op2_offset = offsetof(CPUX86State,xmm_t0);
gen_ldo_env_A0(s, op2_offset);
} else {
rm = (modrm & 7) | REX_B(s);
op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
}
op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
switch(b) {
case 0x02c:
gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x12c:
gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x02d:
gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x12d:
gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
break;
}
break;
case 0x22c: /* cvttss2si */
case 0x32c: /* cvttsd2si */
case 0x22d: /* cvtss2si */
case 0x32d: /* cvtsd2si */
ot = mo_64_32(s->dflag);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
if ((b >> 8) & 1) {
gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
} else {
gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
}
op2_offset = offsetof(CPUX86State,xmm_t0);
} else {
rm = (modrm & 7) | REX_B(s);
op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
if (ot == MO_32) {
SSEFunc_i_ep sse_fn_i_ep =
sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
} else {
#ifdef TARGET_X86_64
SSEFunc_l_ep sse_fn_l_ep =
sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);
#else
goto illegal_op;
#endif
}
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0xc4: /* pinsrw */
case 0x1c4:
s->rip_offset = 1;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
val = cpu_ldub_code(env, s->pc++);
if (b1) {
val &= 7;
tcg_gen_st16_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
} else {
val &= 3;
tcg_gen_st16_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
}
break;
case 0xc5: /* pextrw */
case 0x1c5:
if (mod != 3)
goto illegal_op;
ot = mo_64_32(s->dflag);
val = cpu_ldub_code(env, s->pc++);
if (b1) {
val &= 7;
rm = (modrm & 7) | REX_B(s);
tcg_gen_ld16u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
} else {
val &= 3;
rm = (modrm & 7);
tcg_gen_ld16u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
}
reg = ((modrm >> 3) & 7) | rex_r;
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x1d6: /* movq ea, xmm */
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
}
break;
case 0x2d6: /* movq2dq */
gen_helper_enter_mmx(cpu_env);
rm = (modrm & 7);
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
offsetof(CPUX86State,fpregs[rm].mmx));
gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
break;
case 0x3d6: /* movdq2q */
gen_helper_enter_mmx(cpu_env);
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
break;
case 0xd7: /* pmovmskb */
case 0x1d7:
if (mod != 3)
goto illegal_op;
if (b1) {
rm = (modrm & 7) | REX_B(s);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
} else {
rm = (modrm & 7);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
}
reg = ((modrm >> 3) & 7) | rex_r;
tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
break;
case 0x138:
case 0x038:
b = modrm;
if ((b & 0xf0) == 0xf0) {
goto do_0f_38_fx;
}
modrm = cpu_ldub_code(env, s->pc++);
rm = modrm & 7;
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (b1 >= 2) {
goto unknown_op;
}
sse_fn_epp = sse_op_table6[b].op[b1];
if (!sse_fn_epp) {
goto unknown_op;
}
if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
goto illegal_op;
if (b1) {
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
if (mod == 3) {
op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
} else {
op2_offset = offsetof(CPUX86State,xmm_t0);
gen_lea_modrm(env, s, modrm);
switch (b) {
case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
gen_ldq_env_A0(s, op2_offset +
offsetof(ZMMReg, ZMM_Q(0)));
break;
case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
offsetof(ZMMReg, ZMM_L(0)));
break;
case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
s->mem_index, MO_LEUW);
tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
offsetof(ZMMReg, ZMM_W(0)));
break;
case 0x2a: /* movntqda */
gen_ldo_env_A0(s, op1_offset);
return;
default:
gen_ldo_env_A0(s, op2_offset);
}
}
} else {
op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
if (mod == 3) {
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
} else {
op2_offset = offsetof(CPUX86State,mmx_t0);
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, op2_offset);
}
}
if (sse_fn_epp == SSE_SPECIAL) {
goto unknown_op;
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
if (b == 0x17) {
set_cc_op(s, CC_OP_EFLAGS);
}
break;
case 0x238:
case 0x338:
do_0f_38_fx:
/* Various integer extensions at 0f 38 f[0-f]. */
b = modrm | (b1 << 8);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
switch (b) {
case 0x3f0: /* crc32 Gd,Eb */
case 0x3f1: /* crc32 Gd,Ey */
do_crc32:
if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
goto illegal_op;
}
if ((b & 0xff) == 0xf0) {
ot = MO_8;
} else if (s->dflag != MO_64) {
ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
} else {
ot = MO_64;
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_helper_crc32(cpu_T0, cpu_tmp2_i32,
cpu_T0, tcg_const_i32(8 << ot));
ot = mo_64_32(s->dflag);
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x1f0: /* crc32 or movbe */
case 0x1f1:
/* For these insns, the f3 prefix is supposed to have priority
over the 66 prefix, but that's not what we implement above
setting b1. */
if (s->prefix & PREFIX_REPNZ) {
goto do_crc32;
}
/* FALLTHRU */
case 0x0f0: /* movbe Gy,My */
case 0x0f1: /* movbe My,Gy */
if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
goto illegal_op;
}
if (s->dflag != MO_64) {
ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
} else {
ot = MO_64;
}
gen_lea_modrm(env, s, modrm);
if ((b & 1) == 0) {
tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
s->mem_index, ot | MO_BE);
gen_op_mov_reg_v(ot, reg, cpu_T0);
} else {
tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
s->mem_index, ot | MO_BE);
}
break;
case 0x0f2: /* andn Gy, By, Ey */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
tcg_gen_andc_tl(cpu_T0, cpu_regs[s->vex_v], cpu_T0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
gen_op_update1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 0x0f7: /* bextr Gy, Ey, By */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
{
TCGv bound, zero;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
/* Extract START, and shift the operand.
Shifts larger than operand size get zeros. */
tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);
bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
zero = tcg_const_tl(0);
tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,
cpu_T0, zero);
tcg_temp_free(zero);
/* Extract the LEN into a mask. Lengths larger than
operand size get all ones. */
tcg_gen_extract_tl(cpu_A0, cpu_regs[s->vex_v], 8, 8);
tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
cpu_A0, bound);
tcg_temp_free(bound);
tcg_gen_movi_tl(cpu_T1, 1);
tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);
tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);
tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(ot, reg, cpu_T0);
gen_op_update1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
}
break;
case 0x0f5: /* bzhi Gy, Ey, By */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
tcg_gen_ext8u_tl(cpu_T1, cpu_regs[s->vex_v]);
{
TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
/* Note that since we're using BMILG (in order to get O
cleared) we need to store the inverse into C. */
tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
cpu_T1, bound);
tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,
bound, bound, cpu_T1);
tcg_temp_free(bound);
}
tcg_gen_movi_tl(cpu_A0, -1);
tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);
tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
gen_op_update1_cc();
set_cc_op(s, CC_OP_BMILGB + ot);
break;
case 0x3f6: /* mulx By, Gy, rdx, Ey */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
switch (ot) {
default:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
break;
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_mulu2_i64(cpu_T0, cpu_T1,
cpu_T0, cpu_regs[R_EDX]);
tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T0);
tcg_gen_mov_i64(cpu_regs[reg], cpu_T1);
break;
#endif
}
break;
case 0x3f5: /* pdep Gy, By, Ey */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
/* Note that by zero-extending the mask operand, we
automatically handle zero-extending the result. */
if (ot == MO_64) {
tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
} else {
tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
}
gen_helper_pdep(cpu_regs[reg], cpu_T0, cpu_T1);
break;
case 0x2f5: /* pext Gy, By, Ey */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
/* Note that by zero-extending the mask operand, we
automatically handle zero-extending the result. */
if (ot == MO_64) {
tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
} else {
tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
}
gen_helper_pext(cpu_regs[reg], cpu_T0, cpu_T1);
break;
case 0x1f6: /* adcx Gy, Ey */
case 0x2f6: /* adox Gy, Ey */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
goto illegal_op;
} else {
TCGv carry_in, carry_out, zero;
int end_op;
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
/* Re-use the carry-out from a previous round. */
TCGV_UNUSED(carry_in);
carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
switch (s->cc_op) {
case CC_OP_ADCX:
if (b == 0x1f6) {
carry_in = cpu_cc_dst;
end_op = CC_OP_ADCX;
} else {
end_op = CC_OP_ADCOX;
}
break;
case CC_OP_ADOX:
if (b == 0x1f6) {
end_op = CC_OP_ADCOX;
} else {
carry_in = cpu_cc_src2;
end_op = CC_OP_ADOX;
}
break;
case CC_OP_ADCOX:
end_op = CC_OP_ADCOX;
carry_in = carry_out;
break;
default:
end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
break;
}
/* If we can't reuse carry-out, get it out of EFLAGS. */
if (TCGV_IS_UNUSED(carry_in)) {
if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
gen_compute_eflags(s);
}
carry_in = cpu_tmp0;
tcg_gen_extract_tl(carry_in, cpu_cc_src,
ctz32(b == 0x1f6 ? CC_C : CC_O), 1);
}
switch (ot) {
#ifdef TARGET_X86_64
case MO_32:
/* If we know TL is 64-bit, and we want a 32-bit
result, just do everything in 64-bit arithmetic. */
tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
tcg_gen_ext32u_i64(cpu_T0, cpu_T0);
tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[reg]);
tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);
tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T0);
tcg_gen_shri_i64(carry_out, cpu_T0, 32);
break;
#endif
default:
/* Otherwise compute the carry-out in two steps. */
zero = tcg_const_tl(0);
tcg_gen_add2_tl(cpu_T0, carry_out,
cpu_T0, zero,
carry_in, zero);
tcg_gen_add2_tl(cpu_regs[reg], carry_out,
cpu_regs[reg], carry_out,
cpu_T0, zero);
tcg_temp_free(zero);
break;
}
set_cc_op(s, end_op);
}
break;
case 0x1f7: /* shlx Gy, Ey, By */
case 0x2f7: /* sarx Gy, Ey, By */
case 0x3f7: /* shrx Gy, Ey, By */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
if (ot == MO_64) {
tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 63);
} else {
tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31);
}
if (b == 0x1f7) {
tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
} else if (b == 0x2f7) {
if (ot != MO_64) {
tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
}
tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
} else {
if (ot != MO_64) {
tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
}
tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
}
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x0f3:
case 0x1f3:
case 0x2f3:
case 0x3f3: /* Group 17 */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
switch (reg & 7) {
case 1: /* blsr By,Ey */
tcg_gen_neg_tl(cpu_T1, cpu_T0);
tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
gen_op_update2_cc();
set_cc_op(s, CC_OP_BMILGB + ot);
break;
case 2: /* blsmsk By,Ey */
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, CC_OP_BMILGB + ot);
break;
case 3: /* blsi By, Ey */
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_cc_src);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, CC_OP_BMILGB + ot);
break;
default:
goto unknown_op;
}
break;
default:
goto unknown_op;
}
break;
case 0x03a:
case 0x13a:
b = modrm;
modrm = cpu_ldub_code(env, s->pc++);
rm = modrm & 7;
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (b1 >= 2) {
goto unknown_op;
}
sse_fn_eppi = sse_op_table7[b].op[b1];
if (!sse_fn_eppi) {
goto unknown_op;
}
if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
goto illegal_op;
s->rip_offset = 1;
if (sse_fn_eppi == SSE_SPECIAL) {
ot = mo_64_32(s->dflag);
rm = (modrm & 7) | REX_B(s);
if (mod != 3)
gen_lea_modrm(env, s, modrm);
reg = ((modrm >> 3) & 7) | rex_r;
val = cpu_ldub_code(env, s->pc++);
switch (b) {
case 0x14: /* pextrb */
tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_B(val & 15)));
if (mod == 3) {
gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
s->mem_index, MO_UB);
}
break;
case 0x15: /* pextrw */
tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_W(val & 7)));
if (mod == 3) {
gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
s->mem_index, MO_LEUW);
}
break;
case 0x16:
if (ot == MO_32) { /* pextrd */
tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(val & 3)));
if (mod == 3) {
tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
} else {
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
}
} else { /* pextrq */
#ifdef TARGET_X86_64
tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(val & 1)));
if (mod == 3) {
tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
} else {
tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
}
#else
goto illegal_op;
#endif
}
break;
case 0x17: /* extractps */
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(val & 3)));
if (mod == 3) {
gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
s->mem_index, MO_LEUL);
}
break;
case 0x20: /* pinsrb */
if (mod == 3) {
gen_op_mov_v_reg(MO_32, cpu_T0, rm);
} else {
tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
s->mem_index, MO_UB);
}
tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_B(val & 15)));
break;
case 0x21: /* insertps */
if (mod == 3) {
tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,xmm_regs[rm]
.ZMM_L((val >> 6) & 3)));
} else {
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
}
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]
.ZMM_L((val >> 4) & 3)));
if ((val >> 0) & 1)
tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(0)));
if ((val >> 1) & 1)
tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(1)));
if ((val >> 2) & 1)
tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(2)));
if ((val >> 3) & 1)
tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(3)));
break;
case 0x22:
if (ot == MO_32) { /* pinsrd */
if (mod == 3) {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
} else {
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
}
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(val & 3)));
} else { /* pinsrq */
#ifdef TARGET_X86_64
if (mod == 3) {
gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
} else {
tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
}
tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(val & 1)));
#else
goto illegal_op;
#endif
}
break;
}
return;
}
if (b1) {
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
if (mod == 3) {
op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
} else {
op2_offset = offsetof(CPUX86State,xmm_t0);
gen_lea_modrm(env, s, modrm);
gen_ldo_env_A0(s, op2_offset);
}
} else {
op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
if (mod == 3) {
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
} else {
op2_offset = offsetof(CPUX86State,mmx_t0);
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, op2_offset);
}
}
val = cpu_ldub_code(env, s->pc++);
if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
set_cc_op(s, CC_OP_EFLAGS);
if (s->dflag == MO_64) {
/* The helper must use entire 64-bit gp registers */
val |= 1 << 8;
}
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
break;
case 0x33a:
/* Various integer extensions at 0f 3a f[0-f]. */
b = modrm | (b1 << 8);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
switch (b) {
case 0x3f0: /* rorx Gy,Ey, Ib */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
b = cpu_ldub_code(env, s->pc++);
if (ot == MO_64) {
tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
} else {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
}
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
default:
goto unknown_op;
}
break;
default:
unknown_op:
gen_unknown_opcode(env, s);
return;
}
} else {
/* generic MMX or SSE operation */
switch(b) {
case 0x70: /* pshufx insn */
case 0xc6: /* pshufx insn */
case 0xc2: /* compare insns */
s->rip_offset = 1;
break;
default:
break;
}
if (is_xmm) {
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
if (mod != 3) {
int sz = 4;
gen_lea_modrm(env, s, modrm);
op2_offset = offsetof(CPUX86State,xmm_t0);
switch (b) {
case 0x50 ... 0x5a:
case 0x5c ... 0x5f:
case 0xc2:
/* Most sse scalar operations. */
if (b1 == 2) {
sz = 2;
} else if (b1 == 3) {
sz = 3;
}
break;
case 0x2e: /* ucomis[sd] */
case 0x2f: /* comis[sd] */
if (b1 == 0) {
sz = 2;
} else {
sz = 3;
}
break;
}
switch (sz) {
case 2:
/* 32 bit access */
gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
tcg_gen_st32_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
break;
case 3:
/* 64 bit access */
gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
break;
default:
/* 128 bit access */
gen_ldo_env_A0(s, op2_offset);
break;
}
} else {
rm = (modrm & 7) | REX_B(s);
op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
}
} else {
op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
op2_offset = offsetof(CPUX86State,mmx_t0);
gen_ldq_env_A0(s, op2_offset);
} else {
rm = (modrm & 7);
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
}
}
switch(b) {
case 0x0f: /* 3DNow! data insns */
val = cpu_ldub_code(env, s->pc++);
sse_fn_epp = sse_op_table5[val];
if (!sse_fn_epp) {
goto unknown_op;
}
if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
goto illegal_op;
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x70: /* pshufx insn */
case 0xc6: /* pshufx insn */
val = cpu_ldub_code(env, s->pc++);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
/* XXX: introduce a new table? */
sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
break;
case 0xc2:
/* compare insns */
val = cpu_ldub_code(env, s->pc++);
if (val >= 8)
goto unknown_op;
sse_fn_epp = sse_op_table4[val][b1];
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0xf7:
/* maskmov : we must prepare A0 */
if (mod != 3)
goto illegal_op;
tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
gen_extu(s->aflag, cpu_A0);
gen_add_A0_ds_seg(s);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
/* XXX: introduce a new table? */
sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
break;
default:
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
break;
}
if (b == 0x2e || b == 0x2f) {
set_cc_op(s, CC_OP_EFLAGS);
}
}
}
| false | qemu | e3af7c788b73a6495eb9d94992ef11f6ad6f3c56 | static void gen_sse(CPUX86State *env, DisasContext *s, int b,
target_ulong pc_start, int rex_r)
{
int b1, op1_offset, op2_offset, is_xmm, val;
int modrm, mod, rm, reg;
SSEFunc_0_epp sse_fn_epp;
SSEFunc_0_eppi sse_fn_eppi;
SSEFunc_0_ppi sse_fn_ppi;
SSEFunc_0_eppt sse_fn_eppt;
TCGMemOp ot;
b &= 0xff;
if (s->prefix & PREFIX_DATA)
b1 = 1;
else if (s->prefix & PREFIX_REPZ)
b1 = 2;
else if (s->prefix & PREFIX_REPNZ)
b1 = 3;
else
b1 = 0;
sse_fn_epp = sse_op_table1[b][b1];
if (!sse_fn_epp) {
goto unknown_op;
}
if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
is_xmm = 1;
} else {
if (b1 == 0) {
is_xmm = 0;
} else {
is_xmm = 1;
}
}
if (s->flags & HF_TS_MASK) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
return;
}
if (s->flags & HF_EM_MASK) {
illegal_op:
gen_illegal_opcode(s);
return;
}
if (is_xmm
&& !(s->flags & HF_OSFXSR_MASK)
&& ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) {
goto unknown_op;
}
if (b == 0x0e) {
if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
goto unknown_op;
}
gen_helper_emms(cpu_env);
return;
}
if (b == 0x77) {
gen_helper_emms(cpu_env);
return;
}
if (!is_xmm) {
gen_helper_enter_mmx(cpu_env);
}
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7);
if (is_xmm)
reg |= rex_r;
mod = (modrm >> 6) & 3;
if (sse_fn_epp == SSE_SPECIAL) {
b |= (b1 << 8);
switch(b) {
case 0x0e7:
if (mod == 3) {
goto illegal_op;
}
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
break;
case 0x1e7:
case 0x02b:
case 0x12b:
if (mod == 3)
goto illegal_op;
gen_lea_modrm(env, s, modrm);
gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
break;
case 0x3f0:
if (mod == 3)
goto illegal_op;
gen_lea_modrm(env, s, modrm);
gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
break;
case 0x22b:
case 0x32b:
if (mod == 3)
goto illegal_op;
gen_lea_modrm(env, s, modrm);
if (b1 & 1) {
gen_stq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(0)));
gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
}
break;
case 0x6e:
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
} else
#endif
{
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
}
break;
case 0x16e:
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);
} else
#endif
{
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
}
break;
case 0x6f:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
} else {
rm = (modrm & 7);
tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
offsetof(CPUX86State,fpregs[rm].mmx));
tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
}
break;
case 0x010:
case 0x110:
case 0x028:
case 0x128:
case 0x16f:
case 0x26f:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
offsetof(CPUX86State,xmm_regs[rm]));
}
break;
case 0x210:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
tcg_gen_movi_tl(cpu_T0, 0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
}
break;
case 0x310:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
tcg_gen_movi_tl(cpu_T0, 0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
}
break;
case 0x012:
case 0x112:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
}
break;
case 0x212:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
}
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
break;
case 0x312:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
}
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
break;
case 0x016:
case 0x116:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(1)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
}
break;
case 0x216:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
}
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
break;
case 0x178:
case 0x378:
{
int bit_index, field_length;
if (b1 == 1 && reg != 0)
goto illegal_op;
field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
if (b1 == 1)
gen_helper_extrq_i(cpu_env, cpu_ptr0,
tcg_const_i32(bit_index),
tcg_const_i32(field_length));
else
gen_helper_insertq_i(cpu_env, cpu_ptr0,
tcg_const_i32(bit_index),
tcg_const_i32(field_length));
}
break;
case 0x7e:
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
tcg_gen_ld_i64(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
} else
#endif
{
tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
}
break;
case 0x17e:
#ifdef TARGET_X86_64
if (s->dflag == MO_64) {
tcg_gen_ld_i64(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
} else
#endif
{
tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
}
break;
case 0x27e:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
}
gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
break;
case 0x7f:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
} else {
rm = (modrm & 7);
gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
offsetof(CPUX86State,fpregs[reg].mmx));
}
break;
case 0x011:
case 0x111:
case 0x029:
case 0x129:
case 0x17f:
case 0x27f:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
offsetof(CPUX86State,xmm_regs[reg]));
}
break;
case 0x211:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
}
break;
case 0x311:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
}
break;
case 0x013:
case 0x113:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
goto illegal_op;
}
break;
case 0x017:
case 0x117:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(1)));
} else {
goto illegal_op;
}
break;
case 0x71:
case 0x72:
case 0x73:
case 0x171:
case 0x172:
case 0x173:
if (b1 >= 2) {
goto unknown_op;
}
val = cpu_ldub_code(env, s->pc++);
if (is_xmm) {
tcg_gen_movi_tl(cpu_T0, val);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
tcg_gen_movi_tl(cpu_T0, 0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
op1_offset = offsetof(CPUX86State,xmm_t0);
} else {
tcg_gen_movi_tl(cpu_T0, val);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
tcg_gen_movi_tl(cpu_T0, 0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
op1_offset = offsetof(CPUX86State,mmx_t0);
}
sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
(((modrm >> 3)) & 7)][b1];
if (!sse_fn_epp) {
goto unknown_op;
}
if (is_xmm) {
rm = (modrm & 7) | REX_B(s);
op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
} else {
rm = (modrm & 7);
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x050:
rm = (modrm & 7) | REX_B(s);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[rm]));
gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
break;
case 0x150:
rm = (modrm & 7) | REX_B(s);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[rm]));
gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
break;
case 0x02a:
case 0x12a:
gen_helper_enter_mmx(cpu_env);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
op2_offset = offsetof(CPUX86State,mmx_t0);
gen_ldq_env_A0(s, op2_offset);
} else {
rm = (modrm & 7);
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
}
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
switch(b >> 8) {
case 0x0:
gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
break;
default:
case 0x1:
gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
break;
}
break;
case 0x22a:
case 0x32a:
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
if (ot == MO_32) {
SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
} else {
#ifdef TARGET_X86_64
SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);
#else
goto illegal_op;
#endif
}
break;
case 0x02c:
case 0x12c:
case 0x02d:
case 0x12d:
gen_helper_enter_mmx(cpu_env);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
op2_offset = offsetof(CPUX86State,xmm_t0);
gen_ldo_env_A0(s, op2_offset);
} else {
rm = (modrm & 7) | REX_B(s);
op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
}
op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
switch(b) {
case 0x02c:
gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x12c:
gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x02d:
gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x12d:
gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
break;
}
break;
case 0x22c:
case 0x32c:
case 0x22d:
case 0x32d:
ot = mo_64_32(s->dflag);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
if ((b >> 8) & 1) {
gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
} else {
gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
}
op2_offset = offsetof(CPUX86State,xmm_t0);
} else {
rm = (modrm & 7) | REX_B(s);
op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
if (ot == MO_32) {
SSEFunc_i_ep sse_fn_i_ep =
sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
} else {
#ifdef TARGET_X86_64
SSEFunc_l_ep sse_fn_l_ep =
sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);
#else
goto illegal_op;
#endif
}
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0xc4:
case 0x1c4:
s->rip_offset = 1;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
val = cpu_ldub_code(env, s->pc++);
if (b1) {
val &= 7;
tcg_gen_st16_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
} else {
val &= 3;
tcg_gen_st16_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
}
break;
case 0xc5:
case 0x1c5:
if (mod != 3)
goto illegal_op;
ot = mo_64_32(s->dflag);
val = cpu_ldub_code(env, s->pc++);
if (b1) {
val &= 7;
rm = (modrm & 7) | REX_B(s);
tcg_gen_ld16u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
} else {
val &= 3;
rm = (modrm & 7);
tcg_gen_ld16u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
}
reg = ((modrm >> 3) & 7) | rex_r;
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x1d6:
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
gen_stq_env_A0(s, offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
}
break;
case 0x2d6:
gen_helper_enter_mmx(cpu_env);
rm = (modrm & 7);
gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
offsetof(CPUX86State,fpregs[rm].mmx));
gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
break;
case 0x3d6:
gen_helper_enter_mmx(cpu_env);
rm = (modrm & 7) | REX_B(s);
gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
break;
case 0xd7:
case 0x1d7:
if (mod != 3)
goto illegal_op;
if (b1) {
rm = (modrm & 7) | REX_B(s);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
} else {
rm = (modrm & 7);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
}
reg = ((modrm >> 3) & 7) | rex_r;
tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
break;
case 0x138:
case 0x038:
b = modrm;
if ((b & 0xf0) == 0xf0) {
goto do_0f_38_fx;
}
modrm = cpu_ldub_code(env, s->pc++);
rm = modrm & 7;
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (b1 >= 2) {
goto unknown_op;
}
sse_fn_epp = sse_op_table6[b].op[b1];
if (!sse_fn_epp) {
goto unknown_op;
}
if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
goto illegal_op;
if (b1) {
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
if (mod == 3) {
op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
} else {
op2_offset = offsetof(CPUX86State,xmm_t0);
gen_lea_modrm(env, s, modrm);
switch (b) {
case 0x20: case 0x30:
case 0x23: case 0x33:
case 0x25: case 0x35:
gen_ldq_env_A0(s, op2_offset +
offsetof(ZMMReg, ZMM_Q(0)));
break;
case 0x21: case 0x31:
case 0x24: case 0x34:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
offsetof(ZMMReg, ZMM_L(0)));
break;
case 0x22: case 0x32:
tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
s->mem_index, MO_LEUW);
tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
offsetof(ZMMReg, ZMM_W(0)));
break;
case 0x2a:
gen_ldo_env_A0(s, op1_offset);
return;
default:
gen_ldo_env_A0(s, op2_offset);
}
}
} else {
op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
if (mod == 3) {
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
} else {
op2_offset = offsetof(CPUX86State,mmx_t0);
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, op2_offset);
}
}
if (sse_fn_epp == SSE_SPECIAL) {
goto unknown_op;
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
if (b == 0x17) {
set_cc_op(s, CC_OP_EFLAGS);
}
break;
case 0x238:
case 0x338:
do_0f_38_fx:
b = modrm | (b1 << 8);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
switch (b) {
case 0x3f0:
case 0x3f1:
do_crc32:
if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
goto illegal_op;
}
if ((b & 0xff) == 0xf0) {
ot = MO_8;
} else if (s->dflag != MO_64) {
ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
} else {
ot = MO_64;
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_helper_crc32(cpu_T0, cpu_tmp2_i32,
cpu_T0, tcg_const_i32(8 << ot));
ot = mo_64_32(s->dflag);
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x1f0:
case 0x1f1:
if (s->prefix & PREFIX_REPNZ) {
goto do_crc32;
}
case 0x0f0:
case 0x0f1:
if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
goto illegal_op;
}
if (s->dflag != MO_64) {
ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
} else {
ot = MO_64;
}
gen_lea_modrm(env, s, modrm);
if ((b & 1) == 0) {
tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
s->mem_index, ot | MO_BE);
gen_op_mov_reg_v(ot, reg, cpu_T0);
} else {
tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
s->mem_index, ot | MO_BE);
}
break;
case 0x0f2:
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
tcg_gen_andc_tl(cpu_T0, cpu_regs[s->vex_v], cpu_T0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
gen_op_update1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 0x0f7:
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
{
TCGv bound, zero;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);
bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
zero = tcg_const_tl(0);
tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,
cpu_T0, zero);
tcg_temp_free(zero);
tcg_gen_extract_tl(cpu_A0, cpu_regs[s->vex_v], 8, 8);
tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
cpu_A0, bound);
tcg_temp_free(bound);
tcg_gen_movi_tl(cpu_T1, 1);
tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);
tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);
tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(ot, reg, cpu_T0);
gen_op_update1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
}
break;
case 0x0f5:
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
tcg_gen_ext8u_tl(cpu_T1, cpu_regs[s->vex_v]);
{
TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
cpu_T1, bound);
tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,
bound, bound, cpu_T1);
tcg_temp_free(bound);
}
tcg_gen_movi_tl(cpu_A0, -1);
tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);
tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);
gen_op_mov_reg_v(ot, reg, cpu_T0);
gen_op_update1_cc();
set_cc_op(s, CC_OP_BMILGB + ot);
break;
case 0x3f6:
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
switch (ot) {
default:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
break;
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_mulu2_i64(cpu_T0, cpu_T1,
cpu_T0, cpu_regs[R_EDX]);
tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T0);
tcg_gen_mov_i64(cpu_regs[reg], cpu_T1);
break;
#endif
}
break;
case 0x3f5:
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
if (ot == MO_64) {
tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
} else {
tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
}
gen_helper_pdep(cpu_regs[reg], cpu_T0, cpu_T1);
break;
case 0x2f5:
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
if (ot == MO_64) {
tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
} else {
tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
}
gen_helper_pext(cpu_regs[reg], cpu_T0, cpu_T1);
break;
case 0x1f6:
case 0x2f6:
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
goto illegal_op;
} else {
TCGv carry_in, carry_out, zero;
int end_op;
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
TCGV_UNUSED(carry_in);
carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
switch (s->cc_op) {
case CC_OP_ADCX:
if (b == 0x1f6) {
carry_in = cpu_cc_dst;
end_op = CC_OP_ADCX;
} else {
end_op = CC_OP_ADCOX;
}
break;
case CC_OP_ADOX:
if (b == 0x1f6) {
end_op = CC_OP_ADCOX;
} else {
carry_in = cpu_cc_src2;
end_op = CC_OP_ADOX;
}
break;
case CC_OP_ADCOX:
end_op = CC_OP_ADCOX;
carry_in = carry_out;
break;
default:
end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
break;
}
if (TCGV_IS_UNUSED(carry_in)) {
if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
gen_compute_eflags(s);
}
carry_in = cpu_tmp0;
tcg_gen_extract_tl(carry_in, cpu_cc_src,
ctz32(b == 0x1f6 ? CC_C : CC_O), 1);
}
switch (ot) {
#ifdef TARGET_X86_64
case MO_32:
tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
tcg_gen_ext32u_i64(cpu_T0, cpu_T0);
tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[reg]);
tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);
tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T0);
tcg_gen_shri_i64(carry_out, cpu_T0, 32);
break;
#endif
default:
zero = tcg_const_tl(0);
tcg_gen_add2_tl(cpu_T0, carry_out,
cpu_T0, zero,
carry_in, zero);
tcg_gen_add2_tl(cpu_regs[reg], carry_out,
cpu_regs[reg], carry_out,
cpu_T0, zero);
tcg_temp_free(zero);
break;
}
set_cc_op(s, end_op);
}
break;
case 0x1f7:
case 0x2f7:
case 0x3f7:
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
if (ot == MO_64) {
tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 63);
} else {
tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31);
}
if (b == 0x1f7) {
tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
} else if (b == 0x2f7) {
if (ot != MO_64) {
tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
}
tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
} else {
if (ot != MO_64) {
tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
}
tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
}
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
case 0x0f3:
case 0x1f3:
case 0x2f3:
case 0x3f3:
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
switch (reg & 7) {
case 1:
tcg_gen_neg_tl(cpu_T1, cpu_T0);
tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
gen_op_update2_cc();
set_cc_op(s, CC_OP_BMILGB + ot);
break;
case 2:
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, CC_OP_BMILGB + ot);
break;
case 3:
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_cc_src);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(s, CC_OP_BMILGB + ot);
break;
default:
goto unknown_op;
}
break;
default:
goto unknown_op;
}
break;
case 0x03a:
case 0x13a:
b = modrm;
modrm = cpu_ldub_code(env, s->pc++);
rm = modrm & 7;
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (b1 >= 2) {
goto unknown_op;
}
sse_fn_eppi = sse_op_table7[b].op[b1];
if (!sse_fn_eppi) {
goto unknown_op;
}
if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
goto illegal_op;
s->rip_offset = 1;
if (sse_fn_eppi == SSE_SPECIAL) {
ot = mo_64_32(s->dflag);
rm = (modrm & 7) | REX_B(s);
if (mod != 3)
gen_lea_modrm(env, s, modrm);
reg = ((modrm >> 3) & 7) | rex_r;
val = cpu_ldub_code(env, s->pc++);
switch (b) {
case 0x14:
tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_B(val & 15)));
if (mod == 3) {
gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
s->mem_index, MO_UB);
}
break;
case 0x15:
tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_W(val & 7)));
if (mod == 3) {
gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
s->mem_index, MO_LEUW);
}
break;
case 0x16:
if (ot == MO_32) {
tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(val & 3)));
if (mod == 3) {
tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
} else {
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
}
} else {
#ifdef TARGET_X86_64
tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(val & 1)));
if (mod == 3) {
tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
} else {
tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
}
#else
goto illegal_op;
#endif
}
break;
case 0x17:
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(val & 3)));
if (mod == 3) {
gen_op_mov_reg_v(ot, rm, cpu_T0);
} else {
tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
s->mem_index, MO_LEUL);
}
break;
case 0x20:
if (mod == 3) {
gen_op_mov_v_reg(MO_32, cpu_T0, rm);
} else {
tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
s->mem_index, MO_UB);
}
tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_B(val & 15)));
break;
case 0x21:
if (mod == 3) {
tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,xmm_regs[rm]
.ZMM_L((val >> 6) & 3)));
} else {
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
}
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]
.ZMM_L((val >> 4) & 3)));
if ((val >> 0) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0),
cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(0)));
if ((val >> 1) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0),
cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(1)));
if ((val >> 2) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0),
cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(2)));
if ((val >> 3) & 1)
                        tcg_gen_st_i32(tcg_const_i32(0),
cpu_env, offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(3)));
break;
case 0x22:
if (ot == MO_32) {
if (mod == 3) {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
} else {
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
s->mem_index, MO_LEUL);
}
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,
xmm_regs[reg].ZMM_L(val & 3)));
} else {
#ifdef TARGET_X86_64
if (mod == 3) {
gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
} else {
tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
s->mem_index, MO_LEQ);
}
tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
offsetof(CPUX86State,
xmm_regs[reg].ZMM_Q(val & 1)));
#else
goto illegal_op;
#endif
}
break;
}
return;
}
if (b1) {
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
if (mod == 3) {
op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
} else {
op2_offset = offsetof(CPUX86State,xmm_t0);
gen_lea_modrm(env, s, modrm);
gen_ldo_env_A0(s, op2_offset);
}
} else {
op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
if (mod == 3) {
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
} else {
op2_offset = offsetof(CPUX86State,mmx_t0);
gen_lea_modrm(env, s, modrm);
gen_ldq_env_A0(s, op2_offset);
}
}
val = cpu_ldub_code(env, s->pc++);
if ((b & 0xfc) == 0x60) {
set_cc_op(s, CC_OP_EFLAGS);
if (s->dflag == MO_64) {
val |= 1 << 8;
}
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
break;
case 0x33a:
b = modrm | (b1 << 8);
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
switch (b) {
case 0x3f0:
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(s->prefix & PREFIX_VEX)
|| s->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(s->dflag);
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
b = cpu_ldub_code(env, s->pc++);
if (ot == MO_64) {
tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
} else {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
}
gen_op_mov_reg_v(ot, reg, cpu_T0);
break;
default:
goto unknown_op;
}
break;
default:
unknown_op:
gen_unknown_opcode(env, s);
return;
}
} else {
switch(b) {
case 0x70:
case 0xc6:
case 0xc2:
s->rip_offset = 1;
break;
default:
break;
}
if (is_xmm) {
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
if (mod != 3) {
int sz = 4;
gen_lea_modrm(env, s, modrm);
op2_offset = offsetof(CPUX86State,xmm_t0);
switch (b) {
case 0x50 ... 0x5a:
case 0x5c ... 0x5f:
case 0xc2:
if (b1 == 2) {
sz = 2;
} else if (b1 == 3) {
sz = 3;
}
break;
case 0x2e:
case 0x2f:
if (b1 == 0) {
sz = 2;
} else {
sz = 3;
}
break;
}
switch (sz) {
case 2:
gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
tcg_gen_st32_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
break;
case 3:
gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
break;
default:
gen_ldo_env_A0(s, op2_offset);
break;
}
} else {
rm = (modrm & 7) | REX_B(s);
op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
}
} else {
op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
if (mod != 3) {
gen_lea_modrm(env, s, modrm);
op2_offset = offsetof(CPUX86State,mmx_t0);
gen_ldq_env_A0(s, op2_offset);
} else {
rm = (modrm & 7);
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
}
}
switch(b) {
case 0x0f:
val = cpu_ldub_code(env, s->pc++);
sse_fn_epp = sse_op_table5[val];
if (!sse_fn_epp) {
goto unknown_op;
}
if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
goto illegal_op;
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x70:
case 0xc6:
val = cpu_ldub_code(env, s->pc++);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
break;
case 0xc2:
val = cpu_ldub_code(env, s->pc++);
if (val >= 8)
goto unknown_op;
sse_fn_epp = sse_op_table4[val][b1];
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0xf7:
if (mod != 3)
goto illegal_op;
tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
gen_extu(s->aflag, cpu_A0);
gen_add_A0_ds_seg(s);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
break;
default:
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
break;
}
if (b == 0x2e || b == 0x2f) {
set_cc_op(s, CC_OP_EFLAGS);
}
}
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(CPUX86State *VAR_0, DisasContext *VAR_1, int VAR_2,
target_ulong VAR_3, int VAR_4)
{
int VAR_5, VAR_6, VAR_7, VAR_8, VAR_9;
int VAR_10, VAR_11, VAR_12, VAR_13;
SSEFunc_0_epp sse_fn_epp;
SSEFunc_0_eppi sse_fn_eppi;
SSEFunc_0_ppi sse_fn_ppi;
SSEFunc_0_eppt sse_fn_eppt;
TCGMemOp ot;
VAR_2 &= 0xff;
if (VAR_1->prefix & PREFIX_DATA)
VAR_5 = 1;
else if (VAR_1->prefix & PREFIX_REPZ)
VAR_5 = 2;
else if (VAR_1->prefix & PREFIX_REPNZ)
VAR_5 = 3;
else
VAR_5 = 0;
sse_fn_epp = sse_op_table1[VAR_2][VAR_5];
if (!sse_fn_epp) {
goto unknown_op;
}
if ((VAR_2 <= 0x5f && VAR_2 >= 0x10) || VAR_2 == 0xc6 || VAR_2 == 0xc2) {
VAR_8 = 1;
} else {
if (VAR_5 == 0) {
VAR_8 = 0;
} else {
VAR_8 = 1;
}
}
if (VAR_1->flags & HF_TS_MASK) {
gen_exception(VAR_1, EXCP07_PREX, VAR_3 - VAR_1->cs_base);
return;
}
if (VAR_1->flags & HF_EM_MASK) {
illegal_op:
gen_illegal_opcode(VAR_1);
return;
}
if (VAR_8
&& !(VAR_1->flags & HF_OSFXSR_MASK)
&& ((VAR_2 != 0x38 && VAR_2 != 0x3a) || (VAR_1->prefix & PREFIX_DATA))) {
goto unknown_op;
}
if (VAR_2 == 0x0e) {
if (!(VAR_1->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
goto unknown_op;
}
gen_helper_emms(cpu_env);
return;
}
if (VAR_2 == 0x77) {
gen_helper_emms(cpu_env);
return;
}
if (!VAR_8) {
gen_helper_enter_mmx(cpu_env);
}
VAR_10 = cpu_ldub_code(VAR_0, VAR_1->pc++);
VAR_13 = ((VAR_10 >> 3) & 7);
if (VAR_8)
VAR_13 |= VAR_4;
VAR_11 = (VAR_10 >> 6) & 3;
if (sse_fn_epp == SSE_SPECIAL) {
VAR_2 |= (VAR_5 << 8);
switch(VAR_2) {
case 0x0e7:
if (VAR_11 == 3) {
goto illegal_op;
}
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_stq_env_A0(VAR_1, offsetof(CPUX86State, fpregs[VAR_13].mmx));
break;
case 0x1e7:
case 0x02b:
case 0x12b:
if (VAR_11 == 3)
goto illegal_op;
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_sto_env_A0(VAR_1, offsetof(CPUX86State, xmm_regs[VAR_13]));
break;
case 0x3f0:
if (VAR_11 == 3)
goto illegal_op;
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_ldo_env_A0(VAR_1, offsetof(CPUX86State, xmm_regs[VAR_13]));
break;
case 0x22b:
case 0x32b:
if (VAR_11 == 3)
goto illegal_op;
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
if (VAR_5 & 1) {
gen_stq_env_A0(VAR_1, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_Q(0)));
} else {
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_L(0)));
gen_op_st_v(VAR_1, MO_32, cpu_T0, cpu_A0);
}
break;
case 0x6e:
#ifdef TARGET_X86_64
if (VAR_1->dflag == MO_64) {
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_64, OR_TMP0, 0);
tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[VAR_13].mmx));
} else
#endif
{
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_32, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,fpregs[VAR_13].mmx));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
}
break;
case 0x16e:
#ifdef TARGET_X86_64
if (VAR_1->dflag == MO_64) {
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_64, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[VAR_13]));
gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);
} else
#endif
{
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_32, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[VAR_13]));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
}
break;
case 0x6f:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_ldq_env_A0(VAR_1, offsetof(CPUX86State, fpregs[VAR_13].mmx));
} else {
VAR_12 = (VAR_10 & 7);
tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
offsetof(CPUX86State,fpregs[VAR_12].mmx));
tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
offsetof(CPUX86State,fpregs[VAR_13].mmx));
}
break;
case 0x010:
case 0x110:
case 0x028:
case 0x128:
case 0x16f:
case 0x26f:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_ldo_env_A0(VAR_1, offsetof(CPUX86State, xmm_regs[VAR_13]));
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
gen_op_movo(offsetof(CPUX86State,xmm_regs[VAR_13]),
offsetof(CPUX86State,xmm_regs[VAR_12]));
}
break;
case 0x210:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_op_ld_v(VAR_1, MO_32, cpu_T0, cpu_A0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)));
tcg_gen_movi_tl(cpu_T0, 0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(1)));
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(2)));
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(3)));
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)),
offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_L(0)));
}
break;
case 0x310:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_ldq_env_A0(VAR_1, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_Q(0)));
tcg_gen_movi_tl(cpu_T0, 0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(2)));
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(3)));
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(0)));
}
break;
case 0x012:
case 0x112:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_ldq_env_A0(VAR_1, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_Q(0)));
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(1)));
}
break;
case 0x212:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_ldo_env_A0(VAR_1, offsetof(CPUX86State, xmm_regs[VAR_13]));
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)),
offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_L(0)));
gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(2)),
offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_L(2)));
}
gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(1)),
offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)));
gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(3)),
offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(2)));
break;
case 0x312:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_ldq_env_A0(VAR_1, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_Q(0)));
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(0)));
}
gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(1)),
offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)));
break;
case 0x016:
case 0x116:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_ldq_env_A0(VAR_1, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_Q(1)));
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(1)),
offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(0)));
}
break;
case 0x216:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_ldo_env_A0(VAR_1, offsetof(CPUX86State, xmm_regs[VAR_13]));
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(1)),
offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_L(1)));
gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(3)),
offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_L(3)));
}
gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)),
offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(1)));
gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(2)),
offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(3)));
break;
case 0x178:
case 0x378:
{
int VAR_14, VAR_15;
if (VAR_5 == 1 && VAR_13 != 0)
goto illegal_op;
VAR_15 = cpu_ldub_code(VAR_0, VAR_1->pc++) & 0x3F;
VAR_14 = cpu_ldub_code(VAR_0, VAR_1->pc++) & 0x3F;
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[VAR_13]));
if (VAR_5 == 1)
gen_helper_extrq_i(cpu_env, cpu_ptr0,
tcg_const_i32(VAR_14),
tcg_const_i32(VAR_15));
else
gen_helper_insertq_i(cpu_env, cpu_ptr0,
tcg_const_i32(VAR_14),
tcg_const_i32(VAR_15));
}
break;
case 0x7e:
#ifdef TARGET_X86_64
if (VAR_1->dflag == MO_64) {
tcg_gen_ld_i64(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[VAR_13].mmx));
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_64, OR_TMP0, 1);
} else
#endif
{
tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[VAR_13].mmx.MMX_L(0)));
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_32, OR_TMP0, 1);
}
break;
case 0x17e:
#ifdef TARGET_X86_64
if (VAR_1->dflag == MO_64) {
tcg_gen_ld_i64(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)));
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_64, OR_TMP0, 1);
} else
#endif
{
tcg_gen_ld32u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)));
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_32, OR_TMP0, 1);
}
break;
case 0x27e:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_ldq_env_A0(VAR_1, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_Q(0)));
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(0)));
}
gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(1)));
break;
case 0x7f:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_stq_env_A0(VAR_1, offsetof(CPUX86State, fpregs[VAR_13].mmx));
} else {
VAR_12 = (VAR_10 & 7);
gen_op_movq(offsetof(CPUX86State,fpregs[VAR_12].mmx),
offsetof(CPUX86State,fpregs[VAR_13].mmx));
}
break;
case 0x011:
case 0x111:
case 0x029:
case 0x129:
case 0x17f:
case 0x27f:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_sto_env_A0(VAR_1, offsetof(CPUX86State, xmm_regs[VAR_13]));
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
gen_op_movo(offsetof(CPUX86State,xmm_regs[VAR_12]),
offsetof(CPUX86State,xmm_regs[VAR_13]));
}
break;
case 0x211:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)));
gen_op_st_v(VAR_1, MO_32, cpu_T0, cpu_A0);
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_L(0)),
offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)));
}
break;
case 0x311:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_stq_env_A0(VAR_1, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_Q(0)));
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)));
}
break;
case 0x013:
case 0x113:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_stq_env_A0(VAR_1, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_Q(0)));
} else {
goto illegal_op;
}
break;
case 0x017:
case 0x117:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_stq_env_A0(VAR_1, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_Q(1)));
} else {
goto illegal_op;
}
break;
case 0x71:
case 0x72:
case 0x73:
case 0x171:
case 0x172:
case 0x173:
if (VAR_5 >= 2) {
goto unknown_op;
}
VAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);
if (VAR_8) {
tcg_gen_movi_tl(cpu_T0, VAR_9);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
tcg_gen_movi_tl(cpu_T0, 0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
VAR_6 = offsetof(CPUX86State,xmm_t0);
} else {
tcg_gen_movi_tl(cpu_T0, VAR_9);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
tcg_gen_movi_tl(cpu_T0, 0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
VAR_6 = offsetof(CPUX86State,mmx_t0);
}
sse_fn_epp = sse_op_table2[((VAR_2 - 1) & 3) * 8 +
(((VAR_10 >> 3)) & 7)][VAR_5];
if (!sse_fn_epp) {
goto unknown_op;
}
if (VAR_8) {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
VAR_7 = offsetof(CPUX86State,xmm_regs[VAR_12]);
} else {
VAR_12 = (VAR_10 & 7);
VAR_7 = offsetof(CPUX86State,fpregs[VAR_12].mmx);
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_7);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_6);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x050:
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[VAR_12]));
gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_regs[VAR_13], cpu_tmp2_i32);
break;
case 0x150:
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[VAR_12]));
gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_regs[VAR_13], cpu_tmp2_i32);
break;
case 0x02a:
case 0x12a:
gen_helper_enter_mmx(cpu_env);
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
VAR_7 = offsetof(CPUX86State,mmx_t0);
gen_ldq_env_A0(VAR_1, VAR_7);
} else {
VAR_12 = (VAR_10 & 7);
VAR_7 = offsetof(CPUX86State,fpregs[VAR_12].mmx);
}
VAR_6 = offsetof(CPUX86State,xmm_regs[VAR_13]);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);
switch(VAR_2 >> 8) {
case 0x0:
gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
break;
default:
case 0x1:
gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
break;
}
break;
case 0x22a:
case 0x32a:
ot = mo_64_32(VAR_1->dflag);
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);
VAR_6 = offsetof(CPUX86State,xmm_regs[VAR_13]);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);
if (ot == MO_32) {
SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(VAR_2 >> 8) & 1];
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
} else {
#ifdef TARGET_X86_64
SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(VAR_2 >> 8) & 1];
sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);
#else
goto illegal_op;
#endif
}
break;
case 0x02c:
case 0x12c:
case 0x02d:
case 0x12d:
gen_helper_enter_mmx(cpu_env);
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
VAR_7 = offsetof(CPUX86State,xmm_t0);
gen_ldo_env_A0(VAR_1, VAR_7);
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
VAR_7 = offsetof(CPUX86State,xmm_regs[VAR_12]);
}
VAR_6 = offsetof(CPUX86State,fpregs[VAR_13 & 7].mmx);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);
switch(VAR_2) {
case 0x02c:
gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x12c:
gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x02d:
gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x12d:
gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
break;
}
break;
case 0x22c:
case 0x32c:
case 0x22d:
case 0x32d:
ot = mo_64_32(VAR_1->dflag);
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
if ((VAR_2 >> 8) & 1) {
gen_ldq_env_A0(VAR_1, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
} else {
gen_op_ld_v(VAR_1, MO_32, cpu_T0, cpu_A0);
tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
}
VAR_7 = offsetof(CPUX86State,xmm_t0);
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
VAR_7 = offsetof(CPUX86State,xmm_regs[VAR_12]);
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_7);
if (ot == MO_32) {
SSEFunc_i_ep sse_fn_i_ep =
sse_op_table3bi[((VAR_2 >> 7) & 2) | (VAR_2 & 1)];
sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
} else {
#ifdef TARGET_X86_64
SSEFunc_l_ep sse_fn_l_ep =
sse_op_table3bq[((VAR_2 >> 7) & 2) | (VAR_2 & 1)];
sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);
#else
goto illegal_op;
#endif
}
gen_op_mov_reg_v(ot, VAR_13, cpu_T0);
break;
case 0xc4:
case 0x1c4:
VAR_1->rip_offset = 1;
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_16, OR_TMP0, 0);
VAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);
if (VAR_5) {
VAR_9 &= 7;
tcg_gen_st16_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_W(VAR_9)));
} else {
VAR_9 &= 3;
tcg_gen_st16_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[VAR_13].mmx.MMX_W(VAR_9)));
}
break;
case 0xc5:
case 0x1c5:
if (VAR_11 != 3)
goto illegal_op;
ot = mo_64_32(VAR_1->dflag);
VAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);
if (VAR_5) {
VAR_9 &= 7;
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
tcg_gen_ld16u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_W(VAR_9)));
} else {
VAR_9 &= 3;
VAR_12 = (VAR_10 & 7);
tcg_gen_ld16u_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,fpregs[VAR_12].mmx.MMX_W(VAR_9)));
}
VAR_13 = ((VAR_10 >> 3) & 7) | VAR_4;
gen_op_mov_reg_v(ot, VAR_13, cpu_T0);
break;
case 0x1d6:
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_stq_env_A0(VAR_1, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_Q(0)));
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(0)),
offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)));
gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(1)));
}
break;
case 0x2d6:
gen_helper_enter_mmx(cpu_env);
VAR_12 = (VAR_10 & 7);
gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)),
offsetof(CPUX86State,fpregs[VAR_12].mmx));
gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(1)));
break;
case 0x3d6:
gen_helper_enter_mmx(cpu_env);
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
gen_op_movq(offsetof(CPUX86State,fpregs[VAR_13 & 7].mmx),
offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(0)));
break;
case 0xd7:
case 0x1d7:
if (VAR_11 != 3)
goto illegal_op;
if (VAR_5) {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_12]));
gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
} else {
VAR_12 = (VAR_10 & 7);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[VAR_12].mmx));
gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
}
VAR_13 = ((VAR_10 >> 3) & 7) | VAR_4;
tcg_gen_extu_i32_tl(cpu_regs[VAR_13], cpu_tmp2_i32);
break;
case 0x138:
case 0x038:
VAR_2 = VAR_10;
if ((VAR_2 & 0xf0) == 0xf0) {
goto do_0f_38_fx;
}
VAR_10 = cpu_ldub_code(VAR_0, VAR_1->pc++);
VAR_12 = VAR_10 & 7;
VAR_13 = ((VAR_10 >> 3) & 7) | VAR_4;
VAR_11 = (VAR_10 >> 6) & 3;
if (VAR_5 >= 2) {
goto unknown_op;
}
sse_fn_epp = sse_op_table6[VAR_2].op[VAR_5];
if (!sse_fn_epp) {
goto unknown_op;
}
if (!(VAR_1->cpuid_ext_features & sse_op_table6[VAR_2].ext_mask))
goto illegal_op;
if (VAR_5) {
VAR_6 = offsetof(CPUX86State,xmm_regs[VAR_13]);
if (VAR_11 == 3) {
VAR_7 = offsetof(CPUX86State,xmm_regs[VAR_12 | REX_B(VAR_1)]);
} else {
VAR_7 = offsetof(CPUX86State,xmm_t0);
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
switch (VAR_2) {
case 0x20: case 0x30:
case 0x23: case 0x33:
case 0x25: case 0x35:
gen_ldq_env_A0(VAR_1, VAR_7 +
offsetof(ZMMReg, ZMM_Q(0)));
break;
case 0x21: case 0x31:
case 0x24: case 0x34:
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
VAR_1->mem_index, MO_LEUL);
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, VAR_7 +
offsetof(ZMMReg, ZMM_L(0)));
break;
case 0x22: case 0x32:
tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
VAR_1->mem_index, MO_LEUW);
tcg_gen_st16_tl(cpu_tmp0, cpu_env, VAR_7 +
offsetof(ZMMReg, ZMM_W(0)));
break;
case 0x2a:
gen_ldo_env_A0(VAR_1, VAR_6);
return;
default:
gen_ldo_env_A0(VAR_1, VAR_7);
}
}
} else {
VAR_6 = offsetof(CPUX86State,fpregs[VAR_13].mmx);
if (VAR_11 == 3) {
VAR_7 = offsetof(CPUX86State,fpregs[VAR_12].mmx);
} else {
VAR_7 = offsetof(CPUX86State,mmx_t0);
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_ldq_env_A0(VAR_1, VAR_7);
}
}
if (sse_fn_epp == SSE_SPECIAL) {
goto unknown_op;
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
if (VAR_2 == 0x17) {
set_cc_op(VAR_1, CC_OP_EFLAGS);
}
break;
case 0x238:
case 0x338:
do_0f_38_fx:
VAR_2 = VAR_10 | (VAR_5 << 8);
VAR_10 = cpu_ldub_code(VAR_0, VAR_1->pc++);
VAR_13 = ((VAR_10 >> 3) & 7) | VAR_4;
switch (VAR_2) {
case 0x3f0:
case 0x3f1:
do_crc32:
if (!(VAR_1->cpuid_ext_features & CPUID_EXT_SSE42)) {
goto illegal_op;
}
if ((VAR_2 & 0xff) == 0xf0) {
ot = MO_8;
} else if (VAR_1->dflag != MO_64) {
ot = (VAR_1->prefix & PREFIX_DATA ? MO_16 : MO_32);
} else {
ot = MO_64;
}
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[VAR_13]);
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);
gen_helper_crc32(cpu_T0, cpu_tmp2_i32,
cpu_T0, tcg_const_i32(8 << ot));
ot = mo_64_32(VAR_1->dflag);
gen_op_mov_reg_v(ot, VAR_13, cpu_T0);
break;
case 0x1f0:
case 0x1f1:
if (VAR_1->prefix & PREFIX_REPNZ) {
goto do_crc32;
}
case 0x0f0:
case 0x0f1:
if (!(VAR_1->cpuid_ext_features & CPUID_EXT_MOVBE)) {
goto illegal_op;
}
if (VAR_1->dflag != MO_64) {
ot = (VAR_1->prefix & PREFIX_DATA ? MO_16 : MO_32);
} else {
ot = MO_64;
}
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
if ((VAR_2 & 1) == 0) {
tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
VAR_1->mem_index, ot | MO_BE);
gen_op_mov_reg_v(ot, VAR_13, cpu_T0);
} else {
tcg_gen_qemu_st_tl(cpu_regs[VAR_13], cpu_A0,
VAR_1->mem_index, ot | MO_BE);
}
break;
case 0x0f2:
if (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
|| !(VAR_1->prefix & PREFIX_VEX)
|| VAR_1->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(VAR_1->dflag);
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);
tcg_gen_andc_tl(cpu_T0, cpu_regs[VAR_1->vex_v], cpu_T0);
gen_op_mov_reg_v(ot, VAR_13, cpu_T0);
gen_op_update1_cc();
set_cc_op(VAR_1, CC_OP_LOGICB + ot);
break;
case 0x0f7:
if (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
|| !(VAR_1->prefix & PREFIX_VEX)
|| VAR_1->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(VAR_1->dflag);
{
TCGv bound, zero;
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);
tcg_gen_ext8u_tl(cpu_A0, cpu_regs[VAR_1->vex_v]);
tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);
bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
zero = tcg_const_tl(0);
tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,
cpu_T0, zero);
tcg_temp_free(zero);
tcg_gen_extract_tl(cpu_A0, cpu_regs[VAR_1->vex_v], 8, 8);
tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
cpu_A0, bound);
tcg_temp_free(bound);
tcg_gen_movi_tl(cpu_T1, 1);
tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);
tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);
tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(ot, VAR_13, cpu_T0);
gen_op_update1_cc();
set_cc_op(VAR_1, CC_OP_LOGICB + ot);
}
break;
case 0x0f5:
if (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(VAR_1->prefix & PREFIX_VEX)
|| VAR_1->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(VAR_1->dflag);
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);
tcg_gen_ext8u_tl(cpu_T1, cpu_regs[VAR_1->vex_v]);
{
TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
cpu_T1, bound);
tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,
bound, bound, cpu_T1);
tcg_temp_free(bound);
}
tcg_gen_movi_tl(cpu_A0, -1);
tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);
tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);
gen_op_mov_reg_v(ot, VAR_13, cpu_T0);
gen_op_update1_cc();
set_cc_op(VAR_1, CC_OP_BMILGB + ot);
break;
case 0x3f6:
if (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(VAR_1->prefix & PREFIX_VEX)
|| VAR_1->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(VAR_1->dflag);
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);
switch (ot) {
default:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[VAR_1->vex_v], cpu_tmp2_i32);
tcg_gen_extu_i32_tl(cpu_regs[VAR_13], cpu_tmp3_i32);
break;
#ifdef TARGET_X86_64
case MO_64:
tcg_gen_mulu2_i64(cpu_T0, cpu_T1,
cpu_T0, cpu_regs[R_EDX]);
tcg_gen_mov_i64(cpu_regs[VAR_1->vex_v], cpu_T0);
tcg_gen_mov_i64(cpu_regs[VAR_13], cpu_T1);
break;
#endif
}
break;
case 0x3f5:
if (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(VAR_1->prefix & PREFIX_VEX)
|| VAR_1->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(VAR_1->dflag);
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);
if (ot == MO_64) {
tcg_gen_mov_tl(cpu_T1, cpu_regs[VAR_1->vex_v]);
} else {
tcg_gen_ext32u_tl(cpu_T1, cpu_regs[VAR_1->vex_v]);
}
gen_helper_pdep(cpu_regs[VAR_13], cpu_T0, cpu_T1);
break;
case 0x2f5:
if (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(VAR_1->prefix & PREFIX_VEX)
|| VAR_1->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(VAR_1->dflag);
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);
if (ot == MO_64) {
tcg_gen_mov_tl(cpu_T1, cpu_regs[VAR_1->vex_v]);
} else {
tcg_gen_ext32u_tl(cpu_T1, cpu_regs[VAR_1->vex_v]);
}
gen_helper_pext(cpu_regs[VAR_13], cpu_T0, cpu_T1);
break;
case 0x1f6:
case 0x2f6:
if (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
goto illegal_op;
} else {
TCGv carry_in, carry_out, zero;
int VAR_16;
ot = mo_64_32(VAR_1->dflag);
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);
TCGV_UNUSED(carry_in);
carry_out = (VAR_2 == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
switch (VAR_1->cc_op) {
case CC_OP_ADCX:
if (VAR_2 == 0x1f6) {
carry_in = cpu_cc_dst;
VAR_16 = CC_OP_ADCX;
} else {
VAR_16 = CC_OP_ADCOX;
}
break;
case CC_OP_ADOX:
if (VAR_2 == 0x1f6) {
VAR_16 = CC_OP_ADCOX;
} else {
carry_in = cpu_cc_src2;
VAR_16 = CC_OP_ADOX;
}
break;
case CC_OP_ADCOX:
VAR_16 = CC_OP_ADCOX;
carry_in = carry_out;
break;
default:
VAR_16 = (VAR_2 == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
break;
}
if (TCGV_IS_UNUSED(carry_in)) {
if (VAR_1->cc_op != CC_OP_ADCX && VAR_1->cc_op != CC_OP_ADOX) {
gen_compute_eflags(VAR_1);
}
carry_in = cpu_tmp0;
tcg_gen_extract_tl(carry_in, cpu_cc_src,
ctz32(VAR_2 == 0x1f6 ? CC_C : CC_O), 1);
}
switch (ot) {
#ifdef TARGET_X86_64
case MO_32:
tcg_gen_ext32u_i64(cpu_regs[VAR_13], cpu_regs[VAR_13]);
tcg_gen_ext32u_i64(cpu_T0, cpu_T0);
tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[VAR_13]);
tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);
tcg_gen_ext32u_i64(cpu_regs[VAR_13], cpu_T0);
tcg_gen_shri_i64(carry_out, cpu_T0, 32);
break;
#endif
default:
zero = tcg_const_tl(0);
tcg_gen_add2_tl(cpu_T0, carry_out,
cpu_T0, zero,
carry_in, zero);
tcg_gen_add2_tl(cpu_regs[VAR_13], carry_out,
cpu_regs[VAR_13], carry_out,
cpu_T0, zero);
tcg_temp_free(zero);
break;
}
set_cc_op(VAR_1, VAR_16);
}
break;
case 0x1f7:
case 0x2f7:
case 0x3f7:
if (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(VAR_1->prefix & PREFIX_VEX)
|| VAR_1->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(VAR_1->dflag);
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);
if (ot == MO_64) {
tcg_gen_andi_tl(cpu_T1, cpu_regs[VAR_1->vex_v], 63);
} else {
tcg_gen_andi_tl(cpu_T1, cpu_regs[VAR_1->vex_v], 31);
}
if (VAR_2 == 0x1f7) {
tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
} else if (VAR_2 == 0x2f7) {
if (ot != MO_64) {
tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
}
tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
} else {
if (ot != MO_64) {
tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
}
tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
}
gen_op_mov_reg_v(ot, VAR_13, cpu_T0);
break;
case 0x0f3:
case 0x1f3:
case 0x2f3:
case 0x3f3:
if (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
|| !(VAR_1->prefix & PREFIX_VEX)
|| VAR_1->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(VAR_1->dflag);
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);
switch (VAR_13 & 7) {
case 1:
tcg_gen_neg_tl(cpu_T1, cpu_T0);
tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
gen_op_mov_reg_v(ot, VAR_1->vex_v, cpu_T0);
gen_op_update2_cc();
set_cc_op(VAR_1, CC_OP_BMILGB + ot);
break;
case 2:
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(VAR_1, CC_OP_BMILGB + ot);
break;
case 3:
tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_cc_src);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
set_cc_op(VAR_1, CC_OP_BMILGB + ot);
break;
default:
goto unknown_op;
}
break;
default:
goto unknown_op;
}
break;
case 0x03a:
case 0x13a:
VAR_2 = VAR_10;
VAR_10 = cpu_ldub_code(VAR_0, VAR_1->pc++);
VAR_12 = VAR_10 & 7;
VAR_13 = ((VAR_10 >> 3) & 7) | VAR_4;
VAR_11 = (VAR_10 >> 6) & 3;
if (VAR_5 >= 2) {
goto unknown_op;
}
sse_fn_eppi = sse_op_table7[VAR_2].op[VAR_5];
if (!sse_fn_eppi) {
goto unknown_op;
}
if (!(VAR_1->cpuid_ext_features & sse_op_table7[VAR_2].ext_mask))
goto illegal_op;
VAR_1->rip_offset = 1;
if (sse_fn_eppi == SSE_SPECIAL) {
ot = mo_64_32(VAR_1->dflag);
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
if (VAR_11 != 3)
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
VAR_13 = ((VAR_10 >> 3) & 7) | VAR_4;
VAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);
switch (VAR_2) {
case 0x14:
tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_B(VAR_9 & 15)));
if (VAR_11 == 3) {
gen_op_mov_reg_v(ot, VAR_12, cpu_T0);
} else {
tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
VAR_1->mem_index, MO_UB);
}
break;
case 0x15:
tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_W(VAR_9 & 7)));
if (VAR_11 == 3) {
gen_op_mov_reg_v(ot, VAR_12, cpu_T0);
} else {
tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
VAR_1->mem_index, MO_LEUW);
}
break;
case 0x16:
if (ot == MO_32) {
tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_L(VAR_9 & 3)));
if (VAR_11 == 3) {
tcg_gen_extu_i32_tl(cpu_regs[VAR_12], cpu_tmp2_i32);
} else {
tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
VAR_1->mem_index, MO_LEUL);
}
} else {
#ifdef TARGET_X86_64
tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_Q(VAR_9 & 1)));
if (VAR_11 == 3) {
tcg_gen_mov_i64(cpu_regs[VAR_12], cpu_tmp1_i64);
} else {
tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
VAR_1->mem_index, MO_LEQ);
}
#else
goto illegal_op;
#endif
}
break;
case 0x17:
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_L(VAR_9 & 3)));
if (VAR_11 == 3) {
gen_op_mov_reg_v(ot, VAR_12, cpu_T0);
} else {
tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
VAR_1->mem_index, MO_LEUL);
}
break;
case 0x20:
if (VAR_11 == 3) {
gen_op_mov_v_reg(MO_32, cpu_T0, VAR_12);
} else {
tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
VAR_1->mem_index, MO_UB);
}
tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_B(VAR_9 & 15)));
break;
case 0x21:
if (VAR_11 == 3) {
tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,xmm_regs[VAR_12]
.ZMM_L((VAR_9 >> 6) & 3)));
} else {
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
VAR_1->mem_index, MO_LEUL);
}
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,xmm_regs[VAR_13]
.ZMM_L((VAR_9 >> 4) & 3)));
if ((VAR_9 >> 0) & 1)
tcg_gen_st_i32(tcg_const_i32(0 ),
cpu_env, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_L(0)));
if ((VAR_9 >> 1) & 1)
tcg_gen_st_i32(tcg_const_i32(0 ),
cpu_env, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_L(1)));
if ((VAR_9 >> 2) & 1)
tcg_gen_st_i32(tcg_const_i32(0 ),
cpu_env, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_L(2)));
if ((VAR_9 >> 3) & 1)
tcg_gen_st_i32(tcg_const_i32(0 ),
cpu_env, offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_L(3)));
break;
case 0x22:
if (ot == MO_32) {
if (VAR_11 == 3) {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[VAR_12]);
} else {
tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
VAR_1->mem_index, MO_LEUL);
}
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_L(VAR_9 & 3)));
} else {
#ifdef TARGET_X86_64
if (VAR_11 == 3) {
gen_op_mov_v_reg(ot, cpu_tmp1_i64, VAR_12);
} else {
tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
VAR_1->mem_index, MO_LEQ);
}
tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
offsetof(CPUX86State,
xmm_regs[VAR_13].ZMM_Q(VAR_9 & 1)));
#else
goto illegal_op;
#endif
}
break;
}
return;
}
if (VAR_5) {
VAR_6 = offsetof(CPUX86State,xmm_regs[VAR_13]);
if (VAR_11 == 3) {
VAR_7 = offsetof(CPUX86State,xmm_regs[VAR_12 | REX_B(VAR_1)]);
} else {
VAR_7 = offsetof(CPUX86State,xmm_t0);
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_ldo_env_A0(VAR_1, VAR_7);
}
} else {
VAR_6 = offsetof(CPUX86State,fpregs[VAR_13].mmx);
if (VAR_11 == 3) {
VAR_7 = offsetof(CPUX86State,fpregs[VAR_12].mmx);
} else {
VAR_7 = offsetof(CPUX86State,mmx_t0);
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
gen_ldq_env_A0(VAR_1, VAR_7);
}
}
VAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);
if ((VAR_2 & 0xfc) == 0x60) {
set_cc_op(VAR_1, CC_OP_EFLAGS);
if (VAR_1->dflag == MO_64) {
VAR_9 |= 1 << 8;
}
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);
sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(VAR_9));
break;
case 0x33a:
VAR_2 = VAR_10 | (VAR_5 << 8);
VAR_10 = cpu_ldub_code(VAR_0, VAR_1->pc++);
VAR_13 = ((VAR_10 >> 3) & 7) | VAR_4;
switch (VAR_2) {
case 0x3f0:
if (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
|| !(VAR_1->prefix & PREFIX_VEX)
|| VAR_1->vex_l != 0) {
goto illegal_op;
}
ot = mo_64_32(VAR_1->dflag);
gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);
VAR_2 = cpu_ldub_code(VAR_0, VAR_1->pc++);
if (ot == MO_64) {
tcg_gen_rotri_tl(cpu_T0, cpu_T0, VAR_2 & 63);
} else {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, VAR_2 & 31);
tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
}
gen_op_mov_reg_v(ot, VAR_13, cpu_T0);
break;
default:
goto unknown_op;
}
break;
default:
unknown_op:
gen_unknown_opcode(VAR_0, VAR_1);
return;
}
} else {
switch(VAR_2) {
case 0x70:
case 0xc6:
case 0xc2:
VAR_1->rip_offset = 1;
break;
default:
break;
}
if (VAR_8) {
VAR_6 = offsetof(CPUX86State,xmm_regs[VAR_13]);
if (VAR_11 != 3) {
int VAR_17 = 4;
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
VAR_7 = offsetof(CPUX86State,xmm_t0);
switch (VAR_2) {
case 0x50 ... 0x5a:
case 0x5c ... 0x5f:
case 0xc2:
if (VAR_5 == 2) {
VAR_17 = 2;
} else if (VAR_5 == 3) {
VAR_17 = 3;
}
break;
case 0x2e:
case 0x2f:
if (VAR_5 == 0) {
VAR_17 = 2;
} else {
VAR_17 = 3;
}
break;
}
switch (VAR_17) {
case 2:
gen_op_ld_v(VAR_1, MO_32, cpu_T0, cpu_A0);
tcg_gen_st32_tl(cpu_T0, cpu_env,
offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
break;
case 3:
gen_ldq_env_A0(VAR_1, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
break;
default:
gen_ldo_env_A0(VAR_1, VAR_7);
break;
}
} else {
VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);
VAR_7 = offsetof(CPUX86State,xmm_regs[VAR_12]);
}
} else {
VAR_6 = offsetof(CPUX86State,fpregs[VAR_13].mmx);
if (VAR_11 != 3) {
gen_lea_modrm(VAR_0, VAR_1, VAR_10);
VAR_7 = offsetof(CPUX86State,mmx_t0);
gen_ldq_env_A0(VAR_1, VAR_7);
} else {
VAR_12 = (VAR_10 & 7);
VAR_7 = offsetof(CPUX86State,fpregs[VAR_12].mmx);
}
}
switch(VAR_2) {
case 0x0f:
VAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);
sse_fn_epp = sse_op_table5[VAR_9];
if (!sse_fn_epp) {
goto unknown_op;
}
if (!(VAR_1->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
goto illegal_op;
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0x70:
case 0xc6:
VAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);
sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(VAR_9));
break;
case 0xc2:
VAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);
if (VAR_9 >= 8)
goto unknown_op;
sse_fn_epp = sse_op_table4[VAR_9][VAR_5];
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
break;
case 0xf7:
if (VAR_11 != 3)
goto illegal_op;
tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
gen_extu(VAR_1->aflag, cpu_A0);
gen_add_A0_ds_seg(VAR_1);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);
sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
break;
default:
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);
sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
break;
}
if (VAR_2 == 0x2e || VAR_2 == 0x2f) {
set_cc_op(VAR_1, CC_OP_EFLAGS);
}
}
}
| [
"static void FUNC_0(CPUX86State *VAR_0, DisasContext *VAR_1, int VAR_2,\ntarget_ulong VAR_3, int VAR_4)\n{",
"int VAR_5, VAR_6, VAR_7, VAR_8, VAR_9;",
"int VAR_10, VAR_11, VAR_12, VAR_13;",
"SSEFunc_0_epp sse_fn_epp;",
"SSEFunc_0_eppi sse_fn_eppi;",
"SSEFunc_0_ppi sse_fn_ppi;",
"SSEFunc_0_eppt sse_fn_eppt;",
"TCGMemOp ot;",
"VAR_2 &= 0xff;",
"if (VAR_1->prefix & PREFIX_DATA)\nVAR_5 = 1;",
"else if (VAR_1->prefix & PREFIX_REPZ)\nVAR_5 = 2;",
"else if (VAR_1->prefix & PREFIX_REPNZ)\nVAR_5 = 3;",
"else\nVAR_5 = 0;",
"sse_fn_epp = sse_op_table1[VAR_2][VAR_5];",
"if (!sse_fn_epp) {",
"goto unknown_op;",
"}",
"if ((VAR_2 <= 0x5f && VAR_2 >= 0x10) || VAR_2 == 0xc6 || VAR_2 == 0xc2) {",
"VAR_8 = 1;",
"} else {",
"if (VAR_5 == 0) {",
"VAR_8 = 0;",
"} else {",
"VAR_8 = 1;",
"}",
"}",
"if (VAR_1->flags & HF_TS_MASK) {",
"gen_exception(VAR_1, EXCP07_PREX, VAR_3 - VAR_1->cs_base);",
"return;",
"}",
"if (VAR_1->flags & HF_EM_MASK) {",
"illegal_op:\ngen_illegal_opcode(VAR_1);",
"return;",
"}",
"if (VAR_8\n&& !(VAR_1->flags & HF_OSFXSR_MASK)\n&& ((VAR_2 != 0x38 && VAR_2 != 0x3a) || (VAR_1->prefix & PREFIX_DATA))) {",
"goto unknown_op;",
"}",
"if (VAR_2 == 0x0e) {",
"if (!(VAR_1->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {",
"goto unknown_op;",
"}",
"gen_helper_emms(cpu_env);",
"return;",
"}",
"if (VAR_2 == 0x77) {",
"gen_helper_emms(cpu_env);",
"return;",
"}",
"if (!VAR_8) {",
"gen_helper_enter_mmx(cpu_env);",
"}",
"VAR_10 = cpu_ldub_code(VAR_0, VAR_1->pc++);",
"VAR_13 = ((VAR_10 >> 3) & 7);",
"if (VAR_8)\nVAR_13 |= VAR_4;",
"VAR_11 = (VAR_10 >> 6) & 3;",
"if (sse_fn_epp == SSE_SPECIAL) {",
"VAR_2 |= (VAR_5 << 8);",
"switch(VAR_2) {",
"case 0x0e7:\nif (VAR_11 == 3) {",
"goto illegal_op;",
"}",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_stq_env_A0(VAR_1, offsetof(CPUX86State, fpregs[VAR_13].mmx));",
"break;",
"case 0x1e7:\ncase 0x02b:\ncase 0x12b:\nif (VAR_11 == 3)\ngoto illegal_op;",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_sto_env_A0(VAR_1, offsetof(CPUX86State, xmm_regs[VAR_13]));",
"break;",
"case 0x3f0:\nif (VAR_11 == 3)\ngoto illegal_op;",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_ldo_env_A0(VAR_1, offsetof(CPUX86State, xmm_regs[VAR_13]));",
"break;",
"case 0x22b:\ncase 0x32b:\nif (VAR_11 == 3)\ngoto illegal_op;",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"if (VAR_5 & 1) {",
"gen_stq_env_A0(VAR_1, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_Q(0)));",
"} else {",
"tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_L(0)));",
"gen_op_st_v(VAR_1, MO_32, cpu_T0, cpu_A0);",
"}",
"break;",
"case 0x6e:\n#ifdef TARGET_X86_64\nif (VAR_1->dflag == MO_64) {",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_64, OR_TMP0, 0);",
"tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[VAR_13].mmx));",
"} else",
"#endif\n{",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_32, OR_TMP0, 0);",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env,\noffsetof(CPUX86State,fpregs[VAR_13].mmx));",
"tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);",
"gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);",
"}",
"break;",
"case 0x16e:\n#ifdef TARGET_X86_64\nif (VAR_1->dflag == MO_64) {",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_64, OR_TMP0, 0);",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env,\noffsetof(CPUX86State,xmm_regs[VAR_13]));",
"gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);",
"} else",
"#endif\n{",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_32, OR_TMP0, 0);",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env,\noffsetof(CPUX86State,xmm_regs[VAR_13]));",
"tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);",
"gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);",
"}",
"break;",
"case 0x6f:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_ldq_env_A0(VAR_1, offsetof(CPUX86State, fpregs[VAR_13].mmx));",
"} else {",
"VAR_12 = (VAR_10 & 7);",
"tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,\noffsetof(CPUX86State,fpregs[VAR_12].mmx));",
"tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,\noffsetof(CPUX86State,fpregs[VAR_13].mmx));",
"}",
"break;",
"case 0x010:\ncase 0x110:\ncase 0x028:\ncase 0x128:\ncase 0x16f:\ncase 0x26f:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_ldo_env_A0(VAR_1, offsetof(CPUX86State, xmm_regs[VAR_13]));",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"gen_op_movo(offsetof(CPUX86State,xmm_regs[VAR_13]),\noffsetof(CPUX86State,xmm_regs[VAR_12]));",
"}",
"break;",
"case 0x210:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_op_ld_v(VAR_1, MO_32, cpu_T0, cpu_A0);",
"tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)));",
"tcg_gen_movi_tl(cpu_T0, 0);",
"tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(1)));",
"tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(2)));",
"tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(3)));",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)),\noffsetof(CPUX86State,xmm_regs[VAR_12].ZMM_L(0)));",
"}",
"break;",
"case 0x310:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_ldq_env_A0(VAR_1, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_Q(0)));",
"tcg_gen_movi_tl(cpu_T0, 0);",
"tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(2)));",
"tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(3)));",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)),\noffsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(0)));",
"}",
"break;",
"case 0x012:\ncase 0x112:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_ldq_env_A0(VAR_1, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_Q(0)));",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)),\noffsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(1)));",
"}",
"break;",
"case 0x212:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_ldo_env_A0(VAR_1, offsetof(CPUX86State, xmm_regs[VAR_13]));",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)),\noffsetof(CPUX86State,xmm_regs[VAR_12].ZMM_L(0)));",
"gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(2)),\noffsetof(CPUX86State,xmm_regs[VAR_12].ZMM_L(2)));",
"}",
"gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(1)),\noffsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)));",
"gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(3)),\noffsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(2)));",
"break;",
"case 0x312:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_ldq_env_A0(VAR_1, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_Q(0)));",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)),\noffsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(0)));",
"}",
"gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(1)),\noffsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)));",
"break;",
"case 0x016:\ncase 0x116:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_ldq_env_A0(VAR_1, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_Q(1)));",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(1)),\noffsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(0)));",
"}",
"break;",
"case 0x216:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_ldo_env_A0(VAR_1, offsetof(CPUX86State, xmm_regs[VAR_13]));",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(1)),\noffsetof(CPUX86State,xmm_regs[VAR_12].ZMM_L(1)));",
"gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(3)),\noffsetof(CPUX86State,xmm_regs[VAR_12].ZMM_L(3)));",
"}",
"gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)),\noffsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(1)));",
"gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(2)),\noffsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(3)));",
"break;",
"case 0x178:\ncase 0x378:\n{",
"int VAR_14, VAR_15;",
"if (VAR_5 == 1 && VAR_13 != 0)\ngoto illegal_op;",
"VAR_15 = cpu_ldub_code(VAR_0, VAR_1->pc++) & 0x3F;",
"VAR_14 = cpu_ldub_code(VAR_0, VAR_1->pc++) & 0x3F;",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env,\noffsetof(CPUX86State,xmm_regs[VAR_13]));",
"if (VAR_5 == 1)\ngen_helper_extrq_i(cpu_env, cpu_ptr0,\ntcg_const_i32(VAR_14),\ntcg_const_i32(VAR_15));",
"else\ngen_helper_insertq_i(cpu_env, cpu_ptr0,\ntcg_const_i32(VAR_14),\ntcg_const_i32(VAR_15));",
"}",
"break;",
"case 0x7e:\n#ifdef TARGET_X86_64\nif (VAR_1->dflag == MO_64) {",
"tcg_gen_ld_i64(cpu_T0, cpu_env,\noffsetof(CPUX86State,fpregs[VAR_13].mmx));",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_64, OR_TMP0, 1);",
"} else",
"#endif\n{",
"tcg_gen_ld32u_tl(cpu_T0, cpu_env,\noffsetof(CPUX86State,fpregs[VAR_13].mmx.MMX_L(0)));",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_32, OR_TMP0, 1);",
"}",
"break;",
"case 0x17e:\n#ifdef TARGET_X86_64\nif (VAR_1->dflag == MO_64) {",
"tcg_gen_ld_i64(cpu_T0, cpu_env,\noffsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)));",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_64, OR_TMP0, 1);",
"} else",
"#endif\n{",
"tcg_gen_ld32u_tl(cpu_T0, cpu_env,\noffsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)));",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_32, OR_TMP0, 1);",
"}",
"break;",
"case 0x27e:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_ldq_env_A0(VAR_1, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_Q(0)));",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)),\noffsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(0)));",
"}",
"gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(1)));",
"break;",
"case 0x7f:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_stq_env_A0(VAR_1, offsetof(CPUX86State, fpregs[VAR_13].mmx));",
"} else {",
"VAR_12 = (VAR_10 & 7);",
"gen_op_movq(offsetof(CPUX86State,fpregs[VAR_12].mmx),\noffsetof(CPUX86State,fpregs[VAR_13].mmx));",
"}",
"break;",
"case 0x011:\ncase 0x111:\ncase 0x029:\ncase 0x129:\ncase 0x17f:\ncase 0x27f:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_sto_env_A0(VAR_1, offsetof(CPUX86State, xmm_regs[VAR_13]));",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"gen_op_movo(offsetof(CPUX86State,xmm_regs[VAR_12]),\noffsetof(CPUX86State,xmm_regs[VAR_13]));",
"}",
"break;",
"case 0x211:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)));",
"gen_op_st_v(VAR_1, MO_32, cpu_T0, cpu_A0);",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"gen_op_movl(offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_L(0)),\noffsetof(CPUX86State,xmm_regs[VAR_13].ZMM_L(0)));",
"}",
"break;",
"case 0x311:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_stq_env_A0(VAR_1, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_Q(0)));",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(0)),\noffsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)));",
"}",
"break;",
"case 0x013:\ncase 0x113:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_stq_env_A0(VAR_1, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_Q(0)));",
"} else {",
"goto illegal_op;",
"}",
"break;",
"case 0x017:\ncase 0x117:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_stq_env_A0(VAR_1, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_Q(1)));",
"} else {",
"goto illegal_op;",
"}",
"break;",
"case 0x71:\ncase 0x72:\ncase 0x73:\ncase 0x171:\ncase 0x172:\ncase 0x173:\nif (VAR_5 >= 2) {",
"goto unknown_op;",
"}",
"VAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);",
"if (VAR_8) {",
"tcg_gen_movi_tl(cpu_T0, VAR_9);",
"tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));",
"tcg_gen_movi_tl(cpu_T0, 0);",
"tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));",
"VAR_6 = offsetof(CPUX86State,xmm_t0);",
"} else {",
"tcg_gen_movi_tl(cpu_T0, VAR_9);",
"tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));",
"tcg_gen_movi_tl(cpu_T0, 0);",
"tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));",
"VAR_6 = offsetof(CPUX86State,mmx_t0);",
"}",
"sse_fn_epp = sse_op_table2[((VAR_2 - 1) & 3) * 8 +\n(((VAR_10 >> 3)) & 7)][VAR_5];",
"if (!sse_fn_epp) {",
"goto unknown_op;",
"}",
"if (VAR_8) {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"VAR_7 = offsetof(CPUX86State,xmm_regs[VAR_12]);",
"} else {",
"VAR_12 = (VAR_10 & 7);",
"VAR_7 = offsetof(CPUX86State,fpregs[VAR_12].mmx);",
"}",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_7);",
"tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_6);",
"sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);",
"break;",
"case 0x050:\nVAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env,\noffsetof(CPUX86State,xmm_regs[VAR_12]));",
"gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);",
"tcg_gen_extu_i32_tl(cpu_regs[VAR_13], cpu_tmp2_i32);",
"break;",
"case 0x150:\nVAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env,\noffsetof(CPUX86State,xmm_regs[VAR_12]));",
"gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);",
"tcg_gen_extu_i32_tl(cpu_regs[VAR_13], cpu_tmp2_i32);",
"break;",
"case 0x02a:\ncase 0x12a:\ngen_helper_enter_mmx(cpu_env);",
"if (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"VAR_7 = offsetof(CPUX86State,mmx_t0);",
"gen_ldq_env_A0(VAR_1, VAR_7);",
"} else {",
"VAR_12 = (VAR_10 & 7);",
"VAR_7 = offsetof(CPUX86State,fpregs[VAR_12].mmx);",
"}",
"VAR_6 = offsetof(CPUX86State,xmm_regs[VAR_13]);",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);",
"tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);",
"switch(VAR_2 >> 8) {",
"case 0x0:\ngen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);",
"break;",
"default:\ncase 0x1:\ngen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);",
"break;",
"}",
"break;",
"case 0x22a:\ncase 0x32a:\not = mo_64_32(VAR_1->dflag);",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);",
"VAR_6 = offsetof(CPUX86State,xmm_regs[VAR_13]);",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);",
"if (ot == MO_32) {",
"SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(VAR_2 >> 8) & 1];",
"tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);",
"sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);",
"} else {",
"#ifdef TARGET_X86_64\nSSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(VAR_2 >> 8) & 1];",
"sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);",
"#else\ngoto illegal_op;",
"#endif\n}",
"break;",
"case 0x02c:\ncase 0x12c:\ncase 0x02d:\ncase 0x12d:\ngen_helper_enter_mmx(cpu_env);",
"if (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"VAR_7 = offsetof(CPUX86State,xmm_t0);",
"gen_ldo_env_A0(VAR_1, VAR_7);",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"VAR_7 = offsetof(CPUX86State,xmm_regs[VAR_12]);",
"}",
"VAR_6 = offsetof(CPUX86State,fpregs[VAR_13 & 7].mmx);",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);",
"tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);",
"switch(VAR_2) {",
"case 0x02c:\ngen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);",
"break;",
"case 0x12c:\ngen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);",
"break;",
"case 0x02d:\ngen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);",
"break;",
"case 0x12d:\ngen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);",
"break;",
"}",
"break;",
"case 0x22c:\ncase 0x32c:\ncase 0x22d:\ncase 0x32d:\not = mo_64_32(VAR_1->dflag);",
"if (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"if ((VAR_2 >> 8) & 1) {",
"gen_ldq_env_A0(VAR_1, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));",
"} else {",
"gen_op_ld_v(VAR_1, MO_32, cpu_T0, cpu_A0);",
"tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));",
"}",
"VAR_7 = offsetof(CPUX86State,xmm_t0);",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"VAR_7 = offsetof(CPUX86State,xmm_regs[VAR_12]);",
"}",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_7);",
"if (ot == MO_32) {",
"SSEFunc_i_ep sse_fn_i_ep =\nsse_op_table3bi[((VAR_2 >> 7) & 2) | (VAR_2 & 1)];",
"sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);",
"tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);",
"} else {",
"#ifdef TARGET_X86_64\nSSEFunc_l_ep sse_fn_l_ep =\nsse_op_table3bq[((VAR_2 >> 7) & 2) | (VAR_2 & 1)];",
"sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);",
"#else\ngoto illegal_op;",
"#endif\n}",
"gen_op_mov_reg_v(ot, VAR_13, cpu_T0);",
"break;",
"case 0xc4:\ncase 0x1c4:\nVAR_1->rip_offset = 1;",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, MO_16, OR_TMP0, 0);",
"VAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);",
"if (VAR_5) {",
"VAR_9 &= 7;",
"tcg_gen_st16_tl(cpu_T0, cpu_env,\noffsetof(CPUX86State,xmm_regs[VAR_13].ZMM_W(VAR_9)));",
"} else {",
"VAR_9 &= 3;",
"tcg_gen_st16_tl(cpu_T0, cpu_env,\noffsetof(CPUX86State,fpregs[VAR_13].mmx.MMX_W(VAR_9)));",
"}",
"break;",
"case 0xc5:\ncase 0x1c5:\nif (VAR_11 != 3)\ngoto illegal_op;",
"ot = mo_64_32(VAR_1->dflag);",
"VAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);",
"if (VAR_5) {",
"VAR_9 &= 7;",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"tcg_gen_ld16u_tl(cpu_T0, cpu_env,\noffsetof(CPUX86State,xmm_regs[VAR_12].ZMM_W(VAR_9)));",
"} else {",
"VAR_9 &= 3;",
"VAR_12 = (VAR_10 & 7);",
"tcg_gen_ld16u_tl(cpu_T0, cpu_env,\noffsetof(CPUX86State,fpregs[VAR_12].mmx.MMX_W(VAR_9)));",
"}",
"VAR_13 = ((VAR_10 >> 3) & 7) | VAR_4;",
"gen_op_mov_reg_v(ot, VAR_13, cpu_T0);",
"break;",
"case 0x1d6:\nif (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_stq_env_A0(VAR_1, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_Q(0)));",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(0)),\noffsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)));",
"gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(1)));",
"}",
"break;",
"case 0x2d6:\ngen_helper_enter_mmx(cpu_env);",
"VAR_12 = (VAR_10 & 7);",
"gen_op_movq(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(0)),\noffsetof(CPUX86State,fpregs[VAR_12].mmx));",
"gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[VAR_13].ZMM_Q(1)));",
"break;",
"case 0x3d6:\ngen_helper_enter_mmx(cpu_env);",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"gen_op_movq(offsetof(CPUX86State,fpregs[VAR_13 & 7].mmx),\noffsetof(CPUX86State,xmm_regs[VAR_12].ZMM_Q(0)));",
"break;",
"case 0xd7:\ncase 0x1d7:\nif (VAR_11 != 3)\ngoto illegal_op;",
"if (VAR_5) {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[VAR_12]));",
"gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);",
"} else {",
"VAR_12 = (VAR_10 & 7);",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[VAR_12].mmx));",
"gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);",
"}",
"VAR_13 = ((VAR_10 >> 3) & 7) | VAR_4;",
"tcg_gen_extu_i32_tl(cpu_regs[VAR_13], cpu_tmp2_i32);",
"break;",
"case 0x138:\ncase 0x038:\nVAR_2 = VAR_10;",
"if ((VAR_2 & 0xf0) == 0xf0) {",
"goto do_0f_38_fx;",
"}",
"VAR_10 = cpu_ldub_code(VAR_0, VAR_1->pc++);",
"VAR_12 = VAR_10 & 7;",
"VAR_13 = ((VAR_10 >> 3) & 7) | VAR_4;",
"VAR_11 = (VAR_10 >> 6) & 3;",
"if (VAR_5 >= 2) {",
"goto unknown_op;",
"}",
"sse_fn_epp = sse_op_table6[VAR_2].op[VAR_5];",
"if (!sse_fn_epp) {",
"goto unknown_op;",
"}",
"if (!(VAR_1->cpuid_ext_features & sse_op_table6[VAR_2].ext_mask))\ngoto illegal_op;",
"if (VAR_5) {",
"VAR_6 = offsetof(CPUX86State,xmm_regs[VAR_13]);",
"if (VAR_11 == 3) {",
"VAR_7 = offsetof(CPUX86State,xmm_regs[VAR_12 | REX_B(VAR_1)]);",
"} else {",
"VAR_7 = offsetof(CPUX86State,xmm_t0);",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"switch (VAR_2) {",
"case 0x20: case 0x30:\ncase 0x23: case 0x33:\ncase 0x25: case 0x35:\ngen_ldq_env_A0(VAR_1, VAR_7 +\noffsetof(ZMMReg, ZMM_Q(0)));",
"break;",
"case 0x21: case 0x31:\ncase 0x24: case 0x34:\ntcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,\nVAR_1->mem_index, MO_LEUL);",
"tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, VAR_7 +\noffsetof(ZMMReg, ZMM_L(0)));",
"break;",
"case 0x22: case 0x32:\ntcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,\nVAR_1->mem_index, MO_LEUW);",
"tcg_gen_st16_tl(cpu_tmp0, cpu_env, VAR_7 +\noffsetof(ZMMReg, ZMM_W(0)));",
"break;",
"case 0x2a:\ngen_ldo_env_A0(VAR_1, VAR_6);",
"return;",
"default:\ngen_ldo_env_A0(VAR_1, VAR_7);",
"}",
"}",
"} else {",
"VAR_6 = offsetof(CPUX86State,fpregs[VAR_13].mmx);",
"if (VAR_11 == 3) {",
"VAR_7 = offsetof(CPUX86State,fpregs[VAR_12].mmx);",
"} else {",
"VAR_7 = offsetof(CPUX86State,mmx_t0);",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_ldq_env_A0(VAR_1, VAR_7);",
"}",
"}",
"if (sse_fn_epp == SSE_SPECIAL) {",
"goto unknown_op;",
"}",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);",
"tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);",
"sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);",
"if (VAR_2 == 0x17) {",
"set_cc_op(VAR_1, CC_OP_EFLAGS);",
"}",
"break;",
"case 0x238:\ncase 0x338:\ndo_0f_38_fx:\nVAR_2 = VAR_10 | (VAR_5 << 8);",
"VAR_10 = cpu_ldub_code(VAR_0, VAR_1->pc++);",
"VAR_13 = ((VAR_10 >> 3) & 7) | VAR_4;",
"switch (VAR_2) {",
"case 0x3f0:\ncase 0x3f1:\ndo_crc32:\nif (!(VAR_1->cpuid_ext_features & CPUID_EXT_SSE42)) {",
"goto illegal_op;",
"}",
"if ((VAR_2 & 0xff) == 0xf0) {",
"ot = MO_8;",
"} else if (VAR_1->dflag != MO_64) {",
"ot = (VAR_1->prefix & PREFIX_DATA ? MO_16 : MO_32);",
"} else {",
"ot = MO_64;",
"}",
"tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[VAR_13]);",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);",
"gen_helper_crc32(cpu_T0, cpu_tmp2_i32,\ncpu_T0, tcg_const_i32(8 << ot));",
"ot = mo_64_32(VAR_1->dflag);",
"gen_op_mov_reg_v(ot, VAR_13, cpu_T0);",
"break;",
"case 0x1f0:\ncase 0x1f1:\nif (VAR_1->prefix & PREFIX_REPNZ) {",
"goto do_crc32;",
"}",
"case 0x0f0:\ncase 0x0f1:\nif (!(VAR_1->cpuid_ext_features & CPUID_EXT_MOVBE)) {",
"goto illegal_op;",
"}",
"if (VAR_1->dflag != MO_64) {",
"ot = (VAR_1->prefix & PREFIX_DATA ? MO_16 : MO_32);",
"} else {",
"ot = MO_64;",
"}",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"if ((VAR_2 & 1) == 0) {",
"tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,\nVAR_1->mem_index, ot | MO_BE);",
"gen_op_mov_reg_v(ot, VAR_13, cpu_T0);",
"} else {",
"tcg_gen_qemu_st_tl(cpu_regs[VAR_13], cpu_A0,\nVAR_1->mem_index, ot | MO_BE);",
"}",
"break;",
"case 0x0f2:\nif (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)\n|| !(VAR_1->prefix & PREFIX_VEX)\n|| VAR_1->vex_l != 0) {",
"goto illegal_op;",
"}",
"ot = mo_64_32(VAR_1->dflag);",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);",
"tcg_gen_andc_tl(cpu_T0, cpu_regs[VAR_1->vex_v], cpu_T0);",
"gen_op_mov_reg_v(ot, VAR_13, cpu_T0);",
"gen_op_update1_cc();",
"set_cc_op(VAR_1, CC_OP_LOGICB + ot);",
"break;",
"case 0x0f7:\nif (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)\n|| !(VAR_1->prefix & PREFIX_VEX)\n|| VAR_1->vex_l != 0) {",
"goto illegal_op;",
"}",
"ot = mo_64_32(VAR_1->dflag);",
"{",
"TCGv bound, zero;",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);",
"tcg_gen_ext8u_tl(cpu_A0, cpu_regs[VAR_1->vex_v]);",
"tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);",
"bound = tcg_const_tl(ot == MO_64 ? 63 : 31);",
"zero = tcg_const_tl(0);",
"tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,\ncpu_T0, zero);",
"tcg_temp_free(zero);",
"tcg_gen_extract_tl(cpu_A0, cpu_regs[VAR_1->vex_v], 8, 8);",
"tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,\ncpu_A0, bound);",
"tcg_temp_free(bound);",
"tcg_gen_movi_tl(cpu_T1, 1);",
"tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);",
"tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);",
"tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);",
"gen_op_mov_reg_v(ot, VAR_13, cpu_T0);",
"gen_op_update1_cc();",
"set_cc_op(VAR_1, CC_OP_LOGICB + ot);",
"}",
"break;",
"case 0x0f5:\nif (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)\n|| !(VAR_1->prefix & PREFIX_VEX)\n|| VAR_1->vex_l != 0) {",
"goto illegal_op;",
"}",
"ot = mo_64_32(VAR_1->dflag);",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);",
"tcg_gen_ext8u_tl(cpu_T1, cpu_regs[VAR_1->vex_v]);",
"{",
"TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);",
"tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,\ncpu_T1, bound);",
"tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,\nbound, bound, cpu_T1);",
"tcg_temp_free(bound);",
"}",
"tcg_gen_movi_tl(cpu_A0, -1);",
"tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);",
"tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);",
"gen_op_mov_reg_v(ot, VAR_13, cpu_T0);",
"gen_op_update1_cc();",
"set_cc_op(VAR_1, CC_OP_BMILGB + ot);",
"break;",
"case 0x3f6:\nif (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)\n|| !(VAR_1->prefix & PREFIX_VEX)\n|| VAR_1->vex_l != 0) {",
"goto illegal_op;",
"}",
"ot = mo_64_32(VAR_1->dflag);",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);",
"switch (ot) {",
"default:\ntcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);",
"tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);",
"tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,\ncpu_tmp2_i32, cpu_tmp3_i32);",
"tcg_gen_extu_i32_tl(cpu_regs[VAR_1->vex_v], cpu_tmp2_i32);",
"tcg_gen_extu_i32_tl(cpu_regs[VAR_13], cpu_tmp3_i32);",
"break;",
"#ifdef TARGET_X86_64\ncase MO_64:\ntcg_gen_mulu2_i64(cpu_T0, cpu_T1,\ncpu_T0, cpu_regs[R_EDX]);",
"tcg_gen_mov_i64(cpu_regs[VAR_1->vex_v], cpu_T0);",
"tcg_gen_mov_i64(cpu_regs[VAR_13], cpu_T1);",
"break;",
"#endif\n}",
"break;",
"case 0x3f5:\nif (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)\n|| !(VAR_1->prefix & PREFIX_VEX)\n|| VAR_1->vex_l != 0) {",
"goto illegal_op;",
"}",
"ot = mo_64_32(VAR_1->dflag);",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);",
"if (ot == MO_64) {",
"tcg_gen_mov_tl(cpu_T1, cpu_regs[VAR_1->vex_v]);",
"} else {",
"tcg_gen_ext32u_tl(cpu_T1, cpu_regs[VAR_1->vex_v]);",
"}",
"gen_helper_pdep(cpu_regs[VAR_13], cpu_T0, cpu_T1);",
"break;",
"case 0x2f5:\nif (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)\n|| !(VAR_1->prefix & PREFIX_VEX)\n|| VAR_1->vex_l != 0) {",
"goto illegal_op;",
"}",
"ot = mo_64_32(VAR_1->dflag);",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);",
"if (ot == MO_64) {",
"tcg_gen_mov_tl(cpu_T1, cpu_regs[VAR_1->vex_v]);",
"} else {",
"tcg_gen_ext32u_tl(cpu_T1, cpu_regs[VAR_1->vex_v]);",
"}",
"gen_helper_pext(cpu_regs[VAR_13], cpu_T0, cpu_T1);",
"break;",
"case 0x1f6:\ncase 0x2f6:\nif (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {",
"goto illegal_op;",
"} else {",
"TCGv carry_in, carry_out, zero;",
"int VAR_16;",
"ot = mo_64_32(VAR_1->dflag);",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);",
"TCGV_UNUSED(carry_in);",
"carry_out = (VAR_2 == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);",
"switch (VAR_1->cc_op) {",
"case CC_OP_ADCX:\nif (VAR_2 == 0x1f6) {",
"carry_in = cpu_cc_dst;",
"VAR_16 = CC_OP_ADCX;",
"} else {",
"VAR_16 = CC_OP_ADCOX;",
"}",
"break;",
"case CC_OP_ADOX:\nif (VAR_2 == 0x1f6) {",
"VAR_16 = CC_OP_ADCOX;",
"} else {",
"carry_in = cpu_cc_src2;",
"VAR_16 = CC_OP_ADOX;",
"}",
"break;",
"case CC_OP_ADCOX:\nVAR_16 = CC_OP_ADCOX;",
"carry_in = carry_out;",
"break;",
"default:\nVAR_16 = (VAR_2 == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);",
"break;",
"}",
"if (TCGV_IS_UNUSED(carry_in)) {",
"if (VAR_1->cc_op != CC_OP_ADCX && VAR_1->cc_op != CC_OP_ADOX) {",
"gen_compute_eflags(VAR_1);",
"}",
"carry_in = cpu_tmp0;",
"tcg_gen_extract_tl(carry_in, cpu_cc_src,\nctz32(VAR_2 == 0x1f6 ? CC_C : CC_O), 1);",
"}",
"switch (ot) {",
"#ifdef TARGET_X86_64\ncase MO_32:\ntcg_gen_ext32u_i64(cpu_regs[VAR_13], cpu_regs[VAR_13]);",
"tcg_gen_ext32u_i64(cpu_T0, cpu_T0);",
"tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[VAR_13]);",
"tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);",
"tcg_gen_ext32u_i64(cpu_regs[VAR_13], cpu_T0);",
"tcg_gen_shri_i64(carry_out, cpu_T0, 32);",
"break;",
"#endif\ndefault:\nzero = tcg_const_tl(0);",
"tcg_gen_add2_tl(cpu_T0, carry_out,\ncpu_T0, zero,\ncarry_in, zero);",
"tcg_gen_add2_tl(cpu_regs[VAR_13], carry_out,\ncpu_regs[VAR_13], carry_out,\ncpu_T0, zero);",
"tcg_temp_free(zero);",
"break;",
"}",
"set_cc_op(VAR_1, VAR_16);",
"}",
"break;",
"case 0x1f7:\ncase 0x2f7:\ncase 0x3f7:\nif (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)\n|| !(VAR_1->prefix & PREFIX_VEX)\n|| VAR_1->vex_l != 0) {",
"goto illegal_op;",
"}",
"ot = mo_64_32(VAR_1->dflag);",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);",
"if (ot == MO_64) {",
"tcg_gen_andi_tl(cpu_T1, cpu_regs[VAR_1->vex_v], 63);",
"} else {",
"tcg_gen_andi_tl(cpu_T1, cpu_regs[VAR_1->vex_v], 31);",
"}",
"if (VAR_2 == 0x1f7) {",
"tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);",
"} else if (VAR_2 == 0x2f7) {",
"if (ot != MO_64) {",
"tcg_gen_ext32s_tl(cpu_T0, cpu_T0);",
"}",
"tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);",
"} else {",
"if (ot != MO_64) {",
"tcg_gen_ext32u_tl(cpu_T0, cpu_T0);",
"}",
"tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);",
"}",
"gen_op_mov_reg_v(ot, VAR_13, cpu_T0);",
"break;",
"case 0x0f3:\ncase 0x1f3:\ncase 0x2f3:\ncase 0x3f3:\nif (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)\n|| !(VAR_1->prefix & PREFIX_VEX)\n|| VAR_1->vex_l != 0) {",
"goto illegal_op;",
"}",
"ot = mo_64_32(VAR_1->dflag);",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);",
"switch (VAR_13 & 7) {",
"case 1:\ntcg_gen_neg_tl(cpu_T1, cpu_T0);",
"tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);",
"gen_op_mov_reg_v(ot, VAR_1->vex_v, cpu_T0);",
"gen_op_update2_cc();",
"set_cc_op(VAR_1, CC_OP_BMILGB + ot);",
"break;",
"case 2:\ntcg_gen_mov_tl(cpu_cc_src, cpu_T0);",
"tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);",
"tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);",
"tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);",
"set_cc_op(VAR_1, CC_OP_BMILGB + ot);",
"break;",
"case 3:\ntcg_gen_mov_tl(cpu_cc_src, cpu_T0);",
"tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);",
"tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_cc_src);",
"tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);",
"set_cc_op(VAR_1, CC_OP_BMILGB + ot);",
"break;",
"default:\ngoto unknown_op;",
"}",
"break;",
"default:\ngoto unknown_op;",
"}",
"break;",
"case 0x03a:\ncase 0x13a:\nVAR_2 = VAR_10;",
"VAR_10 = cpu_ldub_code(VAR_0, VAR_1->pc++);",
"VAR_12 = VAR_10 & 7;",
"VAR_13 = ((VAR_10 >> 3) & 7) | VAR_4;",
"VAR_11 = (VAR_10 >> 6) & 3;",
"if (VAR_5 >= 2) {",
"goto unknown_op;",
"}",
"sse_fn_eppi = sse_op_table7[VAR_2].op[VAR_5];",
"if (!sse_fn_eppi) {",
"goto unknown_op;",
"}",
"if (!(VAR_1->cpuid_ext_features & sse_op_table7[VAR_2].ext_mask))\ngoto illegal_op;",
"VAR_1->rip_offset = 1;",
"if (sse_fn_eppi == SSE_SPECIAL) {",
"ot = mo_64_32(VAR_1->dflag);",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"if (VAR_11 != 3)\ngen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"VAR_13 = ((VAR_10 >> 3) & 7) | VAR_4;",
"VAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);",
"switch (VAR_2) {",
"case 0x14:\ntcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_B(VAR_9 & 15)));",
"if (VAR_11 == 3) {",
"gen_op_mov_reg_v(ot, VAR_12, cpu_T0);",
"} else {",
"tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,\nVAR_1->mem_index, MO_UB);",
"}",
"break;",
"case 0x15:\ntcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_W(VAR_9 & 7)));",
"if (VAR_11 == 3) {",
"gen_op_mov_reg_v(ot, VAR_12, cpu_T0);",
"} else {",
"tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,\nVAR_1->mem_index, MO_LEUW);",
"}",
"break;",
"case 0x16:\nif (ot == MO_32) {",
"tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,\noffsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_L(VAR_9 & 3)));",
"if (VAR_11 == 3) {",
"tcg_gen_extu_i32_tl(cpu_regs[VAR_12], cpu_tmp2_i32);",
"} else {",
"tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,\nVAR_1->mem_index, MO_LEUL);",
"}",
"} else {",
"#ifdef TARGET_X86_64\ntcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,\noffsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_Q(VAR_9 & 1)));",
"if (VAR_11 == 3) {",
"tcg_gen_mov_i64(cpu_regs[VAR_12], cpu_tmp1_i64);",
"} else {",
"tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,\nVAR_1->mem_index, MO_LEQ);",
"}",
"#else\ngoto illegal_op;",
"#endif\n}",
"break;",
"case 0x17:\ntcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_L(VAR_9 & 3)));",
"if (VAR_11 == 3) {",
"gen_op_mov_reg_v(ot, VAR_12, cpu_T0);",
"} else {",
"tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,\nVAR_1->mem_index, MO_LEUL);",
"}",
"break;",
"case 0x20:\nif (VAR_11 == 3) {",
"gen_op_mov_v_reg(MO_32, cpu_T0, VAR_12);",
"} else {",
"tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,\nVAR_1->mem_index, MO_UB);",
"}",
"tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_B(VAR_9 & 15)));",
"break;",
"case 0x21:\nif (VAR_11 == 3) {",
"tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,\noffsetof(CPUX86State,xmm_regs[VAR_12]\n.ZMM_L((VAR_9 >> 6) & 3)));",
"} else {",
"tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,\nVAR_1->mem_index, MO_LEUL);",
"}",
"tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,\noffsetof(CPUX86State,xmm_regs[VAR_13]\n.ZMM_L((VAR_9 >> 4) & 3)));",
"if ((VAR_9 >> 0) & 1)\ntcg_gen_st_i32(tcg_const_i32(0 ),\ncpu_env, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_L(0)));",
"if ((VAR_9 >> 1) & 1)\ntcg_gen_st_i32(tcg_const_i32(0 ),\ncpu_env, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_L(1)));",
"if ((VAR_9 >> 2) & 1)\ntcg_gen_st_i32(tcg_const_i32(0 ),\ncpu_env, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_L(2)));",
"if ((VAR_9 >> 3) & 1)\ntcg_gen_st_i32(tcg_const_i32(0 ),\ncpu_env, offsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_L(3)));",
"break;",
"case 0x22:\nif (ot == MO_32) {",
"if (VAR_11 == 3) {",
"tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[VAR_12]);",
"} else {",
"tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,\nVAR_1->mem_index, MO_LEUL);",
"}",
"tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,\noffsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_L(VAR_9 & 3)));",
"} else {",
"#ifdef TARGET_X86_64\nif (VAR_11 == 3) {",
"gen_op_mov_v_reg(ot, cpu_tmp1_i64, VAR_12);",
"} else {",
"tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,\nVAR_1->mem_index, MO_LEQ);",
"}",
"tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,\noffsetof(CPUX86State,\nxmm_regs[VAR_13].ZMM_Q(VAR_9 & 1)));",
"#else\ngoto illegal_op;",
"#endif\n}",
"break;",
"}",
"return;",
"}",
"if (VAR_5) {",
"VAR_6 = offsetof(CPUX86State,xmm_regs[VAR_13]);",
"if (VAR_11 == 3) {",
"VAR_7 = offsetof(CPUX86State,xmm_regs[VAR_12 | REX_B(VAR_1)]);",
"} else {",
"VAR_7 = offsetof(CPUX86State,xmm_t0);",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_ldo_env_A0(VAR_1, VAR_7);",
"}",
"} else {",
"VAR_6 = offsetof(CPUX86State,fpregs[VAR_13].mmx);",
"if (VAR_11 == 3) {",
"VAR_7 = offsetof(CPUX86State,fpregs[VAR_12].mmx);",
"} else {",
"VAR_7 = offsetof(CPUX86State,mmx_t0);",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"gen_ldq_env_A0(VAR_1, VAR_7);",
"}",
"}",
"VAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);",
"if ((VAR_2 & 0xfc) == 0x60) {",
"set_cc_op(VAR_1, CC_OP_EFLAGS);",
"if (VAR_1->dflag == MO_64) {",
"VAR_9 |= 1 << 8;",
"}",
"}",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);",
"tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);",
"sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(VAR_9));",
"break;",
"case 0x33a:\nVAR_2 = VAR_10 | (VAR_5 << 8);",
"VAR_10 = cpu_ldub_code(VAR_0, VAR_1->pc++);",
"VAR_13 = ((VAR_10 >> 3) & 7) | VAR_4;",
"switch (VAR_2) {",
"case 0x3f0:\nif (!(VAR_1->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)\n|| !(VAR_1->prefix & PREFIX_VEX)\n|| VAR_1->vex_l != 0) {",
"goto illegal_op;",
"}",
"ot = mo_64_32(VAR_1->dflag);",
"gen_ldst_modrm(VAR_0, VAR_1, VAR_10, ot, OR_TMP0, 0);",
"VAR_2 = cpu_ldub_code(VAR_0, VAR_1->pc++);",
"if (ot == MO_64) {",
"tcg_gen_rotri_tl(cpu_T0, cpu_T0, VAR_2 & 63);",
"} else {",
"tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);",
"tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, VAR_2 & 31);",
"tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);",
"}",
"gen_op_mov_reg_v(ot, VAR_13, cpu_T0);",
"break;",
"default:\ngoto unknown_op;",
"}",
"break;",
"default:\nunknown_op:\ngen_unknown_opcode(VAR_0, VAR_1);",
"return;",
"}",
"} else {",
"switch(VAR_2) {",
"case 0x70:\ncase 0xc6:\ncase 0xc2:\nVAR_1->rip_offset = 1;",
"break;",
"default:\nbreak;",
"}",
"if (VAR_8) {",
"VAR_6 = offsetof(CPUX86State,xmm_regs[VAR_13]);",
"if (VAR_11 != 3) {",
"int VAR_17 = 4;",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"VAR_7 = offsetof(CPUX86State,xmm_t0);",
"switch (VAR_2) {",
"case 0x50 ... 0x5a:\ncase 0x5c ... 0x5f:\ncase 0xc2:\nif (VAR_5 == 2) {",
"VAR_17 = 2;",
"} else if (VAR_5 == 3) {",
"VAR_17 = 3;",
"}",
"break;",
"case 0x2e:\ncase 0x2f:\nif (VAR_5 == 0) {",
"VAR_17 = 2;",
"} else {",
"VAR_17 = 3;",
"}",
"break;",
"}",
"switch (VAR_17) {",
"case 2:\ngen_op_ld_v(VAR_1, MO_32, cpu_T0, cpu_A0);",
"tcg_gen_st32_tl(cpu_T0, cpu_env,\noffsetof(CPUX86State,xmm_t0.ZMM_L(0)));",
"break;",
"case 3:\ngen_ldq_env_A0(VAR_1, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));",
"break;",
"default:\ngen_ldo_env_A0(VAR_1, VAR_7);",
"break;",
"}",
"} else {",
"VAR_12 = (VAR_10 & 7) | REX_B(VAR_1);",
"VAR_7 = offsetof(CPUX86State,xmm_regs[VAR_12]);",
"}",
"} else {",
"VAR_6 = offsetof(CPUX86State,fpregs[VAR_13].mmx);",
"if (VAR_11 != 3) {",
"gen_lea_modrm(VAR_0, VAR_1, VAR_10);",
"VAR_7 = offsetof(CPUX86State,mmx_t0);",
"gen_ldq_env_A0(VAR_1, VAR_7);",
"} else {",
"VAR_12 = (VAR_10 & 7);",
"VAR_7 = offsetof(CPUX86State,fpregs[VAR_12].mmx);",
"}",
"}",
"switch(VAR_2) {",
"case 0x0f:\nVAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);",
"sse_fn_epp = sse_op_table5[VAR_9];",
"if (!sse_fn_epp) {",
"goto unknown_op;",
"}",
"if (!(VAR_1->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {",
"goto illegal_op;",
"}",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);",
"tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);",
"sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);",
"break;",
"case 0x70:\ncase 0xc6:\nVAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);",
"tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);",
"sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;",
"sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(VAR_9));",
"break;",
"case 0xc2:\nVAR_9 = cpu_ldub_code(VAR_0, VAR_1->pc++);",
"if (VAR_9 >= 8)\ngoto unknown_op;",
"sse_fn_epp = sse_op_table4[VAR_9][VAR_5];",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);",
"tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);",
"sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);",
"break;",
"case 0xf7:\nif (VAR_11 != 3)\ngoto illegal_op;",
"tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);",
"gen_extu(VAR_1->aflag, cpu_A0);",
"gen_add_A0_ds_seg(VAR_1);",
"tcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);",
"tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);",
"sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;",
"sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);",
"break;",
"default:\ntcg_gen_addi_ptr(cpu_ptr0, cpu_env, VAR_6);",
"tcg_gen_addi_ptr(cpu_ptr1, cpu_env, VAR_7);",
"sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);",
"break;",
"}",
"if (VAR_2 == 0x2e || VAR_2 == 0x2f) {",
"set_cc_op(VAR_1, CC_OP_EFLAGS);",
"}",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
23
],
[
25,
27
],
[
29,
31
],
[
33,
35
],
[
37,
39
],
[
41
],
[
43
],
[
45
],
[
47
],
[
49
],
[
51
],
[
53
],
[
55
],
[
59
],
[
61
],
[
63
],
[
65
],
[
67
],
[
71
],
[
73
],
[
75
],
[
77
],
[
79
],
[
81,
83
],
[
85
],
[
87
],
[
89,
91,
93
],
[
95
],
[
97
],
[
99
],
[
101
],
[
105
],
[
107
],
[
111
],
[
113
],
[
115
],
[
117
],
[
121
],
[
123
],
[
125
],
[
131
],
[
133
],
[
135
],
[
139
],
[
141
],
[
143,
145
],
[
147
],
[
149
],
[
151
],
[
153
],
[
155,
157
],
[
159
],
[
161
],
[
163
],
[
165
],
[
167
],
[
169,
171,
173,
175,
177
],
[
179
],
[
181
],
[
183
],
[
185,
187,
189
],
[
191
],
[
193
],
[
195
],
[
197,
199,
201,
203
],
[
205
],
[
207
],
[
209,
211
],
[
213
],
[
215,
217
],
[
219
],
[
221
],
[
223
],
[
225,
227,
229
],
[
231
],
[
233
],
[
235
],
[
237,
239
],
[
241
],
[
243,
245
],
[
247
],
[
249
],
[
251
],
[
253
],
[
255,
257,
259
],
[
261
],
[
263,
265
],
[
267
],
[
269
],
[
271,
273
],
[
275
],
[
277,
279
],
[
281
],
[
283
],
[
285
],
[
287
],
[
289,
291
],
[
293
],
[
295
],
[
297
],
[
299
],
[
301,
303
],
[
305,
307
],
[
309
],
[
311
],
[
313,
315,
317,
319,
321,
323,
325
],
[
327
],
[
329
],
[
331
],
[
333
],
[
335,
337
],
[
339
],
[
341
],
[
343,
345
],
[
347
],
[
349
],
[
351
],
[
353
],
[
355
],
[
357
],
[
359
],
[
361
],
[
363
],
[
365,
367
],
[
369
],
[
371
],
[
373,
375
],
[
377
],
[
379,
381
],
[
383
],
[
385
],
[
387
],
[
389
],
[
391
],
[
393,
395
],
[
397
],
[
399
],
[
401,
403,
405
],
[
407
],
[
409,
411
],
[
413
],
[
417
],
[
419,
421
],
[
423
],
[
425
],
[
427,
429
],
[
431
],
[
433
],
[
435
],
[
437
],
[
439,
441
],
[
443,
445
],
[
447
],
[
449,
451
],
[
453,
455
],
[
457
],
[
459,
461
],
[
463
],
[
465,
467
],
[
469
],
[
471
],
[
473,
475
],
[
477
],
[
479,
481
],
[
483
],
[
485,
487,
489
],
[
491
],
[
493,
495
],
[
497
],
[
501
],
[
503,
505
],
[
507
],
[
509
],
[
511,
513
],
[
515
],
[
517
],
[
519
],
[
521
],
[
523,
525
],
[
527,
529
],
[
531
],
[
533,
535
],
[
537,
539
],
[
541
],
[
543,
545,
547
],
[
549
],
[
553,
555
],
[
557
],
[
559
],
[
561,
563
],
[
565,
567,
569,
571
],
[
573,
575,
577,
579
],
[
581
],
[
583
],
[
585,
587,
589
],
[
591,
593
],
[
595
],
[
597
],
[
599,
601
],
[
603,
605
],
[
607
],
[
609
],
[
611
],
[
613,
615,
617
],
[
619,
621
],
[
623
],
[
625
],
[
627,
629
],
[
631,
633
],
[
635
],
[
637
],
[
639
],
[
641,
643
],
[
645
],
[
647,
649
],
[
651
],
[
653
],
[
655,
657
],
[
659
],
[
661
],
[
663
],
[
665,
667
],
[
669
],
[
671
],
[
673
],
[
675
],
[
677,
679
],
[
681
],
[
683
],
[
685,
687,
689,
691,
693,
695,
697
],
[
699
],
[
701
],
[
703
],
[
705
],
[
707,
709
],
[
711
],
[
713
],
[
715,
717
],
[
719
],
[
721
],
[
723
],
[
725
],
[
727
],
[
729,
731
],
[
733
],
[
735
],
[
737,
739
],
[
741
],
[
743,
745
],
[
747
],
[
749
],
[
751,
753
],
[
755
],
[
757
],
[
759,
761,
763
],
[
765
],
[
767,
769
],
[
771
],
[
773
],
[
775
],
[
777
],
[
779,
781,
783
],
[
785
],
[
787,
789
],
[
791
],
[
793
],
[
795
],
[
797
],
[
799,
801,
803,
805,
807,
809,
811
],
[
813
],
[
815
],
[
817
],
[
819
],
[
821
],
[
823
],
[
825
],
[
827
],
[
829
],
[
831
],
[
833
],
[
835
],
[
837
],
[
839
],
[
841
],
[
843
],
[
845,
847
],
[
849
],
[
851
],
[
853
],
[
855
],
[
857
],
[
859
],
[
861
],
[
863
],
[
865
],
[
867
],
[
869
],
[
871
],
[
873
],
[
875
],
[
877,
879
],
[
881,
883
],
[
885
],
[
887
],
[
889
],
[
891,
893
],
[
895,
897
],
[
899
],
[
901
],
[
903
],
[
905,
907,
909
],
[
911
],
[
913
],
[
915
],
[
917
],
[
919
],
[
921
],
[
923
],
[
925
],
[
927
],
[
929
],
[
931
],
[
933
],
[
935,
937
],
[
939
],
[
941,
943,
945
],
[
947
],
[
949
],
[
951
],
[
953,
955,
957
],
[
959
],
[
961
],
[
963
],
[
965
],
[
967
],
[
969
],
[
971
],
[
973
],
[
975,
977
],
[
979
],
[
981,
983
],
[
985,
987
],
[
989
],
[
991,
993,
995,
997,
999
],
[
1001
],
[
1003
],
[
1005
],
[
1007
],
[
1009
],
[
1011
],
[
1013
],
[
1015
],
[
1017
],
[
1019
],
[
1021
],
[
1023
],
[
1025,
1027
],
[
1029
],
[
1031,
1033
],
[
1035
],
[
1037,
1039
],
[
1041
],
[
1043,
1045
],
[
1047
],
[
1049
],
[
1051
],
[
1053,
1055,
1057,
1059,
1061
],
[
1063
],
[
1065
],
[
1067
],
[
1069
],
[
1071
],
[
1073
],
[
1075
],
[
1077
],
[
1079
],
[
1081
],
[
1083
],
[
1085
],
[
1087
],
[
1089
],
[
1091
],
[
1093,
1095
],
[
1097
],
[
1099
],
[
1101
],
[
1103,
1105,
1107
],
[
1109
],
[
1111,
1113
],
[
1115,
1117
],
[
1119
],
[
1121
],
[
1123,
1125,
1127
],
[
1129
],
[
1131
],
[
1133
],
[
1135
],
[
1137,
1139
],
[
1141
],
[
1143
],
[
1145,
1147
],
[
1149
],
[
1151
],
[
1153,
1155,
1157,
1159
],
[
1161
],
[
1163
],
[
1165
],
[
1167
],
[
1169
],
[
1171,
1173
],
[
1175
],
[
1177
],
[
1179
],
[
1181,
1183
],
[
1185
],
[
1187
],
[
1189
],
[
1191
],
[
1193,
1195
],
[
1197
],
[
1199,
1201
],
[
1203
],
[
1205
],
[
1207,
1209
],
[
1211
],
[
1213
],
[
1215
],
[
1217,
1219
],
[
1221
],
[
1223,
1225
],
[
1227
],
[
1229
],
[
1231,
1233
],
[
1235
],
[
1237,
1239
],
[
1241
],
[
1243,
1245,
1247,
1249
],
[
1251
],
[
1253
],
[
1255
],
[
1257
],
[
1259
],
[
1261
],
[
1263
],
[
1265
],
[
1267
],
[
1269
],
[
1271
],
[
1273
],
[
1277,
1279,
1281
],
[
1283
],
[
1285
],
[
1287
],
[
1289
],
[
1291
],
[
1293
],
[
1295
],
[
1297
],
[
1299
],
[
1301
],
[
1305
],
[
1307
],
[
1309
],
[
1311
],
[
1313,
1315
],
[
1319
],
[
1321
],
[
1323
],
[
1325
],
[
1327
],
[
1329
],
[
1331
],
[
1333
],
[
1335,
1337,
1339,
1341,
1343
],
[
1345
],
[
1347,
1349,
1351,
1353
],
[
1355,
1357
],
[
1359
],
[
1361,
1363,
1365
],
[
1367,
1369
],
[
1371
],
[
1373,
1375
],
[
1377
],
[
1379,
1381
],
[
1383
],
[
1385
],
[
1387
],
[
1389
],
[
1391
],
[
1393
],
[
1395
],
[
1397
],
[
1399
],
[
1401
],
[
1403
],
[
1405
],
[
1407
],
[
1409
],
[
1411
],
[
1415
],
[
1417
],
[
1419
],
[
1423
],
[
1425
],
[
1427
],
[
1429
],
[
1433,
1435,
1437,
1441
],
[
1443
],
[
1445
],
[
1449
],
[
1451,
1453,
1455,
1457
],
[
1459
],
[
1461
],
[
1463
],
[
1465
],
[
1467
],
[
1469
],
[
1471
],
[
1473
],
[
1475
],
[
1479
],
[
1481
],
[
1483,
1485
],
[
1489
],
[
1491
],
[
1493
],
[
1497,
1499,
1507
],
[
1509
],
[
1511
],
[
1515,
1517,
1519
],
[
1521
],
[
1523
],
[
1525
],
[
1527
],
[
1529
],
[
1531
],
[
1533
],
[
1537
],
[
1539
],
[
1541,
1543
],
[
1545
],
[
1547
],
[
1549,
1551
],
[
1553
],
[
1555
],
[
1559,
1561,
1563,
1565
],
[
1567
],
[
1569
],
[
1571
],
[
1573
],
[
1575
],
[
1577
],
[
1579
],
[
1581
],
[
1583
],
[
1587,
1589,
1591,
1593
],
[
1595
],
[
1597
],
[
1599
],
[
1601
],
[
1603
],
[
1607
],
[
1613
],
[
1615
],
[
1619
],
[
1621
],
[
1623,
1625
],
[
1627
],
[
1635
],
[
1637,
1639
],
[
1641
],
[
1643
],
[
1645
],
[
1647
],
[
1649
],
[
1653
],
[
1655
],
[
1657
],
[
1659
],
[
1661
],
[
1665,
1667,
1669,
1671
],
[
1673
],
[
1675
],
[
1677
],
[
1679
],
[
1681
],
[
1683
],
[
1685
],
[
1691,
1693
],
[
1695,
1697
],
[
1699
],
[
1701
],
[
1703
],
[
1705
],
[
1707
],
[
1709
],
[
1711
],
[
1713
],
[
1715
],
[
1719,
1721,
1723,
1725
],
[
1727
],
[
1729
],
[
1731
],
[
1733
],
[
1735
],
[
1737,
1739
],
[
1741
],
[
1743,
1745
],
[
1747
],
[
1749
],
[
1751
],
[
1753,
1755,
1757,
1759
],
[
1761
],
[
1763
],
[
1765
],
[
1767,
1769
],
[
1771
],
[
1775,
1777,
1779,
1781
],
[
1783
],
[
1785
],
[
1787
],
[
1789
],
[
1795
],
[
1797
],
[
1799
],
[
1801
],
[
1803
],
[
1805
],
[
1807
],
[
1811,
1813,
1815,
1817
],
[
1819
],
[
1821
],
[
1823
],
[
1825
],
[
1831
],
[
1833
],
[
1835
],
[
1837
],
[
1839
],
[
1841
],
[
1843
],
[
1847,
1849,
1851
],
[
1853
],
[
1855
],
[
1857
],
[
1859
],
[
1863
],
[
1865
],
[
1871
],
[
1873
],
[
1875
],
[
1877,
1879
],
[
1881
],
[
1883
],
[
1885
],
[
1887
],
[
1889
],
[
1891
],
[
1893,
1895
],
[
1897
],
[
1899
],
[
1901
],
[
1903
],
[
1905
],
[
1907
],
[
1909,
1911
],
[
1913
],
[
1915
],
[
1917,
1919
],
[
1921
],
[
1923
],
[
1927
],
[
1929
],
[
1931
],
[
1933
],
[
1935
],
[
1937,
1939
],
[
1941
],
[
1945
],
[
1947,
1949,
1955
],
[
1957
],
[
1959
],
[
1961
],
[
1963
],
[
1965
],
[
1967
],
[
1969,
1971,
1975
],
[
1977,
1979,
1981
],
[
1983,
1985,
1987
],
[
1989
],
[
1991
],
[
1993
],
[
1995
],
[
1997
],
[
1999
],
[
2003,
2005,
2007,
2009,
2011,
2013
],
[
2015
],
[
2017
],
[
2019
],
[
2021
],
[
2023
],
[
2025
],
[
2027
],
[
2029
],
[
2031
],
[
2033
],
[
2035
],
[
2037
],
[
2039
],
[
2041
],
[
2043
],
[
2045
],
[
2047
],
[
2049
],
[
2051
],
[
2053
],
[
2055
],
[
2057
],
[
2059
],
[
2061
],
[
2065,
2067,
2069,
2071,
2073,
2075,
2077
],
[
2079
],
[
2081
],
[
2083
],
[
2085
],
[
2089
],
[
2091,
2093
],
[
2095
],
[
2097
],
[
2099
],
[
2101
],
[
2103
],
[
2107,
2109
],
[
2111
],
[
2113
],
[
2115
],
[
2117
],
[
2119
],
[
2123,
2125
],
[
2127
],
[
2129
],
[
2131
],
[
2133
],
[
2135
],
[
2139,
2141
],
[
2143
],
[
2145
],
[
2149,
2151
],
[
2153
],
[
2155
],
[
2159,
2161,
2163
],
[
2165
],
[
2167
],
[
2169
],
[
2171
],
[
2173
],
[
2175
],
[
2177
],
[
2181
],
[
2183
],
[
2185
],
[
2187
],
[
2189,
2191
],
[
2195
],
[
2199
],
[
2201
],
[
2203
],
[
2205,
2207
],
[
2209
],
[
2211
],
[
2213
],
[
2215,
2217,
2219
],
[
2221
],
[
2223
],
[
2225
],
[
2227,
2229
],
[
2231
],
[
2233
],
[
2235,
2237,
2239
],
[
2241
],
[
2243
],
[
2245
],
[
2247,
2249
],
[
2251
],
[
2253
],
[
2255,
2257
],
[
2259,
2261,
2263
],
[
2265
],
[
2267
],
[
2269
],
[
2271,
2273
],
[
2275
],
[
2277
],
[
2279,
2281,
2283,
2285
],
[
2287
],
[
2289
],
[
2291
],
[
2293,
2295
],
[
2297
],
[
2299,
2301
],
[
2303,
2305
],
[
2307
],
[
2309,
2311,
2313
],
[
2315
],
[
2317
],
[
2319
],
[
2321,
2323
],
[
2325
],
[
2327
],
[
2329,
2331
],
[
2333
],
[
2335
],
[
2337,
2339
],
[
2341
],
[
2343,
2345
],
[
2347
],
[
2349,
2351
],
[
2353,
2355,
2357
],
[
2359
],
[
2361,
2363
],
[
2365
],
[
2367,
2369,
2371
],
[
2373,
2375,
2377,
2379
],
[
2381,
2383,
2385,
2387
],
[
2389,
2391,
2393,
2395
],
[
2397,
2399,
2401,
2403
],
[
2405
],
[
2407,
2409
],
[
2411
],
[
2413
],
[
2415
],
[
2417,
2419
],
[
2421
],
[
2423,
2425,
2427
],
[
2429
],
[
2431,
2433
],
[
2435
],
[
2437
],
[
2439,
2441
],
[
2443
],
[
2445,
2447,
2449
],
[
2451,
2453
],
[
2455,
2457
],
[
2459
],
[
2461
],
[
2463
],
[
2465
],
[
2469
],
[
2471
],
[
2473
],
[
2475
],
[
2477
],
[
2479
],
[
2481
],
[
2483
],
[
2485
],
[
2487
],
[
2489
],
[
2491
],
[
2493
],
[
2495
],
[
2497
],
[
2499
],
[
2501
],
[
2503
],
[
2505
],
[
2507
],
[
2511
],
[
2513
],
[
2517
],
[
2521
],
[
2523
],
[
2525
],
[
2529
],
[
2531
],
[
2533
],
[
2535
],
[
2539,
2543
],
[
2545
],
[
2547
],
[
2551
],
[
2553,
2555,
2557,
2559
],
[
2561
],
[
2563
],
[
2565
],
[
2567
],
[
2569
],
[
2571
],
[
2573
],
[
2575
],
[
2577
],
[
2579
],
[
2581
],
[
2583
],
[
2585
],
[
2587
],
[
2591,
2593
],
[
2595
],
[
2597
],
[
2601,
2603,
2605
],
[
2607
],
[
2609
],
[
2611
],
[
2615
],
[
2617,
2619,
2621,
2623
],
[
2625
],
[
2627,
2629
],
[
2631
],
[
2633
],
[
2635
],
[
2637
],
[
2639
],
[
2643
],
[
2645
],
[
2649
],
[
2651,
2653,
2655,
2659
],
[
2661
],
[
2663
],
[
2665
],
[
2667
],
[
2669
],
[
2673,
2675,
2677
],
[
2679
],
[
2681
],
[
2683
],
[
2685
],
[
2687
],
[
2689
],
[
2693
],
[
2695,
2699
],
[
2701,
2703
],
[
2705
],
[
2707,
2711
],
[
2713
],
[
2715,
2719
],
[
2721
],
[
2723
],
[
2725
],
[
2727
],
[
2729
],
[
2731
],
[
2733
],
[
2735
],
[
2737
],
[
2739
],
[
2741
],
[
2743
],
[
2745
],
[
2747
],
[
2749
],
[
2751
],
[
2753
],
[
2755
],
[
2757,
2759
],
[
2761
],
[
2763
],
[
2765
],
[
2767
],
[
2769
],
[
2771
],
[
2773
],
[
2775
],
[
2777
],
[
2779
],
[
2781
],
[
2783,
2785,
2787
],
[
2789
],
[
2791
],
[
2795
],
[
2797
],
[
2799
],
[
2801,
2805
],
[
2807,
2809
],
[
2811
],
[
2815
],
[
2817
],
[
2819
],
[
2821
],
[
2823,
2827,
2829
],
[
2831
],
[
2833
],
[
2835
],
[
2839
],
[
2841
],
[
2845
],
[
2847
],
[
2849
],
[
2851,
2853
],
[
2855
],
[
2857
],
[
2859
],
[
2861
],
[
2863
],
[
2865
],
[
2867
],
[
2869
],
[
2871
]
] |
21,258 | void kvmppc_check_papr_resize_hpt(Error **errp)
{
if (!kvm_enabled()) {
return;
}
/* TODO: Check for resize-capable KVM implementations */
error_setg(errp,
"Hash page table resizing not available with this KVM version");
}
| false | qemu | b55d295e3ec98e46f5b39d50e4a3a9725b4289b3 | void kvmppc_check_papr_resize_hpt(Error **errp)
{
if (!kvm_enabled()) {
return;
}
error_setg(errp,
"Hash page table resizing not available with this KVM version");
}
| {
"code": [],
"line_no": []
} | void FUNC_0(Error **VAR_0)
{
if (!kvm_enabled()) {
return;
}
error_setg(VAR_0,
"Hash page table resizing not available with this KVM version");
}
| [
"void FUNC_0(Error **VAR_0)\n{",
"if (!kvm_enabled()) {",
"return;",
"}",
"error_setg(VAR_0,\n\"Hash page table resizing not available with this KVM version\");",
"}"
] | [
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
17,
19
],
[
21
]
] |
21,259 | static void v9fs_walk(void *opaque)
{
int name_idx;
V9fsQID *qids = NULL;
int i, err = 0;
V9fsPath dpath, path;
uint16_t nwnames;
struct stat stbuf;
size_t offset = 7;
int32_t fid, newfid;
V9fsString *wnames = NULL;
V9fsFidState *fidp;
V9fsFidState *newfidp = NULL;
V9fsPDU *pdu = opaque;
V9fsState *s = pdu->s;
offset += pdu_unmarshal(pdu, offset, "ddw", &fid,
&newfid, &nwnames);
trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames);
if (nwnames && nwnames <= P9_MAXWELEM) {
wnames = g_malloc0(sizeof(wnames[0]) * nwnames);
qids = g_malloc0(sizeof(qids[0]) * nwnames);
for (i = 0; i < nwnames; i++) {
offset += pdu_unmarshal(pdu, offset, "s", &wnames[i]);
}
} else if (nwnames > P9_MAXWELEM) {
err = -EINVAL;
goto out_nofid;
}
fidp = get_fid(pdu, fid);
if (fidp == NULL) {
err = -ENOENT;
goto out_nofid;
}
v9fs_path_init(&dpath);
v9fs_path_init(&path);
/*
     * Both dpath and path initially point to fidp.
* Needed to handle request with nwnames == 0
*/
v9fs_path_copy(&dpath, &fidp->path);
v9fs_path_copy(&path, &fidp->path);
for (name_idx = 0; name_idx < nwnames; name_idx++) {
err = v9fs_co_name_to_path(pdu, &dpath, wnames[name_idx].data, &path);
if (err < 0) {
goto out;
}
err = v9fs_co_lstat(pdu, &path, &stbuf);
if (err < 0) {
goto out;
}
stat_to_qid(&stbuf, &qids[name_idx]);
v9fs_path_copy(&dpath, &path);
}
if (fid == newfid) {
BUG_ON(fidp->fid_type != P9_FID_NONE);
v9fs_path_copy(&fidp->path, &path);
} else {
newfidp = alloc_fid(s, newfid);
if (newfidp == NULL) {
err = -EINVAL;
goto out;
}
newfidp->uid = fidp->uid;
v9fs_path_copy(&newfidp->path, &path);
}
err = v9fs_walk_marshal(pdu, nwnames, qids);
trace_v9fs_walk_return(pdu->tag, pdu->id, nwnames, qids);
out:
put_fid(pdu, fidp);
if (newfidp) {
put_fid(pdu, newfidp);
}
v9fs_path_free(&dpath);
v9fs_path_free(&path);
out_nofid:
complete_pdu(s, pdu, err);
if (nwnames && nwnames <= P9_MAXWELEM) {
for (name_idx = 0; name_idx < nwnames; name_idx++) {
v9fs_string_free(&wnames[name_idx]);
}
g_free(wnames);
g_free(qids);
}
return;
}
| false | qemu | ddca7f86ac022289840e0200fd4050b2b58e9176 | static void v9fs_walk(void *opaque)
{
int name_idx;
V9fsQID *qids = NULL;
int i, err = 0;
V9fsPath dpath, path;
uint16_t nwnames;
struct stat stbuf;
size_t offset = 7;
int32_t fid, newfid;
V9fsString *wnames = NULL;
V9fsFidState *fidp;
V9fsFidState *newfidp = NULL;
V9fsPDU *pdu = opaque;
V9fsState *s = pdu->s;
offset += pdu_unmarshal(pdu, offset, "ddw", &fid,
&newfid, &nwnames);
trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames);
if (nwnames && nwnames <= P9_MAXWELEM) {
wnames = g_malloc0(sizeof(wnames[0]) * nwnames);
qids = g_malloc0(sizeof(qids[0]) * nwnames);
for (i = 0; i < nwnames; i++) {
offset += pdu_unmarshal(pdu, offset, "s", &wnames[i]);
}
} else if (nwnames > P9_MAXWELEM) {
err = -EINVAL;
goto out_nofid;
}
fidp = get_fid(pdu, fid);
if (fidp == NULL) {
err = -ENOENT;
goto out_nofid;
}
v9fs_path_init(&dpath);
v9fs_path_init(&path);
v9fs_path_copy(&dpath, &fidp->path);
v9fs_path_copy(&path, &fidp->path);
for (name_idx = 0; name_idx < nwnames; name_idx++) {
err = v9fs_co_name_to_path(pdu, &dpath, wnames[name_idx].data, &path);
if (err < 0) {
goto out;
}
err = v9fs_co_lstat(pdu, &path, &stbuf);
if (err < 0) {
goto out;
}
stat_to_qid(&stbuf, &qids[name_idx]);
v9fs_path_copy(&dpath, &path);
}
if (fid == newfid) {
BUG_ON(fidp->fid_type != P9_FID_NONE);
v9fs_path_copy(&fidp->path, &path);
} else {
newfidp = alloc_fid(s, newfid);
if (newfidp == NULL) {
err = -EINVAL;
goto out;
}
newfidp->uid = fidp->uid;
v9fs_path_copy(&newfidp->path, &path);
}
err = v9fs_walk_marshal(pdu, nwnames, qids);
trace_v9fs_walk_return(pdu->tag, pdu->id, nwnames, qids);
out:
put_fid(pdu, fidp);
if (newfidp) {
put_fid(pdu, newfidp);
}
v9fs_path_free(&dpath);
v9fs_path_free(&path);
out_nofid:
complete_pdu(s, pdu, err);
if (nwnames && nwnames <= P9_MAXWELEM) {
for (name_idx = 0; name_idx < nwnames; name_idx++) {
v9fs_string_free(&wnames[name_idx]);
}
g_free(wnames);
g_free(qids);
}
return;
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(void *VAR_0)
{
int VAR_1;
V9fsQID *qids = NULL;
int VAR_2, VAR_3 = 0;
V9fsPath dpath, path;
uint16_t nwnames;
struct stat VAR_4;
size_t offset = 7;
int32_t fid, newfid;
V9fsString *wnames = NULL;
V9fsFidState *fidp;
V9fsFidState *newfidp = NULL;
V9fsPDU *pdu = VAR_0;
V9fsState *s = pdu->s;
offset += pdu_unmarshal(pdu, offset, "ddw", &fid,
&newfid, &nwnames);
trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames);
if (nwnames && nwnames <= P9_MAXWELEM) {
wnames = g_malloc0(sizeof(wnames[0]) * nwnames);
qids = g_malloc0(sizeof(qids[0]) * nwnames);
for (VAR_2 = 0; VAR_2 < nwnames; VAR_2++) {
offset += pdu_unmarshal(pdu, offset, "s", &wnames[VAR_2]);
}
} else if (nwnames > P9_MAXWELEM) {
VAR_3 = -EINVAL;
goto out_nofid;
}
fidp = get_fid(pdu, fid);
if (fidp == NULL) {
VAR_3 = -ENOENT;
goto out_nofid;
}
v9fs_path_init(&dpath);
v9fs_path_init(&path);
v9fs_path_copy(&dpath, &fidp->path);
v9fs_path_copy(&path, &fidp->path);
for (VAR_1 = 0; VAR_1 < nwnames; VAR_1++) {
VAR_3 = v9fs_co_name_to_path(pdu, &dpath, wnames[VAR_1].data, &path);
if (VAR_3 < 0) {
goto out;
}
VAR_3 = v9fs_co_lstat(pdu, &path, &VAR_4);
if (VAR_3 < 0) {
goto out;
}
stat_to_qid(&VAR_4, &qids[VAR_1]);
v9fs_path_copy(&dpath, &path);
}
if (fid == newfid) {
BUG_ON(fidp->fid_type != P9_FID_NONE);
v9fs_path_copy(&fidp->path, &path);
} else {
newfidp = alloc_fid(s, newfid);
if (newfidp == NULL) {
VAR_3 = -EINVAL;
goto out;
}
newfidp->uid = fidp->uid;
v9fs_path_copy(&newfidp->path, &path);
}
VAR_3 = v9fs_walk_marshal(pdu, nwnames, qids);
trace_v9fs_walk_return(pdu->tag, pdu->id, nwnames, qids);
out:
put_fid(pdu, fidp);
if (newfidp) {
put_fid(pdu, newfidp);
}
v9fs_path_free(&dpath);
v9fs_path_free(&path);
out_nofid:
complete_pdu(s, pdu, VAR_3);
if (nwnames && nwnames <= P9_MAXWELEM) {
for (VAR_1 = 0; VAR_1 < nwnames; VAR_1++) {
v9fs_string_free(&wnames[VAR_1]);
}
g_free(wnames);
g_free(qids);
}
return;
}
| [
"static void FUNC_0(void *VAR_0)\n{",
"int VAR_1;",
"V9fsQID *qids = NULL;",
"int VAR_2, VAR_3 = 0;",
"V9fsPath dpath, path;",
"uint16_t nwnames;",
"struct stat VAR_4;",
"size_t offset = 7;",
"int32_t fid, newfid;",
"V9fsString *wnames = NULL;",
"V9fsFidState *fidp;",
"V9fsFidState *newfidp = NULL;",
"V9fsPDU *pdu = VAR_0;",
"V9fsState *s = pdu->s;",
"offset += pdu_unmarshal(pdu, offset, \"ddw\", &fid,\n&newfid, &nwnames);",
"trace_v9fs_walk(pdu->tag, pdu->id, fid, newfid, nwnames);",
"if (nwnames && nwnames <= P9_MAXWELEM) {",
"wnames = g_malloc0(sizeof(wnames[0]) * nwnames);",
"qids = g_malloc0(sizeof(qids[0]) * nwnames);",
"for (VAR_2 = 0; VAR_2 < nwnames; VAR_2++) {",
"offset += pdu_unmarshal(pdu, offset, \"s\", &wnames[VAR_2]);",
"}",
"} else if (nwnames > P9_MAXWELEM) {",
"VAR_3 = -EINVAL;",
"goto out_nofid;",
"}",
"fidp = get_fid(pdu, fid);",
"if (fidp == NULL) {",
"VAR_3 = -ENOENT;",
"goto out_nofid;",
"}",
"v9fs_path_init(&dpath);",
"v9fs_path_init(&path);",
"v9fs_path_copy(&dpath, &fidp->path);",
"v9fs_path_copy(&path, &fidp->path);",
"for (VAR_1 = 0; VAR_1 < nwnames; VAR_1++) {",
"VAR_3 = v9fs_co_name_to_path(pdu, &dpath, wnames[VAR_1].data, &path);",
"if (VAR_3 < 0) {",
"goto out;",
"}",
"VAR_3 = v9fs_co_lstat(pdu, &path, &VAR_4);",
"if (VAR_3 < 0) {",
"goto out;",
"}",
"stat_to_qid(&VAR_4, &qids[VAR_1]);",
"v9fs_path_copy(&dpath, &path);",
"}",
"if (fid == newfid) {",
"BUG_ON(fidp->fid_type != P9_FID_NONE);",
"v9fs_path_copy(&fidp->path, &path);",
"} else {",
"newfidp = alloc_fid(s, newfid);",
"if (newfidp == NULL) {",
"VAR_3 = -EINVAL;",
"goto out;",
"}",
"newfidp->uid = fidp->uid;",
"v9fs_path_copy(&newfidp->path, &path);",
"}",
"VAR_3 = v9fs_walk_marshal(pdu, nwnames, qids);",
"trace_v9fs_walk_return(pdu->tag, pdu->id, nwnames, qids);",
"out:\nput_fid(pdu, fidp);",
"if (newfidp) {",
"put_fid(pdu, newfidp);",
"}",
"v9fs_path_free(&dpath);",
"v9fs_path_free(&path);",
"out_nofid:\ncomplete_pdu(s, pdu, VAR_3);",
"if (nwnames && nwnames <= P9_MAXWELEM) {",
"for (VAR_1 = 0; VAR_1 < nwnames; VAR_1++) {",
"v9fs_string_free(&wnames[VAR_1]);",
"}",
"g_free(wnames);",
"g_free(qids);",
"}",
"return;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
33,
35
],
[
39
],
[
43
],
[
45
],
[
47
],
[
49
],
[
51
],
[
53
],
[
55
],
[
57
],
[
59
],
[
61
],
[
63
],
[
65
],
[
67
],
[
69
],
[
71
],
[
73
],
[
75
],
[
85
],
[
87
],
[
89
],
[
91
],
[
93
],
[
95
],
[
97
],
[
99
],
[
101
],
[
103
],
[
105
],
[
107
],
[
109
],
[
111
],
[
113
],
[
115
],
[
117
],
[
119
],
[
121
],
[
123
],
[
125
],
[
127
],
[
129
],
[
131
],
[
133
],
[
135
],
[
137
],
[
139
],
[
141,
143
],
[
145
],
[
147
],
[
149
],
[
151
],
[
153
],
[
155,
157
],
[
159
],
[
161
],
[
163
],
[
165
],
[
167
],
[
169
],
[
171
],
[
173
],
[
175
]
] |
21,260 | static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t pt_entry,
target_ulong *raddr, int *flags, int rw, bool exc)
{
if (pt_entry & _PAGE_INVALID) {
DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, pt_entry);
trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw, exc);
return -1;
}
if (pt_entry & _PAGE_RO) {
*flags &= ~PAGE_WRITE;
}
*raddr = pt_entry & _ASCE_ORIGIN;
PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, pt_entry);
return 0;
}
| false | qemu | b4ecbf8071022a2042624baaff78cab2bf7e94af | static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
uint64_t asc, uint64_t pt_entry,
target_ulong *raddr, int *flags, int rw, bool exc)
{
if (pt_entry & _PAGE_INVALID) {
DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, pt_entry);
trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw, exc);
return -1;
}
if (pt_entry & _PAGE_RO) {
*flags &= ~PAGE_WRITE;
}
*raddr = pt_entry & _ASCE_ORIGIN;
PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, pt_entry);
return 0;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(CPUS390XState *VAR_0, target_ulong VAR_1,
uint64_t VAR_2, uint64_t VAR_3,
target_ulong *VAR_4, int *VAR_5, int VAR_6, bool VAR_7)
{
if (VAR_3 & _PAGE_INVALID) {
DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, VAR_3);
trigger_page_fault(VAR_0, VAR_1, PGM_PAGE_TRANS, VAR_2, VAR_6, VAR_7);
return -1;
}
if (VAR_3 & _PAGE_RO) {
*VAR_5 &= ~PAGE_WRITE;
}
*VAR_4 = VAR_3 & _ASCE_ORIGIN;
PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, VAR_3);
return 0;
}
| [
"static int FUNC_0(CPUS390XState *VAR_0, target_ulong VAR_1,\nuint64_t VAR_2, uint64_t VAR_3,\ntarget_ulong *VAR_4, int *VAR_5, int VAR_6, bool VAR_7)\n{",
"if (VAR_3 & _PAGE_INVALID) {",
"DPRINTF(\"%s: PTE=0x%\" PRIx64 \" invalid\\n\", __func__, VAR_3);",
"trigger_page_fault(VAR_0, VAR_1, PGM_PAGE_TRANS, VAR_2, VAR_6, VAR_7);",
"return -1;",
"}",
"if (VAR_3 & _PAGE_RO) {",
"*VAR_5 &= ~PAGE_WRITE;",
"}",
"*VAR_4 = VAR_3 & _ASCE_ORIGIN;",
"PTE_DPRINTF(\"%s: PTE=0x%\" PRIx64 \"\\n\", __func__, VAR_3);",
"return 0;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
21
],
[
23
],
[
25
],
[
29
],
[
33
],
[
37
],
[
39
]
] |
21,262 | static void dbdma_cmdptr_load(DBDMA_channel *ch)
{
DBDMA_DPRINTF("dbdma_cmdptr_load 0x%08x\n",
be32_to_cpu(ch->regs[DBDMA_CMDPTR_LO]));
cpu_physical_memory_read(be32_to_cpu(ch->regs[DBDMA_CMDPTR_LO]),
(uint8_t*)&ch->current, sizeof(dbdma_cmd));
}
| false | qemu | ad674e53b5cce265fadafbde2c6a4f190345cd00 | static void dbdma_cmdptr_load(DBDMA_channel *ch)
{
DBDMA_DPRINTF("dbdma_cmdptr_load 0x%08x\n",
be32_to_cpu(ch->regs[DBDMA_CMDPTR_LO]));
cpu_physical_memory_read(be32_to_cpu(ch->regs[DBDMA_CMDPTR_LO]),
(uint8_t*)&ch->current, sizeof(dbdma_cmd));
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(DBDMA_channel *VAR_0)
{
DBDMA_DPRINTF("FUNC_0 0x%08x\n",
be32_to_cpu(VAR_0->regs[DBDMA_CMDPTR_LO]));
cpu_physical_memory_read(be32_to_cpu(VAR_0->regs[DBDMA_CMDPTR_LO]),
(uint8_t*)&VAR_0->current, sizeof(dbdma_cmd));
}
| [
"static void FUNC_0(DBDMA_channel *VAR_0)\n{",
"DBDMA_DPRINTF(\"FUNC_0 0x%08x\\n\",\nbe32_to_cpu(VAR_0->regs[DBDMA_CMDPTR_LO]));",
"cpu_physical_memory_read(be32_to_cpu(VAR_0->regs[DBDMA_CMDPTR_LO]),\n(uint8_t*)&VAR_0->current, sizeof(dbdma_cmd));",
"}"
] | [
0,
0,
0,
0
] | [
[
1,
3
],
[
5,
7
],
[
9,
11
],
[
13
]
] |
21,263 | static int img_check(int argc, char **argv)
{
int c, ret;
const char *filename, *fmt;
BlockDriverState *bs;
BdrvCheckResult result;
int fix = 0;
int flags = BDRV_O_FLAGS;
fmt = NULL;
for(;;) {
c = getopt(argc, argv, "f:hr:");
if (c == -1) {
break;
}
switch(c) {
case '?':
case 'h':
help();
break;
case 'f':
fmt = optarg;
break;
case 'r':
flags |= BDRV_O_RDWR;
if (!strcmp(optarg, "leaks")) {
fix = BDRV_FIX_LEAKS;
} else if (!strcmp(optarg, "all")) {
fix = BDRV_FIX_LEAKS | BDRV_FIX_ERRORS;
} else {
help();
}
break;
}
}
if (optind >= argc) {
help();
}
filename = argv[optind++];
bs = bdrv_new_open(filename, fmt, flags);
if (!bs) {
return 1;
}
ret = bdrv_check(bs, &result, fix);
if (ret == -ENOTSUP) {
error_report("This image format does not support checks");
bdrv_delete(bs);
return 1;
}
if (result.corruptions_fixed || result.leaks_fixed) {
printf("The following inconsistencies were found and repaired:\n\n"
" %d leaked clusters\n"
" %d corruptions\n\n"
"Double checking the fixed image now...\n",
result.leaks_fixed,
result.corruptions_fixed);
ret = bdrv_check(bs, &result, 0);
}
if (!(result.corruptions || result.leaks || result.check_errors)) {
printf("No errors were found on the image.\n");
} else {
if (result.corruptions) {
printf("\n%d errors were found on the image.\n"
"Data may be corrupted, or further writes to the image "
"may corrupt it.\n",
result.corruptions);
}
if (result.leaks) {
printf("\n%d leaked clusters were found on the image.\n"
"This means waste of disk space, but no harm to data.\n",
result.leaks);
}
if (result.check_errors) {
printf("\n%d internal errors have occurred during the check.\n",
result.check_errors);
}
}
if (result.bfi.total_clusters != 0 && result.bfi.allocated_clusters != 0) {
printf("%" PRId64 "/%" PRId64 "= %0.2f%% allocated, %0.2f%% fragmented\n",
result.bfi.allocated_clusters, result.bfi.total_clusters,
result.bfi.allocated_clusters * 100.0 / result.bfi.total_clusters,
result.bfi.fragmented_clusters * 100.0 / result.bfi.allocated_clusters);
}
bdrv_delete(bs);
if (ret < 0 || result.check_errors) {
printf("\nAn error has occurred during the check: %s\n"
"The check is not complete and may have missed error.\n",
strerror(-ret));
return 1;
}
if (result.corruptions) {
return 2;
} else if (result.leaks) {
return 3;
} else {
return 0;
}
}
| false | qemu | 058f8f16db0c1c528b665a6283457f019c8b0926 | static int img_check(int argc, char **argv)
{
int c, ret;
const char *filename, *fmt;
BlockDriverState *bs;
BdrvCheckResult result;
int fix = 0;
int flags = BDRV_O_FLAGS;
fmt = NULL;
for(;;) {
c = getopt(argc, argv, "f:hr:");
if (c == -1) {
break;
}
switch(c) {
case '?':
case 'h':
help();
break;
case 'f':
fmt = optarg;
break;
case 'r':
flags |= BDRV_O_RDWR;
if (!strcmp(optarg, "leaks")) {
fix = BDRV_FIX_LEAKS;
} else if (!strcmp(optarg, "all")) {
fix = BDRV_FIX_LEAKS | BDRV_FIX_ERRORS;
} else {
help();
}
break;
}
}
if (optind >= argc) {
help();
}
filename = argv[optind++];
bs = bdrv_new_open(filename, fmt, flags);
if (!bs) {
return 1;
}
ret = bdrv_check(bs, &result, fix);
if (ret == -ENOTSUP) {
error_report("This image format does not support checks");
bdrv_delete(bs);
return 1;
}
if (result.corruptions_fixed || result.leaks_fixed) {
printf("The following inconsistencies were found and repaired:\n\n"
" %d leaked clusters\n"
" %d corruptions\n\n"
"Double checking the fixed image now...\n",
result.leaks_fixed,
result.corruptions_fixed);
ret = bdrv_check(bs, &result, 0);
}
if (!(result.corruptions || result.leaks || result.check_errors)) {
printf("No errors were found on the image.\n");
} else {
if (result.corruptions) {
printf("\n%d errors were found on the image.\n"
"Data may be corrupted, or further writes to the image "
"may corrupt it.\n",
result.corruptions);
}
if (result.leaks) {
printf("\n%d leaked clusters were found on the image.\n"
"This means waste of disk space, but no harm to data.\n",
result.leaks);
}
if (result.check_errors) {
printf("\n%d internal errors have occurred during the check.\n",
result.check_errors);
}
}
if (result.bfi.total_clusters != 0 && result.bfi.allocated_clusters != 0) {
printf("%" PRId64 "/%" PRId64 "= %0.2f%% allocated, %0.2f%% fragmented\n",
result.bfi.allocated_clusters, result.bfi.total_clusters,
result.bfi.allocated_clusters * 100.0 / result.bfi.total_clusters,
result.bfi.fragmented_clusters * 100.0 / result.bfi.allocated_clusters);
}
bdrv_delete(bs);
if (ret < 0 || result.check_errors) {
printf("\nAn error has occurred during the check: %s\n"
"The check is not complete and may have missed error.\n",
strerror(-ret));
return 1;
}
if (result.corruptions) {
return 2;
} else if (result.leaks) {
return 3;
} else {
return 0;
}
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(int VAR_0, char **VAR_1)
{
int VAR_2, VAR_3;
const char *VAR_4, *VAR_5;
BlockDriverState *bs;
BdrvCheckResult result;
int VAR_6 = 0;
int VAR_7 = BDRV_O_FLAGS;
VAR_5 = NULL;
for(;;) {
VAR_2 = getopt(VAR_0, VAR_1, "f:hr:");
if (VAR_2 == -1) {
break;
}
switch(VAR_2) {
case '?':
case 'h':
help();
break;
case 'f':
VAR_5 = optarg;
break;
case 'r':
VAR_7 |= BDRV_O_RDWR;
if (!strcmp(optarg, "leaks")) {
VAR_6 = BDRV_FIX_LEAKS;
} else if (!strcmp(optarg, "all")) {
VAR_6 = BDRV_FIX_LEAKS | BDRV_FIX_ERRORS;
} else {
help();
}
break;
}
}
if (optind >= VAR_0) {
help();
}
VAR_4 = VAR_1[optind++];
bs = bdrv_new_open(VAR_4, VAR_5, VAR_7);
if (!bs) {
return 1;
}
VAR_3 = bdrv_check(bs, &result, VAR_6);
if (VAR_3 == -ENOTSUP) {
error_report("This image format does not support checks");
bdrv_delete(bs);
return 1;
}
if (result.corruptions_fixed || result.leaks_fixed) {
printf("The following inconsistencies were found and repaired:\n\n"
" %d leaked clusters\n"
" %d corruptions\n\n"
"Double checking the fixed image now...\n",
result.leaks_fixed,
result.corruptions_fixed);
VAR_3 = bdrv_check(bs, &result, 0);
}
if (!(result.corruptions || result.leaks || result.check_errors)) {
printf("No errors were found on the image.\n");
} else {
if (result.corruptions) {
printf("\n%d errors were found on the image.\n"
"Data may be corrupted, or further writes to the image "
"may corrupt it.\n",
result.corruptions);
}
if (result.leaks) {
printf("\n%d leaked clusters were found on the image.\n"
"This means waste of disk space, but no harm to data.\n",
result.leaks);
}
if (result.check_errors) {
printf("\n%d internal errors have occurred during the check.\n",
result.check_errors);
}
}
if (result.bfi.total_clusters != 0 && result.bfi.allocated_clusters != 0) {
printf("%" PRId64 "/%" PRId64 "= %0.2f%% allocated, %0.2f%% fragmented\n",
result.bfi.allocated_clusters, result.bfi.total_clusters,
result.bfi.allocated_clusters * 100.0 / result.bfi.total_clusters,
result.bfi.fragmented_clusters * 100.0 / result.bfi.allocated_clusters);
}
bdrv_delete(bs);
if (VAR_3 < 0 || result.check_errors) {
printf("\nAn error has occurred during the check: %s\n"
"The check is not complete and may have missed error.\n",
strerror(-VAR_3));
return 1;
}
if (result.corruptions) {
return 2;
} else if (result.leaks) {
return 3;
} else {
return 0;
}
}
| [
"static int FUNC_0(int VAR_0, char **VAR_1)\n{",
"int VAR_2, VAR_3;",
"const char *VAR_4, *VAR_5;",
"BlockDriverState *bs;",
"BdrvCheckResult result;",
"int VAR_6 = 0;",
"int VAR_7 = BDRV_O_FLAGS;",
"VAR_5 = NULL;",
"for(;;) {",
"VAR_2 = getopt(VAR_0, VAR_1, \"f:hr:\");",
"if (VAR_2 == -1) {",
"break;",
"}",
"switch(VAR_2) {",
"case '?':\ncase 'h':\nhelp();",
"break;",
"case 'f':\nVAR_5 = optarg;",
"break;",
"case 'r':\nVAR_7 |= BDRV_O_RDWR;",
"if (!strcmp(optarg, \"leaks\")) {",
"VAR_6 = BDRV_FIX_LEAKS;",
"} else if (!strcmp(optarg, \"all\")) {",
"VAR_6 = BDRV_FIX_LEAKS | BDRV_FIX_ERRORS;",
"} else {",
"help();",
"}",
"break;",
"}",
"}",
"if (optind >= VAR_0) {",
"help();",
"}",
"VAR_4 = VAR_1[optind++];",
"bs = bdrv_new_open(VAR_4, VAR_5, VAR_7);",
"if (!bs) {",
"return 1;",
"}",
"VAR_3 = bdrv_check(bs, &result, VAR_6);",
"if (VAR_3 == -ENOTSUP) {",
"error_report(\"This image format does not support checks\");",
"bdrv_delete(bs);",
"return 1;",
"}",
"if (result.corruptions_fixed || result.leaks_fixed) {",
"printf(\"The following inconsistencies were found and repaired:\\n\\n\"\n\" %d leaked clusters\\n\"\n\" %d corruptions\\n\\n\"\n\"Double checking the fixed image now...\\n\",\nresult.leaks_fixed,\nresult.corruptions_fixed);",
"VAR_3 = bdrv_check(bs, &result, 0);",
"}",
"if (!(result.corruptions || result.leaks || result.check_errors)) {",
"printf(\"No errors were found on the image.\\n\");",
"} else {",
"if (result.corruptions) {",
"printf(\"\\n%d errors were found on the image.\\n\"\n\"Data may be corrupted, or further writes to the image \"\n\"may corrupt it.\\n\",\nresult.corruptions);",
"}",
"if (result.leaks) {",
"printf(\"\\n%d leaked clusters were found on the image.\\n\"\n\"This means waste of disk space, but no harm to data.\\n\",\nresult.leaks);",
"}",
"if (result.check_errors) {",
"printf(\"\\n%d internal errors have occurred during the check.\\n\",\nresult.check_errors);",
"}",
"}",
"if (result.bfi.total_clusters != 0 && result.bfi.allocated_clusters != 0) {",
"printf(\"%\" PRId64 \"/%\" PRId64 \"= %0.2f%% allocated, %0.2f%% fragmented\\n\",\nresult.bfi.allocated_clusters, result.bfi.total_clusters,\nresult.bfi.allocated_clusters * 100.0 / result.bfi.total_clusters,\nresult.bfi.fragmented_clusters * 100.0 / result.bfi.allocated_clusters);",
"}",
"bdrv_delete(bs);",
"if (VAR_3 < 0 || result.check_errors) {",
"printf(\"\\nAn error has occurred during the check: %s\\n\"\n\"The check is not complete and may have missed error.\\n\",\nstrerror(-VAR_3));",
"return 1;",
"}",
"if (result.corruptions) {",
"return 2;",
"} else if (result.leaks) {",
"return 3;",
"} else {",
"return 0;",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33,
35,
37
],
[
39
],
[
41,
43
],
[
45
],
[
47,
49
],
[
53
],
[
55
],
[
57
],
[
59
],
[
61
],
[
63
],
[
65
],
[
67
],
[
69
],
[
71
],
[
73
],
[
75
],
[
77
],
[
79
],
[
83
],
[
85
],
[
87
],
[
89
],
[
91
],
[
95
],
[
97
],
[
99
],
[
101
],
[
103
],
[
107
],
[
109,
111,
113,
115,
117,
119
],
[
121
],
[
123
],
[
127
],
[
129
],
[
131
],
[
133
],
[
135,
137,
139,
141
],
[
143
],
[
147
],
[
149,
151,
153
],
[
155
],
[
159
],
[
161,
163
],
[
165
],
[
167
],
[
171
],
[
173,
175,
177,
179
],
[
181
],
[
185
],
[
189
],
[
191,
193,
195
],
[
197
],
[
199
],
[
203
],
[
205
],
[
207
],
[
209
],
[
211
],
[
213
],
[
215
],
[
217
]
] |
21,264 | static void decode_profile_tier_level(GetBitContext *gb, AVCodecContext *avctx,
PTLCommon *ptl)
{
int i;
ptl->profile_space = get_bits(gb, 2);
ptl->tier_flag = get_bits1(gb);
ptl->profile_idc = get_bits(gb, 5);
if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN)
av_log(avctx, AV_LOG_DEBUG, "Main profile bitstream\n");
else if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN_10)
av_log(avctx, AV_LOG_DEBUG, "Main 10 profile bitstream\n");
else if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN_STILL_PICTURE)
av_log(avctx, AV_LOG_DEBUG, "Main Still Picture profile bitstream\n");
else
av_log(avctx, AV_LOG_WARNING, "Unknown HEVC profile: %d\n", ptl->profile_idc);
for (i = 0; i < 32; i++)
ptl->profile_compatibility_flag[i] = get_bits1(gb);
ptl->progressive_source_flag = get_bits1(gb);
ptl->interlaced_source_flag = get_bits1(gb);
ptl->non_packed_constraint_flag = get_bits1(gb);
ptl->frame_only_constraint_flag = get_bits1(gb);
skip_bits(gb, 16); // XXX_reserved_zero_44bits[0..15]
skip_bits(gb, 16); // XXX_reserved_zero_44bits[16..31]
skip_bits(gb, 12); // XXX_reserved_zero_44bits[32..43]
}
| false | FFmpeg | 1ecb63cd1c1a4ddc5efed4abbc3158b969d8c5e4 | static void decode_profile_tier_level(GetBitContext *gb, AVCodecContext *avctx,
PTLCommon *ptl)
{
int i;
ptl->profile_space = get_bits(gb, 2);
ptl->tier_flag = get_bits1(gb);
ptl->profile_idc = get_bits(gb, 5);
if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN)
av_log(avctx, AV_LOG_DEBUG, "Main profile bitstream\n");
else if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN_10)
av_log(avctx, AV_LOG_DEBUG, "Main 10 profile bitstream\n");
else if (ptl->profile_idc == FF_PROFILE_HEVC_MAIN_STILL_PICTURE)
av_log(avctx, AV_LOG_DEBUG, "Main Still Picture profile bitstream\n");
else
av_log(avctx, AV_LOG_WARNING, "Unknown HEVC profile: %d\n", ptl->profile_idc);
for (i = 0; i < 32; i++)
ptl->profile_compatibility_flag[i] = get_bits1(gb);
ptl->progressive_source_flag = get_bits1(gb);
ptl->interlaced_source_flag = get_bits1(gb);
ptl->non_packed_constraint_flag = get_bits1(gb);
ptl->frame_only_constraint_flag = get_bits1(gb);
skip_bits(gb, 16);
skip_bits(gb, 16);
skip_bits(gb, 12);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(GetBitContext *VAR_0, AVCodecContext *VAR_1,
PTLCommon *VAR_2)
{
int VAR_3;
VAR_2->profile_space = get_bits(VAR_0, 2);
VAR_2->tier_flag = get_bits1(VAR_0);
VAR_2->profile_idc = get_bits(VAR_0, 5);
if (VAR_2->profile_idc == FF_PROFILE_HEVC_MAIN)
av_log(VAR_1, AV_LOG_DEBUG, "Main profile bitstream\n");
else if (VAR_2->profile_idc == FF_PROFILE_HEVC_MAIN_10)
av_log(VAR_1, AV_LOG_DEBUG, "Main 10 profile bitstream\n");
else if (VAR_2->profile_idc == FF_PROFILE_HEVC_MAIN_STILL_PICTURE)
av_log(VAR_1, AV_LOG_DEBUG, "Main Still Picture profile bitstream\n");
else
av_log(VAR_1, AV_LOG_WARNING, "Unknown HEVC profile: %d\n", VAR_2->profile_idc);
for (VAR_3 = 0; VAR_3 < 32; VAR_3++)
VAR_2->profile_compatibility_flag[VAR_3] = get_bits1(VAR_0);
VAR_2->progressive_source_flag = get_bits1(VAR_0);
VAR_2->interlaced_source_flag = get_bits1(VAR_0);
VAR_2->non_packed_constraint_flag = get_bits1(VAR_0);
VAR_2->frame_only_constraint_flag = get_bits1(VAR_0);
skip_bits(VAR_0, 16);
skip_bits(VAR_0, 16);
skip_bits(VAR_0, 12);
}
| [
"static void FUNC_0(GetBitContext *VAR_0, AVCodecContext *VAR_1,\nPTLCommon *VAR_2)\n{",
"int VAR_3;",
"VAR_2->profile_space = get_bits(VAR_0, 2);",
"VAR_2->tier_flag = get_bits1(VAR_0);",
"VAR_2->profile_idc = get_bits(VAR_0, 5);",
"if (VAR_2->profile_idc == FF_PROFILE_HEVC_MAIN)\nav_log(VAR_1, AV_LOG_DEBUG, \"Main profile bitstream\\n\");",
"else if (VAR_2->profile_idc == FF_PROFILE_HEVC_MAIN_10)\nav_log(VAR_1, AV_LOG_DEBUG, \"Main 10 profile bitstream\\n\");",
"else if (VAR_2->profile_idc == FF_PROFILE_HEVC_MAIN_STILL_PICTURE)\nav_log(VAR_1, AV_LOG_DEBUG, \"Main Still Picture profile bitstream\\n\");",
"else\nav_log(VAR_1, AV_LOG_WARNING, \"Unknown HEVC profile: %d\\n\", VAR_2->profile_idc);",
"for (VAR_3 = 0; VAR_3 < 32; VAR_3++)",
"VAR_2->profile_compatibility_flag[VAR_3] = get_bits1(VAR_0);",
"VAR_2->progressive_source_flag = get_bits1(VAR_0);",
"VAR_2->interlaced_source_flag = get_bits1(VAR_0);",
"VAR_2->non_packed_constraint_flag = get_bits1(VAR_0);",
"VAR_2->frame_only_constraint_flag = get_bits1(VAR_0);",
"skip_bits(VAR_0, 16);",
"skip_bits(VAR_0, 16);",
"skip_bits(VAR_0, 12);",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
11
],
[
13
],
[
15
],
[
17,
19
],
[
21,
23
],
[
25,
27
],
[
29,
31
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45
],
[
49
],
[
51
],
[
53
],
[
55
]
] |
21,265 | xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s,
XenHostPCIDevice *dev)
{
uint16_t gpu_dev_id;
PCIDevice *d = &s->dev;
gpu_dev_id = dev->device_id;
igd_passthrough_isa_bridge_create(d->bus, gpu_dev_id);
}
| false | qemu | fd56e0612b6454a282fa6a953fdb09281a98c589 | xen_igd_passthrough_isa_bridge_create(XenPCIPassthroughState *s,
XenHostPCIDevice *dev)
{
uint16_t gpu_dev_id;
PCIDevice *d = &s->dev;
gpu_dev_id = dev->device_id;
igd_passthrough_isa_bridge_create(d->bus, gpu_dev_id);
}
| {
"code": [],
"line_no": []
} | FUNC_0(XenPCIPassthroughState *VAR_0,
XenHostPCIDevice *VAR_1)
{
uint16_t gpu_dev_id;
PCIDevice *d = &VAR_0->VAR_1;
gpu_dev_id = VAR_1->device_id;
igd_passthrough_isa_bridge_create(d->bus, gpu_dev_id);
}
| [
"FUNC_0(XenPCIPassthroughState *VAR_0,\nXenHostPCIDevice *VAR_1)\n{",
"uint16_t gpu_dev_id;",
"PCIDevice *d = &VAR_0->VAR_1;",
"gpu_dev_id = VAR_1->device_id;",
"igd_passthrough_isa_bridge_create(d->bus, gpu_dev_id);",
"}"
] | [
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
]
] |
21,266 | qemu_irq spapr_allocate_irq(uint32_t hint, uint32_t *irq_num,
enum xics_irq_type type)
{
uint32_t irq;
qemu_irq qirq;
if (hint) {
irq = hint;
/* FIXME: we should probably check for collisions somehow */
} else {
irq = spapr->next_irq++;
}
qirq = xics_assign_irq(spapr->icp, irq, type);
if (!qirq) {
return NULL;
}
if (irq_num) {
*irq_num = irq;
}
return qirq;
}
| false | qemu | a307d59434ba78b97544b42b8cfd24a1b62e39a6 | qemu_irq spapr_allocate_irq(uint32_t hint, uint32_t *irq_num,
enum xics_irq_type type)
{
uint32_t irq;
qemu_irq qirq;
if (hint) {
irq = hint;
} else {
irq = spapr->next_irq++;
}
qirq = xics_assign_irq(spapr->icp, irq, type);
if (!qirq) {
return NULL;
}
if (irq_num) {
*irq_num = irq;
}
return qirq;
}
| {
"code": [],
"line_no": []
} | qemu_irq FUNC_0(uint32_t hint, uint32_t *irq_num,
enum xics_irq_type type)
{
uint32_t irq;
qemu_irq qirq;
if (hint) {
irq = hint;
} else {
irq = spapr->next_irq++;
}
qirq = xics_assign_irq(spapr->icp, irq, type);
if (!qirq) {
return NULL;
}
if (irq_num) {
*irq_num = irq;
}
return qirq;
}
| [
"qemu_irq FUNC_0(uint32_t hint, uint32_t *irq_num,\nenum xics_irq_type type)\n{",
"uint32_t irq;",
"qemu_irq qirq;",
"if (hint) {",
"irq = hint;",
"} else {",
"irq = spapr->next_irq++;",
"}",
"qirq = xics_assign_irq(spapr->icp, irq, type);",
"if (!qirq) {",
"return NULL;",
"}",
"if (irq_num) {",
"*irq_num = irq;",
"}",
"return qirq;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
19
],
[
21
],
[
23
],
[
27
],
[
29
],
[
31
],
[
33
],
[
37
],
[
39
],
[
41
],
[
45
],
[
47
]
] |
21,268 | static void omap_mcbsp_write(void *opaque, target_phys_addr_t addr,
uint64_t value, unsigned size)
{
switch (size) {
case 2: return omap_mcbsp_writeh(opaque, addr, value);
case 4: return omap_mcbsp_writew(opaque, addr, value);
default: return omap_badwidth_write16(opaque, addr, value);
}
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | static void omap_mcbsp_write(void *opaque, target_phys_addr_t addr,
uint64_t value, unsigned size)
{
switch (size) {
case 2: return omap_mcbsp_writeh(opaque, addr, value);
case 4: return omap_mcbsp_writew(opaque, addr, value);
default: return omap_badwidth_write16(opaque, addr, value);
}
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(void *VAR_0, target_phys_addr_t VAR_1,
uint64_t VAR_2, unsigned VAR_3)
{
switch (VAR_3) {
case 2: return omap_mcbsp_writeh(VAR_0, VAR_1, VAR_2);
case 4: return omap_mcbsp_writew(VAR_0, VAR_1, VAR_2);
default: return omap_badwidth_write16(VAR_0, VAR_1, VAR_2);
}
}
| [
"static void FUNC_0(void *VAR_0, target_phys_addr_t VAR_1,\nuint64_t VAR_2, unsigned VAR_3)\n{",
"switch (VAR_3) {",
"case 2: return omap_mcbsp_writeh(VAR_0, VAR_1, VAR_2);",
"case 4: return omap_mcbsp_writew(VAR_0, VAR_1, VAR_2);",
"default: return omap_badwidth_write16(VAR_0, VAR_1, VAR_2);",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
]
] |
21,269 | vnc_display_setup_auth(VncDisplay *vs,
bool password,
bool sasl,
bool tls,
bool x509)
{
/*
* We have a choice of 3 authentication options
*
* 1. none
* 2. vnc
* 3. sasl
*
* The channel can be run in 2 modes
*
* 1. clear
* 2. tls
*
* And TLS can use 2 types of credentials
*
* 1. anon
* 2. x509
*
* We thus have 9 possible logical combinations
*
* 1. clear + none
* 2. clear + vnc
* 3. clear + sasl
* 4. tls + anon + none
* 5. tls + anon + vnc
* 6. tls + anon + sasl
* 7. tls + x509 + none
* 8. tls + x509 + vnc
* 9. tls + x509 + sasl
*
* These need to be mapped into the VNC auth schemes
* in an appropriate manner. In regular VNC, all the
* TLS options get mapped into VNC_AUTH_VENCRYPT
* sub-auth types.
*/
if (password) {
if (tls) {
vs->auth = VNC_AUTH_VENCRYPT;
if (x509) {
VNC_DEBUG("Initializing VNC server with x509 password auth\n");
vs->subauth = VNC_AUTH_VENCRYPT_X509VNC;
} else {
VNC_DEBUG("Initializing VNC server with TLS password auth\n");
vs->subauth = VNC_AUTH_VENCRYPT_TLSVNC;
}
} else {
VNC_DEBUG("Initializing VNC server with password auth\n");
vs->auth = VNC_AUTH_VNC;
vs->subauth = VNC_AUTH_INVALID;
}
} else if (sasl) {
if (tls) {
vs->auth = VNC_AUTH_VENCRYPT;
if (x509) {
VNC_DEBUG("Initializing VNC server with x509 SASL auth\n");
vs->subauth = VNC_AUTH_VENCRYPT_X509SASL;
} else {
VNC_DEBUG("Initializing VNC server with TLS SASL auth\n");
vs->subauth = VNC_AUTH_VENCRYPT_TLSSASL;
}
} else {
VNC_DEBUG("Initializing VNC server with SASL auth\n");
vs->auth = VNC_AUTH_SASL;
vs->subauth = VNC_AUTH_INVALID;
}
} else {
if (tls) {
vs->auth = VNC_AUTH_VENCRYPT;
if (x509) {
VNC_DEBUG("Initializing VNC server with x509 no auth\n");
vs->subauth = VNC_AUTH_VENCRYPT_X509NONE;
} else {
VNC_DEBUG("Initializing VNC server with TLS no auth\n");
vs->subauth = VNC_AUTH_VENCRYPT_TLSNONE;
}
} else {
VNC_DEBUG("Initializing VNC server with no auth\n");
vs->auth = VNC_AUTH_NONE;
vs->subauth = VNC_AUTH_INVALID;
}
}
}
| false | qemu | f9148c8ae7b1515776699387b4d59864f302c77d | vnc_display_setup_auth(VncDisplay *vs,
bool password,
bool sasl,
bool tls,
bool x509)
{
if (password) {
if (tls) {
vs->auth = VNC_AUTH_VENCRYPT;
if (x509) {
VNC_DEBUG("Initializing VNC server with x509 password auth\n");
vs->subauth = VNC_AUTH_VENCRYPT_X509VNC;
} else {
VNC_DEBUG("Initializing VNC server with TLS password auth\n");
vs->subauth = VNC_AUTH_VENCRYPT_TLSVNC;
}
} else {
VNC_DEBUG("Initializing VNC server with password auth\n");
vs->auth = VNC_AUTH_VNC;
vs->subauth = VNC_AUTH_INVALID;
}
} else if (sasl) {
if (tls) {
vs->auth = VNC_AUTH_VENCRYPT;
if (x509) {
VNC_DEBUG("Initializing VNC server with x509 SASL auth\n");
vs->subauth = VNC_AUTH_VENCRYPT_X509SASL;
} else {
VNC_DEBUG("Initializing VNC server with TLS SASL auth\n");
vs->subauth = VNC_AUTH_VENCRYPT_TLSSASL;
}
} else {
VNC_DEBUG("Initializing VNC server with SASL auth\n");
vs->auth = VNC_AUTH_SASL;
vs->subauth = VNC_AUTH_INVALID;
}
} else {
if (tls) {
vs->auth = VNC_AUTH_VENCRYPT;
if (x509) {
VNC_DEBUG("Initializing VNC server with x509 no auth\n");
vs->subauth = VNC_AUTH_VENCRYPT_X509NONE;
} else {
VNC_DEBUG("Initializing VNC server with TLS no auth\n");
vs->subauth = VNC_AUTH_VENCRYPT_TLSNONE;
}
} else {
VNC_DEBUG("Initializing VNC server with no auth\n");
vs->auth = VNC_AUTH_NONE;
vs->subauth = VNC_AUTH_INVALID;
}
}
}
| {
"code": [],
"line_no": []
} | FUNC_0(VncDisplay *VAR_0,
bool VAR_1,
bool VAR_2,
bool VAR_3,
bool VAR_4)
{
if (VAR_1) {
if (VAR_3) {
VAR_0->auth = VNC_AUTH_VENCRYPT;
if (VAR_4) {
VNC_DEBUG("Initializing VNC server with VAR_4 VAR_1 auth\n");
VAR_0->subauth = VNC_AUTH_VENCRYPT_X509VNC;
} else {
VNC_DEBUG("Initializing VNC server with TLS VAR_1 auth\n");
VAR_0->subauth = VNC_AUTH_VENCRYPT_TLSVNC;
}
} else {
VNC_DEBUG("Initializing VNC server with VAR_1 auth\n");
VAR_0->auth = VNC_AUTH_VNC;
VAR_0->subauth = VNC_AUTH_INVALID;
}
} else if (VAR_2) {
if (VAR_3) {
VAR_0->auth = VNC_AUTH_VENCRYPT;
if (VAR_4) {
VNC_DEBUG("Initializing VNC server with VAR_4 SASL auth\n");
VAR_0->subauth = VNC_AUTH_VENCRYPT_X509SASL;
} else {
VNC_DEBUG("Initializing VNC server with TLS SASL auth\n");
VAR_0->subauth = VNC_AUTH_VENCRYPT_TLSSASL;
}
} else {
VNC_DEBUG("Initializing VNC server with SASL auth\n");
VAR_0->auth = VNC_AUTH_SASL;
VAR_0->subauth = VNC_AUTH_INVALID;
}
} else {
if (VAR_3) {
VAR_0->auth = VNC_AUTH_VENCRYPT;
if (VAR_4) {
VNC_DEBUG("Initializing VNC server with VAR_4 no auth\n");
VAR_0->subauth = VNC_AUTH_VENCRYPT_X509NONE;
} else {
VNC_DEBUG("Initializing VNC server with TLS no auth\n");
VAR_0->subauth = VNC_AUTH_VENCRYPT_TLSNONE;
}
} else {
VNC_DEBUG("Initializing VNC server with no auth\n");
VAR_0->auth = VNC_AUTH_NONE;
VAR_0->subauth = VNC_AUTH_INVALID;
}
}
}
| [
"FUNC_0(VncDisplay *VAR_0,\nbool VAR_1,\nbool VAR_2,\nbool VAR_3,\nbool VAR_4)\n{",
"if (VAR_1) {",
"if (VAR_3) {",
"VAR_0->auth = VNC_AUTH_VENCRYPT;",
"if (VAR_4) {",
"VNC_DEBUG(\"Initializing VNC server with VAR_4 VAR_1 auth\\n\");",
"VAR_0->subauth = VNC_AUTH_VENCRYPT_X509VNC;",
"} else {",
"VNC_DEBUG(\"Initializing VNC server with TLS VAR_1 auth\\n\");",
"VAR_0->subauth = VNC_AUTH_VENCRYPT_TLSVNC;",
"}",
"} else {",
"VNC_DEBUG(\"Initializing VNC server with VAR_1 auth\\n\");",
"VAR_0->auth = VNC_AUTH_VNC;",
"VAR_0->subauth = VNC_AUTH_INVALID;",
"}",
"} else if (VAR_2) {",
"if (VAR_3) {",
"VAR_0->auth = VNC_AUTH_VENCRYPT;",
"if (VAR_4) {",
"VNC_DEBUG(\"Initializing VNC server with VAR_4 SASL auth\\n\");",
"VAR_0->subauth = VNC_AUTH_VENCRYPT_X509SASL;",
"} else {",
"VNC_DEBUG(\"Initializing VNC server with TLS SASL auth\\n\");",
"VAR_0->subauth = VNC_AUTH_VENCRYPT_TLSSASL;",
"}",
"} else {",
"VNC_DEBUG(\"Initializing VNC server with SASL auth\\n\");",
"VAR_0->auth = VNC_AUTH_SASL;",
"VAR_0->subauth = VNC_AUTH_INVALID;",
"}",
"} else {",
"if (VAR_3) {",
"VAR_0->auth = VNC_AUTH_VENCRYPT;",
"if (VAR_4) {",
"VNC_DEBUG(\"Initializing VNC server with VAR_4 no auth\\n\");",
"VAR_0->subauth = VNC_AUTH_VENCRYPT_X509NONE;",
"} else {",
"VNC_DEBUG(\"Initializing VNC server with TLS no auth\\n\");",
"VAR_0->subauth = VNC_AUTH_VENCRYPT_TLSNONE;",
"}",
"} else {",
"VNC_DEBUG(\"Initializing VNC server with no auth\\n\");",
"VAR_0->auth = VNC_AUTH_NONE;",
"VAR_0->subauth = VNC_AUTH_INVALID;",
"}",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7,
9,
11
],
[
81
],
[
83
],
[
85
],
[
87
],
[
89
],
[
91
],
[
93
],
[
95
],
[
97
],
[
99
],
[
101
],
[
103
],
[
105
],
[
107
],
[
109
],
[
111
],
[
113
],
[
115
],
[
117
],
[
119
],
[
121
],
[
123
],
[
125
],
[
127
],
[
129
],
[
131
],
[
133
],
[
135
],
[
137
],
[
139
],
[
141
],
[
143
],
[
145
],
[
147
],
[
149
],
[
151
],
[
153
],
[
155
],
[
157
],
[
159
],
[
161
],
[
163
],
[
165
],
[
167
],
[
169
],
[
171
],
[
173
]
] |
21,270 | void usb_info(Monitor *mon)
{
USBBus *bus;
USBDevice *dev;
USBPort *port;
if (TAILQ_EMPTY(&busses)) {
monitor_printf(mon, "USB support not enabled\n");
return;
}
TAILQ_FOREACH(bus, &busses, next) {
TAILQ_FOREACH(port, &bus->used, next) {
dev = port->dev;
if (!dev)
continue;
monitor_printf(mon, " Device %d.%d, Speed %s Mb/s, Product %s\n",
bus->busnr, dev->addr, usb_speed(dev->speed), dev->devname);
}
}
}
| false | qemu | 72cf2d4f0e181d0d3a3122e04129c58a95da713e | void usb_info(Monitor *mon)
{
USBBus *bus;
USBDevice *dev;
USBPort *port;
if (TAILQ_EMPTY(&busses)) {
monitor_printf(mon, "USB support not enabled\n");
return;
}
TAILQ_FOREACH(bus, &busses, next) {
TAILQ_FOREACH(port, &bus->used, next) {
dev = port->dev;
if (!dev)
continue;
monitor_printf(mon, " Device %d.%d, Speed %s Mb/s, Product %s\n",
bus->busnr, dev->addr, usb_speed(dev->speed), dev->devname);
}
}
}
| {
"code": [],
"line_no": []
} | void FUNC_0(Monitor *VAR_0)
{
USBBus *bus;
USBDevice *dev;
USBPort *port;
if (TAILQ_EMPTY(&busses)) {
monitor_printf(VAR_0, "USB support not enabled\n");
return;
}
TAILQ_FOREACH(bus, &busses, next) {
TAILQ_FOREACH(port, &bus->used, next) {
dev = port->dev;
if (!dev)
continue;
monitor_printf(VAR_0, " Device %d.%d, Speed %s Mb/s, Product %s\n",
bus->busnr, dev->addr, usb_speed(dev->speed), dev->devname);
}
}
}
| [
"void FUNC_0(Monitor *VAR_0)\n{",
"USBBus *bus;",
"USBDevice *dev;",
"USBPort *port;",
"if (TAILQ_EMPTY(&busses)) {",
"monitor_printf(VAR_0, \"USB support not enabled\\n\");",
"return;",
"}",
"TAILQ_FOREACH(bus, &busses, next) {",
"TAILQ_FOREACH(port, &bus->used, next) {",
"dev = port->dev;",
"if (!dev)\ncontinue;",
"monitor_printf(VAR_0, \" Device %d.%d, Speed %s Mb/s, Product %s\\n\",\nbus->busnr, dev->addr, usb_speed(dev->speed), dev->devname);",
"}",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
],
[
19
],
[
23
],
[
25
],
[
27
],
[
29,
31
],
[
33,
35
],
[
37
],
[
39
],
[
41
]
] |
21,271 | void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, int label)
{
if (cond == TCG_COND_ALWAYS) {
tcg_gen_br(label);
} else if (cond != TCG_COND_NEVER) {
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1),
TCGV_HIGH(arg1), TCGV_LOW(arg2),
TCGV_HIGH(arg2), cond, label);
} else {
tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond, label);
}
}
}
| false | qemu | 42a268c241183877192c376d03bd9b6d527407c7 | void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, int label)
{
if (cond == TCG_COND_ALWAYS) {
tcg_gen_br(label);
} else if (cond != TCG_COND_NEVER) {
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1),
TCGV_HIGH(arg1), TCGV_LOW(arg2),
TCGV_HIGH(arg2), cond, label);
} else {
tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond, label);
}
}
}
| {
"code": [],
"line_no": []
} | void FUNC_0(TCGCond VAR_0, TCGv_i64 VAR_1, TCGv_i64 VAR_2, int VAR_3)
{
if (VAR_0 == TCG_COND_ALWAYS) {
tcg_gen_br(VAR_3);
} else if (VAR_0 != TCG_COND_NEVER) {
if (TCG_TARGET_REG_BITS == 32) {
tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(VAR_1),
TCGV_HIGH(VAR_1), TCGV_LOW(VAR_2),
TCGV_HIGH(VAR_2), VAR_0, VAR_3);
} else {
tcg_gen_op4ii_i64(INDEX_op_brcond_i64, VAR_1, VAR_2, VAR_0, VAR_3);
}
}
}
| [
"void FUNC_0(TCGCond VAR_0, TCGv_i64 VAR_1, TCGv_i64 VAR_2, int VAR_3)\n{",
"if (VAR_0 == TCG_COND_ALWAYS) {",
"tcg_gen_br(VAR_3);",
"} else if (VAR_0 != TCG_COND_NEVER) {",
"if (TCG_TARGET_REG_BITS == 32) {",
"tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(VAR_1),\nTCGV_HIGH(VAR_1), TCGV_LOW(VAR_2),\nTCGV_HIGH(VAR_2), VAR_0, VAR_3);",
"} else {",
"tcg_gen_op4ii_i64(INDEX_op_brcond_i64, VAR_1, VAR_2, VAR_0, VAR_3);",
"}",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13,
15,
17
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
]
] |
21,272 | static QList *channel_list_get(void)
{
return NULL;
}
| false | qemu | 4295e15aa730a95003a3639d6dad2eb1e65a59e2 | static QList *channel_list_get(void)
{
return NULL;
}
| {
"code": [],
"line_no": []
} | static QList *FUNC_0(void)
{
return NULL;
}
| [
"static QList *FUNC_0(void)\n{",
"return NULL;",
"}"
] | [
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
]
] |
21,273 | static int ssd0323_init(SSISlave *dev)
{
ssd0323_state *s = FROM_SSI_SLAVE(ssd0323_state, dev);
s->col_end = 63;
s->row_end = 79;
s->con = graphic_console_init(ssd0323_update_display,
ssd0323_invalidate_display,
NULL, NULL, s);
qemu_console_resize(s->con, 128 * MAGNIFY, 64 * MAGNIFY);
qdev_init_gpio_in(&dev->qdev, ssd0323_cd, 1);
register_savevm(&dev->qdev, "ssd0323_oled", -1, 1,
ssd0323_save, ssd0323_load, s);
return 0;
}
| false | qemu | 2c62f08ddbf3fa80dc7202eb9a2ea60ae44e2cc5 | static int ssd0323_init(SSISlave *dev)
{
ssd0323_state *s = FROM_SSI_SLAVE(ssd0323_state, dev);
s->col_end = 63;
s->row_end = 79;
s->con = graphic_console_init(ssd0323_update_display,
ssd0323_invalidate_display,
NULL, NULL, s);
qemu_console_resize(s->con, 128 * MAGNIFY, 64 * MAGNIFY);
qdev_init_gpio_in(&dev->qdev, ssd0323_cd, 1);
register_savevm(&dev->qdev, "ssd0323_oled", -1, 1,
ssd0323_save, ssd0323_load, s);
return 0;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(SSISlave *VAR_0)
{
ssd0323_state *s = FROM_SSI_SLAVE(ssd0323_state, VAR_0);
s->col_end = 63;
s->row_end = 79;
s->con = graphic_console_init(ssd0323_update_display,
ssd0323_invalidate_display,
NULL, NULL, s);
qemu_console_resize(s->con, 128 * MAGNIFY, 64 * MAGNIFY);
qdev_init_gpio_in(&VAR_0->qdev, ssd0323_cd, 1);
register_savevm(&VAR_0->qdev, "ssd0323_oled", -1, 1,
ssd0323_save, ssd0323_load, s);
return 0;
}
| [
"static int FUNC_0(SSISlave *VAR_0)\n{",
"ssd0323_state *s = FROM_SSI_SLAVE(ssd0323_state, VAR_0);",
"s->col_end = 63;",
"s->row_end = 79;",
"s->con = graphic_console_init(ssd0323_update_display,\nssd0323_invalidate_display,\nNULL, NULL, s);",
"qemu_console_resize(s->con, 128 * MAGNIFY, 64 * MAGNIFY);",
"qdev_init_gpio_in(&VAR_0->qdev, ssd0323_cd, 1);",
"register_savevm(&VAR_0->qdev, \"ssd0323_oled\", -1, 1,\nssd0323_save, ssd0323_load, s);",
"return 0;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11
],
[
13,
15,
17
],
[
19
],
[
23
],
[
27,
29
],
[
31
],
[
33
]
] |
21,274 | static void ide_atapi_cmd_read_pio(IDEState *s, int lba, int nb_sectors,
int sector_size)
{
s->lba = lba;
s->packet_transfer_size = nb_sectors * sector_size;
s->elementary_transfer_size = 0;
s->io_buffer_index = sector_size;
s->cd_sector_size = sector_size;
s->status = READY_STAT;
ide_atapi_cmd_reply_end(s);
}
| false | qemu | 41a2b9596c9ed2a827e16e749632752dd2686647 | static void ide_atapi_cmd_read_pio(IDEState *s, int lba, int nb_sectors,
int sector_size)
{
s->lba = lba;
s->packet_transfer_size = nb_sectors * sector_size;
s->elementary_transfer_size = 0;
s->io_buffer_index = sector_size;
s->cd_sector_size = sector_size;
s->status = READY_STAT;
ide_atapi_cmd_reply_end(s);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(IDEState *VAR_0, int VAR_1, int VAR_2,
int VAR_3)
{
VAR_0->VAR_1 = VAR_1;
VAR_0->packet_transfer_size = VAR_2 * VAR_3;
VAR_0->elementary_transfer_size = 0;
VAR_0->io_buffer_index = VAR_3;
VAR_0->cd_sector_size = VAR_3;
VAR_0->status = READY_STAT;
ide_atapi_cmd_reply_end(VAR_0);
}
| [
"static void FUNC_0(IDEState *VAR_0, int VAR_1, int VAR_2,\nint VAR_3)\n{",
"VAR_0->VAR_1 = VAR_1;",
"VAR_0->packet_transfer_size = VAR_2 * VAR_3;",
"VAR_0->elementary_transfer_size = 0;",
"VAR_0->io_buffer_index = VAR_3;",
"VAR_0->cd_sector_size = VAR_3;",
"VAR_0->status = READY_STAT;",
"ide_atapi_cmd_reply_end(VAR_0);",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
19
],
[
21
],
[
23
]
] |
21,275 | static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, DCTELEM *block)
{
int i;
register int t1,t2,t3,t4,t5,t6,t7,t8;
DCTELEM *src, *dst;
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
src = block;
dst = block;
for(i = 0; i < 4; i++){
t1 = 12 * (src[0] + src[4]) + 4;
t2 = 12 * (src[0] - src[4]) + 4;
t3 = 16 * src[2] + 6 * src[6];
t4 = 6 * src[2] - 16 * src[6];
t5 = t1 + t3;
t6 = t2 + t4;
t7 = t2 - t4;
t8 = t1 - t3;
t1 = 16 * src[1] + 15 * src[3] + 9 * src[5] + 4 * src[7];
t2 = 15 * src[1] - 4 * src[3] - 16 * src[5] - 9 * src[7];
t3 = 9 * src[1] - 16 * src[3] + 4 * src[5] + 15 * src[7];
t4 = 4 * src[1] - 9 * src[3] + 15 * src[5] - 16 * src[7];
dst[0] = (t5 + t1) >> 3;
dst[1] = (t6 + t2) >> 3;
dst[2] = (t7 + t3) >> 3;
dst[3] = (t8 + t4) >> 3;
dst[4] = (t8 - t4) >> 3;
dst[5] = (t7 - t3) >> 3;
dst[6] = (t6 - t2) >> 3;
dst[7] = (t5 - t1) >> 3;
src += 8;
dst += 8;
}
src = block;
for(i = 0; i < 8; i++){
t1 = 17 * (src[ 0] + src[16]) + 64;
t2 = 17 * (src[ 0] - src[16]) + 64;
t3 = 22 * src[ 8] + 10 * src[24];
t4 = 22 * src[24] - 10 * src[ 8];
dest[0*linesize] = cm[dest[0*linesize] + ((t1 + t3) >> 7)];
dest[1*linesize] = cm[dest[1*linesize] + ((t2 - t4) >> 7)];
dest[2*linesize] = cm[dest[2*linesize] + ((t2 + t4) >> 7)];
dest[3*linesize] = cm[dest[3*linesize] + ((t1 - t3) >> 7)];
src ++;
dest++;
}
}
| false | FFmpeg | af796ba4b827a88912f9a9c59d1a57704a6fff38 | static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, DCTELEM *block)
{
int i;
register int t1,t2,t3,t4,t5,t6,t7,t8;
DCTELEM *src, *dst;
const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
src = block;
dst = block;
for(i = 0; i < 4; i++){
t1 = 12 * (src[0] + src[4]) + 4;
t2 = 12 * (src[0] - src[4]) + 4;
t3 = 16 * src[2] + 6 * src[6];
t4 = 6 * src[2] - 16 * src[6];
t5 = t1 + t3;
t6 = t2 + t4;
t7 = t2 - t4;
t8 = t1 - t3;
t1 = 16 * src[1] + 15 * src[3] + 9 * src[5] + 4 * src[7];
t2 = 15 * src[1] - 4 * src[3] - 16 * src[5] - 9 * src[7];
t3 = 9 * src[1] - 16 * src[3] + 4 * src[5] + 15 * src[7];
t4 = 4 * src[1] - 9 * src[3] + 15 * src[5] - 16 * src[7];
dst[0] = (t5 + t1) >> 3;
dst[1] = (t6 + t2) >> 3;
dst[2] = (t7 + t3) >> 3;
dst[3] = (t8 + t4) >> 3;
dst[4] = (t8 - t4) >> 3;
dst[5] = (t7 - t3) >> 3;
dst[6] = (t6 - t2) >> 3;
dst[7] = (t5 - t1) >> 3;
src += 8;
dst += 8;
}
src = block;
for(i = 0; i < 8; i++){
t1 = 17 * (src[ 0] + src[16]) + 64;
t2 = 17 * (src[ 0] - src[16]) + 64;
t3 = 22 * src[ 8] + 10 * src[24];
t4 = 22 * src[24] - 10 * src[ 8];
dest[0*linesize] = cm[dest[0*linesize] + ((t1 + t3) >> 7)];
dest[1*linesize] = cm[dest[1*linesize] + ((t2 - t4) >> 7)];
dest[2*linesize] = cm[dest[2*linesize] + ((t2 + t4) >> 7)];
dest[3*linesize] = cm[dest[3*linesize] + ((t1 - t3) >> 7)];
src ++;
dest++;
}
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(uint8_t *VAR_0, int VAR_1, DCTELEM *VAR_2)
{
int VAR_3;
register int VAR_4,VAR_5,VAR_6,VAR_7,VAR_8,VAR_9,VAR_10,VAR_11;
DCTELEM *src, *dst;
const uint8_t *VAR_12 = ff_cropTbl + MAX_NEG_CROP;
src = VAR_2;
dst = VAR_2;
for(VAR_3 = 0; VAR_3 < 4; VAR_3++){
VAR_4 = 12 * (src[0] + src[4]) + 4;
VAR_5 = 12 * (src[0] - src[4]) + 4;
VAR_6 = 16 * src[2] + 6 * src[6];
VAR_7 = 6 * src[2] - 16 * src[6];
VAR_8 = VAR_4 + VAR_6;
VAR_9 = VAR_5 + VAR_7;
VAR_10 = VAR_5 - VAR_7;
VAR_11 = VAR_4 - VAR_6;
VAR_4 = 16 * src[1] + 15 * src[3] + 9 * src[5] + 4 * src[7];
VAR_5 = 15 * src[1] - 4 * src[3] - 16 * src[5] - 9 * src[7];
VAR_6 = 9 * src[1] - 16 * src[3] + 4 * src[5] + 15 * src[7];
VAR_7 = 4 * src[1] - 9 * src[3] + 15 * src[5] - 16 * src[7];
dst[0] = (VAR_8 + VAR_4) >> 3;
dst[1] = (VAR_9 + VAR_5) >> 3;
dst[2] = (VAR_10 + VAR_6) >> 3;
dst[3] = (VAR_11 + VAR_7) >> 3;
dst[4] = (VAR_11 - VAR_7) >> 3;
dst[5] = (VAR_10 - VAR_6) >> 3;
dst[6] = (VAR_9 - VAR_5) >> 3;
dst[7] = (VAR_8 - VAR_4) >> 3;
src += 8;
dst += 8;
}
src = VAR_2;
for(VAR_3 = 0; VAR_3 < 8; VAR_3++){
VAR_4 = 17 * (src[ 0] + src[16]) + 64;
VAR_5 = 17 * (src[ 0] - src[16]) + 64;
VAR_6 = 22 * src[ 8] + 10 * src[24];
VAR_7 = 22 * src[24] - 10 * src[ 8];
VAR_0[0*VAR_1] = VAR_12[VAR_0[0*VAR_1] + ((VAR_4 + VAR_6) >> 7)];
VAR_0[1*VAR_1] = VAR_12[VAR_0[1*VAR_1] + ((VAR_5 - VAR_7) >> 7)];
VAR_0[2*VAR_1] = VAR_12[VAR_0[2*VAR_1] + ((VAR_5 + VAR_7) >> 7)];
VAR_0[3*VAR_1] = VAR_12[VAR_0[3*VAR_1] + ((VAR_4 - VAR_6) >> 7)];
src ++;
VAR_0++;
}
}
| [
"static void FUNC_0(uint8_t *VAR_0, int VAR_1, DCTELEM *VAR_2)\n{",
"int VAR_3;",
"register int VAR_4,VAR_5,VAR_6,VAR_7,VAR_8,VAR_9,VAR_10,VAR_11;",
"DCTELEM *src, *dst;",
"const uint8_t *VAR_12 = ff_cropTbl + MAX_NEG_CROP;",
"src = VAR_2;",
"dst = VAR_2;",
"for(VAR_3 = 0; VAR_3 < 4; VAR_3++){",
"VAR_4 = 12 * (src[0] + src[4]) + 4;",
"VAR_5 = 12 * (src[0] - src[4]) + 4;",
"VAR_6 = 16 * src[2] + 6 * src[6];",
"VAR_7 = 6 * src[2] - 16 * src[6];",
"VAR_8 = VAR_4 + VAR_6;",
"VAR_9 = VAR_5 + VAR_7;",
"VAR_10 = VAR_5 - VAR_7;",
"VAR_11 = VAR_4 - VAR_6;",
"VAR_4 = 16 * src[1] + 15 * src[3] + 9 * src[5] + 4 * src[7];",
"VAR_5 = 15 * src[1] - 4 * src[3] - 16 * src[5] - 9 * src[7];",
"VAR_6 = 9 * src[1] - 16 * src[3] + 4 * src[5] + 15 * src[7];",
"VAR_7 = 4 * src[1] - 9 * src[3] + 15 * src[5] - 16 * src[7];",
"dst[0] = (VAR_8 + VAR_4) >> 3;",
"dst[1] = (VAR_9 + VAR_5) >> 3;",
"dst[2] = (VAR_10 + VAR_6) >> 3;",
"dst[3] = (VAR_11 + VAR_7) >> 3;",
"dst[4] = (VAR_11 - VAR_7) >> 3;",
"dst[5] = (VAR_10 - VAR_6) >> 3;",
"dst[6] = (VAR_9 - VAR_5) >> 3;",
"dst[7] = (VAR_8 - VAR_4) >> 3;",
"src += 8;",
"dst += 8;",
"}",
"src = VAR_2;",
"for(VAR_3 = 0; VAR_3 < 8; VAR_3++){",
"VAR_4 = 17 * (src[ 0] + src[16]) + 64;",
"VAR_5 = 17 * (src[ 0] - src[16]) + 64;",
"VAR_6 = 22 * src[ 8] + 10 * src[24];",
"VAR_7 = 22 * src[24] - 10 * src[ 8];",
"VAR_0[0*VAR_1] = VAR_12[VAR_0[0*VAR_1] + ((VAR_4 + VAR_6) >> 7)];",
"VAR_0[1*VAR_1] = VAR_12[VAR_0[1*VAR_1] + ((VAR_5 - VAR_7) >> 7)];",
"VAR_0[2*VAR_1] = VAR_12[VAR_0[2*VAR_1] + ((VAR_5 + VAR_7) >> 7)];",
"VAR_0[3*VAR_1] = VAR_12[VAR_0[3*VAR_1] + ((VAR_4 - VAR_6) >> 7)];",
"src ++;",
"VAR_0++;",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
31
],
[
33
],
[
35
],
[
37
],
[
41
],
[
43
],
[
45
],
[
47
],
[
51
],
[
53
],
[
55
],
[
57
],
[
59
],
[
61
],
[
63
],
[
65
],
[
69
],
[
71
],
[
73
],
[
77
],
[
79
],
[
81
],
[
83
],
[
85
],
[
87
],
[
91
],
[
93
],
[
95
],
[
97
],
[
101
],
[
103
],
[
105
],
[
107
]
] |
21,276 | static void platform_fixed_ioport_writew(void *opaque, uint32_t addr, uint32_t val)
{
PCIXenPlatformState *s = opaque;
switch (addr) {
case 0: {
PCIDevice *pci_dev = PCI_DEVICE(s);
/* Unplug devices. Value is a bitmask of which devices to
unplug, with bit 0 the disk devices, bit 1 the network
devices, and bit 2 the non-primary-master IDE devices. */
if (val & UNPLUG_ALL_DISKS) {
DPRINTF("unplug disks\n");
pci_unplug_disks(pci_dev->bus);
}
if (val & UNPLUG_ALL_NICS) {
DPRINTF("unplug nics\n");
pci_unplug_nics(pci_dev->bus);
}
if (val & UNPLUG_AUX_IDE_DISKS) {
DPRINTF("unplug auxiliary disks not supported\n");
}
break;
}
case 2:
switch (val) {
case 1:
DPRINTF("Citrix Windows PV drivers loaded in guest\n");
break;
case 0:
DPRINTF("Guest claimed to be running PV product 0?\n");
break;
default:
DPRINTF("Unknown PV product %d loaded in guest\n", val);
break;
}
s->driver_product_version = val;
break;
}
}
| false | qemu | ae4d2eb273b167dad748ea4249720319240b1ac2 | static void platform_fixed_ioport_writew(void *opaque, uint32_t addr, uint32_t val)
{
PCIXenPlatformState *s = opaque;
switch (addr) {
case 0: {
PCIDevice *pci_dev = PCI_DEVICE(s);
if (val & UNPLUG_ALL_DISKS) {
DPRINTF("unplug disks\n");
pci_unplug_disks(pci_dev->bus);
}
if (val & UNPLUG_ALL_NICS) {
DPRINTF("unplug nics\n");
pci_unplug_nics(pci_dev->bus);
}
if (val & UNPLUG_AUX_IDE_DISKS) {
DPRINTF("unplug auxiliary disks not supported\n");
}
break;
}
case 2:
switch (val) {
case 1:
DPRINTF("Citrix Windows PV drivers loaded in guest\n");
break;
case 0:
DPRINTF("Guest claimed to be running PV product 0?\n");
break;
default:
DPRINTF("Unknown PV product %d loaded in guest\n", val);
break;
}
s->driver_product_version = val;
break;
}
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(void *VAR_0, uint32_t VAR_1, uint32_t VAR_2)
{
PCIXenPlatformState *s = VAR_0;
switch (VAR_1) {
case 0: {
PCIDevice *pci_dev = PCI_DEVICE(s);
if (VAR_2 & UNPLUG_ALL_DISKS) {
DPRINTF("unplug disks\n");
pci_unplug_disks(pci_dev->bus);
}
if (VAR_2 & UNPLUG_ALL_NICS) {
DPRINTF("unplug nics\n");
pci_unplug_nics(pci_dev->bus);
}
if (VAR_2 & UNPLUG_AUX_IDE_DISKS) {
DPRINTF("unplug auxiliary disks not supported\n");
}
break;
}
case 2:
switch (VAR_2) {
case 1:
DPRINTF("Citrix Windows PV drivers loaded in guest\n");
break;
case 0:
DPRINTF("Guest claimed to be running PV product 0?\n");
break;
default:
DPRINTF("Unknown PV product %d loaded in guest\n", VAR_2);
break;
}
s->driver_product_version = VAR_2;
break;
}
}
| [
"static void FUNC_0(void *VAR_0, uint32_t VAR_1, uint32_t VAR_2)\n{",
"PCIXenPlatformState *s = VAR_0;",
"switch (VAR_1) {",
"case 0: {",
"PCIDevice *pci_dev = PCI_DEVICE(s);",
"if (VAR_2 & UNPLUG_ALL_DISKS) {",
"DPRINTF(\"unplug disks\\n\");",
"pci_unplug_disks(pci_dev->bus);",
"}",
"if (VAR_2 & UNPLUG_ALL_NICS) {",
"DPRINTF(\"unplug nics\\n\");",
"pci_unplug_nics(pci_dev->bus);",
"}",
"if (VAR_2 & UNPLUG_AUX_IDE_DISKS) {",
"DPRINTF(\"unplug auxiliary disks not supported\\n\");",
"}",
"break;",
"}",
"case 2:\nswitch (VAR_2) {",
"case 1:\nDPRINTF(\"Citrix Windows PV drivers loaded in guest\\n\");",
"break;",
"case 0:\nDPRINTF(\"Guest claimed to be running PV product 0?\\n\");",
"break;",
"default:\nDPRINTF(\"Unknown PV product %d loaded in guest\\n\", VAR_2);",
"break;",
"}",
"s->driver_product_version = VAR_2;",
"break;",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11
],
[
13
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45
],
[
47,
49
],
[
51,
53
],
[
55
],
[
57,
59
],
[
61
],
[
63,
65
],
[
67
],
[
69
],
[
71
],
[
73
],
[
75
],
[
77
]
] |
21,277 | static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
XenPTReg *cfg_entry, uint16_t *val,
uint16_t dev_value, uint16_t valid_mask)
{
XenPTRegInfo *reg = cfg_entry->reg;
XenPTMSI *msi = s->msi;
uint16_t writable_mask = 0;
uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
/* Currently no support for multi-vector */
if (*val & PCI_MSI_FLAGS_QSIZE) {
XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val);
}
/* modify emulate register */
writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
msi->flags |= cfg_entry->data & ~PCI_MSI_FLAGS_ENABLE;
/* create value for writing to I/O device register */
*val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
/* update MSI */
if (*val & PCI_MSI_FLAGS_ENABLE) {
/* setup MSI pirq for the first time */
if (!msi->initialized) {
/* Init physical one */
XEN_PT_LOG(&s->dev, "setup MSI (register: %x).\n", *val);
if (xen_pt_msi_setup(s)) {
/* We do not broadcast the error to the framework code, so
* that MSI errors are contained in MSI emulation code and
* QEMU can go on running.
* Guest MSI would be actually not working.
*/
*val &= ~PCI_MSI_FLAGS_ENABLE;
XEN_PT_WARN(&s->dev, "Can not map MSI (register: %x)!\n", *val);
return 0;
}
if (xen_pt_msi_update(s)) {
*val &= ~PCI_MSI_FLAGS_ENABLE;
XEN_PT_WARN(&s->dev, "Can not bind MSI (register: %x)!\n", *val);
return 0;
}
msi->initialized = true;
msi->mapped = true;
}
msi->flags |= PCI_MSI_FLAGS_ENABLE;
} else if (msi->mapped) {
xen_pt_msi_disable(s);
}
return 0;
}
| false | qemu | e2779de053b64f023de382fd87b3596613d47d1e | static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
XenPTReg *cfg_entry, uint16_t *val,
uint16_t dev_value, uint16_t valid_mask)
{
XenPTRegInfo *reg = cfg_entry->reg;
XenPTMSI *msi = s->msi;
uint16_t writable_mask = 0;
uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
if (*val & PCI_MSI_FLAGS_QSIZE) {
XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val);
}
writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
msi->flags |= cfg_entry->data & ~PCI_MSI_FLAGS_ENABLE;
*val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
if (*val & PCI_MSI_FLAGS_ENABLE) {
if (!msi->initialized) {
XEN_PT_LOG(&s->dev, "setup MSI (register: %x).\n", *val);
if (xen_pt_msi_setup(s)) {
*val &= ~PCI_MSI_FLAGS_ENABLE;
XEN_PT_WARN(&s->dev, "Can not map MSI (register: %x)!\n", *val);
return 0;
}
if (xen_pt_msi_update(s)) {
*val &= ~PCI_MSI_FLAGS_ENABLE;
XEN_PT_WARN(&s->dev, "Can not bind MSI (register: %x)!\n", *val);
return 0;
}
msi->initialized = true;
msi->mapped = true;
}
msi->flags |= PCI_MSI_FLAGS_ENABLE;
} else if (msi->mapped) {
xen_pt_msi_disable(s);
}
return 0;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(XenPCIPassthroughState *VAR_0,
XenPTReg *VAR_1, uint16_t *VAR_2,
uint16_t VAR_3, uint16_t VAR_4)
{
XenPTRegInfo *reg = VAR_1->reg;
XenPTMSI *msi = VAR_0->msi;
uint16_t writable_mask = 0;
uint16_t throughable_mask = get_throughable_mask(VAR_0, reg, VAR_4);
if (*VAR_2 & PCI_MSI_FLAGS_QSIZE) {
XEN_PT_WARN(&VAR_0->dev, "Tries to set more than 1 vector ctrl %x\n", *VAR_2);
}
writable_mask = reg->emu_mask & ~reg->ro_mask & VAR_4;
VAR_1->data = XEN_PT_MERGE_VALUE(*VAR_2, VAR_1->data, writable_mask);
msi->flags |= VAR_1->data & ~PCI_MSI_FLAGS_ENABLE;
*VAR_2 = XEN_PT_MERGE_VALUE(*VAR_2, VAR_3, throughable_mask);
if (*VAR_2 & PCI_MSI_FLAGS_ENABLE) {
if (!msi->initialized) {
XEN_PT_LOG(&VAR_0->dev, "setup MSI (register: %x).\n", *VAR_2);
if (xen_pt_msi_setup(VAR_0)) {
*VAR_2 &= ~PCI_MSI_FLAGS_ENABLE;
XEN_PT_WARN(&VAR_0->dev, "Can not map MSI (register: %x)!\n", *VAR_2);
return 0;
}
if (xen_pt_msi_update(VAR_0)) {
*VAR_2 &= ~PCI_MSI_FLAGS_ENABLE;
XEN_PT_WARN(&VAR_0->dev, "Can not bind MSI (register: %x)!\n", *VAR_2);
return 0;
}
msi->initialized = true;
msi->mapped = true;
}
msi->flags |= PCI_MSI_FLAGS_ENABLE;
} else if (msi->mapped) {
xen_pt_msi_disable(VAR_0);
}
return 0;
}
| [
"static int FUNC_0(XenPCIPassthroughState *VAR_0,\nXenPTReg *VAR_1, uint16_t *VAR_2,\nuint16_t VAR_3, uint16_t VAR_4)\n{",
"XenPTRegInfo *reg = VAR_1->reg;",
"XenPTMSI *msi = VAR_0->msi;",
"uint16_t writable_mask = 0;",
"uint16_t throughable_mask = get_throughable_mask(VAR_0, reg, VAR_4);",
"if (*VAR_2 & PCI_MSI_FLAGS_QSIZE) {",
"XEN_PT_WARN(&VAR_0->dev, \"Tries to set more than 1 vector ctrl %x\\n\", *VAR_2);",
"}",
"writable_mask = reg->emu_mask & ~reg->ro_mask & VAR_4;",
"VAR_1->data = XEN_PT_MERGE_VALUE(*VAR_2, VAR_1->data, writable_mask);",
"msi->flags |= VAR_1->data & ~PCI_MSI_FLAGS_ENABLE;",
"*VAR_2 = XEN_PT_MERGE_VALUE(*VAR_2, VAR_3, throughable_mask);",
"if (*VAR_2 & PCI_MSI_FLAGS_ENABLE) {",
"if (!msi->initialized) {",
"XEN_PT_LOG(&VAR_0->dev, \"setup MSI (register: %x).\\n\", *VAR_2);",
"if (xen_pt_msi_setup(VAR_0)) {",
"*VAR_2 &= ~PCI_MSI_FLAGS_ENABLE;",
"XEN_PT_WARN(&VAR_0->dev, \"Can not map MSI (register: %x)!\\n\", *VAR_2);",
"return 0;",
"}",
"if (xen_pt_msi_update(VAR_0)) {",
"*VAR_2 &= ~PCI_MSI_FLAGS_ENABLE;",
"XEN_PT_WARN(&VAR_0->dev, \"Can not bind MSI (register: %x)!\\n\", *VAR_2);",
"return 0;",
"}",
"msi->initialized = true;",
"msi->mapped = true;",
"}",
"msi->flags |= PCI_MSI_FLAGS_ENABLE;",
"} else if (msi->mapped) {",
"xen_pt_msi_disable(VAR_0);",
"}",
"return 0;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
21
],
[
23
],
[
25
],
[
31
],
[
33
],
[
35
],
[
41
],
[
47
],
[
51
],
[
55
],
[
57
],
[
69
],
[
71
],
[
73
],
[
75
],
[
77
],
[
79
],
[
81
],
[
83
],
[
85
],
[
87
],
[
89
],
[
91
],
[
93
],
[
95
],
[
97
],
[
99
],
[
103
],
[
105
]
] |
21,278 | static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp)
{
BDRVQcow2State *s = bs->opaque;
if (bs->encrypted) {
/* Encryption works on a sector granularity */
bs->request_alignment = BDRV_SECTOR_SIZE;
}
bs->bl.pwrite_zeroes_alignment = s->cluster_size;
}
| false | qemu | a5b8dd2ce83208cd7d6eb4562339ecf5aae13574 | static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp)
{
BDRVQcow2State *s = bs->opaque;
if (bs->encrypted) {
bs->request_alignment = BDRV_SECTOR_SIZE;
}
bs->bl.pwrite_zeroes_alignment = s->cluster_size;
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(BlockDriverState *VAR_0, Error **VAR_1)
{
BDRVQcow2State *s = VAR_0->opaque;
if (VAR_0->encrypted) {
VAR_0->request_alignment = BDRV_SECTOR_SIZE;
}
VAR_0->bl.pwrite_zeroes_alignment = s->cluster_size;
}
| [
"static void FUNC_0(BlockDriverState *VAR_0, Error **VAR_1)\n{",
"BDRVQcow2State *s = VAR_0->opaque;",
"if (VAR_0->encrypted) {",
"VAR_0->request_alignment = BDRV_SECTOR_SIZE;",
"}",
"VAR_0->bl.pwrite_zeroes_alignment = s->cluster_size;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
13
],
[
15
],
[
17
],
[
19
]
] |
21,279 | static void set_string(Object *obj, Visitor *v, void *opaque,
const char *name, Error **errp)
{
DeviceState *dev = DEVICE(obj);
Property *prop = opaque;
char **ptr = qdev_get_prop_ptr(dev, prop);
Error *local_err = NULL;
char *str;
if (dev->realized) {
qdev_prop_set_after_realize(dev, name, errp);
return;
}
visit_type_str(v, &str, name, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
if (*ptr) {
g_free(*ptr);
}
*ptr = str;
}
| false | qemu | ef1e1e0782e99c9dcf2b35e5310cdd8ca9211374 | static void set_string(Object *obj, Visitor *v, void *opaque,
const char *name, Error **errp)
{
DeviceState *dev = DEVICE(obj);
Property *prop = opaque;
char **ptr = qdev_get_prop_ptr(dev, prop);
Error *local_err = NULL;
char *str;
if (dev->realized) {
qdev_prop_set_after_realize(dev, name, errp);
return;
}
visit_type_str(v, &str, name, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
if (*ptr) {
g_free(*ptr);
}
*ptr = str;
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(Object *VAR_0, Visitor *VAR_1, void *VAR_2,
const char *VAR_3, Error **VAR_4)
{
DeviceState *dev = DEVICE(VAR_0);
Property *prop = VAR_2;
char **VAR_5 = qdev_get_prop_ptr(dev, prop);
Error *local_err = NULL;
char *VAR_6;
if (dev->realized) {
qdev_prop_set_after_realize(dev, VAR_3, VAR_4);
return;
}
visit_type_str(VAR_1, &VAR_6, VAR_3, &local_err);
if (local_err) {
error_propagate(VAR_4, local_err);
return;
}
if (*VAR_5) {
g_free(*VAR_5);
}
*VAR_5 = VAR_6;
}
| [
"static void FUNC_0(Object *VAR_0, Visitor *VAR_1, void *VAR_2,\nconst char *VAR_3, Error **VAR_4)\n{",
"DeviceState *dev = DEVICE(VAR_0);",
"Property *prop = VAR_2;",
"char **VAR_5 = qdev_get_prop_ptr(dev, prop);",
"Error *local_err = NULL;",
"char *VAR_6;",
"if (dev->realized) {",
"qdev_prop_set_after_realize(dev, VAR_3, VAR_4);",
"return;",
"}",
"visit_type_str(VAR_1, &VAR_6, VAR_3, &local_err);",
"if (local_err) {",
"error_propagate(VAR_4, local_err);",
"return;",
"}",
"if (*VAR_5) {",
"g_free(*VAR_5);",
"}",
"*VAR_5 = VAR_6;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
19
],
[
21
],
[
23
],
[
25
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45
],
[
47
]
] |
21,283 | void OPPROTO op_idivb_AL_T0(void)
{
int num, den, q, r;
num = (int16_t)EAX;
den = (int8_t)T0;
if (den == 0) {
raise_exception(EXCP00_DIVZ);
}
q = (num / den) & 0xff;
r = (num % den) & 0xff;
EAX = (EAX & ~0xffff) | (r << 8) | q;
}
| true | qemu | 45bbbb466cf4a6280076ea5a51f67ef5bedee345 | void OPPROTO op_idivb_AL_T0(void)
{
int num, den, q, r;
num = (int16_t)EAX;
den = (int8_t)T0;
if (den == 0) {
raise_exception(EXCP00_DIVZ);
}
q = (num / den) & 0xff;
r = (num % den) & 0xff;
EAX = (EAX & ~0xffff) | (r << 8) | q;
}
| {
"code": [
" q = (num / den) & 0xff;",
" q = (num / den) & 0xff;"
],
"line_no": [
19,
19
]
} | void VAR_0 op_idivb_AL_T0(void)
{
int num, den, q, r;
num = (int16_t)EAX;
den = (int8_t)T0;
if (den == 0) {
raise_exception(EXCP00_DIVZ);
}
q = (num / den) & 0xff;
r = (num % den) & 0xff;
EAX = (EAX & ~0xffff) | (r << 8) | q;
}
| [
"void VAR_0 op_idivb_AL_T0(void)\n{",
"int num, den, q, r;",
"num = (int16_t)EAX;",
"den = (int8_t)T0;",
"if (den == 0) {",
"raise_exception(EXCP00_DIVZ);",
"}",
"q = (num / den) & 0xff;",
"r = (num % den) & 0xff;",
"EAX = (EAX & ~0xffff) | (r << 8) | q;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
]
] |
21,285 | static int print_uint8(DeviceState *dev, Property *prop, char *dest, size_t len)
{
uint8_t *ptr = qdev_get_prop_ptr(dev, prop);
return snprintf(dest, len, "%" PRIu8, *ptr);
}
| true | qemu | 5cb9b56acfc0b50acf7ccd2d044ab4991c47fdde | static int print_uint8(DeviceState *dev, Property *prop, char *dest, size_t len)
{
uint8_t *ptr = qdev_get_prop_ptr(dev, prop);
return snprintf(dest, len, "%" PRIu8, *ptr);
}
| {
"code": [
" uint8_t *ptr = qdev_get_prop_ptr(dev, prop);",
"static int print_uint8(DeviceState *dev, Property *prop, char *dest, size_t len)",
" uint8_t *ptr = qdev_get_prop_ptr(dev, prop);",
" return snprintf(dest, len, \"%\" PRIu8, *ptr);"
],
"line_no": [
5,
1,
5,
7
]
} | static int FUNC_0(DeviceState *VAR_0, Property *VAR_1, char *VAR_2, size_t VAR_3)
{
uint8_t *ptr = qdev_get_prop_ptr(VAR_0, VAR_1);
return snprintf(VAR_2, VAR_3, "%" PRIu8, *ptr);
}
| [
"static int FUNC_0(DeviceState *VAR_0, Property *VAR_1, char *VAR_2, size_t VAR_3)\n{",
"uint8_t *ptr = qdev_get_prop_ptr(VAR_0, VAR_1);",
"return snprintf(VAR_2, VAR_3, \"%\" PRIu8, *ptr);",
"}"
] | [
1,
1,
1,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
]
] |
21,286 | static int init_image(TiffContext *s, ThreadFrame *frame)
{
int ret;
switch (s->planar * 1000 + s->bpp * 10 + s->bppcount) {
case 11:
if (!s->palette_is_set) {
s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
break;
}
case 21:
case 41:
case 81:
s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
break;
case 243:
if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
} else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
} else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
} else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
} else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
} else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
} else {
av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
return AVERROR_PATCHWELCOME;
}
} else
s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
break;
case 161:
s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
break;
case 162:
s->avctx->pix_fmt = AV_PIX_FMT_YA8;
break;
case 322:
s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
break;
case 324:
s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
break;
case 483:
s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
break;
case 644:
s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
break;
case 1243:
s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
break;
case 1324:
s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
break;
case 1483:
s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
break;
case 1644:
s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
break;
default:
av_log(s->avctx, AV_LOG_ERROR,
"This format is not supported (bpp=%d, bppcount=%d)\n",
s->bpp, s->bppcount);
return AVERROR_INVALIDDATA;
}
if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
if((desc->flags & AV_PIX_FMT_FLAG_RGB) || desc->nb_components < 3) {
av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
return AVERROR_INVALIDDATA;
}
}
if (s->width != s->avctx->width || s->height != s->avctx->height) {
ret = ff_set_dimensions(s->avctx, s->width, s->height);
if (ret < 0)
return ret;
}
if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
return ret;
if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
}
return 0;
}
| true | FFmpeg | ab1e4312887d8e560d027803871b55b883910714 | static int init_image(TiffContext *s, ThreadFrame *frame)
{
int ret;
switch (s->planar * 1000 + s->bpp * 10 + s->bppcount) {
case 11:
if (!s->palette_is_set) {
s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
break;
}
case 21:
case 41:
case 81:
s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
break;
case 243:
if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
} else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
} else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
} else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
} else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
} else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
} else {
av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
return AVERROR_PATCHWELCOME;
}
} else
s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
break;
case 161:
s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
break;
case 162:
s->avctx->pix_fmt = AV_PIX_FMT_YA8;
break;
case 322:
s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
break;
case 324:
s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
break;
case 483:
s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
break;
case 644:
s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
break;
case 1243:
s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
break;
case 1324:
s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
break;
case 1483:
s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
break;
case 1644:
s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
break;
default:
av_log(s->avctx, AV_LOG_ERROR,
"This format is not supported (bpp=%d, bppcount=%d)\n",
s->bpp, s->bppcount);
return AVERROR_INVALIDDATA;
}
if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
if((desc->flags & AV_PIX_FMT_FLAG_RGB) || desc->nb_components < 3) {
av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
return AVERROR_INVALIDDATA;
}
}
if (s->width != s->avctx->width || s->height != s->avctx->height) {
ret = ff_set_dimensions(s->avctx, s->width, s->height);
if (ret < 0)
return ret;
}
if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
return ret;
if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
}
return 0;
}
| {
"code": [
" if((desc->flags & AV_PIX_FMT_FLAG_RGB) || desc->nb_components < 3) {"
],
"line_no": [
151
]
} | static int FUNC_0(TiffContext *VAR_0, ThreadFrame *VAR_1)
{
int VAR_2;
switch (VAR_0->planar * 1000 + VAR_0->bpp * 10 + VAR_0->bppcount) {
case 11:
if (!VAR_0->palette_is_set) {
VAR_0->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
break;
}
case 21:
case 41:
case 81:
VAR_0->avctx->pix_fmt = VAR_0->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
break;
case 243:
if (VAR_0->photometric == TIFF_PHOTOMETRIC_YCBCR) {
if (VAR_0->subsampling[0] == 1 && VAR_0->subsampling[1] == 1) {
VAR_0->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
} else if (VAR_0->subsampling[0] == 2 && VAR_0->subsampling[1] == 1) {
VAR_0->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
} else if (VAR_0->subsampling[0] == 4 && VAR_0->subsampling[1] == 1) {
VAR_0->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
} else if (VAR_0->subsampling[0] == 1 && VAR_0->subsampling[1] == 2) {
VAR_0->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
} else if (VAR_0->subsampling[0] == 2 && VAR_0->subsampling[1] == 2) {
VAR_0->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
} else if (VAR_0->subsampling[0] == 4 && VAR_0->subsampling[1] == 4) {
VAR_0->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
} else {
av_log(VAR_0->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
return AVERROR_PATCHWELCOME;
}
} else
VAR_0->avctx->pix_fmt = AV_PIX_FMT_RGB24;
break;
case 161:
VAR_0->avctx->pix_fmt = VAR_0->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
break;
case 162:
VAR_0->avctx->pix_fmt = AV_PIX_FMT_YA8;
break;
case 322:
VAR_0->avctx->pix_fmt = VAR_0->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
break;
case 324:
VAR_0->avctx->pix_fmt = AV_PIX_FMT_RGBA;
break;
case 483:
VAR_0->avctx->pix_fmt = VAR_0->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
break;
case 644:
VAR_0->avctx->pix_fmt = VAR_0->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
break;
case 1243:
VAR_0->avctx->pix_fmt = AV_PIX_FMT_GBRP;
break;
case 1324:
VAR_0->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
break;
case 1483:
VAR_0->avctx->pix_fmt = VAR_0->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
break;
case 1644:
VAR_0->avctx->pix_fmt = VAR_0->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
break;
default:
av_log(VAR_0->avctx, AV_LOG_ERROR,
"This format is not supported (bpp=%d, bppcount=%d)\n",
VAR_0->bpp, VAR_0->bppcount);
return AVERROR_INVALIDDATA;
}
if (VAR_0->photometric == TIFF_PHOTOMETRIC_YCBCR) {
const AVPixFmtDescriptor *VAR_3 = av_pix_fmt_desc_get(VAR_0->avctx->pix_fmt);
if((VAR_3->flags & AV_PIX_FMT_FLAG_RGB) || VAR_3->nb_components < 3) {
av_log(VAR_0->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
return AVERROR_INVALIDDATA;
}
}
if (VAR_0->width != VAR_0->avctx->width || VAR_0->height != VAR_0->avctx->height) {
VAR_2 = ff_set_dimensions(VAR_0->avctx, VAR_0->width, VAR_0->height);
if (VAR_2 < 0)
return VAR_2;
}
if ((VAR_2 = ff_thread_get_buffer(VAR_0->avctx, VAR_1, 0)) < 0)
return VAR_2;
if (VAR_0->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
memcpy(VAR_1->f->data[1], VAR_0->palette, sizeof(VAR_0->palette));
}
return 0;
}
| [
"static int FUNC_0(TiffContext *VAR_0, ThreadFrame *VAR_1)\n{",
"int VAR_2;",
"switch (VAR_0->planar * 1000 + VAR_0->bpp * 10 + VAR_0->bppcount) {",
"case 11:\nif (!VAR_0->palette_is_set) {",
"VAR_0->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;",
"break;",
"}",
"case 21:\ncase 41:\ncase 81:\nVAR_0->avctx->pix_fmt = VAR_0->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;",
"break;",
"case 243:\nif (VAR_0->photometric == TIFF_PHOTOMETRIC_YCBCR) {",
"if (VAR_0->subsampling[0] == 1 && VAR_0->subsampling[1] == 1) {",
"VAR_0->avctx->pix_fmt = AV_PIX_FMT_YUV444P;",
"} else if (VAR_0->subsampling[0] == 2 && VAR_0->subsampling[1] == 1) {",
"VAR_0->avctx->pix_fmt = AV_PIX_FMT_YUV422P;",
"} else if (VAR_0->subsampling[0] == 4 && VAR_0->subsampling[1] == 1) {",
"VAR_0->avctx->pix_fmt = AV_PIX_FMT_YUV411P;",
"} else if (VAR_0->subsampling[0] == 1 && VAR_0->subsampling[1] == 2) {",
"VAR_0->avctx->pix_fmt = AV_PIX_FMT_YUV440P;",
"} else if (VAR_0->subsampling[0] == 2 && VAR_0->subsampling[1] == 2) {",
"VAR_0->avctx->pix_fmt = AV_PIX_FMT_YUV420P;",
"} else if (VAR_0->subsampling[0] == 4 && VAR_0->subsampling[1] == 4) {",
"VAR_0->avctx->pix_fmt = AV_PIX_FMT_YUV410P;",
"} else {",
"av_log(VAR_0->avctx, AV_LOG_ERROR, \"Unsupported YCbCr subsampling\\n\");",
"return AVERROR_PATCHWELCOME;",
"}",
"} else",
"VAR_0->avctx->pix_fmt = AV_PIX_FMT_RGB24;",
"break;",
"case 161:\nVAR_0->avctx->pix_fmt = VAR_0->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;",
"break;",
"case 162:\nVAR_0->avctx->pix_fmt = AV_PIX_FMT_YA8;",
"break;",
"case 322:\nVAR_0->avctx->pix_fmt = VAR_0->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;",
"break;",
"case 324:\nVAR_0->avctx->pix_fmt = AV_PIX_FMT_RGBA;",
"break;",
"case 483:\nVAR_0->avctx->pix_fmt = VAR_0->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;",
"break;",
"case 644:\nVAR_0->avctx->pix_fmt = VAR_0->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;",
"break;",
"case 1243:\nVAR_0->avctx->pix_fmt = AV_PIX_FMT_GBRP;",
"break;",
"case 1324:\nVAR_0->avctx->pix_fmt = AV_PIX_FMT_GBRAP;",
"break;",
"case 1483:\nVAR_0->avctx->pix_fmt = VAR_0->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;",
"break;",
"case 1644:\nVAR_0->avctx->pix_fmt = VAR_0->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;",
"break;",
"default:\nav_log(VAR_0->avctx, AV_LOG_ERROR,\n\"This format is not supported (bpp=%d, bppcount=%d)\\n\",\nVAR_0->bpp, VAR_0->bppcount);",
"return AVERROR_INVALIDDATA;",
"}",
"if (VAR_0->photometric == TIFF_PHOTOMETRIC_YCBCR) {",
"const AVPixFmtDescriptor *VAR_3 = av_pix_fmt_desc_get(VAR_0->avctx->pix_fmt);",
"if((VAR_3->flags & AV_PIX_FMT_FLAG_RGB) || VAR_3->nb_components < 3) {",
"av_log(VAR_0->avctx, AV_LOG_ERROR, \"Unsupported YCbCr variant\\n\");",
"return AVERROR_INVALIDDATA;",
"}",
"}",
"if (VAR_0->width != VAR_0->avctx->width || VAR_0->height != VAR_0->avctx->height) {",
"VAR_2 = ff_set_dimensions(VAR_0->avctx, VAR_0->width, VAR_0->height);",
"if (VAR_2 < 0)\nreturn VAR_2;",
"}",
"if ((VAR_2 = ff_thread_get_buffer(VAR_0->avctx, VAR_1, 0)) < 0)\nreturn VAR_2;",
"if (VAR_0->avctx->pix_fmt == AV_PIX_FMT_PAL8) {",
"memcpy(VAR_1->f->data[1], VAR_0->palette, sizeof(VAR_0->palette));",
"}",
"return 0;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11,
13
],
[
15
],
[
17
],
[
19
],
[
21,
23,
25,
27
],
[
29
],
[
31,
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45
],
[
47
],
[
49
],
[
51
],
[
53
],
[
55
],
[
57
],
[
59
],
[
61
],
[
63
],
[
65
],
[
67
],
[
69
],
[
71
],
[
73,
75
],
[
77
],
[
79,
81
],
[
83
],
[
85,
87
],
[
89
],
[
91,
93
],
[
95
],
[
97,
99
],
[
101
],
[
103,
105
],
[
107
],
[
109,
111
],
[
113
],
[
115,
117
],
[
119
],
[
121,
123
],
[
125
],
[
127,
129
],
[
131
],
[
133,
135,
137,
139
],
[
141
],
[
143
],
[
147
],
[
149
],
[
151
],
[
153
],
[
155
],
[
157
],
[
159
],
[
163
],
[
165
],
[
167,
169
],
[
171
],
[
173,
175
],
[
177
],
[
179
],
[
181
],
[
183
],
[
185
]
] |
21,287 | static int connect_to_sdog(const char *addr, const char *port)
{
char hbuf[NI_MAXHOST], sbuf[NI_MAXSERV];
int fd, ret;
struct addrinfo hints, *res, *res0;
if (!addr) {
addr = SD_DEFAULT_ADDR;
port = SD_DEFAULT_PORT;
}
memset(&hints, 0, sizeof(hints));
hints.ai_socktype = SOCK_STREAM;
ret = getaddrinfo(addr, port, &hints, &res0);
if (ret) {
error_report("unable to get address info %s, %s",
addr, strerror(errno));
return -errno;
}
for (res = res0; res; res = res->ai_next) {
ret = getnameinfo(res->ai_addr, res->ai_addrlen, hbuf, sizeof(hbuf),
sbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV);
if (ret) {
continue;
}
fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
if (fd < 0) {
continue;
}
reconnect:
ret = connect(fd, res->ai_addr, res->ai_addrlen);
if (ret < 0) {
if (errno == EINTR) {
goto reconnect;
}
break;
}
dprintf("connected to %s:%s\n", addr, port);
goto success;
}
fd = -errno;
error_report("failed connect to %s:%s", addr, port);
success:
freeaddrinfo(res0);
return fd;
} | true | qemu | a7e47d4bfcbf256fae06891a8599950ff8e1b61b | static int connect_to_sdog(const char *addr, const char *port)
{
char hbuf[NI_MAXHOST], sbuf[NI_MAXSERV];
int fd, ret;
struct addrinfo hints, *res, *res0;
if (!addr) {
addr = SD_DEFAULT_ADDR;
port = SD_DEFAULT_PORT;
}
memset(&hints, 0, sizeof(hints));
hints.ai_socktype = SOCK_STREAM;
ret = getaddrinfo(addr, port, &hints, &res0);
if (ret) {
error_report("unable to get address info %s, %s",
addr, strerror(errno));
return -errno;
}
for (res = res0; res; res = res->ai_next) {
ret = getnameinfo(res->ai_addr, res->ai_addrlen, hbuf, sizeof(hbuf),
sbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV);
if (ret) {
continue;
}
fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
if (fd < 0) {
continue;
}
reconnect:
ret = connect(fd, res->ai_addr, res->ai_addrlen);
if (ret < 0) {
if (errno == EINTR) {
goto reconnect;
}
break;
}
dprintf("connected to %s:%s\n", addr, port);
goto success;
}
fd = -errno;
error_report("failed connect to %s:%s", addr, port);
success:
freeaddrinfo(res0);
return fd;
} | {
"code": [],
"line_no": []
} | static int FUNC_0(const char *VAR_0, const char *VAR_1)
{
char VAR_2[NI_MAXHOST], sbuf[NI_MAXSERV];
int VAR_3, VAR_4;
struct addrinfo VAR_5, *VAR_6, *VAR_7;
if (!VAR_0) {
VAR_0 = SD_DEFAULT_ADDR;
VAR_1 = SD_DEFAULT_PORT;
}
memset(&VAR_5, 0, sizeof(VAR_5));
VAR_5.ai_socktype = SOCK_STREAM;
VAR_4 = getaddrinfo(VAR_0, VAR_1, &VAR_5, &VAR_7);
if (VAR_4) {
error_report("unable to get address info %s, %s",
VAR_0, strerror(errno));
return -errno;
}
for (VAR_6 = VAR_7; VAR_6; VAR_6 = VAR_6->ai_next) {
VAR_4 = getnameinfo(VAR_6->ai_addr, VAR_6->ai_addrlen, VAR_2, sizeof(VAR_2),
sbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV);
if (VAR_4) {
continue;
}
VAR_3 = socket(VAR_6->ai_family, VAR_6->ai_socktype, VAR_6->ai_protocol);
if (VAR_3 < 0) {
continue;
}
reconnect:
VAR_4 = connect(VAR_3, VAR_6->ai_addr, VAR_6->ai_addrlen);
if (VAR_4 < 0) {
if (errno == EINTR) {
goto reconnect;
}
break;
}
dprintf("connected to %s:%s\n", VAR_0, VAR_1);
goto success;
}
VAR_3 = -errno;
error_report("failed connect to %s:%s", VAR_0, VAR_1);
success:
freeaddrinfo(VAR_7);
return VAR_3;
} | [
"static int FUNC_0(const char *VAR_0, const char *VAR_1)\n{",
"char VAR_2[NI_MAXHOST], sbuf[NI_MAXSERV];",
"int VAR_3, VAR_4;",
"struct addrinfo VAR_5, *VAR_6, *VAR_7;",
"if (!VAR_0) {",
"VAR_0 = SD_DEFAULT_ADDR;",
"VAR_1 = SD_DEFAULT_PORT;",
"}",
"memset(&VAR_5, 0, sizeof(VAR_5));",
"VAR_5.ai_socktype = SOCK_STREAM;",
"VAR_4 = getaddrinfo(VAR_0, VAR_1, &VAR_5, &VAR_7);",
"if (VAR_4) {",
"error_report(\"unable to get address info %s, %s\",\nVAR_0, strerror(errno));",
"return -errno;",
"}",
"for (VAR_6 = VAR_7; VAR_6; VAR_6 = VAR_6->ai_next) {",
"VAR_4 = getnameinfo(VAR_6->ai_addr, VAR_6->ai_addrlen, VAR_2, sizeof(VAR_2),\nsbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV);",
"if (VAR_4) {",
"continue;",
"}",
"VAR_3 = socket(VAR_6->ai_family, VAR_6->ai_socktype, VAR_6->ai_protocol);",
"if (VAR_3 < 0) {",
"continue;",
"}",
"reconnect:\nVAR_4 = connect(VAR_3, VAR_6->ai_addr, VAR_6->ai_addrlen);",
"if (VAR_4 < 0) {",
"if (errno == EINTR) {",
"goto reconnect;",
"}",
"break;",
"}",
"dprintf(\"connected to %s:%s\\n\", VAR_0, VAR_1);",
"goto success;",
"}",
"VAR_3 = -errno;",
"error_report(\"failed connect to %s:%s\", VAR_0, VAR_1);",
"success:\nfreeaddrinfo(VAR_7);",
"return VAR_3;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
],
[
19
],
[
23
],
[
25
],
[
29
],
[
31
],
[
33,
35
],
[
37
],
[
39
],
[
43
],
[
45,
47
],
[
49
],
[
51
],
[
53
],
[
57
],
[
59
],
[
61
],
[
63
],
[
67,
69
],
[
71
],
[
73
],
[
75
],
[
77
],
[
80
],
[
82
],
[
86
],
[
88
],
[
90
],
[
92
],
[
94
],
[
96,
98
],
[
100
],
[
102
]
] |
21,289 | int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
int *frame_size_ptr,
AVPacket *avpkt)
{
AVFrame frame;
int ret, got_frame = 0;
if (avctx->get_buffer != avcodec_default_get_buffer) {
av_log(avctx, AV_LOG_ERROR, "A custom get_buffer() cannot be used with "
"avcodec_decode_audio3()\n");
return AVERROR(EINVAL);
}
ret = avcodec_decode_audio4(avctx, &frame, &got_frame, avpkt);
if (ret >= 0 && got_frame) {
int ch, plane_size;
int planar = av_sample_fmt_is_planar(avctx->sample_fmt);
int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels,
frame.nb_samples,
avctx->sample_fmt, 1);
if (*frame_size_ptr < data_size) {
av_log(avctx, AV_LOG_ERROR, "output buffer size is too small for "
"the current frame (%d < %d)\n", *frame_size_ptr, data_size);
return AVERROR(EINVAL);
}
memcpy(samples, frame.extended_data[0], plane_size);
if (planar && avctx->channels > 1) {
uint8_t *out = ((uint8_t *)samples) + plane_size;
for (ch = 1; ch < avctx->channels; ch++) {
memcpy(out, frame.extended_data[ch], plane_size);
out += plane_size;
}
}
*frame_size_ptr = data_size;
} else {
*frame_size_ptr = 0;
}
return ret;
}
| true | FFmpeg | e2ff436ef64589de8486517352e17f513886e15b | int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
int *frame_size_ptr,
AVPacket *avpkt)
{
AVFrame frame;
int ret, got_frame = 0;
if (avctx->get_buffer != avcodec_default_get_buffer) {
av_log(avctx, AV_LOG_ERROR, "A custom get_buffer() cannot be used with "
"avcodec_decode_audio3()\n");
return AVERROR(EINVAL);
}
ret = avcodec_decode_audio4(avctx, &frame, &got_frame, avpkt);
if (ret >= 0 && got_frame) {
int ch, plane_size;
int planar = av_sample_fmt_is_planar(avctx->sample_fmt);
int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels,
frame.nb_samples,
avctx->sample_fmt, 1);
if (*frame_size_ptr < data_size) {
av_log(avctx, AV_LOG_ERROR, "output buffer size is too small for "
"the current frame (%d < %d)\n", *frame_size_ptr, data_size);
return AVERROR(EINVAL);
}
memcpy(samples, frame.extended_data[0], plane_size);
if (planar && avctx->channels > 1) {
uint8_t *out = ((uint8_t *)samples) + plane_size;
for (ch = 1; ch < avctx->channels; ch++) {
memcpy(out, frame.extended_data[ch], plane_size);
out += plane_size;
}
}
*frame_size_ptr = data_size;
} else {
*frame_size_ptr = 0;
}
return ret;
}
| {
"code": [
" av_log(avctx, AV_LOG_ERROR, \"A custom get_buffer() cannot be used with \"",
" \"avcodec_decode_audio3()\\n\");",
" return AVERROR(EINVAL);"
],
"line_no": [
17,
19,
21
]
} | int VAR_0 avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
int *frame_size_ptr,
AVPacket *avpkt)
{
AVFrame frame;
int ret, got_frame = 0;
if (avctx->get_buffer != avcodec_default_get_buffer) {
av_log(avctx, AV_LOG_ERROR, "A custom get_buffer() cannot be used with "
"avcodec_decode_audio3()\n");
return AVERROR(EINVAL);
}
ret = avcodec_decode_audio4(avctx, &frame, &got_frame, avpkt);
if (ret >= 0 && got_frame) {
int ch, plane_size;
int planar = av_sample_fmt_is_planar(avctx->sample_fmt);
int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels,
frame.nb_samples,
avctx->sample_fmt, 1);
if (*frame_size_ptr < data_size) {
av_log(avctx, AV_LOG_ERROR, "output buffer size is too small for "
"the current frame (%d < %d)\n", *frame_size_ptr, data_size);
return AVERROR(EINVAL);
}
memcpy(samples, frame.extended_data[0], plane_size);
if (planar && avctx->channels > 1) {
uint8_t *out = ((uint8_t *)samples) + plane_size;
for (ch = 1; ch < avctx->channels; ch++) {
memcpy(out, frame.extended_data[ch], plane_size);
out += plane_size;
}
}
*frame_size_ptr = data_size;
} else {
*frame_size_ptr = 0;
}
return ret;
}
| [
"int VAR_0 avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,\nint *frame_size_ptr,\nAVPacket *avpkt)\n{",
"AVFrame frame;",
"int ret, got_frame = 0;",
"if (avctx->get_buffer != avcodec_default_get_buffer) {",
"av_log(avctx, AV_LOG_ERROR, \"A custom get_buffer() cannot be used with \"\n\"avcodec_decode_audio3()\\n\");",
"return AVERROR(EINVAL);",
"}",
"ret = avcodec_decode_audio4(avctx, &frame, &got_frame, avpkt);",
"if (ret >= 0 && got_frame) {",
"int ch, plane_size;",
"int planar = av_sample_fmt_is_planar(avctx->sample_fmt);",
"int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels,\nframe.nb_samples,\navctx->sample_fmt, 1);",
"if (*frame_size_ptr < data_size) {",
"av_log(avctx, AV_LOG_ERROR, \"output buffer size is too small for \"\n\"the current frame (%d < %d)\\n\", *frame_size_ptr, data_size);",
"return AVERROR(EINVAL);",
"}",
"memcpy(samples, frame.extended_data[0], plane_size);",
"if (planar && avctx->channels > 1) {",
"uint8_t *out = ((uint8_t *)samples) + plane_size;",
"for (ch = 1; ch < avctx->channels; ch++) {",
"memcpy(out, frame.extended_data[ch], plane_size);",
"out += plane_size;",
"}",
"}",
"*frame_size_ptr = data_size;",
"} else {",
"*frame_size_ptr = 0;",
"}",
"return ret;",
"}"
] | [
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7
],
[
9
],
[
11
],
[
15
],
[
17,
19
],
[
21
],
[
23
],
[
27
],
[
31
],
[
33
],
[
35
],
[
37,
39,
41
],
[
43
],
[
45,
47
],
[
49
],
[
51
],
[
55
],
[
59
],
[
61
],
[
63
],
[
65
],
[
67
],
[
69
],
[
71
],
[
73
],
[
75
],
[
77
],
[
79
],
[
81
],
[
83
]
] |
21,291 | static int qemu_chr_open_win_con(QemuOpts *opts, CharDriverState **chr)
{
return qemu_chr_open_win_file(GetStdHandle(STD_OUTPUT_HANDLE), chr);
}
| true | qemu | 1f51470d044852592922f91000e741c381582cdc | static int qemu_chr_open_win_con(QemuOpts *opts, CharDriverState **chr)
{
return qemu_chr_open_win_file(GetStdHandle(STD_OUTPUT_HANDLE), chr);
}
| {
"code": [
"static int qemu_chr_open_win_con(QemuOpts *opts, CharDriverState **chr)",
" return qemu_chr_open_win_file(GetStdHandle(STD_OUTPUT_HANDLE), chr);"
],
"line_no": [
1,
5
]
} | static int FUNC_0(QemuOpts *VAR_0, CharDriverState **VAR_1)
{
return qemu_chr_open_win_file(GetStdHandle(STD_OUTPUT_HANDLE), VAR_1);
}
| [
"static int FUNC_0(QemuOpts *VAR_0, CharDriverState **VAR_1)\n{",
"return qemu_chr_open_win_file(GetStdHandle(STD_OUTPUT_HANDLE), VAR_1);",
"}"
] | [
1,
1,
0
] | [
[
1,
3
],
[
5
],
[
7
]
] |
21,292 | static void dca_exss_parse_header(DCAContext *s)
{
int asset_size[8];
int ss_index;
int blownup;
int num_audiop = 1;
int num_assets = 1;
int active_ss_mask[8];
int i, j;
int start_posn;
int hdrsize;
uint32_t mkr;
if (get_bits_left(&s->gb) < 52)
return;
start_posn = get_bits_count(&s->gb) - 32;
skip_bits(&s->gb, 8); // user data
ss_index = get_bits(&s->gb, 2);
blownup = get_bits1(&s->gb);
hdrsize = get_bits(&s->gb, 8 + 4 * blownup) + 1; // header_size
skip_bits(&s->gb, 16 + 4 * blownup); // hd_size
s->static_fields = get_bits1(&s->gb);
if (s->static_fields) {
skip_bits(&s->gb, 2); // reference clock code
skip_bits(&s->gb, 3); // frame duration code
if (get_bits1(&s->gb))
skip_bits_long(&s->gb, 36); // timestamp
/* a single stream can contain multiple audio assets that can be
* combined to form multiple audio presentations */
num_audiop = get_bits(&s->gb, 3) + 1;
if (num_audiop > 1) {
avpriv_request_sample(s->avctx,
"Multiple DTS-HD audio presentations");
/* ignore such streams for now */
return;
}
num_assets = get_bits(&s->gb, 3) + 1;
if (num_assets > 1) {
avpriv_request_sample(s->avctx, "Multiple DTS-HD audio assets");
/* ignore such streams for now */
return;
}
for (i = 0; i < num_audiop; i++)
active_ss_mask[i] = get_bits(&s->gb, ss_index + 1);
for (i = 0; i < num_audiop; i++)
for (j = 0; j <= ss_index; j++)
if (active_ss_mask[i] & (1 << j))
skip_bits(&s->gb, 8); // active asset mask
s->mix_metadata = get_bits1(&s->gb);
if (s->mix_metadata) {
int mix_out_mask_size;
skip_bits(&s->gb, 2); // adjustment level
mix_out_mask_size = (get_bits(&s->gb, 2) + 1) << 2;
s->num_mix_configs = get_bits(&s->gb, 2) + 1;
for (i = 0; i < s->num_mix_configs; i++) {
int mix_out_mask = get_bits(&s->gb, mix_out_mask_size);
s->mix_config_num_ch[i] = dca_exss_mask2count(mix_out_mask);
}
}
}
for (i = 0; i < num_assets; i++)
asset_size[i] = get_bits_long(&s->gb, 16 + 4 * blownup);
for (i = 0; i < num_assets; i++) {
if (dca_exss_parse_asset_header(s))
return;
}
/* not parsed further, we were only interested in the extensions mask
* from the asset header */
j = get_bits_count(&s->gb);
if (start_posn + hdrsize * 8 > j)
skip_bits_long(&s->gb, start_posn + hdrsize * 8 - j);
for (i = 0; i < num_assets; i++) {
start_posn = get_bits_count(&s->gb);
mkr = get_bits_long(&s->gb, 32);
/* parse extensions that we know about */
if (mkr == 0x655e315e) {
dca_xbr_parse_frame(s);
} else if (mkr == 0x47004a03) {
dca_xxch_decode_frame(s);
s->core_ext_mask |= DCA_EXT_XXCH; /* xxx use for chan reordering */
} else {
av_log(s->avctx, AV_LOG_DEBUG,
"DTS-ExSS: unknown marker = 0x%08x\n", mkr);
}
/* skip to end of block */
j = get_bits_count(&s->gb);
if (start_posn + asset_size[i] * 8 > j)
skip_bits_long(&s->gb, start_posn + asset_size[i] * 8 - j);
}
} | true | FFmpeg | 62a9725bc95ef3c5101e2a9e74668cc1ecbd8819 | static void dca_exss_parse_header(DCAContext *s)
{
int asset_size[8];
int ss_index;
int blownup;
int num_audiop = 1;
int num_assets = 1;
int active_ss_mask[8];
int i, j;
int start_posn;
int hdrsize;
uint32_t mkr;
if (get_bits_left(&s->gb) < 52)
return;
start_posn = get_bits_count(&s->gb) - 32;
skip_bits(&s->gb, 8);
ss_index = get_bits(&s->gb, 2);
blownup = get_bits1(&s->gb);
hdrsize = get_bits(&s->gb, 8 + 4 * blownup) + 1;
skip_bits(&s->gb, 16 + 4 * blownup);
s->static_fields = get_bits1(&s->gb);
if (s->static_fields) {
skip_bits(&s->gb, 2);
skip_bits(&s->gb, 3);
if (get_bits1(&s->gb))
skip_bits_long(&s->gb, 36);
num_audiop = get_bits(&s->gb, 3) + 1;
if (num_audiop > 1) {
avpriv_request_sample(s->avctx,
"Multiple DTS-HD audio presentations");
return;
}
num_assets = get_bits(&s->gb, 3) + 1;
if (num_assets > 1) {
avpriv_request_sample(s->avctx, "Multiple DTS-HD audio assets");
return;
}
for (i = 0; i < num_audiop; i++)
active_ss_mask[i] = get_bits(&s->gb, ss_index + 1);
for (i = 0; i < num_audiop; i++)
for (j = 0; j <= ss_index; j++)
if (active_ss_mask[i] & (1 << j))
skip_bits(&s->gb, 8);
s->mix_metadata = get_bits1(&s->gb);
if (s->mix_metadata) {
int mix_out_mask_size;
skip_bits(&s->gb, 2);
mix_out_mask_size = (get_bits(&s->gb, 2) + 1) << 2;
s->num_mix_configs = get_bits(&s->gb, 2) + 1;
for (i = 0; i < s->num_mix_configs; i++) {
int mix_out_mask = get_bits(&s->gb, mix_out_mask_size);
s->mix_config_num_ch[i] = dca_exss_mask2count(mix_out_mask);
}
}
}
for (i = 0; i < num_assets; i++)
asset_size[i] = get_bits_long(&s->gb, 16 + 4 * blownup);
for (i = 0; i < num_assets; i++) {
if (dca_exss_parse_asset_header(s))
return;
}
j = get_bits_count(&s->gb);
if (start_posn + hdrsize * 8 > j)
skip_bits_long(&s->gb, start_posn + hdrsize * 8 - j);
for (i = 0; i < num_assets; i++) {
start_posn = get_bits_count(&s->gb);
mkr = get_bits_long(&s->gb, 32);
if (mkr == 0x655e315e) {
dca_xbr_parse_frame(s);
} else if (mkr == 0x47004a03) {
dca_xxch_decode_frame(s);
s->core_ext_mask |= DCA_EXT_XXCH;
} else {
av_log(s->avctx, AV_LOG_DEBUG,
"DTS-ExSS: unknown marker = 0x%08x\n", mkr);
}
j = get_bits_count(&s->gb);
if (start_posn + asset_size[i] * 8 > j)
skip_bits_long(&s->gb, start_posn + asset_size[i] * 8 - j);
}
} | {
"code": [],
"line_no": []
} | static void FUNC_0(DCAContext *VAR_0)
{
int VAR_1[8];
int VAR_2;
int VAR_3;
int VAR_4 = 1;
int VAR_5 = 1;
int VAR_6[8];
int VAR_7, VAR_8;
int VAR_9;
int VAR_10;
uint32_t mkr;
if (get_bits_left(&VAR_0->gb) < 52)
return;
VAR_9 = get_bits_count(&VAR_0->gb) - 32;
skip_bits(&VAR_0->gb, 8);
VAR_2 = get_bits(&VAR_0->gb, 2);
VAR_3 = get_bits1(&VAR_0->gb);
VAR_10 = get_bits(&VAR_0->gb, 8 + 4 * VAR_3) + 1;
skip_bits(&VAR_0->gb, 16 + 4 * VAR_3);
VAR_0->static_fields = get_bits1(&VAR_0->gb);
if (VAR_0->static_fields) {
skip_bits(&VAR_0->gb, 2);
skip_bits(&VAR_0->gb, 3);
if (get_bits1(&VAR_0->gb))
skip_bits_long(&VAR_0->gb, 36);
VAR_4 = get_bits(&VAR_0->gb, 3) + 1;
if (VAR_4 > 1) {
avpriv_request_sample(VAR_0->avctx,
"Multiple DTS-HD audio presentations");
return;
}
VAR_5 = get_bits(&VAR_0->gb, 3) + 1;
if (VAR_5 > 1) {
avpriv_request_sample(VAR_0->avctx, "Multiple DTS-HD audio assets");
return;
}
for (VAR_7 = 0; VAR_7 < VAR_4; VAR_7++)
VAR_6[VAR_7] = get_bits(&VAR_0->gb, VAR_2 + 1);
for (VAR_7 = 0; VAR_7 < VAR_4; VAR_7++)
for (VAR_8 = 0; VAR_8 <= VAR_2; VAR_8++)
if (VAR_6[VAR_7] & (1 << VAR_8))
skip_bits(&VAR_0->gb, 8);
VAR_0->mix_metadata = get_bits1(&VAR_0->gb);
if (VAR_0->mix_metadata) {
int VAR_11;
skip_bits(&VAR_0->gb, 2);
VAR_11 = (get_bits(&VAR_0->gb, 2) + 1) << 2;
VAR_0->num_mix_configs = get_bits(&VAR_0->gb, 2) + 1;
for (VAR_7 = 0; VAR_7 < VAR_0->num_mix_configs; VAR_7++) {
int mix_out_mask = get_bits(&VAR_0->gb, VAR_11);
VAR_0->mix_config_num_ch[VAR_7] = dca_exss_mask2count(mix_out_mask);
}
}
}
for (VAR_7 = 0; VAR_7 < VAR_5; VAR_7++)
VAR_1[VAR_7] = get_bits_long(&VAR_0->gb, 16 + 4 * VAR_3);
for (VAR_7 = 0; VAR_7 < VAR_5; VAR_7++) {
if (dca_exss_parse_asset_header(VAR_0))
return;
}
VAR_8 = get_bits_count(&VAR_0->gb);
if (VAR_9 + VAR_10 * 8 > VAR_8)
skip_bits_long(&VAR_0->gb, VAR_9 + VAR_10 * 8 - VAR_8);
for (VAR_7 = 0; VAR_7 < VAR_5; VAR_7++) {
VAR_9 = get_bits_count(&VAR_0->gb);
mkr = get_bits_long(&VAR_0->gb, 32);
if (mkr == 0x655e315e) {
dca_xbr_parse_frame(VAR_0);
} else if (mkr == 0x47004a03) {
dca_xxch_decode_frame(VAR_0);
VAR_0->core_ext_mask |= DCA_EXT_XXCH;
} else {
av_log(VAR_0->avctx, AV_LOG_DEBUG,
"DTS-ExSS: unknown marker = 0x%08x\n", mkr);
}
VAR_8 = get_bits_count(&VAR_0->gb);
if (VAR_9 + VAR_1[VAR_7] * 8 > VAR_8)
skip_bits_long(&VAR_0->gb, VAR_9 + VAR_1[VAR_7] * 8 - VAR_8);
}
} | [
"static void FUNC_0(DCAContext *VAR_0)\n{",
"int VAR_1[8];",
"int VAR_2;",
"int VAR_3;",
"int VAR_4 = 1;",
"int VAR_5 = 1;",
"int VAR_6[8];",
"int VAR_7, VAR_8;",
"int VAR_9;",
"int VAR_10;",
"uint32_t mkr;",
"if (get_bits_left(&VAR_0->gb) < 52)\nreturn;",
"VAR_9 = get_bits_count(&VAR_0->gb) - 32;",
"skip_bits(&VAR_0->gb, 8);",
"VAR_2 = get_bits(&VAR_0->gb, 2);",
"VAR_3 = get_bits1(&VAR_0->gb);",
"VAR_10 = get_bits(&VAR_0->gb, 8 + 4 * VAR_3) + 1;",
"skip_bits(&VAR_0->gb, 16 + 4 * VAR_3);",
"VAR_0->static_fields = get_bits1(&VAR_0->gb);",
"if (VAR_0->static_fields) {",
"skip_bits(&VAR_0->gb, 2);",
"skip_bits(&VAR_0->gb, 3);",
"if (get_bits1(&VAR_0->gb))\nskip_bits_long(&VAR_0->gb, 36);",
"VAR_4 = get_bits(&VAR_0->gb, 3) + 1;",
"if (VAR_4 > 1) {",
"avpriv_request_sample(VAR_0->avctx,\n\"Multiple DTS-HD audio presentations\");",
"return;",
"}",
"VAR_5 = get_bits(&VAR_0->gb, 3) + 1;",
"if (VAR_5 > 1) {",
"avpriv_request_sample(VAR_0->avctx, \"Multiple DTS-HD audio assets\");",
"return;",
"}",
"for (VAR_7 = 0; VAR_7 < VAR_4; VAR_7++)",
"VAR_6[VAR_7] = get_bits(&VAR_0->gb, VAR_2 + 1);",
"for (VAR_7 = 0; VAR_7 < VAR_4; VAR_7++)",
"for (VAR_8 = 0; VAR_8 <= VAR_2; VAR_8++)",
"if (VAR_6[VAR_7] & (1 << VAR_8))\nskip_bits(&VAR_0->gb, 8);",
"VAR_0->mix_metadata = get_bits1(&VAR_0->gb);",
"if (VAR_0->mix_metadata) {",
"int VAR_11;",
"skip_bits(&VAR_0->gb, 2);",
"VAR_11 = (get_bits(&VAR_0->gb, 2) + 1) << 2;",
"VAR_0->num_mix_configs = get_bits(&VAR_0->gb, 2) + 1;",
"for (VAR_7 = 0; VAR_7 < VAR_0->num_mix_configs; VAR_7++) {",
"int mix_out_mask = get_bits(&VAR_0->gb, VAR_11);",
"VAR_0->mix_config_num_ch[VAR_7] = dca_exss_mask2count(mix_out_mask);",
"}",
"}",
"}",
"for (VAR_7 = 0; VAR_7 < VAR_5; VAR_7++)",
"VAR_1[VAR_7] = get_bits_long(&VAR_0->gb, 16 + 4 * VAR_3);",
"for (VAR_7 = 0; VAR_7 < VAR_5; VAR_7++) {",
"if (dca_exss_parse_asset_header(VAR_0))\nreturn;",
"}",
"VAR_8 = get_bits_count(&VAR_0->gb);",
"if (VAR_9 + VAR_10 * 8 > VAR_8)\nskip_bits_long(&VAR_0->gb, VAR_9 + VAR_10 * 8 - VAR_8);",
"for (VAR_7 = 0; VAR_7 < VAR_5; VAR_7++) {",
"VAR_9 = get_bits_count(&VAR_0->gb);",
"mkr = get_bits_long(&VAR_0->gb, 32);",
"if (mkr == 0x655e315e) {",
"dca_xbr_parse_frame(VAR_0);",
"} else if (mkr == 0x47004a03) {",
"dca_xxch_decode_frame(VAR_0);",
"VAR_0->core_ext_mask |= DCA_EXT_XXCH;",
"} else {",
"av_log(VAR_0->avctx, AV_LOG_DEBUG,\n\"DTS-ExSS: unknown marker = 0x%08x\\n\", mkr);",
"}",
"VAR_8 = get_bits_count(&VAR_0->gb);",
"if (VAR_9 + VAR_1[VAR_7] * 8 > VAR_8)\nskip_bits_long(&VAR_0->gb, VAR_9 + VAR_1[VAR_7] * 8 - VAR_8);",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
2
],
[
3
],
[
4
],
[
5
],
[
6
],
[
7
],
[
8
],
[
9
],
[
10
],
[
11
],
[
12
],
[
13,
14
],
[
15
],
[
16
],
[
17
],
[
18
],
[
19
],
[
20
],
[
21
],
[
22
],
[
23
],
[
24
],
[
25,
26
],
[
29
],
[
30
],
[
31,
32
],
[
34
],
[
35
],
[
36
],
[
37
],
[
38
],
[
40
],
[
41
],
[
42
],
[
43
],
[
44
],
[
45
],
[
46,
47
],
[
48
],
[
49
],
[
50
],
[
51
],
[
52
],
[
53
],
[
54
],
[
55
],
[
56
],
[
57
],
[
58
],
[
59
],
[
60
],
[
61
],
[
62
],
[
63,
64
],
[
65
],
[
68
],
[
69,
70
],
[
71
],
[
72
],
[
73
],
[
75
],
[
76
],
[
77
],
[
78
],
[
79
],
[
80
],
[
81,
82
],
[
83
],
[
85
],
[
86,
87
],
[
88
],
[
89
]
] |
21,293 | static int idcin_read_header(AVFormatContext *s)
{
AVIOContext *pb = s->pb;
IdcinDemuxContext *idcin = s->priv_data;
AVStream *st;
unsigned int width, height;
unsigned int sample_rate, bytes_per_sample, channels;
/* get the 5 header parameters */
width = avio_rl32(pb);
height = avio_rl32(pb);
sample_rate = avio_rl32(pb);
bytes_per_sample = avio_rl32(pb);
channels = avio_rl32(pb);
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 33, 1, IDCIN_FPS);
idcin->video_stream_index = st->index;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = AV_CODEC_ID_IDCIN;
st->codec->codec_tag = 0; /* no fourcc */
st->codec->width = width;
st->codec->height = height;
/* load up the Huffman tables into extradata */
st->codec->extradata_size = HUFFMAN_TABLE_SIZE;
st->codec->extradata = av_malloc(HUFFMAN_TABLE_SIZE);
if (avio_read(pb, st->codec->extradata, HUFFMAN_TABLE_SIZE) !=
HUFFMAN_TABLE_SIZE)
return AVERROR(EIO);
/* if sample rate is 0, assume no audio */
if (sample_rate) {
idcin->audio_present = 1;
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 33, 1, IDCIN_FPS);
idcin->audio_stream_index = st->index;
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_tag = 1;
st->codec->channels = channels;
st->codec->sample_rate = sample_rate;
st->codec->bits_per_coded_sample = bytes_per_sample * 8;
st->codec->bit_rate = sample_rate * bytes_per_sample * 8 * channels;
st->codec->block_align = bytes_per_sample * channels;
if (bytes_per_sample == 1)
st->codec->codec_id = AV_CODEC_ID_PCM_U8;
else
st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
if (sample_rate % 14 != 0) {
idcin->audio_chunk_size1 = (sample_rate / 14) *
bytes_per_sample * channels;
idcin->audio_chunk_size2 = (sample_rate / 14 + 1) *
bytes_per_sample * channels;
} else {
idcin->audio_chunk_size1 = idcin->audio_chunk_size2 =
(sample_rate / 14) * bytes_per_sample * channels;
idcin->current_audio_chunk = 0;
} else
idcin->audio_present = 1;
idcin->next_chunk_is_video = 1;
idcin->pts = 0;
return 0; | true | FFmpeg | b0c96e06134d5c2aa3fa4f0951834c982ee99e3b | static int idcin_read_header(AVFormatContext *s)
{
AVIOContext *pb = s->pb;
IdcinDemuxContext *idcin = s->priv_data;
AVStream *st;
unsigned int width, height;
unsigned int sample_rate, bytes_per_sample, channels;
width = avio_rl32(pb);
height = avio_rl32(pb);
sample_rate = avio_rl32(pb);
bytes_per_sample = avio_rl32(pb);
channels = avio_rl32(pb);
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 33, 1, IDCIN_FPS);
idcin->video_stream_index = st->index;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = AV_CODEC_ID_IDCIN;
st->codec->codec_tag = 0;
st->codec->width = width;
st->codec->height = height;
st->codec->extradata_size = HUFFMAN_TABLE_SIZE;
st->codec->extradata = av_malloc(HUFFMAN_TABLE_SIZE);
if (avio_read(pb, st->codec->extradata, HUFFMAN_TABLE_SIZE) !=
HUFFMAN_TABLE_SIZE)
return AVERROR(EIO);
if (sample_rate) {
idcin->audio_present = 1;
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 33, 1, IDCIN_FPS);
idcin->audio_stream_index = st->index;
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_tag = 1;
st->codec->channels = channels;
st->codec->sample_rate = sample_rate;
st->codec->bits_per_coded_sample = bytes_per_sample * 8;
st->codec->bit_rate = sample_rate * bytes_per_sample * 8 * channels;
st->codec->block_align = bytes_per_sample * channels;
if (bytes_per_sample == 1)
st->codec->codec_id = AV_CODEC_ID_PCM_U8;
else
st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
if (sample_rate % 14 != 0) {
idcin->audio_chunk_size1 = (sample_rate / 14) *
bytes_per_sample * channels;
idcin->audio_chunk_size2 = (sample_rate / 14 + 1) *
bytes_per_sample * channels;
} else {
idcin->audio_chunk_size1 = idcin->audio_chunk_size2 =
(sample_rate / 14) * bytes_per_sample * channels;
idcin->current_audio_chunk = 0;
} else
idcin->audio_present = 1;
idcin->next_chunk_is_video = 1;
idcin->pts = 0;
return 0; | {
"code": [],
"line_no": []
} | static int FUNC_0(AVFormatContext *VAR_0)
{
AVIOContext *pb = VAR_0->pb;
IdcinDemuxContext *idcin = VAR_0->priv_data;
AVStream *st;
unsigned int VAR_1, VAR_2;
unsigned int VAR_3, VAR_4, VAR_5;
VAR_1 = avio_rl32(pb);
VAR_2 = avio_rl32(pb);
VAR_3 = avio_rl32(pb);
VAR_4 = avio_rl32(pb);
VAR_5 = avio_rl32(pb);
st = avformat_new_stream(VAR_0, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 33, 1, IDCIN_FPS);
idcin->video_stream_index = st->index;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = AV_CODEC_ID_IDCIN;
st->codec->codec_tag = 0;
st->codec->VAR_1 = VAR_1;
st->codec->VAR_2 = VAR_2;
st->codec->extradata_size = HUFFMAN_TABLE_SIZE;
st->codec->extradata = av_malloc(HUFFMAN_TABLE_SIZE);
if (avio_read(pb, st->codec->extradata, HUFFMAN_TABLE_SIZE) !=
HUFFMAN_TABLE_SIZE)
return AVERROR(EIO);
if (VAR_3) {
idcin->audio_present = 1;
st = avformat_new_stream(VAR_0, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 33, 1, IDCIN_FPS);
idcin->audio_stream_index = st->index;
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_tag = 1;
st->codec->VAR_5 = VAR_5;
st->codec->VAR_3 = VAR_3;
st->codec->bits_per_coded_sample = VAR_4 * 8;
st->codec->bit_rate = VAR_3 * VAR_4 * 8 * VAR_5;
st->codec->block_align = VAR_4 * VAR_5;
if (VAR_4 == 1)
st->codec->codec_id = AV_CODEC_ID_PCM_U8;
else
st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
if (VAR_3 % 14 != 0) {
idcin->audio_chunk_size1 = (VAR_3 / 14) *
VAR_4 * VAR_5;
idcin->audio_chunk_size2 = (VAR_3 / 14 + 1) *
VAR_4 * VAR_5;
} else {
idcin->audio_chunk_size1 = idcin->audio_chunk_size2 =
(VAR_3 / 14) * VAR_4 * VAR_5;
idcin->current_audio_chunk = 0;
} else
idcin->audio_present = 1;
idcin->next_chunk_is_video = 1;
idcin->pts = 0;
return 0; | [
"static int FUNC_0(AVFormatContext *VAR_0)\n{",
"AVIOContext *pb = VAR_0->pb;",
"IdcinDemuxContext *idcin = VAR_0->priv_data;",
"AVStream *st;",
"unsigned int VAR_1, VAR_2;",
"unsigned int VAR_3, VAR_4, VAR_5;",
"VAR_1 = avio_rl32(pb);",
"VAR_2 = avio_rl32(pb);",
"VAR_3 = avio_rl32(pb);",
"VAR_4 = avio_rl32(pb);",
"VAR_5 = avio_rl32(pb);",
"st = avformat_new_stream(VAR_0, NULL);",
"if (!st)\nreturn AVERROR(ENOMEM);",
"avpriv_set_pts_info(st, 33, 1, IDCIN_FPS);",
"idcin->video_stream_index = st->index;",
"st->codec->codec_type = AVMEDIA_TYPE_VIDEO;",
"st->codec->codec_id = AV_CODEC_ID_IDCIN;",
"st->codec->codec_tag = 0;",
"st->codec->VAR_1 = VAR_1;",
"st->codec->VAR_2 = VAR_2;",
"st->codec->extradata_size = HUFFMAN_TABLE_SIZE;",
"st->codec->extradata = av_malloc(HUFFMAN_TABLE_SIZE);",
"if (avio_read(pb, st->codec->extradata, HUFFMAN_TABLE_SIZE) !=\nHUFFMAN_TABLE_SIZE)\nreturn AVERROR(EIO);",
"if (VAR_3) {",
"idcin->audio_present = 1;",
"st = avformat_new_stream(VAR_0, NULL);",
"if (!st)\nreturn AVERROR(ENOMEM);",
"avpriv_set_pts_info(st, 33, 1, IDCIN_FPS);",
"idcin->audio_stream_index = st->index;",
"st->codec->codec_type = AVMEDIA_TYPE_AUDIO;",
"st->codec->codec_tag = 1;",
"st->codec->VAR_5 = VAR_5;",
"st->codec->VAR_3 = VAR_3;",
"st->codec->bits_per_coded_sample = VAR_4 * 8;",
"st->codec->bit_rate = VAR_3 * VAR_4 * 8 * VAR_5;",
"st->codec->block_align = VAR_4 * VAR_5;",
"if (VAR_4 == 1)\nst->codec->codec_id = AV_CODEC_ID_PCM_U8;",
"else\nst->codec->codec_id = AV_CODEC_ID_PCM_S16LE;",
"if (VAR_3 % 14 != 0) {",
"idcin->audio_chunk_size1 = (VAR_3 / 14) *\nVAR_4 * VAR_5;",
"idcin->audio_chunk_size2 = (VAR_3 / 14 + 1) *\nVAR_4 * VAR_5;",
"} else {",
"idcin->audio_chunk_size1 = idcin->audio_chunk_size2 =\n(VAR_3 / 14) * VAR_4 * VAR_5;",
"idcin->current_audio_chunk = 0;",
"} else",
"idcin->audio_present = 1;",
"idcin->next_chunk_is_video = 1;",
"idcin->pts = 0;",
"return 0;"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
2
],
[
3
],
[
4
],
[
5
],
[
6
],
[
7
],
[
9
],
[
10
],
[
11
],
[
12
],
[
13
],
[
14
],
[
15,
16
],
[
17
],
[
18
],
[
19
],
[
20
],
[
21
],
[
22
],
[
23
],
[
25
],
[
26
],
[
27,
28,
29
],
[
31
],
[
32
],
[
33
],
[
34,
35
],
[
36
],
[
37
],
[
38
],
[
39
],
[
40
],
[
41
],
[
42
],
[
43
],
[
44
],
[
45,
46
],
[
47,
48
],
[
49
],
[
50,
51
],
[
52,
53
],
[
54
],
[
55,
56
],
[
57
],
[
58
],
[
59
],
[
60
],
[
61
],
[
62
]
] |
21,294 | static void xilinx_enet_init(Object *obj)
{
XilinxAXIEnet *s = XILINX_AXI_ENET(obj);
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
object_property_add_link(obj, "axistream-connected", TYPE_STREAM_SLAVE,
(Object **) &s->tx_data_dev, &error_abort);
object_property_add_link(obj, "axistream-control-connected",
TYPE_STREAM_SLAVE,
(Object **) &s->tx_control_dev, &error_abort);
object_initialize(&s->rx_data_dev, sizeof(s->rx_data_dev),
TYPE_XILINX_AXI_ENET_DATA_STREAM);
object_initialize(&s->rx_control_dev, sizeof(s->rx_control_dev),
TYPE_XILINX_AXI_ENET_CONTROL_STREAM);
object_property_add_child(OBJECT(s), "axistream-connected-target",
(Object *)&s->rx_data_dev, &error_abort);
object_property_add_child(OBJECT(s), "axistream-control-connected-target",
(Object *)&s->rx_control_dev, &error_abort);
sysbus_init_irq(sbd, &s->irq);
memory_region_init_io(&s->iomem, OBJECT(s), &enet_ops, s, "enet", 0x40000);
sysbus_init_mmio(sbd, &s->iomem);
}
| true | qemu | 9561fda8d90e176bef598ba87c42a1bd6ad03ef7 | static void xilinx_enet_init(Object *obj)
{
XilinxAXIEnet *s = XILINX_AXI_ENET(obj);
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
object_property_add_link(obj, "axistream-connected", TYPE_STREAM_SLAVE,
(Object **) &s->tx_data_dev, &error_abort);
object_property_add_link(obj, "axistream-control-connected",
TYPE_STREAM_SLAVE,
(Object **) &s->tx_control_dev, &error_abort);
object_initialize(&s->rx_data_dev, sizeof(s->rx_data_dev),
TYPE_XILINX_AXI_ENET_DATA_STREAM);
object_initialize(&s->rx_control_dev, sizeof(s->rx_control_dev),
TYPE_XILINX_AXI_ENET_CONTROL_STREAM);
object_property_add_child(OBJECT(s), "axistream-connected-target",
(Object *)&s->rx_data_dev, &error_abort);
object_property_add_child(OBJECT(s), "axistream-control-connected-target",
(Object *)&s->rx_control_dev, &error_abort);
sysbus_init_irq(sbd, &s->irq);
memory_region_init_io(&s->iomem, OBJECT(s), &enet_ops, s, "enet", 0x40000);
sysbus_init_mmio(sbd, &s->iomem);
}
| {
"code": [
" (Object **) &s->tx_data_dev, &error_abort);",
" (Object **) &s->tx_control_dev, &error_abort);"
],
"line_no": [
13,
19
]
} | static void FUNC_0(Object *VAR_0)
{
XilinxAXIEnet *s = XILINX_AXI_ENET(VAR_0);
SysBusDevice *sbd = SYS_BUS_DEVICE(VAR_0);
object_property_add_link(VAR_0, "axistream-connected", TYPE_STREAM_SLAVE,
(Object **) &s->tx_data_dev, &error_abort);
object_property_add_link(VAR_0, "axistream-control-connected",
TYPE_STREAM_SLAVE,
(Object **) &s->tx_control_dev, &error_abort);
object_initialize(&s->rx_data_dev, sizeof(s->rx_data_dev),
TYPE_XILINX_AXI_ENET_DATA_STREAM);
object_initialize(&s->rx_control_dev, sizeof(s->rx_control_dev),
TYPE_XILINX_AXI_ENET_CONTROL_STREAM);
object_property_add_child(OBJECT(s), "axistream-connected-target",
(Object *)&s->rx_data_dev, &error_abort);
object_property_add_child(OBJECT(s), "axistream-control-connected-target",
(Object *)&s->rx_control_dev, &error_abort);
sysbus_init_irq(sbd, &s->irq);
memory_region_init_io(&s->iomem, OBJECT(s), &enet_ops, s, "enet", 0x40000);
sysbus_init_mmio(sbd, &s->iomem);
}
| [
"static void FUNC_0(Object *VAR_0)\n{",
"XilinxAXIEnet *s = XILINX_AXI_ENET(VAR_0);",
"SysBusDevice *sbd = SYS_BUS_DEVICE(VAR_0);",
"object_property_add_link(VAR_0, \"axistream-connected\", TYPE_STREAM_SLAVE,\n(Object **) &s->tx_data_dev, &error_abort);",
"object_property_add_link(VAR_0, \"axistream-control-connected\",\nTYPE_STREAM_SLAVE,\n(Object **) &s->tx_control_dev, &error_abort);",
"object_initialize(&s->rx_data_dev, sizeof(s->rx_data_dev),\nTYPE_XILINX_AXI_ENET_DATA_STREAM);",
"object_initialize(&s->rx_control_dev, sizeof(s->rx_control_dev),\nTYPE_XILINX_AXI_ENET_CONTROL_STREAM);",
"object_property_add_child(OBJECT(s), \"axistream-connected-target\",\n(Object *)&s->rx_data_dev, &error_abort);",
"object_property_add_child(OBJECT(s), \"axistream-control-connected-target\",\n(Object *)&s->rx_control_dev, &error_abort);",
"sysbus_init_irq(sbd, &s->irq);",
"memory_region_init_io(&s->iomem, OBJECT(s), &enet_ops, s, \"enet\", 0x40000);",
"sysbus_init_mmio(sbd, &s->iomem);",
"}"
] | [
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
11,
13
],
[
15,
17,
19
],
[
23,
25
],
[
27,
29
],
[
31,
33
],
[
35,
37
],
[
41
],
[
45
],
[
47
],
[
49
]
] |
21,295 | static int update_size(AVCodecContext *ctx, int w, int h, enum AVPixelFormat fmt)
{
VP9Context *s = ctx->priv_data;
uint8_t *p;
int bytesperpixel = s->bytesperpixel;
av_assert0(w > 0 && h > 0);
if (s->intra_pred_data[0] && w == ctx->width && h == ctx->height && ctx->pix_fmt == fmt)
return 0;
ctx->width = w;
ctx->height = h;
ctx->pix_fmt = fmt;
s->sb_cols = (w + 63) >> 6;
s->sb_rows = (h + 63) >> 6;
s->cols = (w + 7) >> 3;
s->rows = (h + 7) >> 3;
#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
av_freep(&s->intra_pred_data[0]);
// FIXME we slightly over-allocate here for subsampled chroma, but a little
// bit of padding shouldn't affect performance...
p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
if (!p)
return AVERROR(ENOMEM);
assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
assign(s->above_y_nnz_ctx, uint8_t *, 16);
assign(s->above_mode_ctx, uint8_t *, 16);
assign(s->above_mv_ctx, VP56mv(*)[2], 16);
assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
assign(s->above_partition_ctx, uint8_t *, 8);
assign(s->above_skip_ctx, uint8_t *, 8);
assign(s->above_txfm_ctx, uint8_t *, 8);
assign(s->above_segpred_ctx, uint8_t *, 8);
assign(s->above_intra_ctx, uint8_t *, 8);
assign(s->above_comp_ctx, uint8_t *, 8);
assign(s->above_ref_ctx, uint8_t *, 8);
assign(s->above_filter_ctx, uint8_t *, 8);
assign(s->lflvl, struct VP9Filter *, 1);
#undef assign
// these will be re-allocated a little later
av_freep(&s->b_base);
av_freep(&s->block_base);
if (s->bpp != s->last_bpp) {
ff_vp9dsp_init(&s->dsp, s->bpp);
ff_videodsp_init(&s->vdsp, s->bpp);
s->last_bpp = s->bpp;
}
return 0;
}
| true | FFmpeg | fd8b90f5f63de12c1ee1ec1cbe99791c5629c582 | static int update_size(AVCodecContext *ctx, int w, int h, enum AVPixelFormat fmt)
{
VP9Context *s = ctx->priv_data;
uint8_t *p;
int bytesperpixel = s->bytesperpixel;
av_assert0(w > 0 && h > 0);
if (s->intra_pred_data[0] && w == ctx->width && h == ctx->height && ctx->pix_fmt == fmt)
return 0;
ctx->width = w;
ctx->height = h;
ctx->pix_fmt = fmt;
s->sb_cols = (w + 63) >> 6;
s->sb_rows = (h + 63) >> 6;
s->cols = (w + 7) >> 3;
s->rows = (h + 7) >> 3;
#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
av_freep(&s->intra_pred_data[0]);
p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
if (!p)
return AVERROR(ENOMEM);
assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
assign(s->above_y_nnz_ctx, uint8_t *, 16);
assign(s->above_mode_ctx, uint8_t *, 16);
assign(s->above_mv_ctx, VP56mv(*)[2], 16);
assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
assign(s->above_partition_ctx, uint8_t *, 8);
assign(s->above_skip_ctx, uint8_t *, 8);
assign(s->above_txfm_ctx, uint8_t *, 8);
assign(s->above_segpred_ctx, uint8_t *, 8);
assign(s->above_intra_ctx, uint8_t *, 8);
assign(s->above_comp_ctx, uint8_t *, 8);
assign(s->above_ref_ctx, uint8_t *, 8);
assign(s->above_filter_ctx, uint8_t *, 8);
assign(s->lflvl, struct VP9Filter *, 1);
#undef assign
av_freep(&s->b_base);
av_freep(&s->block_base);
if (s->bpp != s->last_bpp) {
ff_vp9dsp_init(&s->dsp, s->bpp);
ff_videodsp_init(&s->vdsp, s->bpp);
s->last_bpp = s->bpp;
}
return 0;
}
| {
"code": [
" ff_vp9dsp_init(&s->dsp, s->bpp);"
],
"line_no": [
103
]
} | static int FUNC_0(AVCodecContext *VAR_0, int VAR_1, int VAR_2, enum AVPixelFormat VAR_3)
{
VP9Context *s = VAR_0->priv_data;
uint8_t *p;
int VAR_4 = s->VAR_4;
av_assert0(VAR_1 > 0 && VAR_2 > 0);
if (s->intra_pred_data[0] && VAR_1 == VAR_0->width && VAR_2 == VAR_0->height && VAR_0->pix_fmt == VAR_3)
return 0;
VAR_0->width = VAR_1;
VAR_0->height = VAR_2;
VAR_0->pix_fmt = VAR_3;
s->sb_cols = (VAR_1 + 63) >> 6;
s->sb_rows = (VAR_2 + 63) >> 6;
s->cols = (VAR_1 + 7) >> 3;
s->rows = (VAR_2 + 7) >> 3;
#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
av_freep(&s->intra_pred_data[0]);
p = av_malloc(s->sb_cols * (128 + 192 * VAR_4 +
sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
if (!p)
return AVERROR(ENOMEM);
assign(s->intra_pred_data[0], uint8_t *, 64 * VAR_4);
assign(s->intra_pred_data[1], uint8_t *, 64 * VAR_4);
assign(s->intra_pred_data[2], uint8_t *, 64 * VAR_4);
assign(s->above_y_nnz_ctx, uint8_t *, 16);
assign(s->above_mode_ctx, uint8_t *, 16);
assign(s->above_mv_ctx, VP56mv(*)[2], 16);
assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
assign(s->above_partition_ctx, uint8_t *, 8);
assign(s->above_skip_ctx, uint8_t *, 8);
assign(s->above_txfm_ctx, uint8_t *, 8);
assign(s->above_segpred_ctx, uint8_t *, 8);
assign(s->above_intra_ctx, uint8_t *, 8);
assign(s->above_comp_ctx, uint8_t *, 8);
assign(s->above_ref_ctx, uint8_t *, 8);
assign(s->above_filter_ctx, uint8_t *, 8);
assign(s->lflvl, struct VP9Filter *, 1);
#undef assign
av_freep(&s->b_base);
av_freep(&s->block_base);
if (s->bpp != s->last_bpp) {
ff_vp9dsp_init(&s->dsp, s->bpp);
ff_videodsp_init(&s->vdsp, s->bpp);
s->last_bpp = s->bpp;
}
return 0;
}
| [
"static int FUNC_0(AVCodecContext *VAR_0, int VAR_1, int VAR_2, enum AVPixelFormat VAR_3)\n{",
"VP9Context *s = VAR_0->priv_data;",
"uint8_t *p;",
"int VAR_4 = s->VAR_4;",
"av_assert0(VAR_1 > 0 && VAR_2 > 0);",
"if (s->intra_pred_data[0] && VAR_1 == VAR_0->width && VAR_2 == VAR_0->height && VAR_0->pix_fmt == VAR_3)\nreturn 0;",
"VAR_0->width = VAR_1;",
"VAR_0->height = VAR_2;",
"VAR_0->pix_fmt = VAR_3;",
"s->sb_cols = (VAR_1 + 63) >> 6;",
"s->sb_rows = (VAR_2 + 63) >> 6;",
"s->cols = (VAR_1 + 7) >> 3;",
"s->rows = (VAR_2 + 7) >> 3;",
"#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)",
"av_freep(&s->intra_pred_data[0]);",
"p = av_malloc(s->sb_cols * (128 + 192 * VAR_4 +\nsizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));",
"if (!p)\nreturn AVERROR(ENOMEM);",
"assign(s->intra_pred_data[0], uint8_t *, 64 * VAR_4);",
"assign(s->intra_pred_data[1], uint8_t *, 64 * VAR_4);",
"assign(s->intra_pred_data[2], uint8_t *, 64 * VAR_4);",
"assign(s->above_y_nnz_ctx, uint8_t *, 16);",
"assign(s->above_mode_ctx, uint8_t *, 16);",
"assign(s->above_mv_ctx, VP56mv(*)[2], 16);",
"assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);",
"assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);",
"assign(s->above_partition_ctx, uint8_t *, 8);",
"assign(s->above_skip_ctx, uint8_t *, 8);",
"assign(s->above_txfm_ctx, uint8_t *, 8);",
"assign(s->above_segpred_ctx, uint8_t *, 8);",
"assign(s->above_intra_ctx, uint8_t *, 8);",
"assign(s->above_comp_ctx, uint8_t *, 8);",
"assign(s->above_ref_ctx, uint8_t *, 8);",
"assign(s->above_filter_ctx, uint8_t *, 8);",
"assign(s->lflvl, struct VP9Filter *, 1);",
"#undef assign\nav_freep(&s->b_base);",
"av_freep(&s->block_base);",
"if (s->bpp != s->last_bpp) {",
"ff_vp9dsp_init(&s->dsp, s->bpp);",
"ff_videodsp_init(&s->vdsp, s->bpp);",
"s->last_bpp = s->bpp;",
"}",
"return 0;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
17,
19
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
39
],
[
41
],
[
47,
49
],
[
51,
53
],
[
55
],
[
57
],
[
59
],
[
61
],
[
63
],
[
65
],
[
67
],
[
69
],
[
71
],
[
73
],
[
75
],
[
77
],
[
79
],
[
81
],
[
83
],
[
85
],
[
87
],
[
89,
95
],
[
97
],
[
101
],
[
103
],
[
105
],
[
107
],
[
109
],
[
113
],
[
115
]
] |
21,297 | static TRBCCode xhci_reset_ep(XHCIState *xhci, unsigned int slotid,
unsigned int epid)
{
XHCISlot *slot;
XHCIEPContext *epctx;
USBDevice *dev;
trace_usb_xhci_ep_reset(slotid, epid);
assert(slotid >= 1 && slotid <= xhci->numslots);
if (epid < 1 || epid > 31) {
fprintf(stderr, "xhci: bad ep %d\n", epid);
return CC_TRB_ERROR;
}
slot = &xhci->slots[slotid-1];
if (!slot->eps[epid-1]) {
DPRINTF("xhci: slot %d ep %d not enabled\n", slotid, epid);
return CC_EP_NOT_ENABLED_ERROR;
}
epctx = slot->eps[epid-1];
if (epctx->state != EP_HALTED) {
fprintf(stderr, "xhci: reset EP while EP %d not halted (%d)\n",
epid, epctx->state);
return CC_CONTEXT_STATE_ERROR;
}
if (xhci_ep_nuke_xfers(xhci, slotid, epid) > 0) {
fprintf(stderr, "xhci: FIXME: endpoint reset w/ xfers running, "
"data might be lost\n");
}
uint8_t ep = epid>>1;
if (epid & 1) {
ep |= 0x80;
}
dev = xhci->slots[slotid-1].uport->dev;
if (!dev) {
return CC_USB_TRANSACTION_ERROR;
}
xhci_set_ep_state(xhci, epctx, NULL, EP_STOPPED);
if (epctx->nr_pstreams) {
xhci_reset_streams(epctx);
}
return CC_SUCCESS;
}
| true | qemu | 75cc1c1fcba1987bdf3979c4289ab756c2b15742 | static TRBCCode xhci_reset_ep(XHCIState *xhci, unsigned int slotid,
unsigned int epid)
{
XHCISlot *slot;
XHCIEPContext *epctx;
USBDevice *dev;
trace_usb_xhci_ep_reset(slotid, epid);
assert(slotid >= 1 && slotid <= xhci->numslots);
if (epid < 1 || epid > 31) {
fprintf(stderr, "xhci: bad ep %d\n", epid);
return CC_TRB_ERROR;
}
slot = &xhci->slots[slotid-1];
if (!slot->eps[epid-1]) {
DPRINTF("xhci: slot %d ep %d not enabled\n", slotid, epid);
return CC_EP_NOT_ENABLED_ERROR;
}
epctx = slot->eps[epid-1];
if (epctx->state != EP_HALTED) {
fprintf(stderr, "xhci: reset EP while EP %d not halted (%d)\n",
epid, epctx->state);
return CC_CONTEXT_STATE_ERROR;
}
if (xhci_ep_nuke_xfers(xhci, slotid, epid) > 0) {
fprintf(stderr, "xhci: FIXME: endpoint reset w/ xfers running, "
"data might be lost\n");
}
uint8_t ep = epid>>1;
if (epid & 1) {
ep |= 0x80;
}
dev = xhci->slots[slotid-1].uport->dev;
if (!dev) {
return CC_USB_TRANSACTION_ERROR;
}
xhci_set_ep_state(xhci, epctx, NULL, EP_STOPPED);
if (epctx->nr_pstreams) {
xhci_reset_streams(epctx);
}
return CC_SUCCESS;
}
| {
"code": [
" USBDevice *dev;",
" dev = xhci->slots[slotid-1].uport->dev;",
" if (!dev) {"
],
"line_no": [
11,
83,
85
]
} | static TRBCCode FUNC_0(XHCIState *xhci, unsigned int slotid,
unsigned int epid)
{
XHCISlot *slot;
XHCIEPContext *epctx;
USBDevice *dev;
trace_usb_xhci_ep_reset(slotid, epid);
assert(slotid >= 1 && slotid <= xhci->numslots);
if (epid < 1 || epid > 31) {
fprintf(stderr, "xhci: bad ep %d\n", epid);
return CC_TRB_ERROR;
}
slot = &xhci->slots[slotid-1];
if (!slot->eps[epid-1]) {
DPRINTF("xhci: slot %d ep %d not enabled\n", slotid, epid);
return CC_EP_NOT_ENABLED_ERROR;
}
epctx = slot->eps[epid-1];
if (epctx->state != EP_HALTED) {
fprintf(stderr, "xhci: reset EP while EP %d not halted (%d)\n",
epid, epctx->state);
return CC_CONTEXT_STATE_ERROR;
}
if (xhci_ep_nuke_xfers(xhci, slotid, epid) > 0) {
fprintf(stderr, "xhci: FIXME: endpoint reset w/ xfers running, "
"data might be lost\n");
}
uint8_t ep = epid>>1;
if (epid & 1) {
ep |= 0x80;
}
dev = xhci->slots[slotid-1].uport->dev;
if (!dev) {
return CC_USB_TRANSACTION_ERROR;
}
xhci_set_ep_state(xhci, epctx, NULL, EP_STOPPED);
if (epctx->nr_pstreams) {
xhci_reset_streams(epctx);
}
return CC_SUCCESS;
}
| [
"static TRBCCode FUNC_0(XHCIState *xhci, unsigned int slotid,\nunsigned int epid)\n{",
"XHCISlot *slot;",
"XHCIEPContext *epctx;",
"USBDevice *dev;",
"trace_usb_xhci_ep_reset(slotid, epid);",
"assert(slotid >= 1 && slotid <= xhci->numslots);",
"if (epid < 1 || epid > 31) {",
"fprintf(stderr, \"xhci: bad ep %d\\n\", epid);",
"return CC_TRB_ERROR;",
"}",
"slot = &xhci->slots[slotid-1];",
"if (!slot->eps[epid-1]) {",
"DPRINTF(\"xhci: slot %d ep %d not enabled\\n\", slotid, epid);",
"return CC_EP_NOT_ENABLED_ERROR;",
"}",
"epctx = slot->eps[epid-1];",
"if (epctx->state != EP_HALTED) {",
"fprintf(stderr, \"xhci: reset EP while EP %d not halted (%d)\\n\",\nepid, epctx->state);",
"return CC_CONTEXT_STATE_ERROR;",
"}",
"if (xhci_ep_nuke_xfers(xhci, slotid, epid) > 0) {",
"fprintf(stderr, \"xhci: FIXME: endpoint reset w/ xfers running, \"\n\"data might be lost\\n\");",
"}",
"uint8_t ep = epid>>1;",
"if (epid & 1) {",
"ep |= 0x80;",
"}",
"dev = xhci->slots[slotid-1].uport->dev;",
"if (!dev) {",
"return CC_USB_TRANSACTION_ERROR;",
"}",
"xhci_set_ep_state(xhci, epctx, NULL, EP_STOPPED);",
"if (epctx->nr_pstreams) {",
"xhci_reset_streams(epctx);",
"}",
"return CC_SUCCESS;",
"}"
] | [
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
15
],
[
17
],
[
21
],
[
23
],
[
25
],
[
27
],
[
31
],
[
35
],
[
37
],
[
39
],
[
41
],
[
45
],
[
49
],
[
51,
53
],
[
55
],
[
57
],
[
61
],
[
63,
65
],
[
67
],
[
71
],
[
75
],
[
77
],
[
79
],
[
83
],
[
85
],
[
87
],
[
89
],
[
93
],
[
97
],
[
99
],
[
101
],
[
105
],
[
107
]
] |
21,298 | static int64_t mmsh_seek(URLContext *h, int64_t pos, int whence)
{
MMSHContext *mmsh = h->priv_data;
MMSContext *mms = &mmsh->mms;
if(pos == 0 && whence == SEEK_CUR)
return mms->asf_header_read_size + mms->remaining_in_len + mmsh->chunk_seq * mms->asf_packet_len;
return AVERROR(ENOSYS);
}
| true | FFmpeg | 830f7f189f7b41221b29d40e8127cf54a140ae86 | static int64_t mmsh_seek(URLContext *h, int64_t pos, int whence)
{
MMSHContext *mmsh = h->priv_data;
MMSContext *mms = &mmsh->mms;
if(pos == 0 && whence == SEEK_CUR)
return mms->asf_header_read_size + mms->remaining_in_len + mmsh->chunk_seq * mms->asf_packet_len;
return AVERROR(ENOSYS);
}
| {
"code": [
" return mms->asf_header_read_size + mms->remaining_in_len + mmsh->chunk_seq * mms->asf_packet_len;"
],
"line_no": [
13
]
} | static int64_t FUNC_0(URLContext *h, int64_t pos, int whence)
{
MMSHContext *mmsh = h->priv_data;
MMSContext *mms = &mmsh->mms;
if(pos == 0 && whence == SEEK_CUR)
return mms->asf_header_read_size + mms->remaining_in_len + mmsh->chunk_seq * mms->asf_packet_len;
return AVERROR(ENOSYS);
}
| [
"static int64_t FUNC_0(URLContext *h, int64_t pos, int whence)\n{",
"MMSHContext *mmsh = h->priv_data;",
"MMSContext *mms = &mmsh->mms;",
"if(pos == 0 && whence == SEEK_CUR)\nreturn mms->asf_header_read_size + mms->remaining_in_len + mmsh->chunk_seq * mms->asf_packet_len;",
"return AVERROR(ENOSYS);",
"}"
] | [
0,
0,
0,
1,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
11,
13
],
[
15
],
[
17
]
] |
21,299 | static void kmvc_decode_inter_8x8(KmvcContext * ctx, const uint8_t * src, int w, int h)
{
BitBuf bb;
int res, val;
int i, j;
int bx, by;
int l0x, l1x, l0y, l1y;
int mx, my;
kmvc_init_getbits(bb, src);
for (by = 0; by < h; by += 8)
for (bx = 0; bx < w; bx += 8) {
kmvc_getbit(bb, src, res);
if (!res) {
kmvc_getbit(bb, src, res);
if (!res) { // fill whole 8x8 block
val = *src++;
for (i = 0; i < 64; i++)
BLK(ctx->cur, bx + (i & 0x7), by + (i >> 3)) = val;
} else { // copy block from previous frame
for (i = 0; i < 64; i++)
BLK(ctx->cur, bx + (i & 0x7), by + (i >> 3)) =
BLK(ctx->prev, bx + (i & 0x7), by + (i >> 3));
}
} else { // handle four 4x4 subblocks
for (i = 0; i < 4; i++) {
l0x = bx + (i & 1) * 4;
l0y = by + (i & 2) * 2;
kmvc_getbit(bb, src, res);
if (!res) {
kmvc_getbit(bb, src, res);
if (!res) { // fill whole 4x4 block
val = *src++;
for (j = 0; j < 16; j++)
BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) = val;
} else { // copy block
val = *src++;
mx = (val & 0xF) - 8;
my = (val >> 4) - 8;
for (j = 0; j < 16; j++)
BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) =
BLK(ctx->prev, l0x + (j & 3) + mx, l0y + (j >> 2) + my);
}
} else { // descend to 2x2 sub-sub-blocks
for (j = 0; j < 4; j++) {
l1x = l0x + (j & 1) * 2;
l1y = l0y + (j & 2);
kmvc_getbit(bb, src, res);
if (!res) {
kmvc_getbit(bb, src, res);
if (!res) { // fill whole 2x2 block
val = *src++;
BLK(ctx->cur, l1x, l1y) = val;
BLK(ctx->cur, l1x + 1, l1y) = val;
BLK(ctx->cur, l1x, l1y + 1) = val;
BLK(ctx->cur, l1x + 1, l1y + 1) = val;
} else { // copy block
val = *src++;
mx = (val & 0xF) - 8;
my = (val >> 4) - 8;
BLK(ctx->cur, l1x, l1y) = BLK(ctx->prev, l1x + mx, l1y + my);
BLK(ctx->cur, l1x + 1, l1y) =
BLK(ctx->prev, l1x + 1 + mx, l1y + my);
BLK(ctx->cur, l1x, l1y + 1) =
BLK(ctx->prev, l1x + mx, l1y + 1 + my);
BLK(ctx->cur, l1x + 1, l1y + 1) =
BLK(ctx->prev, l1x + 1 + mx, l1y + 1 + my);
}
} else { // read values for block
BLK(ctx->cur, l1x, l1y) = *src++;
BLK(ctx->cur, l1x + 1, l1y) = *src++;
BLK(ctx->cur, l1x, l1y + 1) = *src++;
BLK(ctx->cur, l1x + 1, l1y + 1) = *src++;
}
}
}
}
}
}
}
| true | FFmpeg | ad3161ec1d70291efcf40121d703ef73c0b08e5b | static void kmvc_decode_inter_8x8(KmvcContext * ctx, const uint8_t * src, int w, int h)
{
BitBuf bb;
int res, val;
int i, j;
int bx, by;
int l0x, l1x, l0y, l1y;
int mx, my;
kmvc_init_getbits(bb, src);
for (by = 0; by < h; by += 8)
for (bx = 0; bx < w; bx += 8) {
kmvc_getbit(bb, src, res);
if (!res) {
kmvc_getbit(bb, src, res);
if (!res) {
val = *src++;
for (i = 0; i < 64; i++)
BLK(ctx->cur, bx + (i & 0x7), by + (i >> 3)) = val;
} else {
for (i = 0; i < 64; i++)
BLK(ctx->cur, bx + (i & 0x7), by + (i >> 3)) =
BLK(ctx->prev, bx + (i & 0x7), by + (i >> 3));
}
} else {
for (i = 0; i < 4; i++) {
l0x = bx + (i & 1) * 4;
l0y = by + (i & 2) * 2;
kmvc_getbit(bb, src, res);
if (!res) {
kmvc_getbit(bb, src, res);
if (!res) {
val = *src++;
for (j = 0; j < 16; j++)
BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) = val;
} else {
val = *src++;
mx = (val & 0xF) - 8;
my = (val >> 4) - 8;
for (j = 0; j < 16; j++)
BLK(ctx->cur, l0x + (j & 3), l0y + (j >> 2)) =
BLK(ctx->prev, l0x + (j & 3) + mx, l0y + (j >> 2) + my);
}
} else {
for (j = 0; j < 4; j++) {
l1x = l0x + (j & 1) * 2;
l1y = l0y + (j & 2);
kmvc_getbit(bb, src, res);
if (!res) {
kmvc_getbit(bb, src, res);
if (!res) {
val = *src++;
BLK(ctx->cur, l1x, l1y) = val;
BLK(ctx->cur, l1x + 1, l1y) = val;
BLK(ctx->cur, l1x, l1y + 1) = val;
BLK(ctx->cur, l1x + 1, l1y + 1) = val;
} else {
val = *src++;
mx = (val & 0xF) - 8;
my = (val >> 4) - 8;
BLK(ctx->cur, l1x, l1y) = BLK(ctx->prev, l1x + mx, l1y + my);
BLK(ctx->cur, l1x + 1, l1y) =
BLK(ctx->prev, l1x + 1 + mx, l1y + my);
BLK(ctx->cur, l1x, l1y + 1) =
BLK(ctx->prev, l1x + mx, l1y + 1 + my);
BLK(ctx->cur, l1x + 1, l1y + 1) =
BLK(ctx->prev, l1x + 1 + mx, l1y + 1 + my);
}
} else {
BLK(ctx->cur, l1x, l1y) = *src++;
BLK(ctx->cur, l1x + 1, l1y) = *src++;
BLK(ctx->cur, l1x, l1y + 1) = *src++;
BLK(ctx->cur, l1x + 1, l1y + 1) = *src++;
}
}
}
}
}
}
}
| {
"code": [
" kmvc_getbit(bb, src, res);",
" kmvc_getbit(bb, src, res);",
" kmvc_getbit(bb, src, res);",
" kmvc_getbit(bb, src, res);",
" kmvc_getbit(bb, src, res);",
"static void kmvc_decode_inter_8x8(KmvcContext * ctx, const uint8_t * src, int w, int h)",
" kmvc_getbit(bb, src, res);",
" kmvc_getbit(bb, src, res);",
" kmvc_getbit(bb, src, res);",
" kmvc_getbit(bb, src, res);",
" kmvc_getbit(bb, src, res);",
" kmvc_getbit(bb, src, res);"
],
"line_no": [
27,
59,
63,
97,
101,
1,
27,
31,
59,
63,
97,
101
]
} | static void FUNC_0(KmvcContext * VAR_0, const uint8_t * VAR_1, int VAR_2, int VAR_3)
{
BitBuf bb;
int VAR_4, VAR_5;
int VAR_6, VAR_7;
int VAR_8, VAR_9;
int VAR_10, VAR_11, VAR_12, VAR_13;
int VAR_14, VAR_15;
kmvc_init_getbits(bb, VAR_1);
for (VAR_9 = 0; VAR_9 < VAR_3; VAR_9 += 8)
for (VAR_8 = 0; VAR_8 < VAR_2; VAR_8 += 8) {
kmvc_getbit(bb, VAR_1, VAR_4);
if (!VAR_4) {
kmvc_getbit(bb, VAR_1, VAR_4);
if (!VAR_4) {
VAR_5 = *VAR_1++;
for (VAR_6 = 0; VAR_6 < 64; VAR_6++)
BLK(VAR_0->cur, VAR_8 + (VAR_6 & 0x7), VAR_9 + (VAR_6 >> 3)) = VAR_5;
} else {
for (VAR_6 = 0; VAR_6 < 64; VAR_6++)
BLK(VAR_0->cur, VAR_8 + (VAR_6 & 0x7), VAR_9 + (VAR_6 >> 3)) =
BLK(VAR_0->prev, VAR_8 + (VAR_6 & 0x7), VAR_9 + (VAR_6 >> 3));
}
} else {
for (VAR_6 = 0; VAR_6 < 4; VAR_6++) {
VAR_10 = VAR_8 + (VAR_6 & 1) * 4;
VAR_12 = VAR_9 + (VAR_6 & 2) * 2;
kmvc_getbit(bb, VAR_1, VAR_4);
if (!VAR_4) {
kmvc_getbit(bb, VAR_1, VAR_4);
if (!VAR_4) {
VAR_5 = *VAR_1++;
for (VAR_7 = 0; VAR_7 < 16; VAR_7++)
BLK(VAR_0->cur, VAR_10 + (VAR_7 & 3), VAR_12 + (VAR_7 >> 2)) = VAR_5;
} else {
VAR_5 = *VAR_1++;
VAR_14 = (VAR_5 & 0xF) - 8;
VAR_15 = (VAR_5 >> 4) - 8;
for (VAR_7 = 0; VAR_7 < 16; VAR_7++)
BLK(VAR_0->cur, VAR_10 + (VAR_7 & 3), VAR_12 + (VAR_7 >> 2)) =
BLK(VAR_0->prev, VAR_10 + (VAR_7 & 3) + VAR_14, VAR_12 + (VAR_7 >> 2) + VAR_15);
}
} else {
for (VAR_7 = 0; VAR_7 < 4; VAR_7++) {
VAR_11 = VAR_10 + (VAR_7 & 1) * 2;
VAR_13 = VAR_12 + (VAR_7 & 2);
kmvc_getbit(bb, VAR_1, VAR_4);
if (!VAR_4) {
kmvc_getbit(bb, VAR_1, VAR_4);
if (!VAR_4) {
VAR_5 = *VAR_1++;
BLK(VAR_0->cur, VAR_11, VAR_13) = VAR_5;
BLK(VAR_0->cur, VAR_11 + 1, VAR_13) = VAR_5;
BLK(VAR_0->cur, VAR_11, VAR_13 + 1) = VAR_5;
BLK(VAR_0->cur, VAR_11 + 1, VAR_13 + 1) = VAR_5;
} else {
VAR_5 = *VAR_1++;
VAR_14 = (VAR_5 & 0xF) - 8;
VAR_15 = (VAR_5 >> 4) - 8;
BLK(VAR_0->cur, VAR_11, VAR_13) = BLK(VAR_0->prev, VAR_11 + VAR_14, VAR_13 + VAR_15);
BLK(VAR_0->cur, VAR_11 + 1, VAR_13) =
BLK(VAR_0->prev, VAR_11 + 1 + VAR_14, VAR_13 + VAR_15);
BLK(VAR_0->cur, VAR_11, VAR_13 + 1) =
BLK(VAR_0->prev, VAR_11 + VAR_14, VAR_13 + 1 + VAR_15);
BLK(VAR_0->cur, VAR_11 + 1, VAR_13 + 1) =
BLK(VAR_0->prev, VAR_11 + 1 + VAR_14, VAR_13 + 1 + VAR_15);
}
} else {
BLK(VAR_0->cur, VAR_11, VAR_13) = *VAR_1++;
BLK(VAR_0->cur, VAR_11 + 1, VAR_13) = *VAR_1++;
BLK(VAR_0->cur, VAR_11, VAR_13 + 1) = *VAR_1++;
BLK(VAR_0->cur, VAR_11 + 1, VAR_13 + 1) = *VAR_1++;
}
}
}
}
}
}
}
| [
"static void FUNC_0(KmvcContext * VAR_0, const uint8_t * VAR_1, int VAR_2, int VAR_3)\n{",
"BitBuf bb;",
"int VAR_4, VAR_5;",
"int VAR_6, VAR_7;",
"int VAR_8, VAR_9;",
"int VAR_10, VAR_11, VAR_12, VAR_13;",
"int VAR_14, VAR_15;",
"kmvc_init_getbits(bb, VAR_1);",
"for (VAR_9 = 0; VAR_9 < VAR_3; VAR_9 += 8)",
"for (VAR_8 = 0; VAR_8 < VAR_2; VAR_8 += 8) {",
"kmvc_getbit(bb, VAR_1, VAR_4);",
"if (!VAR_4) {",
"kmvc_getbit(bb, VAR_1, VAR_4);",
"if (!VAR_4) {",
"VAR_5 = *VAR_1++;",
"for (VAR_6 = 0; VAR_6 < 64; VAR_6++)",
"BLK(VAR_0->cur, VAR_8 + (VAR_6 & 0x7), VAR_9 + (VAR_6 >> 3)) = VAR_5;",
"} else {",
"for (VAR_6 = 0; VAR_6 < 64; VAR_6++)",
"BLK(VAR_0->cur, VAR_8 + (VAR_6 & 0x7), VAR_9 + (VAR_6 >> 3)) =\nBLK(VAR_0->prev, VAR_8 + (VAR_6 & 0x7), VAR_9 + (VAR_6 >> 3));",
"}",
"} else {",
"for (VAR_6 = 0; VAR_6 < 4; VAR_6++) {",
"VAR_10 = VAR_8 + (VAR_6 & 1) * 4;",
"VAR_12 = VAR_9 + (VAR_6 & 2) * 2;",
"kmvc_getbit(bb, VAR_1, VAR_4);",
"if (!VAR_4) {",
"kmvc_getbit(bb, VAR_1, VAR_4);",
"if (!VAR_4) {",
"VAR_5 = *VAR_1++;",
"for (VAR_7 = 0; VAR_7 < 16; VAR_7++)",
"BLK(VAR_0->cur, VAR_10 + (VAR_7 & 3), VAR_12 + (VAR_7 >> 2)) = VAR_5;",
"} else {",
"VAR_5 = *VAR_1++;",
"VAR_14 = (VAR_5 & 0xF) - 8;",
"VAR_15 = (VAR_5 >> 4) - 8;",
"for (VAR_7 = 0; VAR_7 < 16; VAR_7++)",
"BLK(VAR_0->cur, VAR_10 + (VAR_7 & 3), VAR_12 + (VAR_7 >> 2)) =\nBLK(VAR_0->prev, VAR_10 + (VAR_7 & 3) + VAR_14, VAR_12 + (VAR_7 >> 2) + VAR_15);",
"}",
"} else {",
"for (VAR_7 = 0; VAR_7 < 4; VAR_7++) {",
"VAR_11 = VAR_10 + (VAR_7 & 1) * 2;",
"VAR_13 = VAR_12 + (VAR_7 & 2);",
"kmvc_getbit(bb, VAR_1, VAR_4);",
"if (!VAR_4) {",
"kmvc_getbit(bb, VAR_1, VAR_4);",
"if (!VAR_4) {",
"VAR_5 = *VAR_1++;",
"BLK(VAR_0->cur, VAR_11, VAR_13) = VAR_5;",
"BLK(VAR_0->cur, VAR_11 + 1, VAR_13) = VAR_5;",
"BLK(VAR_0->cur, VAR_11, VAR_13 + 1) = VAR_5;",
"BLK(VAR_0->cur, VAR_11 + 1, VAR_13 + 1) = VAR_5;",
"} else {",
"VAR_5 = *VAR_1++;",
"VAR_14 = (VAR_5 & 0xF) - 8;",
"VAR_15 = (VAR_5 >> 4) - 8;",
"BLK(VAR_0->cur, VAR_11, VAR_13) = BLK(VAR_0->prev, VAR_11 + VAR_14, VAR_13 + VAR_15);",
"BLK(VAR_0->cur, VAR_11 + 1, VAR_13) =\nBLK(VAR_0->prev, VAR_11 + 1 + VAR_14, VAR_13 + VAR_15);",
"BLK(VAR_0->cur, VAR_11, VAR_13 + 1) =\nBLK(VAR_0->prev, VAR_11 + VAR_14, VAR_13 + 1 + VAR_15);",
"BLK(VAR_0->cur, VAR_11 + 1, VAR_13 + 1) =\nBLK(VAR_0->prev, VAR_11 + 1 + VAR_14, VAR_13 + 1 + VAR_15);",
"}",
"} else {",
"BLK(VAR_0->cur, VAR_11, VAR_13) = *VAR_1++;",
"BLK(VAR_0->cur, VAR_11 + 1, VAR_13) = *VAR_1++;",
"BLK(VAR_0->cur, VAR_11, VAR_13 + 1) = *VAR_1++;",
"BLK(VAR_0->cur, VAR_11 + 1, VAR_13 + 1) = *VAR_1++;",
"}",
"}",
"}",
"}",
"}",
"}",
"}"
] | [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
19
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45,
47
],
[
49
],
[
51
],
[
53
],
[
55
],
[
57
],
[
59
],
[
61
],
[
63
],
[
65
],
[
67
],
[
69
],
[
71
],
[
73
],
[
75
],
[
77
],
[
79
],
[
81
],
[
83,
85
],
[
87
],
[
89
],
[
91
],
[
93
],
[
95
],
[
97
],
[
99
],
[
101
],
[
103
],
[
105
],
[
107
],
[
109
],
[
111
],
[
113
],
[
115
],
[
117
],
[
119
],
[
121
],
[
123
],
[
125,
127
],
[
129,
131
],
[
133,
135
],
[
137
],
[
139
],
[
141
],
[
143
],
[
145
],
[
147
],
[
149
],
[
151
],
[
153
],
[
155
],
[
157
],
[
159
],
[
161
]
] |
21,300 | static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
CPUArchState *env = cpu->env_ptr;
unsigned long mmu_idx_bitmask = data.host_int;
int mmu_idx;
assert_cpu_is_self(cpu);
tb_lock();
tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
tlb_debug("%d\n", mmu_idx);
memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
}
}
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
tlb_debug("done\n");
tb_unlock();
}
| true | qemu | f3ced3c59287dabc253f83f0c70aa4934470c15e | static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
CPUArchState *env = cpu->env_ptr;
unsigned long mmu_idx_bitmask = data.host_int;
int mmu_idx;
assert_cpu_is_self(cpu);
tb_lock();
tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
tlb_debug("%d\n", mmu_idx);
memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
}
}
memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
tlb_debug("done\n");
tb_unlock();
}
| {
"code": [
" memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));",
" memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));"
],
"line_no": [
45,
45
]
} | static void FUNC_0(CPUState *VAR_0, run_on_cpu_data VAR_1)
{
CPUArchState *env = VAR_0->env_ptr;
unsigned long VAR_2 = VAR_1.host_int;
int VAR_3;
assert_cpu_is_self(VAR_0);
tb_lock();
tlb_debug("start: VAR_3:0x%04lx\n", VAR_2);
for (VAR_3 = 0; VAR_3 < NB_MMU_MODES; VAR_3++) {
if (test_bit(VAR_3, &VAR_2)) {
tlb_debug("%d\n", VAR_3);
memset(env->tlb_table[VAR_3], -1, sizeof(env->tlb_table[0]));
memset(env->tlb_v_table[VAR_3], -1, sizeof(env->tlb_v_table[0]));
}
}
memset(VAR_0->tb_jmp_cache, 0, sizeof(VAR_0->tb_jmp_cache));
tlb_debug("done\n");
tb_unlock();
}
| [
"static void FUNC_0(CPUState *VAR_0, run_on_cpu_data VAR_1)\n{",
"CPUArchState *env = VAR_0->env_ptr;",
"unsigned long VAR_2 = VAR_1.host_int;",
"int VAR_3;",
"assert_cpu_is_self(VAR_0);",
"tb_lock();",
"tlb_debug(\"start: VAR_3:0x%04lx\\n\", VAR_2);",
"for (VAR_3 = 0; VAR_3 < NB_MMU_MODES; VAR_3++) {",
"if (test_bit(VAR_3, &VAR_2)) {",
"tlb_debug(\"%d\\n\", VAR_3);",
"memset(env->tlb_table[VAR_3], -1, sizeof(env->tlb_table[0]));",
"memset(env->tlb_v_table[VAR_3], -1, sizeof(env->tlb_v_table[0]));",
"}",
"}",
"memset(VAR_0->tb_jmp_cache, 0, sizeof(VAR_0->tb_jmp_cache));",
"tlb_debug(\"done\\n\");",
"tb_unlock();",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
17
],
[
21
],
[
25
],
[
29
],
[
31
],
[
35
],
[
37
],
[
39
],
[
41
],
[
45
],
[
49
],
[
53
],
[
55
]
] |
21,301 | static int encode_hq_slice(AVCodecContext *avctx, void *arg)
{
SliceArgs *slice_dat = arg;
VC2EncContext *s = slice_dat->ctx;
PutBitContext *pb = &slice_dat->pb;
const int slice_x = slice_dat->x;
const int slice_y = slice_dat->y;
const int quant_idx = slice_dat->quant_idx;
const int slice_bytes_max = slice_dat->bytes;
uint8_t quants[MAX_DWT_LEVELS][4];
int p, level, orientation;
/* The reference decoder ignores it, and its typical length is 0 */
memset(put_bits_ptr(pb), 0, s->prefix_bytes);
skip_put_bytes(pb, s->prefix_bytes);
put_bits(pb, 8, quant_idx);
/* Slice quantization (slice_quantizers() in the specs) */
for (level = 0; level < s->wavelet_depth; level++)
for (orientation = !!level; orientation < 4; orientation++)
quants[level][orientation] = FFMAX(quant_idx - s->quant[level][orientation], 0);
/* Luma + 2 Chroma planes */
for (p = 0; p < 3; p++) {
int bytes_start, bytes_len, pad_s, pad_c;
bytes_start = put_bits_count(pb) >> 3;
put_bits(pb, 8, 0);
for (level = 0; level < s->wavelet_depth; level++) {
for (orientation = !!level; orientation < 4; orientation++) {
encode_subband(s, pb, slice_x, slice_y,
&s->plane[p].band[level][orientation],
quants[level][orientation]);
}
}
avpriv_align_put_bits(pb);
bytes_len = (put_bits_count(pb) >> 3) - bytes_start - 1;
if (p == 2) {
int len_diff = slice_bytes_max - (put_bits_count(pb) >> 3);
pad_s = FFALIGN((bytes_len + len_diff), s->size_scaler)/s->size_scaler;
pad_c = (pad_s*s->size_scaler) - bytes_len;
} else {
pad_s = FFALIGN(bytes_len, s->size_scaler)/s->size_scaler;
pad_c = (pad_s*s->size_scaler) - bytes_len;
}
pb->buf[bytes_start] = pad_s;
flush_put_bits(pb);
skip_put_bytes(pb, pad_c);
}
return 0;
} | true | FFmpeg | 9c1aa14bf0b88da9f91dc114519e725cbd69180e | static int encode_hq_slice(AVCodecContext *avctx, void *arg)
{
SliceArgs *slice_dat = arg;
VC2EncContext *s = slice_dat->ctx;
PutBitContext *pb = &slice_dat->pb;
const int slice_x = slice_dat->x;
const int slice_y = slice_dat->y;
const int quant_idx = slice_dat->quant_idx;
const int slice_bytes_max = slice_dat->bytes;
uint8_t quants[MAX_DWT_LEVELS][4];
int p, level, orientation;
memset(put_bits_ptr(pb), 0, s->prefix_bytes);
skip_put_bytes(pb, s->prefix_bytes);
put_bits(pb, 8, quant_idx);
for (level = 0; level < s->wavelet_depth; level++)
for (orientation = !!level; orientation < 4; orientation++)
quants[level][orientation] = FFMAX(quant_idx - s->quant[level][orientation], 0);
for (p = 0; p < 3; p++) {
int bytes_start, bytes_len, pad_s, pad_c;
bytes_start = put_bits_count(pb) >> 3;
put_bits(pb, 8, 0);
for (level = 0; level < s->wavelet_depth; level++) {
for (orientation = !!level; orientation < 4; orientation++) {
encode_subband(s, pb, slice_x, slice_y,
&s->plane[p].band[level][orientation],
quants[level][orientation]);
}
}
avpriv_align_put_bits(pb);
bytes_len = (put_bits_count(pb) >> 3) - bytes_start - 1;
if (p == 2) {
int len_diff = slice_bytes_max - (put_bits_count(pb) >> 3);
pad_s = FFALIGN((bytes_len + len_diff), s->size_scaler)/s->size_scaler;
pad_c = (pad_s*s->size_scaler) - bytes_len;
} else {
pad_s = FFALIGN(bytes_len, s->size_scaler)/s->size_scaler;
pad_c = (pad_s*s->size_scaler) - bytes_len;
}
pb->buf[bytes_start] = pad_s;
flush_put_bits(pb);
skip_put_bytes(pb, pad_c);
}
return 0;
} | {
"code": [],
"line_no": []
} | static int FUNC_0(AVCodecContext *VAR_0, void *VAR_1)
{
SliceArgs *slice_dat = VAR_1;
VC2EncContext *s = slice_dat->ctx;
PutBitContext *pb = &slice_dat->pb;
const int VAR_2 = slice_dat->x;
const int VAR_3 = slice_dat->y;
const int VAR_4 = slice_dat->VAR_4;
const int VAR_5 = slice_dat->bytes;
uint8_t quants[MAX_DWT_LEVELS][4];
int VAR_6, VAR_7, VAR_8;
memset(put_bits_ptr(pb), 0, s->prefix_bytes);
skip_put_bytes(pb, s->prefix_bytes);
put_bits(pb, 8, VAR_4);
for (VAR_7 = 0; VAR_7 < s->wavelet_depth; VAR_7++)
for (VAR_8 = !!VAR_7; VAR_8 < 4; VAR_8++)
quants[VAR_7][VAR_8] = FFMAX(VAR_4 - s->quant[VAR_7][VAR_8], 0);
for (VAR_6 = 0; VAR_6 < 3; VAR_6++) {
int VAR_9, VAR_10, VAR_11, VAR_12;
VAR_9 = put_bits_count(pb) >> 3;
put_bits(pb, 8, 0);
for (VAR_7 = 0; VAR_7 < s->wavelet_depth; VAR_7++) {
for (VAR_8 = !!VAR_7; VAR_8 < 4; VAR_8++) {
encode_subband(s, pb, VAR_2, VAR_3,
&s->plane[VAR_6].band[VAR_7][VAR_8],
quants[VAR_7][VAR_8]);
}
}
avpriv_align_put_bits(pb);
VAR_10 = (put_bits_count(pb) >> 3) - VAR_9 - 1;
if (VAR_6 == 2) {
int VAR_13 = VAR_5 - (put_bits_count(pb) >> 3);
VAR_11 = FFALIGN((VAR_10 + VAR_13), s->size_scaler)/s->size_scaler;
VAR_12 = (VAR_11*s->size_scaler) - VAR_10;
} else {
VAR_11 = FFALIGN(VAR_10, s->size_scaler)/s->size_scaler;
VAR_12 = (VAR_11*s->size_scaler) - VAR_10;
}
pb->buf[VAR_9] = VAR_11;
flush_put_bits(pb);
skip_put_bytes(pb, VAR_12);
}
return 0;
} | [
"static int FUNC_0(AVCodecContext *VAR_0, void *VAR_1)\n{",
"SliceArgs *slice_dat = VAR_1;",
"VC2EncContext *s = slice_dat->ctx;",
"PutBitContext *pb = &slice_dat->pb;",
"const int VAR_2 = slice_dat->x;",
"const int VAR_3 = slice_dat->y;",
"const int VAR_4 = slice_dat->VAR_4;",
"const int VAR_5 = slice_dat->bytes;",
"uint8_t quants[MAX_DWT_LEVELS][4];",
"int VAR_6, VAR_7, VAR_8;",
"memset(put_bits_ptr(pb), 0, s->prefix_bytes);",
"skip_put_bytes(pb, s->prefix_bytes);",
"put_bits(pb, 8, VAR_4);",
"for (VAR_7 = 0; VAR_7 < s->wavelet_depth; VAR_7++)",
"for (VAR_8 = !!VAR_7; VAR_8 < 4; VAR_8++)",
"quants[VAR_7][VAR_8] = FFMAX(VAR_4 - s->quant[VAR_7][VAR_8], 0);",
"for (VAR_6 = 0; VAR_6 < 3; VAR_6++) {",
"int VAR_9, VAR_10, VAR_11, VAR_12;",
"VAR_9 = put_bits_count(pb) >> 3;",
"put_bits(pb, 8, 0);",
"for (VAR_7 = 0; VAR_7 < s->wavelet_depth; VAR_7++) {",
"for (VAR_8 = !!VAR_7; VAR_8 < 4; VAR_8++) {",
"encode_subband(s, pb, VAR_2, VAR_3,\n&s->plane[VAR_6].band[VAR_7][VAR_8],\nquants[VAR_7][VAR_8]);",
"}",
"}",
"avpriv_align_put_bits(pb);",
"VAR_10 = (put_bits_count(pb) >> 3) - VAR_9 - 1;",
"if (VAR_6 == 2) {",
"int VAR_13 = VAR_5 - (put_bits_count(pb) >> 3);",
"VAR_11 = FFALIGN((VAR_10 + VAR_13), s->size_scaler)/s->size_scaler;",
"VAR_12 = (VAR_11*s->size_scaler) - VAR_10;",
"} else {",
"VAR_11 = FFALIGN(VAR_10, s->size_scaler)/s->size_scaler;",
"VAR_12 = (VAR_11*s->size_scaler) - VAR_10;",
"}",
"pb->buf[VAR_9] = VAR_11;",
"flush_put_bits(pb);",
"skip_put_bytes(pb, VAR_12);",
"}",
"return 0;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
27
],
[
29
],
[
33
],
[
39
],
[
41
],
[
43
],
[
49
],
[
51
],
[
53
],
[
55
],
[
57
],
[
59
],
[
61,
63,
65
],
[
67
],
[
69
],
[
71
],
[
73
],
[
75
],
[
77
],
[
79
],
[
81
],
[
83
],
[
85
],
[
87
],
[
89
],
[
91
],
[
93
],
[
97
],
[
99
],
[
103
],
[
105
]
] |
21,303 | int ff_rtsp_setup_output_streams(AVFormatContext *s, const char *addr)
{
RTSPState *rt = s->priv_data;
RTSPMessageHeader reply1, *reply = &reply1;
int i;
char *sdp;
AVFormatContext sdp_ctx, *ctx_array[1];
s->start_time_realtime = av_gettime();
/* Announce the stream */
sdp = av_mallocz(SDP_MAX_SIZE);
if (sdp == NULL)
return AVERROR(ENOMEM);
/* We create the SDP based on the RTSP AVFormatContext where we
* aren't allowed to change the filename field. (We create the SDP
* based on the RTSP context since the contexts for the RTP streams
* don't exist yet.) In order to specify a custom URL with the actual
* peer IP instead of the originally specified hostname, we create
* a temporary copy of the AVFormatContext, where the custom URL is set.
*
* FIXME: Create the SDP without copying the AVFormatContext.
* This either requires setting up the RTP stream AVFormatContexts
* already here (complicating things immensely) or getting a more
* flexible SDP creation interface.
*/
sdp_ctx = *s;
ff_url_join(sdp_ctx.filename, sizeof(sdp_ctx.filename),
"rtsp", NULL, addr, -1, NULL);
ctx_array[0] = &sdp_ctx;
if (avf_sdp_create(ctx_array, 1, sdp, SDP_MAX_SIZE)) {
av_free(sdp);
return AVERROR_INVALIDDATA;
}
av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sdp);
ff_rtsp_send_cmd_with_content(s, "ANNOUNCE", rt->control_uri,
"Content-Type: application/sdp\r\n",
reply, NULL, sdp, strlen(sdp));
av_free(sdp);
if (reply->status_code != RTSP_STATUS_OK)
return AVERROR_INVALIDDATA;
/* Set up the RTSPStreams for each AVStream */
for (i = 0; i < s->nb_streams; i++) {
RTSPStream *rtsp_st;
AVStream *st = s->streams[i];
rtsp_st = av_mallocz(sizeof(RTSPStream));
if (!rtsp_st)
return AVERROR(ENOMEM);
dynarray_add(&rt->rtsp_streams, &rt->nb_rtsp_streams, rtsp_st);
st->priv_data = rtsp_st;
rtsp_st->stream_index = i;
av_strlcpy(rtsp_st->control_url, rt->control_uri, sizeof(rtsp_st->control_url));
/* Note, this must match the relative uri set in the sdp content */
av_strlcatf(rtsp_st->control_url, sizeof(rtsp_st->control_url),
"/streamid=%d", i);
}
return 0;
}
| true | FFmpeg | d9c0510e22821baa364306d867ffac45da0620c8 | int ff_rtsp_setup_output_streams(AVFormatContext *s, const char *addr)
{
RTSPState *rt = s->priv_data;
RTSPMessageHeader reply1, *reply = &reply1;
int i;
char *sdp;
AVFormatContext sdp_ctx, *ctx_array[1];
s->start_time_realtime = av_gettime();
sdp = av_mallocz(SDP_MAX_SIZE);
if (sdp == NULL)
return AVERROR(ENOMEM);
sdp_ctx = *s;
ff_url_join(sdp_ctx.filename, sizeof(sdp_ctx.filename),
"rtsp", NULL, addr, -1, NULL);
ctx_array[0] = &sdp_ctx;
if (avf_sdp_create(ctx_array, 1, sdp, SDP_MAX_SIZE)) {
av_free(sdp);
return AVERROR_INVALIDDATA;
}
av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sdp);
ff_rtsp_send_cmd_with_content(s, "ANNOUNCE", rt->control_uri,
"Content-Type: application/sdp\r\n",
reply, NULL, sdp, strlen(sdp));
av_free(sdp);
if (reply->status_code != RTSP_STATUS_OK)
return AVERROR_INVALIDDATA;
for (i = 0; i < s->nb_streams; i++) {
RTSPStream *rtsp_st;
AVStream *st = s->streams[i];
rtsp_st = av_mallocz(sizeof(RTSPStream));
if (!rtsp_st)
return AVERROR(ENOMEM);
dynarray_add(&rt->rtsp_streams, &rt->nb_rtsp_streams, rtsp_st);
st->priv_data = rtsp_st;
rtsp_st->stream_index = i;
av_strlcpy(rtsp_st->control_url, rt->control_uri, sizeof(rtsp_st->control_url));
av_strlcatf(rtsp_st->control_url, sizeof(rtsp_st->control_url),
"/streamid=%d", i);
}
return 0;
}
| {
"code": [
" AVStream *st = s->streams[i];",
" st->priv_data = rtsp_st;"
],
"line_no": [
91,
105
]
} | int FUNC_0(AVFormatContext *VAR_0, const char *VAR_1)
{
RTSPState *rt = VAR_0->priv_data;
RTSPMessageHeader reply1, *reply = &reply1;
int VAR_2;
char *VAR_3;
AVFormatContext sdp_ctx, *ctx_array[1];
VAR_0->start_time_realtime = av_gettime();
VAR_3 = av_mallocz(SDP_MAX_SIZE);
if (VAR_3 == NULL)
return AVERROR(ENOMEM);
sdp_ctx = *VAR_0;
ff_url_join(sdp_ctx.filename, sizeof(sdp_ctx.filename),
"rtsp", NULL, VAR_1, -1, NULL);
ctx_array[0] = &sdp_ctx;
if (avf_sdp_create(ctx_array, 1, VAR_3, SDP_MAX_SIZE)) {
av_free(VAR_3);
return AVERROR_INVALIDDATA;
}
av_log(VAR_0, AV_LOG_VERBOSE, "SDP:\n%VAR_0\n", VAR_3);
ff_rtsp_send_cmd_with_content(VAR_0, "ANNOUNCE", rt->control_uri,
"Content-Type: application/VAR_3\r\n",
reply, NULL, VAR_3, strlen(VAR_3));
av_free(VAR_3);
if (reply->status_code != RTSP_STATUS_OK)
return AVERROR_INVALIDDATA;
for (VAR_2 = 0; VAR_2 < VAR_0->nb_streams; VAR_2++) {
RTSPStream *rtsp_st;
AVStream *st = VAR_0->streams[VAR_2];
rtsp_st = av_mallocz(sizeof(RTSPStream));
if (!rtsp_st)
return AVERROR(ENOMEM);
dynarray_add(&rt->rtsp_streams, &rt->nb_rtsp_streams, rtsp_st);
st->priv_data = rtsp_st;
rtsp_st->stream_index = VAR_2;
av_strlcpy(rtsp_st->control_url, rt->control_uri, sizeof(rtsp_st->control_url));
av_strlcatf(rtsp_st->control_url, sizeof(rtsp_st->control_url),
"/streamid=%d", VAR_2);
}
return 0;
}
| [
"int FUNC_0(AVFormatContext *VAR_0, const char *VAR_1)\n{",
"RTSPState *rt = VAR_0->priv_data;",
"RTSPMessageHeader reply1, *reply = &reply1;",
"int VAR_2;",
"char *VAR_3;",
"AVFormatContext sdp_ctx, *ctx_array[1];",
"VAR_0->start_time_realtime = av_gettime();",
"VAR_3 = av_mallocz(SDP_MAX_SIZE);",
"if (VAR_3 == NULL)\nreturn AVERROR(ENOMEM);",
"sdp_ctx = *VAR_0;",
"ff_url_join(sdp_ctx.filename, sizeof(sdp_ctx.filename),\n\"rtsp\", NULL, VAR_1, -1, NULL);",
"ctx_array[0] = &sdp_ctx;",
"if (avf_sdp_create(ctx_array, 1, VAR_3, SDP_MAX_SIZE)) {",
"av_free(VAR_3);",
"return AVERROR_INVALIDDATA;",
"}",
"av_log(VAR_0, AV_LOG_VERBOSE, \"SDP:\\n%VAR_0\\n\", VAR_3);",
"ff_rtsp_send_cmd_with_content(VAR_0, \"ANNOUNCE\", rt->control_uri,\n\"Content-Type: application/VAR_3\\r\\n\",\nreply, NULL, VAR_3, strlen(VAR_3));",
"av_free(VAR_3);",
"if (reply->status_code != RTSP_STATUS_OK)\nreturn AVERROR_INVALIDDATA;",
"for (VAR_2 = 0; VAR_2 < VAR_0->nb_streams; VAR_2++) {",
"RTSPStream *rtsp_st;",
"AVStream *st = VAR_0->streams[VAR_2];",
"rtsp_st = av_mallocz(sizeof(RTSPStream));",
"if (!rtsp_st)\nreturn AVERROR(ENOMEM);",
"dynarray_add(&rt->rtsp_streams, &rt->nb_rtsp_streams, rtsp_st);",
"st->priv_data = rtsp_st;",
"rtsp_st->stream_index = VAR_2;",
"av_strlcpy(rtsp_st->control_url, rt->control_uri, sizeof(rtsp_st->control_url));",
"av_strlcatf(rtsp_st->control_url, sizeof(rtsp_st->control_url),\n\"/streamid=%d\", VAR_2);",
"}",
"return 0;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
17
],
[
23
],
[
25,
27
],
[
53
],
[
55,
57
],
[
59
],
[
61
],
[
63
],
[
65
],
[
67
],
[
69
],
[
71,
73,
75
],
[
77
],
[
79,
81
],
[
87
],
[
89
],
[
91
],
[
95
],
[
97,
99
],
[
101
],
[
105
],
[
107
],
[
111
],
[
115,
117
],
[
119
],
[
123
],
[
125
]
] |
21,304 | static int smjpeg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
SMJPEGContext *sc = s->priv_data;
uint32_t dtype, ret, size, timestamp;
int64_t pos;
if (s->pb->eof_reached)
return AVERROR_EOF;
pos = avio_tell(s->pb);
dtype = avio_rl32(s->pb);
switch (dtype) {
case SMJPEG_SNDD:
timestamp = avio_rb32(s->pb);
size = avio_rb32(s->pb);
ret = av_get_packet(s->pb, pkt, size);
pkt->stream_index = sc->audio_stream_index;
pkt->pts = timestamp;
pkt->pos = pos;
break;
case SMJPEG_VIDD:
timestamp = avio_rb32(s->pb);
size = avio_rb32(s->pb);
ret = av_get_packet(s->pb, pkt, size);
pkt->stream_index = sc->video_stream_index;
pkt->pts = timestamp;
pkt->pos = pos;
break;
case SMJPEG_DONE:
ret = AVERROR_EOF;
break;
default:
av_log(s, AV_LOG_ERROR, "unknown chunk %x\n", dtype);
ret = AVERROR_INVALIDDATA;
break;
}
return ret;
}
| false | FFmpeg | 366484fff1720977b8591e3a90fbef9f4885e53c | static int smjpeg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
SMJPEGContext *sc = s->priv_data;
uint32_t dtype, ret, size, timestamp;
int64_t pos;
if (s->pb->eof_reached)
return AVERROR_EOF;
pos = avio_tell(s->pb);
dtype = avio_rl32(s->pb);
switch (dtype) {
case SMJPEG_SNDD:
timestamp = avio_rb32(s->pb);
size = avio_rb32(s->pb);
ret = av_get_packet(s->pb, pkt, size);
pkt->stream_index = sc->audio_stream_index;
pkt->pts = timestamp;
pkt->pos = pos;
break;
case SMJPEG_VIDD:
timestamp = avio_rb32(s->pb);
size = avio_rb32(s->pb);
ret = av_get_packet(s->pb, pkt, size);
pkt->stream_index = sc->video_stream_index;
pkt->pts = timestamp;
pkt->pos = pos;
break;
case SMJPEG_DONE:
ret = AVERROR_EOF;
break;
default:
av_log(s, AV_LOG_ERROR, "unknown chunk %x\n", dtype);
ret = AVERROR_INVALIDDATA;
break;
}
return ret;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(AVFormatContext *VAR_0, AVPacket *VAR_1)
{
SMJPEGContext *sc = VAR_0->priv_data;
uint32_t dtype, ret, size, timestamp;
int64_t pos;
if (VAR_0->pb->eof_reached)
return AVERROR_EOF;
pos = avio_tell(VAR_0->pb);
dtype = avio_rl32(VAR_0->pb);
switch (dtype) {
case SMJPEG_SNDD:
timestamp = avio_rb32(VAR_0->pb);
size = avio_rb32(VAR_0->pb);
ret = av_get_packet(VAR_0->pb, VAR_1, size);
VAR_1->stream_index = sc->audio_stream_index;
VAR_1->pts = timestamp;
VAR_1->pos = pos;
break;
case SMJPEG_VIDD:
timestamp = avio_rb32(VAR_0->pb);
size = avio_rb32(VAR_0->pb);
ret = av_get_packet(VAR_0->pb, VAR_1, size);
VAR_1->stream_index = sc->video_stream_index;
VAR_1->pts = timestamp;
VAR_1->pos = pos;
break;
case SMJPEG_DONE:
ret = AVERROR_EOF;
break;
default:
av_log(VAR_0, AV_LOG_ERROR, "unknown chunk %x\n", dtype);
ret = AVERROR_INVALIDDATA;
break;
}
return ret;
}
| [
"static int FUNC_0(AVFormatContext *VAR_0, AVPacket *VAR_1)\n{",
"SMJPEGContext *sc = VAR_0->priv_data;",
"uint32_t dtype, ret, size, timestamp;",
"int64_t pos;",
"if (VAR_0->pb->eof_reached)\nreturn AVERROR_EOF;",
"pos = avio_tell(VAR_0->pb);",
"dtype = avio_rl32(VAR_0->pb);",
"switch (dtype) {",
"case SMJPEG_SNDD:\ntimestamp = avio_rb32(VAR_0->pb);",
"size = avio_rb32(VAR_0->pb);",
"ret = av_get_packet(VAR_0->pb, VAR_1, size);",
"VAR_1->stream_index = sc->audio_stream_index;",
"VAR_1->pts = timestamp;",
"VAR_1->pos = pos;",
"break;",
"case SMJPEG_VIDD:\ntimestamp = avio_rb32(VAR_0->pb);",
"size = avio_rb32(VAR_0->pb);",
"ret = av_get_packet(VAR_0->pb, VAR_1, size);",
"VAR_1->stream_index = sc->video_stream_index;",
"VAR_1->pts = timestamp;",
"VAR_1->pos = pos;",
"break;",
"case SMJPEG_DONE:\nret = AVERROR_EOF;",
"break;",
"default:\nav_log(VAR_0, AV_LOG_ERROR, \"unknown chunk %x\\n\", dtype);",
"ret = AVERROR_INVALIDDATA;",
"break;",
"}",
"return ret;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13,
15
],
[
17
],
[
19
],
[
21
],
[
23,
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39,
41
],
[
43
],
[
45
],
[
47
],
[
49
],
[
51
],
[
53
],
[
55,
57
],
[
59
],
[
61,
63
],
[
65
],
[
67
],
[
69
],
[
71
],
[
73
]
] |
21,305 | static void apic_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | static void apic_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(void *VAR_0, target_phys_addr_t VAR_1, uint32_t VAR_2)
{
}
| [
"static void FUNC_0(void *VAR_0, target_phys_addr_t VAR_1, uint32_t VAR_2)\n{",
"}"
] | [
0,
0
] | [
[
1,
3
],
[
5
]
] |
21,307 | static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
int flags)
{
unsigned int e1, e2;
uint32_t *p;
e1 = (addr << 16) | (limit & 0xffff);
e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
e2 |= flags;
p = ptr;
p[0] = tswapl(e1);
p[1] = tswapl(e2);
}
| false | qemu | d538e8f50d89a66ae14a2cf351d2e0e5365d463b | static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
int flags)
{
unsigned int e1, e2;
uint32_t *p;
e1 = (addr << 16) | (limit & 0xffff);
e2 = ((addr >> 16) & 0xff) | (addr & 0xff000000) | (limit & 0x000f0000);
e2 |= flags;
p = ptr;
p[0] = tswapl(e1);
p[1] = tswapl(e2);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(void *VAR_0, unsigned long VAR_1, unsigned long VAR_2,
int VAR_3)
{
unsigned int VAR_4, VAR_5;
uint32_t *p;
VAR_4 = (VAR_1 << 16) | (VAR_2 & 0xffff);
VAR_5 = ((VAR_1 >> 16) & 0xff) | (VAR_1 & 0xff000000) | (VAR_2 & 0x000f0000);
VAR_5 |= VAR_3;
p = VAR_0;
p[0] = tswapl(VAR_4);
p[1] = tswapl(VAR_5);
}
| [
"static void FUNC_0(void *VAR_0, unsigned long VAR_1, unsigned long VAR_2,\nint VAR_3)\n{",
"unsigned int VAR_4, VAR_5;",
"uint32_t *p;",
"VAR_4 = (VAR_1 << 16) | (VAR_2 & 0xffff);",
"VAR_5 = ((VAR_1 >> 16) & 0xff) | (VAR_1 & 0xff000000) | (VAR_2 & 0x000f0000);",
"VAR_5 |= VAR_3;",
"p = VAR_0;",
"p[0] = tswapl(VAR_4);",
"p[1] = tswapl(VAR_5);",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
]
] |
21,308 | static int vdi_co_write(BlockDriverState *bs,
int64_t sector_num, const uint8_t *buf, int nb_sectors)
{
BDRVVdiState *s = bs->opaque;
uint32_t bmap_entry;
uint32_t block_index;
uint32_t sector_in_block;
uint32_t n_sectors;
uint32_t bmap_first = VDI_UNALLOCATED;
uint32_t bmap_last = VDI_UNALLOCATED;
uint8_t *block = NULL;
int ret;
logout("\n");
restart:
block_index = sector_num / s->block_sectors;
sector_in_block = sector_num % s->block_sectors;
n_sectors = s->block_sectors - sector_in_block;
if (n_sectors > nb_sectors) {
n_sectors = nb_sectors;
}
logout("will write %u sectors starting at sector %" PRIu64 "\n",
n_sectors, sector_num);
/* prepare next AIO request */
bmap_entry = le32_to_cpu(s->bmap[block_index]);
if (!VDI_IS_ALLOCATED(bmap_entry)) {
/* Allocate new block and write to it. */
uint64_t offset;
bmap_entry = s->header.blocks_allocated;
s->bmap[block_index] = cpu_to_le32(bmap_entry);
s->header.blocks_allocated++;
offset = s->header.offset_data / SECTOR_SIZE +
(uint64_t)bmap_entry * s->block_sectors;
if (block == NULL) {
block = g_malloc(s->block_size);
bmap_first = block_index;
}
bmap_last = block_index;
/* Copy data to be written to new block and zero unused parts. */
memset(block, 0, sector_in_block * SECTOR_SIZE);
memcpy(block + sector_in_block * SECTOR_SIZE,
buf, n_sectors * SECTOR_SIZE);
memset(block + (sector_in_block + n_sectors) * SECTOR_SIZE, 0,
(s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
ret = bdrv_write(bs->file, offset, block, s->block_sectors);
} else {
uint64_t offset = s->header.offset_data / SECTOR_SIZE +
(uint64_t)bmap_entry * s->block_sectors +
sector_in_block;
ret = bdrv_write(bs->file, offset, buf, n_sectors);
}
nb_sectors -= n_sectors;
sector_num += n_sectors;
buf += n_sectors * SECTOR_SIZE;
logout("%u sectors written\n", n_sectors);
if (ret >= 0 && nb_sectors > 0) {
goto restart;
}
logout("finished data write\n");
if (ret < 0) {
return ret;
}
if (block) {
/* One or more new blocks were allocated. */
VdiHeader *header = (VdiHeader *) block;
uint8_t *base;
uint64_t offset;
logout("now writing modified header\n");
assert(VDI_IS_ALLOCATED(bmap_first));
*header = s->header;
vdi_header_to_le(header);
ret = bdrv_write(bs->file, 0, block, 1);
g_free(block);
block = NULL;
if (ret < 0) {
return ret;
}
logout("now writing modified block map entry %u...%u\n",
bmap_first, bmap_last);
/* Write modified sectors from block map. */
bmap_first /= (SECTOR_SIZE / sizeof(uint32_t));
bmap_last /= (SECTOR_SIZE / sizeof(uint32_t));
n_sectors = bmap_last - bmap_first + 1;
offset = s->bmap_sector + bmap_first;
base = ((uint8_t *)&s->bmap[0]) + bmap_first * SECTOR_SIZE;
logout("will write %u block map sectors starting from entry %u\n",
n_sectors, bmap_first);
ret = bdrv_write(bs->file, offset, base, n_sectors);
}
return ret;
}
| false | qemu | eb9566d13e30dd7e20d978632a13915cbdb9a668 | static int vdi_co_write(BlockDriverState *bs,
int64_t sector_num, const uint8_t *buf, int nb_sectors)
{
BDRVVdiState *s = bs->opaque;
uint32_t bmap_entry;
uint32_t block_index;
uint32_t sector_in_block;
uint32_t n_sectors;
uint32_t bmap_first = VDI_UNALLOCATED;
uint32_t bmap_last = VDI_UNALLOCATED;
uint8_t *block = NULL;
int ret;
logout("\n");
restart:
block_index = sector_num / s->block_sectors;
sector_in_block = sector_num % s->block_sectors;
n_sectors = s->block_sectors - sector_in_block;
if (n_sectors > nb_sectors) {
n_sectors = nb_sectors;
}
logout("will write %u sectors starting at sector %" PRIu64 "\n",
n_sectors, sector_num);
bmap_entry = le32_to_cpu(s->bmap[block_index]);
if (!VDI_IS_ALLOCATED(bmap_entry)) {
uint64_t offset;
bmap_entry = s->header.blocks_allocated;
s->bmap[block_index] = cpu_to_le32(bmap_entry);
s->header.blocks_allocated++;
offset = s->header.offset_data / SECTOR_SIZE +
(uint64_t)bmap_entry * s->block_sectors;
if (block == NULL) {
block = g_malloc(s->block_size);
bmap_first = block_index;
}
bmap_last = block_index;
memset(block, 0, sector_in_block * SECTOR_SIZE);
memcpy(block + sector_in_block * SECTOR_SIZE,
buf, n_sectors * SECTOR_SIZE);
memset(block + (sector_in_block + n_sectors) * SECTOR_SIZE, 0,
(s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
ret = bdrv_write(bs->file, offset, block, s->block_sectors);
} else {
uint64_t offset = s->header.offset_data / SECTOR_SIZE +
(uint64_t)bmap_entry * s->block_sectors +
sector_in_block;
ret = bdrv_write(bs->file, offset, buf, n_sectors);
}
nb_sectors -= n_sectors;
sector_num += n_sectors;
buf += n_sectors * SECTOR_SIZE;
logout("%u sectors written\n", n_sectors);
if (ret >= 0 && nb_sectors > 0) {
goto restart;
}
logout("finished data write\n");
if (ret < 0) {
return ret;
}
if (block) {
VdiHeader *header = (VdiHeader *) block;
uint8_t *base;
uint64_t offset;
logout("now writing modified header\n");
assert(VDI_IS_ALLOCATED(bmap_first));
*header = s->header;
vdi_header_to_le(header);
ret = bdrv_write(bs->file, 0, block, 1);
g_free(block);
block = NULL;
if (ret < 0) {
return ret;
}
logout("now writing modified block map entry %u...%u\n",
bmap_first, bmap_last);
bmap_first /= (SECTOR_SIZE / sizeof(uint32_t));
bmap_last /= (SECTOR_SIZE / sizeof(uint32_t));
n_sectors = bmap_last - bmap_first + 1;
offset = s->bmap_sector + bmap_first;
base = ((uint8_t *)&s->bmap[0]) + bmap_first * SECTOR_SIZE;
logout("will write %u block map sectors starting from entry %u\n",
n_sectors, bmap_first);
ret = bdrv_write(bs->file, offset, base, n_sectors);
}
return ret;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(BlockDriverState *VAR_0,
int64_t VAR_1, const uint8_t *VAR_2, int VAR_3)
{
BDRVVdiState *s = VAR_0->opaque;
uint32_t bmap_entry;
uint32_t block_index;
uint32_t sector_in_block;
uint32_t n_sectors;
uint32_t bmap_first = VDI_UNALLOCATED;
uint32_t bmap_last = VDI_UNALLOCATED;
uint8_t *block = NULL;
int VAR_4;
logout("\n");
restart:
block_index = VAR_1 / s->block_sectors;
sector_in_block = VAR_1 % s->block_sectors;
n_sectors = s->block_sectors - sector_in_block;
if (n_sectors > VAR_3) {
n_sectors = VAR_3;
}
logout("will write %u sectors starting at sector %" PRIu64 "\n",
n_sectors, VAR_1);
bmap_entry = le32_to_cpu(s->bmap[block_index]);
if (!VDI_IS_ALLOCATED(bmap_entry)) {
uint64_t offset;
bmap_entry = s->header.blocks_allocated;
s->bmap[block_index] = cpu_to_le32(bmap_entry);
s->header.blocks_allocated++;
offset = s->header.offset_data / SECTOR_SIZE +
(uint64_t)bmap_entry * s->block_sectors;
if (block == NULL) {
block = g_malloc(s->block_size);
bmap_first = block_index;
}
bmap_last = block_index;
memset(block, 0, sector_in_block * SECTOR_SIZE);
memcpy(block + sector_in_block * SECTOR_SIZE,
VAR_2, n_sectors * SECTOR_SIZE);
memset(block + (sector_in_block + n_sectors) * SECTOR_SIZE, 0,
(s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
VAR_4 = bdrv_write(VAR_0->file, offset, block, s->block_sectors);
} else {
uint64_t offset = s->header.offset_data / SECTOR_SIZE +
(uint64_t)bmap_entry * s->block_sectors +
sector_in_block;
VAR_4 = bdrv_write(VAR_0->file, offset, VAR_2, n_sectors);
}
VAR_3 -= n_sectors;
VAR_1 += n_sectors;
VAR_2 += n_sectors * SECTOR_SIZE;
logout("%u sectors written\n", n_sectors);
if (VAR_4 >= 0 && VAR_3 > 0) {
goto restart;
}
logout("finished data write\n");
if (VAR_4 < 0) {
return VAR_4;
}
if (block) {
VdiHeader *header = (VdiHeader *) block;
uint8_t *base;
uint64_t offset;
logout("now writing modified header\n");
assert(VDI_IS_ALLOCATED(bmap_first));
*header = s->header;
vdi_header_to_le(header);
VAR_4 = bdrv_write(VAR_0->file, 0, block, 1);
g_free(block);
block = NULL;
if (VAR_4 < 0) {
return VAR_4;
}
logout("now writing modified block map entry %u...%u\n",
bmap_first, bmap_last);
bmap_first /= (SECTOR_SIZE / sizeof(uint32_t));
bmap_last /= (SECTOR_SIZE / sizeof(uint32_t));
n_sectors = bmap_last - bmap_first + 1;
offset = s->bmap_sector + bmap_first;
base = ((uint8_t *)&s->bmap[0]) + bmap_first * SECTOR_SIZE;
logout("will write %u block map sectors starting from entry %u\n",
n_sectors, bmap_first);
VAR_4 = bdrv_write(VAR_0->file, offset, base, n_sectors);
}
return VAR_4;
}
| [
"static int FUNC_0(BlockDriverState *VAR_0,\nint64_t VAR_1, const uint8_t *VAR_2, int VAR_3)\n{",
"BDRVVdiState *s = VAR_0->opaque;",
"uint32_t bmap_entry;",
"uint32_t block_index;",
"uint32_t sector_in_block;",
"uint32_t n_sectors;",
"uint32_t bmap_first = VDI_UNALLOCATED;",
"uint32_t bmap_last = VDI_UNALLOCATED;",
"uint8_t *block = NULL;",
"int VAR_4;",
"logout(\"\\n\");",
"restart:\nblock_index = VAR_1 / s->block_sectors;",
"sector_in_block = VAR_1 % s->block_sectors;",
"n_sectors = s->block_sectors - sector_in_block;",
"if (n_sectors > VAR_3) {",
"n_sectors = VAR_3;",
"}",
"logout(\"will write %u sectors starting at sector %\" PRIu64 \"\\n\",\nn_sectors, VAR_1);",
"bmap_entry = le32_to_cpu(s->bmap[block_index]);",
"if (!VDI_IS_ALLOCATED(bmap_entry)) {",
"uint64_t offset;",
"bmap_entry = s->header.blocks_allocated;",
"s->bmap[block_index] = cpu_to_le32(bmap_entry);",
"s->header.blocks_allocated++;",
"offset = s->header.offset_data / SECTOR_SIZE +\n(uint64_t)bmap_entry * s->block_sectors;",
"if (block == NULL) {",
"block = g_malloc(s->block_size);",
"bmap_first = block_index;",
"}",
"bmap_last = block_index;",
"memset(block, 0, sector_in_block * SECTOR_SIZE);",
"memcpy(block + sector_in_block * SECTOR_SIZE,\nVAR_2, n_sectors * SECTOR_SIZE);",
"memset(block + (sector_in_block + n_sectors) * SECTOR_SIZE, 0,\n(s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);",
"VAR_4 = bdrv_write(VAR_0->file, offset, block, s->block_sectors);",
"} else {",
"uint64_t offset = s->header.offset_data / SECTOR_SIZE +\n(uint64_t)bmap_entry * s->block_sectors +\nsector_in_block;",
"VAR_4 = bdrv_write(VAR_0->file, offset, VAR_2, n_sectors);",
"}",
"VAR_3 -= n_sectors;",
"VAR_1 += n_sectors;",
"VAR_2 += n_sectors * SECTOR_SIZE;",
"logout(\"%u sectors written\\n\", n_sectors);",
"if (VAR_4 >= 0 && VAR_3 > 0) {",
"goto restart;",
"}",
"logout(\"finished data write\\n\");",
"if (VAR_4 < 0) {",
"return VAR_4;",
"}",
"if (block) {",
"VdiHeader *header = (VdiHeader *) block;",
"uint8_t *base;",
"uint64_t offset;",
"logout(\"now writing modified header\\n\");",
"assert(VDI_IS_ALLOCATED(bmap_first));",
"*header = s->header;",
"vdi_header_to_le(header);",
"VAR_4 = bdrv_write(VAR_0->file, 0, block, 1);",
"g_free(block);",
"block = NULL;",
"if (VAR_4 < 0) {",
"return VAR_4;",
"}",
"logout(\"now writing modified block map entry %u...%u\\n\",\nbmap_first, bmap_last);",
"bmap_first /= (SECTOR_SIZE / sizeof(uint32_t));",
"bmap_last /= (SECTOR_SIZE / sizeof(uint32_t));",
"n_sectors = bmap_last - bmap_first + 1;",
"offset = s->bmap_sector + bmap_first;",
"base = ((uint8_t *)&s->bmap[0]) + bmap_first * SECTOR_SIZE;",
"logout(\"will write %u block map sectors starting from entry %u\\n\",\nn_sectors, bmap_first);",
"VAR_4 = bdrv_write(VAR_0->file, offset, base, n_sectors);",
"}",
"return VAR_4;",
"}"
] | [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
27
],
[
31,
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
47,
49
],
[
55
],
[
57
],
[
61
],
[
63
],
[
65
],
[
67
],
[
69,
71
],
[
73
],
[
75
],
[
77
],
[
79
],
[
81
],
[
85
],
[
87,
89
],
[
91,
93
],
[
95
],
[
97
],
[
99,
101,
103
],
[
105
],
[
107
],
[
111
],
[
113
],
[
115
],
[
119
],
[
121
],
[
123
],
[
125
],
[
129
],
[
131
],
[
133
],
[
135
],
[
139
],
[
143
],
[
145
],
[
147
],
[
151
],
[
153
],
[
155
],
[
157
],
[
159
],
[
161
],
[
163
],
[
167
],
[
169
],
[
171
],
[
175,
177
],
[
181
],
[
183
],
[
185
],
[
187
],
[
189
],
[
191,
193
],
[
195
],
[
197
],
[
201
],
[
203
]
] |
21,309 | static void vga_screen_dump(void *opaque, const char *filename, bool cswitch,
Error **errp)
{
VGACommonState *s = opaque;
DisplaySurface *surface = qemu_console_surface(s->con);
if (cswitch) {
vga_invalidate_display(s);
}
graphic_hw_update(s->con);
ppm_save(filename, surface, errp);
}
| false | qemu | 2c62f08ddbf3fa80dc7202eb9a2ea60ae44e2cc5 | static void vga_screen_dump(void *opaque, const char *filename, bool cswitch,
Error **errp)
{
VGACommonState *s = opaque;
DisplaySurface *surface = qemu_console_surface(s->con);
if (cswitch) {
vga_invalidate_display(s);
}
graphic_hw_update(s->con);
ppm_save(filename, surface, errp);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(void *VAR_0, const char *VAR_1, bool VAR_2,
Error **VAR_3)
{
VGACommonState *s = VAR_0;
DisplaySurface *surface = qemu_console_surface(s->con);
if (VAR_2) {
vga_invalidate_display(s);
}
graphic_hw_update(s->con);
ppm_save(VAR_1, surface, VAR_3);
}
| [
"static void FUNC_0(void *VAR_0, const char *VAR_1, bool VAR_2,\nError **VAR_3)\n{",
"VGACommonState *s = VAR_0;",
"DisplaySurface *surface = qemu_console_surface(s->con);",
"if (VAR_2) {",
"vga_invalidate_display(s);",
"}",
"graphic_hw_update(s->con);",
"ppm_save(VAR_1, surface, VAR_3);",
"}"
] | [
0, 0, 0, 0, 0, 0, 0, 0, 0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
]
] |
21,310 | static uint64_t omap_tcmi_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque;
uint32_t ret;
if (size != 4) {
return omap_badwidth_read32(opaque, addr);
}
switch (addr) {
case 0x00: /* IMIF_PRIO */
case 0x04: /* EMIFS_PRIO */
case 0x08: /* EMIFF_PRIO */
case 0x0c: /* EMIFS_CONFIG */
case 0x10: /* EMIFS_CS0_CONFIG */
case 0x14: /* EMIFS_CS1_CONFIG */
case 0x18: /* EMIFS_CS2_CONFIG */
case 0x1c: /* EMIFS_CS3_CONFIG */
case 0x24: /* EMIFF_MRS */
case 0x28: /* TIMEOUT1 */
case 0x2c: /* TIMEOUT2 */
case 0x30: /* TIMEOUT3 */
case 0x3c: /* EMIFF_SDRAM_CONFIG_2 */
case 0x40: /* EMIFS_CFG_DYN_WAIT */
return s->tcmi_regs[addr >> 2];
case 0x20: /* EMIFF_SDRAM_CONFIG */
ret = s->tcmi_regs[addr >> 2];
s->tcmi_regs[addr >> 2] &= ~1; /* XXX: Clear SLRF on SDRAM access */
/* XXX: We can try using the VGA_DIRTY flag for this */
return ret;
}
OMAP_BAD_REG(addr);
return 0;
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | static uint64_t omap_tcmi_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque;
uint32_t ret;
if (size != 4) {
return omap_badwidth_read32(opaque, addr);
}
switch (addr) {
case 0x00:
case 0x04:
case 0x08:
case 0x0c:
case 0x10:
case 0x14:
case 0x18:
case 0x1c:
case 0x24:
case 0x28:
case 0x2c:
case 0x30:
case 0x3c:
case 0x40:
return s->tcmi_regs[addr >> 2];
case 0x20:
ret = s->tcmi_regs[addr >> 2];
s->tcmi_regs[addr >> 2] &= ~1;
return ret;
}
OMAP_BAD_REG(addr);
return 0;
}
| {
"code": [],
"line_no": []
} | static uint64_t FUNC_0(void *opaque, target_phys_addr_t addr,
unsigned size)
{
struct omap_mpu_state_s *VAR_0 = (struct omap_mpu_state_s *) opaque;
uint32_t ret;
if (size != 4) {
return omap_badwidth_read32(opaque, addr);
}
switch (addr) {
case 0x00:
case 0x04:
case 0x08:
case 0x0c:
case 0x10:
case 0x14:
case 0x18:
case 0x1c:
case 0x24:
case 0x28:
case 0x2c:
case 0x30:
case 0x3c:
case 0x40:
return VAR_0->tcmi_regs[addr >> 2];
case 0x20:
ret = VAR_0->tcmi_regs[addr >> 2];
VAR_0->tcmi_regs[addr >> 2] &= ~1;
return ret;
}
OMAP_BAD_REG(addr);
return 0;
}
| [
"static uint64_t FUNC_0(void *opaque, target_phys_addr_t addr,\nunsigned size)\n{",
"struct omap_mpu_state_s *VAR_0 = (struct omap_mpu_state_s *) opaque;",
"uint32_t ret;",
"if (size != 4) {",
"return omap_badwidth_read32(opaque, addr);",
"}",
"switch (addr) {",
"case 0x00:\ncase 0x04:\ncase 0x08:\ncase 0x0c:\ncase 0x10:\ncase 0x14:\ncase 0x18:\ncase 0x1c:\ncase 0x24:\ncase 0x28:\ncase 0x2c:\ncase 0x30:\ncase 0x3c:\ncase 0x40:\nreturn VAR_0->tcmi_regs[addr >> 2];",
"case 0x20:\nret = VAR_0->tcmi_regs[addr >> 2];",
"VAR_0->tcmi_regs[addr >> 2] &= ~1;",
"return ret;",
"}",
"OMAP_BAD_REG(addr);",
"return 0;",
"}"
] | [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
],
[
21
],
[
23,
25,
27,
29,
31,
33,
35,
37,
39,
41,
43,
45,
47,
49,
51
],
[
55,
57
],
[
59
],
[
63
],
[
65
],
[
69
],
[
71
],
[
73
]
] |
21,312 | static int vc1_decode_p_mb(VC1Context *v)
{
MpegEncContext *s = &v->s;
GetBitContext *gb = &s->gb;
int i, j;
int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
int cbp; /* cbp decoding stuff */
int mqdiff, mquant; /* MB quantization */
int ttmb = v->ttfrm; /* MB Transform type */
int mb_has_coeffs = 1; /* last_flag */
int dmv_x, dmv_y; /* Differential MV components */
int index, index1; /* LUT indexes */
int val, sign; /* temp values */
int first_block = 1;
int dst_idx, off;
int skipped, fourmv;
int block_cbp = 0, pat;
int apply_loop_filter;
mquant = v->pq; /* Loosy initialization */
if (v->mv_type_is_raw)
fourmv = get_bits1(gb);
else
fourmv = v->mv_type_mb_plane[mb_pos];
if (v->skip_is_raw)
skipped = get_bits1(gb);
else
skipped = v->s.mbskip_table[mb_pos];
s->dsp.clear_blocks(s->block[0]);
apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
if (!fourmv) /* 1MV mode */
{
if (!skipped)
{
GET_MVDATA(dmv_x, dmv_y);
if (s->mb_intra) {
s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
}
s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
/* FIXME Set DC val for inter block ? */
if (s->mb_intra && !mb_has_coeffs)
{
GET_MQUANT();
s->ac_pred = get_bits1(gb);
cbp = 0;
}
else if (mb_has_coeffs)
{
if (s->mb_intra) s->ac_pred = get_bits1(gb);
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
GET_MQUANT();
}
else
{
mquant = v->pq;
cbp = 0;
}
s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
VC1_TTMB_VLC_BITS, 2);
if(!s->mb_intra) vc1_mc_1mv(v, 0);
dst_idx = 0;
for (i=0; i<6; i++)
{
s->dc_val[0][s->block_index[i]] = 0;
dst_idx += i >> 2;
val = ((cbp >> (5 - i)) & 1);
off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
v->mb_type[0][s->block_index[i]] = s->mb_intra;
if(s->mb_intra) {
/* check if prediction blocks A and C are available */
v->a_avail = v->c_avail = 0;
if(i == 2 || i == 3 || !s->first_slice_line)
v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
if(i == 1 || i == 3 || s->mb_x)
v->c_avail = v->mb_type[0][s->block_index[i] - 1];
vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
s->dsp.vc1_inv_trans_8x8(s->block[i]);
if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
if(v->pq >= 9 && v->overlap) {
if(v->c_avail)
s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
if(v->a_avail)
s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
}
if(apply_loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
int left_cbp, top_cbp;
if(i & 4){
left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
}else{
left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
}
if(left_cbp & 0xC)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, 1, i & 4 ? s->uvlinesize : s->linesize, 8, mquant);
if(top_cbp & 0xA)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, 1, 8, mquant);
}
block_cbp |= 0xF << (i << 2);
} else if(val) {
int left_cbp = 0, top_cbp = 0, filter = 0;
if(apply_loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
filter = 1;
if(i & 4){
left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
}else{
left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
}
if(left_cbp & 0xC)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, 1, i & 4 ? s->uvlinesize : s->linesize, 8, mquant);
if(top_cbp & 0xA)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, 1, 8, mquant);
}
pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), filter, left_cbp, top_cbp);
block_cbp |= pat << (i << 2);
if(!v->ttmbf && ttmb < 8) ttmb = -1;
first_block = 0;
}
}
}
else //Skipped
{
s->mb_intra = 0;
for(i = 0; i < 6; i++) {
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
}
s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
s->current_picture.qscale_table[mb_pos] = 0;
vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
vc1_mc_1mv(v, 0);
return 0;
}
} //1MV mode
else //4MV mode
{
if (!skipped /* unskipped MB */)
{
int intra_count = 0, coded_inter = 0;
int is_intra[6], is_coded[6];
/* Get CBPCY */
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
for (i=0; i<6; i++)
{
val = ((cbp >> (5 - i)) & 1);
s->dc_val[0][s->block_index[i]] = 0;
s->mb_intra = 0;
if(i < 4) {
dmv_x = dmv_y = 0;
s->mb_intra = 0;
mb_has_coeffs = 0;
if(val) {
GET_MVDATA(dmv_x, dmv_y);
}
vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
intra_count += s->mb_intra;
is_intra[i] = s->mb_intra;
is_coded[i] = mb_has_coeffs;
}
if(i&4){
is_intra[i] = (intra_count >= 3);
is_coded[i] = val;
}
if(i == 4) vc1_mc_4mv_chroma(v);
v->mb_type[0][s->block_index[i]] = is_intra[i];
if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
}
// if there are no coded blocks then don't do anything more
if(!intra_count && !coded_inter) return 0;
dst_idx = 0;
GET_MQUANT();
s->current_picture.qscale_table[mb_pos] = mquant;
/* test if block is intra and has pred */
{
int intrapred = 0;
for(i=0; i<6; i++)
if(is_intra[i]) {
if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
|| ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
intrapred = 1;
break;
}
}
if(intrapred)s->ac_pred = get_bits1(gb);
else s->ac_pred = 0;
}
if (!v->ttmbf && coded_inter)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
for (i=0; i<6; i++)
{
dst_idx += i >> 2;
off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
s->mb_intra = is_intra[i];
if (is_intra[i]) {
/* check if prediction blocks A and C are available */
v->a_avail = v->c_avail = 0;
if(i == 2 || i == 3 || !s->first_slice_line)
v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
if(i == 1 || i == 3 || s->mb_x)
v->c_avail = v->mb_type[0][s->block_index[i] - 1];
vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
s->dsp.vc1_inv_trans_8x8(s->block[i]);
if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
if(v->pq >= 9 && v->overlap) {
if(v->c_avail)
s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
if(v->a_avail)
s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
}
if(v->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
int left_cbp, top_cbp;
if(i & 4){
left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
}else{
left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
}
if(left_cbp & 0xC)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, 1, i & 4 ? s->uvlinesize : s->linesize, 8, mquant);
if(top_cbp & 0xA)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, 1, 8, mquant);
}
block_cbp |= 0xF << (i << 2);
} else if(is_coded[i]) {
int left_cbp = 0, top_cbp = 0, filter = 0;
if(v->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
filter = 1;
if(i & 4){
left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
}else{
left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
}
if(left_cbp & 0xC)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, 1, i & 4 ? s->uvlinesize : s->linesize, 8, mquant);
if(top_cbp & 0xA)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, 1, 8, mquant);
}
pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), filter, left_cbp, top_cbp);
block_cbp |= pat << (i << 2);
if(!v->ttmbf && ttmb < 8) ttmb = -1;
first_block = 0;
}
}
return 0;
}
else //Skipped MB
{
s->mb_intra = 0;
s->current_picture.qscale_table[mb_pos] = 0;
for (i=0; i<6; i++) {
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
}
for (i=0; i<4; i++)
{
vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
vc1_mc_4mv_luma(v, i);
}
vc1_mc_4mv_chroma(v);
s->current_picture.qscale_table[mb_pos] = 0;
return 0;
}
}
v->cbp[s->mb_x] = block_cbp;
/* Should never happen */
return -1;
}
| false | FFmpeg | 3992526b3c43278945d00fac6e2ba5cb8f810ef3 | static int vc1_decode_p_mb(VC1Context *v)
{
MpegEncContext *s = &v->s;
GetBitContext *gb = &s->gb;
int i, j;
int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
int cbp;
int mqdiff, mquant;
int ttmb = v->ttfrm;
int mb_has_coeffs = 1;
int dmv_x, dmv_y;
int index, index1;
int val, sign;
int first_block = 1;
int dst_idx, off;
int skipped, fourmv;
int block_cbp = 0, pat;
int apply_loop_filter;
mquant = v->pq;
if (v->mv_type_is_raw)
fourmv = get_bits1(gb);
else
fourmv = v->mv_type_mb_plane[mb_pos];
if (v->skip_is_raw)
skipped = get_bits1(gb);
else
skipped = v->s.mbskip_table[mb_pos];
s->dsp.clear_blocks(s->block[0]);
apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
if (!fourmv)
{
if (!skipped)
{
GET_MVDATA(dmv_x, dmv_y);
if (s->mb_intra) {
s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
}
s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
if (s->mb_intra && !mb_has_coeffs)
{
GET_MQUANT();
s->ac_pred = get_bits1(gb);
cbp = 0;
}
else if (mb_has_coeffs)
{
if (s->mb_intra) s->ac_pred = get_bits1(gb);
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
GET_MQUANT();
}
else
{
mquant = v->pq;
cbp = 0;
}
s->current_picture.qscale_table[mb_pos] = mquant;
if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
VC1_TTMB_VLC_BITS, 2);
if(!s->mb_intra) vc1_mc_1mv(v, 0);
dst_idx = 0;
for (i=0; i<6; i++)
{
s->dc_val[0][s->block_index[i]] = 0;
dst_idx += i >> 2;
val = ((cbp >> (5 - i)) & 1);
off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
v->mb_type[0][s->block_index[i]] = s->mb_intra;
if(s->mb_intra) {
v->a_avail = v->c_avail = 0;
if(i == 2 || i == 3 || !s->first_slice_line)
v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
if(i == 1 || i == 3 || s->mb_x)
v->c_avail = v->mb_type[0][s->block_index[i] - 1];
vc1_decode_intra_block(v, s->block[i], i, val, mquant, (i&4)?v->codingset2:v->codingset);
if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
s->dsp.vc1_inv_trans_8x8(s->block[i]);
if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
if(v->pq >= 9 && v->overlap) {
if(v->c_avail)
s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
if(v->a_avail)
s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
}
if(apply_loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
int left_cbp, top_cbp;
if(i & 4){
left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
}else{
left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
}
if(left_cbp & 0xC)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, 1, i & 4 ? s->uvlinesize : s->linesize, 8, mquant);
if(top_cbp & 0xA)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, 1, 8, mquant);
}
block_cbp |= 0xF << (i << 2);
} else if(val) {
int left_cbp = 0, top_cbp = 0, filter = 0;
if(apply_loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
filter = 1;
if(i & 4){
left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
}else{
left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
}
if(left_cbp & 0xC)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, 1, i & 4 ? s->uvlinesize : s->linesize, 8, mquant);
if(top_cbp & 0xA)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, 1, 8, mquant);
}
pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), filter, left_cbp, top_cbp);
block_cbp |= pat << (i << 2);
if(!v->ttmbf && ttmb < 8) ttmb = -1;
first_block = 0;
}
}
}
else
{
s->mb_intra = 0;
for(i = 0; i < 6; i++) {
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
}
s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
s->current_picture.qscale_table[mb_pos] = 0;
vc1_pred_mv(s, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
vc1_mc_1mv(v, 0);
return 0;
}
}
else
{
if (!skipped )
{
int intra_count = 0, coded_inter = 0;
int is_intra[6], is_coded[6];
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
for (i=0; i<6; i++)
{
val = ((cbp >> (5 - i)) & 1);
s->dc_val[0][s->block_index[i]] = 0;
s->mb_intra = 0;
if(i < 4) {
dmv_x = dmv_y = 0;
s->mb_intra = 0;
mb_has_coeffs = 0;
if(val) {
GET_MVDATA(dmv_x, dmv_y);
}
vc1_pred_mv(s, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
if(!s->mb_intra) vc1_mc_4mv_luma(v, i);
intra_count += s->mb_intra;
is_intra[i] = s->mb_intra;
is_coded[i] = mb_has_coeffs;
}
if(i&4){
is_intra[i] = (intra_count >= 3);
is_coded[i] = val;
}
if(i == 4) vc1_mc_4mv_chroma(v);
v->mb_type[0][s->block_index[i]] = is_intra[i];
if(!coded_inter) coded_inter = !is_intra[i] & is_coded[i];
}
if(!intra_count && !coded_inter) return 0;
dst_idx = 0;
GET_MQUANT();
s->current_picture.qscale_table[mb_pos] = mquant;
{
int intrapred = 0;
for(i=0; i<6; i++)
if(is_intra[i]) {
if(((!s->first_slice_line || (i==2 || i==3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
|| ((s->mb_x || (i==1 || i==3)) && v->mb_type[0][s->block_index[i] - 1])) {
intrapred = 1;
break;
}
}
if(intrapred)s->ac_pred = get_bits1(gb);
else s->ac_pred = 0;
}
if (!v->ttmbf && coded_inter)
ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
for (i=0; i<6; i++)
{
dst_idx += i >> 2;
off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
s->mb_intra = is_intra[i];
if (is_intra[i]) {
v->a_avail = v->c_avail = 0;
if(i == 2 || i == 3 || !s->first_slice_line)
v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
if(i == 1 || i == 3 || s->mb_x)
v->c_avail = v->mb_type[0][s->block_index[i] - 1];
vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant, (i&4)?v->codingset2:v->codingset);
if((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
s->dsp.vc1_inv_trans_8x8(s->block[i]);
if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
if(v->pq >= 9 && v->overlap) {
if(v->c_avail)
s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
if(v->a_avail)
s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
}
if(v->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
int left_cbp, top_cbp;
if(i & 4){
left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
}else{
left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
}
if(left_cbp & 0xC)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, 1, i & 4 ? s->uvlinesize : s->linesize, 8, mquant);
if(top_cbp & 0xA)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, 1, 8, mquant);
}
block_cbp |= 0xF << (i << 2);
} else if(is_coded[i]) {
int left_cbp = 0, top_cbp = 0, filter = 0;
if(v->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
filter = 1;
if(i & 4){
left_cbp = v->cbp[s->mb_x - 1] >> (i * 4);
top_cbp = v->cbp[s->mb_x - s->mb_stride] >> (i * 4);
}else{
left_cbp = (i & 1) ? (cbp >> ((i-1)*4)) : (v->cbp[s->mb_x - 1] >> ((i+1)*4));
top_cbp = (i & 2) ? (cbp >> ((i-2)*4)) : (v->cbp[s->mb_x - s->mb_stride] >> ((i+2)*4));
}
if(left_cbp & 0xC)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, 1, i & 4 ? s->uvlinesize : s->linesize, 8, mquant);
if(top_cbp & 0xA)
s->dsp.vc1_loop_filter(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize, 1, 8, mquant);
}
pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block, s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize, (i&4) && (s->flags & CODEC_FLAG_GRAY), filter, left_cbp, top_cbp);
block_cbp |= pat << (i << 2);
if(!v->ttmbf && ttmb < 8) ttmb = -1;
first_block = 0;
}
}
return 0;
}
else MB
{
s->mb_intra = 0;
s->current_picture.qscale_table[mb_pos] = 0;
for (i=0; i<6; i++) {
v->mb_type[0][s->block_index[i]] = 0;
s->dc_val[0][s->block_index[i]] = 0;
}
for (i=0; i<4; i++)
{
vc1_pred_mv(s, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0]);
vc1_mc_4mv_luma(v, i);
}
vc1_mc_4mv_chroma(v);
s->current_picture.qscale_table[mb_pos] = 0;
return 0;
}
}
v->cbp[s->mb_x] = block_cbp;
return -1;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(VC1Context *VAR_0)
{
MpegEncContext *s = &VAR_0->s;
GetBitContext *gb = &s->gb;
int VAR_1, VAR_2;
int VAR_3 = s->mb_x + s->mb_y * s->mb_stride;
int VAR_4;
int VAR_5, VAR_6;
int VAR_7 = VAR_0->ttfrm;
int VAR_8 = 1;
int VAR_9, VAR_10;
int VAR_11, VAR_12;
int VAR_13, VAR_14;
int VAR_15 = 1;
int VAR_16, VAR_17;
int VAR_18, VAR_19;
int VAR_20 = 0, VAR_21;
int VAR_22;
VAR_6 = VAR_0->pq;
if (VAR_0->mv_type_is_raw)
VAR_19 = get_bits1(gb);
else
VAR_19 = VAR_0->mv_type_mb_plane[VAR_3];
if (VAR_0->skip_is_raw)
VAR_18 = get_bits1(gb);
else
VAR_18 = VAR_0->s.mbskip_table[VAR_3];
s->dsp.clear_blocks(s->block[0]);
VAR_22 = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
if (!VAR_19)
{
if (!VAR_18)
{
GET_MVDATA(VAR_9, VAR_10);
if (s->mb_intra) {
s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
}
s->current_picture.mb_type[VAR_3] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
vc1_pred_mv(s, 0, VAR_9, VAR_10, 1, VAR_0->range_x, VAR_0->range_y, VAR_0->mb_type[0]);
if (s->mb_intra && !VAR_8)
{
GET_MQUANT();
s->ac_pred = get_bits1(gb);
VAR_4 = 0;
}
else if (VAR_8)
{
if (s->mb_intra) s->ac_pred = get_bits1(gb);
VAR_4 = get_vlc2(&VAR_0->s.gb, VAR_0->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
GET_MQUANT();
}
else
{
VAR_6 = VAR_0->pq;
VAR_4 = 0;
}
s->current_picture.qscale_table[VAR_3] = VAR_6;
if (!VAR_0->ttmbf && !s->mb_intra && VAR_8)
VAR_7 = get_vlc2(gb, ff_vc1_ttmb_vlc[VAR_0->tt_index].table,
VC1_TTMB_VLC_BITS, 2);
if(!s->mb_intra) vc1_mc_1mv(VAR_0, 0);
VAR_16 = 0;
for (VAR_1=0; VAR_1<6; VAR_1++)
{
s->dc_val[0][s->block_index[VAR_1]] = 0;
VAR_16 += VAR_1 >> 2;
VAR_13 = ((VAR_4 >> (5 - VAR_1)) & 1);
VAR_17 = (VAR_1 & 4) ? 0 : ((VAR_1 & 1) * 8 + (VAR_1 & 2) * 4 * s->linesize);
VAR_0->mb_type[0][s->block_index[VAR_1]] = s->mb_intra;
if(s->mb_intra) {
VAR_0->a_avail = VAR_0->c_avail = 0;
if(VAR_1 == 2 || VAR_1 == 3 || !s->first_slice_line)
VAR_0->a_avail = VAR_0->mb_type[0][s->block_index[VAR_1] - s->block_wrap[VAR_1]];
if(VAR_1 == 1 || VAR_1 == 3 || s->mb_x)
VAR_0->c_avail = VAR_0->mb_type[0][s->block_index[VAR_1] - 1];
vc1_decode_intra_block(VAR_0, s->block[VAR_1], VAR_1, VAR_13, VAR_6, (VAR_1&4)?VAR_0->codingset2:VAR_0->codingset);
if((VAR_1>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
s->dsp.vc1_inv_trans_8x8(s->block[VAR_1]);
if(VAR_0->rangeredfrm) for(VAR_2 = 0; VAR_2 < 64; VAR_2++) s->block[VAR_1][VAR_2] <<= 1;
s->dsp.put_signed_pixels_clamped(s->block[VAR_1], s->dest[VAR_16] + VAR_17, s->linesize >> ((VAR_1 & 4) >> 2));
if(VAR_0->pq >= 9 && VAR_0->overlap) {
if(VAR_0->c_avail)
s->dsp.vc1_h_overlap(s->dest[VAR_16] + VAR_17, s->linesize >> ((VAR_1 & 4) >> 2));
if(VAR_0->a_avail)
s->dsp.vc1_v_overlap(s->dest[VAR_16] + VAR_17, s->linesize >> ((VAR_1 & 4) >> 2));
}
if(VAR_22 && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
int VAR_31, VAR_31;
if(VAR_1 & 4){
VAR_31 = VAR_0->VAR_4[s->mb_x - 1] >> (VAR_1 * 4);
VAR_31 = VAR_0->VAR_4[s->mb_x - s->mb_stride] >> (VAR_1 * 4);
}else{
VAR_31 = (VAR_1 & 1) ? (VAR_4 >> ((VAR_1-1)*4)) : (VAR_0->VAR_4[s->mb_x - 1] >> ((VAR_1+1)*4));
VAR_31 = (VAR_1 & 2) ? (VAR_4 >> ((VAR_1-2)*4)) : (VAR_0->VAR_4[s->mb_x - s->mb_stride] >> ((VAR_1+2)*4));
}
if(VAR_31 & 0xC)
s->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, 1, VAR_1 & 4 ? s->uvlinesize : s->linesize, 8, VAR_6);
if(VAR_31 & 0xA)
s->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, VAR_1 & 4 ? s->uvlinesize : s->linesize, 1, 8, VAR_6);
}
VAR_20 |= 0xF << (VAR_1 << 2);
} else if(VAR_13) {
int VAR_31 = 0, VAR_31 = 0, VAR_31 = 0;
if(VAR_22 && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
VAR_31 = 1;
if(VAR_1 & 4){
VAR_31 = VAR_0->VAR_4[s->mb_x - 1] >> (VAR_1 * 4);
VAR_31 = VAR_0->VAR_4[s->mb_x - s->mb_stride] >> (VAR_1 * 4);
}else{
VAR_31 = (VAR_1 & 1) ? (VAR_4 >> ((VAR_1-1)*4)) : (VAR_0->VAR_4[s->mb_x - 1] >> ((VAR_1+1)*4));
VAR_31 = (VAR_1 & 2) ? (VAR_4 >> ((VAR_1-2)*4)) : (VAR_0->VAR_4[s->mb_x - s->mb_stride] >> ((VAR_1+2)*4));
}
if(VAR_31 & 0xC)
s->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, 1, VAR_1 & 4 ? s->uvlinesize : s->linesize, 8, VAR_6);
if(VAR_31 & 0xA)
s->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, VAR_1 & 4 ? s->uvlinesize : s->linesize, 1, 8, VAR_6);
}
VAR_21 = vc1_decode_p_block(VAR_0, s->block[VAR_1], VAR_1, VAR_6, VAR_7, VAR_15, s->dest[VAR_16] + VAR_17, (VAR_1&4)?s->uvlinesize:s->linesize, (VAR_1&4) && (s->flags & CODEC_FLAG_GRAY), VAR_31, VAR_31, VAR_31);
VAR_20 |= VAR_21 << (VAR_1 << 2);
if(!VAR_0->ttmbf && VAR_7 < 8) VAR_7 = -1;
VAR_15 = 0;
}
}
}
else
{
s->mb_intra = 0;
for(VAR_1 = 0; VAR_1 < 6; VAR_1++) {
VAR_0->mb_type[0][s->block_index[VAR_1]] = 0;
s->dc_val[0][s->block_index[VAR_1]] = 0;
}
s->current_picture.mb_type[VAR_3] = MB_TYPE_SKIP;
s->current_picture.qscale_table[VAR_3] = 0;
vc1_pred_mv(s, 0, 0, 0, 1, VAR_0->range_x, VAR_0->range_y, VAR_0->mb_type[0]);
vc1_mc_1mv(VAR_0, 0);
return 0;
}
}
else
{
if (!VAR_18 )
{
int VAR_26 = 0, VAR_27 = 0;
int VAR_28[6], VAR_29[6];
VAR_4 = get_vlc2(&VAR_0->s.gb, VAR_0->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
for (VAR_1=0; VAR_1<6; VAR_1++)
{
VAR_13 = ((VAR_4 >> (5 - VAR_1)) & 1);
s->dc_val[0][s->block_index[VAR_1]] = 0;
s->mb_intra = 0;
if(VAR_1 < 4) {
VAR_9 = VAR_10 = 0;
s->mb_intra = 0;
VAR_8 = 0;
if(VAR_13) {
GET_MVDATA(VAR_9, VAR_10);
}
vc1_pred_mv(s, VAR_1, VAR_9, VAR_10, 0, VAR_0->range_x, VAR_0->range_y, VAR_0->mb_type[0]);
if(!s->mb_intra) vc1_mc_4mv_luma(VAR_0, VAR_1);
VAR_26 += s->mb_intra;
VAR_28[VAR_1] = s->mb_intra;
VAR_29[VAR_1] = VAR_8;
}
if(VAR_1&4){
VAR_28[VAR_1] = (VAR_26 >= 3);
VAR_29[VAR_1] = VAR_13;
}
if(VAR_1 == 4) vc1_mc_4mv_chroma(VAR_0);
VAR_0->mb_type[0][s->block_index[VAR_1]] = VAR_28[VAR_1];
if(!VAR_27) VAR_27 = !VAR_28[VAR_1] & VAR_29[VAR_1];
}
if(!VAR_26 && !VAR_27) return 0;
VAR_16 = 0;
GET_MQUANT();
s->current_picture.qscale_table[VAR_3] = VAR_6;
{
int VAR_30 = 0;
for(VAR_1=0; VAR_1<6; VAR_1++)
if(VAR_28[VAR_1]) {
if(((!s->first_slice_line || (VAR_1==2 || VAR_1==3)) && VAR_0->mb_type[0][s->block_index[VAR_1] - s->block_wrap[VAR_1]])
|| ((s->mb_x || (VAR_1==1 || VAR_1==3)) && VAR_0->mb_type[0][s->block_index[VAR_1] - 1])) {
VAR_30 = 1;
break;
}
}
if(VAR_30)s->ac_pred = get_bits1(gb);
else s->ac_pred = 0;
}
if (!VAR_0->ttmbf && VAR_27)
VAR_7 = get_vlc2(gb, ff_vc1_ttmb_vlc[VAR_0->tt_index].table, VC1_TTMB_VLC_BITS, 2);
for (VAR_1=0; VAR_1<6; VAR_1++)
{
VAR_16 += VAR_1 >> 2;
VAR_17 = (VAR_1 & 4) ? 0 : ((VAR_1 & 1) * 8 + (VAR_1 & 2) * 4 * s->linesize);
s->mb_intra = VAR_28[VAR_1];
if (VAR_28[VAR_1]) {
VAR_0->a_avail = VAR_0->c_avail = 0;
if(VAR_1 == 2 || VAR_1 == 3 || !s->first_slice_line)
VAR_0->a_avail = VAR_0->mb_type[0][s->block_index[VAR_1] - s->block_wrap[VAR_1]];
if(VAR_1 == 1 || VAR_1 == 3 || s->mb_x)
VAR_0->c_avail = VAR_0->mb_type[0][s->block_index[VAR_1] - 1];
vc1_decode_intra_block(VAR_0, s->block[VAR_1], VAR_1, VAR_29[VAR_1], VAR_6, (VAR_1&4)?VAR_0->codingset2:VAR_0->codingset);
if((VAR_1>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
s->dsp.vc1_inv_trans_8x8(s->block[VAR_1]);
if(VAR_0->rangeredfrm) for(VAR_2 = 0; VAR_2 < 64; VAR_2++) s->block[VAR_1][VAR_2] <<= 1;
s->dsp.put_signed_pixels_clamped(s->block[VAR_1], s->dest[VAR_16] + VAR_17, (VAR_1&4)?s->uvlinesize:s->linesize);
if(VAR_0->pq >= 9 && VAR_0->overlap) {
if(VAR_0->c_avail)
s->dsp.vc1_h_overlap(s->dest[VAR_16] + VAR_17, s->linesize >> ((VAR_1 & 4) >> 2));
if(VAR_0->a_avail)
s->dsp.vc1_v_overlap(s->dest[VAR_16] + VAR_17, s->linesize >> ((VAR_1 & 4) >> 2));
}
if(VAR_0->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
int VAR_31, VAR_31;
if(VAR_1 & 4){
VAR_31 = VAR_0->VAR_4[s->mb_x - 1] >> (VAR_1 * 4);
VAR_31 = VAR_0->VAR_4[s->mb_x - s->mb_stride] >> (VAR_1 * 4);
}else{
VAR_31 = (VAR_1 & 1) ? (VAR_4 >> ((VAR_1-1)*4)) : (VAR_0->VAR_4[s->mb_x - 1] >> ((VAR_1+1)*4));
VAR_31 = (VAR_1 & 2) ? (VAR_4 >> ((VAR_1-2)*4)) : (VAR_0->VAR_4[s->mb_x - s->mb_stride] >> ((VAR_1+2)*4));
}
if(VAR_31 & 0xC)
s->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, 1, VAR_1 & 4 ? s->uvlinesize : s->linesize, 8, VAR_6);
if(VAR_31 & 0xA)
s->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, VAR_1 & 4 ? s->uvlinesize : s->linesize, 1, 8, VAR_6);
}
VAR_20 |= 0xF << (VAR_1 << 2);
} else if(VAR_29[VAR_1]) {
int VAR_31 = 0, VAR_31 = 0, VAR_31 = 0;
if(VAR_0->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){
VAR_31 = 1;
if(VAR_1 & 4){
VAR_31 = VAR_0->VAR_4[s->mb_x - 1] >> (VAR_1 * 4);
VAR_31 = VAR_0->VAR_4[s->mb_x - s->mb_stride] >> (VAR_1 * 4);
}else{
VAR_31 = (VAR_1 & 1) ? (VAR_4 >> ((VAR_1-1)*4)) : (VAR_0->VAR_4[s->mb_x - 1] >> ((VAR_1+1)*4));
VAR_31 = (VAR_1 & 2) ? (VAR_4 >> ((VAR_1-2)*4)) : (VAR_0->VAR_4[s->mb_x - s->mb_stride] >> ((VAR_1+2)*4));
}
if(VAR_31 & 0xC)
s->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, 1, VAR_1 & 4 ? s->uvlinesize : s->linesize, 8, VAR_6);
if(VAR_31 & 0xA)
s->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, VAR_1 & 4 ? s->uvlinesize : s->linesize, 1, 8, VAR_6);
}
VAR_21 = vc1_decode_p_block(VAR_0, s->block[VAR_1], VAR_1, VAR_6, VAR_7, VAR_15, s->dest[VAR_16] + VAR_17, (VAR_1&4)?s->uvlinesize:s->linesize, (VAR_1&4) && (s->flags & CODEC_FLAG_GRAY), VAR_31, VAR_31, VAR_31);
VAR_20 |= VAR_21 << (VAR_1 << 2);
if(!VAR_0->ttmbf && VAR_7 < 8) VAR_7 = -1;
VAR_15 = 0;
}
}
return 0;
}
else MB
{
s->mb_intra = 0;
s->current_picture.qscale_table[VAR_3] = 0;
for (VAR_1=0; VAR_1<6; VAR_1++) {
VAR_0->mb_type[0][s->block_index[VAR_1]] = 0;
s->dc_val[0][s->block_index[VAR_1]] = 0;
}
for (VAR_1=0; VAR_1<4; VAR_1++)
{
vc1_pred_mv(s, VAR_1, 0, 0, 0, VAR_0->range_x, VAR_0->range_y, VAR_0->mb_type[0]);
vc1_mc_4mv_luma(VAR_0, VAR_1);
}
vc1_mc_4mv_chroma(VAR_0);
s->current_picture.qscale_table[VAR_3] = 0;
return 0;
}
}
VAR_0->VAR_4[s->mb_x] = VAR_20;
return -1;
}
| [
"static int FUNC_0(VC1Context *VAR_0)\n{",
"MpegEncContext *s = &VAR_0->s;",
"GetBitContext *gb = &s->gb;",
"int VAR_1, VAR_2;",
"int VAR_3 = s->mb_x + s->mb_y * s->mb_stride;",
"int VAR_4;",
"int VAR_5, VAR_6;",
"int VAR_7 = VAR_0->ttfrm;",
"int VAR_8 = 1;",
"int VAR_9, VAR_10;",
"int VAR_11, VAR_12;",
"int VAR_13, VAR_14;",
"int VAR_15 = 1;",
"int VAR_16, VAR_17;",
"int VAR_18, VAR_19;",
"int VAR_20 = 0, VAR_21;",
"int VAR_22;",
"VAR_6 = VAR_0->pq;",
"if (VAR_0->mv_type_is_raw)\nVAR_19 = get_bits1(gb);",
"else\nVAR_19 = VAR_0->mv_type_mb_plane[VAR_3];",
"if (VAR_0->skip_is_raw)\nVAR_18 = get_bits1(gb);",
"else\nVAR_18 = VAR_0->s.mbskip_table[VAR_3];",
"s->dsp.clear_blocks(s->block[0]);",
"VAR_22 = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);",
"if (!VAR_19)\n{",
"if (!VAR_18)\n{",
"GET_MVDATA(VAR_9, VAR_10);",
"if (s->mb_intra) {",
"s->current_picture.motion_val[1][s->block_index[0]][0] = 0;",
"s->current_picture.motion_val[1][s->block_index[0]][1] = 0;",
"}",
"s->current_picture.mb_type[VAR_3] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;",
"vc1_pred_mv(s, 0, VAR_9, VAR_10, 1, VAR_0->range_x, VAR_0->range_y, VAR_0->mb_type[0]);",
"if (s->mb_intra && !VAR_8)\n{",
"GET_MQUANT();",
"s->ac_pred = get_bits1(gb);",
"VAR_4 = 0;",
"}",
"else if (VAR_8)\n{",
"if (s->mb_intra) s->ac_pred = get_bits1(gb);",
"VAR_4 = get_vlc2(&VAR_0->s.gb, VAR_0->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);",
"GET_MQUANT();",
"}",
"else\n{",
"VAR_6 = VAR_0->pq;",
"VAR_4 = 0;",
"}",
"s->current_picture.qscale_table[VAR_3] = VAR_6;",
"if (!VAR_0->ttmbf && !s->mb_intra && VAR_8)\nVAR_7 = get_vlc2(gb, ff_vc1_ttmb_vlc[VAR_0->tt_index].table,\nVC1_TTMB_VLC_BITS, 2);",
"if(!s->mb_intra) vc1_mc_1mv(VAR_0, 0);",
"VAR_16 = 0;",
"for (VAR_1=0; VAR_1<6; VAR_1++)",
"{",
"s->dc_val[0][s->block_index[VAR_1]] = 0;",
"VAR_16 += VAR_1 >> 2;",
"VAR_13 = ((VAR_4 >> (5 - VAR_1)) & 1);",
"VAR_17 = (VAR_1 & 4) ? 0 : ((VAR_1 & 1) * 8 + (VAR_1 & 2) * 4 * s->linesize);",
"VAR_0->mb_type[0][s->block_index[VAR_1]] = s->mb_intra;",
"if(s->mb_intra) {",
"VAR_0->a_avail = VAR_0->c_avail = 0;",
"if(VAR_1 == 2 || VAR_1 == 3 || !s->first_slice_line)\nVAR_0->a_avail = VAR_0->mb_type[0][s->block_index[VAR_1] - s->block_wrap[VAR_1]];",
"if(VAR_1 == 1 || VAR_1 == 3 || s->mb_x)\nVAR_0->c_avail = VAR_0->mb_type[0][s->block_index[VAR_1] - 1];",
"vc1_decode_intra_block(VAR_0, s->block[VAR_1], VAR_1, VAR_13, VAR_6, (VAR_1&4)?VAR_0->codingset2:VAR_0->codingset);",
"if((VAR_1>3) && (s->flags & CODEC_FLAG_GRAY)) continue;",
"s->dsp.vc1_inv_trans_8x8(s->block[VAR_1]);",
"if(VAR_0->rangeredfrm) for(VAR_2 = 0; VAR_2 < 64; VAR_2++) s->block[VAR_1][VAR_2] <<= 1;",
"s->dsp.put_signed_pixels_clamped(s->block[VAR_1], s->dest[VAR_16] + VAR_17, s->linesize >> ((VAR_1 & 4) >> 2));",
"if(VAR_0->pq >= 9 && VAR_0->overlap) {",
"if(VAR_0->c_avail)\ns->dsp.vc1_h_overlap(s->dest[VAR_16] + VAR_17, s->linesize >> ((VAR_1 & 4) >> 2));",
"if(VAR_0->a_avail)\ns->dsp.vc1_v_overlap(s->dest[VAR_16] + VAR_17, s->linesize >> ((VAR_1 & 4) >> 2));",
"}",
"if(VAR_22 && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){",
"int VAR_31, VAR_31;",
"if(VAR_1 & 4){",
"VAR_31 = VAR_0->VAR_4[s->mb_x - 1] >> (VAR_1 * 4);",
"VAR_31 = VAR_0->VAR_4[s->mb_x - s->mb_stride] >> (VAR_1 * 4);",
"}else{",
"VAR_31 = (VAR_1 & 1) ? (VAR_4 >> ((VAR_1-1)*4)) : (VAR_0->VAR_4[s->mb_x - 1] >> ((VAR_1+1)*4));",
"VAR_31 = (VAR_1 & 2) ? (VAR_4 >> ((VAR_1-2)*4)) : (VAR_0->VAR_4[s->mb_x - s->mb_stride] >> ((VAR_1+2)*4));",
"}",
"if(VAR_31 & 0xC)\ns->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, 1, VAR_1 & 4 ? s->uvlinesize : s->linesize, 8, VAR_6);",
"if(VAR_31 & 0xA)\ns->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, VAR_1 & 4 ? s->uvlinesize : s->linesize, 1, 8, VAR_6);",
"}",
"VAR_20 |= 0xF << (VAR_1 << 2);",
"} else if(VAR_13) {",
"int VAR_31 = 0, VAR_31 = 0, VAR_31 = 0;",
"if(VAR_22 && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){",
"VAR_31 = 1;",
"if(VAR_1 & 4){",
"VAR_31 = VAR_0->VAR_4[s->mb_x - 1] >> (VAR_1 * 4);",
"VAR_31 = VAR_0->VAR_4[s->mb_x - s->mb_stride] >> (VAR_1 * 4);",
"}else{",
"VAR_31 = (VAR_1 & 1) ? (VAR_4 >> ((VAR_1-1)*4)) : (VAR_0->VAR_4[s->mb_x - 1] >> ((VAR_1+1)*4));",
"VAR_31 = (VAR_1 & 2) ? (VAR_4 >> ((VAR_1-2)*4)) : (VAR_0->VAR_4[s->mb_x - s->mb_stride] >> ((VAR_1+2)*4));",
"}",
"if(VAR_31 & 0xC)\ns->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, 1, VAR_1 & 4 ? s->uvlinesize : s->linesize, 8, VAR_6);",
"if(VAR_31 & 0xA)\ns->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, VAR_1 & 4 ? s->uvlinesize : s->linesize, 1, 8, VAR_6);",
"}",
"VAR_21 = vc1_decode_p_block(VAR_0, s->block[VAR_1], VAR_1, VAR_6, VAR_7, VAR_15, s->dest[VAR_16] + VAR_17, (VAR_1&4)?s->uvlinesize:s->linesize, (VAR_1&4) && (s->flags & CODEC_FLAG_GRAY), VAR_31, VAR_31, VAR_31);",
"VAR_20 |= VAR_21 << (VAR_1 << 2);",
"if(!VAR_0->ttmbf && VAR_7 < 8) VAR_7 = -1;",
"VAR_15 = 0;",
"}",
"}",
"}",
"else\n{",
"s->mb_intra = 0;",
"for(VAR_1 = 0; VAR_1 < 6; VAR_1++) {",
"VAR_0->mb_type[0][s->block_index[VAR_1]] = 0;",
"s->dc_val[0][s->block_index[VAR_1]] = 0;",
"}",
"s->current_picture.mb_type[VAR_3] = MB_TYPE_SKIP;",
"s->current_picture.qscale_table[VAR_3] = 0;",
"vc1_pred_mv(s, 0, 0, 0, 1, VAR_0->range_x, VAR_0->range_y, VAR_0->mb_type[0]);",
"vc1_mc_1mv(VAR_0, 0);",
"return 0;",
"}",
"}",
"else\n{",
"if (!VAR_18 )\n{",
"int VAR_26 = 0, VAR_27 = 0;",
"int VAR_28[6], VAR_29[6];",
"VAR_4 = get_vlc2(&VAR_0->s.gb, VAR_0->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);",
"for (VAR_1=0; VAR_1<6; VAR_1++)",
"{",
"VAR_13 = ((VAR_4 >> (5 - VAR_1)) & 1);",
"s->dc_val[0][s->block_index[VAR_1]] = 0;",
"s->mb_intra = 0;",
"if(VAR_1 < 4) {",
"VAR_9 = VAR_10 = 0;",
"s->mb_intra = 0;",
"VAR_8 = 0;",
"if(VAR_13) {",
"GET_MVDATA(VAR_9, VAR_10);",
"}",
"vc1_pred_mv(s, VAR_1, VAR_9, VAR_10, 0, VAR_0->range_x, VAR_0->range_y, VAR_0->mb_type[0]);",
"if(!s->mb_intra) vc1_mc_4mv_luma(VAR_0, VAR_1);",
"VAR_26 += s->mb_intra;",
"VAR_28[VAR_1] = s->mb_intra;",
"VAR_29[VAR_1] = VAR_8;",
"}",
"if(VAR_1&4){",
"VAR_28[VAR_1] = (VAR_26 >= 3);",
"VAR_29[VAR_1] = VAR_13;",
"}",
"if(VAR_1 == 4) vc1_mc_4mv_chroma(VAR_0);",
"VAR_0->mb_type[0][s->block_index[VAR_1]] = VAR_28[VAR_1];",
"if(!VAR_27) VAR_27 = !VAR_28[VAR_1] & VAR_29[VAR_1];",
"}",
"if(!VAR_26 && !VAR_27) return 0;",
"VAR_16 = 0;",
"GET_MQUANT();",
"s->current_picture.qscale_table[VAR_3] = VAR_6;",
"{",
"int VAR_30 = 0;",
"for(VAR_1=0; VAR_1<6; VAR_1++)",
"if(VAR_28[VAR_1]) {",
"if(((!s->first_slice_line || (VAR_1==2 || VAR_1==3)) && VAR_0->mb_type[0][s->block_index[VAR_1] - s->block_wrap[VAR_1]])\n|| ((s->mb_x || (VAR_1==1 || VAR_1==3)) && VAR_0->mb_type[0][s->block_index[VAR_1] - 1])) {",
"VAR_30 = 1;",
"break;",
"}",
"}",
"if(VAR_30)s->ac_pred = get_bits1(gb);",
"else s->ac_pred = 0;",
"}",
"if (!VAR_0->ttmbf && VAR_27)\nVAR_7 = get_vlc2(gb, ff_vc1_ttmb_vlc[VAR_0->tt_index].table, VC1_TTMB_VLC_BITS, 2);",
"for (VAR_1=0; VAR_1<6; VAR_1++)",
"{",
"VAR_16 += VAR_1 >> 2;",
"VAR_17 = (VAR_1 & 4) ? 0 : ((VAR_1 & 1) * 8 + (VAR_1 & 2) * 4 * s->linesize);",
"s->mb_intra = VAR_28[VAR_1];",
"if (VAR_28[VAR_1]) {",
"VAR_0->a_avail = VAR_0->c_avail = 0;",
"if(VAR_1 == 2 || VAR_1 == 3 || !s->first_slice_line)\nVAR_0->a_avail = VAR_0->mb_type[0][s->block_index[VAR_1] - s->block_wrap[VAR_1]];",
"if(VAR_1 == 1 || VAR_1 == 3 || s->mb_x)\nVAR_0->c_avail = VAR_0->mb_type[0][s->block_index[VAR_1] - 1];",
"vc1_decode_intra_block(VAR_0, s->block[VAR_1], VAR_1, VAR_29[VAR_1], VAR_6, (VAR_1&4)?VAR_0->codingset2:VAR_0->codingset);",
"if((VAR_1>3) && (s->flags & CODEC_FLAG_GRAY)) continue;",
"s->dsp.vc1_inv_trans_8x8(s->block[VAR_1]);",
"if(VAR_0->rangeredfrm) for(VAR_2 = 0; VAR_2 < 64; VAR_2++) s->block[VAR_1][VAR_2] <<= 1;",
"s->dsp.put_signed_pixels_clamped(s->block[VAR_1], s->dest[VAR_16] + VAR_17, (VAR_1&4)?s->uvlinesize:s->linesize);",
"if(VAR_0->pq >= 9 && VAR_0->overlap) {",
"if(VAR_0->c_avail)\ns->dsp.vc1_h_overlap(s->dest[VAR_16] + VAR_17, s->linesize >> ((VAR_1 & 4) >> 2));",
"if(VAR_0->a_avail)\ns->dsp.vc1_v_overlap(s->dest[VAR_16] + VAR_17, s->linesize >> ((VAR_1 & 4) >> 2));",
"}",
"if(VAR_0->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){",
"int VAR_31, VAR_31;",
"if(VAR_1 & 4){",
"VAR_31 = VAR_0->VAR_4[s->mb_x - 1] >> (VAR_1 * 4);",
"VAR_31 = VAR_0->VAR_4[s->mb_x - s->mb_stride] >> (VAR_1 * 4);",
"}else{",
"VAR_31 = (VAR_1 & 1) ? (VAR_4 >> ((VAR_1-1)*4)) : (VAR_0->VAR_4[s->mb_x - 1] >> ((VAR_1+1)*4));",
"VAR_31 = (VAR_1 & 2) ? (VAR_4 >> ((VAR_1-2)*4)) : (VAR_0->VAR_4[s->mb_x - s->mb_stride] >> ((VAR_1+2)*4));",
"}",
"if(VAR_31 & 0xC)\ns->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, 1, VAR_1 & 4 ? s->uvlinesize : s->linesize, 8, VAR_6);",
"if(VAR_31 & 0xA)\ns->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, VAR_1 & 4 ? s->uvlinesize : s->linesize, 1, 8, VAR_6);",
"}",
"VAR_20 |= 0xF << (VAR_1 << 2);",
"} else if(VAR_29[VAR_1]) {",
"int VAR_31 = 0, VAR_31 = 0, VAR_31 = 0;",
"if(VAR_0->s.loop_filter && s->mb_x && s->mb_x != (s->mb_width - 1) && s->mb_y && s->mb_y != (s->mb_height - 1)){",
"VAR_31 = 1;",
"if(VAR_1 & 4){",
"VAR_31 = VAR_0->VAR_4[s->mb_x - 1] >> (VAR_1 * 4);",
"VAR_31 = VAR_0->VAR_4[s->mb_x - s->mb_stride] >> (VAR_1 * 4);",
"}else{",
"VAR_31 = (VAR_1 & 1) ? (VAR_4 >> ((VAR_1-1)*4)) : (VAR_0->VAR_4[s->mb_x - 1] >> ((VAR_1+1)*4));",
"VAR_31 = (VAR_1 & 2) ? (VAR_4 >> ((VAR_1-2)*4)) : (VAR_0->VAR_4[s->mb_x - s->mb_stride] >> ((VAR_1+2)*4));",
"}",
"if(VAR_31 & 0xC)\ns->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, 1, VAR_1 & 4 ? s->uvlinesize : s->linesize, 8, VAR_6);",
"if(VAR_31 & 0xA)\ns->dsp.vc1_loop_filter(s->dest[VAR_16] + VAR_17, VAR_1 & 4 ? s->uvlinesize : s->linesize, 1, 8, VAR_6);",
"}",
"VAR_21 = vc1_decode_p_block(VAR_0, s->block[VAR_1], VAR_1, VAR_6, VAR_7, VAR_15, s->dest[VAR_16] + VAR_17, (VAR_1&4)?s->uvlinesize:s->linesize, (VAR_1&4) && (s->flags & CODEC_FLAG_GRAY), VAR_31, VAR_31, VAR_31);",
"VAR_20 |= VAR_21 << (VAR_1 << 2);",
"if(!VAR_0->ttmbf && VAR_7 < 8) VAR_7 = -1;",
"VAR_15 = 0;",
"}",
"}",
"return 0;",
"}",
"else MB\n{",
"s->mb_intra = 0;",
"s->current_picture.qscale_table[VAR_3] = 0;",
"for (VAR_1=0; VAR_1<6; VAR_1++) {",
"VAR_0->mb_type[0][s->block_index[VAR_1]] = 0;",
"s->dc_val[0][s->block_index[VAR_1]] = 0;",
"}",
"for (VAR_1=0; VAR_1<4; VAR_1++)",
"{",
"vc1_pred_mv(s, VAR_1, 0, 0, 0, VAR_0->range_x, VAR_0->range_y, VAR_0->mb_type[0]);",
"vc1_mc_4mv_luma(VAR_0, VAR_1);",
"}",
"vc1_mc_4mv_chroma(VAR_0);",
"s->current_picture.qscale_table[VAR_3] = 0;",
"return 0;",
"}",
"}",
"VAR_0->VAR_4[s->mb_x] = VAR_20;",
"return -1;",
"}"
] | [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
41
],
[
45,
47
],
[
49,
51
],
[
53,
55
],
[
57,
59
],
[
63
],
[
67
],
[
69,
71
],
[
73,
75
],
[
77
],
[
81
],
[
83
],
[
85
],
[
87
],
[
89
],
[
91
],
[
97,
99
],
[
101
],
[
103
],
[
105
],
[
107
],
[
109,
111
],
[
113
],
[
115
],
[
117
],
[
119
],
[
121,
123
],
[
125
],
[
127
],
[
129
],
[
131
],
[
135,
137,
139
],
[
141
],
[
143
],
[
145
],
[
147
],
[
149
],
[
151
],
[
153
],
[
155
],
[
157
],
[
159
],
[
163
],
[
165,
167
],
[
169,
171
],
[
175
],
[
177
],
[
179
],
[
181
],
[
183
],
[
185
],
[
187,
189
],
[
191,
193
],
[
195
],
[
197
],
[
199
],
[
201
],
[
203
],
[
205
],
[
207
],
[
209
],
[
211
],
[
213
],
[
215,
217
],
[
219,
221
],
[
223
],
[
225
],
[
227
],
[
229
],
[
231
],
[
233
],
[
235
],
[
237
],
[
239
],
[
241
],
[
243
],
[
245
],
[
247
],
[
249,
251
],
[
253,
255
],
[
257
],
[
259
],
[
261
],
[
263
],
[
265
],
[
267
],
[
269
],
[
271
],
[
273,
275
],
[
277
],
[
279
],
[
281
],
[
283
],
[
285
],
[
287
],
[
289
],
[
291
],
[
293
],
[
295
],
[
297
],
[
299
],
[
301,
303
],
[
305,
307
],
[
309
],
[
311
],
[
315
],
[
317
],
[
319
],
[
321
],
[
323
],
[
325
],
[
327
],
[
329
],
[
331
],
[
333
],
[
335
],
[
337
],
[
339
],
[
341
],
[
343
],
[
345
],
[
347
],
[
349
],
[
351
],
[
353
],
[
355
],
[
357
],
[
359
],
[
361
],
[
363
],
[
365
],
[
367
],
[
371
],
[
373
],
[
375
],
[
377
],
[
381
],
[
383
],
[
385
],
[
387
],
[
389,
391
],
[
393
],
[
395
],
[
397
],
[
399
],
[
401
],
[
403
],
[
405
],
[
407,
409
],
[
411
],
[
413
],
[
415
],
[
417
],
[
419
],
[
421
],
[
425
],
[
427,
429
],
[
431,
433
],
[
437
],
[
439
],
[
441
],
[
443
],
[
445
],
[
447
],
[
449,
451
],
[
453,
455
],
[
457
],
[
459
],
[
461
],
[
463
],
[
465
],
[
467
],
[
469
],
[
471
],
[
473
],
[
475
],
[
477,
479
],
[
481,
483
],
[
485
],
[
487
],
[
489
],
[
491
],
[
493
],
[
495
],
[
497
],
[
499
],
[
501
],
[
503
],
[
505
],
[
507
],
[
509
],
[
511,
513
],
[
515,
517
],
[
519
],
[
521
],
[
523
],
[
525
],
[
527
],
[
529
],
[
531
],
[
533
],
[
535
],
[
537,
539
],
[
541
],
[
543
],
[
545
],
[
547
],
[
549
],
[
551
],
[
553
],
[
555
],
[
557
],
[
559
],
[
561
],
[
563
],
[
565
],
[
567
],
[
569
],
[
571
],
[
573
],
[
579
],
[
581
]
] |
21,314 | static void s390_pcihost_init_as(S390pciState *s)
{
int i;
S390PCIBusDevice *pbdev;
for (i = 0; i < PCI_SLOT_MAX; i++) {
pbdev = &s->pbdev[i];
memory_region_init(&pbdev->mr, OBJECT(s),
"iommu-root-s390", UINT64_MAX);
address_space_init(&pbdev->as, &pbdev->mr, "iommu-pci");
}
memory_region_init_io(&s->msix_notify_mr, OBJECT(s),
&s390_msi_ctrl_ops, s, "msix-s390", UINT64_MAX);
address_space_init(&s->msix_notify_as, &s->msix_notify_mr, "msix-pci");
}
| false | qemu | 67d5cd9722b230027d3d4267ae6069c5d8a65463 | static void s390_pcihost_init_as(S390pciState *s)
{
int i;
S390PCIBusDevice *pbdev;
for (i = 0; i < PCI_SLOT_MAX; i++) {
pbdev = &s->pbdev[i];
memory_region_init(&pbdev->mr, OBJECT(s),
"iommu-root-s390", UINT64_MAX);
address_space_init(&pbdev->as, &pbdev->mr, "iommu-pci");
}
memory_region_init_io(&s->msix_notify_mr, OBJECT(s),
&s390_msi_ctrl_ops, s, "msix-s390", UINT64_MAX);
address_space_init(&s->msix_notify_as, &s->msix_notify_mr, "msix-pci");
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(S390pciState *VAR_0)
{
int VAR_1;
S390PCIBusDevice *pbdev;
for (VAR_1 = 0; VAR_1 < PCI_SLOT_MAX; VAR_1++) {
pbdev = &VAR_0->pbdev[VAR_1];
memory_region_init(&pbdev->mr, OBJECT(VAR_0),
"iommu-root-s390", UINT64_MAX);
address_space_init(&pbdev->as, &pbdev->mr, "iommu-pci");
}
memory_region_init_io(&VAR_0->msix_notify_mr, OBJECT(VAR_0),
&s390_msi_ctrl_ops, VAR_0, "msix-s390", UINT64_MAX);
address_space_init(&VAR_0->msix_notify_as, &VAR_0->msix_notify_mr, "msix-pci");
}
| [
"static void FUNC_0(S390pciState *VAR_0)\n{",
"int VAR_1;",
"S390PCIBusDevice *pbdev;",
"for (VAR_1 = 0; VAR_1 < PCI_SLOT_MAX; VAR_1++) {",
"pbdev = &VAR_0->pbdev[VAR_1];",
"memory_region_init(&pbdev->mr, OBJECT(VAR_0),\n\"iommu-root-s390\", UINT64_MAX);",
"address_space_init(&pbdev->as, &pbdev->mr, \"iommu-pci\");",
"}",
"memory_region_init_io(&VAR_0->msix_notify_mr, OBJECT(VAR_0),\n&s390_msi_ctrl_ops, VAR_0, \"msix-s390\", UINT64_MAX);",
"address_space_init(&VAR_0->msix_notify_as, &VAR_0->msix_notify_mr, \"msix-pci\");",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
11
],
[
13
],
[
15,
17
],
[
19
],
[
21
],
[
25,
27
],
[
29
],
[
31
]
] |
21,315 | static uint64_t tmu2_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
MilkymistTMU2State *s = opaque;
uint32_t r = 0;
addr >>= 2;
switch (addr) {
case R_CTL:
case R_HMESHLAST:
case R_VMESHLAST:
case R_BRIGHTNESS:
case R_CHROMAKEY:
case R_VERTICESADDR:
case R_TEXFBUF:
case R_TEXHRES:
case R_TEXVRES:
case R_TEXHMASK:
case R_TEXVMASK:
case R_DSTFBUF:
case R_DSTHRES:
case R_DSTVRES:
case R_DSTHOFFSET:
case R_DSTVOFFSET:
case R_DSTSQUAREW:
case R_DSTSQUAREH:
case R_ALPHA:
r = s->regs[addr];
break;
default:
error_report("milkymist_tmu2: read access to unknown register 0x"
TARGET_FMT_plx, addr << 2);
break;
}
trace_milkymist_tmu2_memory_read(addr << 2, r);
return r;
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | static uint64_t tmu2_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
MilkymistTMU2State *s = opaque;
uint32_t r = 0;
addr >>= 2;
switch (addr) {
case R_CTL:
case R_HMESHLAST:
case R_VMESHLAST:
case R_BRIGHTNESS:
case R_CHROMAKEY:
case R_VERTICESADDR:
case R_TEXFBUF:
case R_TEXHRES:
case R_TEXVRES:
case R_TEXHMASK:
case R_TEXVMASK:
case R_DSTFBUF:
case R_DSTHRES:
case R_DSTVRES:
case R_DSTHOFFSET:
case R_DSTVOFFSET:
case R_DSTSQUAREW:
case R_DSTSQUAREH:
case R_ALPHA:
r = s->regs[addr];
break;
default:
error_report("milkymist_tmu2: read access to unknown register 0x"
TARGET_FMT_plx, addr << 2);
break;
}
trace_milkymist_tmu2_memory_read(addr << 2, r);
return r;
}
| {
"code": [],
"line_no": []
} | static uint64_t FUNC_0(void *opaque, target_phys_addr_t addr,
unsigned size)
{
MilkymistTMU2State *s = opaque;
uint32_t r = 0;
addr >>= 2;
switch (addr) {
case R_CTL:
case R_HMESHLAST:
case R_VMESHLAST:
case R_BRIGHTNESS:
case R_CHROMAKEY:
case R_VERTICESADDR:
case R_TEXFBUF:
case R_TEXHRES:
case R_TEXVRES:
case R_TEXHMASK:
case R_TEXVMASK:
case R_DSTFBUF:
case R_DSTHRES:
case R_DSTVRES:
case R_DSTHOFFSET:
case R_DSTVOFFSET:
case R_DSTSQUAREW:
case R_DSTSQUAREH:
case R_ALPHA:
r = s->regs[addr];
break;
default:
error_report("milkymist_tmu2: read access to unknown register 0x"
TARGET_FMT_plx, addr << 2);
break;
}
trace_milkymist_tmu2_memory_read(addr << 2, r);
return r;
}
| [
"static uint64_t FUNC_0(void *opaque, target_phys_addr_t addr,\nunsigned size)\n{",
"MilkymistTMU2State *s = opaque;",
"uint32_t r = 0;",
"addr >>= 2;",
"switch (addr) {",
"case R_CTL:\ncase R_HMESHLAST:\ncase R_VMESHLAST:\ncase R_BRIGHTNESS:\ncase R_CHROMAKEY:\ncase R_VERTICESADDR:\ncase R_TEXFBUF:\ncase R_TEXHRES:\ncase R_TEXVRES:\ncase R_TEXHMASK:\ncase R_TEXVMASK:\ncase R_DSTFBUF:\ncase R_DSTHRES:\ncase R_DSTVRES:\ncase R_DSTHOFFSET:\ncase R_DSTVOFFSET:\ncase R_DSTSQUAREW:\ncase R_DSTSQUAREH:\ncase R_ALPHA:\nr = s->regs[addr];",
"break;",
"default:\nerror_report(\"milkymist_tmu2: read access to unknown register 0x\"\nTARGET_FMT_plx, addr << 2);",
"break;",
"}",
"trace_milkymist_tmu2_memory_read(addr << 2, r);",
"return r;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17,
19,
21,
23,
25,
27,
29,
31,
33,
35,
37,
39,
41,
43,
45,
47,
49,
51,
53,
55
],
[
57
],
[
61,
63,
65
],
[
67
],
[
69
],
[
73
],
[
77
],
[
79
]
] |
21,316 | static void icount_adjust_vm(void *opaque)
{
timer_mod(icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
NANOSECONDS_PER_SECOND / 10);
icount_adjust();
}
| false | qemu | b39e3f34c9de7ead6a11a74aa2de78baf41d81a7 | static void icount_adjust_vm(void *opaque)
{
timer_mod(icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
NANOSECONDS_PER_SECOND / 10);
icount_adjust();
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(void *VAR_0)
{
timer_mod(icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
NANOSECONDS_PER_SECOND / 10);
icount_adjust();
}
| [
"static void FUNC_0(void *VAR_0)\n{",
"timer_mod(icount_vm_timer,\nqemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +\nNANOSECONDS_PER_SECOND / 10);",
"icount_adjust();",
"}"
] | [
0,
0,
0,
0
] | [
[
1,
3
],
[
5,
7,
9
],
[
11
],
[
13
]
] |
21,317 | static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, int *pnum)
{
BDRVQcowState *s = bs->opaque;
int index_in_cluster, n;
uint64_t cluster_offset;
cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
index_in_cluster = sector_num & (s->cluster_sectors - 1);
n = s->cluster_sectors - index_in_cluster;
if (n > nb_sectors)
n = nb_sectors;
*pnum = n;
return (cluster_offset != 0);
}
| false | qemu | f8a2e5e3ca6146d4cc66a4750daf44a0cf043319 | static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, int *pnum)
{
BDRVQcowState *s = bs->opaque;
int index_in_cluster, n;
uint64_t cluster_offset;
cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
index_in_cluster = sector_num & (s->cluster_sectors - 1);
n = s->cluster_sectors - index_in_cluster;
if (n > nb_sectors)
n = nb_sectors;
*pnum = n;
return (cluster_offset != 0);
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(BlockDriverState *VAR_0, int64_t VAR_1,
int VAR_2, int *VAR_3)
{
BDRVQcowState *s = VAR_0->opaque;
int VAR_4, VAR_5;
uint64_t cluster_offset;
cluster_offset = get_cluster_offset(VAR_0, VAR_1 << 9, 0, 0, 0, 0);
VAR_4 = VAR_1 & (s->cluster_sectors - 1);
VAR_5 = s->cluster_sectors - VAR_4;
if (VAR_5 > VAR_2)
VAR_5 = VAR_2;
*VAR_3 = VAR_5;
return (cluster_offset != 0);
}
| [
"static int FUNC_0(BlockDriverState *VAR_0, int64_t VAR_1,\nint VAR_2, int *VAR_3)\n{",
"BDRVQcowState *s = VAR_0->opaque;",
"int VAR_4, VAR_5;",
"uint64_t cluster_offset;",
"cluster_offset = get_cluster_offset(VAR_0, VAR_1 << 9, 0, 0, 0, 0);",
"VAR_4 = VAR_1 & (s->cluster_sectors - 1);",
"VAR_5 = s->cluster_sectors - VAR_4;",
"if (VAR_5 > VAR_2)\nVAR_5 = VAR_2;",
"*VAR_3 = VAR_5;",
"return (cluster_offset != 0);",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
15
],
[
17
],
[
19
],
[
21,
23
],
[
25
],
[
27
],
[
29
]
] |
21,318 | static uint64_t mv88w8618_eth_read(void *opaque, target_phys_addr_t offset,
unsigned size)
{
mv88w8618_eth_state *s = opaque;
switch (offset) {
case MP_ETH_SMIR:
if (s->smir & MP_ETH_SMIR_OPCODE) {
switch (s->smir & MP_ETH_SMIR_ADDR) {
case MP_ETH_PHY1_BMSR:
return MP_PHY_BMSR_LINK | MP_PHY_BMSR_AUTONEG |
MP_ETH_SMIR_RDVALID;
case MP_ETH_PHY1_PHYSID1:
return (MP_PHY_88E3015 >> 16) | MP_ETH_SMIR_RDVALID;
case MP_ETH_PHY1_PHYSID2:
return (MP_PHY_88E3015 & 0xFFFF) | MP_ETH_SMIR_RDVALID;
default:
return MP_ETH_SMIR_RDVALID;
}
}
return 0;
case MP_ETH_ICR:
return s->icr;
case MP_ETH_IMR:
return s->imr;
case MP_ETH_FRDP0 ... MP_ETH_FRDP3:
return s->frx_queue[(offset - MP_ETH_FRDP0)/4];
case MP_ETH_CRDP0 ... MP_ETH_CRDP3:
return s->rx_queue[(offset - MP_ETH_CRDP0)/4];
case MP_ETH_CTDP0 ... MP_ETH_CTDP3:
return s->tx_queue[(offset - MP_ETH_CTDP0)/4];
default:
return 0;
}
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | static uint64_t mv88w8618_eth_read(void *opaque, target_phys_addr_t offset,
unsigned size)
{
mv88w8618_eth_state *s = opaque;
switch (offset) {
case MP_ETH_SMIR:
if (s->smir & MP_ETH_SMIR_OPCODE) {
switch (s->smir & MP_ETH_SMIR_ADDR) {
case MP_ETH_PHY1_BMSR:
return MP_PHY_BMSR_LINK | MP_PHY_BMSR_AUTONEG |
MP_ETH_SMIR_RDVALID;
case MP_ETH_PHY1_PHYSID1:
return (MP_PHY_88E3015 >> 16) | MP_ETH_SMIR_RDVALID;
case MP_ETH_PHY1_PHYSID2:
return (MP_PHY_88E3015 & 0xFFFF) | MP_ETH_SMIR_RDVALID;
default:
return MP_ETH_SMIR_RDVALID;
}
}
return 0;
case MP_ETH_ICR:
return s->icr;
case MP_ETH_IMR:
return s->imr;
case MP_ETH_FRDP0 ... MP_ETH_FRDP3:
return s->frx_queue[(offset - MP_ETH_FRDP0)/4];
case MP_ETH_CRDP0 ... MP_ETH_CRDP3:
return s->rx_queue[(offset - MP_ETH_CRDP0)/4];
case MP_ETH_CTDP0 ... MP_ETH_CTDP3:
return s->tx_queue[(offset - MP_ETH_CTDP0)/4];
default:
return 0;
}
}
| {
"code": [],
"line_no": []
} | static uint64_t FUNC_0(void *opaque, target_phys_addr_t offset,
unsigned size)
{
mv88w8618_eth_state *s = opaque;
switch (offset) {
case MP_ETH_SMIR:
if (s->smir & MP_ETH_SMIR_OPCODE) {
switch (s->smir & MP_ETH_SMIR_ADDR) {
case MP_ETH_PHY1_BMSR:
return MP_PHY_BMSR_LINK | MP_PHY_BMSR_AUTONEG |
MP_ETH_SMIR_RDVALID;
case MP_ETH_PHY1_PHYSID1:
return (MP_PHY_88E3015 >> 16) | MP_ETH_SMIR_RDVALID;
case MP_ETH_PHY1_PHYSID2:
return (MP_PHY_88E3015 & 0xFFFF) | MP_ETH_SMIR_RDVALID;
default:
return MP_ETH_SMIR_RDVALID;
}
}
return 0;
case MP_ETH_ICR:
return s->icr;
case MP_ETH_IMR:
return s->imr;
case MP_ETH_FRDP0 ... MP_ETH_FRDP3:
return s->frx_queue[(offset - MP_ETH_FRDP0)/4];
case MP_ETH_CRDP0 ... MP_ETH_CRDP3:
return s->rx_queue[(offset - MP_ETH_CRDP0)/4];
case MP_ETH_CTDP0 ... MP_ETH_CTDP3:
return s->tx_queue[(offset - MP_ETH_CTDP0)/4];
default:
return 0;
}
}
| [
"static uint64_t FUNC_0(void *opaque, target_phys_addr_t offset,\nunsigned size)\n{",
"mv88w8618_eth_state *s = opaque;",
"switch (offset) {",
"case MP_ETH_SMIR:\nif (s->smir & MP_ETH_SMIR_OPCODE) {",
"switch (s->smir & MP_ETH_SMIR_ADDR) {",
"case MP_ETH_PHY1_BMSR:\nreturn MP_PHY_BMSR_LINK | MP_PHY_BMSR_AUTONEG |\nMP_ETH_SMIR_RDVALID;",
"case MP_ETH_PHY1_PHYSID1:\nreturn (MP_PHY_88E3015 >> 16) | MP_ETH_SMIR_RDVALID;",
"case MP_ETH_PHY1_PHYSID2:\nreturn (MP_PHY_88E3015 & 0xFFFF) | MP_ETH_SMIR_RDVALID;",
"default:\nreturn MP_ETH_SMIR_RDVALID;",
"}",
"}",
"return 0;",
"case MP_ETH_ICR:\nreturn s->icr;",
"case MP_ETH_IMR:\nreturn s->imr;",
"case MP_ETH_FRDP0 ... MP_ETH_FRDP3:\nreturn s->frx_queue[(offset - MP_ETH_FRDP0)/4];",
"case MP_ETH_CRDP0 ... MP_ETH_CRDP3:\nreturn s->rx_queue[(offset - MP_ETH_CRDP0)/4];",
"case MP_ETH_CTDP0 ... MP_ETH_CTDP3:\nreturn s->tx_queue[(offset - MP_ETH_CTDP0)/4];",
"default:\nreturn 0;",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
11
],
[
13,
15
],
[
17
],
[
19,
21,
23
],
[
25,
27
],
[
29,
31
],
[
33,
35
],
[
37
],
[
39
],
[
41
],
[
45,
47
],
[
51,
53
],
[
57,
59
],
[
63,
65
],
[
69,
71
],
[
75,
77
],
[
79
],
[
81
]
] |
21,320 | static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
{
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
BlockdevBackup *backup;
BlockBackend *blk, *target;
Error *local_err = NULL;
assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);
backup = common->action->u.blockdev_backup;
blk = blk_by_name(backup->device);
if (!blk) {
error_setg(errp, "Device '%s' not found", backup->device);
return;
}
if (!blk_is_available(blk)) {
error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, backup->device);
return;
}
target = blk_by_name(backup->target);
if (!target) {
error_setg(errp, "Device '%s' not found", backup->target);
return;
}
/* AioContext is released in .clean() */
state->aio_context = blk_get_aio_context(blk);
if (state->aio_context != blk_get_aio_context(target)) {
state->aio_context = NULL;
error_setg(errp, "Backup between two IO threads is not implemented");
return;
}
aio_context_acquire(state->aio_context);
state->bs = blk_bs(blk);
bdrv_drained_begin(state->bs);
do_blockdev_backup(backup->device, backup->target,
backup->sync,
backup->has_speed, backup->speed,
backup->has_on_source_error, backup->on_source_error,
backup->has_on_target_error, backup->on_target_error,
common->block_job_txn, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
state->job = state->bs->job;
}
| false | qemu | 32bafa8fdd098d52fbf1102d5a5e48d29398c0aa | static void blockdev_backup_prepare(BlkActionState *common, Error **errp)
{
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common);
BlockdevBackup *backup;
BlockBackend *blk, *target;
Error *local_err = NULL;
assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);
backup = common->action->u.blockdev_backup;
blk = blk_by_name(backup->device);
if (!blk) {
error_setg(errp, "Device '%s' not found", backup->device);
return;
}
if (!blk_is_available(blk)) {
error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, backup->device);
return;
}
target = blk_by_name(backup->target);
if (!target) {
error_setg(errp, "Device '%s' not found", backup->target);
return;
}
state->aio_context = blk_get_aio_context(blk);
if (state->aio_context != blk_get_aio_context(target)) {
state->aio_context = NULL;
error_setg(errp, "Backup between two IO threads is not implemented");
return;
}
aio_context_acquire(state->aio_context);
state->bs = blk_bs(blk);
bdrv_drained_begin(state->bs);
do_blockdev_backup(backup->device, backup->target,
backup->sync,
backup->has_speed, backup->speed,
backup->has_on_source_error, backup->on_source_error,
backup->has_on_target_error, backup->on_target_error,
common->block_job_txn, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
state->job = state->bs->job;
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(BlkActionState *VAR_0, Error **VAR_1)
{
BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, VAR_0, VAR_0);
BlockdevBackup *backup;
BlockBackend *blk, *target;
Error *local_err = NULL;
assert(VAR_0->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);
backup = VAR_0->action->u.blockdev_backup;
blk = blk_by_name(backup->device);
if (!blk) {
error_setg(VAR_1, "Device '%s' not found", backup->device);
return;
}
if (!blk_is_available(blk)) {
error_setg(VAR_1, QERR_DEVICE_HAS_NO_MEDIUM, backup->device);
return;
}
target = blk_by_name(backup->target);
if (!target) {
error_setg(VAR_1, "Device '%s' not found", backup->target);
return;
}
state->aio_context = blk_get_aio_context(blk);
if (state->aio_context != blk_get_aio_context(target)) {
state->aio_context = NULL;
error_setg(VAR_1, "Backup between two IO threads is not implemented");
return;
}
aio_context_acquire(state->aio_context);
state->bs = blk_bs(blk);
bdrv_drained_begin(state->bs);
do_blockdev_backup(backup->device, backup->target,
backup->sync,
backup->has_speed, backup->speed,
backup->has_on_source_error, backup->on_source_error,
backup->has_on_target_error, backup->on_target_error,
VAR_0->block_job_txn, &local_err);
if (local_err) {
error_propagate(VAR_1, local_err);
return;
}
state->job = state->bs->job;
}
| [
"static void FUNC_0(BlkActionState *VAR_0, Error **VAR_1)\n{",
"BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, VAR_0, VAR_0);",
"BlockdevBackup *backup;",
"BlockBackend *blk, *target;",
"Error *local_err = NULL;",
"assert(VAR_0->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP);",
"backup = VAR_0->action->u.blockdev_backup;",
"blk = blk_by_name(backup->device);",
"if (!blk) {",
"error_setg(VAR_1, \"Device '%s' not found\", backup->device);",
"return;",
"}",
"if (!blk_is_available(blk)) {",
"error_setg(VAR_1, QERR_DEVICE_HAS_NO_MEDIUM, backup->device);",
"return;",
"}",
"target = blk_by_name(backup->target);",
"if (!target) {",
"error_setg(VAR_1, \"Device '%s' not found\", backup->target);",
"return;",
"}",
"state->aio_context = blk_get_aio_context(blk);",
"if (state->aio_context != blk_get_aio_context(target)) {",
"state->aio_context = NULL;",
"error_setg(VAR_1, \"Backup between two IO threads is not implemented\");",
"return;",
"}",
"aio_context_acquire(state->aio_context);",
"state->bs = blk_bs(blk);",
"bdrv_drained_begin(state->bs);",
"do_blockdev_backup(backup->device, backup->target,\nbackup->sync,\nbackup->has_speed, backup->speed,\nbackup->has_on_source_error, backup->on_source_error,\nbackup->has_on_target_error, backup->on_target_error,\nVAR_0->block_job_txn, &local_err);",
"if (local_err) {",
"error_propagate(VAR_1, local_err);",
"return;",
"}",
"state->job = state->bs->job;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
15
],
[
17
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
33
],
[
35
],
[
37
],
[
39
],
[
43
],
[
45
],
[
47
],
[
49
],
[
51
],
[
57
],
[
59
],
[
61
],
[
63
],
[
65
],
[
67
],
[
69
],
[
71
],
[
73
],
[
77,
79,
81,
83,
85,
87
],
[
89
],
[
91
],
[
93
],
[
95
],
[
99
],
[
101
]
] |
21,321 | static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
/* We're emulating OSF/1 PALcode. Many of these are trivial access
to internal cpu registers. */
/* Unprivileged PAL call */
if (palcode >= 0x80 && palcode < 0xC0) {
switch (palcode) {
case 0x86:
/* IMB */
/* No-op inside QEMU. */
break;
case 0x9E:
/* RDUNIQUE */
tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
break;
case 0x9F:
/* WRUNIQUE */
tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
break;
default:
return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
}
return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* Privileged PAL code */
if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
switch (palcode) {
case 0x01:
/* CFLUSH */
/* No-op inside QEMU. */
break;
case 0x02:
/* DRAINA */
/* No-op inside QEMU. */
break;
case 0x2D:
/* WRVPTPTR */
tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
break;
case 0x31:
/* WRVAL */
tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
break;
case 0x32:
/* RDVAL */
tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
break;
case 0x35: {
/* SWPIPL */
TCGv tmp;
/* Note that we already know we're in kernel mode, so we know
that PS only contains the 3 IPL bits. */
tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
/* But make sure and store only the 3 IPL bits from the user. */
tmp = tcg_temp_new();
tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
tcg_temp_free(tmp);
break;
}
case 0x36:
/* RDPS */
tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
break;
case 0x38:
/* WRUSP */
tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
break;
case 0x3A:
/* RDUSP */
tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
break;
case 0x3C:
/* WHAMI */
tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
-offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
break;
default:
return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
}
return NO_EXIT;
}
#endif
return gen_invalid(ctx);
}
| false | qemu | ba96394e20ad033a10eb790fdf2377e2a8892feb | static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
if (palcode >= 0x80 && palcode < 0xC0) {
switch (palcode) {
case 0x86:
break;
case 0x9E:
tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
break;
case 0x9F:
tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
break;
default:
return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
}
return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
switch (palcode) {
case 0x01:
break;
case 0x02:
break;
case 0x2D:
tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
break;
case 0x31:
tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
break;
case 0x32:
tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
break;
case 0x35: {
TCGv tmp;
tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
tmp = tcg_temp_new();
tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
tcg_temp_free(tmp);
break;
}
case 0x36:
tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
break;
case 0x38:
tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
break;
case 0x3A:
tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
break;
case 0x3C:
tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
-offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
break;
default:
return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
}
return NO_EXIT;
}
#endif
return gen_invalid(ctx);
}
| {
"code": [],
"line_no": []
} | static ExitStatus FUNC_0(DisasContext *ctx, int palcode)
{
if (palcode >= 0x80 && palcode < 0xC0) {
switch (palcode) {
case 0x86:
break;
case 0x9E:
tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
break;
case 0x9F:
tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
break;
default:
return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
}
return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
switch (palcode) {
case 0x01:
break;
case 0x02:
break;
case 0x2D:
tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
break;
case 0x31:
tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
break;
case 0x32:
tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
break;
case 0x35: {
TCGv tmp;
tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
tmp = tcg_temp_new();
tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
tcg_temp_free(tmp);
break;
}
case 0x36:
tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
break;
case 0x38:
tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
break;
case 0x3A:
tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
break;
case 0x3C:
tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
-offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
break;
default:
return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
}
return NO_EXIT;
}
#endif
return gen_invalid(ctx);
}
| [
"static ExitStatus FUNC_0(DisasContext *ctx, int palcode)\n{",
"if (palcode >= 0x80 && palcode < 0xC0) {",
"switch (palcode) {",
"case 0x86:\nbreak;",
"case 0x9E:\ntcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);",
"break;",
"case 0x9F:\ntcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);",
"break;",
"default:\nreturn gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);",
"}",
"return NO_EXIT;",
"}",
"#ifndef CONFIG_USER_ONLY\nif (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {",
"switch (palcode) {",
"case 0x01:\nbreak;",
"case 0x02:\nbreak;",
"case 0x2D:\ntcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));",
"break;",
"case 0x31:\ntcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);",
"break;",
"case 0x32:\ntcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);",
"break;",
"case 0x35: {",
"TCGv tmp;",
"tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));",
"tmp = tcg_temp_new();",
"tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);",
"tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));",
"tcg_temp_free(tmp);",
"break;",
"}",
"case 0x36:\ntcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));",
"break;",
"case 0x38:\ntcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);",
"break;",
"case 0x3A:\ntcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);",
"break;",
"case 0x3C:\ntcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,\n-offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));",
"break;",
"default:\nreturn gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);",
"}",
"return NO_EXIT;",
"}",
"#endif\nreturn gen_invalid(ctx);",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
13
],
[
15
],
[
17,
23
],
[
25,
29
],
[
31
],
[
33,
37
],
[
39
],
[
41,
43
],
[
45
],
[
47
],
[
49
],
[
53,
57
],
[
59
],
[
61,
67
],
[
69,
75
],
[
77,
81
],
[
83
],
[
85,
89
],
[
91
],
[
93,
97
],
[
99
],
[
103
],
[
107
],
[
115
],
[
121
],
[
123
],
[
125
],
[
127
],
[
129
],
[
131
],
[
135,
139
],
[
141
],
[
143,
147
],
[
149
],
[
151,
155
],
[
157
],
[
159,
163,
165
],
[
167
],
[
171,
173
],
[
175
],
[
177
],
[
179
],
[
181,
185
],
[
187
]
] |
21,322 | static void tcg_out_ri32(TCGContext *s, int const_arg, TCGArg arg)
{
if (const_arg) {
assert(const_arg == 1);
tcg_out8(s, TCG_CONST);
tcg_out32(s, arg);
} else {
tcg_out_r(s, arg);
}
}
| false | qemu | eabb7b91b36b202b4dac2df2d59d698e3aff197a | static void tcg_out_ri32(TCGContext *s, int const_arg, TCGArg arg)
{
if (const_arg) {
assert(const_arg == 1);
tcg_out8(s, TCG_CONST);
tcg_out32(s, arg);
} else {
tcg_out_r(s, arg);
}
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(TCGContext *VAR_0, int VAR_1, TCGArg VAR_2)
{
if (VAR_1) {
assert(VAR_1 == 1);
tcg_out8(VAR_0, TCG_CONST);
tcg_out32(VAR_0, VAR_2);
} else {
tcg_out_r(VAR_0, VAR_2);
}
}
| [
"static void FUNC_0(TCGContext *VAR_0, int VAR_1, TCGArg VAR_2)\n{",
"if (VAR_1) {",
"assert(VAR_1 == 1);",
"tcg_out8(VAR_0, TCG_CONST);",
"tcg_out32(VAR_0, VAR_2);",
"} else {",
"tcg_out_r(VAR_0, VAR_2);",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
]
] |
21,323 | static void hybrid6_cx(float (*in)[2], float (*out)[32][2], const float (*filter)[7][2], int len)
{
int i, j, ssb;
int N = 8;
float temp[8][2];
for (i = 0; i < len; i++, in++) {
for (ssb = 0; ssb < N; ssb++) {
float sum_re = filter[ssb][6][0] * in[6][0], sum_im = filter[ssb][6][0] * in[6][1];
for (j = 0; j < 6; j++) {
float in0_re = in[j][0];
float in0_im = in[j][1];
float in1_re = in[12-j][0];
float in1_im = in[12-j][1];
sum_re += filter[ssb][j][0] * (in0_re + in1_re) - filter[ssb][j][1] * (in0_im - in1_im);
sum_im += filter[ssb][j][0] * (in0_im + in1_im) + filter[ssb][j][1] * (in0_re - in1_re);
}
temp[ssb][0] = sum_re;
temp[ssb][1] = sum_im;
}
out[0][i][0] = temp[6][0];
out[0][i][1] = temp[6][1];
out[1][i][0] = temp[7][0];
out[1][i][1] = temp[7][1];
out[2][i][0] = temp[0][0];
out[2][i][1] = temp[0][1];
out[3][i][0] = temp[1][0];
out[3][i][1] = temp[1][1];
out[4][i][0] = temp[2][0] + temp[5][0];
out[4][i][1] = temp[2][1] + temp[5][1];
out[5][i][0] = temp[3][0] + temp[4][0];
out[5][i][1] = temp[3][1] + temp[4][1];
}
}
| false | FFmpeg | bf1945af301aff54c33352e75f17aec6cb5269d7 | static void hybrid6_cx(float (*in)[2], float (*out)[32][2], const float (*filter)[7][2], int len)
{
int i, j, ssb;
int N = 8;
float temp[8][2];
for (i = 0; i < len; i++, in++) {
for (ssb = 0; ssb < N; ssb++) {
float sum_re = filter[ssb][6][0] * in[6][0], sum_im = filter[ssb][6][0] * in[6][1];
for (j = 0; j < 6; j++) {
float in0_re = in[j][0];
float in0_im = in[j][1];
float in1_re = in[12-j][0];
float in1_im = in[12-j][1];
sum_re += filter[ssb][j][0] * (in0_re + in1_re) - filter[ssb][j][1] * (in0_im - in1_im);
sum_im += filter[ssb][j][0] * (in0_im + in1_im) + filter[ssb][j][1] * (in0_re - in1_re);
}
temp[ssb][0] = sum_re;
temp[ssb][1] = sum_im;
}
out[0][i][0] = temp[6][0];
out[0][i][1] = temp[6][1];
out[1][i][0] = temp[7][0];
out[1][i][1] = temp[7][1];
out[2][i][0] = temp[0][0];
out[2][i][1] = temp[0][1];
out[3][i][0] = temp[1][0];
out[3][i][1] = temp[1][1];
out[4][i][0] = temp[2][0] + temp[5][0];
out[4][i][1] = temp[2][1] + temp[5][1];
out[5][i][0] = temp[3][0] + temp[4][0];
out[5][i][1] = temp[3][1] + temp[4][1];
}
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(float (*VAR_0)[2], float (*VAR_1)[32][2], const float (*VAR_2)[7][2], int VAR_3)
{
int VAR_4, VAR_5, VAR_6;
int VAR_7 = 8;
float VAR_8[8][2];
for (VAR_4 = 0; VAR_4 < VAR_3; VAR_4++, VAR_0++) {
for (VAR_6 = 0; VAR_6 < VAR_7; VAR_6++) {
float VAR_9 = VAR_2[VAR_6][6][0] * VAR_0[6][0], VAR_10 = VAR_2[VAR_6][6][0] * VAR_0[6][1];
for (VAR_5 = 0; VAR_5 < 6; VAR_5++) {
float VAR_11 = VAR_0[VAR_5][0];
float VAR_12 = VAR_0[VAR_5][1];
float VAR_13 = VAR_0[12-VAR_5][0];
float VAR_14 = VAR_0[12-VAR_5][1];
VAR_9 += VAR_2[VAR_6][VAR_5][0] * (VAR_11 + VAR_13) - VAR_2[VAR_6][VAR_5][1] * (VAR_12 - VAR_14);
VAR_10 += VAR_2[VAR_6][VAR_5][0] * (VAR_12 + VAR_14) + VAR_2[VAR_6][VAR_5][1] * (VAR_11 - VAR_13);
}
VAR_8[VAR_6][0] = VAR_9;
VAR_8[VAR_6][1] = VAR_10;
}
VAR_1[0][VAR_4][0] = VAR_8[6][0];
VAR_1[0][VAR_4][1] = VAR_8[6][1];
VAR_1[1][VAR_4][0] = VAR_8[7][0];
VAR_1[1][VAR_4][1] = VAR_8[7][1];
VAR_1[2][VAR_4][0] = VAR_8[0][0];
VAR_1[2][VAR_4][1] = VAR_8[0][1];
VAR_1[3][VAR_4][0] = VAR_8[1][0];
VAR_1[3][VAR_4][1] = VAR_8[1][1];
VAR_1[4][VAR_4][0] = VAR_8[2][0] + VAR_8[5][0];
VAR_1[4][VAR_4][1] = VAR_8[2][1] + VAR_8[5][1];
VAR_1[5][VAR_4][0] = VAR_8[3][0] + VAR_8[4][0];
VAR_1[5][VAR_4][1] = VAR_8[3][1] + VAR_8[4][1];
}
}
| [
"static void FUNC_0(float (*VAR_0)[2], float (*VAR_1)[32][2], const float (*VAR_2)[7][2], int VAR_3)\n{",
"int VAR_4, VAR_5, VAR_6;",
"int VAR_7 = 8;",
"float VAR_8[8][2];",
"for (VAR_4 = 0; VAR_4 < VAR_3; VAR_4++, VAR_0++) {",
"for (VAR_6 = 0; VAR_6 < VAR_7; VAR_6++) {",
"float VAR_9 = VAR_2[VAR_6][6][0] * VAR_0[6][0], VAR_10 = VAR_2[VAR_6][6][0] * VAR_0[6][1];",
"for (VAR_5 = 0; VAR_5 < 6; VAR_5++) {",
"float VAR_11 = VAR_0[VAR_5][0];",
"float VAR_12 = VAR_0[VAR_5][1];",
"float VAR_13 = VAR_0[12-VAR_5][0];",
"float VAR_14 = VAR_0[12-VAR_5][1];",
"VAR_9 += VAR_2[VAR_6][VAR_5][0] * (VAR_11 + VAR_13) - VAR_2[VAR_6][VAR_5][1] * (VAR_12 - VAR_14);",
"VAR_10 += VAR_2[VAR_6][VAR_5][0] * (VAR_12 + VAR_14) + VAR_2[VAR_6][VAR_5][1] * (VAR_11 - VAR_13);",
"}",
"VAR_8[VAR_6][0] = VAR_9;",
"VAR_8[VAR_6][1] = VAR_10;",
"}",
"VAR_1[0][VAR_4][0] = VAR_8[6][0];",
"VAR_1[0][VAR_4][1] = VAR_8[6][1];",
"VAR_1[1][VAR_4][0] = VAR_8[7][0];",
"VAR_1[1][VAR_4][1] = VAR_8[7][1];",
"VAR_1[2][VAR_4][0] = VAR_8[0][0];",
"VAR_1[2][VAR_4][1] = VAR_8[0][1];",
"VAR_1[3][VAR_4][0] = VAR_8[1][0];",
"VAR_1[3][VAR_4][1] = VAR_8[1][1];",
"VAR_1[4][VAR_4][0] = VAR_8[2][0] + VAR_8[5][0];",
"VAR_1[4][VAR_4][1] = VAR_8[2][1] + VAR_8[5][1];",
"VAR_1[5][VAR_4][0] = VAR_8[3][0] + VAR_8[4][0];",
"VAR_1[5][VAR_4][1] = VAR_8[3][1] + VAR_8[4][1];",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45
],
[
47
],
[
49
],
[
51
],
[
53
],
[
55
],
[
57
],
[
59
],
[
61
],
[
63
],
[
65
],
[
67
]
] |
21,325 | static int qsort_strcmp(const void *a, const void *b)
{
return strcmp(a, b);
}
| false | qemu | 61007b316cd71ee7333ff7a0a749a8949527575f | static int qsort_strcmp(const void *a, const void *b)
{
return strcmp(a, b);
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(const void *VAR_0, const void *VAR_1)
{
return strcmp(VAR_0, VAR_1);
}
| [
"static int FUNC_0(const void *VAR_0, const void *VAR_1)\n{",
"return strcmp(VAR_0, VAR_1);",
"}"
] | [
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
]
] |
21,326 | static uint32_t mb_add_cmdline(MultibootState *s, const char *cmdline)
{
target_phys_addr_t p = s->offset_cmdlines;
char *b = (char *)s->mb_buf + p;
get_opt_value(b, strlen(cmdline) + 1, cmdline);
s->offset_cmdlines += strlen(b) + 1;
return s->mb_buf_phys + p;
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | static uint32_t mb_add_cmdline(MultibootState *s, const char *cmdline)
{
target_phys_addr_t p = s->offset_cmdlines;
char *b = (char *)s->mb_buf + p;
get_opt_value(b, strlen(cmdline) + 1, cmdline);
s->offset_cmdlines += strlen(b) + 1;
return s->mb_buf_phys + p;
}
| {
"code": [],
"line_no": []
} | static uint32_t FUNC_0(MultibootState *s, const char *cmdline)
{
target_phys_addr_t p = s->offset_cmdlines;
char *VAR_0 = (char *)s->mb_buf + p;
get_opt_value(VAR_0, strlen(cmdline) + 1, cmdline);
s->offset_cmdlines += strlen(VAR_0) + 1;
return s->mb_buf_phys + p;
}
| [
"static uint32_t FUNC_0(MultibootState *s, const char *cmdline)\n{",
"target_phys_addr_t p = s->offset_cmdlines;",
"char *VAR_0 = (char *)s->mb_buf + p;",
"get_opt_value(VAR_0, strlen(cmdline) + 1, cmdline);",
"s->offset_cmdlines += strlen(VAR_0) + 1;",
"return s->mb_buf_phys + p;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
11
],
[
13
],
[
15
],
[
17
]
] |
21,327 | static void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc,
const int16_t *chrUSrc, const int16_t *chrVSrc,
const int16_t *alpSrc,
uint8_t *dest, uint8_t *uDest, uint8_t *vDest,
uint8_t *aDest, int dstW, int chrDstW)
{
int p= 4;
const int16_t *src[4]= { alpSrc + dstW, lumSrc + dstW, chrUSrc + chrDstW, chrVSrc + chrDstW };
uint8_t *dst[4]= { aDest, dest, uDest, vDest };
x86_reg counter[4]= { dstW, dstW, chrDstW, chrDstW };
while (p--) {
if (dst[p]) {
__asm__ volatile(
"mov %2, %%"REG_a" \n\t"
".p2align 4 \n\t" /* FIXME Unroll? */
"1: \n\t"
"movq (%0, %%"REG_a", 2), %%mm0 \n\t"
"movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"
"psraw $7, %%mm0 \n\t"
"psraw $7, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
MOVNTQ(%%mm0, (%1, %%REGa))
"add $8, %%"REG_a" \n\t"
"jnc 1b \n\t"
:: "r" (src[p]), "r" (dst[p] + counter[p]),
"g" (-counter[p])
: "%"REG_a
);
}
}
}
| false | FFmpeg | 13a099799e89a76eb921ca452e1b04a7a28a9855 | static void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc,
const int16_t *chrUSrc, const int16_t *chrVSrc,
const int16_t *alpSrc,
uint8_t *dest, uint8_t *uDest, uint8_t *vDest,
uint8_t *aDest, int dstW, int chrDstW)
{
int p= 4;
const int16_t *src[4]= { alpSrc + dstW, lumSrc + dstW, chrUSrc + chrDstW, chrVSrc + chrDstW };
uint8_t *dst[4]= { aDest, dest, uDest, vDest };
x86_reg counter[4]= { dstW, dstW, chrDstW, chrDstW };
while (p--) {
if (dst[p]) {
__asm__ volatile(
"mov %2, %%"REG_a" \n\t"
".p2align 4 \n\t"
"1: \n\t"
"movq (%0, %%"REG_a", 2), %%mm0 \n\t"
"movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"
"psraw $7, %%mm0 \n\t"
"psraw $7, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
MOVNTQ(%%mm0, (%1, %%REGa))
"add $8, %%"REG_a" \n\t"
"jnc 1b \n\t"
:: "r" (src[p]), "r" (dst[p] + counter[p]),
"g" (-counter[p])
: "%"REG_a
);
}
}
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc,
const int16_t *chrUSrc, const int16_t *chrVSrc,
const int16_t *alpSrc,
uint8_t *dest, uint8_t *uDest, uint8_t *vDest,
uint8_t *aDest, int dstW, int chrDstW)
{
int VAR_0= 4;
const int16_t *VAR_1[4]= { alpSrc + dstW, lumSrc + dstW, chrUSrc + chrDstW, chrVSrc + chrDstW };
uint8_t *dst[4]= { aDest, dest, uDest, vDest };
x86_reg counter[4]= { dstW, dstW, chrDstW, chrDstW };
while (VAR_0--) {
if (dst[VAR_0]) {
__asm__ volatile(
"mov %2, %%"REG_a" \n\t"
".p2align 4 \n\t"
"1: \n\t"
"movq (%0, %%"REG_a", 2), %%mm0 \n\t"
"movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"
"psraw $7, %%mm0 \n\t"
"psraw $7, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
MOVNTQ(%%mm0, (%1, %%REGa))
"add $8, %%"REG_a" \n\t"
"jnc 1b \n\t"
:: "r" (VAR_1[VAR_0]), "r" (dst[VAR_0] + counter[VAR_0]),
"g" (-counter[VAR_0])
: "%"REG_a
);
}
}
}
| [
"static void FUNC_0(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc,\nconst int16_t *chrUSrc, const int16_t *chrVSrc,\nconst int16_t *alpSrc,\nuint8_t *dest, uint8_t *uDest, uint8_t *vDest,\nuint8_t *aDest, int dstW, int chrDstW)\n{",
"int VAR_0= 4;",
"const int16_t *VAR_1[4]= { alpSrc + dstW, lumSrc + dstW, chrUSrc + chrDstW, chrVSrc + chrDstW };",
"uint8_t *dst[4]= { aDest, dest, uDest, vDest };",
"x86_reg counter[4]= { dstW, dstW, chrDstW, chrDstW };",
"while (VAR_0--) {",
"if (dst[VAR_0]) {",
"__asm__ volatile(\n\"mov %2, %%\"REG_a\" \\n\\t\"\n\".p2align 4 \\n\\t\"\n\"1: \\n\\t\"\n\"movq (%0, %%\"REG_a\", 2), %%mm0 \\n\\t\"\n\"movq 8(%0, %%\"REG_a\", 2), %%mm1 \\n\\t\"\n\"psraw $7, %%mm0 \\n\\t\"\n\"psraw $7, %%mm1 \\n\\t\"\n\"packuswb %%mm1, %%mm0 \\n\\t\"\nMOVNTQ(%%mm0, (%1, %%REGa))\n\"add $8, %%\"REG_a\" \\n\\t\"\n\"jnc 1b \\n\\t\"\n:: \"r\" (VAR_1[VAR_0]), \"r\" (dst[VAR_0] + counter[VAR_0]),\n\"g\" (-counter[VAR_0])\n: \"%\"REG_a\n);",
"}",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7,
9,
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
23
],
[
25
],
[
27,
29,
31,
33,
35,
37,
39,
41,
43,
45,
47,
49,
51,
53,
55,
57
],
[
59
],
[
61
],
[
63
]
] |
21,328 | static int vp3_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
const uint8_t *buf, int buf_size)
{
Vp3DecodeContext *s = avctx->priv_data;
GetBitContext gb;
static int counter = 0;
int i;
init_get_bits(&gb, buf, buf_size * 8);
if (s->theora && get_bits1(&gb))
{
av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
return -1;
}
s->keyframe = !get_bits1(&gb);
if (!s->theora)
skip_bits(&gb, 1);
s->last_quality_index = s->quality_index;
s->nqis=0;
do{
s->qis[s->nqis++]= get_bits(&gb, 6);
} while(s->theora >= 0x030200 && s->nqis<3 && get_bits1(&gb));
s->quality_index= s->qis[0];
if (s->avctx->debug & FF_DEBUG_PICT_INFO)
av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
s->keyframe?"key":"", counter, s->quality_index);
counter++;
if (s->quality_index != s->last_quality_index) {
init_dequantizer(s);
init_loop_filter(s);
}
if (s->keyframe) {
if (!s->theora)
{
skip_bits(&gb, 4); /* width code */
skip_bits(&gb, 4); /* height code */
if (s->version)
{
s->version = get_bits(&gb, 5);
if (counter == 1)
av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
}
}
if (s->version || s->theora)
{
if (get_bits1(&gb))
av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
skip_bits(&gb, 2); /* reserved? */
}
if (s->last_frame.data[0] == s->golden_frame.data[0]) {
if (s->golden_frame.data[0])
avctx->release_buffer(avctx, &s->golden_frame);
s->last_frame= s->golden_frame; /* ensure that we catch any access to this released frame */
} else {
if (s->golden_frame.data[0])
avctx->release_buffer(avctx, &s->golden_frame);
if (s->last_frame.data[0])
avctx->release_buffer(avctx, &s->last_frame);
}
s->golden_frame.reference = 3;
if(avctx->get_buffer(avctx, &s->golden_frame) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
return -1;
}
/* golden frame is also the current frame */
s->current_frame= s->golden_frame;
/* time to figure out pixel addresses? */
if (!s->pixel_addresses_inited)
{
if (!s->flipped_image)
vp3_calculate_pixel_addresses(s);
else
theora_calculate_pixel_addresses(s);
s->pixel_addresses_inited = 1;
}
} else {
/* allocate a new current frame */
s->current_frame.reference = 3;
if (!s->pixel_addresses_inited) {
av_log(s->avctx, AV_LOG_ERROR, "vp3: first frame not a keyframe\n");
return -1;
}
if(avctx->get_buffer(avctx, &s->current_frame) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
return -1;
}
}
s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame
s->current_frame.qstride= 0;
{START_TIMER
init_frame(s, &gb);
STOP_TIMER("init_frame")}
#if KEYFRAMES_ONLY
if (!s->keyframe) {
memcpy(s->current_frame.data[0], s->golden_frame.data[0],
s->current_frame.linesize[0] * s->height);
memcpy(s->current_frame.data[1], s->golden_frame.data[1],
s->current_frame.linesize[1] * s->height / 2);
memcpy(s->current_frame.data[2], s->golden_frame.data[2],
s->current_frame.linesize[2] * s->height / 2);
} else {
#endif
{START_TIMER
if (unpack_superblocks(s, &gb)){
av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
return -1;
}
STOP_TIMER("unpack_superblocks")}
{START_TIMER
if (unpack_modes(s, &gb)){
av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
return -1;
}
STOP_TIMER("unpack_modes")}
{START_TIMER
if (unpack_vectors(s, &gb)){
av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
return -1;
}
STOP_TIMER("unpack_vectors")}
{START_TIMER
if (unpack_dct_coeffs(s, &gb)){
av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
return -1;
}
STOP_TIMER("unpack_dct_coeffs")}
{START_TIMER
reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height);
if ((avctx->flags & CODEC_FLAG_GRAY) == 0) {
reverse_dc_prediction(s, s->fragment_start[1],
s->fragment_width / 2, s->fragment_height / 2);
reverse_dc_prediction(s, s->fragment_start[2],
s->fragment_width / 2, s->fragment_height / 2);
}
STOP_TIMER("reverse_dc_prediction")}
{START_TIMER
for (i = 0; i < s->macroblock_height; i++)
render_slice(s, i);
STOP_TIMER("render_fragments")}
{START_TIMER
apply_loop_filter(s);
STOP_TIMER("apply_loop_filter")}
#if KEYFRAMES_ONLY
}
#endif
*data_size=sizeof(AVFrame);
*(AVFrame*)data= s->current_frame;
/* release the last frame, if it is allocated and if it is not the
* golden frame */
if ((s->last_frame.data[0]) &&
(s->last_frame.data[0] != s->golden_frame.data[0]))
avctx->release_buffer(avctx, &s->last_frame);
/* shuffle frames (last = current) */
s->last_frame= s->current_frame;
s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */
return buf_size;
}
| false | FFmpeg | 5e53486545726987ab4482321d4dcf7e23e7652f | static int vp3_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
const uint8_t *buf, int buf_size)
{
Vp3DecodeContext *s = avctx->priv_data;
GetBitContext gb;
static int counter = 0;
int i;
init_get_bits(&gb, buf, buf_size * 8);
if (s->theora && get_bits1(&gb))
{
av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
return -1;
}
s->keyframe = !get_bits1(&gb);
if (!s->theora)
skip_bits(&gb, 1);
s->last_quality_index = s->quality_index;
s->nqis=0;
do{
s->qis[s->nqis++]= get_bits(&gb, 6);
} while(s->theora >= 0x030200 && s->nqis<3 && get_bits1(&gb));
s->quality_index= s->qis[0];
if (s->avctx->debug & FF_DEBUG_PICT_INFO)
av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
s->keyframe?"key":"", counter, s->quality_index);
counter++;
if (s->quality_index != s->last_quality_index) {
init_dequantizer(s);
init_loop_filter(s);
}
if (s->keyframe) {
if (!s->theora)
{
skip_bits(&gb, 4);
skip_bits(&gb, 4);
if (s->version)
{
s->version = get_bits(&gb, 5);
if (counter == 1)
av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
}
}
if (s->version || s->theora)
{
if (get_bits1(&gb))
av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
skip_bits(&gb, 2);
}
if (s->last_frame.data[0] == s->golden_frame.data[0]) {
if (s->golden_frame.data[0])
avctx->release_buffer(avctx, &s->golden_frame);
s->last_frame= s->golden_frame;
} else {
if (s->golden_frame.data[0])
avctx->release_buffer(avctx, &s->golden_frame);
if (s->last_frame.data[0])
avctx->release_buffer(avctx, &s->last_frame);
}
s->golden_frame.reference = 3;
if(avctx->get_buffer(avctx, &s->golden_frame) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
return -1;
}
s->current_frame= s->golden_frame;
if (!s->pixel_addresses_inited)
{
if (!s->flipped_image)
vp3_calculate_pixel_addresses(s);
else
theora_calculate_pixel_addresses(s);
s->pixel_addresses_inited = 1;
}
} else {
s->current_frame.reference = 3;
if (!s->pixel_addresses_inited) {
av_log(s->avctx, AV_LOG_ERROR, "vp3: first frame not a keyframe\n");
return -1;
}
if(avctx->get_buffer(avctx, &s->current_frame) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
return -1;
}
}
s->current_frame.qscale_table= s->qscale_table;
s->current_frame.qstride= 0;
{START_TIMER
init_frame(s, &gb);
STOP_TIMER("init_frame")}
#if KEYFRAMES_ONLY
if (!s->keyframe) {
memcpy(s->current_frame.data[0], s->golden_frame.data[0],
s->current_frame.linesize[0] * s->height);
memcpy(s->current_frame.data[1], s->golden_frame.data[1],
s->current_frame.linesize[1] * s->height / 2);
memcpy(s->current_frame.data[2], s->golden_frame.data[2],
s->current_frame.linesize[2] * s->height / 2);
} else {
#endif
{START_TIMER
if (unpack_superblocks(s, &gb)){
av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
return -1;
}
STOP_TIMER("unpack_superblocks")}
{START_TIMER
if (unpack_modes(s, &gb)){
av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
return -1;
}
STOP_TIMER("unpack_modes")}
{START_TIMER
if (unpack_vectors(s, &gb)){
av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
return -1;
}
STOP_TIMER("unpack_vectors")}
{START_TIMER
if (unpack_dct_coeffs(s, &gb)){
av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
return -1;
}
STOP_TIMER("unpack_dct_coeffs")}
{START_TIMER
reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height);
if ((avctx->flags & CODEC_FLAG_GRAY) == 0) {
reverse_dc_prediction(s, s->fragment_start[1],
s->fragment_width / 2, s->fragment_height / 2);
reverse_dc_prediction(s, s->fragment_start[2],
s->fragment_width / 2, s->fragment_height / 2);
}
STOP_TIMER("reverse_dc_prediction")}
{START_TIMER
for (i = 0; i < s->macroblock_height; i++)
render_slice(s, i);
STOP_TIMER("render_fragments")}
{START_TIMER
apply_loop_filter(s);
STOP_TIMER("apply_loop_filter")}
#if KEYFRAMES_ONLY
}
#endif
*data_size=sizeof(AVFrame);
*(AVFrame*)data= s->current_frame;
if ((s->last_frame.data[0]) &&
(s->last_frame.data[0] != s->golden_frame.data[0]))
avctx->release_buffer(avctx, &s->last_frame);
s->last_frame= s->current_frame;
s->current_frame.data[0]= NULL;
return buf_size;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(AVCodecContext *VAR_0,
void *VAR_1, int *VAR_2,
const uint8_t *VAR_3, int VAR_4)
{
Vp3DecodeContext *s = VAR_0->priv_data;
GetBitContext gb;
static int VAR_5 = 0;
int VAR_6;
init_get_bits(&gb, VAR_3, VAR_4 * 8);
if (s->theora && get_bits1(&gb))
{
av_log(VAR_0, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
return -1;
}
s->keyframe = !get_bits1(&gb);
if (!s->theora)
skip_bits(&gb, 1);
s->last_quality_index = s->quality_index;
s->nqis=0;
do{
s->qis[s->nqis++]= get_bits(&gb, 6);
} while(s->theora >= 0x030200 && s->nqis<3 && get_bits1(&gb));
s->quality_index= s->qis[0];
if (s->VAR_0->debug & FF_DEBUG_PICT_INFO)
av_log(s->VAR_0, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
s->keyframe?"key":"", VAR_5, s->quality_index);
VAR_5++;
if (s->quality_index != s->last_quality_index) {
init_dequantizer(s);
init_loop_filter(s);
}
if (s->keyframe) {
if (!s->theora)
{
skip_bits(&gb, 4);
skip_bits(&gb, 4);
if (s->version)
{
s->version = get_bits(&gb, 5);
if (VAR_5 == 1)
av_log(s->VAR_0, AV_LOG_DEBUG, "VP version: %d\n", s->version);
}
}
if (s->version || s->theora)
{
if (get_bits1(&gb))
av_log(s->VAR_0, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
skip_bits(&gb, 2);
}
if (s->last_frame.VAR_1[0] == s->golden_frame.VAR_1[0]) {
if (s->golden_frame.VAR_1[0])
VAR_0->release_buffer(VAR_0, &s->golden_frame);
s->last_frame= s->golden_frame;
} else {
if (s->golden_frame.VAR_1[0])
VAR_0->release_buffer(VAR_0, &s->golden_frame);
if (s->last_frame.VAR_1[0])
VAR_0->release_buffer(VAR_0, &s->last_frame);
}
s->golden_frame.reference = 3;
if(VAR_0->get_buffer(VAR_0, &s->golden_frame) < 0) {
av_log(s->VAR_0, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
return -1;
}
s->current_frame= s->golden_frame;
if (!s->pixel_addresses_inited)
{
if (!s->flipped_image)
vp3_calculate_pixel_addresses(s);
else
theora_calculate_pixel_addresses(s);
s->pixel_addresses_inited = 1;
}
} else {
s->current_frame.reference = 3;
if (!s->pixel_addresses_inited) {
av_log(s->VAR_0, AV_LOG_ERROR, "vp3: first frame not a keyframe\n");
return -1;
}
if(VAR_0->get_buffer(VAR_0, &s->current_frame) < 0) {
av_log(s->VAR_0, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
return -1;
}
}
s->current_frame.qscale_table= s->qscale_table;
s->current_frame.qstride= 0;
{START_TIMER
init_frame(s, &gb);
STOP_TIMER("init_frame")}
#if KEYFRAMES_ONLY
if (!s->keyframe) {
memcpy(s->current_frame.VAR_1[0], s->golden_frame.VAR_1[0],
s->current_frame.linesize[0] * s->height);
memcpy(s->current_frame.VAR_1[1], s->golden_frame.VAR_1[1],
s->current_frame.linesize[1] * s->height / 2);
memcpy(s->current_frame.VAR_1[2], s->golden_frame.VAR_1[2],
s->current_frame.linesize[2] * s->height / 2);
} else {
#endif
{START_TIMER
if (unpack_superblocks(s, &gb)){
av_log(s->VAR_0, AV_LOG_ERROR, "error in unpack_superblocks\n");
return -1;
}
STOP_TIMER("unpack_superblocks")}
{START_TIMER
if (unpack_modes(s, &gb)){
av_log(s->VAR_0, AV_LOG_ERROR, "error in unpack_modes\n");
return -1;
}
STOP_TIMER("unpack_modes")}
{START_TIMER
if (unpack_vectors(s, &gb)){
av_log(s->VAR_0, AV_LOG_ERROR, "error in unpack_vectors\n");
return -1;
}
STOP_TIMER("unpack_vectors")}
{START_TIMER
if (unpack_dct_coeffs(s, &gb)){
av_log(s->VAR_0, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
return -1;
}
STOP_TIMER("unpack_dct_coeffs")}
{START_TIMER
reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height);
if ((VAR_0->flags & CODEC_FLAG_GRAY) == 0) {
reverse_dc_prediction(s, s->fragment_start[1],
s->fragment_width / 2, s->fragment_height / 2);
reverse_dc_prediction(s, s->fragment_start[2],
s->fragment_width / 2, s->fragment_height / 2);
}
STOP_TIMER("reverse_dc_prediction")}
{START_TIMER
for (VAR_6 = 0; VAR_6 < s->macroblock_height; VAR_6++)
render_slice(s, VAR_6);
STOP_TIMER("render_fragments")}
{START_TIMER
apply_loop_filter(s);
STOP_TIMER("apply_loop_filter")}
#if KEYFRAMES_ONLY
}
#endif
*VAR_2=sizeof(AVFrame);
*(AVFrame*)VAR_1= s->current_frame;
if ((s->last_frame.VAR_1[0]) &&
(s->last_frame.VAR_1[0] != s->golden_frame.VAR_1[0]))
VAR_0->release_buffer(VAR_0, &s->last_frame);
s->last_frame= s->current_frame;
s->current_frame.VAR_1[0]= NULL;
return VAR_4;
}
| [
"static int FUNC_0(AVCodecContext *VAR_0,\nvoid *VAR_1, int *VAR_2,\nconst uint8_t *VAR_3, int VAR_4)\n{",
"Vp3DecodeContext *s = VAR_0->priv_data;",
"GetBitContext gb;",
"static int VAR_5 = 0;",
"int VAR_6;",
"init_get_bits(&gb, VAR_3, VAR_4 * 8);",
"if (s->theora && get_bits1(&gb))\n{",
"av_log(VAR_0, AV_LOG_ERROR, \"Header packet passed to frame decoder, skipping\\n\");",
"return -1;",
"}",
"s->keyframe = !get_bits1(&gb);",
"if (!s->theora)\nskip_bits(&gb, 1);",
"s->last_quality_index = s->quality_index;",
"s->nqis=0;",
"do{",
"s->qis[s->nqis++]= get_bits(&gb, 6);",
"} while(s->theora >= 0x030200 && s->nqis<3 && get_bits1(&gb));",
"s->quality_index= s->qis[0];",
"if (s->VAR_0->debug & FF_DEBUG_PICT_INFO)\nav_log(s->VAR_0, AV_LOG_INFO, \" VP3 %sframe #%d: Q index = %d\\n\",\ns->keyframe?\"key\":\"\", VAR_5, s->quality_index);",
"VAR_5++;",
"if (s->quality_index != s->last_quality_index) {",
"init_dequantizer(s);",
"init_loop_filter(s);",
"}",
"if (s->keyframe) {",
"if (!s->theora)\n{",
"skip_bits(&gb, 4);",
"skip_bits(&gb, 4);",
"if (s->version)\n{",
"s->version = get_bits(&gb, 5);",
"if (VAR_5 == 1)\nav_log(s->VAR_0, AV_LOG_DEBUG, \"VP version: %d\\n\", s->version);",
"}",
"}",
"if (s->version || s->theora)\n{",
"if (get_bits1(&gb))\nav_log(s->VAR_0, AV_LOG_ERROR, \"Warning, unsupported keyframe coding type?!\\n\");",
"skip_bits(&gb, 2);",
"}",
"if (s->last_frame.VAR_1[0] == s->golden_frame.VAR_1[0]) {",
"if (s->golden_frame.VAR_1[0])\nVAR_0->release_buffer(VAR_0, &s->golden_frame);",
"s->last_frame= s->golden_frame;",
"} else {",
"if (s->golden_frame.VAR_1[0])\nVAR_0->release_buffer(VAR_0, &s->golden_frame);",
"if (s->last_frame.VAR_1[0])\nVAR_0->release_buffer(VAR_0, &s->last_frame);",
"}",
"s->golden_frame.reference = 3;",
"if(VAR_0->get_buffer(VAR_0, &s->golden_frame) < 0) {",
"av_log(s->VAR_0, AV_LOG_ERROR, \"vp3: get_buffer() failed\\n\");",
"return -1;",
"}",
"s->current_frame= s->golden_frame;",
"if (!s->pixel_addresses_inited)\n{",
"if (!s->flipped_image)\nvp3_calculate_pixel_addresses(s);",
"else\ntheora_calculate_pixel_addresses(s);",
"s->pixel_addresses_inited = 1;",
"}",
"} else {",
"s->current_frame.reference = 3;",
"if (!s->pixel_addresses_inited) {",
"av_log(s->VAR_0, AV_LOG_ERROR, \"vp3: first frame not a keyframe\\n\");",
"return -1;",
"}",
"if(VAR_0->get_buffer(VAR_0, &s->current_frame) < 0) {",
"av_log(s->VAR_0, AV_LOG_ERROR, \"vp3: get_buffer() failed\\n\");",
"return -1;",
"}",
"}",
"s->current_frame.qscale_table= s->qscale_table;",
"s->current_frame.qstride= 0;",
"{START_TIMER",
"init_frame(s, &gb);",
"STOP_TIMER(\"init_frame\")}",
"#if KEYFRAMES_ONLY\nif (!s->keyframe) {",
"memcpy(s->current_frame.VAR_1[0], s->golden_frame.VAR_1[0],\ns->current_frame.linesize[0] * s->height);",
"memcpy(s->current_frame.VAR_1[1], s->golden_frame.VAR_1[1],\ns->current_frame.linesize[1] * s->height / 2);",
"memcpy(s->current_frame.VAR_1[2], s->golden_frame.VAR_1[2],\ns->current_frame.linesize[2] * s->height / 2);",
"} else {",
"#endif\n{START_TIMER",
"if (unpack_superblocks(s, &gb)){",
"av_log(s->VAR_0, AV_LOG_ERROR, \"error in unpack_superblocks\\n\");",
"return -1;",
"}",
"STOP_TIMER(\"unpack_superblocks\")}",
"{START_TIMER",
"if (unpack_modes(s, &gb)){",
"av_log(s->VAR_0, AV_LOG_ERROR, \"error in unpack_modes\\n\");",
"return -1;",
"}",
"STOP_TIMER(\"unpack_modes\")}",
"{START_TIMER",
"if (unpack_vectors(s, &gb)){",
"av_log(s->VAR_0, AV_LOG_ERROR, \"error in unpack_vectors\\n\");",
"return -1;",
"}",
"STOP_TIMER(\"unpack_vectors\")}",
"{START_TIMER",
"if (unpack_dct_coeffs(s, &gb)){",
"av_log(s->VAR_0, AV_LOG_ERROR, \"error in unpack_dct_coeffs\\n\");",
"return -1;",
"}",
"STOP_TIMER(\"unpack_dct_coeffs\")}",
"{START_TIMER",
"reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height);",
"if ((VAR_0->flags & CODEC_FLAG_GRAY) == 0) {",
"reverse_dc_prediction(s, s->fragment_start[1],\ns->fragment_width / 2, s->fragment_height / 2);",
"reverse_dc_prediction(s, s->fragment_start[2],\ns->fragment_width / 2, s->fragment_height / 2);",
"}",
"STOP_TIMER(\"reverse_dc_prediction\")}",
"{START_TIMER",
"for (VAR_6 = 0; VAR_6 < s->macroblock_height; VAR_6++)",
"render_slice(s, VAR_6);",
"STOP_TIMER(\"render_fragments\")}",
"{START_TIMER",
"apply_loop_filter(s);",
"STOP_TIMER(\"apply_loop_filter\")}",
"#if KEYFRAMES_ONLY\n}",
"#endif\n*VAR_2=sizeof(AVFrame);",
"*(AVFrame*)VAR_1= s->current_frame;",
"if ((s->last_frame.VAR_1[0]) &&\n(s->last_frame.VAR_1[0] != s->golden_frame.VAR_1[0]))\nVAR_0->release_buffer(VAR_0, &s->last_frame);",
"s->last_frame= s->current_frame;",
"s->current_frame.VAR_1[0]= NULL;",
"return VAR_4;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
19
],
[
23,
25
],
[
27
],
[
29
],
[
31
],
[
35
],
[
37,
39
],
[
41
],
[
45
],
[
47
],
[
49
],
[
51
],
[
55
],
[
59,
61,
63
],
[
65
],
[
69
],
[
71
],
[
73
],
[
75
],
[
79
],
[
81,
83
],
[
85
],
[
87
],
[
89,
91
],
[
93
],
[
95,
97
],
[
99
],
[
101
],
[
103,
105
],
[
107,
109
],
[
111
],
[
113
],
[
117
],
[
119,
121
],
[
123
],
[
125
],
[
127,
129
],
[
131,
133
],
[
135
],
[
139
],
[
141
],
[
143
],
[
145
],
[
147
],
[
153
],
[
159,
161
],
[
163,
165
],
[
167,
169
],
[
171
],
[
173
],
[
175
],
[
179
],
[
181
],
[
183
],
[
185
],
[
187
],
[
189
],
[
191
],
[
193
],
[
195
],
[
197
],
[
201
],
[
203
],
[
207
],
[
209
],
[
211
],
[
215,
217
],
[
221,
223
],
[
225,
227
],
[
229,
231
],
[
235
],
[
237,
241
],
[
243
],
[
245
],
[
247
],
[
249
],
[
251
],
[
253
],
[
255
],
[
257
],
[
259
],
[
261
],
[
263
],
[
265
],
[
267
],
[
269
],
[
271
],
[
273
],
[
275
],
[
277
],
[
279
],
[
281
],
[
283
],
[
285
],
[
287
],
[
289
],
[
293
],
[
295
],
[
297,
299
],
[
301,
303
],
[
305
],
[
307
],
[
309
],
[
313
],
[
315
],
[
317
],
[
321
],
[
323
],
[
325
],
[
327,
329
],
[
331,
335
],
[
337
],
[
345,
347,
349
],
[
355
],
[
357
],
[
361
],
[
363
]
] |
21,329 | static char *ts_value_string (char *buf, int buf_size, int64_t ts)
{
if (ts == AV_NOPTS_VALUE) {
snprintf(buf, buf_size, "N/A");
} else {
snprintf(buf, buf_size, "%"PRId64, ts);
}
return buf;
}
| false | FFmpeg | 0491a2a07a44f6e5e6f34081835e402c07025fd2 | static char *ts_value_string (char *buf, int buf_size, int64_t ts)
{
if (ts == AV_NOPTS_VALUE) {
snprintf(buf, buf_size, "N/A");
} else {
snprintf(buf, buf_size, "%"PRId64, ts);
}
return buf;
}
| {
"code": [],
"line_no": []
} | static char *FUNC_0 (char *VAR_0, int VAR_1, int64_t VAR_2)
{
if (VAR_2 == AV_NOPTS_VALUE) {
snprintf(VAR_0, VAR_1, "N/A");
} else {
snprintf(VAR_0, VAR_1, "%"PRId64, VAR_2);
}
return VAR_0;
}
| [
"static char *FUNC_0 (char *VAR_0, int VAR_1, int64_t VAR_2)\n{",
"if (VAR_2 == AV_NOPTS_VALUE) {",
"snprintf(VAR_0, VAR_1, \"N/A\");",
"} else {",
"snprintf(VAR_0, VAR_1, \"%\"PRId64, VAR_2);",
"}",
"return VAR_0;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
17
],
[
19
]
] |
21,330 | void pci_ne2000_init(PCIBus *bus, NICInfo *nd)
{
PCINE2000State *d;
NE2000State *s;
uint8_t *pci_conf;
d = (PCINE2000State *)pci_register_device(bus,
"NE2000", sizeof(PCINE2000State),
-1,
NULL, NULL);
pci_conf = d->dev.config;
pci_conf[0x00] = 0xec; // Realtek 8029
pci_conf[0x01] = 0x10;
pci_conf[0x02] = 0x29;
pci_conf[0x03] = 0x80;
pci_conf[0x0a] = 0x00; // ethernet network controller
pci_conf[0x0b] = 0x02;
pci_conf[0x0e] = 0x00; // header_type
pci_conf[0x3d] = 1; // interrupt pin 0
pci_register_io_region(&d->dev, 0, 0x100,
PCI_ADDRESS_SPACE_IO, ne2000_map);
s = &d->ne2000;
s->irq = 16; // PCI interrupt
s->pci_dev = (PCIDevice *)d;
memcpy(s->macaddr, nd->macaddr, 6);
ne2000_reset(s);
s->vc = qemu_new_vlan_client(nd->vlan, ne2000_receive, s);
snprintf(s->vc->info_str, sizeof(s->vc->info_str),
"ne2000 pci macaddr=%02x:%02x:%02x:%02x:%02x:%02x",
s->macaddr[0],
s->macaddr[1],
s->macaddr[2],
s->macaddr[3],
s->macaddr[4],
s->macaddr[5]);
/* XXX: instance number ? */
register_savevm("ne2000", 0, 2, ne2000_save, ne2000_load, s);
register_savevm("ne2000_pci", 0, 1, generic_pci_save, generic_pci_load,
&d->dev);
}
| true | qemu | d861b05ea30e6ac177de9b679da96194ebe21afc | void pci_ne2000_init(PCIBus *bus, NICInfo *nd)
{
PCINE2000State *d;
NE2000State *s;
uint8_t *pci_conf;
d = (PCINE2000State *)pci_register_device(bus,
"NE2000", sizeof(PCINE2000State),
-1,
NULL, NULL);
pci_conf = d->dev.config;
pci_conf[0x00] = 0xec;
pci_conf[0x01] = 0x10;
pci_conf[0x02] = 0x29;
pci_conf[0x03] = 0x80;
pci_conf[0x0a] = 0x00;
pci_conf[0x0b] = 0x02;
pci_conf[0x0e] = 0x00;
pci_conf[0x3d] = 1;
pci_register_io_region(&d->dev, 0, 0x100,
PCI_ADDRESS_SPACE_IO, ne2000_map);
s = &d->ne2000;
s->irq = 16;
s->pci_dev = (PCIDevice *)d;
memcpy(s->macaddr, nd->macaddr, 6);
ne2000_reset(s);
s->vc = qemu_new_vlan_client(nd->vlan, ne2000_receive, s);
snprintf(s->vc->info_str, sizeof(s->vc->info_str),
"ne2000 pci macaddr=%02x:%02x:%02x:%02x:%02x:%02x",
s->macaddr[0],
s->macaddr[1],
s->macaddr[2],
s->macaddr[3],
s->macaddr[4],
s->macaddr[5]);
register_savevm("ne2000", 0, 2, ne2000_save, ne2000_load, s);
register_savevm("ne2000_pci", 0, 1, generic_pci_save, generic_pci_load,
&d->dev);
}
| {
"code": [
" s->vc = qemu_new_vlan_client(nd->vlan, ne2000_receive, s);",
" s->vc = qemu_new_vlan_client(nd->vlan, ne2000_receive, s);"
],
"line_no": [
55,
55
]
} | void FUNC_0(PCIBus *VAR_0, NICInfo *VAR_1)
{
PCINE2000State *d;
NE2000State *s;
uint8_t *pci_conf;
d = (PCINE2000State *)pci_register_device(VAR_0,
"NE2000", sizeof(PCINE2000State),
-1,
NULL, NULL);
pci_conf = d->dev.config;
pci_conf[0x00] = 0xec;
pci_conf[0x01] = 0x10;
pci_conf[0x02] = 0x29;
pci_conf[0x03] = 0x80;
pci_conf[0x0a] = 0x00;
pci_conf[0x0b] = 0x02;
pci_conf[0x0e] = 0x00;
pci_conf[0x3d] = 1;
pci_register_io_region(&d->dev, 0, 0x100,
PCI_ADDRESS_SPACE_IO, ne2000_map);
s = &d->ne2000;
s->irq = 16;
s->pci_dev = (PCIDevice *)d;
memcpy(s->macaddr, VAR_1->macaddr, 6);
ne2000_reset(s);
s->vc = qemu_new_vlan_client(VAR_1->vlan, ne2000_receive, s);
snprintf(s->vc->info_str, sizeof(s->vc->info_str),
"ne2000 pci macaddr=%02x:%02x:%02x:%02x:%02x:%02x",
s->macaddr[0],
s->macaddr[1],
s->macaddr[2],
s->macaddr[3],
s->macaddr[4],
s->macaddr[5]);
register_savevm("ne2000", 0, 2, ne2000_save, ne2000_load, s);
register_savevm("ne2000_pci", 0, 1, generic_pci_save, generic_pci_load,
&d->dev);
}
| [
"void FUNC_0(PCIBus *VAR_0, NICInfo *VAR_1)\n{",
"PCINE2000State *d;",
"NE2000State *s;",
"uint8_t *pci_conf;",
"d = (PCINE2000State *)pci_register_device(VAR_0,\n\"NE2000\", sizeof(PCINE2000State),\n-1,\nNULL, NULL);",
"pci_conf = d->dev.config;",
"pci_conf[0x00] = 0xec;",
"pci_conf[0x01] = 0x10;",
"pci_conf[0x02] = 0x29;",
"pci_conf[0x03] = 0x80;",
"pci_conf[0x0a] = 0x00;",
"pci_conf[0x0b] = 0x02;",
"pci_conf[0x0e] = 0x00;",
"pci_conf[0x3d] = 1;",
"pci_register_io_region(&d->dev, 0, 0x100,\nPCI_ADDRESS_SPACE_IO, ne2000_map);",
"s = &d->ne2000;",
"s->irq = 16;",
"s->pci_dev = (PCIDevice *)d;",
"memcpy(s->macaddr, VAR_1->macaddr, 6);",
"ne2000_reset(s);",
"s->vc = qemu_new_vlan_client(VAR_1->vlan, ne2000_receive, s);",
"snprintf(s->vc->info_str, sizeof(s->vc->info_str),\n\"ne2000 pci macaddr=%02x:%02x:%02x:%02x:%02x:%02x\",\ns->macaddr[0],\ns->macaddr[1],\ns->macaddr[2],\ns->macaddr[3],\ns->macaddr[4],\ns->macaddr[5]);",
"register_savevm(\"ne2000\", 0, 2, ne2000_save, ne2000_load, s);",
"register_savevm(\"ne2000_pci\", 0, 1, generic_pci_save, generic_pci_load,\n&d->dev);",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13,
15,
17,
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
41,
43
],
[
45
],
[
47
],
[
49
],
[
51
],
[
53
],
[
55
],
[
59,
61,
63,
65,
67,
69,
71,
73
],
[
79
],
[
81,
83
],
[
85
]
] |
21,331 | static int decode_bdlt(uint8_t *frame, int width, int height,
const uint8_t *src, const uint8_t *src_end)
{
const uint8_t *frame_end = frame + width * height;
uint8_t *line_ptr;
int count, lines, segments;
count = bytestream_get_le16(&src);
if (count >= height || width * count < 0)
return -1;
frame += width * count;
lines = bytestream_get_le16(&src);
if (frame + lines * width > frame_end || src >= src_end)
return -1;
while (lines--) {
line_ptr = frame;
frame += width;
segments = *src++;
while (segments--) {
if (src_end - src < 3)
return -1;
line_ptr += *src++;
if (line_ptr >= frame)
return -1;
count = (int8_t)*src++;
if (count >= 0) {
if (line_ptr + count > frame || src_end - src < count)
return -1;
bytestream_get_buffer(&src, line_ptr, count);
} else {
count = -count;
if (line_ptr + count > frame || src >= src_end)
return -1;
memset(line_ptr, *src++, count);
}
line_ptr += count;
}
}
return 0;
}
| true | FFmpeg | 65daa942eb51c348e205ae3a54f77b8781907a81 | static int decode_bdlt(uint8_t *frame, int width, int height,
const uint8_t *src, const uint8_t *src_end)
{
const uint8_t *frame_end = frame + width * height;
uint8_t *line_ptr;
int count, lines, segments;
count = bytestream_get_le16(&src);
if (count >= height || width * count < 0)
return -1;
frame += width * count;
lines = bytestream_get_le16(&src);
if (frame + lines * width > frame_end || src >= src_end)
return -1;
while (lines--) {
line_ptr = frame;
frame += width;
segments = *src++;
while (segments--) {
if (src_end - src < 3)
return -1;
line_ptr += *src++;
if (line_ptr >= frame)
return -1;
count = (int8_t)*src++;
if (count >= 0) {
if (line_ptr + count > frame || src_end - src < count)
return -1;
bytestream_get_buffer(&src, line_ptr, count);
} else {
count = -count;
if (line_ptr + count > frame || src >= src_end)
return -1;
memset(line_ptr, *src++, count);
}
line_ptr += count;
}
}
return 0;
}
| {
"code": [
" if (count >= height || width * count < 0)",
" if (frame + lines * width > frame_end || src >= src_end)",
" line_ptr += *src++;",
" if (line_ptr >= frame)",
" if (line_ptr + count > frame || src_end - src < count)",
" if (line_ptr + count > frame || src >= src_end)",
" if (frame + lines * width > frame_end || src >= src_end)",
" line_ptr += *src++;",
" if (line_ptr >= frame)"
],
"line_no": [
17,
25,
45,
47,
55,
65,
25,
45,
47
]
} | static int FUNC_0(uint8_t *VAR_0, int VAR_1, int VAR_2,
const uint8_t *VAR_3, const uint8_t *VAR_4)
{
const uint8_t *VAR_5 = VAR_0 + VAR_1 * VAR_2;
uint8_t *line_ptr;
int VAR_6, VAR_7, VAR_8;
VAR_6 = bytestream_get_le16(&VAR_3);
if (VAR_6 >= VAR_2 || VAR_1 * VAR_6 < 0)
return -1;
VAR_0 += VAR_1 * VAR_6;
VAR_7 = bytestream_get_le16(&VAR_3);
if (VAR_0 + VAR_7 * VAR_1 > VAR_5 || VAR_3 >= VAR_4)
return -1;
while (VAR_7--) {
line_ptr = VAR_0;
VAR_0 += VAR_1;
VAR_8 = *VAR_3++;
while (VAR_8--) {
if (VAR_4 - VAR_3 < 3)
return -1;
line_ptr += *VAR_3++;
if (line_ptr >= VAR_0)
return -1;
VAR_6 = (int8_t)*VAR_3++;
if (VAR_6 >= 0) {
if (line_ptr + VAR_6 > VAR_0 || VAR_4 - VAR_3 < VAR_6)
return -1;
bytestream_get_buffer(&VAR_3, line_ptr, VAR_6);
} else {
VAR_6 = -VAR_6;
if (line_ptr + VAR_6 > VAR_0 || VAR_3 >= VAR_4)
return -1;
memset(line_ptr, *VAR_3++, VAR_6);
}
line_ptr += VAR_6;
}
}
return 0;
}
| [
"static int FUNC_0(uint8_t *VAR_0, int VAR_1, int VAR_2,\nconst uint8_t *VAR_3, const uint8_t *VAR_4)\n{",
"const uint8_t *VAR_5 = VAR_0 + VAR_1 * VAR_2;",
"uint8_t *line_ptr;",
"int VAR_6, VAR_7, VAR_8;",
"VAR_6 = bytestream_get_le16(&VAR_3);",
"if (VAR_6 >= VAR_2 || VAR_1 * VAR_6 < 0)\nreturn -1;",
"VAR_0 += VAR_1 * VAR_6;",
"VAR_7 = bytestream_get_le16(&VAR_3);",
"if (VAR_0 + VAR_7 * VAR_1 > VAR_5 || VAR_3 >= VAR_4)\nreturn -1;",
"while (VAR_7--) {",
"line_ptr = VAR_0;",
"VAR_0 += VAR_1;",
"VAR_8 = *VAR_3++;",
"while (VAR_8--) {",
"if (VAR_4 - VAR_3 < 3)\nreturn -1;",
"line_ptr += *VAR_3++;",
"if (line_ptr >= VAR_0)\nreturn -1;",
"VAR_6 = (int8_t)*VAR_3++;",
"if (VAR_6 >= 0) {",
"if (line_ptr + VAR_6 > VAR_0 || VAR_4 - VAR_3 < VAR_6)\nreturn -1;",
"bytestream_get_buffer(&VAR_3, line_ptr, VAR_6);",
"} else {",
"VAR_6 = -VAR_6;",
"if (line_ptr + VAR_6 > VAR_0 || VAR_3 >= VAR_4)\nreturn -1;",
"memset(line_ptr, *VAR_3++, VAR_6);",
"}",
"line_ptr += VAR_6;",
"}",
"}",
"return 0;",
"}"
] | [
0,
0,
0,
0,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
15
],
[
17,
19
],
[
21
],
[
23
],
[
25,
27
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41,
43
],
[
45
],
[
47,
49
],
[
51
],
[
53
],
[
55,
57
],
[
59
],
[
61
],
[
63
],
[
65,
67
],
[
69
],
[
71
],
[
73
],
[
75
],
[
77
],
[
81
],
[
83
]
] |
21,332 | gdb_handlesig (CPUState *env, int sig)
{
GDBState *s;
char buf[256];
int n;
s = gdbserver_state;
if (gdbserver_fd < 0 || s->fd < 0)
return sig;
/* disable single step if it was enabled */
cpu_single_step(env, 0);
tb_flush(env);
if (sig != 0)
{
snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
put_packet(s, buf);
}
/* put_packet() might have detected that the peer terminated the
connection. */
if (s->fd < 0)
return sig;
sig = 0;
s->state = RS_IDLE;
s->running_state = 0;
while (s->running_state == 0) {
n = read (s->fd, buf, 256);
if (n > 0)
{
int i;
for (i = 0; i < n; i++)
gdb_read_byte (s, buf[i]);
}
else if (n == 0 || errno != EAGAIN)
{
/* XXX: Connection closed. Should probably wait for annother
connection before continuing. */
return sig;
}
}
sig = s->signal;
s->signal = 0;
return sig;
}
| true | qemu | e7d81004e486b0e80a674d164d8aec0e83fa812f | gdb_handlesig (CPUState *env, int sig)
{
GDBState *s;
char buf[256];
int n;
s = gdbserver_state;
if (gdbserver_fd < 0 || s->fd < 0)
return sig;
cpu_single_step(env, 0);
tb_flush(env);
if (sig != 0)
{
snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
put_packet(s, buf);
}
if (s->fd < 0)
return sig;
sig = 0;
s->state = RS_IDLE;
s->running_state = 0;
while (s->running_state == 0) {
n = read (s->fd, buf, 256);
if (n > 0)
{
int i;
for (i = 0; i < n; i++)
gdb_read_byte (s, buf[i]);
}
else if (n == 0 || errno != EAGAIN)
{
return sig;
}
}
sig = s->signal;
s->signal = 0;
return sig;
}
| {
"code": [],
"line_no": []
} | FUNC_0 (CPUState *VAR_0, int VAR_1)
{
GDBState *s;
char VAR_2[256];
int VAR_3;
s = gdbserver_state;
if (gdbserver_fd < 0 || s->fd < 0)
return VAR_1;
cpu_single_step(VAR_0, 0);
tb_flush(VAR_0);
if (VAR_1 != 0)
{
snprintf(VAR_2, sizeof(VAR_2), "S%02x", target_signal_to_gdb (VAR_1));
put_packet(s, VAR_2);
}
if (s->fd < 0)
return VAR_1;
VAR_1 = 0;
s->state = RS_IDLE;
s->running_state = 0;
while (s->running_state == 0) {
VAR_3 = read (s->fd, VAR_2, 256);
if (VAR_3 > 0)
{
int VAR_4;
for (VAR_4 = 0; VAR_4 < VAR_3; VAR_4++)
gdb_read_byte (s, VAR_2[VAR_4]);
}
else if (VAR_3 == 0 || errno != EAGAIN)
{
return VAR_1;
}
}
VAR_1 = s->signal;
s->signal = 0;
return VAR_1;
}
| [
"FUNC_0 (CPUState *VAR_0, int VAR_1)\n{",
"GDBState *s;",
"char VAR_2[256];",
"int VAR_3;",
"s = gdbserver_state;",
"if (gdbserver_fd < 0 || s->fd < 0)\nreturn VAR_1;",
"cpu_single_step(VAR_0, 0);",
"tb_flush(VAR_0);",
"if (VAR_1 != 0)\n{",
"snprintf(VAR_2, sizeof(VAR_2), \"S%02x\", target_signal_to_gdb (VAR_1));",
"put_packet(s, VAR_2);",
"}",
"if (s->fd < 0)\nreturn VAR_1;",
"VAR_1 = 0;",
"s->state = RS_IDLE;",
"s->running_state = 0;",
"while (s->running_state == 0) {",
"VAR_3 = read (s->fd, VAR_2, 256);",
"if (VAR_3 > 0)\n{",
"int VAR_4;",
"for (VAR_4 = 0; VAR_4 < VAR_3; VAR_4++)",
"gdb_read_byte (s, VAR_2[VAR_4]);",
"}",
"else if (VAR_3 == 0 || errno != EAGAIN)\n{",
"return VAR_1;",
"}",
"}",
"VAR_1 = s->signal;",
"s->signal = 0;",
"return VAR_1;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
15,
17
],
[
23
],
[
25
],
[
29,
31
],
[
33
],
[
35
],
[
37
],
[
43,
45
],
[
49
],
[
51
],
[
53
],
[
55
],
[
57
],
[
59,
61
],
[
63
],
[
67
],
[
69
],
[
71
],
[
73,
75
],
[
81
],
[
83
],
[
85
],
[
87
],
[
89
],
[
91
],
[
93
]
] |
21,333 | int spapr_h_cas_compose_response(target_ulong addr, target_ulong size)
{
void *fdt, *fdt_skel;
sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 };
size -= sizeof(hdr);
/* Create sceleton */
fdt_skel = g_malloc0(size);
_FDT((fdt_create(fdt_skel, size)));
_FDT((fdt_begin_node(fdt_skel, "")));
_FDT((fdt_end_node(fdt_skel)));
_FDT((fdt_finish(fdt_skel)));
fdt = g_malloc0(size);
_FDT((fdt_open_into(fdt_skel, fdt, size)));
g_free(fdt_skel);
/* Place to make changes to the tree */
/* Pack resulting tree */
_FDT((fdt_pack(fdt)));
if (fdt_totalsize(fdt) + sizeof(hdr) > size) {
trace_spapr_cas_failed(size);
return -1;
}
cpu_physical_memory_write(addr, &hdr, sizeof(hdr));
cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt));
trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr));
g_free(fdt);
return 0;
}
| true | qemu | 3794d5482d74dc0031cee6d5be2c61c88ca723bd | int spapr_h_cas_compose_response(target_ulong addr, target_ulong size)
{
void *fdt, *fdt_skel;
sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 };
size -= sizeof(hdr);
fdt_skel = g_malloc0(size);
_FDT((fdt_create(fdt_skel, size)));
_FDT((fdt_begin_node(fdt_skel, "")));
_FDT((fdt_end_node(fdt_skel)));
_FDT((fdt_finish(fdt_skel)));
fdt = g_malloc0(size);
_FDT((fdt_open_into(fdt_skel, fdt, size)));
g_free(fdt_skel);
_FDT((fdt_pack(fdt)));
if (fdt_totalsize(fdt) + sizeof(hdr) > size) {
trace_spapr_cas_failed(size);
return -1;
}
cpu_physical_memory_write(addr, &hdr, sizeof(hdr));
cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt));
trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr));
g_free(fdt);
return 0;
}
| {
"code": [],
"line_no": []
} | int FUNC_0(target_ulong VAR_0, target_ulong VAR_1)
{
void *VAR_2, *VAR_3;
sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 };
VAR_1 -= sizeof(hdr);
VAR_3 = g_malloc0(VAR_1);
_FDT((fdt_create(VAR_3, VAR_1)));
_FDT((fdt_begin_node(VAR_3, "")));
_FDT((fdt_end_node(VAR_3)));
_FDT((fdt_finish(VAR_3)));
VAR_2 = g_malloc0(VAR_1);
_FDT((fdt_open_into(VAR_3, VAR_2, VAR_1)));
g_free(VAR_3);
_FDT((fdt_pack(VAR_2)));
if (fdt_totalsize(VAR_2) + sizeof(hdr) > VAR_1) {
trace_spapr_cas_failed(VAR_1);
return -1;
}
cpu_physical_memory_write(VAR_0, &hdr, sizeof(hdr));
cpu_physical_memory_write(VAR_0 + sizeof(hdr), VAR_2, fdt_totalsize(VAR_2));
trace_spapr_cas_continue(fdt_totalsize(VAR_2) + sizeof(hdr));
g_free(VAR_2);
return 0;
}
| [
"int FUNC_0(target_ulong VAR_0, target_ulong VAR_1)\n{",
"void *VAR_2, *VAR_3;",
"sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 };",
"VAR_1 -= sizeof(hdr);",
"VAR_3 = g_malloc0(VAR_1);",
"_FDT((fdt_create(VAR_3, VAR_1)));",
"_FDT((fdt_begin_node(VAR_3, \"\")));",
"_FDT((fdt_end_node(VAR_3)));",
"_FDT((fdt_finish(VAR_3)));",
"VAR_2 = g_malloc0(VAR_1);",
"_FDT((fdt_open_into(VAR_3, VAR_2, VAR_1)));",
"g_free(VAR_3);",
"_FDT((fdt_pack(VAR_2)));",
"if (fdt_totalsize(VAR_2) + sizeof(hdr) > VAR_1) {",
"trace_spapr_cas_failed(VAR_1);",
"return -1;",
"}",
"cpu_physical_memory_write(VAR_0, &hdr, sizeof(hdr));",
"cpu_physical_memory_write(VAR_0 + sizeof(hdr), VAR_2, fdt_totalsize(VAR_2));",
"trace_spapr_cas_continue(fdt_totalsize(VAR_2) + sizeof(hdr));",
"g_free(VAR_2);",
"return 0;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
11
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
41
],
[
45
],
[
47
],
[
49
],
[
51
],
[
55
],
[
57
],
[
59
],
[
61
],
[
65
],
[
67
]
] |
21,336 | int do_snapshot_blkdev(Monitor *mon, const QDict *qdict, QObject **ret_data)
{
const char *device = qdict_get_str(qdict, "device");
const char *filename = qdict_get_try_str(qdict, "snapshot_file");
const char *format = qdict_get_try_str(qdict, "format");
BlockDriverState *bs;
BlockDriver *drv, *proto_drv;
int ret = 0;
int flags;
if (!filename) {
qerror_report(QERR_MISSING_PARAMETER, "snapshot_file");
ret = -1;
goto out;
}
bs = bdrv_find(device);
if (!bs) {
qerror_report(QERR_DEVICE_NOT_FOUND, device);
ret = -1;
goto out;
}
if (!format) {
format = "qcow2";
}
drv = bdrv_find_format(format);
if (!drv) {
qerror_report(QERR_INVALID_BLOCK_FORMAT, format);
ret = -1;
goto out;
}
proto_drv = bdrv_find_protocol(filename);
if (!proto_drv) {
qerror_report(QERR_INVALID_BLOCK_FORMAT, format);
ret = -1;
goto out;
}
ret = bdrv_img_create(filename, format, bs->filename,
bs->drv->format_name, NULL, -1, bs->open_flags);
if (ret) {
goto out;
}
qemu_aio_flush();
bdrv_flush(bs);
flags = bs->open_flags;
bdrv_close(bs);
ret = bdrv_open(bs, filename, flags, drv);
/*
* If reopening the image file we just created fails, we really
* are in trouble :(
*/
if (ret != 0) {
abort();
}
out:
if (ret) {
ret = -1;
}
return ret;
}
| true | qemu | 52f9a172b6db89ba1f4389883be805d65dd3ca8c | int do_snapshot_blkdev(Monitor *mon, const QDict *qdict, QObject **ret_data)
{
const char *device = qdict_get_str(qdict, "device");
const char *filename = qdict_get_try_str(qdict, "snapshot_file");
const char *format = qdict_get_try_str(qdict, "format");
BlockDriverState *bs;
BlockDriver *drv, *proto_drv;
int ret = 0;
int flags;
if (!filename) {
qerror_report(QERR_MISSING_PARAMETER, "snapshot_file");
ret = -1;
goto out;
}
bs = bdrv_find(device);
if (!bs) {
qerror_report(QERR_DEVICE_NOT_FOUND, device);
ret = -1;
goto out;
}
if (!format) {
format = "qcow2";
}
drv = bdrv_find_format(format);
if (!drv) {
qerror_report(QERR_INVALID_BLOCK_FORMAT, format);
ret = -1;
goto out;
}
proto_drv = bdrv_find_protocol(filename);
if (!proto_drv) {
qerror_report(QERR_INVALID_BLOCK_FORMAT, format);
ret = -1;
goto out;
}
ret = bdrv_img_create(filename, format, bs->filename,
bs->drv->format_name, NULL, -1, bs->open_flags);
if (ret) {
goto out;
}
qemu_aio_flush();
bdrv_flush(bs);
flags = bs->open_flags;
bdrv_close(bs);
ret = bdrv_open(bs, filename, flags, drv);
if (ret != 0) {
abort();
}
out:
if (ret) {
ret = -1;
}
return ret;
}
| {
"code": [
" BlockDriver *drv, *proto_drv;",
" bs->drv->format_name, NULL, -1, bs->open_flags);",
" flags = bs->open_flags;",
" abort();"
],
"line_no": [
13,
85,
101,
117
]
} | int FUNC_0(Monitor *VAR_0, const QDict *VAR_1, QObject **VAR_2)
{
const char *VAR_3 = qdict_get_str(VAR_1, "VAR_3");
const char *VAR_4 = qdict_get_try_str(VAR_1, "snapshot_file");
const char *VAR_5 = qdict_get_try_str(VAR_1, "VAR_5");
BlockDriverState *bs;
BlockDriver *drv, *proto_drv;
int VAR_6 = 0;
int VAR_7;
if (!VAR_4) {
qerror_report(QERR_MISSING_PARAMETER, "snapshot_file");
VAR_6 = -1;
goto out;
}
bs = bdrv_find(VAR_3);
if (!bs) {
qerror_report(QERR_DEVICE_NOT_FOUND, VAR_3);
VAR_6 = -1;
goto out;
}
if (!VAR_5) {
VAR_5 = "qcow2";
}
drv = bdrv_find_format(VAR_5);
if (!drv) {
qerror_report(QERR_INVALID_BLOCK_FORMAT, VAR_5);
VAR_6 = -1;
goto out;
}
proto_drv = bdrv_find_protocol(VAR_4);
if (!proto_drv) {
qerror_report(QERR_INVALID_BLOCK_FORMAT, VAR_5);
VAR_6 = -1;
goto out;
}
VAR_6 = bdrv_img_create(VAR_4, VAR_5, bs->VAR_4,
bs->drv->format_name, NULL, -1, bs->open_flags);
if (VAR_6) {
goto out;
}
qemu_aio_flush();
bdrv_flush(bs);
VAR_7 = bs->open_flags;
bdrv_close(bs);
VAR_6 = bdrv_open(bs, VAR_4, VAR_7, drv);
if (VAR_6 != 0) {
abort();
}
out:
if (VAR_6) {
VAR_6 = -1;
}
return VAR_6;
}
| [
"int FUNC_0(Monitor *VAR_0, const QDict *VAR_1, QObject **VAR_2)\n{",
"const char *VAR_3 = qdict_get_str(VAR_1, \"VAR_3\");",
"const char *VAR_4 = qdict_get_try_str(VAR_1, \"snapshot_file\");",
"const char *VAR_5 = qdict_get_try_str(VAR_1, \"VAR_5\");",
"BlockDriverState *bs;",
"BlockDriver *drv, *proto_drv;",
"int VAR_6 = 0;",
"int VAR_7;",
"if (!VAR_4) {",
"qerror_report(QERR_MISSING_PARAMETER, \"snapshot_file\");",
"VAR_6 = -1;",
"goto out;",
"}",
"bs = bdrv_find(VAR_3);",
"if (!bs) {",
"qerror_report(QERR_DEVICE_NOT_FOUND, VAR_3);",
"VAR_6 = -1;",
"goto out;",
"}",
"if (!VAR_5) {",
"VAR_5 = \"qcow2\";",
"}",
"drv = bdrv_find_format(VAR_5);",
"if (!drv) {",
"qerror_report(QERR_INVALID_BLOCK_FORMAT, VAR_5);",
"VAR_6 = -1;",
"goto out;",
"}",
"proto_drv = bdrv_find_protocol(VAR_4);",
"if (!proto_drv) {",
"qerror_report(QERR_INVALID_BLOCK_FORMAT, VAR_5);",
"VAR_6 = -1;",
"goto out;",
"}",
"VAR_6 = bdrv_img_create(VAR_4, VAR_5, bs->VAR_4,\nbs->drv->format_name, NULL, -1, bs->open_flags);",
"if (VAR_6) {",
"goto out;",
"}",
"qemu_aio_flush();",
"bdrv_flush(bs);",
"VAR_7 = bs->open_flags;",
"bdrv_close(bs);",
"VAR_6 = bdrv_open(bs, VAR_4, VAR_7, drv);",
"if (VAR_6 != 0) {",
"abort();",
"}",
"out:\nif (VAR_6) {",
"VAR_6 = -1;",
"}",
"return VAR_6;",
"}"
] | [
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
47
],
[
49
],
[
51
],
[
55
],
[
57
],
[
59
],
[
61
],
[
63
],
[
65
],
[
69
],
[
71
],
[
73
],
[
75
],
[
77
],
[
79
],
[
83,
85
],
[
87
],
[
89
],
[
91
],
[
95
],
[
97
],
[
101
],
[
103
],
[
105
],
[
115
],
[
117
],
[
119
],
[
121,
123
],
[
125
],
[
127
],
[
131
],
[
133
]
] |
21,337 | static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
{
VC9Context *v = avctx->priv_data;
v->profile = get_bits(gb, 2);
av_log(avctx, AV_LOG_DEBUG, "Profile: %i\n", v->profile);
#if HAS_ADVANCED_PROFILE
if (v->profile > PROFILE_MAIN)
{
v->level = get_bits(gb, 3);
v->chromaformat = get_bits(gb, 2);
if (v->chromaformat != 1)
{
av_log(avctx, AV_LOG_ERROR,
"Only 4:2:0 chroma format supported\n");
return -1;
}
}
else
#endif
{
v->res_sm = get_bits(gb, 2); //reserved
if (v->res_sm)
{
av_log(avctx, AV_LOG_ERROR,
"Reserved RES_SM=%i is forbidden\n", v->res_sm);
//return -1;
}
}
// (fps-2)/4 (->30)
v->frmrtq_postproc = get_bits(gb, 3); //common
// (bitrate-32kbps)/64kbps
v->bitrtq_postproc = get_bits(gb, 5); //common
v->s.loop_filter = get_bits(gb, 1); //common
#if HAS_ADVANCED_PROFILE
if (v->profile <= PROFILE_MAIN)
#endif
{
v->res_x8 = get_bits(gb, 1); //reserved
if (v->res_x8)
{
av_log(avctx, AV_LOG_ERROR,
"1 for reserved RES_X8 is forbidden\n");
//return -1;
}
v->multires = get_bits(gb, 1);
v->res_fasttx = get_bits(gb, 1);
if (!v->res_fasttx)
{
av_log(avctx, AV_LOG_ERROR,
"0 for reserved RES_FASTTX is forbidden\n");
//return -1;
}
}
v->fastuvmc = get_bits(gb, 1); //common
if (!v->profile && !v->fastuvmc)
{
av_log(avctx, AV_LOG_ERROR,
"FASTUVMC unavailable in Simple Profile\n");
return -1;
}
v->extended_mv = get_bits(gb, 1); //common
if (!v->profile && v->extended_mv)
{
av_log(avctx, AV_LOG_ERROR,
"Extended MVs unavailable in Simple Profile\n");
return -1;
}
v->dquant = get_bits(gb, 2); //common
v->vstransform = get_bits(gb, 1); //common
#if HAS_ADVANCED_PROFILE
if (v->profile <= PROFILE_MAIN)
#endif
{
v->res_transtab = get_bits(gb, 1);
if (v->res_transtab)
{
av_log(avctx, AV_LOG_ERROR,
"1 for reserved RES_TRANSTAB is forbidden\n");
return -1;
}
}
v->overlap = get_bits(gb, 1); //common
#if HAS_ADVANCED_PROFILE
if (v->profile <= PROFILE_MAIN)
#endif
{
v->s.resync_marker = get_bits(gb, 1);
v->rangered = get_bits(gb, 1);
}
v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
v->quantizer_mode = get_bits(gb, 2); //common
#if HAS_ADVANCED_PROFILE
if (v->profile <= PROFILE_MAIN)
#endif
{
v->finterpflag = get_bits(gb, 1); //common
v->res_rtm_flag = get_bits(gb, 1); //reserved
if (!v->res_rtm_flag)
{
av_log(avctx, AV_LOG_ERROR,
"0 for reserved RES_RTM_FLAG is forbidden\n");
//return -1;
}
#if TRACE
av_log(avctx, AV_LOG_INFO,
"Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
"LoopFilter=%i, MultiRes=%i, FastUVMV=%i, Extended MV=%i\n"
"Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
"DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
v->dquant, v->quantizer_mode, avctx->max_b_frames
);
return 0;
#endif
}
#if HAS_ADVANCED_PROFILE
else return decode_advanced_sequence_header(avctx, gb);
#endif
}
| true | FFmpeg | 7cc84d241ba6ef8e27e4d057176a4ad385ad3d59 | static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
{
VC9Context *v = avctx->priv_data;
v->profile = get_bits(gb, 2);
av_log(avctx, AV_LOG_DEBUG, "Profile: %i\n", v->profile);
#if HAS_ADVANCED_PROFILE
if (v->profile > PROFILE_MAIN)
{
v->level = get_bits(gb, 3);
v->chromaformat = get_bits(gb, 2);
if (v->chromaformat != 1)
{
av_log(avctx, AV_LOG_ERROR,
"Only 4:2:0 chroma format supported\n");
return -1;
}
}
else
#endif
{
v->res_sm = get_bits(gb, 2);
if (v->res_sm)
{
av_log(avctx, AV_LOG_ERROR,
"Reserved RES_SM=%i is forbidden\n", v->res_sm);
}
}
v->frmrtq_postproc = get_bits(gb, 3);
v->bitrtq_postproc = get_bits(gb, 5);
v->s.loop_filter = get_bits(gb, 1);
#if HAS_ADVANCED_PROFILE
if (v->profile <= PROFILE_MAIN)
#endif
{
v->res_x8 = get_bits(gb, 1);
if (v->res_x8)
{
av_log(avctx, AV_LOG_ERROR,
"1 for reserved RES_X8 is forbidden\n");
}
v->multires = get_bits(gb, 1);
v->res_fasttx = get_bits(gb, 1);
if (!v->res_fasttx)
{
av_log(avctx, AV_LOG_ERROR,
"0 for reserved RES_FASTTX is forbidden\n");
}
}
v->fastuvmc = get_bits(gb, 1);
if (!v->profile && !v->fastuvmc)
{
av_log(avctx, AV_LOG_ERROR,
"FASTUVMC unavailable in Simple Profile\n");
return -1;
}
v->extended_mv = get_bits(gb, 1);
if (!v->profile && v->extended_mv)
{
av_log(avctx, AV_LOG_ERROR,
"Extended MVs unavailable in Simple Profile\n");
return -1;
}
v->dquant = get_bits(gb, 2);
v->vstransform = get_bits(gb, 1);
#if HAS_ADVANCED_PROFILE
if (v->profile <= PROFILE_MAIN)
#endif
{
v->res_transtab = get_bits(gb, 1);
if (v->res_transtab)
{
av_log(avctx, AV_LOG_ERROR,
"1 for reserved RES_TRANSTAB is forbidden\n");
return -1;
}
}
v->overlap = get_bits(gb, 1);
#if HAS_ADVANCED_PROFILE
if (v->profile <= PROFILE_MAIN)
#endif
{
v->s.resync_marker = get_bits(gb, 1);
v->rangered = get_bits(gb, 1);
}
v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3);
v->quantizer_mode = get_bits(gb, 2);
#if HAS_ADVANCED_PROFILE
if (v->profile <= PROFILE_MAIN)
#endif
{
v->finterpflag = get_bits(gb, 1);
v->res_rtm_flag = get_bits(gb, 1);
if (!v->res_rtm_flag)
{
av_log(avctx, AV_LOG_ERROR,
"0 for reserved RES_RTM_FLAG is forbidden\n");
}
#if TRACE
av_log(avctx, AV_LOG_INFO,
"Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
"LoopFilter=%i, MultiRes=%i, FastUVMV=%i, Extended MV=%i\n"
"Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
"DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
v->dquant, v->quantizer_mode, avctx->max_b_frames
);
return 0;
#endif
}
#if HAS_ADVANCED_PROFILE
else return decode_advanced_sequence_header(avctx, gb);
#endif
}
| {
"code": [
"#endif",
"#endif",
"#endif",
"#endif",
" av_log(avctx, AV_LOG_DEBUG, \"Profile: %i\\n\", v->profile);",
" if (v->profile > PROFILE_MAIN)",
" if (v->profile <= PROFILE_MAIN)",
" if (v->profile <= PROFILE_MAIN)",
" if (v->profile <= PROFILE_MAIN)",
" if (v->profile <= PROFILE_MAIN)",
"#endif",
" if (v->profile > PROFILE_MAIN)",
" if (v->profile > PROFILE_MAIN)",
" if (v->profile > PROFILE_MAIN)",
" if (v->profile > PROFILE_MAIN)",
" if (v->profile > PROFILE_MAIN)",
" if (v->profile > PROFILE_MAIN)",
" if (v->profile > PROFILE_MAIN)",
"#endif",
"#endif"
],
"line_no": [
41,
41,
41,
41,
11,
17,
77,
77,
77,
77,
41,
17,
17,
17,
17,
17,
17,
17,
41,
41
]
} | static int FUNC_0(AVCodecContext *VAR_0, GetBitContext *VAR_1)
{
VC9Context *v = VAR_0->priv_data;
v->profile = get_bits(VAR_1, 2);
av_log(VAR_0, AV_LOG_DEBUG, "Profile: %i\n", v->profile);
#if HAS_ADVANCED_PROFILE
if (v->profile > PROFILE_MAIN)
{
v->level = get_bits(VAR_1, 3);
v->chromaformat = get_bits(VAR_1, 2);
if (v->chromaformat != 1)
{
av_log(VAR_0, AV_LOG_ERROR,
"Only 4:2:0 chroma format supported\n");
return -1;
}
}
else
#endif
{
v->res_sm = get_bits(VAR_1, 2);
if (v->res_sm)
{
av_log(VAR_0, AV_LOG_ERROR,
"Reserved RES_SM=%i is forbidden\n", v->res_sm);
}
}
v->frmrtq_postproc = get_bits(VAR_1, 3);
v->bitrtq_postproc = get_bits(VAR_1, 5);
v->s.loop_filter = get_bits(VAR_1, 1);
#if HAS_ADVANCED_PROFILE
if (v->profile <= PROFILE_MAIN)
#endif
{
v->res_x8 = get_bits(VAR_1, 1);
if (v->res_x8)
{
av_log(VAR_0, AV_LOG_ERROR,
"1 for reserved RES_X8 is forbidden\n");
}
v->multires = get_bits(VAR_1, 1);
v->res_fasttx = get_bits(VAR_1, 1);
if (!v->res_fasttx)
{
av_log(VAR_0, AV_LOG_ERROR,
"0 for reserved RES_FASTTX is forbidden\n");
}
}
v->fastuvmc = get_bits(VAR_1, 1);
if (!v->profile && !v->fastuvmc)
{
av_log(VAR_0, AV_LOG_ERROR,
"FASTUVMC unavailable in Simple Profile\n");
return -1;
}
v->extended_mv = get_bits(VAR_1, 1);
if (!v->profile && v->extended_mv)
{
av_log(VAR_0, AV_LOG_ERROR,
"Extended MVs unavailable in Simple Profile\n");
return -1;
}
v->dquant = get_bits(VAR_1, 2);
v->vstransform = get_bits(VAR_1, 1);
#if HAS_ADVANCED_PROFILE
if (v->profile <= PROFILE_MAIN)
#endif
{
v->res_transtab = get_bits(VAR_1, 1);
if (v->res_transtab)
{
av_log(VAR_0, AV_LOG_ERROR,
"1 for reserved RES_TRANSTAB is forbidden\n");
return -1;
}
}
v->overlap = get_bits(VAR_1, 1);
#if HAS_ADVANCED_PROFILE
if (v->profile <= PROFILE_MAIN)
#endif
{
v->s.resync_marker = get_bits(VAR_1, 1);
v->rangered = get_bits(VAR_1, 1);
}
v->s.max_b_frames = VAR_0->max_b_frames = get_bits(VAR_1, 3);
v->quantizer_mode = get_bits(VAR_1, 2);
#if HAS_ADVANCED_PROFILE
if (v->profile <= PROFILE_MAIN)
#endif
{
v->finterpflag = get_bits(VAR_1, 1);
v->res_rtm_flag = get_bits(VAR_1, 1);
if (!v->res_rtm_flag)
{
av_log(VAR_0, AV_LOG_ERROR,
"0 for reserved RES_RTM_FLAG is forbidden\n");
}
#if TRACE
av_log(VAR_0, AV_LOG_INFO,
"Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
"LoopFilter=%i, MultiRes=%i, FastUVMV=%i, Extended MV=%i\n"
"Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
"DQuant=%i, Quantizer mode=%i, Max B frames=%i\n",
v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
v->dquant, v->quantizer_mode, VAR_0->max_b_frames
);
return 0;
#endif
}
#if HAS_ADVANCED_PROFILE
else return decode_advanced_sequence_header(VAR_0, VAR_1);
#endif
}
| [
"static int FUNC_0(AVCodecContext *VAR_0, GetBitContext *VAR_1)\n{",
"VC9Context *v = VAR_0->priv_data;",
"v->profile = get_bits(VAR_1, 2);",
"av_log(VAR_0, AV_LOG_DEBUG, \"Profile: %i\\n\", v->profile);",
"#if HAS_ADVANCED_PROFILE\nif (v->profile > PROFILE_MAIN)\n{",
"v->level = get_bits(VAR_1, 3);",
"v->chromaformat = get_bits(VAR_1, 2);",
"if (v->chromaformat != 1)\n{",
"av_log(VAR_0, AV_LOG_ERROR,\n\"Only 4:2:0 chroma format supported\\n\");",
"return -1;",
"}",
"}",
"else\n#endif\n{",
"v->res_sm = get_bits(VAR_1, 2);",
"if (v->res_sm)\n{",
"av_log(VAR_0, AV_LOG_ERROR,\n\"Reserved RES_SM=%i is forbidden\\n\", v->res_sm);",
"}",
"}",
"v->frmrtq_postproc = get_bits(VAR_1, 3);",
"v->bitrtq_postproc = get_bits(VAR_1, 5);",
"v->s.loop_filter = get_bits(VAR_1, 1);",
"#if HAS_ADVANCED_PROFILE\nif (v->profile <= PROFILE_MAIN)\n#endif\n{",
"v->res_x8 = get_bits(VAR_1, 1);",
"if (v->res_x8)\n{",
"av_log(VAR_0, AV_LOG_ERROR,\n\"1 for reserved RES_X8 is forbidden\\n\");",
"}",
"v->multires = get_bits(VAR_1, 1);",
"v->res_fasttx = get_bits(VAR_1, 1);",
"if (!v->res_fasttx)\n{",
"av_log(VAR_0, AV_LOG_ERROR,\n\"0 for reserved RES_FASTTX is forbidden\\n\");",
"}",
"}",
"v->fastuvmc = get_bits(VAR_1, 1);",
"if (!v->profile && !v->fastuvmc)\n{",
"av_log(VAR_0, AV_LOG_ERROR,\n\"FASTUVMC unavailable in Simple Profile\\n\");",
"return -1;",
"}",
"v->extended_mv = get_bits(VAR_1, 1);",
"if (!v->profile && v->extended_mv)\n{",
"av_log(VAR_0, AV_LOG_ERROR,\n\"Extended MVs unavailable in Simple Profile\\n\");",
"return -1;",
"}",
"v->dquant = get_bits(VAR_1, 2);",
"v->vstransform = get_bits(VAR_1, 1);",
"#if HAS_ADVANCED_PROFILE\nif (v->profile <= PROFILE_MAIN)\n#endif\n{",
"v->res_transtab = get_bits(VAR_1, 1);",
"if (v->res_transtab)\n{",
"av_log(VAR_0, AV_LOG_ERROR,\n\"1 for reserved RES_TRANSTAB is forbidden\\n\");",
"return -1;",
"}",
"}",
"v->overlap = get_bits(VAR_1, 1);",
"#if HAS_ADVANCED_PROFILE\nif (v->profile <= PROFILE_MAIN)\n#endif\n{",
"v->s.resync_marker = get_bits(VAR_1, 1);",
"v->rangered = get_bits(VAR_1, 1);",
"}",
"v->s.max_b_frames = VAR_0->max_b_frames = get_bits(VAR_1, 3);",
"v->quantizer_mode = get_bits(VAR_1, 2);",
"#if HAS_ADVANCED_PROFILE\nif (v->profile <= PROFILE_MAIN)\n#endif\n{",
"v->finterpflag = get_bits(VAR_1, 1);",
"v->res_rtm_flag = get_bits(VAR_1, 1);",
"if (!v->res_rtm_flag)\n{",
"av_log(VAR_0, AV_LOG_ERROR,\n\"0 for reserved RES_RTM_FLAG is forbidden\\n\");",
"}",
"#if TRACE\nav_log(VAR_0, AV_LOG_INFO,\n\"Profile %i:\\nfrmrtq_postproc=%i, bitrtq_postproc=%i\\n\"\n\"LoopFilter=%i, MultiRes=%i, FastUVMV=%i, Extended MV=%i\\n\"\n\"Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\\n\"\n\"DQuant=%i, Quantizer mode=%i, Max B frames=%i\\n\",\nv->profile, v->frmrtq_postproc, v->bitrtq_postproc,\nv->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,\nv->rangered, v->vstransform, v->overlap, v->s.resync_marker,\nv->dquant, v->quantizer_mode, VAR_0->max_b_frames\n);",
"return 0;",
"#endif\n}",
"#if HAS_ADVANCED_PROFILE\nelse return decode_advanced_sequence_header(VAR_0, VAR_1);",
"#endif\n}"
] | [
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11
],
[
15,
17,
19
],
[
21
],
[
23
],
[
25,
27
],
[
29,
31
],
[
33
],
[
35
],
[
37
],
[
39,
41,
43
],
[
45
],
[
47,
49
],
[
51,
53
],
[
57
],
[
59
],
[
65
],
[
69
],
[
71
],
[
75,
77,
79,
81
],
[
83
],
[
85,
87
],
[
89,
91
],
[
95
],
[
97
],
[
99
],
[
101,
103
],
[
105,
107
],
[
111
],
[
113
],
[
117
],
[
119,
121
],
[
123,
125
],
[
127
],
[
129
],
[
131
],
[
133,
135
],
[
137,
139
],
[
141
],
[
143
],
[
145
],
[
147
],
[
151,
153,
155,
157
],
[
159
],
[
161,
163
],
[
165,
167
],
[
169
],
[
171
],
[
173
],
[
177
],
[
181,
183,
185,
187
],
[
189
],
[
191
],
[
193
],
[
197
],
[
199
],
[
203,
205,
207,
209
],
[
211
],
[
213
],
[
215,
217
],
[
219,
221
],
[
225
],
[
227,
229,
231,
233,
235,
237,
239,
241,
243,
245,
247
],
[
249
],
[
251,
253
],
[
255,
257
],
[
259,
261
]
] |
21,338 | static void rdma_accept_incoming_migration(void *opaque)
{
RDMAContext *rdma = opaque;
int ret;
QEMUFile *f;
Error *local_err = NULL, **errp = &local_err;
DPRINTF("Accepting rdma connection...\n");
ret = qemu_rdma_accept(rdma);
if (ret) {
ERROR(errp, "RDMA Migration initialization failed!");
return;
}
DPRINTF("Accepted migration\n");
f = qemu_fopen_rdma(rdma, "rb");
if (f == NULL) {
ERROR(errp, "could not qemu_fopen_rdma!");
qemu_rdma_cleanup(rdma);
return;
}
rdma->migration_started_on_destination = 1;
process_incoming_migration(f);
}
| true | qemu | 60fe637bf0e4d7989e21e50f52526444765c63b4 | static void rdma_accept_incoming_migration(void *opaque)
{
RDMAContext *rdma = opaque;
int ret;
QEMUFile *f;
Error *local_err = NULL, **errp = &local_err;
DPRINTF("Accepting rdma connection...\n");
ret = qemu_rdma_accept(rdma);
if (ret) {
ERROR(errp, "RDMA Migration initialization failed!");
return;
}
DPRINTF("Accepted migration\n");
f = qemu_fopen_rdma(rdma, "rb");
if (f == NULL) {
ERROR(errp, "could not qemu_fopen_rdma!");
qemu_rdma_cleanup(rdma);
return;
}
rdma->migration_started_on_destination = 1;
process_incoming_migration(f);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(void *VAR_0)
{
RDMAContext *rdma = VAR_0;
int VAR_1;
QEMUFile *f;
Error *local_err = NULL, **errp = &local_err;
DPRINTF("Accepting rdma connection...\n");
VAR_1 = qemu_rdma_accept(rdma);
if (VAR_1) {
ERROR(errp, "RDMA Migration initialization failed!");
return;
}
DPRINTF("Accepted migration\n");
f = qemu_fopen_rdma(rdma, "rb");
if (f == NULL) {
ERROR(errp, "could not qemu_fopen_rdma!");
qemu_rdma_cleanup(rdma);
return;
}
rdma->migration_started_on_destination = 1;
process_incoming_migration(f);
}
| [
"static void FUNC_0(void *VAR_0)\n{",
"RDMAContext *rdma = VAR_0;",
"int VAR_1;",
"QEMUFile *f;",
"Error *local_err = NULL, **errp = &local_err;",
"DPRINTF(\"Accepting rdma connection...\\n\");",
"VAR_1 = qemu_rdma_accept(rdma);",
"if (VAR_1) {",
"ERROR(errp, \"RDMA Migration initialization failed!\");",
"return;",
"}",
"DPRINTF(\"Accepted migration\\n\");",
"f = qemu_fopen_rdma(rdma, \"rb\");",
"if (f == NULL) {",
"ERROR(errp, \"could not qemu_fopen_rdma!\");",
"qemu_rdma_cleanup(rdma);",
"return;",
"}",
"rdma->migration_started_on_destination = 1;",
"process_incoming_migration(f);",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
15
],
[
17
],
[
21
],
[
23
],
[
25
],
[
27
],
[
31
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45
],
[
49
],
[
51
],
[
53
]
] |
21,339 | static int extract_header(AVCodecContext *const avctx,
const AVPacket *const avpkt) {
const uint8_t *buf;
unsigned buf_size;
IffContext *s = avctx->priv_data;
int palette_size;
if (avctx->extradata_size < 2) {
av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
return AVERROR_INVALIDDATA;
palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
if (avpkt) {
int image_size;
if (avpkt->size < 2)
return AVERROR_INVALIDDATA;
image_size = avpkt->size - AV_RB16(avpkt->data);
buf = avpkt->data;
buf_size = bytestream_get_be16(&buf);
if (buf_size <= 1 || image_size <= 1) {
av_log(avctx, AV_LOG_ERROR,
"Invalid image size received: %u -> image data offset: %d\n",
buf_size, image_size);
return AVERROR_INVALIDDATA;
} else {
buf = avctx->extradata;
buf_size = bytestream_get_be16(&buf);
if (buf_size <= 1 || palette_size < 0) {
av_log(avctx, AV_LOG_ERROR,
"Invalid palette size received: %u -> palette data offset: %d\n",
buf_size, palette_size);
return AVERROR_INVALIDDATA;
if (buf_size > 8) {
s->compression = bytestream_get_byte(&buf);
s->bpp = bytestream_get_byte(&buf);
s->ham = bytestream_get_byte(&buf);
s->flags = bytestream_get_byte(&buf);
s->transparency = bytestream_get_be16(&buf);
s->masking = bytestream_get_byte(&buf);
if (s->masking == MASK_HAS_MASK) {
if (s->bpp >= 8) {
avctx->pix_fmt = PIX_FMT_RGB32;
av_freep(&s->mask_palbuf);
s->mask_buf = av_malloc((s->planesize * 32) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->mask_buf)
s->mask_palbuf = av_malloc((2 << s->bpp) * sizeof(uint32_t) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->mask_palbuf) {
s->bpp++;
} else if (s->masking != MASK_NONE && s->masking != MASK_HAS_TRANSPARENT_COLOR) {
av_log(avctx, AV_LOG_ERROR, "Masking not supported\n");
return AVERROR_PATCHWELCOME;
if (!s->bpp || s->bpp > 32) {
av_log(avctx, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
return AVERROR_INVALIDDATA;
} else if (s->ham >= 8) {
av_log(avctx, AV_LOG_ERROR, "Invalid number of hold bits for HAM: %u\n", s->ham);
return AVERROR_INVALIDDATA;
av_freep(&s->ham_buf);
av_freep(&s->ham_palbuf);
if (s->ham) {
int i, count = FFMIN(palette_size / 3, 1 << s->ham);
int ham_count;
const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);
s->ham_buf = av_malloc((s->planesize * 8) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->ham_buf)
ham_count = 8 * (1 << s->ham);
s->ham_palbuf = av_malloc((ham_count << !!(s->masking == MASK_HAS_MASK)) * sizeof (uint32_t) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->ham_palbuf) {
av_freep(&s->ham_buf);
if (count) { // HAM with color palette attached
// prefill with black and palette and set HAM take direct value mask to zero
memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof (uint32_t));
for (i=0; i < count; i++) {
s->ham_palbuf[i*2+1] = 0xFF000000 | AV_RL24(palette + i*3);
count = 1 << s->ham;
} else { // HAM with grayscale color palette
count = 1 << s->ham;
for (i=0; i < count; i++) {
s->ham_palbuf[i*2] = 0xFF000000; // take direct color value from palette
s->ham_palbuf[i*2+1] = 0xFF000000 | av_le2ne32(gray2rgb((i * 255) >> s->ham));
for (i=0; i < count; i++) {
uint32_t tmp = i << (8 - s->ham);
tmp |= tmp >> s->ham;
s->ham_palbuf[(i+count)*2] = 0xFF00FFFF; // just modify blue color component
s->ham_palbuf[(i+count*2)*2] = 0xFFFFFF00; // just modify red color component
s->ham_palbuf[(i+count*3)*2] = 0xFFFF00FF; // just modify green color component
s->ham_palbuf[(i+count)*2+1] = 0xFF000000 | tmp << 16;
s->ham_palbuf[(i+count*2)*2+1] = 0xFF000000 | tmp;
s->ham_palbuf[(i+count*3)*2+1] = 0xFF000000 | tmp << 8;
if (s->masking == MASK_HAS_MASK) {
for (i = 0; i < ham_count; i++)
s->ham_palbuf[(1 << s->bpp) + i] = s->ham_palbuf[i] | 0xFF000000;
return 0;
| true | FFmpeg | 0e1925ddc4bb1499fcfc6a1a3990115f8d30c243 | static int extract_header(AVCodecContext *const avctx,
const AVPacket *const avpkt) {
const uint8_t *buf;
unsigned buf_size;
IffContext *s = avctx->priv_data;
int palette_size;
if (avctx->extradata_size < 2) {
av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
return AVERROR_INVALIDDATA;
palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
if (avpkt) {
int image_size;
if (avpkt->size < 2)
return AVERROR_INVALIDDATA;
image_size = avpkt->size - AV_RB16(avpkt->data);
buf = avpkt->data;
buf_size = bytestream_get_be16(&buf);
if (buf_size <= 1 || image_size <= 1) {
av_log(avctx, AV_LOG_ERROR,
"Invalid image size received: %u -> image data offset: %d\n",
buf_size, image_size);
return AVERROR_INVALIDDATA;
} else {
buf = avctx->extradata;
buf_size = bytestream_get_be16(&buf);
if (buf_size <= 1 || palette_size < 0) {
av_log(avctx, AV_LOG_ERROR,
"Invalid palette size received: %u -> palette data offset: %d\n",
buf_size, palette_size);
return AVERROR_INVALIDDATA;
if (buf_size > 8) {
s->compression = bytestream_get_byte(&buf);
s->bpp = bytestream_get_byte(&buf);
s->ham = bytestream_get_byte(&buf);
s->flags = bytestream_get_byte(&buf);
s->transparency = bytestream_get_be16(&buf);
s->masking = bytestream_get_byte(&buf);
if (s->masking == MASK_HAS_MASK) {
if (s->bpp >= 8) {
avctx->pix_fmt = PIX_FMT_RGB32;
av_freep(&s->mask_palbuf);
s->mask_buf = av_malloc((s->planesize * 32) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->mask_buf)
s->mask_palbuf = av_malloc((2 << s->bpp) * sizeof(uint32_t) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->mask_palbuf) {
s->bpp++;
} else if (s->masking != MASK_NONE && s->masking != MASK_HAS_TRANSPARENT_COLOR) {
av_log(avctx, AV_LOG_ERROR, "Masking not supported\n");
return AVERROR_PATCHWELCOME;
if (!s->bpp || s->bpp > 32) {
av_log(avctx, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
return AVERROR_INVALIDDATA;
} else if (s->ham >= 8) {
av_log(avctx, AV_LOG_ERROR, "Invalid number of hold bits for HAM: %u\n", s->ham);
return AVERROR_INVALIDDATA;
av_freep(&s->ham_buf);
av_freep(&s->ham_palbuf);
if (s->ham) {
int i, count = FFMIN(palette_size / 3, 1 << s->ham);
int ham_count;
const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);
s->ham_buf = av_malloc((s->planesize * 8) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->ham_buf)
ham_count = 8 * (1 << s->ham);
s->ham_palbuf = av_malloc((ham_count << !!(s->masking == MASK_HAS_MASK)) * sizeof (uint32_t) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->ham_palbuf) {
av_freep(&s->ham_buf);
if (count) {
memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof (uint32_t));
for (i=0; i < count; i++) {
s->ham_palbuf[i*2+1] = 0xFF000000 | AV_RL24(palette + i*3);
count = 1 << s->ham;
} else {
count = 1 << s->ham;
for (i=0; i < count; i++) {
s->ham_palbuf[i*2] = 0xFF000000;
s->ham_palbuf[i*2+1] = 0xFF000000 | av_le2ne32(gray2rgb((i * 255) >> s->ham));
for (i=0; i < count; i++) {
uint32_t tmp = i << (8 - s->ham);
tmp |= tmp >> s->ham;
s->ham_palbuf[(i+count)*2] = 0xFF00FFFF;
s->ham_palbuf[(i+count*2)*2] = 0xFFFFFF00;
s->ham_palbuf[(i+count*3)*2] = 0xFFFF00FF;
s->ham_palbuf[(i+count)*2+1] = 0xFF000000 | tmp << 16;
s->ham_palbuf[(i+count*2)*2+1] = 0xFF000000 | tmp;
s->ham_palbuf[(i+count*3)*2+1] = 0xFF000000 | tmp << 8;
if (s->masking == MASK_HAS_MASK) {
for (i = 0; i < ham_count; i++)
s->ham_palbuf[(1 << s->bpp) + i] = s->ham_palbuf[i] | 0xFF000000;
return 0;
| {
"code": [],
"line_no": []
} | static int FUNC_0(AVCodecContext *const VAR_0,
const AVPacket *const VAR_1) {
const uint8_t *VAR_2;
unsigned VAR_3;
IffContext *s = VAR_0->priv_data;
int VAR_4;
if (VAR_0->extradata_size < 2) {
av_log(VAR_0, AV_LOG_ERROR, "not enough extradata\n");
return AVERROR_INVALIDDATA;
VAR_4 = VAR_0->extradata_size - AV_RB16(VAR_0->extradata);
if (VAR_1) {
int VAR_5;
if (VAR_1->size < 2)
return AVERROR_INVALIDDATA;
VAR_5 = VAR_1->size - AV_RB16(VAR_1->data);
VAR_2 = VAR_1->data;
VAR_3 = bytestream_get_be16(&VAR_2);
if (VAR_3 <= 1 || VAR_5 <= 1) {
av_log(VAR_0, AV_LOG_ERROR,
"Invalid image size received: %u -> image data offset: %d\n",
VAR_3, VAR_5);
return AVERROR_INVALIDDATA;
} else {
VAR_2 = VAR_0->extradata;
VAR_3 = bytestream_get_be16(&VAR_2);
if (VAR_3 <= 1 || VAR_4 < 0) {
av_log(VAR_0, AV_LOG_ERROR,
"Invalid VAR_9 size received: %u -> VAR_9 data offset: %d\n",
VAR_3, VAR_4);
return AVERROR_INVALIDDATA;
if (VAR_3 > 8) {
s->compression = bytestream_get_byte(&VAR_2);
s->bpp = bytestream_get_byte(&VAR_2);
s->ham = bytestream_get_byte(&VAR_2);
s->flags = bytestream_get_byte(&VAR_2);
s->transparency = bytestream_get_be16(&VAR_2);
s->masking = bytestream_get_byte(&VAR_2);
if (s->masking == MASK_HAS_MASK) {
if (s->bpp >= 8) {
VAR_0->pix_fmt = PIX_FMT_RGB32;
av_freep(&s->mask_palbuf);
s->mask_buf = av_malloc((s->planesize * 32) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->mask_buf)
s->mask_palbuf = av_malloc((2 << s->bpp) * sizeof(uint32_t) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->mask_palbuf) {
s->bpp++;
} else if (s->masking != MASK_NONE && s->masking != MASK_HAS_TRANSPARENT_COLOR) {
av_log(VAR_0, AV_LOG_ERROR, "Masking not supported\n");
return AVERROR_PATCHWELCOME;
if (!s->bpp || s->bpp > 32) {
av_log(VAR_0, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
return AVERROR_INVALIDDATA;
} else if (s->ham >= 8) {
av_log(VAR_0, AV_LOG_ERROR, "Invalid number of hold bits for HAM: %u\n", s->ham);
return AVERROR_INVALIDDATA;
av_freep(&s->ham_buf);
av_freep(&s->ham_palbuf);
if (s->ham) {
int VAR_6, VAR_7 = FFMIN(VAR_4 / 3, 1 << s->ham);
int VAR_8;
const uint8_t *const VAR_9 = VAR_0->extradata + AV_RB16(VAR_0->extradata);
s->ham_buf = av_malloc((s->planesize * 8) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->ham_buf)
VAR_8 = 8 * (1 << s->ham);
s->ham_palbuf = av_malloc((VAR_8 << !!(s->masking == MASK_HAS_MASK)) * sizeof (uint32_t) + FF_INPUT_BUFFER_PADDING_SIZE);
if (!s->ham_palbuf) {
av_freep(&s->ham_buf);
if (VAR_7) {
memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof (uint32_t));
for (VAR_6=0; VAR_6 < VAR_7; VAR_6++) {
s->ham_palbuf[VAR_6*2+1] = 0xFF000000 | AV_RL24(VAR_9 + VAR_6*3);
VAR_7 = 1 << s->ham;
} else {
VAR_7 = 1 << s->ham;
for (VAR_6=0; VAR_6 < VAR_7; VAR_6++) {
s->ham_palbuf[VAR_6*2] = 0xFF000000;
s->ham_palbuf[VAR_6*2+1] = 0xFF000000 | av_le2ne32(gray2rgb((VAR_6 * 255) >> s->ham));
for (VAR_6=0; VAR_6 < VAR_7; VAR_6++) {
uint32_t tmp = VAR_6 << (8 - s->ham);
tmp |= tmp >> s->ham;
s->ham_palbuf[(VAR_6+VAR_7)*2] = 0xFF00FFFF;
s->ham_palbuf[(VAR_6+VAR_7*2)*2] = 0xFFFFFF00;
s->ham_palbuf[(VAR_6+VAR_7*3)*2] = 0xFFFF00FF;
s->ham_palbuf[(VAR_6+VAR_7)*2+1] = 0xFF000000 | tmp << 16;
s->ham_palbuf[(VAR_6+VAR_7*2)*2+1] = 0xFF000000 | tmp;
s->ham_palbuf[(VAR_6+VAR_7*3)*2+1] = 0xFF000000 | tmp << 8;
if (s->masking == MASK_HAS_MASK) {
for (VAR_6 = 0; VAR_6 < VAR_8; VAR_6++)
s->ham_palbuf[(1 << s->bpp) + VAR_6] = s->ham_palbuf[VAR_6] | 0xFF000000;
return 0;
| [
"static int FUNC_0(AVCodecContext *const VAR_0,\nconst AVPacket *const VAR_1) {",
"const uint8_t *VAR_2;",
"unsigned VAR_3;",
"IffContext *s = VAR_0->priv_data;",
"int VAR_4;",
"if (VAR_0->extradata_size < 2) {",
"av_log(VAR_0, AV_LOG_ERROR, \"not enough extradata\\n\");",
"return AVERROR_INVALIDDATA;",
"VAR_4 = VAR_0->extradata_size - AV_RB16(VAR_0->extradata);",
"if (VAR_1) {",
"int VAR_5;",
"if (VAR_1->size < 2)\nreturn AVERROR_INVALIDDATA;",
"VAR_5 = VAR_1->size - AV_RB16(VAR_1->data);",
"VAR_2 = VAR_1->data;",
"VAR_3 = bytestream_get_be16(&VAR_2);",
"if (VAR_3 <= 1 || VAR_5 <= 1) {",
"av_log(VAR_0, AV_LOG_ERROR,\n\"Invalid image size received: %u -> image data offset: %d\\n\",\nVAR_3, VAR_5);",
"return AVERROR_INVALIDDATA;",
"} else {",
"VAR_2 = VAR_0->extradata;",
"VAR_3 = bytestream_get_be16(&VAR_2);",
"if (VAR_3 <= 1 || VAR_4 < 0) {",
"av_log(VAR_0, AV_LOG_ERROR,\n\"Invalid VAR_9 size received: %u -> VAR_9 data offset: %d\\n\",\nVAR_3, VAR_4);",
"return AVERROR_INVALIDDATA;",
"if (VAR_3 > 8) {",
"s->compression = bytestream_get_byte(&VAR_2);",
"s->bpp = bytestream_get_byte(&VAR_2);",
"s->ham = bytestream_get_byte(&VAR_2);",
"s->flags = bytestream_get_byte(&VAR_2);",
"s->transparency = bytestream_get_be16(&VAR_2);",
"s->masking = bytestream_get_byte(&VAR_2);",
"if (s->masking == MASK_HAS_MASK) {",
"if (s->bpp >= 8) {",
"VAR_0->pix_fmt = PIX_FMT_RGB32;",
"av_freep(&s->mask_palbuf);",
"s->mask_buf = av_malloc((s->planesize * 32) + FF_INPUT_BUFFER_PADDING_SIZE);",
"if (!s->mask_buf)\ns->mask_palbuf = av_malloc((2 << s->bpp) * sizeof(uint32_t) + FF_INPUT_BUFFER_PADDING_SIZE);",
"if (!s->mask_palbuf) {",
"s->bpp++;",
"} else if (s->masking != MASK_NONE && s->masking != MASK_HAS_TRANSPARENT_COLOR) {",
"av_log(VAR_0, AV_LOG_ERROR, \"Masking not supported\\n\");",
"return AVERROR_PATCHWELCOME;",
"if (!s->bpp || s->bpp > 32) {",
"av_log(VAR_0, AV_LOG_ERROR, \"Invalid number of bitplanes: %u\\n\", s->bpp);",
"return AVERROR_INVALIDDATA;",
"} else if (s->ham >= 8) {",
"av_log(VAR_0, AV_LOG_ERROR, \"Invalid number of hold bits for HAM: %u\\n\", s->ham);",
"return AVERROR_INVALIDDATA;",
"av_freep(&s->ham_buf);",
"av_freep(&s->ham_palbuf);",
"if (s->ham) {",
"int VAR_6, VAR_7 = FFMIN(VAR_4 / 3, 1 << s->ham);",
"int VAR_8;",
"const uint8_t *const VAR_9 = VAR_0->extradata + AV_RB16(VAR_0->extradata);",
"s->ham_buf = av_malloc((s->planesize * 8) + FF_INPUT_BUFFER_PADDING_SIZE);",
"if (!s->ham_buf)\nVAR_8 = 8 * (1 << s->ham);",
"s->ham_palbuf = av_malloc((VAR_8 << !!(s->masking == MASK_HAS_MASK)) * sizeof (uint32_t) + FF_INPUT_BUFFER_PADDING_SIZE);",
"if (!s->ham_palbuf) {",
"av_freep(&s->ham_buf);",
"if (VAR_7) {",
"memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof (uint32_t));",
"for (VAR_6=0; VAR_6 < VAR_7; VAR_6++) {",
"s->ham_palbuf[VAR_6*2+1] = 0xFF000000 | AV_RL24(VAR_9 + VAR_6*3);",
"VAR_7 = 1 << s->ham;",
"} else {",
"VAR_7 = 1 << s->ham;",
"for (VAR_6=0; VAR_6 < VAR_7; VAR_6++) {",
"s->ham_palbuf[VAR_6*2] = 0xFF000000;",
"s->ham_palbuf[VAR_6*2+1] = 0xFF000000 | av_le2ne32(gray2rgb((VAR_6 * 255) >> s->ham));",
"for (VAR_6=0; VAR_6 < VAR_7; VAR_6++) {",
"uint32_t tmp = VAR_6 << (8 - s->ham);",
"tmp |= tmp >> s->ham;",
"s->ham_palbuf[(VAR_6+VAR_7)*2] = 0xFF00FFFF;",
"s->ham_palbuf[(VAR_6+VAR_7*2)*2] = 0xFFFFFF00;",
"s->ham_palbuf[(VAR_6+VAR_7*3)*2] = 0xFFFF00FF;",
"s->ham_palbuf[(VAR_6+VAR_7)*2+1] = 0xFF000000 | tmp << 16;",
"s->ham_palbuf[(VAR_6+VAR_7*2)*2+1] = 0xFF000000 | tmp;",
"s->ham_palbuf[(VAR_6+VAR_7*3)*2+1] = 0xFF000000 | tmp << 8;",
"if (s->masking == MASK_HAS_MASK) {",
"for (VAR_6 = 0; VAR_6 < VAR_8; VAR_6++)",
"s->ham_palbuf[(1 << s->bpp) + VAR_6] = s->ham_palbuf[VAR_6] | 0xFF000000;",
"return 0;"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
15
],
[
17
],
[
19
],
[
22
],
[
26
],
[
28
],
[
30,
32
],
[
34
],
[
36
],
[
38
],
[
40
],
[
42,
44,
46
],
[
48
],
[
51
],
[
53
],
[
55
],
[
57
],
[
59,
61,
63
],
[
65
],
[
71
],
[
73
],
[
75
],
[
77
],
[
79
],
[
81
],
[
83
],
[
85
],
[
87
],
[
89
],
[
92
],
[
94
],
[
96,
104
],
[
106
],
[
112
],
[
114
],
[
116
],
[
118
],
[
121
],
[
123
],
[
125
],
[
127
],
[
129
],
[
131
],
[
136
],
[
138
],
[
142
],
[
144
],
[
146
],
[
148
],
[
152
],
[
154,
159
],
[
161
],
[
163
],
[
165
],
[
171
],
[
175
],
[
177
],
[
179
],
[
182
],
[
184
],
[
186
],
[
188
],
[
190
],
[
192
],
[
196
],
[
198
],
[
200
],
[
202
],
[
204
],
[
206
],
[
208
],
[
210
],
[
212
],
[
215
],
[
217
],
[
219
],
[
226
]
] |
21,340 | static void handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOSerial *vser;
VirtIOSerialPort *port;
VirtIOSerialPortInfo *info;
vser = DO_UPCAST(VirtIOSerial, vdev, vdev);
port = find_port_by_vq(vser, vq);
info = port ? DO_UPCAST(VirtIOSerialPortInfo, qdev, port->dev.info) : NULL;
if (!port || !port->host_connected || !info->have_data) {
discard_vq_data(vq, vdev);
return;
}
if (!port->throttled) {
do_flush_queued_data(port, vq, vdev);
return;
}
}
| false | qemu | 03ecd2c80a64d030a22fe67cc7a60f24e17ff211 | static void handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOSerial *vser;
VirtIOSerialPort *port;
VirtIOSerialPortInfo *info;
vser = DO_UPCAST(VirtIOSerial, vdev, vdev);
port = find_port_by_vq(vser, vq);
info = port ? DO_UPCAST(VirtIOSerialPortInfo, qdev, port->dev.info) : NULL;
if (!port || !port->host_connected || !info->have_data) {
discard_vq_data(vq, vdev);
return;
}
if (!port->throttled) {
do_flush_queued_data(port, vq, vdev);
return;
}
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(VirtIODevice *VAR_0, VirtQueue *VAR_1)
{
VirtIOSerial *vser;
VirtIOSerialPort *port;
VirtIOSerialPortInfo *info;
vser = DO_UPCAST(VirtIOSerial, VAR_0, VAR_0);
port = find_port_by_vq(vser, VAR_1);
info = port ? DO_UPCAST(VirtIOSerialPortInfo, qdev, port->dev.info) : NULL;
if (!port || !port->host_connected || !info->have_data) {
discard_vq_data(VAR_1, VAR_0);
return;
}
if (!port->throttled) {
do_flush_queued_data(port, VAR_1, VAR_0);
return;
}
}
| [
"static void FUNC_0(VirtIODevice *VAR_0, VirtQueue *VAR_1)\n{",
"VirtIOSerial *vser;",
"VirtIOSerialPort *port;",
"VirtIOSerialPortInfo *info;",
"vser = DO_UPCAST(VirtIOSerial, VAR_0, VAR_0);",
"port = find_port_by_vq(vser, VAR_1);",
"info = port ? DO_UPCAST(VirtIOSerialPortInfo, qdev, port->dev.info) : NULL;",
"if (!port || !port->host_connected || !info->have_data) {",
"discard_vq_data(VAR_1, VAR_0);",
"return;",
"}",
"if (!port->throttled) {",
"do_flush_queued_data(port, VAR_1, VAR_0);",
"return;",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
],
[
21
],
[
23
],
[
25
],
[
27
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
]
] |
21,341 | static abi_long do_getsockname(int fd, abi_ulong target_addr,
abi_ulong target_addrlen_addr)
{
socklen_t addrlen;
void *addr;
abi_long ret;
if (target_addr == 0)
return get_errno(accept(fd, NULL, NULL));
if (get_user_u32(addrlen, target_addrlen_addr))
return -TARGET_EFAULT;
if (addrlen < 0)
return -TARGET_EINVAL;
addr = alloca(addrlen);
ret = get_errno(getsockname(fd, addr, &addrlen));
if (!is_error(ret)) {
host_to_target_sockaddr(target_addr, addr, addrlen);
if (put_user_u32(addrlen, target_addrlen_addr))
ret = -TARGET_EFAULT;
}
return ret;
}
| false | qemu | 917507b01efea8017bfcb4188ac696612e363e72 | static abi_long do_getsockname(int fd, abi_ulong target_addr,
abi_ulong target_addrlen_addr)
{
socklen_t addrlen;
void *addr;
abi_long ret;
if (target_addr == 0)
return get_errno(accept(fd, NULL, NULL));
if (get_user_u32(addrlen, target_addrlen_addr))
return -TARGET_EFAULT;
if (addrlen < 0)
return -TARGET_EINVAL;
addr = alloca(addrlen);
ret = get_errno(getsockname(fd, addr, &addrlen));
if (!is_error(ret)) {
host_to_target_sockaddr(target_addr, addr, addrlen);
if (put_user_u32(addrlen, target_addrlen_addr))
ret = -TARGET_EFAULT;
}
return ret;
}
| {
"code": [],
"line_no": []
} | static abi_long FUNC_0(int fd, abi_ulong target_addr,
abi_ulong target_addrlen_addr)
{
socklen_t addrlen;
void *VAR_0;
abi_long ret;
if (target_addr == 0)
return get_errno(accept(fd, NULL, NULL));
if (get_user_u32(addrlen, target_addrlen_addr))
return -TARGET_EFAULT;
if (addrlen < 0)
return -TARGET_EINVAL;
VAR_0 = alloca(addrlen);
ret = get_errno(getsockname(fd, VAR_0, &addrlen));
if (!is_error(ret)) {
host_to_target_sockaddr(target_addr, VAR_0, addrlen);
if (put_user_u32(addrlen, target_addrlen_addr))
ret = -TARGET_EFAULT;
}
return ret;
}
| [
"static abi_long FUNC_0(int fd, abi_ulong target_addr,\nabi_ulong target_addrlen_addr)\n{",
"socklen_t addrlen;",
"void *VAR_0;",
"abi_long ret;",
"if (target_addr == 0)\nreturn get_errno(accept(fd, NULL, NULL));",
"if (get_user_u32(addrlen, target_addrlen_addr))\nreturn -TARGET_EFAULT;",
"if (addrlen < 0)\nreturn -TARGET_EINVAL;",
"VAR_0 = alloca(addrlen);",
"ret = get_errno(getsockname(fd, VAR_0, &addrlen));",
"if (!is_error(ret)) {",
"host_to_target_sockaddr(target_addr, VAR_0, addrlen);",
"if (put_user_u32(addrlen, target_addrlen_addr))\nret = -TARGET_EFAULT;",
"}",
"return ret;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
15,
17
],
[
21,
23
],
[
27,
29
],
[
33
],
[
37
],
[
39
],
[
41
],
[
43,
45
],
[
47
],
[
49
],
[
51
]
] |
21,342 | static int pc_rec_cmp(const void *p1, const void *p2)
{
PCRecord *r1 = *(PCRecord **)p1;
PCRecord *r2 = *(PCRecord **)p2;
if (r1->count < r2->count)
return 1;
else if (r1->count == r2->count)
return 0;
else
return -1;
}
| false | qemu | 4a1418e07bdcfaa3177739e04707ecaec75d89e1 | static int pc_rec_cmp(const void *p1, const void *p2)
{
PCRecord *r1 = *(PCRecord **)p1;
PCRecord *r2 = *(PCRecord **)p2;
if (r1->count < r2->count)
return 1;
else if (r1->count == r2->count)
return 0;
else
return -1;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(const void *VAR_0, const void *VAR_1)
{
PCRecord *r1 = *(PCRecord **)VAR_0;
PCRecord *r2 = *(PCRecord **)VAR_1;
if (r1->count < r2->count)
return 1;
else if (r1->count == r2->count)
return 0;
else
return -1;
}
| [
"static int FUNC_0(const void *VAR_0, const void *VAR_1)\n{",
"PCRecord *r1 = *(PCRecord **)VAR_0;",
"PCRecord *r2 = *(PCRecord **)VAR_1;",
"if (r1->count < r2->count)\nreturn 1;",
"else if (r1->count == r2->count)\nreturn 0;",
"else\nreturn -1;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9,
11
],
[
13,
15
],
[
17,
19
],
[
21
]
] |
21,343 | static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
bool want_zero,
int64_t sector_num,
int nb_sectors, int *pnum,
BlockDriverState **file)
{
int64_t total_sectors;
int64_t n;
int64_t ret, ret2;
BlockDriverState *local_file = NULL;
assert(pnum);
*pnum = 0;
total_sectors = bdrv_nb_sectors(bs);
if (total_sectors < 0) {
ret = total_sectors;
goto early_out;
}
if (sector_num >= total_sectors) {
ret = BDRV_BLOCK_EOF;
goto early_out;
}
if (!nb_sectors) {
ret = 0;
goto early_out;
}
n = total_sectors - sector_num;
if (n < nb_sectors) {
nb_sectors = n;
}
if (!bs->drv->bdrv_co_get_block_status) {
*pnum = nb_sectors;
ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
if (sector_num + nb_sectors == total_sectors) {
ret |= BDRV_BLOCK_EOF;
}
if (bs->drv->protocol_name) {
ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
local_file = bs;
}
goto early_out;
}
bdrv_inc_in_flight(bs);
ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
&local_file);
if (ret < 0) {
*pnum = 0;
goto out;
}
if (ret & BDRV_BLOCK_RAW) {
assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
ret = bdrv_co_get_block_status(local_file, want_zero,
ret >> BDRV_SECTOR_BITS,
*pnum, pnum, &local_file);
goto out;
}
if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
ret |= BDRV_BLOCK_ALLOCATED;
} else if (want_zero) {
if (bdrv_unallocated_blocks_are_zero(bs)) {
ret |= BDRV_BLOCK_ZERO;
} else if (bs->backing) {
BlockDriverState *bs2 = bs->backing->bs;
int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
ret |= BDRV_BLOCK_ZERO;
}
}
}
if (want_zero && local_file && local_file != bs &&
(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
(ret & BDRV_BLOCK_OFFSET_VALID)) {
int file_pnum;
ret2 = bdrv_co_get_block_status(local_file, want_zero,
ret >> BDRV_SECTOR_BITS,
*pnum, &file_pnum, NULL);
if (ret2 >= 0) {
/* Ignore errors. This is just providing extra information, it
* is useful but not necessary.
*/
if (ret2 & BDRV_BLOCK_EOF &&
(!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
/*
* It is valid for the format block driver to read
* beyond the end of the underlying file's current
* size; such areas read as zero.
*/
ret |= BDRV_BLOCK_ZERO;
} else {
/* Limit request to the range reported by the protocol driver */
*pnum = file_pnum;
ret |= (ret2 & BDRV_BLOCK_ZERO);
}
}
}
out:
bdrv_dec_in_flight(bs);
if (ret >= 0 && sector_num + *pnum == total_sectors) {
ret |= BDRV_BLOCK_EOF;
}
early_out:
if (file) {
*file = local_file;
}
return ret;
}
| false | qemu | 2e8bc7874bb674b7d6837706b1249bf871941637 | static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
bool want_zero,
int64_t sector_num,
int nb_sectors, int *pnum,
BlockDriverState **file)
{
int64_t total_sectors;
int64_t n;
int64_t ret, ret2;
BlockDriverState *local_file = NULL;
assert(pnum);
*pnum = 0;
total_sectors = bdrv_nb_sectors(bs);
if (total_sectors < 0) {
ret = total_sectors;
goto early_out;
}
if (sector_num >= total_sectors) {
ret = BDRV_BLOCK_EOF;
goto early_out;
}
if (!nb_sectors) {
ret = 0;
goto early_out;
}
n = total_sectors - sector_num;
if (n < nb_sectors) {
nb_sectors = n;
}
if (!bs->drv->bdrv_co_get_block_status) {
*pnum = nb_sectors;
ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
if (sector_num + nb_sectors == total_sectors) {
ret |= BDRV_BLOCK_EOF;
}
if (bs->drv->protocol_name) {
ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
local_file = bs;
}
goto early_out;
}
bdrv_inc_in_flight(bs);
ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
&local_file);
if (ret < 0) {
*pnum = 0;
goto out;
}
if (ret & BDRV_BLOCK_RAW) {
assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
ret = bdrv_co_get_block_status(local_file, want_zero,
ret >> BDRV_SECTOR_BITS,
*pnum, pnum, &local_file);
goto out;
}
if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
ret |= BDRV_BLOCK_ALLOCATED;
} else if (want_zero) {
if (bdrv_unallocated_blocks_are_zero(bs)) {
ret |= BDRV_BLOCK_ZERO;
} else if (bs->backing) {
BlockDriverState *bs2 = bs->backing->bs;
int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
ret |= BDRV_BLOCK_ZERO;
}
}
}
if (want_zero && local_file && local_file != bs &&
(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
(ret & BDRV_BLOCK_OFFSET_VALID)) {
int file_pnum;
ret2 = bdrv_co_get_block_status(local_file, want_zero,
ret >> BDRV_SECTOR_BITS,
*pnum, &file_pnum, NULL);
if (ret2 >= 0) {
if (ret2 & BDRV_BLOCK_EOF &&
(!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
ret |= BDRV_BLOCK_ZERO;
} else {
*pnum = file_pnum;
ret |= (ret2 & BDRV_BLOCK_ZERO);
}
}
}
out:
bdrv_dec_in_flight(bs);
if (ret >= 0 && sector_num + *pnum == total_sectors) {
ret |= BDRV_BLOCK_EOF;
}
early_out:
if (file) {
*file = local_file;
}
return ret;
}
| {
"code": [],
"line_no": []
} | static int64_t VAR_0 bdrv_co_get_block_status(BlockDriverState *bs,
bool want_zero,
int64_t sector_num,
int nb_sectors, int *pnum,
BlockDriverState **file)
{
int64_t total_sectors;
int64_t n;
int64_t ret, ret2;
BlockDriverState *local_file = NULL;
assert(pnum);
*pnum = 0;
total_sectors = bdrv_nb_sectors(bs);
if (total_sectors < 0) {
ret = total_sectors;
goto early_out;
}
if (sector_num >= total_sectors) {
ret = BDRV_BLOCK_EOF;
goto early_out;
}
if (!nb_sectors) {
ret = 0;
goto early_out;
}
n = total_sectors - sector_num;
if (n < nb_sectors) {
nb_sectors = n;
}
if (!bs->drv->bdrv_co_get_block_status) {
*pnum = nb_sectors;
ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
if (sector_num + nb_sectors == total_sectors) {
ret |= BDRV_BLOCK_EOF;
}
if (bs->drv->protocol_name) {
ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
local_file = bs;
}
goto early_out;
}
bdrv_inc_in_flight(bs);
ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
&local_file);
if (ret < 0) {
*pnum = 0;
goto out;
}
if (ret & BDRV_BLOCK_RAW) {
assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
ret = bdrv_co_get_block_status(local_file, want_zero,
ret >> BDRV_SECTOR_BITS,
*pnum, pnum, &local_file);
goto out;
}
if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
ret |= BDRV_BLOCK_ALLOCATED;
} else if (want_zero) {
if (bdrv_unallocated_blocks_are_zero(bs)) {
ret |= BDRV_BLOCK_ZERO;
} else if (bs->backing) {
BlockDriverState *bs2 = bs->backing->bs;
int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
ret |= BDRV_BLOCK_ZERO;
}
}
}
if (want_zero && local_file && local_file != bs &&
(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
(ret & BDRV_BLOCK_OFFSET_VALID)) {
int file_pnum;
ret2 = bdrv_co_get_block_status(local_file, want_zero,
ret >> BDRV_SECTOR_BITS,
*pnum, &file_pnum, NULL);
if (ret2 >= 0) {
if (ret2 & BDRV_BLOCK_EOF &&
(!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
ret |= BDRV_BLOCK_ZERO;
} else {
*pnum = file_pnum;
ret |= (ret2 & BDRV_BLOCK_ZERO);
}
}
}
out:
bdrv_dec_in_flight(bs);
if (ret >= 0 && sector_num + *pnum == total_sectors) {
ret |= BDRV_BLOCK_EOF;
}
early_out:
if (file) {
*file = local_file;
}
return ret;
}
| [
"static int64_t VAR_0 bdrv_co_get_block_status(BlockDriverState *bs,\nbool want_zero,\nint64_t sector_num,\nint nb_sectors, int *pnum,\nBlockDriverState **file)\n{",
"int64_t total_sectors;",
"int64_t n;",
"int64_t ret, ret2;",
"BlockDriverState *local_file = NULL;",
"assert(pnum);",
"*pnum = 0;",
"total_sectors = bdrv_nb_sectors(bs);",
"if (total_sectors < 0) {",
"ret = total_sectors;",
"goto early_out;",
"}",
"if (sector_num >= total_sectors) {",
"ret = BDRV_BLOCK_EOF;",
"goto early_out;",
"}",
"if (!nb_sectors) {",
"ret = 0;",
"goto early_out;",
"}",
"n = total_sectors - sector_num;",
"if (n < nb_sectors) {",
"nb_sectors = n;",
"}",
"if (!bs->drv->bdrv_co_get_block_status) {",
"*pnum = nb_sectors;",
"ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;",
"if (sector_num + nb_sectors == total_sectors) {",
"ret |= BDRV_BLOCK_EOF;",
"}",
"if (bs->drv->protocol_name) {",
"ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);",
"local_file = bs;",
"}",
"goto early_out;",
"}",
"bdrv_inc_in_flight(bs);",
"ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,\n&local_file);",
"if (ret < 0) {",
"*pnum = 0;",
"goto out;",
"}",
"if (ret & BDRV_BLOCK_RAW) {",
"assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);",
"ret = bdrv_co_get_block_status(local_file, want_zero,\nret >> BDRV_SECTOR_BITS,\n*pnum, pnum, &local_file);",
"goto out;",
"}",
"if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {",
"ret |= BDRV_BLOCK_ALLOCATED;",
"} else if (want_zero) {",
"if (bdrv_unallocated_blocks_are_zero(bs)) {",
"ret |= BDRV_BLOCK_ZERO;",
"} else if (bs->backing) {",
"BlockDriverState *bs2 = bs->backing->bs;",
"int64_t nb_sectors2 = bdrv_nb_sectors(bs2);",
"if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {",
"ret |= BDRV_BLOCK_ZERO;",
"}",
"}",
"}",
"if (want_zero && local_file && local_file != bs &&\n(ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&\n(ret & BDRV_BLOCK_OFFSET_VALID)) {",
"int file_pnum;",
"ret2 = bdrv_co_get_block_status(local_file, want_zero,\nret >> BDRV_SECTOR_BITS,\n*pnum, &file_pnum, NULL);",
"if (ret2 >= 0) {",
"if (ret2 & BDRV_BLOCK_EOF &&\n(!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {",
"ret |= BDRV_BLOCK_ZERO;",
"} else {",
"*pnum = file_pnum;",
"ret |= (ret2 & BDRV_BLOCK_ZERO);",
"}",
"}",
"}",
"out:\nbdrv_dec_in_flight(bs);",
"if (ret >= 0 && sector_num + *pnum == total_sectors) {",
"ret |= BDRV_BLOCK_EOF;",
"}",
"early_out:\nif (file) {",
"*file = local_file;",
"}",
"return ret;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7,
9,
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
39
],
[
41
],
[
43
],
[
45
],
[
47
],
[
49
],
[
51
],
[
53
],
[
57
],
[
59
],
[
61
],
[
63
],
[
67
],
[
69
],
[
71
],
[
73
],
[
75
],
[
77
],
[
79
],
[
81
],
[
83
],
[
85
],
[
87
],
[
89
],
[
93
],
[
95,
97
],
[
99
],
[
101
],
[
103
],
[
105
],
[
109
],
[
111
],
[
113,
115,
117
],
[
119
],
[
121
],
[
125
],
[
127
],
[
129
],
[
131
],
[
133
],
[
135
],
[
137
],
[
139
],
[
143
],
[
145
],
[
147
],
[
149
],
[
151
],
[
155,
157,
159
],
[
161
],
[
165,
167,
169
],
[
171
],
[
179,
181
],
[
193
],
[
195
],
[
199
],
[
201
],
[
203
],
[
205
],
[
207
],
[
211,
213
],
[
215
],
[
217
],
[
219
],
[
221,
223
],
[
225
],
[
227
],
[
229
],
[
231
]
] |
21,344 | static void usbredir_bulk_packet(void *priv, uint32_t id,
struct usb_redir_bulk_packet_header *bulk_packet,
uint8_t *data, int data_len)
{
USBRedirDevice *dev = priv;
uint8_t ep = bulk_packet->endpoint;
int len = bulk_packet->length;
AsyncURB *aurb;
DPRINTF("bulk-in status %d ep %02X len %d id %u\n", bulk_packet->status,
ep, len, id);
aurb = async_find(dev, id);
if (!aurb) {
free(data);
return;
}
if (aurb->bulk_packet.endpoint != bulk_packet->endpoint ||
aurb->bulk_packet.stream_id != bulk_packet->stream_id) {
ERROR("return bulk packet mismatch, please report this!\n");
len = USB_RET_NAK;
}
if (aurb->packet) {
len = usbredir_handle_status(dev, bulk_packet->status, len);
if (len > 0) {
usbredir_log_data(dev, "bulk data in:", data, data_len);
if (data_len <= aurb->packet->iov.size) {
usb_packet_copy(aurb->packet, data, data_len);
} else {
ERROR("bulk buffer too small (%d > %zd)\n", data_len,
aurb->packet->iov.size);
len = USB_RET_STALL;
}
}
aurb->packet->result = len;
usb_packet_complete(&dev->dev, aurb->packet);
}
async_free(dev, aurb);
free(data);
}
| false | qemu | 104981d52b63dc3d68f39d4442881c667f44bbb9 | static void usbredir_bulk_packet(void *priv, uint32_t id,
struct usb_redir_bulk_packet_header *bulk_packet,
uint8_t *data, int data_len)
{
USBRedirDevice *dev = priv;
uint8_t ep = bulk_packet->endpoint;
int len = bulk_packet->length;
AsyncURB *aurb;
DPRINTF("bulk-in status %d ep %02X len %d id %u\n", bulk_packet->status,
ep, len, id);
aurb = async_find(dev, id);
if (!aurb) {
free(data);
return;
}
if (aurb->bulk_packet.endpoint != bulk_packet->endpoint ||
aurb->bulk_packet.stream_id != bulk_packet->stream_id) {
ERROR("return bulk packet mismatch, please report this!\n");
len = USB_RET_NAK;
}
if (aurb->packet) {
len = usbredir_handle_status(dev, bulk_packet->status, len);
if (len > 0) {
usbredir_log_data(dev, "bulk data in:", data, data_len);
if (data_len <= aurb->packet->iov.size) {
usb_packet_copy(aurb->packet, data, data_len);
} else {
ERROR("bulk buffer too small (%d > %zd)\n", data_len,
aurb->packet->iov.size);
len = USB_RET_STALL;
}
}
aurb->packet->result = len;
usb_packet_complete(&dev->dev, aurb->packet);
}
async_free(dev, aurb);
free(data);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(void *VAR_0, uint32_t VAR_1,
struct usb_redir_bulk_packet_header *VAR_2,
uint8_t *VAR_3, int VAR_4)
{
USBRedirDevice *dev = VAR_0;
uint8_t ep = VAR_2->endpoint;
int VAR_5 = VAR_2->length;
AsyncURB *aurb;
DPRINTF("bulk-in status %d ep %02X VAR_5 %d VAR_1 %u\n", VAR_2->status,
ep, VAR_5, VAR_1);
aurb = async_find(dev, VAR_1);
if (!aurb) {
free(VAR_3);
return;
}
if (aurb->VAR_2.endpoint != VAR_2->endpoint ||
aurb->VAR_2.stream_id != VAR_2->stream_id) {
ERROR("return bulk packet mismatch, please report this!\n");
VAR_5 = USB_RET_NAK;
}
if (aurb->packet) {
VAR_5 = usbredir_handle_status(dev, VAR_2->status, VAR_5);
if (VAR_5 > 0) {
usbredir_log_data(dev, "bulk VAR_3 in:", VAR_3, VAR_4);
if (VAR_4 <= aurb->packet->iov.size) {
usb_packet_copy(aurb->packet, VAR_3, VAR_4);
} else {
ERROR("bulk buffer too small (%d > %zd)\n", VAR_4,
aurb->packet->iov.size);
VAR_5 = USB_RET_STALL;
}
}
aurb->packet->result = VAR_5;
usb_packet_complete(&dev->dev, aurb->packet);
}
async_free(dev, aurb);
free(VAR_3);
}
| [
"static void FUNC_0(void *VAR_0, uint32_t VAR_1,\nstruct usb_redir_bulk_packet_header *VAR_2,\nuint8_t *VAR_3, int VAR_4)\n{",
"USBRedirDevice *dev = VAR_0;",
"uint8_t ep = VAR_2->endpoint;",
"int VAR_5 = VAR_2->length;",
"AsyncURB *aurb;",
"DPRINTF(\"bulk-in status %d ep %02X VAR_5 %d VAR_1 %u\\n\", VAR_2->status,\nep, VAR_5, VAR_1);",
"aurb = async_find(dev, VAR_1);",
"if (!aurb) {",
"free(VAR_3);",
"return;",
"}",
"if (aurb->VAR_2.endpoint != VAR_2->endpoint ||\naurb->VAR_2.stream_id != VAR_2->stream_id) {",
"ERROR(\"return bulk packet mismatch, please report this!\\n\");",
"VAR_5 = USB_RET_NAK;",
"}",
"if (aurb->packet) {",
"VAR_5 = usbredir_handle_status(dev, VAR_2->status, VAR_5);",
"if (VAR_5 > 0) {",
"usbredir_log_data(dev, \"bulk VAR_3 in:\", VAR_3, VAR_4);",
"if (VAR_4 <= aurb->packet->iov.size) {",
"usb_packet_copy(aurb->packet, VAR_3, VAR_4);",
"} else {",
"ERROR(\"bulk buffer too small (%d > %zd)\\n\", VAR_4,\naurb->packet->iov.size);",
"VAR_5 = USB_RET_STALL;",
"}",
"}",
"aurb->packet->result = VAR_5;",
"usb_packet_complete(&dev->dev, aurb->packet);",
"}",
"async_free(dev, aurb);",
"free(VAR_3);",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
19,
21
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
37,
39
],
[
41
],
[
43
],
[
45
],
[
49
],
[
51
],
[
53
],
[
55
],
[
57
],
[
59
],
[
61
],
[
63,
65
],
[
67
],
[
69
],
[
71
],
[
73
],
[
75
],
[
77
],
[
79
],
[
81
],
[
83
]
] |
21,346 | static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
unsigned int fcc_offset)
{
gen_mov_reg_FCC0(dst, src, fcc_offset);
gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
tcg_gen_and_tl(dst, dst, cpu_tmp0);
}
| false | qemu | de9e9d9f17a36ff76c1a02a5348835e5e0a081b0 | static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
unsigned int fcc_offset)
{
gen_mov_reg_FCC0(dst, src, fcc_offset);
gen_mov_reg_FCC1(cpu_tmp0, src, fcc_offset);
tcg_gen_and_tl(dst, dst, cpu_tmp0);
}
| {
"code": [],
"line_no": []
} | static inline void FUNC_0(TCGv VAR_0, TCGv VAR_1,
unsigned int VAR_2)
{
gen_mov_reg_FCC0(VAR_0, VAR_1, VAR_2);
gen_mov_reg_FCC1(cpu_tmp0, VAR_1, VAR_2);
tcg_gen_and_tl(VAR_0, VAR_0, cpu_tmp0);
}
| [
"static inline void FUNC_0(TCGv VAR_0, TCGv VAR_1,\nunsigned int VAR_2)\n{",
"gen_mov_reg_FCC0(VAR_0, VAR_1, VAR_2);",
"gen_mov_reg_FCC1(cpu_tmp0, VAR_1, VAR_2);",
"tcg_gen_and_tl(VAR_0, VAR_0, cpu_tmp0);",
"}"
] | [
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
]
] |
21,347 | static void virtio_pci_device_plugged(DeviceState *d)
{
VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
VirtioBusState *bus = &proxy->bus;
uint8_t *config;
uint32_t size;
config = proxy->pci_dev.config;
if (proxy->class_code) {
pci_config_set_class(config, proxy->class_code);
}
pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
pci_get_word(config + PCI_VENDOR_ID));
pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
config[PCI_INTERRUPT_PIN] = 1;
if (proxy->nvectors &&
msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1)) {
error_report("unable to init msix vectors to %" PRIu32,
proxy->nvectors);
proxy->nvectors = 0;
}
proxy->pci_dev.config_write = virtio_write_config;
size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
+ virtio_bus_get_vdev_config_len(bus);
if (size & (size - 1)) {
size = 1 << qemu_fls(size);
}
memory_region_init_io(&proxy->bar, OBJECT(proxy), &virtio_pci_config_ops,
proxy, "virtio-pci", size);
pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
&proxy->bar);
if (!kvm_has_many_ioeventfds()) {
proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
}
proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
proxy->host_features = virtio_bus_get_vdev_features(bus,
proxy->host_features);
}
| false | qemu | 0cd09c3a6cc2230ba38c462fc410b4acce59eb6f | static void virtio_pci_device_plugged(DeviceState *d)
{
VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
VirtioBusState *bus = &proxy->bus;
uint8_t *config;
uint32_t size;
config = proxy->pci_dev.config;
if (proxy->class_code) {
pci_config_set_class(config, proxy->class_code);
}
pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
pci_get_word(config + PCI_VENDOR_ID));
pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
config[PCI_INTERRUPT_PIN] = 1;
if (proxy->nvectors &&
msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1)) {
error_report("unable to init msix vectors to %" PRIu32,
proxy->nvectors);
proxy->nvectors = 0;
}
proxy->pci_dev.config_write = virtio_write_config;
size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
+ virtio_bus_get_vdev_config_len(bus);
if (size & (size - 1)) {
size = 1 << qemu_fls(size);
}
memory_region_init_io(&proxy->bar, OBJECT(proxy), &virtio_pci_config_ops,
proxy, "virtio-pci", size);
pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
&proxy->bar);
if (!kvm_has_many_ioeventfds()) {
proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
}
proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
proxy->host_features = virtio_bus_get_vdev_features(bus,
proxy->host_features);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(DeviceState *VAR_0)
{
VirtIOPCIProxy *proxy = VIRTIO_PCI(VAR_0);
VirtioBusState *bus = &proxy->bus;
uint8_t *config;
uint32_t size;
config = proxy->pci_dev.config;
if (proxy->class_code) {
pci_config_set_class(config, proxy->class_code);
}
pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
pci_get_word(config + PCI_VENDOR_ID));
pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
config[PCI_INTERRUPT_PIN] = 1;
if (proxy->nvectors &&
msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1)) {
error_report("unable to init msix vectors to %" PRIu32,
proxy->nvectors);
proxy->nvectors = 0;
}
proxy->pci_dev.config_write = virtio_write_config;
size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
+ virtio_bus_get_vdev_config_len(bus);
if (size & (size - 1)) {
size = 1 << qemu_fls(size);
}
memory_region_init_io(&proxy->bar, OBJECT(proxy), &virtio_pci_config_ops,
proxy, "virtio-pci", size);
pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
&proxy->bar);
if (!kvm_has_many_ioeventfds()) {
proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
}
proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
proxy->host_features = virtio_bus_get_vdev_features(bus,
proxy->host_features);
}
| [
"static void FUNC_0(DeviceState *VAR_0)\n{",
"VirtIOPCIProxy *proxy = VIRTIO_PCI(VAR_0);",
"VirtioBusState *bus = &proxy->bus;",
"uint8_t *config;",
"uint32_t size;",
"config = proxy->pci_dev.config;",
"if (proxy->class_code) {",
"pci_config_set_class(config, proxy->class_code);",
"}",
"pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,\npci_get_word(config + PCI_VENDOR_ID));",
"pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));",
"config[PCI_INTERRUPT_PIN] = 1;",
"if (proxy->nvectors &&\nmsix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1)) {",
"error_report(\"unable to init msix vectors to %\" PRIu32,\nproxy->nvectors);",
"proxy->nvectors = 0;",
"}",
"proxy->pci_dev.config_write = virtio_write_config;",
"size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)\n+ virtio_bus_get_vdev_config_len(bus);",
"if (size & (size - 1)) {",
"size = 1 << qemu_fls(size);",
"}",
"memory_region_init_io(&proxy->bar, OBJECT(proxy), &virtio_pci_config_ops,\nproxy, \"virtio-pci\", size);",
"pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,\n&proxy->bar);",
"if (!kvm_has_many_ioeventfds()) {",
"proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;",
"}",
"proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;",
"proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;",
"proxy->host_features = virtio_bus_get_vdev_features(bus,\nproxy->host_features);",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23,
25
],
[
27
],
[
29
],
[
33,
35
],
[
37,
39
],
[
41
],
[
43
],
[
47
],
[
51,
53
],
[
55
],
[
57
],
[
59
],
[
63,
65
],
[
67,
69
],
[
73
],
[
75
],
[
77
],
[
81
],
[
83
],
[
85,
87
],
[
89
]
] |
21,349 | void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp)
{
int i, j = 0;
const int mb_block_count = 4 + (1 << s->chroma_format);
cbp <<= 12-mb_block_count;
for (i = 0; i < mb_block_count; i++) {
if (cbp & (1 << 11))
s->pblocks[i] = &s->block[j++];
else
s->pblocks[i] = NULL;
cbp += cbp;
}
}
| false | FFmpeg | dcc39ee10e82833ce24aa57926c00ffeb1948198 | void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp)
{
int i, j = 0;
const int mb_block_count = 4 + (1 << s->chroma_format);
cbp <<= 12-mb_block_count;
for (i = 0; i < mb_block_count; i++) {
if (cbp & (1 << 11))
s->pblocks[i] = &s->block[j++];
else
s->pblocks[i] = NULL;
cbp += cbp;
}
}
| {
"code": [],
"line_no": []
} | void FUNC_0(MpegEncContext *VAR_0, int VAR_1)
{
int VAR_2, VAR_3 = 0;
const int VAR_4 = 4 + (1 << VAR_0->chroma_format);
VAR_1 <<= 12-VAR_4;
for (VAR_2 = 0; VAR_2 < VAR_4; VAR_2++) {
if (VAR_1 & (1 << 11))
VAR_0->pblocks[VAR_2] = &VAR_0->block[VAR_3++];
else
VAR_0->pblocks[VAR_2] = NULL;
VAR_1 += VAR_1;
}
}
| [
"void FUNC_0(MpegEncContext *VAR_0, int VAR_1)\n{",
"int VAR_2, VAR_3 = 0;",
"const int VAR_4 = 4 + (1 << VAR_0->chroma_format);",
"VAR_1 <<= 12-VAR_4;",
"for (VAR_2 = 0; VAR_2 < VAR_4; VAR_2++) {",
"if (VAR_1 & (1 << 11))\nVAR_0->pblocks[VAR_2] = &VAR_0->block[VAR_3++];",
"else\nVAR_0->pblocks[VAR_2] = NULL;",
"VAR_1 += VAR_1;",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
11
],
[
13
],
[
15,
17
],
[
19,
21
],
[
23
],
[
25
],
[
27
]
] |
21,350 | static void cpu_print_cc(FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
uint32_t cc)
{
cpu_fprintf(f, "%c%c%c%c", cc & PSR_NEG? 'N' : '-',
cc & PSR_ZERO? 'Z' : '-', cc & PSR_OVF? 'V' : '-',
cc & PSR_CARRY? 'C' : '-');
}
| false | qemu | 9a78eead0c74333a394c0f7bbfc4423ac746fcd5 | static void cpu_print_cc(FILE *f,
int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
uint32_t cc)
{
cpu_fprintf(f, "%c%c%c%c", cc & PSR_NEG? 'N' : '-',
cc & PSR_ZERO? 'Z' : '-', cc & PSR_OVF? 'V' : '-',
cc & PSR_CARRY? 'C' : '-');
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(FILE *VAR_2,
int (*VAR_1)(FILE *VAR_2, const char *VAR_2, ...),
uint32_t VAR_3)
{
VAR_1(VAR_2, "%c%c%c%c", VAR_3 & PSR_NEG? 'N' : '-',
VAR_3 & PSR_ZERO? 'Z' : '-', VAR_3 & PSR_OVF? 'V' : '-',
VAR_3 & PSR_CARRY? 'C' : '-');
}
| [
"static void FUNC_0(FILE *VAR_2,\nint (*VAR_1)(FILE *VAR_2, const char *VAR_2, ...),\nuint32_t VAR_3)\n{",
"VAR_1(VAR_2, \"%c%c%c%c\", VAR_3 & PSR_NEG? 'N' : '-',\nVAR_3 & PSR_ZERO? 'Z' : '-', VAR_3 & PSR_OVF? 'V' : '-',\nVAR_3 & PSR_CARRY? 'C' : '-');",
"}"
] | [
0,
0,
0
] | [
[
1,
3,
5,
7
],
[
9,
11,
13
],
[
15
]
] |
21,351 | static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t searched_pc)
{
CPUArchState *env = cpu->env_ptr;
TCGContext *s = &tcg_ctx;
int j;
uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
tcg_func_start(s);
gen_intermediate_code_pc(env, tb);
if (use_icount) {
/* Reset the cycle counter to the start of the block. */
cpu->icount_decr.u16.low += tb->icount;
/* Clear the IO flag. */
cpu->can_do_io = 0;
}
/* find opc index corresponding to search_pc */
tc_ptr = (uintptr_t)tb->tc_ptr;
if (searched_pc < tc_ptr)
return -1;
s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
s->tb_jmp_offset = tb->tb_jmp_offset;
s->tb_next = NULL;
#else
s->tb_jmp_offset = NULL;
s->tb_next = tb->tb_next;
#endif
j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
searched_pc - tc_ptr);
if (j < 0)
return -1;
/* now find start of instruction before */
while (s->gen_opc_instr_start[j] == 0) {
j--;
}
cpu->icount_decr.u16.low -= s->gen_opc_icount[j];
restore_state_to_opc(env, tb, j);
#ifdef CONFIG_PROFILER
s->restore_time += profile_getclock() - ti;
s->restore_count++;
#endif
return 0;
}
| false | qemu | bd79255d2571a3c68820117caf94ea9afe1d527e | static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
uintptr_t searched_pc)
{
CPUArchState *env = cpu->env_ptr;
TCGContext *s = &tcg_ctx;
int j;
uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
tcg_func_start(s);
gen_intermediate_code_pc(env, tb);
if (use_icount) {
cpu->icount_decr.u16.low += tb->icount;
cpu->can_do_io = 0;
}
tc_ptr = (uintptr_t)tb->tc_ptr;
if (searched_pc < tc_ptr)
return -1;
s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
s->tb_jmp_offset = tb->tb_jmp_offset;
s->tb_next = NULL;
#else
s->tb_jmp_offset = NULL;
s->tb_next = tb->tb_next;
#endif
j = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
searched_pc - tc_ptr);
if (j < 0)
return -1;
while (s->gen_opc_instr_start[j] == 0) {
j--;
}
cpu->icount_decr.u16.low -= s->gen_opc_icount[j];
restore_state_to_opc(env, tb, j);
#ifdef CONFIG_PROFILER
s->restore_time += profile_getclock() - ti;
s->restore_count++;
#endif
return 0;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(CPUState *VAR_0, TranslationBlock *VAR_1,
uintptr_t VAR_2)
{
CPUArchState *env = VAR_0->env_ptr;
TCGContext *s = &tcg_ctx;
int VAR_3;
uintptr_t tc_ptr;
#ifdef CONFIG_PROFILER
int64_t ti;
#endif
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
tcg_func_start(s);
gen_intermediate_code_pc(env, VAR_1);
if (use_icount) {
VAR_0->icount_decr.u16.low += VAR_1->icount;
VAR_0->can_do_io = 0;
}
tc_ptr = (uintptr_t)VAR_1->tc_ptr;
if (VAR_2 < tc_ptr)
return -1;
s->tb_next_offset = VAR_1->tb_next_offset;
#ifdef USE_DIRECT_JUMP
s->tb_jmp_offset = VAR_1->tb_jmp_offset;
s->tb_next = NULL;
#else
s->tb_jmp_offset = NULL;
s->tb_next = VAR_1->tb_next;
#endif
VAR_3 = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,
VAR_2 - tc_ptr);
if (VAR_3 < 0)
return -1;
while (s->gen_opc_instr_start[VAR_3] == 0) {
VAR_3--;
}
VAR_0->icount_decr.u16.low -= s->gen_opc_icount[VAR_3];
restore_state_to_opc(env, VAR_1, VAR_3);
#ifdef CONFIG_PROFILER
s->restore_time += profile_getclock() - ti;
s->restore_count++;
#endif
return 0;
}
| [
"static int FUNC_0(CPUState *VAR_0, TranslationBlock *VAR_1,\nuintptr_t VAR_2)\n{",
"CPUArchState *env = VAR_0->env_ptr;",
"TCGContext *s = &tcg_ctx;",
"int VAR_3;",
"uintptr_t tc_ptr;",
"#ifdef CONFIG_PROFILER\nint64_t ti;",
"#endif\n#ifdef CONFIG_PROFILER\nti = profile_getclock();",
"#endif\ntcg_func_start(s);",
"gen_intermediate_code_pc(env, VAR_1);",
"if (use_icount) {",
"VAR_0->icount_decr.u16.low += VAR_1->icount;",
"VAR_0->can_do_io = 0;",
"}",
"tc_ptr = (uintptr_t)VAR_1->tc_ptr;",
"if (VAR_2 < tc_ptr)\nreturn -1;",
"s->tb_next_offset = VAR_1->tb_next_offset;",
"#ifdef USE_DIRECT_JUMP\ns->tb_jmp_offset = VAR_1->tb_jmp_offset;",
"s->tb_next = NULL;",
"#else\ns->tb_jmp_offset = NULL;",
"s->tb_next = VAR_1->tb_next;",
"#endif\nVAR_3 = tcg_gen_code_search_pc(s, (tcg_insn_unit *)tc_ptr,\nVAR_2 - tc_ptr);",
"if (VAR_3 < 0)\nreturn -1;",
"while (s->gen_opc_instr_start[VAR_3] == 0) {",
"VAR_3--;",
"}",
"VAR_0->icount_decr.u16.low -= s->gen_opc_icount[VAR_3];",
"restore_state_to_opc(env, VAR_1, VAR_3);",
"#ifdef CONFIG_PROFILER\ns->restore_time += profile_getclock() - ti;",
"s->restore_count++;",
"#endif\nreturn 0;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15,
17
],
[
19,
23,
25
],
[
27,
29
],
[
33
],
[
37
],
[
41
],
[
45
],
[
47
],
[
53
],
[
55,
57
],
[
61
],
[
63,
65
],
[
67
],
[
69,
71
],
[
73
],
[
75,
77,
79
],
[
81,
83
],
[
87
],
[
89
],
[
91
],
[
93
],
[
97
],
[
101,
103
],
[
105
],
[
107,
109
],
[
111
]
] |
21,352 | static uint8_t virtio_scsi_do_command(QVirtIOSCSI *vs, const uint8_t *cdb,
const uint8_t *data_in,
size_t data_in_len,
uint8_t *data_out, size_t data_out_len)
{
QVirtQueue *vq;
QVirtIOSCSICmdReq req = { { 0 } };
QVirtIOSCSICmdResp resp = { .response = 0xff, .status = 0xff };
uint64_t req_addr, resp_addr, data_in_addr = 0, data_out_addr = 0;
uint8_t response;
uint32_t free_head;
vq = vs->vq[2];
req.lun[0] = 1; /* Select LUN */
req.lun[1] = 1; /* Select target 1 */
memcpy(req.cdb, cdb, CDB_SIZE);
/* XXX: Fix endian if any multi-byte field in req/resp is used */
/* Add request header */
req_addr = qvirtio_scsi_alloc(vs, sizeof(req), &req);
free_head = qvirtqueue_add(vq, req_addr, sizeof(req), false, true);
if (data_out_len) {
data_out_addr = qvirtio_scsi_alloc(vs, data_out_len, data_out);
qvirtqueue_add(vq, data_out_addr, data_out_len, false, true);
}
/* Add response header */
resp_addr = qvirtio_scsi_alloc(vs, sizeof(resp), &resp);
qvirtqueue_add(vq, resp_addr, sizeof(resp), true, !!data_in_len);
if (data_in_len) {
data_in_addr = qvirtio_scsi_alloc(vs, data_in_len, data_in);
qvirtqueue_add(vq, data_in_addr, data_in_len, true, false);
}
qvirtqueue_kick(&qvirtio_pci, vs->dev, vq, free_head);
qvirtio_wait_queue_isr(&qvirtio_pci, vs->dev, vq, QVIRTIO_SCSI_TIMEOUT_US);
response = readb(resp_addr + offsetof(QVirtIOSCSICmdResp, response));
guest_free(vs->alloc, req_addr);
guest_free(vs->alloc, resp_addr);
guest_free(vs->alloc, data_in_addr);
guest_free(vs->alloc, data_out_addr);
return response;
}
| false | qemu | 4bb7b0daf8ea34bcc582642d35a2e4902f7841db | static uint8_t virtio_scsi_do_command(QVirtIOSCSI *vs, const uint8_t *cdb,
const uint8_t *data_in,
size_t data_in_len,
uint8_t *data_out, size_t data_out_len)
{
QVirtQueue *vq;
QVirtIOSCSICmdReq req = { { 0 } };
QVirtIOSCSICmdResp resp = { .response = 0xff, .status = 0xff };
uint64_t req_addr, resp_addr, data_in_addr = 0, data_out_addr = 0;
uint8_t response;
uint32_t free_head;
vq = vs->vq[2];
req.lun[0] = 1;
req.lun[1] = 1;
memcpy(req.cdb, cdb, CDB_SIZE);
req_addr = qvirtio_scsi_alloc(vs, sizeof(req), &req);
free_head = qvirtqueue_add(vq, req_addr, sizeof(req), false, true);
if (data_out_len) {
data_out_addr = qvirtio_scsi_alloc(vs, data_out_len, data_out);
qvirtqueue_add(vq, data_out_addr, data_out_len, false, true);
}
resp_addr = qvirtio_scsi_alloc(vs, sizeof(resp), &resp);
qvirtqueue_add(vq, resp_addr, sizeof(resp), true, !!data_in_len);
if (data_in_len) {
data_in_addr = qvirtio_scsi_alloc(vs, data_in_len, data_in);
qvirtqueue_add(vq, data_in_addr, data_in_len, true, false);
}
qvirtqueue_kick(&qvirtio_pci, vs->dev, vq, free_head);
qvirtio_wait_queue_isr(&qvirtio_pci, vs->dev, vq, QVIRTIO_SCSI_TIMEOUT_US);
response = readb(resp_addr + offsetof(QVirtIOSCSICmdResp, response));
guest_free(vs->alloc, req_addr);
guest_free(vs->alloc, resp_addr);
guest_free(vs->alloc, data_in_addr);
guest_free(vs->alloc, data_out_addr);
return response;
}
| {
"code": [],
"line_no": []
} | static uint8_t FUNC_0(QVirtIOSCSI *vs, const uint8_t *cdb,
const uint8_t *data_in,
size_t data_in_len,
uint8_t *data_out, size_t data_out_len)
{
QVirtQueue *vq;
QVirtIOSCSICmdReq req = { { 0 } };
QVirtIOSCSICmdResp resp = { .response = 0xff, .status = 0xff };
uint64_t req_addr, resp_addr, data_in_addr = 0, data_out_addr = 0;
uint8_t response;
uint32_t free_head;
vq = vs->vq[2];
req.lun[0] = 1;
req.lun[1] = 1;
memcpy(req.cdb, cdb, CDB_SIZE);
req_addr = qvirtio_scsi_alloc(vs, sizeof(req), &req);
free_head = qvirtqueue_add(vq, req_addr, sizeof(req), false, true);
if (data_out_len) {
data_out_addr = qvirtio_scsi_alloc(vs, data_out_len, data_out);
qvirtqueue_add(vq, data_out_addr, data_out_len, false, true);
}
resp_addr = qvirtio_scsi_alloc(vs, sizeof(resp), &resp);
qvirtqueue_add(vq, resp_addr, sizeof(resp), true, !!data_in_len);
if (data_in_len) {
data_in_addr = qvirtio_scsi_alloc(vs, data_in_len, data_in);
qvirtqueue_add(vq, data_in_addr, data_in_len, true, false);
}
qvirtqueue_kick(&qvirtio_pci, vs->dev, vq, free_head);
qvirtio_wait_queue_isr(&qvirtio_pci, vs->dev, vq, QVIRTIO_SCSI_TIMEOUT_US);
response = readb(resp_addr + offsetof(QVirtIOSCSICmdResp, response));
guest_free(vs->alloc, req_addr);
guest_free(vs->alloc, resp_addr);
guest_free(vs->alloc, data_in_addr);
guest_free(vs->alloc, data_out_addr);
return response;
}
| [
"static uint8_t FUNC_0(QVirtIOSCSI *vs, const uint8_t *cdb,\nconst uint8_t *data_in,\nsize_t data_in_len,\nuint8_t *data_out, size_t data_out_len)\n{",
"QVirtQueue *vq;",
"QVirtIOSCSICmdReq req = { { 0 } };",
"QVirtIOSCSICmdResp resp = { .response = 0xff, .status = 0xff };",
"uint64_t req_addr, resp_addr, data_in_addr = 0, data_out_addr = 0;",
"uint8_t response;",
"uint32_t free_head;",
"vq = vs->vq[2];",
"req.lun[0] = 1;",
"req.lun[1] = 1;",
"memcpy(req.cdb, cdb, CDB_SIZE);",
"req_addr = qvirtio_scsi_alloc(vs, sizeof(req), &req);",
"free_head = qvirtqueue_add(vq, req_addr, sizeof(req), false, true);",
"if (data_out_len) {",
"data_out_addr = qvirtio_scsi_alloc(vs, data_out_len, data_out);",
"qvirtqueue_add(vq, data_out_addr, data_out_len, false, true);",
"}",
"resp_addr = qvirtio_scsi_alloc(vs, sizeof(resp), &resp);",
"qvirtqueue_add(vq, resp_addr, sizeof(resp), true, !!data_in_len);",
"if (data_in_len) {",
"data_in_addr = qvirtio_scsi_alloc(vs, data_in_len, data_in);",
"qvirtqueue_add(vq, data_in_addr, data_in_len, true, false);",
"}",
"qvirtqueue_kick(&qvirtio_pci, vs->dev, vq, free_head);",
"qvirtio_wait_queue_isr(&qvirtio_pci, vs->dev, vq, QVIRTIO_SCSI_TIMEOUT_US);",
"response = readb(resp_addr + offsetof(QVirtIOSCSICmdResp, response));",
"guest_free(vs->alloc, req_addr);",
"guest_free(vs->alloc, resp_addr);",
"guest_free(vs->alloc, data_in_addr);",
"guest_free(vs->alloc, data_out_addr);",
"return response;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7,
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
25
],
[
29
],
[
31
],
[
33
],
[
43
],
[
45
],
[
49
],
[
51
],
[
53
],
[
55
],
[
61
],
[
63
],
[
67
],
[
69
],
[
71
],
[
73
],
[
77
],
[
79
],
[
83
],
[
87
],
[
89
],
[
91
],
[
93
],
[
95
],
[
97
]
] |
21,353 | static uint64_t pic_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
HeathrowPICS *s = opaque;
HeathrowPIC *pic;
unsigned int n;
uint32_t value;
n = ((addr & 0xfff) - 0x10) >> 4;
if (n >= 2) {
value = 0;
} else {
pic = &s->pics[n];
switch(addr & 0xf) {
case 0x0:
value = pic->events;
break;
case 0x4:
value = pic->mask;
break;
case 0xc:
value = pic->levels;
break;
default:
value = 0;
break;
}
}
PIC_DPRINTF("readl: " TARGET_FMT_plx " %u: %08x\n", addr, n, value);
return value;
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | static uint64_t pic_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
HeathrowPICS *s = opaque;
HeathrowPIC *pic;
unsigned int n;
uint32_t value;
n = ((addr & 0xfff) - 0x10) >> 4;
if (n >= 2) {
value = 0;
} else {
pic = &s->pics[n];
switch(addr & 0xf) {
case 0x0:
value = pic->events;
break;
case 0x4:
value = pic->mask;
break;
case 0xc:
value = pic->levels;
break;
default:
value = 0;
break;
}
}
PIC_DPRINTF("readl: " TARGET_FMT_plx " %u: %08x\n", addr, n, value);
return value;
}
| {
"code": [],
"line_no": []
} | static uint64_t FUNC_0(void *opaque, target_phys_addr_t addr,
unsigned size)
{
HeathrowPICS *s = opaque;
HeathrowPIC *pic;
unsigned int VAR_0;
uint32_t value;
VAR_0 = ((addr & 0xfff) - 0x10) >> 4;
if (VAR_0 >= 2) {
value = 0;
} else {
pic = &s->pics[VAR_0];
switch(addr & 0xf) {
case 0x0:
value = pic->events;
break;
case 0x4:
value = pic->mask;
break;
case 0xc:
value = pic->levels;
break;
default:
value = 0;
break;
}
}
PIC_DPRINTF("readl: " TARGET_FMT_plx " %u: %08x\VAR_0", addr, VAR_0, value);
return value;
}
| [
"static uint64_t FUNC_0(void *opaque, target_phys_addr_t addr,\nunsigned size)\n{",
"HeathrowPICS *s = opaque;",
"HeathrowPIC *pic;",
"unsigned int VAR_0;",
"uint32_t value;",
"VAR_0 = ((addr & 0xfff) - 0x10) >> 4;",
"if (VAR_0 >= 2) {",
"value = 0;",
"} else {",
"pic = &s->pics[VAR_0];",
"switch(addr & 0xf) {",
"case 0x0:\nvalue = pic->events;",
"break;",
"case 0x4:\nvalue = pic->mask;",
"break;",
"case 0xc:\nvalue = pic->levels;",
"break;",
"default:\nvalue = 0;",
"break;",
"}",
"}",
"PIC_DPRINTF(\"readl: \" TARGET_FMT_plx \" %u: %08x\\VAR_0\", addr, VAR_0, value);",
"return value;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29,
31
],
[
33
],
[
35,
37
],
[
39
],
[
41,
43
],
[
45
],
[
47,
49
],
[
51
],
[
53
],
[
55
],
[
57
],
[
59
],
[
61
]
] |
21,354 | static void pc_dimm_realize(DeviceState *dev, Error **errp)
{
PCDIMMDevice *dimm = PC_DIMM(dev);
if (!dimm->hostmem) {
error_setg(errp, "'" PC_DIMM_MEMDEV_PROP "' property is not set");
return;
}
if (dimm->node >= nb_numa_nodes) {
error_setg(errp, "'DIMM property " PC_DIMM_NODE_PROP " has value %"
PRIu32 "' which exceeds the number of numa nodes: %d",
dimm->node, nb_numa_nodes);
return;
}
}
| false | qemu | fc50ff0666315be5120c70ad00cd0b0097484b84 | static void pc_dimm_realize(DeviceState *dev, Error **errp)
{
PCDIMMDevice *dimm = PC_DIMM(dev);
if (!dimm->hostmem) {
error_setg(errp, "'" PC_DIMM_MEMDEV_PROP "' property is not set");
return;
}
if (dimm->node >= nb_numa_nodes) {
error_setg(errp, "'DIMM property " PC_DIMM_NODE_PROP " has value %"
PRIu32 "' which exceeds the number of numa nodes: %d",
dimm->node, nb_numa_nodes);
return;
}
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(DeviceState *VAR_0, Error **VAR_1)
{
PCDIMMDevice *dimm = PC_DIMM(VAR_0);
if (!dimm->hostmem) {
error_setg(VAR_1, "'" PC_DIMM_MEMDEV_PROP "' property is not set");
return;
}
if (dimm->node >= nb_numa_nodes) {
error_setg(VAR_1, "'DIMM property " PC_DIMM_NODE_PROP " has value %"
PRIu32 "' which exceeds the number of numa nodes: %d",
dimm->node, nb_numa_nodes);
return;
}
}
| [
"static void FUNC_0(DeviceState *VAR_0, Error **VAR_1)\n{",
"PCDIMMDevice *dimm = PC_DIMM(VAR_0);",
"if (!dimm->hostmem) {",
"error_setg(VAR_1, \"'\" PC_DIMM_MEMDEV_PROP \"' property is not set\");",
"return;",
"}",
"if (dimm->node >= nb_numa_nodes) {",
"error_setg(VAR_1, \"'DIMM property \" PC_DIMM_NODE_PROP \" has value %\"\nPRIu32 \"' which exceeds the number of numa nodes: %d\",\ndimm->node, nb_numa_nodes);",
"return;",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19,
21,
23
],
[
25
],
[
27
],
[
29
]
] |
21,355 | static void vtd_reset_context_cache(IntelIOMMUState *s)
{
VTDAddressSpace **pvtd_as;
VTDAddressSpace *vtd_as;
uint32_t bus_it;
uint32_t devfn_it;
VTD_DPRINTF(CACHE, "global context_cache_gen=1");
for (bus_it = 0; bus_it < VTD_PCI_BUS_MAX; ++bus_it) {
pvtd_as = s->address_spaces[bus_it];
if (!pvtd_as) {
continue;
}
for (devfn_it = 0; devfn_it < VTD_PCI_DEVFN_MAX; ++devfn_it) {
vtd_as = pvtd_as[devfn_it];
if (!vtd_as) {
continue;
}
vtd_as->context_cache_entry.context_cache_gen = 0;
}
}
s->context_cache_gen = 1;
}
| false | qemu | 7df953bd456da45f761064974820ab5c3fd7b2aa | static void vtd_reset_context_cache(IntelIOMMUState *s)
{
VTDAddressSpace **pvtd_as;
VTDAddressSpace *vtd_as;
uint32_t bus_it;
uint32_t devfn_it;
VTD_DPRINTF(CACHE, "global context_cache_gen=1");
for (bus_it = 0; bus_it < VTD_PCI_BUS_MAX; ++bus_it) {
pvtd_as = s->address_spaces[bus_it];
if (!pvtd_as) {
continue;
}
for (devfn_it = 0; devfn_it < VTD_PCI_DEVFN_MAX; ++devfn_it) {
vtd_as = pvtd_as[devfn_it];
if (!vtd_as) {
continue;
}
vtd_as->context_cache_entry.context_cache_gen = 0;
}
}
s->context_cache_gen = 1;
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(IntelIOMMUState *VAR_0)
{
VTDAddressSpace **pvtd_as;
VTDAddressSpace *vtd_as;
uint32_t bus_it;
uint32_t devfn_it;
VTD_DPRINTF(CACHE, "global context_cache_gen=1");
for (bus_it = 0; bus_it < VTD_PCI_BUS_MAX; ++bus_it) {
pvtd_as = VAR_0->address_spaces[bus_it];
if (!pvtd_as) {
continue;
}
for (devfn_it = 0; devfn_it < VTD_PCI_DEVFN_MAX; ++devfn_it) {
vtd_as = pvtd_as[devfn_it];
if (!vtd_as) {
continue;
}
vtd_as->context_cache_entry.context_cache_gen = 0;
}
}
VAR_0->context_cache_gen = 1;
}
| [
"static void FUNC_0(IntelIOMMUState *VAR_0)\n{",
"VTDAddressSpace **pvtd_as;",
"VTDAddressSpace *vtd_as;",
"uint32_t bus_it;",
"uint32_t devfn_it;",
"VTD_DPRINTF(CACHE, \"global context_cache_gen=1\");",
"for (bus_it = 0; bus_it < VTD_PCI_BUS_MAX; ++bus_it) {",
"pvtd_as = VAR_0->address_spaces[bus_it];",
"if (!pvtd_as) {",
"continue;",
"}",
"for (devfn_it = 0; devfn_it < VTD_PCI_DEVFN_MAX; ++devfn_it) {",
"vtd_as = pvtd_as[devfn_it];",
"if (!vtd_as) {",
"continue;",
"}",
"vtd_as->context_cache_entry.context_cache_gen = 0;",
"}",
"}",
"VAR_0->context_cache_gen = 1;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45
]
] |
21,356 | static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
uint32_t val, int len)
{
VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
if (PCI_COMMAND == address) {
if (!(val & PCI_COMMAND_MASTER)) {
proxy->vdev->status &= !VIRTIO_CONFIG_S_DRIVER_OK;
}
}
pci_default_write_config(pci_dev, address, val, len);
if(proxy->vdev->nvectors)
msix_write_config(pci_dev, address, val, len);
}
| false | qemu | 85352471ce78d73b8306822959caace2e8880535 | static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
uint32_t val, int len)
{
VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
if (PCI_COMMAND == address) {
if (!(val & PCI_COMMAND_MASTER)) {
proxy->vdev->status &= !VIRTIO_CONFIG_S_DRIVER_OK;
}
}
pci_default_write_config(pci_dev, address, val, len);
if(proxy->vdev->nvectors)
msix_write_config(pci_dev, address, val, len);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(PCIDevice *VAR_0, uint32_t VAR_1,
uint32_t VAR_2, int VAR_3)
{
VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, VAR_0, VAR_0);
if (PCI_COMMAND == VAR_1) {
if (!(VAR_2 & PCI_COMMAND_MASTER)) {
proxy->vdev->status &= !VIRTIO_CONFIG_S_DRIVER_OK;
}
}
pci_default_write_config(VAR_0, VAR_1, VAR_2, VAR_3);
if(proxy->vdev->nvectors)
msix_write_config(VAR_0, VAR_1, VAR_2, VAR_3);
}
| [
"static void FUNC_0(PCIDevice *VAR_0, uint32_t VAR_1,\nuint32_t VAR_2, int VAR_3)\n{",
"VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, VAR_0, VAR_0);",
"if (PCI_COMMAND == VAR_1) {",
"if (!(VAR_2 & PCI_COMMAND_MASTER)) {",
"proxy->vdev->status &= !VIRTIO_CONFIG_S_DRIVER_OK;",
"}",
"}",
"pci_default_write_config(VAR_0, VAR_1, VAR_2, VAR_3);",
"if(proxy->vdev->nvectors)\nmsix_write_config(VAR_0, VAR_1, VAR_2, VAR_3);",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
23
],
[
25,
27
],
[
29
]
] |
21,357 | void fpu_clear_exceptions(void)
{
struct __attribute__((packed)) {
uint16_t fpuc;
uint16_t dummy1;
uint16_t fpus;
uint16_t dummy2;
uint16_t fptag;
uint16_t dummy3;
uint32_t ignored[4];
long double fpregs[8];
} float_env32;
asm volatile ("fnstenv %0\n" : : "m" (float_env32));
float_env32.fpus &= ~0x7f;
asm volatile ("fldenv %0\n" : : "m" (float_env32));
}
| false | qemu | 541dc0d47f10973c241e9955afc2aefc96adec51 | void fpu_clear_exceptions(void)
{
struct __attribute__((packed)) {
uint16_t fpuc;
uint16_t dummy1;
uint16_t fpus;
uint16_t dummy2;
uint16_t fptag;
uint16_t dummy3;
uint32_t ignored[4];
long double fpregs[8];
} float_env32;
asm volatile ("fnstenv %0\n" : : "m" (float_env32));
float_env32.fpus &= ~0x7f;
asm volatile ("fldenv %0\n" : : "m" (float_env32));
}
| {
"code": [],
"line_no": []
} | void FUNC_0(void)
{
struct __attribute__((packed)) {
uint16_t fpuc;
uint16_t dummy1;
uint16_t fpus;
uint16_t dummy2;
uint16_t fptag;
uint16_t dummy3;
uint32_t ignored[4];
long double fpregs[8];
} VAR_0;
asm volatile ("fnstenv %0\n" : : "m" (VAR_0));
VAR_0.fpus &= ~0x7f;
asm volatile ("fldenv %0\n" : : "m" (VAR_0));
}
| [
"void FUNC_0(void)\n{",
"struct __attribute__((packed)) {",
"uint16_t fpuc;",
"uint16_t dummy1;",
"uint16_t fpus;",
"uint16_t dummy2;",
"uint16_t fptag;",
"uint16_t dummy3;",
"uint32_t ignored[4];",
"long double fpregs[8];",
"} VAR_0;",
"asm volatile (\"fnstenv %0\\n\" : : \"m\" (VAR_0));",
"VAR_0.fpus &= ~0x7f;",
"asm volatile (\"fldenv %0\\n\" : : \"m\" (VAR_0));",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
27
],
[
29
],
[
31
],
[
33
]
] |
21,358 | static unsigned int dec_movs_r(DisasContext *dc)
{
TCGv t0;
int size = memsize_z(dc);
DIS(fprintf (logfile, "movs.%c $r%u, $r%u\n",
memsize_char(size),
dc->op1, dc->op2));
cris_cc_mask(dc, CC_MASK_NZ);
t0 = tcg_temp_new(TCG_TYPE_TL);
/* Size can only be qi or hi. */
t_gen_sext(t0, cpu_R[dc->op1], size);
cris_alu(dc, CC_OP_MOVE,
cpu_R[dc->op2], cpu_R[dc->op1], t0, 4);
tcg_temp_free(t0);
return 2;
}
| false | qemu | a7812ae412311d7d47f8aa85656faadac9d64b56 | static unsigned int dec_movs_r(DisasContext *dc)
{
TCGv t0;
int size = memsize_z(dc);
DIS(fprintf (logfile, "movs.%c $r%u, $r%u\n",
memsize_char(size),
dc->op1, dc->op2));
cris_cc_mask(dc, CC_MASK_NZ);
t0 = tcg_temp_new(TCG_TYPE_TL);
t_gen_sext(t0, cpu_R[dc->op1], size);
cris_alu(dc, CC_OP_MOVE,
cpu_R[dc->op2], cpu_R[dc->op1], t0, 4);
tcg_temp_free(t0);
return 2;
}
| {
"code": [],
"line_no": []
} | static unsigned int FUNC_0(DisasContext *VAR_0)
{
TCGv t0;
int VAR_1 = memsize_z(VAR_0);
DIS(fprintf (logfile, "movs.%c $r%u, $r%u\n",
memsize_char(VAR_1),
VAR_0->op1, VAR_0->op2));
cris_cc_mask(VAR_0, CC_MASK_NZ);
t0 = tcg_temp_new(TCG_TYPE_TL);
t_gen_sext(t0, cpu_R[VAR_0->op1], VAR_1);
cris_alu(VAR_0, CC_OP_MOVE,
cpu_R[VAR_0->op2], cpu_R[VAR_0->op1], t0, 4);
tcg_temp_free(t0);
return 2;
}
| [
"static unsigned int FUNC_0(DisasContext *VAR_0)\n{",
"TCGv t0;",
"int VAR_1 = memsize_z(VAR_0);",
"DIS(fprintf (logfile, \"movs.%c $r%u, $r%u\\n\",\nmemsize_char(VAR_1),\nVAR_0->op1, VAR_0->op2));",
"cris_cc_mask(VAR_0, CC_MASK_NZ);",
"t0 = tcg_temp_new(TCG_TYPE_TL);",
"t_gen_sext(t0, cpu_R[VAR_0->op1], VAR_1);",
"cris_alu(VAR_0, CC_OP_MOVE,\ncpu_R[VAR_0->op2], cpu_R[VAR_0->op1], t0, 4);",
"tcg_temp_free(t0);",
"return 2;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9,
11,
13
],
[
17
],
[
19
],
[
23
],
[
25,
27
],
[
29
],
[
31
],
[
33
]
] |
21,359 | void hpet_pit_disable(void) {
PITChannelState *s;
s = &pit_state.channels[0];
qemu_del_timer(s->irq_timer);
}
| false | qemu | e0dd114c163bfba86a736dae00fb70758e1c0200 | void hpet_pit_disable(void) {
PITChannelState *s;
s = &pit_state.channels[0];
qemu_del_timer(s->irq_timer);
}
| {
"code": [],
"line_no": []
} | void FUNC_0(void) {
PITChannelState *s;
s = &pit_state.channels[0];
qemu_del_timer(s->irq_timer);
}
| [
"void FUNC_0(void) {",
"PITChannelState *s;",
"s = &pit_state.channels[0];",
"qemu_del_timer(s->irq_timer);",
"}"
] | [
0,
0,
0,
0,
0
] | [
[
1
],
[
3
],
[
5
],
[
7
],
[
9
]
] |
21,360 | void ff_h264_h_lpf_luma_inter_msa(uint8_t *data, int img_width,
int alpha, int beta, int8_t *tc)
{
uint8_t bs0 = 1;
uint8_t bs1 = 1;
uint8_t bs2 = 1;
uint8_t bs3 = 1;
if (tc[0] < 0)
bs0 = 0;
if (tc[1] < 0)
bs1 = 0;
if (tc[2] < 0)
bs2 = 0;
if (tc[3] < 0)
bs3 = 0;
avc_loopfilter_luma_inter_edge_ver_msa(data,
bs0, bs1, bs2, bs3,
tc[0], tc[1], tc[2], tc[3],
alpha, beta, img_width);
}
| false | FFmpeg | bcd7bf7eeb09a395cc01698842d1b8be9af483fc | void ff_h264_h_lpf_luma_inter_msa(uint8_t *data, int img_width,
int alpha, int beta, int8_t *tc)
{
uint8_t bs0 = 1;
uint8_t bs1 = 1;
uint8_t bs2 = 1;
uint8_t bs3 = 1;
if (tc[0] < 0)
bs0 = 0;
if (tc[1] < 0)
bs1 = 0;
if (tc[2] < 0)
bs2 = 0;
if (tc[3] < 0)
bs3 = 0;
avc_loopfilter_luma_inter_edge_ver_msa(data,
bs0, bs1, bs2, bs3,
tc[0], tc[1], tc[2], tc[3],
alpha, beta, img_width);
}
| {
"code": [],
"line_no": []
} | void FUNC_0(uint8_t *VAR_0, int VAR_1,
int VAR_2, int VAR_3, int8_t *VAR_4)
{
uint8_t bs0 = 1;
uint8_t bs1 = 1;
uint8_t bs2 = 1;
uint8_t bs3 = 1;
if (VAR_4[0] < 0)
bs0 = 0;
if (VAR_4[1] < 0)
bs1 = 0;
if (VAR_4[2] < 0)
bs2 = 0;
if (VAR_4[3] < 0)
bs3 = 0;
avc_loopfilter_luma_inter_edge_ver_msa(VAR_0,
bs0, bs1, bs2, bs3,
VAR_4[0], VAR_4[1], VAR_4[2], VAR_4[3],
VAR_2, VAR_3, VAR_1);
}
| [
"void FUNC_0(uint8_t *VAR_0, int VAR_1,\nint VAR_2, int VAR_3, int8_t *VAR_4)\n{",
"uint8_t bs0 = 1;",
"uint8_t bs1 = 1;",
"uint8_t bs2 = 1;",
"uint8_t bs3 = 1;",
"if (VAR_4[0] < 0)\nbs0 = 0;",
"if (VAR_4[1] < 0)\nbs1 = 0;",
"if (VAR_4[2] < 0)\nbs2 = 0;",
"if (VAR_4[3] < 0)\nbs3 = 0;",
"avc_loopfilter_luma_inter_edge_ver_msa(VAR_0,\nbs0, bs1, bs2, bs3,\nVAR_4[0], VAR_4[1], VAR_4[2], VAR_4[3],\nVAR_2, VAR_3, VAR_1);",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
17,
19
],
[
21,
23
],
[
25,
27
],
[
29,
31
],
[
35,
37,
39,
41
],
[
43
]
] |
21,361 | void bdrv_attach_aio_context(BlockDriverState *bs,
AioContext *new_context)
{
BdrvAioNotifier *ban;
if (!bs->drv) {
return;
}
bs->aio_context = new_context;
if (bs->backing) {
bdrv_attach_aio_context(bs->backing->bs, new_context);
}
if (bs->file) {
bdrv_attach_aio_context(bs->file->bs, new_context);
}
if (bs->drv->bdrv_attach_aio_context) {
bs->drv->bdrv_attach_aio_context(bs, new_context);
}
if (bs->io_limits_enabled) {
throttle_timers_attach_aio_context(&bs->throttle_timers, new_context);
}
QLIST_FOREACH(ban, &bs->aio_notifiers, list) {
ban->attached_aio_context(new_context, ban->opaque);
}
}
| false | qemu | a0d64a61db602696f4f1895a890c65eda5b3b618 | void bdrv_attach_aio_context(BlockDriverState *bs,
AioContext *new_context)
{
BdrvAioNotifier *ban;
if (!bs->drv) {
return;
}
bs->aio_context = new_context;
if (bs->backing) {
bdrv_attach_aio_context(bs->backing->bs, new_context);
}
if (bs->file) {
bdrv_attach_aio_context(bs->file->bs, new_context);
}
if (bs->drv->bdrv_attach_aio_context) {
bs->drv->bdrv_attach_aio_context(bs, new_context);
}
if (bs->io_limits_enabled) {
throttle_timers_attach_aio_context(&bs->throttle_timers, new_context);
}
QLIST_FOREACH(ban, &bs->aio_notifiers, list) {
ban->attached_aio_context(new_context, ban->opaque);
}
}
| {
"code": [],
"line_no": []
} | void FUNC_0(BlockDriverState *VAR_0,
AioContext *VAR_1)
{
BdrvAioNotifier *ban;
if (!VAR_0->drv) {
return;
}
VAR_0->aio_context = VAR_1;
if (VAR_0->backing) {
FUNC_0(VAR_0->backing->VAR_0, VAR_1);
}
if (VAR_0->file) {
FUNC_0(VAR_0->file->VAR_0, VAR_1);
}
if (VAR_0->drv->FUNC_0) {
VAR_0->drv->FUNC_0(VAR_0, VAR_1);
}
if (VAR_0->io_limits_enabled) {
throttle_timers_attach_aio_context(&VAR_0->throttle_timers, VAR_1);
}
QLIST_FOREACH(ban, &VAR_0->aio_notifiers, list) {
ban->attached_aio_context(VAR_1, ban->opaque);
}
}
| [
"void FUNC_0(BlockDriverState *VAR_0,\nAioContext *VAR_1)\n{",
"BdrvAioNotifier *ban;",
"if (!VAR_0->drv) {",
"return;",
"}",
"VAR_0->aio_context = VAR_1;",
"if (VAR_0->backing) {",
"FUNC_0(VAR_0->backing->VAR_0, VAR_1);",
"}",
"if (VAR_0->file) {",
"FUNC_0(VAR_0->file->VAR_0, VAR_1);",
"}",
"if (VAR_0->drv->FUNC_0) {",
"VAR_0->drv->FUNC_0(VAR_0, VAR_1);",
"}",
"if (VAR_0->io_limits_enabled) {",
"throttle_timers_attach_aio_context(&VAR_0->throttle_timers, VAR_1);",
"}",
"QLIST_FOREACH(ban, &VAR_0->aio_notifiers, list) {",
"ban->attached_aio_context(VAR_1, ban->opaque);",
"}",
"}"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
11
],
[
13
],
[
15
],
[
19
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45
],
[
49
],
[
51
],
[
53
],
[
55
]
] |
21,362 | uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
__func__, l, a1, a2);
return mvc_asc(env, l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
}
| false | qemu | a3084e8055067b3fe8ed653a609021d2ab368564 | uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
__func__, l, a1, a2);
return mvc_asc(env, l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
}
| {
"code": [],
"line_no": []
} | uint32_t FUNC_0(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
{
HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
__func__, l, a1, a2);
return mvc_asc(env, l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
}
| [
"uint32_t FUNC_0(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)\n{",
"HELPER_LOG(\"%s: %16\" PRIx64 \" %16\" PRIx64 \" %16\" PRIx64 \"\\n\",\n__func__, l, a1, a2);",
"return mvc_asc(env, l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);",
"}"
] | [
0,
0,
0,
0
] | [
[
1,
3
],
[
5,
7
],
[
11
],
[
13
]
] |