id (int32, values 0-27.3k) | func (string, length 26-142k) | target (bool, 2 classes) | project (string, 2 values) | commit_id (string, length 40)
---|---|---|---|---|
3,916 | MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
MigrationParameters *params;
MigrationState *s = migrate_get_current();
params = g_malloc0(sizeof(*params));
params->compress_level = s->parameters.compress_level;
params->compress_threads = s->parameters.compress_threads;
params->decompress_threads = s->parameters.decompress_threads;
params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
return params;
} | true | qemu | 69ef1f36b0f882fc5ba9491fb272fa5f83ac1d3d |
3,918 | static int kvm_ppc_register_host_cpu_type(void)
{
TypeInfo type_info = {
.name = TYPE_HOST_POWERPC_CPU,
.instance_init = kvmppc_host_cpu_initfn,
.class_init = kvmppc_host_cpu_class_init,
};
PowerPCCPUClass *pvr_pcc;
DeviceClass *dc;
pvr_pcc = kvm_ppc_get_host_cpu_class();
if (pvr_pcc == NULL) {
return -1;
}
type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
type_register(&type_info);
/* Register generic family CPU class for a family */
pvr_pcc = ppc_cpu_get_family_class(pvr_pcc);
dc = DEVICE_CLASS(pvr_pcc);
type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
type_info.name = g_strdup_printf("%s-"TYPE_POWERPC_CPU, dc->desc);
type_register(&type_info);
#if defined(TARGET_PPC64)
type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
type_info.parent = TYPE_SPAPR_CPU_CORE,
type_info.instance_size = sizeof(sPAPRCPUCore),
type_info.instance_init = spapr_cpu_core_host_initfn,
type_info.class_init = NULL;
type_register(&type_info);
g_free((void *)type_info.name);
/* Register generic spapr CPU family class for current host CPU type */
type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, dc->desc);
type_register(&type_info);
g_free((void *)type_info.name);
#endif
return 0;
}
| false | qemu | 7ebaf7955603cc50988e0eafd5e6074320fefc70 |
3,919 | static inline int tcg_gen_code_common(TCGContext *s,
tcg_insn_unit *gen_code_buf,
long search_pc)
{
int oi, oi_next;
#ifdef DEBUG_DISAS
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
qemu_log("OP:\n");
tcg_dump_ops(s);
qemu_log("\n");
}
#endif
#ifdef CONFIG_PROFILER
s->opt_time -= profile_getclock();
#endif
#ifdef USE_TCG_OPTIMIZATIONS
tcg_optimize(s);
#endif
#ifdef CONFIG_PROFILER
s->opt_time += profile_getclock();
s->la_time -= profile_getclock();
#endif
tcg_liveness_analysis(s);
#ifdef CONFIG_PROFILER
s->la_time += profile_getclock();
#endif
#ifdef DEBUG_DISAS
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) {
qemu_log("OP after optimization and liveness analysis:\n");
tcg_dump_ops(s);
qemu_log("\n");
}
#endif
tcg_reg_alloc_start(s);
s->code_buf = gen_code_buf;
s->code_ptr = gen_code_buf;
tcg_out_tb_init(s);
for (oi = s->gen_first_op_idx; oi >= 0; oi = oi_next) {
TCGOp * const op = &s->gen_op_buf[oi];
TCGArg * const args = &s->gen_opparam_buf[op->args];
TCGOpcode opc = op->opc;
const TCGOpDef *def = &tcg_op_defs[opc];
uint16_t dead_args = s->op_dead_args[oi];
uint8_t sync_args = s->op_sync_args[oi];
oi_next = op->next;
#ifdef CONFIG_PROFILER
tcg_table_op_count[opc]++;
#endif
switch (opc) {
case INDEX_op_mov_i32:
case INDEX_op_mov_i64:
tcg_reg_alloc_mov(s, def, args, dead_args, sync_args);
break;
case INDEX_op_movi_i32:
case INDEX_op_movi_i64:
tcg_reg_alloc_movi(s, args, dead_args, sync_args);
break;
case INDEX_op_debug_insn_start:
break;
case INDEX_op_discard:
temp_dead(s, args[0]);
break;
case INDEX_op_set_label:
tcg_reg_alloc_bb_end(s, s->reserved_regs);
tcg_out_label(s, args[0], s->code_ptr);
break;
case INDEX_op_call:
tcg_reg_alloc_call(s, op->callo, op->calli, args,
dead_args, sync_args);
break;
default:
/* Sanity check that we've not introduced any unhandled opcodes. */
if (def->flags & TCG_OPF_NOT_PRESENT) {
tcg_abort();
}
/* Note: in order to speed up the code, it would be much
faster to have specialized register allocator functions for
some common argument patterns */
tcg_reg_alloc_op(s, def, opc, args, dead_args, sync_args);
break;
}
if (search_pc >= 0 && search_pc < tcg_current_code_size(s)) {
return oi;
}
#ifndef NDEBUG
check_regs(s);
#endif
}
/* Generate TB finalization at the end of block */
tcg_out_tb_finalize(s);
return -1;
}
| false | qemu | bec1631100323fac0900aea71043d5c4e22fc2fa |
3,922 | void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
{
QEMUTimerList *timer_list = ts->timer_list;
bool rearm;
qemu_mutex_lock(&timer_list->active_timers_lock);
if (ts->expire_time == -1 || ts->expire_time > expire_time) {
if (ts->expire_time != -1) {
timer_del_locked(timer_list, ts);
}
rearm = timer_mod_ns_locked(timer_list, ts, expire_time);
} else {
rearm = false;
}
qemu_mutex_unlock(&timer_list->active_timers_lock);
if (rearm) {
timerlist_rearm(timer_list);
}
}
| false | qemu | c2b38b277a7882a592f4f2ec955084b2b756daaa |
3,923 | static void pxa2xx_rtc_write(void *opaque, hwaddr addr,
uint64_t value64, unsigned size)
{
PXA2xxRTCState *s = (PXA2xxRTCState *) opaque;
uint32_t value = value64;
switch (addr) {
case RTTR:
if (!(s->rttr & (1U << 31))) {
pxa2xx_rtc_hzupdate(s);
s->rttr = value;
pxa2xx_rtc_alarm_update(s, s->rtsr);
}
break;
case RTSR:
if ((s->rtsr ^ value) & (1 << 15))
pxa2xx_rtc_piupdate(s);
if ((s->rtsr ^ value) & (1 << 12))
pxa2xx_rtc_swupdate(s);
if (((s->rtsr ^ value) & 0x4aac) | (value & ~0xdaac))
pxa2xx_rtc_alarm_update(s, value);
s->rtsr = (value & 0xdaac) | (s->rtsr & ~(value & ~0xdaac));
pxa2xx_rtc_int_update(s);
break;
case RTAR:
s->rtar = value;
pxa2xx_rtc_alarm_update(s, s->rtsr);
break;
case RDAR1:
s->rdar1 = value;
pxa2xx_rtc_alarm_update(s, s->rtsr);
break;
case RDAR2:
s->rdar2 = value;
pxa2xx_rtc_alarm_update(s, s->rtsr);
break;
case RYAR1:
s->ryar1 = value;
pxa2xx_rtc_alarm_update(s, s->rtsr);
break;
case RYAR2:
s->ryar2 = value;
pxa2xx_rtc_alarm_update(s, s->rtsr);
break;
case SWAR1:
pxa2xx_rtc_swupdate(s);
s->swar1 = value;
s->last_swcr = 0;
pxa2xx_rtc_alarm_update(s, s->rtsr);
break;
case SWAR2:
s->swar2 = value;
pxa2xx_rtc_alarm_update(s, s->rtsr);
break;
case PIAR:
s->piar = value;
pxa2xx_rtc_alarm_update(s, s->rtsr);
break;
case RCNR:
pxa2xx_rtc_hzupdate(s);
s->last_rcnr = value;
pxa2xx_rtc_alarm_update(s, s->rtsr);
break;
case RDCR:
pxa2xx_rtc_hzupdate(s);
s->last_rdcr = value;
pxa2xx_rtc_alarm_update(s, s->rtsr);
break;
case RYCR:
s->last_rycr = value;
break;
case SWCR:
pxa2xx_rtc_swupdate(s);
s->last_swcr = value;
pxa2xx_rtc_alarm_update(s, s->rtsr);
break;
case RTCPICR:
pxa2xx_rtc_piupdate(s);
s->last_rtcpicr = value & 0xffff;
pxa2xx_rtc_alarm_update(s, s->rtsr);
break;
default:
printf("%s: Bad register " REG_FMT "\n", __FUNCTION__, addr);
}
}
| false | qemu | a89f364ae8740dfc31b321eed9ee454e996dc3c1 |
3,925 | static void omap_disc_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size)
{
struct omap_dss_s *s = (struct omap_dss_s *) opaque;
if (size != 4) {
omap_badwidth_write32(opaque, addr, value);
return;
}
switch (addr) {
case 0x010: /* DISPC_SYSCONFIG */
if (value & 2) /* SOFTRESET */
omap_dss_reset(s);
s->dispc.idlemode = value & 0x301b;
break;
case 0x018: /* DISPC_IRQSTATUS */
s->dispc.irqst &= ~value;
omap_dispc_interrupt_update(s);
break;
case 0x01c: /* DISPC_IRQENABLE */
s->dispc.irqen = value & 0xffff;
omap_dispc_interrupt_update(s);
break;
case 0x040: /* DISPC_CONTROL */
s->dispc.control = value & 0x07ff9fff;
s->dig.enable = (value >> 1) & 1;
s->lcd.enable = (value >> 0) & 1;
if (value & (1 << 12)) /* OVERLAY_OPTIMIZATION */
if (!((s->dispc.l[1].attr | s->dispc.l[2].attr) & 1)) {
fprintf(stderr, "%s: Overlay Optimization when no overlay "
"region effectively exists leads to "
"unpredictable behaviour!\n", __func__);
}
if (value & (1 << 6)) { /* GODIGITAL */
/* XXX: Shadowed fields are:
* s->dispc.config
* s->dispc.capable
* s->dispc.bg[0]
* s->dispc.bg[1]
* s->dispc.trans[0]
* s->dispc.trans[1]
* s->dispc.line
* s->dispc.timing[0]
* s->dispc.timing[1]
* s->dispc.timing[2]
* s->dispc.timing[3]
* s->lcd.nx
* s->lcd.ny
* s->dig.nx
* s->dig.ny
* s->dispc.l[0].addr[0]
* s->dispc.l[0].addr[1]
* s->dispc.l[0].addr[2]
* s->dispc.l[0].posx
* s->dispc.l[0].posy
* s->dispc.l[0].nx
* s->dispc.l[0].ny
* s->dispc.l[0].tresh
* s->dispc.l[0].rowinc
* s->dispc.l[0].colinc
* s->dispc.l[0].wininc
* All they need to be loaded here from their shadow registers.
*/
}
if (value & (1 << 5)) { /* GOLCD */
/* XXX: Likewise for LCD here. */
}
s->dispc.invalidate = 1;
break;
case 0x044: /* DISPC_CONFIG */
s->dispc.config = value & 0x3fff;
/* XXX:
* bits 2:1 (LOADMODE) reset to 0 after set to 1 and palette loaded
* bits 2:1 (LOADMODE) reset to 2 after set to 3 and palette loaded
*/
s->dispc.invalidate = 1;
break;
case 0x048: /* DISPC_CAPABLE */
s->dispc.capable = value & 0x3ff;
break;
case 0x04c: /* DISPC_DEFAULT_COLOR0 */
s->dispc.bg[0] = value & 0xffffff;
s->dispc.invalidate = 1;
break;
case 0x050: /* DISPC_DEFAULT_COLOR1 */
s->dispc.bg[1] = value & 0xffffff;
s->dispc.invalidate = 1;
break;
case 0x054: /* DISPC_TRANS_COLOR0 */
s->dispc.trans[0] = value & 0xffffff;
s->dispc.invalidate = 1;
break;
case 0x058: /* DISPC_TRANS_COLOR1 */
s->dispc.trans[1] = value & 0xffffff;
s->dispc.invalidate = 1;
break;
case 0x060: /* DISPC_LINE_NUMBER */
s->dispc.line = value & 0x7ff;
break;
case 0x064: /* DISPC_TIMING_H */
s->dispc.timing[0] = value & 0x0ff0ff3f;
break;
case 0x068: /* DISPC_TIMING_V */
s->dispc.timing[1] = value & 0x0ff0ff3f;
break;
case 0x06c: /* DISPC_POL_FREQ */
s->dispc.timing[2] = value & 0x0003ffff;
break;
case 0x070: /* DISPC_DIVISOR */
s->dispc.timing[3] = value & 0x00ff00ff;
break;
case 0x078: /* DISPC_SIZE_DIG */
s->dig.nx = ((value >> 0) & 0x7ff) + 1; /* PPL */
s->dig.ny = ((value >> 16) & 0x7ff) + 1; /* LPP */
s->dispc.invalidate = 1;
break;
case 0x07c: /* DISPC_SIZE_LCD */
s->lcd.nx = ((value >> 0) & 0x7ff) + 1; /* PPL */
s->lcd.ny = ((value >> 16) & 0x7ff) + 1; /* LPP */
s->dispc.invalidate = 1;
break;
case 0x080: /* DISPC_GFX_BA0 */
s->dispc.l[0].addr[0] = (hwaddr) value;
s->dispc.invalidate = 1;
break;
case 0x084: /* DISPC_GFX_BA1 */
s->dispc.l[0].addr[1] = (hwaddr) value;
s->dispc.invalidate = 1;
break;
case 0x088: /* DISPC_GFX_POSITION */
s->dispc.l[0].posx = ((value >> 0) & 0x7ff); /* GFXPOSX */
s->dispc.l[0].posy = ((value >> 16) & 0x7ff); /* GFXPOSY */
s->dispc.invalidate = 1;
break;
case 0x08c: /* DISPC_GFX_SIZE */
s->dispc.l[0].nx = ((value >> 0) & 0x7ff) + 1; /* GFXSIZEX */
s->dispc.l[0].ny = ((value >> 16) & 0x7ff) + 1; /* GFXSIZEY */
s->dispc.invalidate = 1;
break;
case 0x0a0: /* DISPC_GFX_ATTRIBUTES */
s->dispc.l[0].attr = value & 0x7ff;
if (value & (3 << 9))
fprintf(stderr, "%s: Big-endian pixel format not supported\n",
__FUNCTION__);
s->dispc.l[0].enable = value & 1;
s->dispc.l[0].bpp = (value >> 1) & 0xf;
s->dispc.invalidate = 1;
break;
case 0x0a4: /* DISPC_GFX_FIFO_TRESHOLD */
s->dispc.l[0].tresh = value & 0x01ff01ff;
break;
case 0x0ac: /* DISPC_GFX_ROW_INC */
s->dispc.l[0].rowinc = value;
s->dispc.invalidate = 1;
break;
case 0x0b0: /* DISPC_GFX_PIXEL_INC */
s->dispc.l[0].colinc = value;
s->dispc.invalidate = 1;
break;
case 0x0b4: /* DISPC_GFX_WINDOW_SKIP */
s->dispc.l[0].wininc = value;
break;
case 0x0b8: /* DISPC_GFX_TABLE_BA */
s->dispc.l[0].addr[2] = (hwaddr) value;
s->dispc.invalidate = 1;
break;
case 0x0bc: /* DISPC_VID1_BA0 */
case 0x0c0: /* DISPC_VID1_BA1 */
case 0x0c4: /* DISPC_VID1_POSITION */
case 0x0c8: /* DISPC_VID1_SIZE */
case 0x0cc: /* DISPC_VID1_ATTRIBUTES */
case 0x0d0: /* DISPC_VID1_FIFO_TRESHOLD */
case 0x0d8: /* DISPC_VID1_ROW_INC */
case 0x0dc: /* DISPC_VID1_PIXEL_INC */
case 0x0e0: /* DISPC_VID1_FIR */
case 0x0e4: /* DISPC_VID1_PICTURE_SIZE */
case 0x0e8: /* DISPC_VID1_ACCU0 */
case 0x0ec: /* DISPC_VID1_ACCU1 */
case 0x0f0 ... 0x140: /* DISPC_VID1_FIR_COEF, DISPC_VID1_CONV_COEF */
case 0x14c: /* DISPC_VID2_BA0 */
case 0x150: /* DISPC_VID2_BA1 */
case 0x154: /* DISPC_VID2_POSITION */
case 0x158: /* DISPC_VID2_SIZE */
case 0x15c: /* DISPC_VID2_ATTRIBUTES */
case 0x160: /* DISPC_VID2_FIFO_TRESHOLD */
case 0x168: /* DISPC_VID2_ROW_INC */
case 0x16c: /* DISPC_VID2_PIXEL_INC */
case 0x170: /* DISPC_VID2_FIR */
case 0x174: /* DISPC_VID2_PICTURE_SIZE */
case 0x178: /* DISPC_VID2_ACCU0 */
case 0x17c: /* DISPC_VID2_ACCU1 */
case 0x180 ... 0x1d0: /* DISPC_VID2_FIR_COEF, DISPC_VID2_CONV_COEF */
case 0x1d4: /* DISPC_DATA_CYCLE1 */
case 0x1d8: /* DISPC_DATA_CYCLE2 */
case 0x1dc: /* DISPC_DATA_CYCLE3 */
break;
default:
OMAP_BAD_REG(addr);
}
}
| false | qemu | a89f364ae8740dfc31b321eed9ee454e996dc3c1 |
3,927 | static void bdrv_replace_child(BdrvChild *child, BlockDriverState *new_bs,
bool check_new_perm)
{
BlockDriverState *old_bs = child->bs;
uint64_t perm, shared_perm;
if (old_bs) {
/* Update permissions for old node. This is guaranteed to succeed
* because we're just taking a parent away, so we're loosening
* restrictions. */
bdrv_get_cumulative_perm(old_bs, &perm, &shared_perm);
bdrv_check_perm(old_bs, perm, shared_perm, NULL, &error_abort);
bdrv_set_perm(old_bs, perm, shared_perm);
}
bdrv_replace_child_noperm(child, new_bs);
if (new_bs) {
bdrv_get_cumulative_perm(new_bs, &perm, &shared_perm);
if (check_new_perm) {
bdrv_check_perm(new_bs, perm, shared_perm, NULL, &error_abort);
}
bdrv_set_perm(new_bs, perm, shared_perm);
}
}
| false | qemu | 466787fbca9b25b47365b3d2c09d308df67a61db |
3,928 | void cpu_exec_init(CPUState *cpu, Error **errp)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
int cpu_index;
Error *local_err = NULL;
#ifndef CONFIG_USER_ONLY
cpu->as = &address_space_memory;
cpu->thread_id = qemu_get_thread_id();
#endif
#if defined(CONFIG_USER_ONLY)
cpu_list_lock();
#endif
cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
if (local_err) {
error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
#endif
return;
}
QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
#endif
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
cpu_save, cpu_load, cpu->env_ptr);
assert(cc->vmsd == NULL);
assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
if (cc->vmsd != NULL) {
vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
}
}
| false | qemu | 56943e8cc14b7eeeab67d1942fa5d8bcafe3e53f |
3,929 | static int ide_qdev_init(DeviceState *qdev)
{
IDEDevice *dev = IDE_DEVICE(qdev);
IDEDeviceClass *dc = IDE_DEVICE_GET_CLASS(dev);
IDEBus *bus = DO_UPCAST(IDEBus, qbus, qdev->parent_bus);
if (!dev->conf.bs) {
error_report("No drive specified");
goto err;
}
if (dev->unit == -1) {
dev->unit = bus->master ? 1 : 0;
}
if (dev->unit >= bus->max_units) {
error_report("Can't create IDE unit %d, bus supports only %d units",
dev->unit, bus->max_units);
goto err;
}
switch (dev->unit) {
case 0:
if (bus->master) {
error_report("IDE unit %d is in use", dev->unit);
goto err;
}
bus->master = dev;
break;
case 1:
if (bus->slave) {
error_report("IDE unit %d is in use", dev->unit);
goto err;
}
bus->slave = dev;
break;
default:
error_report("Invalid IDE unit %d", dev->unit);
goto err;
}
return dc->init(dev);
err:
return -1;
}
| false | qemu | 4be746345f13e99e468c60acbd3a355e8183e3ce |
3,930 | static int default_fdset_dup_fd_remove(int dup_fd)
{
return -1;
}
| false | qemu | 1f001dc7bc9e435bf231a5b0edcad1c7c2bd6214 |
3,931 | void pc_hot_add_cpu(const int64_t id, Error **errp)
{
X86CPU *cpu;
ObjectClass *oc;
PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
int64_t apic_id = x86_cpu_apic_id_from_index(id);
Error *local_err = NULL;
if (id < 0) {
error_setg(errp, "Invalid CPU id: %" PRIi64, id);
return;
}
if (cpu_exists(apic_id)) {
error_setg(errp, "Unable to add CPU: %" PRIi64
", it already exists", id);
return;
}
if (id >= max_cpus) {
error_setg(errp, "Unable to add CPU: %" PRIi64
", max allowed: %d", id, max_cpus - 1);
return;
}
if (apic_id >= ACPI_CPU_HOTPLUG_ID_LIMIT) {
error_setg(errp, "Unable to add CPU: %" PRIi64
", resulting APIC ID (%" PRIi64 ") is too large",
id, apic_id);
return;
}
assert(pcms->possible_cpus->cpus[0].cpu); /* BSP is always present */
oc = OBJECT_CLASS(CPU_GET_CLASS(pcms->possible_cpus->cpus[0].cpu));
cpu = pc_new_cpu(object_class_get_name(oc), apic_id, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
object_unref(OBJECT(cpu));
}
| false | qemu | 4ec60c76d5ab513e375f17b043d2b9cb849adf6c |
3,932 | static void show_packet(AVFormatContext *fmt_ctx, AVPacket *pkt)
{
char val_str[128];
AVStream *st = fmt_ctx->streams[pkt->stream_index];
printf("[PACKET]\n");
printf("codec_type=%s\n", media_type_string(st->codec->codec_type));
printf("stream_index=%d\n", pkt->stream_index);
printf("pts=%s\n", ts_value_string(val_str, sizeof(val_str), pkt->pts));
printf("pts_time=%s\n", time_value_string(val_str, sizeof(val_str),
pkt->pts, &st->time_base));
printf("dts=%s\n", ts_value_string(val_str, sizeof(val_str), pkt->dts));
printf("dts_time=%s\n", time_value_string(val_str, sizeof(val_str),
pkt->dts, &st->time_base));
printf("duration=%s\n", ts_value_string(val_str, sizeof(val_str),
pkt->duration));
printf("duration_time=%s\n", time_value_string(val_str, sizeof(val_str),
pkt->duration,
&st->time_base));
printf("size=%s\n", value_string(val_str, sizeof(val_str),
pkt->size, unit_byte_str));
printf("pos=%"PRId64"\n", pkt->pos);
printf("flags=%c\n", pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_');
printf("[/PACKET]\n");
}
| false | FFmpeg | 3a8c95f730732b9f1ffacdbfbf79a01b202a67af |
3,934 | BusState *qbus_create(BusInfo *info, DeviceState *parent, const char *name)
{
BusState *bus;
char *buf;
int i,len;
bus = qemu_mallocz(info->size);
bus->info = info;
bus->parent = parent;
if (name) {
/* use supplied name */
bus->name = qemu_strdup(name);
} else if (parent && parent->id) {
/* parent device has id -> use it for bus name */
len = strlen(parent->id) + 16;
buf = qemu_malloc(len);
snprintf(buf, len, "%s.%d", parent->id, parent->num_child_bus);
bus->name = buf;
} else {
/* no id -> use lowercase bus type for bus name */
len = strlen(info->name) + 16;
buf = qemu_malloc(len);
len = snprintf(buf, len, "%s.%d", info->name,
parent ? parent->num_child_bus : 0);
for (i = 0; i < len; i++)
buf[i] = qemu_tolower(buf[i]);
bus->name = buf;
}
LIST_INIT(&bus->children);
if (parent) {
LIST_INSERT_HEAD(&parent->child_bus, bus, sibling);
parent->num_child_bus++;
}
return bus;
}
| false | qemu | 72cf2d4f0e181d0d3a3122e04129c58a95da713e |
3,935 | static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs,
int64_t sector_num,
int nb_sectors, int *pnum)
{
IscsiLun *iscsilun = bs->opaque;
struct scsi_get_lba_status *lbas = NULL;
struct scsi_lba_status_descriptor *lbasd = NULL;
struct IscsiTask iTask;
int64_t ret;
iscsi_co_init_iscsitask(iscsilun, &iTask);
if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) {
ret = -EINVAL;
goto out;
}
/* default to all sectors allocated */
ret = BDRV_BLOCK_DATA;
ret |= (sector_num << BDRV_SECTOR_BITS) | BDRV_BLOCK_OFFSET_VALID;
*pnum = nb_sectors;
/* LUN does not support logical block provisioning */
if (!iscsilun->lbpme) {
goto out;
}
retry:
if (iscsi_get_lba_status_task(iscsilun->iscsi, iscsilun->lun,
sector_qemu2lun(sector_num, iscsilun),
8 + 16, iscsi_co_generic_cb,
&iTask) == NULL) {
ret = -ENOMEM;
goto out;
}
while (!iTask.complete) {
iscsi_set_events(iscsilun);
qemu_coroutine_yield();
}
if (iTask.do_retry) {
if (iTask.task != NULL) {
scsi_free_scsi_task(iTask.task);
iTask.task = NULL;
}
iTask.complete = 0;
goto retry;
}
if (iTask.status != SCSI_STATUS_GOOD) {
/* in case the get_lba_status_callout fails (i.e.
* because the device is busy or the cmd is not
* supported) we pretend all blocks are allocated
* for backwards compatibility */
goto out;
}
lbas = scsi_datain_unmarshall(iTask.task);
if (lbas == NULL) {
ret = -EIO;
goto out;
}
lbasd = &lbas->descriptors[0];
if (sector_qemu2lun(sector_num, iscsilun) != lbasd->lba) {
ret = -EIO;
goto out;
}
*pnum = sector_lun2qemu(lbasd->num_blocks, iscsilun);
if (lbasd->provisioning == SCSI_PROVISIONING_TYPE_DEALLOCATED ||
lbasd->provisioning == SCSI_PROVISIONING_TYPE_ANCHORED) {
ret &= ~BDRV_BLOCK_DATA;
if (iscsilun->lbprz) {
ret |= BDRV_BLOCK_ZERO;
}
}
if (ret & BDRV_BLOCK_ZERO) {
iscsi_allocationmap_clear(iscsilun, sector_num, *pnum);
} else {
iscsi_allocationmap_set(iscsilun, sector_num, *pnum);
}
if (*pnum > nb_sectors) {
*pnum = nb_sectors;
}
out:
if (iTask.task != NULL) {
scsi_free_scsi_task(iTask.task);
}
return ret;
}
| false | qemu | 67a0fd2a9bca204d2b39f910a97c7137636a0715 |
3,936 | static inline void assert_fp_access_checked(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
if (unlikely(!s->fp_access_checked || !s->cpacr_fpen)) {
fprintf(stderr, "target-arm: FP access check missing for "
"instruction 0x%08x\n", s->insn);
abort();
}
#endif
}
| false | qemu | 9dbbc748d671c70599101836cd1c2719d92f3017 |
3,937 | static void dp8393x_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
dp8393xState *s = opaque;
int reg;
if ((addr & ((1 << s->it_shift) - 1)) != 0) {
return;
}
reg = addr >> s->it_shift;
write_register(s, reg, (uint16_t)val);
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
3,938 | socket_sockaddr_to_address_vsock(struct sockaddr_storage *sa,
socklen_t salen,
Error **errp)
{
SocketAddressLegacy *addr;
VsockSocketAddress *vaddr;
struct sockaddr_vm *svm = (struct sockaddr_vm *)sa;
addr = g_new0(SocketAddressLegacy, 1);
addr->type = SOCKET_ADDRESS_LEGACY_KIND_VSOCK;
addr->u.vsock.data = vaddr = g_new0(VsockSocketAddress, 1);
vaddr->cid = g_strdup_printf("%u", svm->svm_cid);
vaddr->port = g_strdup_printf("%u", svm->svm_port);
return addr;
}
| false | qemu | bd269ebc82fbaa5fe7ce5bc7c1770ac8acecd884 |
3,939 | QObject *object_property_get_qobject(Object *obj, const char *name,
Error **errp)
{
QObject *ret = NULL;
Error *local_err = NULL;
Visitor *v;
v = qmp_output_visitor_new(&ret);
object_property_get(obj, v, name, &local_err);
if (!local_err) {
visit_complete(v, &ret);
}
error_propagate(errp, local_err);
visit_free(v);
return ret;
}
| false | qemu | 7d5e199ade76c53ec316ab6779800581bb47c50a |
3,940 | static KVMSlot *kvm_alloc_slot(KVMState *s)
{
int i;
for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
/* KVM private memory slots */
if (i >= 8 && i < 12)
continue;
if (s->slots[i].memory_size == 0)
return &s->slots[i];
}
fprintf(stderr, "%s: no free slot available\n", __func__);
abort();
}
| false | qemu | a426e122173f36f05ea2cb72dcff77b7408546ce |
3,941 | static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
if (s->ctx) {
virtio_scsi_dataplane_start(s);
if (!s->dataplane_fenced) {
return;
}
}
virtio_scsi_handle_event_vq(s, vq);
}
| false | qemu | ad07cd69ecaffbaa015459a46975ab32e50df805 |
3,942 | static void mirror_do_zero_or_discard(MirrorBlockJob *s,
int64_t sector_num,
int nb_sectors,
bool is_discard)
{
MirrorOp *op;
/* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
* so the freeing in mirror_iteration_done is nop. */
op = g_new0(MirrorOp, 1);
op->s = s;
op->sector_num = sector_num;
op->nb_sectors = nb_sectors;
s->in_flight++;
s->sectors_in_flight += nb_sectors;
if (is_discard) {
blk_aio_pdiscard(s->target, sector_num << BDRV_SECTOR_BITS,
op->nb_sectors << BDRV_SECTOR_BITS,
mirror_write_complete, op);
} else {
blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
op->nb_sectors * BDRV_SECTOR_SIZE,
s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
mirror_write_complete, op);
}
}
| false | qemu | b436982f04fb33bb29fcdea190bd1fdc97dc65ef |
3,943 | static int encode_individual_channel(AVCodecContext *avctx, AACEncContext *s,
SingleChannelElement *sce,
int common_window)
{
put_bits(&s->pb, 8, sce->sf_idx[0]);
if (!common_window) {
put_ics_info(s, &sce->ics);
if (s->coder->encode_main_pred)
s->coder->encode_main_pred(s, sce);
}
encode_band_info(s, sce);
encode_scale_factors(avctx, s, sce);
encode_pulses(s, &sce->pulse);
if (s->coder->encode_tns_info)
s->coder->encode_tns_info(s, sce);
else
put_bits(&s->pb, 1, 0);
put_bits(&s->pb, 1, 0); //ssr
encode_spectral_coeffs(s, sce);
return 0;
}
| false | FFmpeg | f20b67173ca6a05b8c3dee02dad3b7243b96292b |
3,944 | int float32_eq_signaling( float32 a, float32 b STATUS_PARAM )
{
if ( ( ( extractFloat32Exp( a ) == 0xFF ) && extractFloat32Frac( a ) )
|| ( ( extractFloat32Exp( b ) == 0xFF ) && extractFloat32Frac( b ) )
) {
float_raise( float_flag_invalid STATUS_VAR);
return 0;
}
return ( a == b ) || ( (bits32) ( ( a | b )<<1 ) == 0 );
}
| false | qemu | f090c9d4ad5812fb92843d6470a1111c15190c4c |
3,945 | static void ich9_lpc_update_pic(ICH9LPCState *lpc, int gsi)
{
int i, pic_level;
assert(gsi < ICH9_LPC_PIC_NUM_PINS);
/* The pic level is the logical OR of all the PCI irqs mapped to it */
pic_level = 0;
for (i = 0; i < ICH9_LPC_NB_PIRQS; i++) {
int tmp_irq;
int tmp_dis;
ich9_lpc_pic_irq(lpc, i, &tmp_irq, &tmp_dis);
if (!tmp_dis && tmp_irq == gsi) {
pic_level |= pci_bus_get_irq_level(lpc->d.bus, i);
}
}
if (gsi == lpc->sci_gsi) {
pic_level |= lpc->sci_level;
}
qemu_set_irq(lpc->gsi[gsi], pic_level);
}
| false | qemu | fd56e0612b6454a282fa6a953fdb09281a98c589 |
3,947 | static void FUNCC(pred8x8_top_dc)(uint8_t *_src, int stride){
int i;
int dc0, dc1;
pixel4 dc0splat, dc1splat;
pixel *src = (pixel*)_src;
stride /= sizeof(pixel);
dc0=dc1=0;
for(i=0;i<4; i++){
dc0+= src[i-stride];
dc1+= src[4+i-stride];
}
dc0splat = PIXEL_SPLAT_X4((dc0 + 2)>>2);
dc1splat = PIXEL_SPLAT_X4((dc1 + 2)>>2);
for(i=0; i<4; i++){
((pixel4*)(src+i*stride))[0]= dc0splat;
((pixel4*)(src+i*stride))[1]= dc1splat;
}
for(i=4; i<8; i++){
((pixel4*)(src+i*stride))[0]= dc0splat;
((pixel4*)(src+i*stride))[1]= dc1splat;
}
}
| true | FFmpeg | 2caf19e90f270abe1e80a3e85acaf0eb5c9d0aac |
3,948 | static uint64_t strongarm_gpio_read(void *opaque, hwaddr offset,
unsigned size)
{
StrongARMGPIOInfo *s = opaque;
switch (offset) {
case GPDR: /* GPIO Pin-Direction registers */
return s->dir;
case GPSR: /* GPIO Pin-Output Set registers */
DPRINTF("%s: Read from a write-only register 0x" TARGET_FMT_plx "\n",
__func__, offset);
return s->gpsr; /* Return last written value. */
case GPCR: /* GPIO Pin-Output Clear registers */
DPRINTF("%s: Read from a write-only register 0x" TARGET_FMT_plx "\n",
__func__, offset);
return 31337; /* Specified as unpredictable in the docs. */
case GRER: /* GPIO Rising-Edge Detect Enable registers */
return s->rising;
case GFER: /* GPIO Falling-Edge Detect Enable registers */
return s->falling;
case GAFR: /* GPIO Alternate Function registers */
return s->gafr;
case GPLR: /* GPIO Pin-Level registers */
return (s->olevel & s->dir) |
(s->ilevel & ~s->dir);
case GEDR: /* GPIO Edge Detect Status registers */
return s->status;
default:
printf("%s: Bad offset 0x" TARGET_FMT_plx "\n", __func__, offset);
}
return 0;
}
| true | qemu | 92335a0d4021a3b44ccc88c9fc6c0fd2113f1882 |
3,949 | static int draw_glyphs(DrawTextContext *dtext, AVFilterBufferRef *picref,
int width, int height, const uint8_t rgbcolor[4], const uint8_t yuvcolor[4], int x, int y)
{
char *text = dtext->text;
uint32_t code = 0;
int i;
uint8_t *p;
Glyph *glyph = NULL;
for (i = 0, p = text; *p; i++) {
Glyph dummy = { 0 };
GET_UTF8(code, *p++, continue;);
/* skip new line chars, just go to new line */
if (code == '\n' || code == '\r' || code == '\t')
continue;
dummy.code = code;
glyph = av_tree_find(dtext->glyphs, &dummy, (void *)glyph_cmp, NULL);
if (glyph->bitmap.pixel_mode != FT_PIXEL_MODE_MONO &&
glyph->bitmap.pixel_mode != FT_PIXEL_MODE_GRAY)
return AVERROR(EINVAL);
if (dtext->is_packed_rgb) {
draw_glyph_rgb(picref, &glyph->bitmap,
dtext->positions[i].x+x, dtext->positions[i].y+y, width, height,
dtext->pixel_step[0], rgbcolor, dtext->rgba_map);
} else {
draw_glyph_yuv(picref, &glyph->bitmap,
dtext->positions[i].x+x, dtext->positions[i].y+y, width, height,
yuvcolor, dtext->hsub, dtext->vsub);
}
}
return 0;
}
| true | FFmpeg | efc8c709c93875dffa4c4181fa9b56fa6d20d4c7 |
3,950 | static void text_console_resize(QemuConsole *s)
{
TextCell *cells, *c, *c1;
int w1, x, y, last_width;
last_width = s->width;
s->width = surface_width(s->surface) / FONT_WIDTH;
s->height = surface_height(s->surface) / FONT_HEIGHT;
w1 = last_width;
if (s->width < w1)
w1 = s->width;
cells = g_malloc(s->width * s->total_height * sizeof(TextCell));
for(y = 0; y < s->total_height; y++) {
c = &cells[y * s->width];
if (w1 > 0) {
c1 = &s->cells[y * last_width];
for(x = 0; x < w1; x++) {
*c++ = *c1++;
}
}
for(x = w1; x < s->width; x++) {
c->ch = ' ';
c->t_attrib = s->t_attrib_default;
c++;
}
}
g_free(s->cells);
s->cells = cells;
}
| true | qemu | fedf0d35aafc4f1f1e5f6dbc80cb23ae1ae49f0b |
3,951 | static int qcow_create(const char *filename, QemuOpts *opts, Error **errp)
{
int header_size, backing_filename_len, l1_size, shift, i;
QCowHeader header;
uint8_t *tmp;
int64_t total_size = 0;
char *backing_file = NULL;
Error *local_err = NULL;
int ret;
BlockBackend *qcow_blk;
const char *encryptfmt = NULL;
QDict *options;
QDict *encryptopts = NULL;
QCryptoBlockCreateOptions *crypto_opts = NULL;
QCryptoBlock *crypto = NULL;
/* Read out options */
total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
BDRV_SECTOR_SIZE);
if (total_size == 0) {
error_setg(errp, "Image size is too small, cannot be zero length");
ret = -EINVAL;
goto cleanup;
}
backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
encryptfmt = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT);
if (encryptfmt) {
if (qemu_opt_get(opts, BLOCK_OPT_ENCRYPT)) {
error_setg(errp, "Options " BLOCK_OPT_ENCRYPT " and "
BLOCK_OPT_ENCRYPT_FORMAT " are mutually exclusive");
ret = -EINVAL;
goto cleanup;
}
} else if (qemu_opt_get_bool_del(opts, BLOCK_OPT_ENCRYPT, false)) {
encryptfmt = "aes";
}
ret = bdrv_create_file(filename, opts, &local_err);
if (ret < 0) {
error_propagate(errp, local_err);
goto cleanup;
}
qcow_blk = blk_new_open(filename, NULL, NULL,
BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
&local_err);
if (qcow_blk == NULL) {
error_propagate(errp, local_err);
ret = -EIO;
goto cleanup;
}
blk_set_allow_write_beyond_eof(qcow_blk, true);
ret = blk_truncate(qcow_blk, 0, PREALLOC_MODE_OFF, errp);
if (ret < 0) {
goto exit;
}
memset(&header, 0, sizeof(header));
header.magic = cpu_to_be32(QCOW_MAGIC);
header.version = cpu_to_be32(QCOW_VERSION);
header.size = cpu_to_be64(total_size);
header_size = sizeof(header);
backing_filename_len = 0;
if (backing_file) {
if (strcmp(backing_file, "fat:")) {
header.backing_file_offset = cpu_to_be64(header_size);
backing_filename_len = strlen(backing_file);
header.backing_file_size = cpu_to_be32(backing_filename_len);
header_size += backing_filename_len;
} else {
/* special backing file for vvfat */
g_free(backing_file);
backing_file = NULL;
}
header.cluster_bits = 9; /* 512 byte cluster to avoid copying
unmodified sectors */
header.l2_bits = 12; /* 32 KB L2 tables */
} else {
header.cluster_bits = 12; /* 4 KB clusters */
header.l2_bits = 9; /* 4 KB L2 tables */
}
header_size = (header_size + 7) & ~7;
shift = header.cluster_bits + header.l2_bits;
l1_size = (total_size + (1LL << shift) - 1) >> shift;
header.l1_table_offset = cpu_to_be64(header_size);
options = qemu_opts_to_qdict(opts, NULL);
qdict_extract_subqdict(options, &encryptopts, "encrypt.");
QDECREF(options);
if (encryptfmt) {
if (!g_str_equal(encryptfmt, "aes")) {
error_setg(errp, "Unknown encryption format '%s', expected 'aes'",
encryptfmt);
ret = -EINVAL;
goto exit;
}
header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
crypto_opts = block_crypto_create_opts_init(
Q_CRYPTO_BLOCK_FORMAT_QCOW, encryptopts, errp);
if (!crypto_opts) {
ret = -EINVAL;
goto exit;
}
crypto = qcrypto_block_create(crypto_opts, "encrypt.",
NULL, NULL, NULL, errp);
if (!crypto) {
ret = -EINVAL;
goto exit;
}
} else {
header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
}
/* write all the data */
ret = blk_pwrite(qcow_blk, 0, &header, sizeof(header), 0);
if (ret != sizeof(header)) {
goto exit;
}
if (backing_file) {
ret = blk_pwrite(qcow_blk, sizeof(header),
backing_file, backing_filename_len, 0);
if (ret != backing_filename_len) {
goto exit;
}
}
tmp = g_malloc0(BDRV_SECTOR_SIZE);
for (i = 0; i < DIV_ROUND_UP(sizeof(uint64_t) * l1_size, BDRV_SECTOR_SIZE);
i++) {
ret = blk_pwrite(qcow_blk, header_size + BDRV_SECTOR_SIZE * i,
tmp, BDRV_SECTOR_SIZE, 0);
if (ret != BDRV_SECTOR_SIZE) {
g_free(tmp);
goto exit;
}
}
g_free(tmp);
ret = 0;
exit:
blk_unref(qcow_blk);
cleanup:
QDECREF(encryptopts);
qcrypto_block_free(crypto);
qapi_free_QCryptoBlockCreateOptions(crypto_opts);
g_free(backing_file);
return ret;
}
| true | qemu | 0696ae2c9236a3589f5eaf5b00c12868b6f30a17 |
3,952 | static int hls_append_segment(struct AVFormatContext *s, HLSContext *hls, double duration,
int64_t pos, int64_t size)
{
HLSSegment *en = av_malloc(sizeof(*en));
const char *filename;
int ret;
if (!en)
return AVERROR(ENOMEM);
if ((hls->flags & (HLS_SECOND_LEVEL_SEGMENT_SIZE | HLS_SECOND_LEVEL_SEGMENT_DURATION)) &&
strlen(hls->current_segment_final_filename_fmt)) {
char * old_filename = av_strdup(hls->avf->filename); // %%s will be %s after strftime
av_strlcpy(hls->avf->filename, hls->current_segment_final_filename_fmt, sizeof(hls->avf->filename));
if (hls->flags & HLS_SECOND_LEVEL_SEGMENT_SIZE) {
char * filename = av_strdup(hls->avf->filename); // %%s will be %s after strftime
if (!filename)
return AVERROR(ENOMEM);
if (replace_int_data_in_filename(hls->avf->filename, sizeof(hls->avf->filename),
filename, 's', pos + size) < 1) {
av_log(hls, AV_LOG_ERROR,
"Invalid second level segment filename template '%s', "
"you can try to remove second_level_segment_size flag\n",
filename);
av_free(filename);
av_free(old_filename);
return AVERROR(EINVAL);
}
av_free(filename);
}
if (hls->flags & HLS_SECOND_LEVEL_SEGMENT_DURATION) {
char * filename = av_strdup(hls->avf->filename); // %%t will be %t after strftime
if (!filename)
return AVERROR(ENOMEM);
if (replace_int_data_in_filename(hls->avf->filename, sizeof(hls->avf->filename),
filename, 't', (int64_t)round(1000000 * duration)) < 1) {
av_log(hls, AV_LOG_ERROR,
"Invalid second level segment filename template '%s', "
"you can try to remove second_level_segment_time flag\n",
filename);
av_free(filename);
av_free(old_filename);
return AVERROR(EINVAL);
}
av_free(filename);
}
ff_rename(old_filename, hls->avf->filename, hls);
av_free(old_filename);
}
filename = av_basename(hls->avf->filename);
if (hls->use_localtime_mkdir) {
filename = hls->avf->filename;
}
if (find_segment_by_filename(hls->segments, filename)
|| find_segment_by_filename(hls->old_segments, filename)) {
av_log(hls, AV_LOG_WARNING, "Duplicated segment filename detected: %s\n", filename);
}
av_strlcpy(en->filename, filename, sizeof(en->filename));
if(hls->has_subtitle)
av_strlcpy(en->sub_filename, av_basename(hls->vtt_avf->filename), sizeof(en->sub_filename));
else
en->sub_filename[0] = '\0';
en->duration = duration;
en->pos = pos;
en->size = size;
en->next = NULL;
en->discont = 0;
if (hls->discontinuity) {
en->discont = 1;
hls->discontinuity = 0;
}
if (hls->key_info_file) {
av_strlcpy(en->key_uri, hls->key_uri, sizeof(en->key_uri));
av_strlcpy(en->iv_string, hls->iv_string, sizeof(en->iv_string));
}
if (!hls->segments)
hls->segments = en;
else
hls->last_segment->next = en;
hls->last_segment = en;
// EVENT or VOD playlists imply sliding window cannot be used
if (hls->pl_type != PLAYLIST_TYPE_NONE)
hls->max_nb_segments = 0;
if (hls->max_nb_segments && hls->nb_entries >= hls->max_nb_segments) {
en = hls->segments;
hls->initial_prog_date_time += en->duration;
hls->segments = en->next;
if (en && hls->flags & HLS_DELETE_SEGMENTS &&
!(hls->flags & HLS_SINGLE_FILE || hls->wrap)) {
en->next = hls->old_segments;
hls->old_segments = en;
if ((ret = hls_delete_old_segments(hls)) < 0)
return ret;
} else
av_free(en);
} else
hls->nb_entries++;
if (hls->max_seg_size > 0) {
return 0;
}
hls->sequence++;
return 0;
}
| true | FFmpeg | 93593674bc8d85a40e0648f21a7cdbf3554f21ff |
3,953 | static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){
MpegEncContext * const s= w->s;
uint8_t * scantable;
int final,run,level;
int ac_mode,dc_mode,est_run,dc_level;
int pos,n;
int zeros_only;
int use_quant_matrix;
int sign;
assert(w->orient<12);
s->dsp.clear_block(s->block[0]);
if(chroma){
dc_mode=2;
}else{
dc_mode=!!w->est_run;//0,1
}
if(x8_get_dc_rlf(w, dc_mode, &dc_level, &final)) return -1;
n=0;
zeros_only=0;
if(!final){//decode ac
use_quant_matrix=w->use_quant_matrix;
if(chroma){
ac_mode = 1;
est_run = 64;//not used
}else{
if (w->raw_orient < 3){
use_quant_matrix = 0;
}
if(w->raw_orient > 4){
ac_mode = 0;
est_run = 64;
}else{
if(w->est_run > 1){
ac_mode = 2;
est_run=w->est_run;
}else{
ac_mode = 3;
est_run = 64;
}
}
}
x8_select_ac_table(w,ac_mode);
/*scantable_selector[12]={0,2,0,1,1,1,0,2,2,0,1,2};<-
-> 10'01' 00'10' 10'00' 01'01' 01'00' 10'00 =>0x928548 */
scantable = w->scantable[ (0x928548>>(2*w->orient))&3 ].permutated;
pos=0;
do {
n++;
if( n >= est_run ){
ac_mode=3;
x8_select_ac_table(w,3);
}
x8_get_ac_rlf(w,ac_mode,&run,&level,&final);
pos+=run+1;
if(pos>63){
//this also handles vlc error in x8_get_ac_rlf
return -1;
}
level= (level+1) * w->dquant;
level+= w->qsum;
sign = - get_bits1(&s->gb);
level = (level ^ sign) - sign;
if(use_quant_matrix){
level = (level*quant_table[pos])>>8;
}
s->block[0][ scantable[pos] ]=level;
}while(!final);
s->block_last_index[0]=pos;
}else{//DC only
s->block_last_index[0]=0;
if(w->flat_dc && ((unsigned)(dc_level+1)) < 3){//[-1;1]
int32_t divide_quant= !chroma ? w->divide_quant_dc_luma:
w->divide_quant_dc_chroma;
int32_t dc_quant = !chroma ? w->quant:
w->quant_dc_chroma;
//original intent dc_level+=predicted_dc/quant; but it got lost somewhere in the rounding
dc_level+= (w->predicted_dc*divide_quant + (1<<12) )>>13;
dsp_x8_put_solidcolor( av_clip_uint8((dc_level*dc_quant+4)>>3),
s->dest[chroma], s->current_picture.f.linesize[!!chroma]);
goto block_placed;
}
zeros_only = (dc_level == 0);
}
if(!chroma){
s->block[0][0] = dc_level*w->quant;
}else{
s->block[0][0] = dc_level*w->quant_dc_chroma;
}
//there is !zero_only check in the original, but dc_level check is enough
if( (unsigned int)(dc_level+1) >= 3 && (w->edges&3) != 3 ){
int direction;
/*ac_comp_direction[orient] = { 0, 3, 3, 1, 1, 0, 0, 0, 2, 2, 2, 1 };<-
-> 01'10' 10'10' 00'00' 00'01' 01'11' 11'00 =>0x6A017C */
direction= (0x6A017C>>(w->orient*2))&3;
if (direction != 3){
x8_ac_compensation(w, direction, s->block[0][0]);//modify block_last[]
}
}
if(w->flat_dc){
dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.f.linesize[!!chroma]);
}else{
w->dsp.spatial_compensation[w->orient]( s->edge_emu_buffer,
s->dest[chroma],
s->current_picture.f.linesize[!!chroma] );
}
if(!zeros_only)
s->dsp.idct_add ( s->dest[chroma],
s->current_picture.f.linesize[!!chroma],
s->block[0] );
block_placed:
if(!chroma){
x8_update_predictions(w,w->orient,n);
}
if(s->loop_filter){
uint8_t* ptr = s->dest[chroma];
int linesize = s->current_picture.f.linesize[!!chroma];
if(!( (w->edges&2) || ( zeros_only && (w->orient|4)==4 ) )){
w->dsp.h_loop_filter(ptr, linesize, w->quant);
}
if(!( (w->edges&1) || ( zeros_only && (w->orient|8)==8 ) )){
w->dsp.v_loop_filter(ptr, linesize, w->quant);
}
}
return 0;
}
| true | FFmpeg | f6774f905fb3cfdc319523ac640be30b14c1bc55 |
3,954 | static void pci_device_reset(PCIDevice *dev)
{
int r;
memset(dev->irq_state, 0, sizeof dev->irq_state);
dev->config[PCI_COMMAND] &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER);
dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
dev->config[PCI_INTERRUPT_LINE] = 0x0;
for (r = 0; r < PCI_NUM_REGIONS; ++r) {
if (!dev->io_regions[r].size) {
continue;
}
pci_set_long(dev->config + pci_bar(dev, r), dev->io_regions[r].type);
}
pci_update_mappings(dev);
}
| true | qemu | d036bb215e0ac1d1fd467239f1d3b7d904cac90a |
3,957 | static int mov_read_stts(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
//MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
int entries, i;
int64_t duration=0;
int64_t total_sample_count=0;
print_atom("stts", atom);
get_byte(pb); /* version */
get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
entries = get_be32(pb);
c->streams[c->fc->nb_streams-1]->stts_count = entries;
c->streams[c->fc->nb_streams-1]->stts_data = (uint64_t*) av_malloc(entries * sizeof(uint64_t));
#ifdef DEBUG
av_log(NULL, AV_LOG_DEBUG, "track[%i].stts.entries = %i\n", c->fc->nb_streams-1, entries);
#endif
for(i=0; i<entries; i++) {
int32_t sample_duration;
int32_t sample_count;
sample_count=get_be32(pb);
sample_duration = get_be32(pb);
c->streams[c->fc->nb_streams - 1]->stts_data[i] = (uint64_t)sample_count<<32 | (uint64_t)sample_duration;
#ifdef DEBUG
av_log(NULL, AV_LOG_DEBUG, "sample_count=%d, sample_duration=%d\n",sample_count,sample_duration);
#endif
duration+=sample_duration*sample_count;
total_sample_count+=sample_count;
#if 0 //We calculate an average instead, needed by .mp4-files created with nec e606 3g phone
if (!i && st->codec.codec_type==CODEC_TYPE_VIDEO) {
st->codec.frame_rate_base = sample_duration ? sample_duration : 1;
st->codec.frame_rate = c->streams[c->fc->nb_streams-1]->time_scale;
#ifdef DEBUG
av_log(NULL, AV_LOG_DEBUG, "VIDEO FRAME RATE= %i (sd= %i)\n", st->codec.frame_rate, sample_duration);
#endif
}
#endif
}
/*The stsd atom which contain codec type sometimes comes after the stts so we cannot check for codec_type*/
if(duration>0)
{
av_reduce(
&st->codec.frame_rate,
&st->codec.frame_rate_base,
c->streams[c->fc->nb_streams-1]->time_scale * total_sample_count,
duration,
INT_MAX
);
#ifdef DEBUG
av_log(NULL, AV_LOG_DEBUG, "FRAME RATE average (video or audio)= %f (tot sample count= %i ,tot dur= %i timescale=%d)\n", (float)st->codec.frame_rate/st->codec.frame_rate_base,total_sample_count,duration,c->streams[c->fc->nb_streams-1]->time_scale);
#endif
}
else
{
st->codec.frame_rate_base = 1;
st->codec.frame_rate = c->streams[c->fc->nb_streams-1]->time_scale;
}
return 0;
}
| true | FFmpeg | 568e18b15e2ddf494fd8926707d34ca08c8edce5 |
3,958 | static int window(venc_context_t * venc, signed short * audio, int samples) {
int i, j, channel;
const float * win = venc->win[0];
int window_len = 1 << (venc->blocksize[0] - 1);
float n = (float)(1 << venc->blocksize[0]) / 4.;
// FIXME use dsp
if (!venc->have_saved && !samples) return 0;
if (venc->have_saved) {
for (channel = 0; channel < venc->channels; channel++) {
memcpy(venc->samples + channel*window_len*2, venc->saved + channel*window_len, sizeof(float)*window_len);
}
} else {
for (channel = 0; channel < venc->channels; channel++) {
memset(venc->samples + channel*window_len*2, 0, sizeof(float)*window_len);
}
}
if (samples) {
for (channel = 0; channel < venc->channels; channel++) {
float * offset = venc->samples + channel*window_len*2 + window_len;
j = channel;
for (i = 0; i < samples; i++, j += venc->channels)
offset[i] = audio[j] / 32768. * win[window_len - i] / n;
}
} else {
for (channel = 0; channel < venc->channels; channel++) {
memset(venc->samples + channel*window_len*2 + window_len, 0, sizeof(float)*window_len);
}
}
for (channel = 0; channel < venc->channels; channel++) {
ff_mdct_calc(&venc->mdct[0], venc->coeffs + channel*window_len, venc->samples + channel*window_len*2, venc->floor/*tmp*/);
}
if (samples) {
for (channel = 0; channel < venc->channels; channel++) {
float * offset = venc->saved + channel*window_len;
j = channel;
for (i = 0; i < samples; i++, j += venc->channels)
offset[i] = audio[j] / 32768. * win[i] / n;
}
venc->have_saved = 1;
} else {
venc->have_saved = 0;
}
return 1;
}
| true | FFmpeg | af1cb7ee4c6fb68904e94dc4cf243ba26b3cbb47 |
3,959 | DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi, int *fatal_error)
{
const char *buf;
const char *file = NULL;
char devname[128];
const char *serial;
const char *mediastr = "";
BlockInterfaceType type;
enum { MEDIA_DISK, MEDIA_CDROM } media;
int bus_id, unit_id;
int cyls, heads, secs, translation;
BlockDriver *drv = NULL;
int max_devs;
int index;
int ro = 0;
int bdrv_flags = 0;
int on_read_error, on_write_error;
const char *devaddr;
DriveInfo *dinfo;
int snapshot = 0;
int ret;
*fatal_error = 1;
translation = BIOS_ATA_TRANSLATION_AUTO;
if (default_to_scsi) {
type = IF_SCSI;
max_devs = MAX_SCSI_DEVS;
pstrcpy(devname, sizeof(devname), "scsi");
} else {
type = IF_IDE;
max_devs = MAX_IDE_DEVS;
pstrcpy(devname, sizeof(devname), "ide");
}
media = MEDIA_DISK;
/* extract parameters */
bus_id = qemu_opt_get_number(opts, "bus", 0);
unit_id = qemu_opt_get_number(opts, "unit", -1);
index = qemu_opt_get_number(opts, "index", -1);
cyls = qemu_opt_get_number(opts, "cyls", 0);
heads = qemu_opt_get_number(opts, "heads", 0);
secs = qemu_opt_get_number(opts, "secs", 0);
snapshot = qemu_opt_get_bool(opts, "snapshot", 0);
ro = qemu_opt_get_bool(opts, "readonly", 0);
file = qemu_opt_get(opts, "file");
serial = qemu_opt_get(opts, "serial");
if ((buf = qemu_opt_get(opts, "if")) != NULL) {
pstrcpy(devname, sizeof(devname), buf);
if (!strcmp(buf, "ide")) {
type = IF_IDE;
max_devs = MAX_IDE_DEVS;
} else if (!strcmp(buf, "scsi")) {
type = IF_SCSI;
max_devs = MAX_SCSI_DEVS;
} else if (!strcmp(buf, "floppy")) {
type = IF_FLOPPY;
max_devs = 0;
} else if (!strcmp(buf, "pflash")) {
type = IF_PFLASH;
max_devs = 0;
} else if (!strcmp(buf, "mtd")) {
type = IF_MTD;
max_devs = 0;
} else if (!strcmp(buf, "sd")) {
type = IF_SD;
max_devs = 0;
} else if (!strcmp(buf, "virtio")) {
type = IF_VIRTIO;
max_devs = 0;
} else if (!strcmp(buf, "xen")) {
type = IF_XEN;
max_devs = 0;
} else if (!strcmp(buf, "none")) {
type = IF_NONE;
max_devs = 0;
} else {
fprintf(stderr, "qemu: unsupported bus type '%s'\n", buf);
return NULL;
}
}
if (cyls || heads || secs) {
if (cyls < 1 || (type == IF_IDE && cyls > 16383)) {
fprintf(stderr, "qemu: '%s' invalid physical cyls number\n", buf);
return NULL;
}
if (heads < 1 || (type == IF_IDE && heads > 16)) {
fprintf(stderr, "qemu: '%s' invalid physical heads number\n", buf);
return NULL;
}
if (secs < 1 || (type == IF_IDE && secs > 63)) {
fprintf(stderr, "qemu: '%s' invalid physical secs number\n", buf);
return NULL;
}
}
if ((buf = qemu_opt_get(opts, "trans")) != NULL) {
if (!cyls) {
fprintf(stderr,
"qemu: '%s' trans must be used with cyls,heads and secs\n",
buf);
return NULL;
}
if (!strcmp(buf, "none"))
translation = BIOS_ATA_TRANSLATION_NONE;
else if (!strcmp(buf, "lba"))
translation = BIOS_ATA_TRANSLATION_LBA;
else if (!strcmp(buf, "auto"))
translation = BIOS_ATA_TRANSLATION_AUTO;
else {
fprintf(stderr, "qemu: '%s' invalid translation type\n", buf);
return NULL;
}
}
if ((buf = qemu_opt_get(opts, "media")) != NULL) {
if (!strcmp(buf, "disk")) {
media = MEDIA_DISK;
} else if (!strcmp(buf, "cdrom")) {
if (cyls || secs || heads) {
fprintf(stderr,
"qemu: '%s' invalid physical CHS format\n", buf);
return NULL;
}
media = MEDIA_CDROM;
} else {
fprintf(stderr, "qemu: '%s' invalid media\n", buf);
return NULL;
}
}
if ((buf = qemu_opt_get(opts, "cache")) != NULL) {
if (!strcmp(buf, "off") || !strcmp(buf, "none")) {
bdrv_flags |= BDRV_O_NOCACHE;
} else if (!strcmp(buf, "writeback")) {
bdrv_flags |= BDRV_O_CACHE_WB;
} else if (!strcmp(buf, "unsafe")) {
bdrv_flags |= BDRV_O_CACHE_WB;
bdrv_flags |= BDRV_O_NO_FLUSH;
} else if (!strcmp(buf, "writethrough")) {
/* this is the default */
} else {
fprintf(stderr, "qemu: invalid cache option\n");
return NULL;
}
}
#ifdef CONFIG_LINUX_AIO
if ((buf = qemu_opt_get(opts, "aio")) != NULL) {
if (!strcmp(buf, "native")) {
bdrv_flags |= BDRV_O_NATIVE_AIO;
} else if (!strcmp(buf, "threads")) {
/* this is the default */
} else {
fprintf(stderr, "qemu: invalid aio option\n");
return NULL;
}
}
#endif
if ((buf = qemu_opt_get(opts, "format")) != NULL) {
if (strcmp(buf, "?") == 0) {
fprintf(stderr, "qemu: Supported formats:");
bdrv_iterate_format(bdrv_format_print, NULL);
fprintf(stderr, "\n");
return NULL;
}
drv = bdrv_find_whitelisted_format(buf);
if (!drv) {
fprintf(stderr, "qemu: '%s' invalid format\n", buf);
return NULL;
}
}
on_write_error = BLOCK_ERR_STOP_ENOSPC;
if ((buf = qemu_opt_get(opts, "werror")) != NULL) {
if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO && type != IF_NONE) {
fprintf(stderr, "werror is no supported by this format\n");
return NULL;
}
on_write_error = parse_block_error_action(buf, 0);
if (on_write_error < 0) {
return NULL;
}
}
on_read_error = BLOCK_ERR_REPORT;
if ((buf = qemu_opt_get(opts, "rerror")) != NULL) {
if (type != IF_IDE && type != IF_VIRTIO && type != IF_NONE) {
fprintf(stderr, "rerror is no supported by this format\n");
return NULL;
}
on_read_error = parse_block_error_action(buf, 1);
if (on_read_error < 0) {
return NULL;
}
}
if ((devaddr = qemu_opt_get(opts, "addr")) != NULL) {
if (type != IF_VIRTIO) {
fprintf(stderr, "addr is not supported\n");
return NULL;
}
}
/* compute bus and unit according index */
if (index != -1) {
if (bus_id != 0 || unit_id != -1) {
fprintf(stderr,
"qemu: index cannot be used with bus and unit\n");
return NULL;
}
if (max_devs == 0)
{
unit_id = index;
bus_id = 0;
} else {
unit_id = index % max_devs;
bus_id = index / max_devs;
}
}
/* if user doesn't specify a unit_id,
* try to find the first free
*/
if (unit_id == -1) {
unit_id = 0;
while (drive_get(type, bus_id, unit_id) != NULL) {
unit_id++;
if (max_devs && unit_id >= max_devs) {
unit_id -= max_devs;
bus_id++;
}
}
}
/* check unit id */
if (max_devs && unit_id >= max_devs) {
fprintf(stderr, "qemu: unit %d too big (max is %d)\n",
unit_id, max_devs - 1);
return NULL;
}
/*
* ignore multiple definitions
*/
if (drive_get(type, bus_id, unit_id) != NULL) {
*fatal_error = 0;
return NULL;
}
/* init */
dinfo = qemu_mallocz(sizeof(*dinfo));
if ((buf = qemu_opts_id(opts)) != NULL) {
dinfo->id = qemu_strdup(buf);
} else {
/* no id supplied -> create one */
dinfo->id = qemu_mallocz(32);
if (type == IF_IDE || type == IF_SCSI)
mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd";
if (max_devs)
snprintf(dinfo->id, 32, "%s%i%s%i",
devname, bus_id, mediastr, unit_id);
else
snprintf(dinfo->id, 32, "%s%s%i",
devname, mediastr, unit_id);
}
dinfo->bdrv = bdrv_new(dinfo->id);
dinfo->devaddr = devaddr;
dinfo->type = type;
dinfo->bus = bus_id;
dinfo->unit = unit_id;
dinfo->on_read_error = on_read_error;
dinfo->on_write_error = on_write_error;
dinfo->opts = opts;
if (serial)
strncpy(dinfo->serial, serial, sizeof(serial));
QTAILQ_INSERT_TAIL(&drives, dinfo, next);
switch(type) {
case IF_IDE:
case IF_SCSI:
case IF_XEN:
case IF_NONE:
switch(media) {
case MEDIA_DISK:
if (cyls != 0) {
bdrv_set_geometry_hint(dinfo->bdrv, cyls, heads, secs);
bdrv_set_translation_hint(dinfo->bdrv, translation);
}
break;
case MEDIA_CDROM:
bdrv_set_type_hint(dinfo->bdrv, BDRV_TYPE_CDROM);
break;
}
break;
case IF_SD:
/* FIXME: This isn't really a floppy, but it's a reasonable
approximation. */
case IF_FLOPPY:
bdrv_set_type_hint(dinfo->bdrv, BDRV_TYPE_FLOPPY);
break;
case IF_PFLASH:
case IF_MTD:
break;
case IF_VIRTIO:
/* add virtio block device */
opts = qemu_opts_create(&qemu_device_opts, NULL, 0);
qemu_opt_set(opts, "driver", "virtio-blk-pci");
qemu_opt_set(opts, "drive", dinfo->id);
if (devaddr)
qemu_opt_set(opts, "addr", devaddr);
break;
case IF_COUNT:
abort();
}
if (!file) {
*fatal_error = 0;
return NULL;
}
if (snapshot) {
/* always use cache=unsafe with snapshot */
bdrv_flags &= ~BDRV_O_CACHE_MASK;
bdrv_flags |= (BDRV_O_SNAPSHOT|BDRV_O_CACHE_WB|BDRV_O_NO_FLUSH);
}
if (media == MEDIA_CDROM) {
/* CDROM is fine for any interface, don't check. */
ro = 1;
} else if (ro == 1) {
if (type != IF_SCSI && type != IF_VIRTIO && type != IF_FLOPPY && type != IF_NONE) {
fprintf(stderr, "qemu: readonly flag not supported for drive with this interface\n");
return NULL;
}
}
bdrv_flags |= ro ? 0 : BDRV_O_RDWR;
ret = bdrv_open(dinfo->bdrv, file, bdrv_flags, drv);
if (ret < 0) {
fprintf(stderr, "qemu: could not open disk image %s: %s\n",
file, strerror(-ret));
return NULL;
}
if (bdrv_key_required(dinfo->bdrv))
autostart = 0;
*fatal_error = 0;
return dinfo;
}
| true | qemu | 653dbec7c97cb51d19636423902719e5850da265 |
3,961 | static int rtsp_read_play(AVFormatContext *s)
{
RTSPState *rt = s->priv_data;
RTSPMessageHeader reply1, *reply = &reply1;
int i;
char cmd[1024];
av_log(s, AV_LOG_DEBUG, "hello state=%d\n", rt->state);
if (!(rt->server_type == RTSP_SERVER_REAL && rt->need_subscription)) {
if (rt->state == RTSP_STATE_PAUSED) {
cmd[0] = 0;
} else {
snprintf(cmd, sizeof(cmd),
"Range: npt=%0.3f-\r\n",
(double)rt->seek_timestamp / AV_TIME_BASE);
}
ff_rtsp_send_cmd(s, "PLAY", rt->control_uri, cmd, reply, NULL);
if (reply->status_code != RTSP_STATUS_OK) {
return -1;
}
if (reply->range_start != AV_NOPTS_VALUE &&
rt->transport == RTSP_TRANSPORT_RTP) {
for (i = 0; i < rt->nb_rtsp_streams; i++) {
RTSPStream *rtsp_st = rt->rtsp_streams[i];
RTPDemuxContext *rtpctx = rtsp_st->transport_priv;
AVStream *st = NULL;
if (rtsp_st->stream_index >= 0)
st = s->streams[rtsp_st->stream_index];
rtpctx->last_rtcp_ntp_time = AV_NOPTS_VALUE;
rtpctx->first_rtcp_ntp_time = AV_NOPTS_VALUE;
if (st)
rtpctx->range_start_offset = av_rescale_q(reply->range_start,
AV_TIME_BASE_Q,
st->time_base);
}
}
}
rt->state = RTSP_STATE_STREAMING;
return 0;
} | true | FFmpeg | 8d168a9207f231c22a04a5a2b252d0ab89477b02 |
3,962 | static int aiff_read_header(AVFormatContext *s)
{
int size, filesize;
int64_t offset = 0;
uint32_t tag;
unsigned version = AIFF_C_VERSION1;
AVIOContext *pb = s->pb;
AVStream * st;
AIFFInputContext *aiff = s->priv_data;
/* check FORM header */
filesize = get_tag(pb, &tag);
if (filesize < 0 || tag != MKTAG('F', 'O', 'R', 'M'))
return AVERROR_INVALIDDATA;
/* AIFF data type */
tag = avio_rl32(pb);
if (tag == MKTAG('A', 'I', 'F', 'F')) /* Got an AIFF file */
version = AIFF;
else if (tag != MKTAG('A', 'I', 'F', 'C')) /* An AIFF-C file then */
return AVERROR_INVALIDDATA;
filesize -= 4;
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
while (filesize > 0) {
/* parse different chunks */
size = get_tag(pb, &tag);
if (size < 0)
return size;
filesize -= size + 8;
switch (tag) {
case MKTAG('C', 'O', 'M', 'M'): /* Common chunk */
/* Then for the complete header info */
st->nb_frames = get_aiff_header(s, size, version);
if (st->nb_frames < 0)
return st->nb_frames;
if (offset > 0) // COMM is after SSND
goto got_sound;
break;
case MKTAG('F', 'V', 'E', 'R'): /* Version chunk */
version = avio_rb32(pb);
break;
case MKTAG('N', 'A', 'M', 'E'): /* Sample name chunk */
get_meta(s, "title" , size);
break;
case MKTAG('A', 'U', 'T', 'H'): /* Author chunk */
get_meta(s, "author" , size);
break;
case MKTAG('(', 'c', ')', ' '): /* Copyright chunk */
get_meta(s, "copyright", size);
break;
case MKTAG('A', 'N', 'N', 'O'): /* Annotation chunk */
get_meta(s, "comment" , size);
break;
case MKTAG('S', 'S', 'N', 'D'): /* Sampled sound chunk */
aiff->data_end = avio_tell(pb) + size;
offset = avio_rb32(pb); /* Offset of sound data */
avio_rb32(pb); /* BlockSize... don't care */
offset += avio_tell(pb); /* Compute absolute data offset */
if (st->codecpar->block_align) /* Assume COMM already parsed */
goto got_sound;
if (!pb->seekable) {
av_log(s, AV_LOG_ERROR, "file is not seekable\n");
return -1;
}
avio_skip(pb, size - 8);
break;
case MKTAG('w', 'a', 'v', 'e'):
if ((uint64_t)size > (1<<30))
return -1;
st->codecpar->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
if (!st->codecpar->extradata)
return AVERROR(ENOMEM);
st->codecpar->extradata_size = size;
avio_read(pb, st->codecpar->extradata, size);
break;
default: /* Jump */
avio_skip(pb, size);
}
/* Skip required padding byte for odd-sized chunks. */
if (size & 1) {
filesize--;
avio_skip(pb, 1);
}
}
got_sound:
if (!st->codecpar->block_align) {
av_log(s, AV_LOG_ERROR, "could not find COMM tag or invalid block_align value\n");
return -1;
}
/* Now positioned, get the sound data start and end */
avpriv_set_pts_info(st, 64, 1, st->codecpar->sample_rate);
st->start_time = 0;
st->duration = st->nb_frames * aiff->block_duration;
/* Position the stream at the first block */
avio_seek(pb, offset, SEEK_SET);
return 0;
}
| false | FFmpeg | 83548fe894cdb455cc127f754d09905b6d23c173 |
3,963 | static void piix4_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->no_hotplug = 1;
k->init = piix4_initfn;
k->vendor_id = PCI_VENDOR_ID_INTEL;
k->device_id = PCI_DEVICE_ID_INTEL_82371AB_0;
k->class_id = PCI_CLASS_BRIDGE_ISA;
dc->desc = "ISA bridge";
dc->no_user = 1;
dc->vmsd = &vmstate_piix4;
}
| true | qemu | efec3dd631d94160288392721a5f9c39e50fb2bc |
3,964 | static int asfrtp_parse_packet(AVFormatContext *s, PayloadContext *asf,
AVStream *st, AVPacket *pkt,
uint32_t *timestamp,
const uint8_t *buf, int len, int flags)
{
AVIOContext *pb = &asf->pb;
int res, mflags, len_off;
RTSPState *rt = s->priv_data;
if (!rt->asf_ctx)
if (len > 0) {
int off, out_len = 0;
if (len < 4)
av_freep(&asf->buf);
ffio_init_context(pb, buf, len, 0, NULL, NULL, NULL, NULL);
while (avio_tell(pb) + 4 < len) {
int start_off = avio_tell(pb);
mflags = avio_r8(pb);
if (mflags & 0x80)
flags |= RTP_FLAG_KEY;
len_off = avio_rb24(pb);
if (mflags & 0x20) /**< relative timestamp */
avio_skip(pb, 4);
if (mflags & 0x10) /**< has duration */
avio_skip(pb, 4);
if (mflags & 0x8) /**< has location ID */
avio_skip(pb, 4);
off = avio_tell(pb);
if (!(mflags & 0x40)) {
/**
* If 0x40 is not set, the len_off field specifies an offset
* of this packet's payload data in the complete (reassembled)
* ASF packet. This is used to spread one ASF packet over
* multiple RTP packets.
*/
if (asf->pktbuf && len_off != avio_tell(asf->pktbuf)) {
uint8_t *p;
avio_close_dyn_buf(asf->pktbuf, &p);
asf->pktbuf = NULL;
av_free(p);
}
if (!len_off && !asf->pktbuf &&
(res = avio_open_dyn_buf(&asf->pktbuf)) < 0)
return res;
if (!asf->pktbuf)
return AVERROR(EIO);
avio_write(asf->pktbuf, buf + off, len - off);
avio_skip(pb, len - off);
if (!(flags & RTP_FLAG_MARKER))
out_len = avio_close_dyn_buf(asf->pktbuf, &asf->buf);
asf->pktbuf = NULL;
} else {
/**
* If 0x40 is set, the len_off field specifies the length of
* the next ASF packet that can be read from this payload
* data alone. This is commonly the same as the payload size,
* but could be less in case of packet splitting (i.e.
* multiple ASF packets in one RTP packet).
*/
int cur_len = start_off + len_off - off;
int prev_len = out_len;
out_len += cur_len;
asf->buf = av_realloc(asf->buf, out_len);
memcpy(asf->buf + prev_len, buf + off,
FFMIN(cur_len, len - off));
avio_skip(pb, cur_len);
}
}
init_packetizer(pb, asf->buf, out_len);
pb->pos += rt->asf_pb_pos;
pb->eof_reached = 0;
rt->asf_ctx->pb = pb;
}
for (;;) {
int i;
res = av_read_packet(rt->asf_ctx, pkt);
rt->asf_pb_pos = avio_tell(pb);
if (res != 0)
break;
for (i = 0; i < s->nb_streams; i++) {
if (s->streams[i]->id == rt->asf_ctx->streams[pkt->stream_index]->id) {
pkt->stream_index = i;
return 1; // FIXME: return 0 if last packet
}
}
av_free_packet(pkt);
}
return res == 1 ? -1 : res;
} | true | FFmpeg | ba9a7e0d71bd34f8b89ae99322b62a310be163a6 |
3,965 | int match_ext(const char *filename, const char *extensions)
{
const char *ext, *p;
char ext1[32], *q;
if(!filename)
return 0;
ext = strrchr(filename, '.');
if (ext) {
ext++;
p = extensions;
for(;;) {
q = ext1;
while (*p != '\0' && *p != ',')
*q++ = *p++;
*q = '\0';
if (!strcasecmp(ext1, ext))
return 1;
if (*p == '\0')
break;
p++;
}
}
return 0;
} | true | FFmpeg | 0ecca7a49f8e254c12a3a1de048d738bfbb614c6
3,966 | static int avi_read_header(AVFormatContext *s)
{
AVIContext *avi = s->priv_data;
AVIOContext *pb = s->pb;
unsigned int tag, tag1, handler;
int codec_type, stream_index, frame_period;
unsigned int size;
int i;
AVStream *st;
AVIStream *ast = NULL;
int avih_width = 0, avih_height = 0;
int amv_file_format = 0;
uint64_t list_end = 0;
int64_t pos;
int ret;
AVDictionaryEntry *dict_entry;
avi->stream_index = -1;
ret = get_riff(s, pb);
if (ret < 0)
return ret;
av_log(avi, AV_LOG_DEBUG, "use odml:%d\n", avi->use_odml);
avi->io_fsize = avi->fsize = avio_size(pb);
if (avi->fsize <= 0 || avi->fsize < avi->riff_end)
avi->fsize = avi->riff_end == 8 ? INT64_MAX : avi->riff_end;
/* first list tag */
stream_index = -1;
codec_type = -1;
frame_period = 0;
for (;;) {
if (avio_feof(pb))
goto fail;
tag = avio_rl32(pb);
size = avio_rl32(pb);
print_tag("tag", tag, size);
switch (tag) {
case MKTAG('L', 'I', 'S', 'T'):
list_end = avio_tell(pb) + size;
/* Ignored, except at start of video packets. */
tag1 = avio_rl32(pb);
print_tag("list", tag1, 0);
if (tag1 == MKTAG('m', 'o', 'v', 'i')) {
avi->movi_list = avio_tell(pb) - 4;
if (size)
avi->movi_end = avi->movi_list + size + (size & 1);
else
avi->movi_end = avi->fsize;
av_log(NULL, AV_LOG_TRACE, "movi end=%"PRIx64"\n", avi->movi_end);
goto end_of_header;
} else if (tag1 == MKTAG('I', 'N', 'F', 'O'))
ff_read_riff_info(s, size - 4);
else if (tag1 == MKTAG('n', 'c', 'd', 't'))
avi_read_nikon(s, list_end);
break;
case MKTAG('I', 'D', 'I', 'T'):
{
unsigned char date[64] = { 0 };
size += (size & 1);
size -= avio_read(pb, date, FFMIN(size, sizeof(date) - 1));
avio_skip(pb, size);
avi_metadata_creation_time(&s->metadata, date);
break;
case MKTAG('d', 'm', 'l', 'h'):
avi->is_odml = 1;
avio_skip(pb, size + (size & 1));
break;
case MKTAG('a', 'm', 'v', 'h'):
amv_file_format = 1;
case MKTAG('a', 'v', 'i', 'h'):
/* AVI header */
/* using frame_period is bad idea */
frame_period = avio_rl32(pb);
avio_rl32(pb); /* max. bytes per second */
avio_rl32(pb);
avi->non_interleaved |= avio_rl32(pb) & AVIF_MUSTUSEINDEX;
avio_skip(pb, 2 * 4);
avio_rl32(pb);
avio_rl32(pb);
avih_width = avio_rl32(pb);
avih_height = avio_rl32(pb);
avio_skip(pb, size - 10 * 4);
break;
case MKTAG('s', 't', 'r', 'h'):
/* stream header */
tag1 = avio_rl32(pb);
handler = avio_rl32(pb); /* codec tag */
if (tag1 == MKTAG('p', 'a', 'd', 's')) {
avio_skip(pb, size - 8);
break;
} else {
stream_index++;
st = avformat_new_stream(s, NULL);
if (!st)
goto fail;
st->id = stream_index;
ast = av_mallocz(sizeof(AVIStream));
if (!ast)
goto fail;
st->priv_data = ast;
if (amv_file_format)
tag1 = stream_index ? MKTAG('a', 'u', 'd', 's')
: MKTAG('v', 'i', 'd', 's');
print_tag("strh", tag1, -1);
if (tag1 == MKTAG('i', 'a', 'v', 's') ||
tag1 == MKTAG('i', 'v', 'a', 's')) {
int64_t dv_dur;
/* After some consideration -- I don't think we
* have to support anything but DV in type1 AVIs. */
if (s->nb_streams != 1)
goto fail;
if (handler != MKTAG('d', 'v', 's', 'd') &&
handler != MKTAG('d', 'v', 'h', 'd') &&
handler != MKTAG('d', 'v', 's', 'l'))
goto fail;
ast = s->streams[0]->priv_data;
av_freep(&s->streams[0]->codecpar->extradata);
av_freep(&s->streams[0]->codecpar);
#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
av_freep(&s->streams[0]->codec);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (s->streams[0]->info)
av_freep(&s->streams[0]->info->duration_error);
av_freep(&s->streams[0]->info);
if (s->streams[0]->internal)
av_freep(&s->streams[0]->internal->avctx);
av_freep(&s->streams[0]->internal);
av_freep(&s->streams[0]);
s->nb_streams = 0;
if (CONFIG_DV_DEMUXER) {
avi->dv_demux = avpriv_dv_init_demux(s);
if (!avi->dv_demux)
goto fail;
} else
goto fail;
s->streams[0]->priv_data = ast;
avio_skip(pb, 3 * 4);
ast->scale = avio_rl32(pb);
ast->rate = avio_rl32(pb);
avio_skip(pb, 4); /* start time */
dv_dur = avio_rl32(pb);
if (ast->scale > 0 && ast->rate > 0 && dv_dur > 0) {
dv_dur *= AV_TIME_BASE;
s->duration = av_rescale(dv_dur, ast->scale, ast->rate);
/* else, leave duration alone; timing estimation in utils.c
* will make a guess based on bitrate. */
stream_index = s->nb_streams - 1;
avio_skip(pb, size - 9 * 4);
break;
av_assert0(stream_index < s->nb_streams);
ast->handler = handler;
avio_rl32(pb); /* flags */
avio_rl16(pb); /* priority */
avio_rl16(pb); /* language */
avio_rl32(pb); /* initial frame */
ast->scale = avio_rl32(pb);
ast->rate = avio_rl32(pb);
if (!(ast->scale && ast->rate)) {
av_log(s, AV_LOG_WARNING,
"scale/rate is %"PRIu32"/%"PRIu32" which is invalid. "
"(This file has been generated by broken software.)\n",
ast->scale,
ast->rate);
if (frame_period) {
ast->rate = 1000000;
ast->scale = frame_period;
} else {
ast->rate = 25;
ast->scale = 1;
avpriv_set_pts_info(st, 64, ast->scale, ast->rate);
ast->cum_len = avio_rl32(pb); /* start */
st->nb_frames = avio_rl32(pb);
st->start_time = 0;
avio_rl32(pb); /* buffer size */
avio_rl32(pb); /* quality */
if (ast->cum_len*ast->scale/ast->rate > 3600) {
av_log(s, AV_LOG_ERROR, "crazy start time, iam scared, giving up\n");
ast->cum_len = 0;
ast->sample_size = avio_rl32(pb);
ast->cum_len *= FFMAX(1, ast->sample_size);
av_log(s, AV_LOG_TRACE, "%"PRIu32" %"PRIu32" %d\n",
ast->rate, ast->scale, ast->sample_size);
switch (tag1) {
case MKTAG('v', 'i', 'd', 's'):
codec_type = AVMEDIA_TYPE_VIDEO;
ast->sample_size = 0;
st->avg_frame_rate = av_inv_q(st->time_base);
break;
case MKTAG('a', 'u', 'd', 's'):
codec_type = AVMEDIA_TYPE_AUDIO;
break;
case MKTAG('t', 'x', 't', 's'):
codec_type = AVMEDIA_TYPE_SUBTITLE;
break;
case MKTAG('d', 'a', 't', 's'):
codec_type = AVMEDIA_TYPE_DATA;
break;
default:
av_log(s, AV_LOG_INFO, "unknown stream type %X\n", tag1);
if (ast->sample_size < 0) {
if (s->error_recognition & AV_EF_EXPLODE) {
av_log(s, AV_LOG_ERROR,
"Invalid sample_size %d at stream %d\n",
ast->sample_size,
stream_index);
goto fail;
av_log(s, AV_LOG_WARNING,
"Invalid sample_size %d at stream %d "
"setting it to 0\n",
ast->sample_size,
stream_index);
ast->sample_size = 0;
if (ast->sample_size == 0) {
st->duration = st->nb_frames;
if (st->duration > 0 && avi->io_fsize > 0 && avi->riff_end > avi->io_fsize) {
av_log(s, AV_LOG_DEBUG, "File is truncated adjusting duration\n");
st->duration = av_rescale(st->duration, avi->io_fsize, avi->riff_end);
ast->frame_offset = ast->cum_len;
avio_skip(pb, size - 12 * 4);
break;
case MKTAG('s', 't', 'r', 'f'):
/* stream header */
if (!size)
break;
if (stream_index >= (unsigned)s->nb_streams || avi->dv_demux) {
avio_skip(pb, size);
} else {
uint64_t cur_pos = avio_tell(pb);
unsigned esize;
if (cur_pos < list_end)
size = FFMIN(size, list_end - cur_pos);
st = s->streams[stream_index];
if (st->codecpar->codec_type != AVMEDIA_TYPE_UNKNOWN) {
avio_skip(pb, size);
break;
switch (codec_type) {
case AVMEDIA_TYPE_VIDEO:
if (amv_file_format) {
st->codecpar->width = avih_width;
st->codecpar->height = avih_height;
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_AMV;
avio_skip(pb, size);
break;
tag1 = ff_get_bmp_header(pb, st, &esize);
if (tag1 == MKTAG('D', 'X', 'S', 'B') ||
tag1 == MKTAG('D', 'X', 'S', 'A')) {
st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
st->codecpar->codec_tag = tag1;
st->codecpar->codec_id = AV_CODEC_ID_XSUB;
break;
if (size > 10 * 4 && size < (1 << 30) && size < avi->fsize) {
if (esize == size-1 && (esize&1)) {
st->codecpar->extradata_size = esize - 10 * 4;
} else
st->codecpar->extradata_size = size - 10 * 4;
if (ff_get_extradata(s, st->codecpar, pb, st->codecpar->extradata_size) < 0)
return AVERROR(ENOMEM);
// FIXME: check if the encoder really did this correctly
if (st->codecpar->extradata_size & 1)
avio_r8(pb);
/* Extract palette from extradata if bpp <= 8.
* This code assumes that extradata contains only palette.
* This is true for all paletted codecs implemented in
* FFmpeg. */
if (st->codecpar->extradata_size &&
(st->codecpar->bits_per_coded_sample <= 8)) {
int pal_size = (1 << st->codecpar->bits_per_coded_sample) << 2;
const uint8_t *pal_src;
pal_size = FFMIN(pal_size, st->codecpar->extradata_size);
pal_src = st->codecpar->extradata +
st->codecpar->extradata_size - pal_size;
/* Exclude the "BottomUp" field from the palette */
if (pal_src - st->codecpar->extradata >= 9 &&
!memcmp(st->codecpar->extradata + st->codecpar->extradata_size - 9, "BottomUp", 9))
pal_src -= 9;
for (i = 0; i < pal_size / 4; i++)
ast->pal[i] = 0xFFU<<24 | AV_RL32(pal_src+4*i);
ast->has_pal = 1;
print_tag("video", tag1, 0);
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_tag = tag1;
st->codecpar->codec_id = ff_codec_get_id(ff_codec_bmp_tags,
tag1);
/* If codec is not found yet, try with the mov tags. */
if (!st->codecpar->codec_id) {
char tag_buf[32];
av_get_codec_tag_string(tag_buf, sizeof(tag_buf), tag1);
st->codecpar->codec_id =
ff_codec_get_id(ff_codec_movvideo_tags, tag1);
if (st->codecpar->codec_id)
av_log(s, AV_LOG_WARNING,
"mov tag found in avi (fourcc %s)\n",
tag_buf);
/* This is needed to get the pict type which is necessary
* for generating correct pts. */
st->need_parsing = AVSTREAM_PARSE_HEADERS;
if (st->codecpar->codec_id == AV_CODEC_ID_MPEG4 &&
ast->handler == MKTAG('X', 'V', 'I', 'D'))
st->codecpar->codec_tag = MKTAG('X', 'V', 'I', 'D');
if (st->codecpar->codec_tag == MKTAG('V', 'S', 'S', 'H'))
st->need_parsing = AVSTREAM_PARSE_FULL;
if (st->codecpar->codec_id == AV_CODEC_ID_RV40)
st->need_parsing = AVSTREAM_PARSE_NONE;
if (st->codecpar->codec_tag == 0 && st->codecpar->height > 0 &&
st->codecpar->extradata_size < 1U << 30) {
st->codecpar->extradata_size += 9;
if ((ret = av_reallocp(&st->codecpar->extradata,
st->codecpar->extradata_size +
AV_INPUT_BUFFER_PADDING_SIZE)) < 0) {
st->codecpar->extradata_size = 0;
return ret;
} else
memcpy(st->codecpar->extradata + st->codecpar->extradata_size - 9,
"BottomUp", 9);
st->codecpar->height = FFABS(st->codecpar->height);
// avio_skip(pb, size - 5 * 4);
break;
case AVMEDIA_TYPE_AUDIO:
ret = ff_get_wav_header(s, pb, st->codecpar, size, 0);
if (ret < 0)
return ret;
ast->dshow_block_align = st->codecpar->block_align;
if (ast->sample_size && st->codecpar->block_align &&
ast->sample_size != st->codecpar->block_align) {
av_log(s,
AV_LOG_WARNING,
"sample size (%d) != block align (%d)\n",
ast->sample_size,
st->codecpar->block_align);
ast->sample_size = st->codecpar->block_align;
/* 2-aligned
* (fix for Stargate SG-1 - 3x18 - Shades of Grey.avi) */
if (size & 1)
avio_skip(pb, 1);
/* Force parsing as several audio frames can be in
* one packet and timestamps refer to packet start. */
st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
/* ADTS header is in extradata, AAC without header must be
* stored as exact frames. Parser not needed and it will
* fail. */
if (st->codecpar->codec_id == AV_CODEC_ID_AAC &&
st->codecpar->extradata_size)
st->need_parsing = AVSTREAM_PARSE_NONE;
// The flac parser does not work with AVSTREAM_PARSE_TIMESTAMPS
if (st->codecpar->codec_id == AV_CODEC_ID_FLAC)
st->need_parsing = AVSTREAM_PARSE_NONE;
/* AVI files with Xan DPCM audio (wrongly) declare PCM
* audio in the header but have Axan as stream_code_tag. */
if (ast->handler == AV_RL32("Axan")) {
st->codecpar->codec_id = AV_CODEC_ID_XAN_DPCM;
st->codecpar->codec_tag = 0;
ast->dshow_block_align = 0;
if (amv_file_format) {
st->codecpar->codec_id = AV_CODEC_ID_ADPCM_IMA_AMV;
ast->dshow_block_align = 0;
if ((st->codecpar->codec_id == AV_CODEC_ID_AAC ||
st->codecpar->codec_id == AV_CODEC_ID_FLAC ||
st->codecpar->codec_id == AV_CODEC_ID_MP2 ) && ast->dshow_block_align <= 4 && ast->dshow_block_align) {
av_log(s, AV_LOG_DEBUG, "overriding invalid dshow_block_align of %d\n", ast->dshow_block_align);
ast->dshow_block_align = 0;
if (st->codecpar->codec_id == AV_CODEC_ID_AAC && ast->dshow_block_align == 1024 && ast->sample_size == 1024 ||
st->codecpar->codec_id == AV_CODEC_ID_AAC && ast->dshow_block_align == 4096 && ast->sample_size == 4096 ||
st->codecpar->codec_id == AV_CODEC_ID_MP3 && ast->dshow_block_align == 1152 && ast->sample_size == 1152) {
av_log(s, AV_LOG_DEBUG, "overriding sample_size\n");
ast->sample_size = 0;
break;
case AVMEDIA_TYPE_SUBTITLE:
st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
st->request_probe= 1;
avio_skip(pb, size);
break;
default:
st->codecpar->codec_type = AVMEDIA_TYPE_DATA;
st->codecpar->codec_id = AV_CODEC_ID_NONE;
st->codecpar->codec_tag = 0;
avio_skip(pb, size);
break;
break;
case MKTAG('s', 't', 'r', 'd'):
if (stream_index >= (unsigned)s->nb_streams
|| s->streams[stream_index]->codecpar->extradata_size
|| s->streams[stream_index]->codecpar->codec_tag == MKTAG('H','2','6','4')) {
avio_skip(pb, size);
} else {
uint64_t cur_pos = avio_tell(pb);
if (cur_pos < list_end)
size = FFMIN(size, list_end - cur_pos);
st = s->streams[stream_index];
if (size<(1<<30)) {
av_log(s, AV_LOG_WARNING, "New extradata in strd chunk, freeing previous one.\n");
if (ff_get_extradata(s, st->codecpar, pb, size) < 0)
return AVERROR(ENOMEM);
if (st->codecpar->extradata_size & 1) //FIXME check if the encoder really did this correctly
avio_r8(pb);
ret = avi_extract_stream_metadata(s, st);
if (ret < 0) {
av_log(s, AV_LOG_WARNING, "could not decoding EXIF data in stream header.\n");
break;
case MKTAG('i', 'n', 'd', 'x'):
pos = avio_tell(pb);
if (pb->seekable && !(s->flags & AVFMT_FLAG_IGNIDX) &&
avi->use_odml &&
read_braindead_odml_indx(s, 0) < 0 &&
(s->error_recognition & AV_EF_EXPLODE))
goto fail;
avio_seek(pb, pos + size, SEEK_SET);
break;
case MKTAG('v', 'p', 'r', 'p'):
if (stream_index < (unsigned)s->nb_streams && size > 9 * 4) {
AVRational active, active_aspect;
st = s->streams[stream_index];
avio_rl32(pb);
avio_rl32(pb);
avio_rl32(pb);
avio_rl32(pb);
avio_rl32(pb);
active_aspect.den = avio_rl16(pb);
active_aspect.num = avio_rl16(pb);
active.num = avio_rl32(pb);
active.den = avio_rl32(pb);
avio_rl32(pb); // nbFieldsPerFrame
if (active_aspect.num && active_aspect.den &&
active.num && active.den) {
st->sample_aspect_ratio = av_div_q(active_aspect, active);
av_log(s, AV_LOG_TRACE, "vprp %d/%d %d/%d\n",
active_aspect.num, active_aspect.den,
active.num, active.den);
size -= 9 * 4;
avio_skip(pb, size);
break;
case MKTAG('s', 't', 'r', 'n'):
if (s->nb_streams) {
ret = avi_read_tag(s, s->streams[s->nb_streams - 1], tag, size);
if (ret < 0)
return ret;
break;
default:
if (size > 1000000) {
char tag_buf[32];
av_get_codec_tag_string(tag_buf, sizeof(tag_buf), tag);
av_log(s, AV_LOG_ERROR,
"Something went wrong during header parsing, "
"tag %s has size %u, "
"I will ignore it and try to continue anyway.\n",
tag_buf, size);
if (s->error_recognition & AV_EF_EXPLODE)
goto fail;
avi->movi_list = avio_tell(pb) - 4;
avi->movi_end = avi->fsize;
goto end_of_header;
/* Do not fail for very large idx1 tags */
case MKTAG('i', 'd', 'x', '1'):
/* skip tag */
size += (size & 1);
avio_skip(pb, size);
break;
end_of_header:
/* check stream number */
if (stream_index != s->nb_streams - 1) {
fail:
return AVERROR_INVALIDDATA;
if (!avi->index_loaded && pb->seekable)
avi_load_index(s);
calculate_bitrate(s);
avi->index_loaded |= 1;
if ((ret = guess_ni_flag(s)) < 0)
return ret;
avi->non_interleaved |= ret | (s->flags & AVFMT_FLAG_SORT_DTS);
dict_entry = av_dict_get(s->metadata, "ISFT", NULL, 0);
if (dict_entry && !strcmp(dict_entry->value, "PotEncoder"))
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
if ( st->codecpar->codec_id == AV_CODEC_ID_MPEG1VIDEO
|| st->codecpar->codec_id == AV_CODEC_ID_MPEG2VIDEO)
st->need_parsing = AVSTREAM_PARSE_FULL;
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
if (st->nb_index_entries)
break;
// DV-in-AVI cannot be non-interleaved, if set this must be
// a mis-detection.
if (avi->dv_demux)
avi->non_interleaved = 0;
if (i == s->nb_streams && avi->non_interleaved) {
av_log(s, AV_LOG_WARNING,
"Non-interleaved AVI without index, switching to interleaved\n");
avi->non_interleaved = 0;
if (avi->non_interleaved) {
av_log(s, AV_LOG_INFO, "non-interleaved AVI\n");
clean_index(s);
ff_metadata_conv_ctx(s, NULL, avi_metadata_conv);
ff_metadata_conv_ctx(s, NULL, ff_riff_info_conv);
return 0; | true | FFmpeg | 2c908f22c2af0775a6ffe3466aad174a0ace00dd
3,967 | static void cirrus_invalidate_region(CirrusVGAState * s, int off_begin,
int off_pitch, int bytesperline,
int lines)
{
int y;
int off_cur;
int off_cur_end;
for (y = 0; y < lines; y++) {
off_cur = off_begin;
off_cur_end = off_cur + bytesperline;
off_cur &= TARGET_PAGE_MASK;
while (off_cur < off_cur_end) {
cpu_physical_memory_set_dirty(s->vram_offset + off_cur);
off_cur += TARGET_PAGE_SIZE;
}
off_begin += off_pitch;
}
} | true | qemu | b2eb849d4b1fdb6f35d5c46958c7f703cf64cfef
3,972 | static inline float to_float(uint8_t exp, int16_t mantissa)
{
return ((float) (mantissa * scale_factors[exp]));
} | false | FFmpeg | 486637af8ef29ec215e0e0b7ecd3b5470f0e04e5
3,973 | static int encode_init(AVCodecContext *avctx)
{
HYuvContext *s = avctx->priv_data;
int i, j, width, height;
s->avctx= avctx;
s->flags= avctx->flags;
dsputil_init(&s->dsp, avctx);
width= s->width= avctx->width;
height= s->height= avctx->height;
assert(width && height);
avctx->extradata= av_mallocz(1024*30);
avctx->stats_out= av_mallocz(1024*30);
s->version=2;
avctx->coded_frame= &s->picture;
switch(avctx->pix_fmt){
case PIX_FMT_YUV420P:
s->bitstream_bpp= 12;
break;
case PIX_FMT_YUV422P:
s->bitstream_bpp= 16;
break;
default:
av_log(avctx, AV_LOG_ERROR, "format not supported\n");
return -1;
}
avctx->bits_per_sample= s->bitstream_bpp;
s->decorrelate= s->bitstream_bpp >= 24;
s->predictor= avctx->prediction_method;
s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
if(avctx->context_model==1){
s->context= avctx->context_model;
if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
return -1;
}
}else s->context= 0;
if(avctx->codec->id==CODEC_ID_HUFFYUV){
if(avctx->pix_fmt==PIX_FMT_YUV420P){
av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
return -1;
}
if(avctx->context_model){
av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
return -1;
}
if(s->interlaced != ( height > 288 ))
av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
}else if(avctx->strict_std_compliance>=0){
av_log(avctx, AV_LOG_ERROR, "This codec is under development; files encoded with it may not be decodeable with future versions!!! Set vstrict=-1 to use it anyway.\n");
return -1;
}
((uint8_t*)avctx->extradata)[0]= s->predictor;
((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
((uint8_t*)avctx->extradata)[2]= 0x20 | (s->interlaced ? 0x10 : 0);
if(s->context)
((uint8_t*)avctx->extradata)[2]|= 0x40;
((uint8_t*)avctx->extradata)[3]= 0;
s->avctx->extradata_size= 4;
if(avctx->stats_in){
char *p= avctx->stats_in;
for(i=0; i<3; i++)
for(j=0; j<256; j++)
s->stats[i][j]= 1;
for(;;){
for(i=0; i<3; i++){
char *next;
for(j=0; j<256; j++){
s->stats[i][j]+= strtol(p, &next, 0);
if(next==p) return -1;
p=next;
}
}
if(p[0]==0 || p[1]==0 || p[2]==0) break;
}
}else{
for(i=0; i<3; i++)
for(j=0; j<256; j++){
int d= FFMIN(j, 256-j);
s->stats[i][j]= 100000000/(d+1);
}
}
for(i=0; i<3; i++){
generate_len_table(s->len[i], s->stats[i], 256);
if(generate_bits_table(s->bits[i], s->len[i])<0){
return -1;
}
s->avctx->extradata_size+=
store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
}
if(s->context){
for(i=0; i<3; i++){
int pels = width*height / (i?40:10);
for(j=0; j<256; j++){
int d= FFMIN(j, 256-j);
s->stats[i][j]= pels/(d+1);
}
}
}else{
for(i=0; i<3; i++)
for(j=0; j<256; j++)
s->stats[i][j]= 0;
}
// printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_sample, s->interlaced);
s->picture_number=0;
return 0;
} | false | FFmpeg | 42dd22c4a5c8463488d442eeb234b822bcc39625
3,975 | void error_set_win32(Error **errp, int win32_err, ErrorClass err_class,
const char *fmt, ...)
{
va_list ap;
char *msg1, *msg2;
if (errp == NULL) {
return;
}
va_start(ap, fmt);
error_setv(errp, err_class, fmt, ap);
va_end(ap);
if (win32_err != 0) {
msg1 = (*errp)->msg;
msg2 = g_win32_error_message(win32_err);
(*errp)->msg = g_strdup_printf("%s: %s (error: %x)", msg1, msg2,
(unsigned)win32_err);
g_free(msg2);
g_free(msg1);
}
} | true | qemu | e7cf59e84767e30b507b6bd7c1347072ec12b636
3,976 | av_cold void ff_dither_init_x86(DitherDSPContext *ddsp,
enum AVResampleDitherMethod method)
{
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_SSE2(cpu_flags)) {
ddsp->quantize = ff_quantize_sse2;
ddsp->ptr_align = 16;
ddsp->samples_align = 8;
}
if (method == AV_RESAMPLE_DITHER_RECTANGULAR) {
if (EXTERNAL_SSE2(cpu_flags)) {
ddsp->dither_int_to_float = ff_dither_int_to_float_rectangular_sse2;
}
if (EXTERNAL_AVX(cpu_flags)) {
ddsp->dither_int_to_float = ff_dither_int_to_float_rectangular_avx;
}
} else {
if (EXTERNAL_SSE2(cpu_flags)) {
ddsp->dither_int_to_float = ff_dither_int_to_float_triangular_sse2;
}
if (EXTERNAL_AVX(cpu_flags)) {
ddsp->dither_int_to_float = ff_dither_int_to_float_triangular_avx;
}
}
} | false | FFmpeg | d68c05380cebf563915412182643a8be04ef890b
3,978 | static int qio_channel_socket_dgram_worker(QIOTask *task,
Error **errp,
gpointer opaque)
{
QIOChannelSocket *ioc = QIO_CHANNEL_SOCKET(qio_task_get_source(task));
struct QIOChannelSocketDGramWorkerData *data = opaque;
int ret;
/* socket_dgram() blocks in DNS lookups, so we must use a thread */
ret = qio_channel_socket_dgram_sync(ioc,
data->localAddr,
data->remoteAddr,
errp);
object_unref(OBJECT(ioc));
return ret;
} | true | qemu | 937470bb5470825e781ae50e92ff973a6b54d80f
3,979 | static void acpi_dsdt_add_cpus(Aml *scope, int smp_cpus)
{
uint16_t i;
for (i = 0; i < smp_cpus; i++) {
Aml *dev = aml_device("C%03x", i);
aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
aml_append(dev, aml_name_decl("_UID", aml_int(i)));
aml_append(scope, dev);
}
} | true | qemu | f460be435f8750d5d1484d3d8b9e5b2c334f0e20
3,980 | static int svq1_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
MpegEncContext *s=avctx->priv_data;
uint8_t *current, *previous;
int result, i, x, y, width, height;
AVFrame *pict = data;
svq1_pmv *pmv;
/* initialize bit buffer */
init_get_bits(&s->gb,buf,buf_size*8);
/* decode frame header */
s->f_code = get_bits (&s->gb, 22);
if ((s->f_code & ~0x70) || !(s->f_code & 0x60))
return -1;
/* swap some header bytes (why?) */
if (s->f_code != 0x20) {
uint32_t *src = (uint32_t *) (buf + 4);
for (i=0; i < 4; i++) {
src[i] = ((src[i] << 16) | (src[i] >> 16)) ^ src[7 - i];
}
}
result = svq1_decode_frame_header (&s->gb, s);
if (result != 0)
{
av_dlog(s->avctx, "Error in svq1_decode_frame_header %i\n",result);
return result;
}
//FIXME this avoids some confusion for "B frames" without 2 references
//this should be removed after libavcodec can handle more flexible picture types & ordering
if(s->pict_type==AV_PICTURE_TYPE_B && s->last_picture_ptr==NULL) return buf_size;
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
|| avctx->skip_frame >= AVDISCARD_ALL)
return buf_size;
if(MPV_frame_start(s, avctx) < 0)
return -1;
pmv = av_malloc((FFALIGN(s->width, 16)/8 + 3) * sizeof(*pmv));
if (!pmv)
return -1;
/* decode y, u and v components */
for (i=0; i < 3; i++) {
int linesize;
if (i == 0) {
width = FFALIGN(s->width, 16);
height = FFALIGN(s->height, 16);
linesize= s->linesize;
} else {
if(s->flags&CODEC_FLAG_GRAY) break;
width = FFALIGN(s->width/4, 16);
height = FFALIGN(s->height/4, 16);
linesize= s->uvlinesize;
}
current = s->current_picture.f.data[i];
if(s->pict_type==AV_PICTURE_TYPE_B){
previous = s->next_picture.f.data[i];
}else{
previous = s->last_picture.f.data[i];
}
if (s->pict_type == AV_PICTURE_TYPE_I) {
/* keyframe */
for (y=0; y < height; y+=16) {
for (x=0; x < width; x+=16) {
result = svq1_decode_block_intra (&s->gb, ¤t[x], linesize);
if (result != 0)
{
av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_block %i (keyframe)\n",result);
goto err;
}
}
current += 16*linesize;
}
} else {
/* delta frame */
memset (pmv, 0, ((width / 8) + 3) * sizeof(svq1_pmv));
for (y=0; y < height; y+=16) {
for (x=0; x < width; x+=16) {
result = svq1_decode_delta_block (s, &s->gb, ¤t[x], previous,
linesize, pmv, x, y);
if (result != 0)
{
av_dlog(s->avctx, "Error in svq1_decode_delta_block %i\n",result);
goto err;
}
}
pmv[0].x =
pmv[0].y = 0;
current += 16*linesize;
}
}
}
*pict = *(AVFrame*)&s->current_picture;
MPV_frame_end(s);
*data_size=sizeof(AVFrame);
result = buf_size;
err:
av_free(pmv);
return result;
} | true | FFmpeg | 6e24b9488e67849a28e64a8056e05f83cf439229 |
3,981 | static int parse_object_segment(AVCodecContext *avctx,
const uint8_t *buf, int buf_size)
{
PGSSubContext *ctx = avctx->priv_data;
PGSSubObject *object;
uint8_t sequence_desc;
unsigned int rle_bitmap_len, width, height;
int id;
if (buf_size <= 4)
return AVERROR_INVALIDDATA;
buf_size -= 4;
id = bytestream_get_be16(&buf);
object = find_object(id, &ctx->objects);
if (!object) {
if (ctx->objects.count >= MAX_EPOCH_OBJECTS) {
av_log(avctx, AV_LOG_ERROR, "Too many objects in epoch\n");
return AVERROR_INVALIDDATA;
}
object = &ctx->objects.object[ctx->objects.count++];
object->id = id;
}
/* skip object version number */
buf += 1;
/* Read the Sequence Description to determine if start of RLE data or appended to previous RLE */
sequence_desc = bytestream_get_byte(&buf);
if (!(sequence_desc & 0x80)) {
/* Additional RLE data */
if (buf_size > object->rle_remaining_len)
return AVERROR_INVALIDDATA;
memcpy(object->rle + object->rle_data_len, buf, buf_size);
object->rle_data_len += buf_size;
object->rle_remaining_len -= buf_size;
return 0;
}
if (buf_size <= 7)
return AVERROR_INVALIDDATA;
buf_size -= 7;
/* Decode rle bitmap length, stored size includes width/height data */
rle_bitmap_len = bytestream_get_be24(&buf) - 2*2;
if (buf_size > rle_bitmap_len) {
av_log(avctx, AV_LOG_ERROR,
"Buffer dimension %d larger than the expected RLE data %d\n",
buf_size, rle_bitmap_len);
return AVERROR_INVALIDDATA;
}
/* Get bitmap dimensions from data */
width = bytestream_get_be16(&buf);
height = bytestream_get_be16(&buf);
/* Make sure the bitmap is not too large */
if (avctx->width < width || avctx->height < height) {
av_log(avctx, AV_LOG_ERROR, "Bitmap dimensions larger than video.\n");
return AVERROR_INVALIDDATA;
}
object->w = width;
object->h = height;
av_fast_padded_malloc(&object->rle, &object->rle_buffer_size, rle_bitmap_len);
if (!object->rle)
return AVERROR(ENOMEM);
memcpy(object->rle, buf, buf_size);
object->rle_data_len = buf_size;
object->rle_remaining_len = rle_bitmap_len - buf_size;
return 0;
} | true | FFmpeg | ebf5264cd6bbda6c0c379dfeaaba3b9afc3279a8
3,982 | static int ram_load_postcopy(QEMUFile *f)
{
int flags = 0, ret = 0;
bool place_needed = false;
bool matching_page_sizes = qemu_host_page_size == TARGET_PAGE_SIZE;
MigrationIncomingState *mis = migration_incoming_get_current();
/* Temporary page that is later 'placed' */
void *postcopy_host_page = postcopy_get_tmp_page(mis);
void *last_host = NULL;
while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
ram_addr_t addr;
void *host = NULL;
void *page_buffer = NULL;
void *place_source = NULL;
uint8_t ch;
bool all_zero = false;
addr = qemu_get_be64(f);
flags = addr & ~TARGET_PAGE_MASK;
addr &= TARGET_PAGE_MASK;
trace_ram_load_postcopy_loop((uint64_t)addr, flags);
place_needed = false;
if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
host = host_from_stream_offset(f, addr, flags);
if (!host) {
error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
ret = -EINVAL;
break;
}
page_buffer = host;
/*
* Postcopy requires that we place whole host pages atomically.
* To make it atomic, the data is read into a temporary page
* that's moved into place later.
* The migration protocol uses, possibly smaller, target-pages
* however the source ensures it always sends all the components
* of a host page in order.
*/
page_buffer = postcopy_host_page +
((uintptr_t)host & ~qemu_host_page_mask);
/* If all TP are zero then we can optimise the place */
if (!((uintptr_t)host & ~qemu_host_page_mask)) {
all_zero = true;
} else {
/* not the 1st TP within the HP */
if (host != (last_host + TARGET_PAGE_SIZE)) {
error_report("Non-sequential target page %p/%p\n",
host, last_host);
ret = -EINVAL;
break;
}
}
/*
* If it's the last part of a host page then we place the host
* page
*/
place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
~qemu_host_page_mask) == 0;
place_source = postcopy_host_page;
}
switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
case RAM_SAVE_FLAG_COMPRESS:
ch = qemu_get_byte(f);
memset(page_buffer, ch, TARGET_PAGE_SIZE);
if (ch) {
all_zero = false;
}
break;
case RAM_SAVE_FLAG_PAGE:
all_zero = false;
if (!place_needed || !matching_page_sizes) {
qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
} else {
/* Avoids the qemu_file copy during postcopy, which is
* going to do a copy later; can only do it when we
* do this read in one go (matching page sizes)
*/
qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
TARGET_PAGE_SIZE);
}
break;
case RAM_SAVE_FLAG_EOS:
/* normal exit */
break;
default:
error_report("Unknown combination of migration flags: %#x"
" (postcopy mode)", flags);
ret = -EINVAL;
}
if (place_needed) {
/* This gets called at the last target page in the host page */
if (all_zero) {
ret = postcopy_place_page_zero(mis,
host + TARGET_PAGE_SIZE -
qemu_host_page_size);
} else {
ret = postcopy_place_page(mis, host + TARGET_PAGE_SIZE -
qemu_host_page_size,
place_source);
}
}
if (!ret) {
ret = qemu_file_get_error(f);
}
}
return ret;
} | true | qemu | c53b7ddc61198c4af8290d6310592e48e3507c47 |
3,983 | static bool virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
VirtIOSCSICommon *vs = &s->parent_obj;
SCSIDevice *d;
int rc;
rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
sizeof(VirtIOSCSICmdResp) + vs->sense_size);
if (rc < 0) {
if (rc == -ENOTSUP) {
virtio_scsi_fail_cmd_req(req);
} else {
virtio_scsi_bad_req();
}
return false;
}
d = virtio_scsi_device_find(s, req->req.cmd.lun);
if (!d) {
req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
virtio_scsi_complete_cmd_req(req);
return false;
}
virtio_scsi_ctx_check(s, d);
req->sreq = scsi_req_new(d, req->req.cmd.tag,
virtio_scsi_get_lun(req->req.cmd.lun),
req->req.cmd.cdb, req);
if (req->sreq->cmd.mode != SCSI_XFER_NONE
&& (req->sreq->cmd.mode != req->mode ||
req->sreq->cmd.xfer > req->qsgl.size)) {
req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
virtio_scsi_complete_cmd_req(req);
return false;
}
scsi_req_ref(req->sreq);
blk_io_plug(d->conf.blk);
return true;
} | true | qemu | 661e32fb3cb71c7e019daee375be4bb487b9917c
3,985 | static void calc_sums(int pmin, int pmax, uint32_t *data, int n, int pred_order,
uint32_t sums[][MAX_PARTITIONS])
{
int i, j;
int parts;
uint32_t *res, *res_end;
/* sums for highest level */
parts = (1 << pmax);
res = &data[pred_order];
res_end = &data[n >> pmax];
for (i = 0; i < parts; i++) {
uint32_t sum = 0;
while (res < res_end)
sum += *(res++);
sums[pmax][i] = sum;
res_end += n >> pmax;
}
/* sums for lower levels */
for (i = pmax - 1; i >= pmin; i--) {
parts = (1 << i);
for (j = 0; j < parts; j++)
sums[i][j] = sums[i+1][2*j] + sums[i+1][2*j+1];
}
} | true | FFmpeg | 5ff998a233d759d0de83ea6f95c383d03d25d88e
3,986 | static MTPData *usb_mtp_get_object(MTPState *s, MTPControl *c,
MTPObject *o)
{
MTPData *d = usb_mtp_data_alloc(c);
trace_usb_mtp_op_get_object(s->dev.addr, o->handle, o->path);
d->fd = open(o->path, O_RDONLY);
if (d->fd == -1) {
return NULL;
}
d->length = o->stat.st_size;
d->alloc = 512;
d->data = g_malloc(d->alloc);
return d;
} | true | qemu | 2dc7fdf33d28940255f171b8ea4b692d9d5b7a7d |
3,987 | static void init_block_index(VC1Context *v)
{
MpegEncContext *s = &v->s;
ff_init_block_index(s);
if (v->field_mode && !(v->second_field ^ v->tff)) {
s->dest[0] += s->current_picture_ptr->f.linesize[0];
s->dest[1] += s->current_picture_ptr->f.linesize[1];
s->dest[2] += s->current_picture_ptr->f.linesize[2];
}
} | true | FFmpeg | f6774f905fb3cfdc319523ac640be30b14c1bc55
3,989 | bool qvirtio_wait_queue_isr(const QVirtioBus *bus, QVirtioDevice *d,
QVirtQueue *vq, uint64_t timeout)
{
do {
clock_step(100);
if (bus->get_queue_isr_status(d, vq)) {
break; /* It has ended */
}
} while (--timeout);
return timeout != 0;
} | true | qemu | 70556264a89a268efba1d7e8e341adcdd7881eb4
3,990 | static int64_t mkv_write_cues(AVIOContext *pb, mkv_cues *cues, int num_tracks)
{
ebml_master cues_element;
int64_t currentpos;
int i, j;
currentpos = avio_tell(pb);
cues_element = start_ebml_master(pb, MATROSKA_ID_CUES, 0);
for (i = 0; i < cues->num_entries; i++) {
ebml_master cuepoint, track_positions;
mkv_cuepoint *entry = &cues->entries[i];
uint64_t pts = entry->pts;
cuepoint = start_ebml_master(pb, MATROSKA_ID_POINTENTRY, MAX_CUEPOINT_SIZE(num_tracks));
put_ebml_uint(pb, MATROSKA_ID_CUETIME, pts);
// put all the entries from different tracks that have the exact same
// timestamp into the same CuePoint
for (j = 0; j < cues->num_entries - i && entry[j].pts == pts; j++) {
track_positions = start_ebml_master(pb, MATROSKA_ID_CUETRACKPOSITION, MAX_CUETRACKPOS_SIZE);
put_ebml_uint(pb, MATROSKA_ID_CUETRACK , entry[j].tracknum );
put_ebml_uint(pb, MATROSKA_ID_CUECLUSTERPOSITION, entry[j].cluster_pos);
end_ebml_master(pb, track_positions);
}
i += j - 1;
end_ebml_master(pb, cuepoint);
}
end_ebml_master(pb, cues_element);
av_free(cues->entries);
av_free(cues);
return currentpos;
} | true | FFmpeg | eddd580b743692bc930692cb0c5a3e930ab45ad4
3,991 | static int dxva2_device_create9ex(AVHWDeviceContext *ctx, UINT adapter)
{
DXVA2DevicePriv *priv = ctx->user_opaque;
D3DPRESENT_PARAMETERS d3dpp = dxva2_present_params;
D3DDISPLAYMODEEX modeex = {0};
IDirect3D9Ex *d3d9ex = NULL;
IDirect3DDevice9Ex *exdev = NULL;
HRESULT hr;
pDirect3DCreate9Ex *createD3DEx = (pDirect3DCreate9Ex *)dlsym(priv->d3dlib, "Direct3DCreate9Ex");
if (!createD3DEx)
return AVERROR(ENOSYS);
hr = createD3DEx(D3D_SDK_VERSION, &d3d9ex);
if (FAILED(hr))
return AVERROR_UNKNOWN;
IDirect3D9Ex_GetAdapterDisplayModeEx(d3d9ex, adapter, &modeex, NULL);
d3dpp.BackBufferFormat = modeex.Format;
hr = IDirect3D9Ex_CreateDeviceEx(d3d9ex, adapter, D3DDEVTYPE_HAL, GetDesktopWindow(),
FF_D3DCREATE_FLAGS,
&d3dpp, NULL, &exdev);
if (FAILED(hr)) {
IDirect3D9Ex_Release(d3d9ex);
return AVERROR_UNKNOWN;
}
av_log(ctx, AV_LOG_VERBOSE, "Using D3D9Ex device.\n");
priv->d3d9 = (IDirect3D9 *)d3d9ex;
priv->d3d9device = (IDirect3DDevice9 *)exdev;
return 0;
} | true | FFmpeg | 59b126f92225316e0cd77bb952d630553801dc85
3,993 | static void mov_metadata_creation_time(AVDictionary **metadata, int64_t time)
{
if (time) {
if(time >= 2082844800)
time -= 2082844800; /* seconds between 1904-01-01 and Epoch */
avpriv_dict_set_timestamp(metadata, "creation_time", time * 1000000); | true | FFmpeg | 39ee3ddff87a12e108fc4e0d36f756d0ca080472 |
3,995 | static void register_all(void)
{
/* hardware accelerators */
REGISTER_HWACCEL(H263_VAAPI, h263_vaapi);
REGISTER_HWACCEL(H263_VIDEOTOOLBOX, h263_videotoolbox);
REGISTER_HWACCEL(H264_CUVID, h264_cuvid);
REGISTER_HWACCEL(H264_D3D11VA, h264_d3d11va);
REGISTER_HWACCEL(H264_D3D11VA2, h264_d3d11va2);
REGISTER_HWACCEL(H264_DXVA2, h264_dxva2);
REGISTER_HWACCEL(H264_MEDIACODEC, h264_mediacodec);
REGISTER_HWACCEL(H264_MMAL, h264_mmal);
REGISTER_HWACCEL(H264_NVDEC, h264_nvdec);
REGISTER_HWACCEL(H264_QSV, h264_qsv);
REGISTER_HWACCEL(H264_VAAPI, h264_vaapi);
REGISTER_HWACCEL(H264_VDPAU, h264_vdpau);
REGISTER_HWACCEL(H264_VIDEOTOOLBOX, h264_videotoolbox);
REGISTER_HWACCEL(HEVC_CUVID, hevc_cuvid);
REGISTER_HWACCEL(HEVC_D3D11VA, hevc_d3d11va);
REGISTER_HWACCEL(HEVC_D3D11VA2, hevc_d3d11va2);
REGISTER_HWACCEL(HEVC_DXVA2, hevc_dxva2);
REGISTER_HWACCEL(HEVC_NVDEC, hevc_nvdec);
REGISTER_HWACCEL(HEVC_MEDIACODEC, hevc_mediacodec);
REGISTER_HWACCEL(HEVC_QSV, hevc_qsv);
REGISTER_HWACCEL(HEVC_VAAPI, hevc_vaapi);
REGISTER_HWACCEL(HEVC_VDPAU, hevc_vdpau);
REGISTER_HWACCEL(HEVC_VIDEOTOOLBOX, hevc_videotoolbox);
REGISTER_HWACCEL(MJPEG_CUVID, mjpeg_cuvid);
REGISTER_HWACCEL(MPEG1_CUVID, mpeg1_cuvid);
REGISTER_HWACCEL(MPEG1_XVMC, mpeg1_xvmc);
REGISTER_HWACCEL(MPEG1_VDPAU, mpeg1_vdpau);
REGISTER_HWACCEL(MPEG1_VIDEOTOOLBOX, mpeg1_videotoolbox);
REGISTER_HWACCEL(MPEG2_CUVID, mpeg2_cuvid);
REGISTER_HWACCEL(MPEG2_XVMC, mpeg2_xvmc);
REGISTER_HWACCEL(MPEG2_D3D11VA, mpeg2_d3d11va);
REGISTER_HWACCEL(MPEG2_D3D11VA2, mpeg2_d3d11va2);
REGISTER_HWACCEL(MPEG2_DXVA2, mpeg2_dxva2);
REGISTER_HWACCEL(MPEG2_MMAL, mpeg2_mmal);
REGISTER_HWACCEL(MPEG2_QSV, mpeg2_qsv);
REGISTER_HWACCEL(MPEG2_VAAPI, mpeg2_vaapi);
REGISTER_HWACCEL(MPEG2_VDPAU, mpeg2_vdpau);
REGISTER_HWACCEL(MPEG2_VIDEOTOOLBOX, mpeg2_videotoolbox);
REGISTER_HWACCEL(MPEG2_MEDIACODEC, mpeg2_mediacodec);
REGISTER_HWACCEL(MPEG4_CUVID, mpeg4_cuvid);
REGISTER_HWACCEL(MPEG4_MEDIACODEC, mpeg4_mediacodec);
REGISTER_HWACCEL(MPEG4_MMAL, mpeg4_mmal);
REGISTER_HWACCEL(MPEG4_VAAPI, mpeg4_vaapi);
REGISTER_HWACCEL(MPEG4_VDPAU, mpeg4_vdpau);
REGISTER_HWACCEL(MPEG4_VIDEOTOOLBOX, mpeg4_videotoolbox);
REGISTER_HWACCEL(VC1_CUVID, vc1_cuvid);
REGISTER_HWACCEL(VC1_D3D11VA, vc1_d3d11va);
REGISTER_HWACCEL(VC1_D3D11VA2, vc1_d3d11va2);
REGISTER_HWACCEL(VC1_DXVA2, vc1_dxva2);
REGISTER_HWACCEL(VC1_NVDEC, vc1_nvdec);
REGISTER_HWACCEL(VC1_VAAPI, vc1_vaapi);
REGISTER_HWACCEL(VC1_VDPAU, vc1_vdpau);
REGISTER_HWACCEL(VC1_MMAL, vc1_mmal);
REGISTER_HWACCEL(VC1_QSV, vc1_qsv);
REGISTER_HWACCEL(VP8_CUVID, vp8_cuvid);
REGISTER_HWACCEL(VP8_MEDIACODEC, vp8_mediacodec);
REGISTER_HWACCEL(VP8_QSV, vp8_qsv);
REGISTER_HWACCEL(VP9_CUVID, vp9_cuvid);
REGISTER_HWACCEL(VP9_D3D11VA, vp9_d3d11va);
REGISTER_HWACCEL(VP9_D3D11VA2, vp9_d3d11va2);
REGISTER_HWACCEL(VP9_DXVA2, vp9_dxva2);
REGISTER_HWACCEL(VP9_MEDIACODEC, vp9_mediacodec);
REGISTER_HWACCEL(VP9_NVDEC, vp9_nvdec);
REGISTER_HWACCEL(VP9_VAAPI, vp9_vaapi);
REGISTER_HWACCEL(WMV3_D3D11VA, wmv3_d3d11va);
REGISTER_HWACCEL(WMV3_D3D11VA2, wmv3_d3d11va2);
REGISTER_HWACCEL(WMV3_DXVA2, wmv3_dxva2);
REGISTER_HWACCEL(WMV3_NVDEC, wmv3_nvdec);
REGISTER_HWACCEL(WMV3_VAAPI, wmv3_vaapi);
REGISTER_HWACCEL(WMV3_VDPAU, wmv3_vdpau);
/* video codecs */
REGISTER_ENCODER(A64MULTI, a64multi);
REGISTER_ENCODER(A64MULTI5, a64multi5);
REGISTER_DECODER(AASC, aasc);
REGISTER_DECODER(AIC, aic);
REGISTER_ENCDEC (ALIAS_PIX, alias_pix);
REGISTER_ENCDEC (AMV, amv);
REGISTER_DECODER(ANM, anm);
REGISTER_DECODER(ANSI, ansi);
REGISTER_ENCDEC (APNG, apng);
REGISTER_ENCDEC (ASV1, asv1);
REGISTER_ENCDEC (ASV2, asv2);
REGISTER_DECODER(AURA, aura);
REGISTER_DECODER(AURA2, aura2);
REGISTER_ENCDEC (AVRP, avrp);
REGISTER_DECODER(AVRN, avrn);
REGISTER_DECODER(AVS, avs);
REGISTER_ENCDEC (AVUI, avui);
REGISTER_ENCDEC (AYUV, ayuv);
REGISTER_DECODER(BETHSOFTVID, bethsoftvid);
REGISTER_DECODER(BFI, bfi);
REGISTER_DECODER(BINK, bink);
REGISTER_ENCDEC (BMP, bmp);
REGISTER_DECODER(BMV_VIDEO, bmv_video);
REGISTER_DECODER(BRENDER_PIX, brender_pix);
REGISTER_DECODER(C93, c93);
REGISTER_DECODER(CAVS, cavs);
REGISTER_DECODER(CDGRAPHICS, cdgraphics);
REGISTER_DECODER(CDXL, cdxl);
REGISTER_DECODER(CFHD, cfhd);
REGISTER_ENCDEC (CINEPAK, cinepak);
REGISTER_DECODER(CLEARVIDEO, clearvideo);
REGISTER_ENCDEC (CLJR, cljr);
REGISTER_DECODER(CLLC, cllc);
REGISTER_ENCDEC (COMFORTNOISE, comfortnoise);
REGISTER_DECODER(CPIA, cpia);
REGISTER_DECODER(CSCD, cscd);
REGISTER_DECODER(CYUV, cyuv);
REGISTER_DECODER(DDS, dds);
REGISTER_DECODER(DFA, dfa);
REGISTER_DECODER(DIRAC, dirac);
REGISTER_ENCDEC (DNXHD, dnxhd);
REGISTER_ENCDEC (DPX, dpx);
REGISTER_DECODER(DSICINVIDEO, dsicinvideo);
REGISTER_DECODER(DVAUDIO, dvaudio);
REGISTER_ENCDEC (DVVIDEO, dvvideo);
REGISTER_DECODER(DXA, dxa);
REGISTER_DECODER(DXTORY, dxtory);
REGISTER_DECODER(DXV, dxv);
REGISTER_DECODER(EACMV, eacmv);
REGISTER_DECODER(EAMAD, eamad);
REGISTER_DECODER(EATGQ, eatgq);
REGISTER_DECODER(EATGV, eatgv);
REGISTER_DECODER(EATQI, eatqi);
REGISTER_DECODER(EIGHTBPS, eightbps);
REGISTER_DECODER(EIGHTSVX_EXP, eightsvx_exp);
REGISTER_DECODER(EIGHTSVX_FIB, eightsvx_fib);
REGISTER_DECODER(ESCAPE124, escape124);
REGISTER_DECODER(ESCAPE130, escape130);
REGISTER_DECODER(EXR, exr);
REGISTER_ENCDEC (FFV1, ffv1);
REGISTER_ENCDEC (FFVHUFF, ffvhuff);
REGISTER_DECODER(FIC, fic);
REGISTER_ENCDEC (FITS, fits);
REGISTER_ENCDEC (FLASHSV, flashsv);
REGISTER_ENCDEC (FLASHSV2, flashsv2);
REGISTER_DECODER(FLIC, flic);
REGISTER_ENCDEC (FLV, flv);
REGISTER_DECODER(FMVC, fmvc);
REGISTER_DECODER(FOURXM, fourxm);
REGISTER_DECODER(FRAPS, fraps);
REGISTER_DECODER(FRWU, frwu);
REGISTER_DECODER(G2M, g2m);
REGISTER_DECODER(GDV, gdv);
REGISTER_ENCDEC (GIF, gif);
REGISTER_ENCDEC (H261, h261);
REGISTER_ENCDEC (H263, h263);
REGISTER_DECODER(H263I, h263i);
REGISTER_ENCDEC (H263P, h263p);
REGISTER_DECODER(H263_V4L2M2M, h263_v4l2m2m);
REGISTER_DECODER(H264, h264);
REGISTER_DECODER(H264_CRYSTALHD, h264_crystalhd);
REGISTER_DECODER(H264_V4L2M2M, h264_v4l2m2m);
REGISTER_DECODER(H264_MEDIACODEC, h264_mediacodec);
REGISTER_DECODER(H264_MMAL, h264_mmal);
REGISTER_DECODER(H264_QSV, h264_qsv);
REGISTER_DECODER(H264_RKMPP, h264_rkmpp);
REGISTER_ENCDEC (HAP, hap);
REGISTER_DECODER(HEVC, hevc);
REGISTER_DECODER(HEVC_QSV, hevc_qsv);
REGISTER_DECODER(HEVC_RKMPP, hevc_rkmpp);
REGISTER_DECODER(HEVC_V4L2M2M, hevc_v4l2m2m);
REGISTER_DECODER(HNM4_VIDEO, hnm4_video);
REGISTER_DECODER(HQ_HQA, hq_hqa);
REGISTER_DECODER(HQX, hqx);
REGISTER_ENCDEC (HUFFYUV, huffyuv);
REGISTER_DECODER(IDCIN, idcin);
REGISTER_DECODER(IFF_ILBM, iff_ilbm);
REGISTER_DECODER(INDEO2, indeo2);
REGISTER_DECODER(INDEO3, indeo3);
REGISTER_DECODER(INDEO4, indeo4);
REGISTER_DECODER(INDEO5, indeo5);
REGISTER_DECODER(INTERPLAY_VIDEO, interplay_video);
REGISTER_ENCDEC (JPEG2000, jpeg2000);
REGISTER_ENCDEC (JPEGLS, jpegls);
REGISTER_DECODER(JV, jv);
REGISTER_DECODER(KGV1, kgv1);
REGISTER_DECODER(KMVC, kmvc);
REGISTER_DECODER(LAGARITH, lagarith);
REGISTER_ENCODER(LJPEG, ljpeg);
REGISTER_DECODER(LOCO, loco);
REGISTER_DECODER(M101, m101);
REGISTER_ENCDEC (MAGICYUV, magicyuv);
REGISTER_DECODER(MDEC, mdec);
REGISTER_DECODER(MIMIC, mimic);
REGISTER_ENCDEC (MJPEG, mjpeg);
REGISTER_DECODER(MJPEGB, mjpegb);
REGISTER_DECODER(MMVIDEO, mmvideo);
REGISTER_DECODER(MOTIONPIXELS, motionpixels);
REGISTER_ENCDEC (MPEG1VIDEO, mpeg1video);
REGISTER_ENCDEC (MPEG2VIDEO, mpeg2video);
REGISTER_ENCDEC (MPEG4, mpeg4);
REGISTER_DECODER(MPEG4_CRYSTALHD, mpeg4_crystalhd);
REGISTER_DECODER(MPEG4_V4L2M2M, mpeg4_v4l2m2m);
REGISTER_DECODER(MPEG4_MMAL, mpeg4_mmal);
REGISTER_DECODER(MPEGVIDEO, mpegvideo);
REGISTER_DECODER(MPEG1_V4L2M2M, mpeg1_v4l2m2m);
REGISTER_DECODER(MPEG2_MMAL, mpeg2_mmal);
REGISTER_DECODER(MPEG2_CRYSTALHD, mpeg2_crystalhd);
REGISTER_DECODER(MPEG2_V4L2M2M, mpeg2_v4l2m2m);
REGISTER_DECODER(MPEG2_QSV, mpeg2_qsv);
REGISTER_DECODER(MPEG2_MEDIACODEC, mpeg2_mediacodec);
REGISTER_DECODER(MSA1, msa1);
REGISTER_DECODER(MSCC, mscc);
REGISTER_DECODER(MSMPEG4V1, msmpeg4v1);
REGISTER_ENCDEC (MSMPEG4V2, msmpeg4v2);
REGISTER_ENCDEC (MSMPEG4V3, msmpeg4v3);
REGISTER_DECODER(MSMPEG4_CRYSTALHD, msmpeg4_crystalhd);
REGISTER_DECODER(MSRLE, msrle);
REGISTER_DECODER(MSS1, mss1);
REGISTER_DECODER(MSS2, mss2);
REGISTER_ENCDEC (MSVIDEO1, msvideo1);
REGISTER_DECODER(MSZH, mszh);
REGISTER_DECODER(MTS2, mts2);
REGISTER_DECODER(MVC1, mvc1);
REGISTER_DECODER(MVC2, mvc2);
REGISTER_DECODER(MXPEG, mxpeg);
REGISTER_DECODER(NUV, nuv);
REGISTER_DECODER(PAF_VIDEO, paf_video);
REGISTER_ENCDEC (PAM, pam);
REGISTER_ENCDEC (PBM, pbm);
REGISTER_ENCDEC (PCX, pcx);
REGISTER_ENCDEC (PGM, pgm);
REGISTER_ENCDEC (PGMYUV, pgmyuv);
REGISTER_DECODER(PICTOR, pictor);
REGISTER_DECODER(PIXLET, pixlet);
REGISTER_ENCDEC (PNG, png);
REGISTER_ENCDEC (PPM, ppm);
REGISTER_ENCDEC (PRORES, prores);
REGISTER_ENCODER(PRORES_AW, prores_aw);
REGISTER_ENCODER(PRORES_KS, prores_ks);
REGISTER_DECODER(PRORES_LGPL, prores_lgpl);
REGISTER_DECODER(PSD, psd);
REGISTER_DECODER(PTX, ptx);
REGISTER_DECODER(QDRAW, qdraw);
REGISTER_DECODER(QPEG, qpeg);
REGISTER_ENCDEC (QTRLE, qtrle);
REGISTER_ENCDEC (R10K, r10k);
REGISTER_ENCDEC (R210, r210);
REGISTER_ENCDEC (RAWVIDEO, rawvideo);
REGISTER_DECODER(RL2, rl2);
REGISTER_ENCDEC (ROQ, roq);
REGISTER_DECODER(RPZA, rpza);
REGISTER_DECODER(RSCC, rscc);
REGISTER_ENCDEC (RV10, rv10);
REGISTER_ENCDEC (RV20, rv20);
REGISTER_DECODER(RV30, rv30);
REGISTER_DECODER(RV40, rv40);
REGISTER_ENCDEC (S302M, s302m);
REGISTER_DECODER(SANM, sanm);
REGISTER_DECODER(SCPR, scpr);
REGISTER_DECODER(SCREENPRESSO, screenpresso);
REGISTER_DECODER(SDX2_DPCM, sdx2_dpcm);
REGISTER_ENCDEC (SGI, sgi);
REGISTER_DECODER(SGIRLE, sgirle);
REGISTER_DECODER(SHEERVIDEO, sheervideo);
REGISTER_DECODER(SMACKER, smacker);
REGISTER_DECODER(SMC, smc);
REGISTER_DECODER(SMVJPEG, smvjpeg);
REGISTER_ENCDEC (SNOW, snow);
REGISTER_DECODER(SP5X, sp5x);
REGISTER_DECODER(SPEEDHQ, speedhq);
REGISTER_DECODER(SRGC, srgc);
REGISTER_ENCDEC (SUNRAST, sunrast);
REGISTER_ENCDEC (SVQ1, svq1);
REGISTER_DECODER(SVQ3, svq3);
REGISTER_ENCDEC (TARGA, targa);
REGISTER_DECODER(TARGA_Y216, targa_y216);
REGISTER_DECODER(TDSC, tdsc);
REGISTER_DECODER(THEORA, theora);
REGISTER_DECODER(THP, thp);
REGISTER_DECODER(TIERTEXSEQVIDEO, tiertexseqvideo);
REGISTER_ENCDEC (TIFF, tiff);
REGISTER_DECODER(TMV, tmv);
REGISTER_DECODER(TRUEMOTION1, truemotion1);
REGISTER_DECODER(TRUEMOTION2, truemotion2);
REGISTER_DECODER(TRUEMOTION2RT, truemotion2rt);
REGISTER_DECODER(TSCC, tscc);
REGISTER_DECODER(TSCC2, tscc2);
REGISTER_DECODER(TXD, txd);
REGISTER_DECODER(ULTI, ulti);
REGISTER_ENCDEC (UTVIDEO, utvideo);
REGISTER_ENCDEC (V210, v210);
REGISTER_DECODER(V210X, v210x);
REGISTER_ENCDEC (V308, v308);
REGISTER_ENCDEC (V408, v408);
REGISTER_ENCDEC (V410, v410);
REGISTER_DECODER(VB, vb);
REGISTER_DECODER(VBLE, vble);
REGISTER_DECODER(VC1, vc1);
REGISTER_DECODER(VC1_CRYSTALHD, vc1_crystalhd);
REGISTER_DECODER(VC1IMAGE, vc1image);
REGISTER_DECODER(VC1_MMAL, vc1_mmal);
REGISTER_DECODER(VC1_QSV, vc1_qsv);
REGISTER_DECODER(VC1_V4L2M2M, vc1_v4l2m2m);
REGISTER_ENCODER(VC2, vc2);
REGISTER_DECODER(VCR1, vcr1);
REGISTER_DECODER(VMDVIDEO, vmdvideo);
REGISTER_DECODER(VMNC, vmnc);
REGISTER_DECODER(VP3, vp3);
REGISTER_DECODER(VP5, vp5);
REGISTER_DECODER(VP6, vp6);
REGISTER_DECODER(VP6A, vp6a);
REGISTER_DECODER(VP6F, vp6f);
REGISTER_DECODER(VP7, vp7);
REGISTER_DECODER(VP8, vp8);
REGISTER_DECODER(VP8_RKMPP, vp8_rkmpp);
REGISTER_DECODER(VP8_V4L2M2M, vp8_v4l2m2m);
REGISTER_DECODER(VP9, vp9);
REGISTER_DECODER(VP9_RKMPP, vp9_rkmpp);
REGISTER_DECODER(VP9_V4L2M2M, vp9_v4l2m2m);
REGISTER_DECODER(VQA, vqa);
REGISTER_DECODER(BITPACKED, bitpacked);
REGISTER_DECODER(WEBP, webp);
REGISTER_ENCDEC (WRAPPED_AVFRAME, wrapped_avframe);
REGISTER_ENCDEC (WMV1, wmv1);
REGISTER_ENCDEC (WMV2, wmv2);
REGISTER_DECODER(WMV3, wmv3);
REGISTER_DECODER(WMV3_CRYSTALHD, wmv3_crystalhd);
REGISTER_DECODER(WMV3IMAGE, wmv3image);
REGISTER_DECODER(WNV1, wnv1);
REGISTER_DECODER(XAN_WC3, xan_wc3);
REGISTER_DECODER(XAN_WC4, xan_wc4);
REGISTER_ENCDEC (XBM, xbm);
REGISTER_ENCDEC (XFACE, xface);
REGISTER_DECODER(XL, xl);
REGISTER_DECODER(XPM, xpm);
REGISTER_ENCDEC (XWD, xwd);
REGISTER_ENCDEC (Y41P, y41p);
REGISTER_DECODER(YLC, ylc);
REGISTER_DECODER(YOP, yop);
REGISTER_ENCDEC (YUV4, yuv4);
REGISTER_DECODER(ZERO12V, zero12v);
REGISTER_DECODER(ZEROCODEC, zerocodec);
REGISTER_ENCDEC (ZLIB, zlib);
REGISTER_ENCDEC (ZMBV, zmbv);
/* audio codecs */
REGISTER_ENCDEC (AAC, aac);
REGISTER_DECODER(AAC_FIXED, aac_fixed);
REGISTER_DECODER(AAC_LATM, aac_latm);
REGISTER_ENCDEC (AC3, ac3);
REGISTER_ENCDEC (AC3_FIXED, ac3_fixed);
REGISTER_ENCDEC (ALAC, alac);
REGISTER_DECODER(ALS, als);
REGISTER_DECODER(AMRNB, amrnb);
REGISTER_DECODER(AMRWB, amrwb);
REGISTER_DECODER(APE, ape);
REGISTER_ENCDEC (APTX, aptx);
REGISTER_DECODER(ATRAC1, atrac1);
REGISTER_DECODER(ATRAC3, atrac3);
REGISTER_DECODER(ATRAC3AL, atrac3al);
REGISTER_DECODER(ATRAC3P, atrac3p);
REGISTER_DECODER(ATRAC3PAL, atrac3pal);
REGISTER_DECODER(BINKAUDIO_DCT, binkaudio_dct);
REGISTER_DECODER(BINKAUDIO_RDFT, binkaudio_rdft);
REGISTER_DECODER(BMV_AUDIO, bmv_audio);
REGISTER_DECODER(COOK, cook);
REGISTER_ENCDEC (DCA, dca);
REGISTER_DECODER(DOLBY_E, dolby_e);
REGISTER_DECODER(DSD_LSBF, dsd_lsbf);
REGISTER_DECODER(DSD_MSBF, dsd_msbf);
REGISTER_DECODER(DSD_LSBF_PLANAR, dsd_lsbf_planar);
REGISTER_DECODER(DSD_MSBF_PLANAR, dsd_msbf_planar);
REGISTER_DECODER(DSICINAUDIO, dsicinaudio);
REGISTER_DECODER(DSS_SP, dss_sp);
REGISTER_DECODER(DST, dst);
REGISTER_ENCDEC (EAC3, eac3);
REGISTER_DECODER(EVRC, evrc);
REGISTER_DECODER(FFWAVESYNTH, ffwavesynth);
REGISTER_ENCDEC (FLAC, flac);
REGISTER_ENCDEC (G723_1, g723_1);
REGISTER_DECODER(G729, g729);
REGISTER_DECODER(GSM, gsm);
REGISTER_DECODER(GSM_MS, gsm_ms);
REGISTER_DECODER(IAC, iac);
REGISTER_DECODER(IMC, imc);
REGISTER_DECODER(INTERPLAY_ACM, interplay_acm);
REGISTER_DECODER(MACE3, mace3);
REGISTER_DECODER(MACE6, mace6);
REGISTER_DECODER(METASOUND, metasound);
REGISTER_ENCDEC (MLP, mlp);
REGISTER_DECODER(MP1, mp1);
REGISTER_DECODER(MP1FLOAT, mp1float);
REGISTER_ENCDEC (MP2, mp2);
REGISTER_DECODER(MP2FLOAT, mp2float);
REGISTER_ENCODER(MP2FIXED, mp2fixed);
REGISTER_DECODER(MP3, mp3);
REGISTER_DECODER(MP3FLOAT, mp3float);
REGISTER_DECODER(MP3ADU, mp3adu);
REGISTER_DECODER(MP3ADUFLOAT, mp3adufloat);
REGISTER_DECODER(MP3ON4, mp3on4);
REGISTER_DECODER(MP3ON4FLOAT, mp3on4float);
REGISTER_DECODER(MPC7, mpc7);
REGISTER_DECODER(MPC8, mpc8);
REGISTER_ENCDEC (NELLYMOSER, nellymoser);
REGISTER_DECODER(ON2AVC, on2avc);
REGISTER_ENCDEC (OPUS, opus);
REGISTER_DECODER(PAF_AUDIO, paf_audio);
REGISTER_DECODER(QCELP, qcelp);
REGISTER_DECODER(QDM2, qdm2);
REGISTER_DECODER(QDMC, qdmc);
REGISTER_ENCDEC (RA_144, ra_144);
REGISTER_DECODER(RA_288, ra_288);
REGISTER_DECODER(RALF, ralf);
REGISTER_DECODER(SHORTEN, shorten);
REGISTER_DECODER(SIPR, sipr);
REGISTER_DECODER(SMACKAUD, smackaud);
REGISTER_ENCDEC (SONIC, sonic);
REGISTER_ENCODER(SONIC_LS, sonic_ls);
REGISTER_DECODER(TAK, tak);
REGISTER_ENCDEC (TRUEHD, truehd);
REGISTER_DECODER(TRUESPEECH, truespeech);
REGISTER_ENCDEC (TTA, tta);
REGISTER_DECODER(TWINVQ, twinvq);
REGISTER_DECODER(VMDAUDIO, vmdaudio);
REGISTER_ENCDEC (VORBIS, vorbis);
REGISTER_ENCDEC (WAVPACK, wavpack);
REGISTER_DECODER(WMALOSSLESS, wmalossless);
REGISTER_DECODER(WMAPRO, wmapro);
REGISTER_ENCDEC (WMAV1, wmav1);
REGISTER_ENCDEC (WMAV2, wmav2);
REGISTER_DECODER(WMAVOICE, wmavoice);
REGISTER_DECODER(WS_SND1, ws_snd1);
REGISTER_DECODER(XMA1, xma1);
REGISTER_DECODER(XMA2, xma2);
/* PCM codecs */
REGISTER_ENCDEC (PCM_ALAW, pcm_alaw);
REGISTER_DECODER(PCM_BLURAY, pcm_bluray);
REGISTER_DECODER(PCM_DVD, pcm_dvd);
REGISTER_DECODER(PCM_F16LE, pcm_f16le);
REGISTER_DECODER(PCM_F24LE, pcm_f24le);
REGISTER_ENCDEC (PCM_F32BE, pcm_f32be);
REGISTER_ENCDEC (PCM_F32LE, pcm_f32le);
REGISTER_ENCDEC (PCM_F64BE, pcm_f64be);
REGISTER_ENCDEC (PCM_F64LE, pcm_f64le);
REGISTER_DECODER(PCM_LXF, pcm_lxf);
REGISTER_ENCDEC (PCM_MULAW, pcm_mulaw);
REGISTER_ENCDEC (PCM_S8, pcm_s8);
REGISTER_ENCDEC (PCM_S8_PLANAR, pcm_s8_planar);
REGISTER_ENCDEC (PCM_S16BE, pcm_s16be);
REGISTER_ENCDEC (PCM_S16BE_PLANAR, pcm_s16be_planar);
REGISTER_ENCDEC (PCM_S16LE, pcm_s16le);
REGISTER_ENCDEC (PCM_S16LE_PLANAR, pcm_s16le_planar);
REGISTER_ENCDEC (PCM_S24BE, pcm_s24be);
REGISTER_ENCDEC (PCM_S24DAUD, pcm_s24daud);
REGISTER_ENCDEC (PCM_S24LE, pcm_s24le);
REGISTER_ENCDEC (PCM_S24LE_PLANAR, pcm_s24le_planar);
REGISTER_ENCDEC (PCM_S32BE, pcm_s32be);
REGISTER_ENCDEC (PCM_S32LE, pcm_s32le);
REGISTER_ENCDEC (PCM_S32LE_PLANAR, pcm_s32le_planar);
REGISTER_ENCDEC (PCM_S64BE, pcm_s64be);
REGISTER_ENCDEC (PCM_S64LE, pcm_s64le);
REGISTER_ENCDEC (PCM_U8, pcm_u8);
REGISTER_ENCDEC (PCM_U16BE, pcm_u16be);
REGISTER_ENCDEC (PCM_U16LE, pcm_u16le);
REGISTER_ENCDEC (PCM_U24BE, pcm_u24be);
REGISTER_ENCDEC (PCM_U24LE, pcm_u24le);
REGISTER_ENCDEC (PCM_U32BE, pcm_u32be);
REGISTER_ENCDEC (PCM_U32LE, pcm_u32le);
REGISTER_DECODER(PCM_ZORK, pcm_zork);
/* DPCM codecs */
REGISTER_DECODER(GREMLIN_DPCM, gremlin_dpcm);
REGISTER_DECODER(INTERPLAY_DPCM, interplay_dpcm);
REGISTER_ENCDEC (ROQ_DPCM, roq_dpcm);
REGISTER_DECODER(SOL_DPCM, sol_dpcm);
REGISTER_DECODER(XAN_DPCM, xan_dpcm);
/* ADPCM codecs */
REGISTER_DECODER(ADPCM_4XM, adpcm_4xm);
REGISTER_ENCDEC (ADPCM_ADX, adpcm_adx);
REGISTER_DECODER(ADPCM_AFC, adpcm_afc);
REGISTER_DECODER(ADPCM_AICA, adpcm_aica);
REGISTER_DECODER(ADPCM_CT, adpcm_ct);
REGISTER_DECODER(ADPCM_DTK, adpcm_dtk);
REGISTER_DECODER(ADPCM_EA, adpcm_ea);
REGISTER_DECODER(ADPCM_EA_MAXIS_XA, adpcm_ea_maxis_xa);
REGISTER_DECODER(ADPCM_EA_R1, adpcm_ea_r1);
REGISTER_DECODER(ADPCM_EA_R2, adpcm_ea_r2);
REGISTER_DECODER(ADPCM_EA_R3, adpcm_ea_r3);
REGISTER_DECODER(ADPCM_EA_XAS, adpcm_ea_xas);
REGISTER_ENCDEC (ADPCM_G722, adpcm_g722);
REGISTER_ENCDEC (ADPCM_G726, adpcm_g726);
REGISTER_ENCDEC (ADPCM_G726LE, adpcm_g726le);
REGISTER_DECODER(ADPCM_IMA_AMV, adpcm_ima_amv);
REGISTER_DECODER(ADPCM_IMA_APC, adpcm_ima_apc);
REGISTER_DECODER(ADPCM_IMA_DAT4, adpcm_ima_dat4);
REGISTER_DECODER(ADPCM_IMA_DK3, adpcm_ima_dk3);
REGISTER_DECODER(ADPCM_IMA_DK4, adpcm_ima_dk4);
REGISTER_DECODER(ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs);
REGISTER_DECODER(ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead);
REGISTER_DECODER(ADPCM_IMA_ISS, adpcm_ima_iss);
REGISTER_DECODER(ADPCM_IMA_OKI, adpcm_ima_oki);
REGISTER_ENCDEC (ADPCM_IMA_QT, adpcm_ima_qt);
REGISTER_DECODER(ADPCM_IMA_RAD, adpcm_ima_rad);
REGISTER_DECODER(ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg);
REGISTER_ENCDEC (ADPCM_IMA_WAV, adpcm_ima_wav);
REGISTER_DECODER(ADPCM_IMA_WS, adpcm_ima_ws);
REGISTER_ENCDEC (ADPCM_MS, adpcm_ms);
REGISTER_DECODER(ADPCM_MTAF, adpcm_mtaf);
REGISTER_DECODER(ADPCM_PSX, adpcm_psx);
REGISTER_DECODER(ADPCM_SBPRO_2, adpcm_sbpro_2);
REGISTER_DECODER(ADPCM_SBPRO_3, adpcm_sbpro_3);
REGISTER_DECODER(ADPCM_SBPRO_4, adpcm_sbpro_4);
REGISTER_ENCDEC (ADPCM_SWF, adpcm_swf);
REGISTER_DECODER(ADPCM_THP, adpcm_thp);
REGISTER_DECODER(ADPCM_THP_LE, adpcm_thp_le);
REGISTER_DECODER(ADPCM_VIMA, adpcm_vima);
REGISTER_DECODER(ADPCM_XA, adpcm_xa);
REGISTER_ENCDEC (ADPCM_YAMAHA, adpcm_yamaha);
/* subtitles */
REGISTER_ENCDEC (SSA, ssa);
REGISTER_ENCDEC (ASS, ass);
REGISTER_DECODER(CCAPTION, ccaption);
REGISTER_ENCDEC (DVBSUB, dvbsub);
REGISTER_ENCDEC (DVDSUB, dvdsub);
REGISTER_DECODER(JACOSUB, jacosub);
REGISTER_DECODER(MICRODVD, microdvd);
REGISTER_ENCDEC (MOVTEXT, movtext);
REGISTER_DECODER(MPL2, mpl2);
REGISTER_DECODER(PGSSUB, pgssub);
REGISTER_DECODER(PJS, pjs);
REGISTER_DECODER(REALTEXT, realtext);
REGISTER_DECODER(SAMI, sami);
REGISTER_ENCDEC (SRT, srt);
REGISTER_DECODER(STL, stl);
REGISTER_ENCDEC (SUBRIP, subrip);
REGISTER_DECODER(SUBVIEWER, subviewer);
REGISTER_DECODER(SUBVIEWER1, subviewer1);
REGISTER_ENCDEC (TEXT, text);
REGISTER_DECODER(VPLAYER, vplayer);
REGISTER_ENCDEC (WEBVTT, webvtt);
REGISTER_ENCDEC (XSUB, xsub);
/* external libraries */
REGISTER_ENCDEC (AAC_AT, aac_at);
REGISTER_DECODER(AC3_AT, ac3_at);
REGISTER_DECODER(ADPCM_IMA_QT_AT, adpcm_ima_qt_at);
REGISTER_ENCDEC (ALAC_AT, alac_at);
REGISTER_DECODER(AMR_NB_AT, amr_nb_at);
REGISTER_DECODER(EAC3_AT, eac3_at);
REGISTER_DECODER(GSM_MS_AT, gsm_ms_at);
REGISTER_ENCDEC (ILBC_AT, ilbc_at);
REGISTER_DECODER(MP1_AT, mp1_at);
REGISTER_DECODER(MP2_AT, mp2_at);
REGISTER_DECODER(MP3_AT, mp3_at);
REGISTER_ENCDEC (PCM_ALAW_AT, pcm_alaw_at);
REGISTER_ENCDEC (PCM_MULAW_AT, pcm_mulaw_at);
REGISTER_DECODER(QDMC_AT, qdmc_at);
REGISTER_DECODER(QDM2_AT, qdm2_at);
REGISTER_DECODER(LIBCELT, libcelt);
REGISTER_ENCDEC (LIBFDK_AAC, libfdk_aac);
REGISTER_ENCDEC (LIBGSM, libgsm);
REGISTER_ENCDEC (LIBGSM_MS, libgsm_ms);
REGISTER_ENCDEC (LIBILBC, libilbc);
REGISTER_ENCODER(LIBMP3LAME, libmp3lame);
REGISTER_ENCDEC (LIBOPENCORE_AMRNB, libopencore_amrnb);
REGISTER_DECODER(LIBOPENCORE_AMRWB, libopencore_amrwb);
REGISTER_ENCDEC (LIBOPENJPEG, libopenjpeg);
REGISTER_ENCDEC (LIBOPUS, libopus);
REGISTER_DECODER(LIBRSVG, librsvg);
REGISTER_ENCODER(LIBSHINE, libshine);
REGISTER_ENCDEC (LIBSPEEX, libspeex);
REGISTER_ENCODER(LIBTHEORA, libtheora);
REGISTER_ENCODER(LIBTWOLAME, libtwolame);
REGISTER_ENCODER(LIBVO_AMRWBENC, libvo_amrwbenc);
REGISTER_ENCDEC (LIBVORBIS, libvorbis);
REGISTER_ENCDEC (LIBVPX_VP8, libvpx_vp8);
REGISTER_ENCDEC (LIBVPX_VP9, libvpx_vp9);
REGISTER_ENCODER(LIBWAVPACK, libwavpack);
REGISTER_ENCODER(LIBWEBP_ANIM, libwebp_anim); /* preferred over libwebp */
REGISTER_ENCODER(LIBWEBP, libwebp);
REGISTER_ENCODER(LIBX262, libx262);
REGISTER_ENCODER(LIBX264, libx264);
REGISTER_ENCODER(LIBX264RGB, libx264rgb);
REGISTER_ENCODER(LIBX265, libx265);
REGISTER_ENCODER(LIBXAVS, libxavs);
REGISTER_ENCODER(LIBXVID, libxvid);
REGISTER_DECODER(LIBZVBI_TELETEXT, libzvbi_teletext);
/* text */
REGISTER_DECODER(BINTEXT, bintext);
REGISTER_DECODER(XBIN, xbin);
REGISTER_DECODER(IDF, idf);
/* external libraries, that shouldn't be used by default if one of the
* above is available */
REGISTER_ENCODER(H263_V4L2M2M, h263_v4l2m2m);
REGISTER_ENCDEC (LIBOPENH264, libopenh264);
REGISTER_DECODER(H264_CUVID, h264_cuvid);
REGISTER_ENCODER(H264_NVENC, h264_nvenc);
REGISTER_ENCODER(H264_OMX, h264_omx);
REGISTER_ENCODER(H264_QSV, h264_qsv);
REGISTER_ENCODER(H264_V4L2M2M, h264_v4l2m2m);
REGISTER_ENCODER(H264_VAAPI, h264_vaapi);
REGISTER_ENCODER(H264_VIDEOTOOLBOX, h264_videotoolbox);
#if FF_API_NVENC_OLD_NAME
REGISTER_ENCODER(NVENC, nvenc);
REGISTER_ENCODER(NVENC_H264, nvenc_h264);
REGISTER_ENCODER(NVENC_HEVC, nvenc_hevc);
#endif
REGISTER_DECODER(HEVC_CUVID, hevc_cuvid);
REGISTER_DECODER(HEVC_MEDIACODEC, hevc_mediacodec);
REGISTER_ENCODER(HEVC_NVENC, hevc_nvenc);
REGISTER_ENCODER(HEVC_QSV, hevc_qsv);
REGISTER_ENCODER(HEVC_V4L2M2M, hevc_v4l2m2m);
REGISTER_ENCODER(HEVC_VAAPI, hevc_vaapi);
REGISTER_ENCODER(HEVC_VIDEOTOOLBOX, hevc_videotoolbox);
REGISTER_ENCODER(LIBKVAZAAR, libkvazaar);
REGISTER_DECODER(MJPEG_CUVID, mjpeg_cuvid);
REGISTER_ENCODER(MJPEG_QSV, mjpeg_qsv);
REGISTER_ENCODER(MJPEG_VAAPI, mjpeg_vaapi);
REGISTER_DECODER(MPEG1_CUVID, mpeg1_cuvid);
REGISTER_DECODER(MPEG2_CUVID, mpeg2_cuvid);
REGISTER_ENCODER(MPEG2_QSV, mpeg2_qsv);
REGISTER_ENCODER(MPEG2_VAAPI, mpeg2_vaapi);
REGISTER_DECODER(MPEG4_CUVID, mpeg4_cuvid);
REGISTER_DECODER(MPEG4_MEDIACODEC, mpeg4_mediacodec);
REGISTER_ENCODER(MPEG4_V4L2M2M, mpeg4_v4l2m2m);
REGISTER_DECODER(VC1_CUVID, vc1_cuvid);
REGISTER_DECODER(VP8_CUVID, vp8_cuvid);
REGISTER_DECODER(VP8_MEDIACODEC, vp8_mediacodec);
REGISTER_DECODER(VP8_QSV, vp8_qsv);
REGISTER_ENCODER(VP8_V4L2M2M, vp8_v4l2m2m);
REGISTER_ENCODER(VP8_VAAPI, vp8_vaapi);
REGISTER_DECODER(VP9_CUVID, vp9_cuvid);
REGISTER_DECODER(VP9_MEDIACODEC, vp9_mediacodec);
REGISTER_ENCODER(VP9_VAAPI, vp9_vaapi);
/* parsers */
REGISTER_PARSER(AAC, aac);
REGISTER_PARSER(AAC_LATM, aac_latm);
REGISTER_PARSER(AC3, ac3);
REGISTER_PARSER(ADX, adx);
REGISTER_PARSER(BMP, bmp);
REGISTER_PARSER(CAVSVIDEO, cavsvideo);
REGISTER_PARSER(COOK, cook);
REGISTER_PARSER(DCA, dca);
REGISTER_PARSER(DIRAC, dirac);
REGISTER_PARSER(DNXHD, dnxhd);
REGISTER_PARSER(DPX, dpx);
REGISTER_PARSER(DVAUDIO, dvaudio);
REGISTER_PARSER(DVBSUB, dvbsub);
REGISTER_PARSER(DVDSUB, dvdsub);
REGISTER_PARSER(DVD_NAV, dvd_nav);
REGISTER_PARSER(FLAC, flac);
REGISTER_PARSER(G729, g729);
REGISTER_PARSER(GSM, gsm);
REGISTER_PARSER(H261, h261);
REGISTER_PARSER(H263, h263);
REGISTER_PARSER(H264, h264);
REGISTER_PARSER(HEVC, hevc);
REGISTER_PARSER(MJPEG, mjpeg);
REGISTER_PARSER(MLP, mlp);
REGISTER_PARSER(MPEG4VIDEO, mpeg4video);
REGISTER_PARSER(MPEGAUDIO, mpegaudio);
REGISTER_PARSER(MPEGVIDEO, mpegvideo);
REGISTER_PARSER(OPUS, opus);
REGISTER_PARSER(PNG, png);
REGISTER_PARSER(PNM, pnm);
REGISTER_PARSER(RV30, rv30);
REGISTER_PARSER(RV40, rv40);
REGISTER_PARSER(SIPR, sipr);
REGISTER_PARSER(TAK, tak);
REGISTER_PARSER(VC1, vc1);
REGISTER_PARSER(VORBIS, vorbis);
REGISTER_PARSER(VP3, vp3);
REGISTER_PARSER(VP8, vp8);
REGISTER_PARSER(VP9, vp9);
REGISTER_PARSER(XMA, xma);
} | true | FFmpeg | 7c9f739d864c0ed8f1b433d6a7d9f674edda9cf5 |
3,996 | void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
uint64_t old_mask = env->psw.mask;
env->psw.addr = addr;
env->psw.mask = mask;
if (tcg_enabled()) {
env->cc_op = (mask >> 44) & 3;
}
if ((old_mask ^ mask) & PSW_MASK_PER) {
s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
}
if (mask & PSW_MASK_WAIT) {
S390CPU *cpu = s390_env_get_cpu(env);
if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
#endif
}
}
}
| true | qemu | 83f7f32901c630f4fc01acd0d9082da466b17102 |
3,997 | static int amr_read_packet(AVFormatContext *s, AVPacket *pkt)
{
AVCodecContext *enc = s->streams[0]->codec;
int read, size = 0, toc, mode;
int64_t pos = avio_tell(s->pb);
if (url_feof(s->pb)) {
return AVERROR(EIO);
}
// FIXME this is wrong, this should rather be in a AVParset
toc = avio_r8(s->pb);
mode = (toc >> 3) & 0x0F;
if (enc->codec_id == AV_CODEC_ID_AMR_NB) {
static const uint8_t packed_size[16] = {
12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0
};
size = packed_size[mode] + 1;
} else if (enc->codec_id == AV_CODEC_ID_AMR_WB) {
static const uint8_t packed_size[16] = {
18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1
};
size = packed_size[mode];
} else {
av_assert0(0);
}
if (!size || av_new_packet(pkt, size))
return AVERROR(EIO);
/* Both AMR formats have 50 frames per second */
s->streams[0]->codec->bit_rate = size*8*50;
pkt->stream_index = 0;
pkt->pos = pos;
pkt->data[0] = toc;
pkt->duration = enc->codec_id == AV_CODEC_ID_AMR_NB ? 160 : 320;
read = avio_read(s->pb, pkt->data + 1, size - 1);
if (read != size - 1) {
av_free_packet(pkt);
return AVERROR(EIO);
}
return 0;
}
| false | FFmpeg | d87ff555025e90ef285425216c29be95034e2485 |
3,998 | static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size)
{
uint8_t *buf1;
RMContext *rm = s->priv_data;
ByteIOContext *pb = &s->pb;
StreamInfo *stream = rm->audio_stream;
int i;
/* XXX: suppress this malloc */
buf1= (uint8_t*) av_malloc( size * sizeof(uint8_t) );
write_packet_header(s, stream, size, stream->enc->coded_frame->key_frame);
/* for AC3, the words seems to be reversed */
for(i=0;i<size;i+=2) {
buf1[i] = buf[i+1];
buf1[i+1] = buf[i];
}
put_buffer(pb, buf1, size);
put_flush_packet(pb);
stream->nb_frames++;
av_free(buf1);
return 0;
}
| false | FFmpeg | 3c895fc098f7637f6d5ec3a9d6766e724a8b9e41 |
3,999 | static inline void RENAME(bgr24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
{
#if COMPILE_TEMPLATE_MMX
RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
#else
int i;
for (i=0; i<width; i++) {
int b= src[i*3+0];
int g= src[i*3+1];
int r= src[i*3+2];
dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
}
#endif /* COMPILE_TEMPLATE_MMX */
}
| false | FFmpeg | d1adad3cca407f493c3637e20ecd4f7124e69212 |
4,000 | void ff_put_h264_qpel8_mc30_msa(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
avc_luma_hz_qrt_8w_msa(src - 2, stride, dst, stride, 8, 1);
}
| false | FFmpeg | b5da07d4340a8e8e40dcd1900977a76ff31fbb84 |
4,001 | static int bmp_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
BMPContext *s = avctx->priv_data;
AVFrame *picture = data;
AVFrame *p = &s->picture;
unsigned int fsize, hsize;
int width, height;
unsigned int depth;
BiCompression comp;
unsigned int ihsize;
int i, j, n, linesize;
uint32_t rgb[3];
uint8_t *ptr;
int dsize;
const uint8_t *buf0 = buf;
if(buf_size < 14){
av_log(avctx, AV_LOG_ERROR, "buf size too small (%d)\n", buf_size);
return -1;
}
if(bytestream_get_byte(&buf) != 'B' ||
bytestream_get_byte(&buf) != 'M') {
av_log(avctx, AV_LOG_ERROR, "bad magic number\n");
return -1;
}
fsize = bytestream_get_le32(&buf);
if(buf_size < fsize){
av_log(avctx, AV_LOG_ERROR, "not enough data (%d < %d), trying to decode anyway\n",
buf_size, fsize);
fsize = buf_size;
}
buf += 2; /* reserved1 */
buf += 2; /* reserved2 */
hsize = bytestream_get_le32(&buf); /* header size */
ihsize = bytestream_get_le32(&buf); /* more header size */
if(ihsize + 14 > hsize){
av_log(avctx, AV_LOG_ERROR, "invalid header size %d\n", hsize);
return -1;
}
/* sometimes file size is set to some headers size, set a real size in that case */
if(fsize == 14 || fsize == ihsize + 14)
fsize = buf_size - 2;
if(fsize <= hsize){
av_log(avctx, AV_LOG_ERROR, "declared file size is less than header size (%d < %d)\n",
fsize, hsize);
return -1;
}
switch(ihsize){
case 40: // windib v3
case 64: // OS/2 v2
case 108: // windib v4
case 124: // windib v5
width = bytestream_get_le32(&buf);
height = bytestream_get_le32(&buf);
break;
case 12: // OS/2 v1
width = bytestream_get_le16(&buf);
height = bytestream_get_le16(&buf);
break;
default:
av_log(avctx, AV_LOG_ERROR, "unsupported BMP file, patch welcome\n");
return -1;
}
if(bytestream_get_le16(&buf) != 1){ /* planes */
av_log(avctx, AV_LOG_ERROR, "invalid BMP header\n");
return -1;
}
depth = bytestream_get_le16(&buf);
if(ihsize == 40)
comp = bytestream_get_le32(&buf);
else
comp = BMP_RGB;
if(comp != BMP_RGB && comp != BMP_BITFIELDS && comp != BMP_RLE4 && comp != BMP_RLE8){
av_log(avctx, AV_LOG_ERROR, "BMP coding %d not supported\n", comp);
return -1;
}
if(comp == BMP_BITFIELDS){
buf += 20;
rgb[0] = bytestream_get_le32(&buf);
rgb[1] = bytestream_get_le32(&buf);
rgb[2] = bytestream_get_le32(&buf);
}
avctx->width = width;
avctx->height = height > 0? height: -height;
avctx->pix_fmt = PIX_FMT_NONE;
switch(depth){
case 32:
if(comp == BMP_BITFIELDS){
rgb[0] = (rgb[0] >> 15) & 3;
rgb[1] = (rgb[1] >> 15) & 3;
rgb[2] = (rgb[2] >> 15) & 3;
if(rgb[0] + rgb[1] + rgb[2] != 3 ||
rgb[0] == rgb[1] || rgb[0] == rgb[2] || rgb[1] == rgb[2]){
break;
}
} else {
rgb[0] = 2;
rgb[1] = 1;
rgb[2] = 0;
}
avctx->pix_fmt = PIX_FMT_BGR24;
break;
case 24:
avctx->pix_fmt = PIX_FMT_BGR24;
break;
case 16:
if(comp == BMP_RGB)
avctx->pix_fmt = PIX_FMT_RGB555;
if(comp == BMP_BITFIELDS)
avctx->pix_fmt = rgb[1] == 0x07E0 ? PIX_FMT_RGB565 : PIX_FMT_RGB555;
break;
case 8:
if(hsize - ihsize - 14 > 0)
avctx->pix_fmt = PIX_FMT_PAL8;
else
avctx->pix_fmt = PIX_FMT_GRAY8;
break;
case 1:
case 4:
if(hsize - ihsize - 14 > 0){
avctx->pix_fmt = PIX_FMT_PAL8;
}else{
av_log(avctx, AV_LOG_ERROR, "Unknown palette for %d-colour BMP\n", 1<<depth);
return -1;
}
break;
default:
av_log(avctx, AV_LOG_ERROR, "depth %d not supported\n", depth);
return -1;
}
if(avctx->pix_fmt == PIX_FMT_NONE){
av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n");
return -1;
}
if(p->data[0])
avctx->release_buffer(avctx, p);
p->reference = 0;
if(avctx->get_buffer(avctx, p) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
buf = buf0 + hsize;
dsize = buf_size - hsize;
/* Line size in file multiple of 4 */
n = ((avctx->width * depth) / 8 + 3) & ~3;
if(n * avctx->height > dsize && comp != BMP_RLE4 && comp != BMP_RLE8){
av_log(avctx, AV_LOG_ERROR, "not enough data (%d < %d)\n",
dsize, n * avctx->height);
return -1;
}
// RLE may skip decoding some picture areas, so blank picture before decoding
if(comp == BMP_RLE4 || comp == BMP_RLE8)
memset(p->data[0], 0, avctx->height * p->linesize[0]);
if(depth == 4 || depth == 8)
memset(p->data[1], 0, 1024);
if(height > 0){
ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
linesize = -p->linesize[0];
} else {
ptr = p->data[0];
linesize = p->linesize[0];
}
if(avctx->pix_fmt == PIX_FMT_PAL8){
int colors = 1 << depth;
if(ihsize >= 36){
int t;
buf = buf0 + 46;
t = bytestream_get_le32(&buf);
if(t < 0 || t > (1 << depth)){
av_log(avctx, AV_LOG_ERROR, "Incorrect number of colors - %X for bitdepth %d\n", t, depth);
}else if(t){
colors = t;
}
}
buf = buf0 + 14 + ihsize; //palette location
if((hsize-ihsize-14) < (colors << 2)){ // OS/2 bitmap, 3 bytes per palette entry
for(i = 0; i < colors; i++)
((uint32_t*)p->data[1])[i] = bytestream_get_le24(&buf);
}else{
for(i = 0; i < colors; i++)
((uint32_t*)p->data[1])[i] = bytestream_get_le32(&buf);
}
buf = buf0 + hsize;
}
if(comp == BMP_RLE4 || comp == BMP_RLE8){
if(height < 0){
p->data[0] += p->linesize[0] * (avctx->height - 1);
p->linesize[0] = -p->linesize[0];
}
ff_msrle_decode(avctx, (AVPicture*)p, depth, buf, dsize);
if(height < 0){
p->data[0] += p->linesize[0] * (avctx->height - 1);
p->linesize[0] = -p->linesize[0];
}
}else{
switch(depth){
case 1:
for (i = 0; i < avctx->height; i++) {
int j;
for (j = 0; j < n; j++) {
ptr[j*8+0] = buf[j] >> 7;
ptr[j*8+1] = (buf[j] >> 6) & 1;
ptr[j*8+2] = (buf[j] >> 5) & 1;
ptr[j*8+3] = (buf[j] >> 4) & 1;
ptr[j*8+4] = (buf[j] >> 3) & 1;
ptr[j*8+5] = (buf[j] >> 2) & 1;
ptr[j*8+6] = (buf[j] >> 1) & 1;
ptr[j*8+7] = buf[j] & 1;
}
buf += n;
ptr += linesize;
}
break;
case 8:
case 24:
for(i = 0; i < avctx->height; i++){
memcpy(ptr, buf, n);
buf += n;
ptr += linesize;
}
break;
case 4:
for(i = 0; i < avctx->height; i++){
int j;
for(j = 0; j < n; j++){
ptr[j*2+0] = (buf[j] >> 4) & 0xF;
ptr[j*2+1] = buf[j] & 0xF;
}
buf += n;
ptr += linesize;
}
break;
case 16:
for(i = 0; i < avctx->height; i++){
const uint16_t *src = (const uint16_t *) buf;
uint16_t *dst = (uint16_t *) ptr;
for(j = 0; j < avctx->width; j++)
*dst++ = av_le2ne16(*src++);
buf += n;
ptr += linesize;
}
break;
case 32:
for(i = 0; i < avctx->height; i++){
const uint8_t *src = buf;
uint8_t *dst = ptr;
for(j = 0; j < avctx->width; j++){
dst[0] = src[rgb[2]];
dst[1] = src[rgb[1]];
dst[2] = src[rgb[0]];
dst += 3;
src += 4;
}
buf += n;
ptr += linesize;
}
break;
default:
av_log(avctx, AV_LOG_ERROR, "BMP decoder is broken\n");
return -1;
}
}
*picture = s->picture;
*data_size = sizeof(AVPicture);
return buf_size;
}
| false | FFmpeg | 353a2d2164c09740e42f33014c4773b93e96a0d2 |
4,002 | static int pcm_read_packet(AVFormatContext *s, AVPacket *pkt)
{
int ret, size, bps;
// AVStream *st = s->streams[0];
size= RAW_SAMPLES*s->streams[0]->codec->block_align;
ret= av_get_packet(s->pb, pkt, size);
pkt->stream_index = 0;
if (ret < 0)
return ret;
bps= av_get_bits_per_sample(s->streams[0]->codec->codec_id);
assert(bps); // if false there IS a bug elsewhere (NOT in this function)
pkt->dts=
pkt->pts= pkt->pos*8 / (bps * s->streams[0]->codec->channels);
return ret;
}
| false | FFmpeg | 38893dc028e458eaf3f906833d4ee515689edb7e |
4,003 | static void dump_stream_format(AVFormatContext *ic, int i,
int index, int is_output)
{
char buf[256];
int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
AVStream *st = ic->streams[i];
AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
char *separator = ic->dump_separator;
char **codec_separator = av_opt_ptr(st->codec->av_class, st->codec, "dump_separator");
int use_format_separator = !*codec_separator;
if (use_format_separator)
*codec_separator = av_strdup(separator);
avcodec_string(buf, sizeof(buf), st->codec, is_output);
if (use_format_separator)
av_freep(codec_separator);
av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
/* the pid is an important information, so we display it */
/* XXX: add a generic system */
if (flags & AVFMT_SHOW_IDS)
av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
if (lang)
av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames,
st->time_base.num, st->time_base.den);
av_log(NULL, AV_LOG_INFO, ": %s", buf);
if (st->sample_aspect_ratio.num && // default
av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
AVRational display_aspect_ratio;
av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
st->codec->width * st->sample_aspect_ratio.num,
st->codec->height * st->sample_aspect_ratio.den,
1024 * 1024);
av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
display_aspect_ratio.num, display_aspect_ratio.den);
}
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
int fps = st->avg_frame_rate.den && st->avg_frame_rate.num;
int tbr = st->r_frame_rate.den && st->r_frame_rate.num;
int tbn = st->time_base.den && st->time_base.num;
int tbc = st->codec->time_base.den && st->codec->time_base.num;
if (fps || tbr || tbn || tbc)
av_log(NULL, AV_LOG_INFO, "%s", separator);
if (fps)
print_fps(av_q2d(st->avg_frame_rate), tbr || tbn || tbc ? "fps, " : "fps");
if (tbr)
print_fps(av_q2d(st->r_frame_rate), tbn || tbc ? "tbr, " : "tbr");
if (tbn)
print_fps(1 / av_q2d(st->time_base), tbc ? "tbn, " : "tbn");
if (tbc)
print_fps(1 / av_q2d(st->codec->time_base), "tbc");
}
if (st->disposition & AV_DISPOSITION_DEFAULT)
av_log(NULL, AV_LOG_INFO, " (default)");
if (st->disposition & AV_DISPOSITION_DUB)
av_log(NULL, AV_LOG_INFO, " (dub)");
if (st->disposition & AV_DISPOSITION_ORIGINAL)
av_log(NULL, AV_LOG_INFO, " (original)");
if (st->disposition & AV_DISPOSITION_COMMENT)
av_log(NULL, AV_LOG_INFO, " (comment)");
if (st->disposition & AV_DISPOSITION_LYRICS)
av_log(NULL, AV_LOG_INFO, " (lyrics)");
if (st->disposition & AV_DISPOSITION_KARAOKE)
av_log(NULL, AV_LOG_INFO, " (karaoke)");
if (st->disposition & AV_DISPOSITION_FORCED)
av_log(NULL, AV_LOG_INFO, " (forced)");
if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
av_log(NULL, AV_LOG_INFO, " (visual impaired)");
if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
av_log(NULL, AV_LOG_INFO, " (clean effects)");
av_log(NULL, AV_LOG_INFO, "\n");
dump_metadata(NULL, st->metadata, " ");
dump_sidedata(NULL, st, " ");
}
| true | FFmpeg | d1bdaf3fb2c45020f72a378bb64eab1bf136581c |
4,004 | static av_cold int encode_close(AVCodecContext *avctx)
{
if (avctx->priv_data) {
DCAEncContext *c = avctx->priv_data;
subband_bufer_free(c);
ff_dcaadpcm_free(&c->adpcm_ctx);
}
return 0;
}
| true | FFmpeg | 56e11ebf55a5e51a8a7131d382c2020e35d34f42 |
4,005 | static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
uint8_t *buf, int nb_sectors)
{
int async_ret;
BlockDriverAIOCB *acb;
async_ret = NOT_DONE;
qemu_aio_wait_start();
acb = bdrv_aio_read(bs, sector_num, buf, nb_sectors,
bdrv_rw_em_cb, &async_ret);
if (acb == NULL) {
qemu_aio_wait_end();
return -1;
}
while (async_ret == NOT_DONE) {
qemu_aio_wait();
}
qemu_aio_wait_end();
return async_ret;
}
| true | qemu | baf35cb90204d75404892aa4e52628ae7a00669b |
4,007 | void hmp_info_cpus(Monitor *mon, const QDict *qdict)
{
CpuInfoList *cpu_list, *cpu;
cpu_list = qmp_query_cpus(NULL);
for (cpu = cpu_list; cpu; cpu = cpu->next) {
int active = ' ';
if (cpu->value->CPU == monitor_get_cpu_index()) {
active = '*';
}
monitor_printf(mon, "%c CPU #%" PRId64 ":", active, cpu->value->CPU);
switch (cpu->value->arch) {
case CPU_INFO_ARCH_X86:
monitor_printf(mon, " pc=0x%016" PRIx64, cpu->value->u.x86->pc);
break;
case CPU_INFO_ARCH_PPC:
monitor_printf(mon, " nip=0x%016" PRIx64, cpu->value->u.ppc->nip);
break;
case CPU_INFO_ARCH_SPARC:
monitor_printf(mon, " pc=0x%016" PRIx64,
cpu->value->u.q_sparc->pc);
monitor_printf(mon, " npc=0x%016" PRIx64,
cpu->value->u.q_sparc->npc);
break;
case CPU_INFO_ARCH_MIPS:
monitor_printf(mon, " PC=0x%016" PRIx64, cpu->value->u.q_mips->PC);
break;
case CPU_INFO_ARCH_TRICORE:
monitor_printf(mon, " PC=0x%016" PRIx64, cpu->value->u.tricore->PC);
break;
default:
break;
}
if (cpu->value->halted) {
monitor_printf(mon, " (halted)");
}
monitor_printf(mon, " thread_id=%" PRId64 "\n", cpu->value->thread_id);
}
qapi_free_CpuInfoList(cpu_list);
}
| true | qemu | 544a3731591f5d53e15f22de00ce5ac758d490b3 |
4,008 | MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
hwaddr addr,
uint64_t data,
unsigned size,
MemTxAttrs attrs)
{
if (!memory_region_access_valid(mr, addr, size, true)) {
unassigned_mem_write(mr, addr, data, size);
return MEMTX_DECODE_ERROR;
}
adjust_endianness(mr, &data, size);
if (mr->ops->write) {
return access_with_adjusted_size(addr, &data, size,
mr->ops->impl.min_access_size,
mr->ops->impl.max_access_size,
memory_region_write_accessor, mr,
attrs);
} else if (mr->ops->write_with_attrs) {
return
access_with_adjusted_size(addr, &data, size,
mr->ops->impl.min_access_size,
mr->ops->impl.max_access_size,
memory_region_write_with_attrs_accessor,
mr, attrs);
} else {
return access_with_adjusted_size(addr, &data, size, 1, 4,
memory_region_oldmmio_write_accessor,
mr, attrs);
}
} | true | qemu | 8c56c1a592b5092d91da8d8943c17777d6462a6f
4,010 | static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
{
H264Context *h = &svq3->h;
int i, j, k, m, dir, mode;
int cbp = 0;
uint32_t vlc;
int8_t *top, *left;
MpegEncContext *const s = (MpegEncContext *) h;
const int mb_xy = h->mb_xy;
const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
h->topright_samples_available = 0xFFFF;
if (mb_type == 0) { /* SKIP */
if (s->pict_type == AV_PICTURE_TYPE_P || s->next_picture.f.mb_type[mb_xy] == -1) {
svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0);
if (s->pict_type == AV_PICTURE_TYPE_B) {
svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1);
}
mb_type = MB_TYPE_SKIP;
} else {
mb_type = FFMIN(s->next_picture.f.mb_type[mb_xy], 6);
if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
return -1;
if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
return -1;
mb_type = MB_TYPE_16x16;
}
} else if (mb_type < 8) { /* INTER */
if (svq3->thirdpel_flag && svq3->halfpel_flag == !get_bits1 (&s->gb)) {
mode = THIRDPEL_MODE;
} else if (svq3->halfpel_flag && svq3->thirdpel_flag == !get_bits1 (&s->gb)) {
mode = HALFPEL_MODE;
} else {
mode = FULLPEL_MODE;
}
/* fill caches */
/* note ref_cache should contain here:
????????
???11111
N??11111
N??11111
N??11111
*/
for (m = 0; m < 2; m++) {
if (s->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6] != -1) {
for (i = 0; i < 4; i++) {
*(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - 1 + i*h->b_stride];
}
} else {
for (i = 0; i < 4; i++) {
*(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = 0;
}
}
if (s->mb_y > 0) {
memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.f.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t));
memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
if (s->mb_x < (s->mb_width - 1)) {
*(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - h->b_stride + 4];
h->ref_cache[m][scan8[0] + 4 - 1*8] =
(h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride + 1]+6] == -1 ||
h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride ] ] == -1) ? PART_NOT_AVAILABLE : 1;
}else
h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE;
if (s->mb_x > 0) {
*(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - h->b_stride - 1];
h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1]+3] == -1) ? PART_NOT_AVAILABLE : 1;
}else
h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE;
}else
memset(&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8);
if (s->pict_type != AV_PICTURE_TYPE_B)
break;
}
/* decode motion vector(s) and form prediction(s) */
if (s->pict_type == AV_PICTURE_TYPE_P) {
if (svq3_mc_dir(h, (mb_type - 1), mode, 0, 0) < 0)
return -1;
} else { /* AV_PICTURE_TYPE_B */
if (mb_type != 2) {
if (svq3_mc_dir(h, 0, mode, 0, 0) < 0)
return -1;
} else {
for (i = 0; i < 4; i++) {
memset(s->current_picture.f.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
}
}
if (mb_type != 1) {
if (svq3_mc_dir(h, 0, mode, 1, (mb_type == 3)) < 0)
return -1;
} else {
for (i = 0; i < 4; i++) {
memset(s->current_picture.f.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
}
}
}
mb_type = MB_TYPE_16x16;
} else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
memset(h->intra4x4_pred_mode_cache, -1, 8*5*sizeof(int8_t));
if (mb_type == 8) {
if (s->mb_x > 0) {
for (i = 0; i < 4; i++) {
h->intra4x4_pred_mode_cache[scan8[0] - 1 + i*8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6-i];
}
if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) {
h->left_samples_available = 0x5F5F;
}
}
if (s->mb_y > 0) {
h->intra4x4_pred_mode_cache[4+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+0];
h->intra4x4_pred_mode_cache[5+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+1];
h->intra4x4_pred_mode_cache[6+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+2];
h->intra4x4_pred_mode_cache[7+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+3];
if (h->intra4x4_pred_mode_cache[4+8*0] == -1) {
h->top_samples_available = 0x33FF;
}
}
/* decode prediction codes for luma blocks */
for (i = 0; i < 16; i+=2) {
vlc = svq3_get_ue_golomb(&s->gb);
if (vlc >= 25){
av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
return -1;
}
left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
if (left[1] == -1 || left[2] == -1){
av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n");
return -1;
}
}
} else { /* mb_type == 33, DC_128_PRED block type */
for (i = 0; i < 4; i++) {
memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_PRED, 4);
}
}
write_back_intra_pred_mode(h);
if (mb_type == 8) {
ff_h264_check_intra4x4_pred_mode(h);
h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
} else {
for (i = 0; i < 4; i++) {
memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_128_PRED, 4);
}
h->top_samples_available = 0x33FF;
h->left_samples_available = 0x5F5F;
}
mb_type = MB_TYPE_INTRA4x4;
} else { /* INTRA16x16 */
dir = i_mb_type_info[mb_type - 8].pred_mode;
dir = (dir >> 1) ^ 3*(dir & 1) ^ 1;
if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir)) == -1){
av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
return -1;
}
cbp = i_mb_type_info[mb_type - 8].cbp;
mb_type = MB_TYPE_INTRA16x16;
}
if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
for (i = 0; i < 4; i++) {
memset(s->current_picture.f.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
}
if (s->pict_type == AV_PICTURE_TYPE_B) {
for (i = 0; i < 4; i++) {
memset(s->current_picture.f.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t));
}
}
}
if (!IS_INTRA4x4(mb_type)) {
memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy], DC_PRED, 8);
}
if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
memset(h->non_zero_count_cache + 8, 0, 14*8*sizeof(uint8_t));
s->dsp.clear_blocks(h->mb+ 0);
s->dsp.clear_blocks(h->mb+384);
}
if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48){
av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
return -1;
}
cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc];
}
if (IS_INTRA16x16(mb_type) || (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
s->qscale += svq3_get_se_golomb(&s->gb);
if (s->qscale > 31){
av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
return -1;
}
}
if (IS_INTRA16x16(mb_type)) {
AV_ZERO128(h->mb_luma_dc[0]+0);
AV_ZERO128(h->mb_luma_dc[0]+8);
if (svq3_decode_block(&s->gb, h->mb_luma_dc, 0, 1)){
av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding intra luma dc\n");
return -1;
}
}
if (cbp) {
const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
for (i = 0; i < 4; i++) {
if ((cbp & (1 << i))) {
for (j = 0; j < 4; j++) {
k = index ? ((j&1) + 2*(i&1) + 2*(j&2) + 4*(i&2)) : (4*i + j);
h->non_zero_count_cache[ scan8[k] ] = 1;
if (svq3_decode_block(&s->gb, &h->mb[16*k], index, type)){
av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding block\n");
return -1;
}
}
}
}
if ((cbp & 0x30)) {
for (i = 1; i < 3; ++i) {
if (svq3_decode_block(&s->gb, &h->mb[16*16*i], 0, 3)){
av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma dc block\n");
return -1;
}
}
if ((cbp & 0x20)) {
for (i = 1; i < 3; i++) {
for (j = 0; j < 4; j++) {
k = 16*i + j;
h->non_zero_count_cache[ scan8[k] ] = 1;
if (svq3_decode_block(&s->gb, &h->mb[16*k], 1, 1)){
av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma ac block\n");
return -1;
}
}
}
}
}
}
h->cbp= cbp;
s->current_picture.f.mb_type[mb_xy] = mb_type;
if (IS_INTRA(mb_type)) {
h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8);
}
return 0;
}
| false | FFmpeg | 979bea13003ef489d95d2538ac2fb1c26c6f103b |
4,011 | static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pic_arg, int *got_packet)
{
MpegEncContext *s = avctx->priv_data;
AVFrame *pic;
int i, ret;
int chroma_h_shift, chroma_v_shift;
av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
//CODEC_FLAG_EMU_EDGE have to be cleared
if(s->avctx->flags & CODEC_FLAG_EMU_EDGE)
return AVERROR(EINVAL);
pic = av_frame_alloc();
if (!pic)
return AVERROR(ENOMEM);
av_frame_ref(pic, pic_arg);
//picture should be flipped upside-down
for(i=0; i < 3; i++) {
int vsample = i ? 2 >> chroma_v_shift : 2;
pic->data[i] += (pic->linesize[i] * (vsample * (8 * s->mb_height -((s->height/V_MAX)&7)) - 1 ));
pic->linesize[i] *= -1;
}
ret = ff_MPV_encode_picture(avctx, pkt, pic, got_packet);
av_frame_free(&pic);
return ret;
}
| false | FFmpeg | a26e9c1040afeecf9013da742b0dec7009445f2b |
4,012 | static int mpeg1_decode_sequence(AVCodecContext *avctx,
const uint8_t *buf, int buf_size)
{
Mpeg1Context *s1 = avctx->priv_data;
MpegEncContext *s = &s1->mpeg_enc_ctx;
int width, height;
int i, v, j;
init_get_bits(&s->gb, buf, buf_size * 8);
width = get_bits(&s->gb, 12);
height = get_bits(&s->gb, 12);
if (width == 0 || height == 0) {
av_log(avctx, AV_LOG_WARNING,
"Invalid horizontal or vertical size value.\n");
if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
return AVERROR_INVALIDDATA;
}
s->aspect_ratio_info = get_bits(&s->gb, 4);
if (s->aspect_ratio_info == 0) {
av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
return AVERROR_INVALIDDATA;
}
s->frame_rate_index = get_bits(&s->gb, 4);
if (s->frame_rate_index == 0 || s->frame_rate_index > 13) {
av_log(avctx, AV_LOG_WARNING,
"frame_rate_index %d is invalid\n", s->frame_rate_index);
s->frame_rate_index = 1;
}
s->bit_rate = get_bits(&s->gb, 18) * 400;
if (get_bits1(&s->gb) == 0) { /* marker */
av_log(avctx, AV_LOG_ERROR, "Marker in sequence header missing\n");
return AVERROR_INVALIDDATA;
}
s->width = width;
s->height = height;
s->avctx->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16;
skip_bits(&s->gb, 1);
/* get matrix */
if (get_bits1(&s->gb)) {
load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
} else {
for (i = 0; i < 64; i++) {
j = s->idsp.idct_permutation[i];
v = ff_mpeg1_default_intra_matrix[i];
s->intra_matrix[j] = v;
s->chroma_intra_matrix[j] = v;
}
}
if (get_bits1(&s->gb)) {
load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
} else {
for (i = 0; i < 64; i++) {
int j = s->idsp.idct_permutation[i];
v = ff_mpeg1_default_non_intra_matrix[i];
s->inter_matrix[j] = v;
s->chroma_inter_matrix[j] = v;
}
}
if (show_bits(&s->gb, 23) != 0) {
av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
return AVERROR_INVALIDDATA;
}
/* We set MPEG-2 parameters so that it emulates MPEG-1. */
s->progressive_sequence = 1;
s->progressive_frame = 1;
s->picture_structure = PICT_FRAME;
s->first_field = 0;
s->frame_pred_frame_dct = 1;
s->chroma_format = 1;
s->codec_id =
s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
s->out_format = FMT_MPEG1;
s->swap_uv = 0; // AFAIK VCR2 does not have SEQ_HEADER
if (s->flags & CODEC_FLAG_LOW_DELAY)
s->low_delay = 1;
if (s->avctx->debug & FF_DEBUG_PICT_INFO)
av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%d, aspect_ratio_info: %d \n",
s->avctx->rc_buffer_size, s->bit_rate, s->aspect_ratio_info);
return 0;
}
| false | FFmpeg | 37d93fdbf0fec0eac885974c01fba99826ae7763 |
4,013 | static int build_table(VLC *vlc, int table_nb_bits,
int nb_codes,
const void *bits, int bits_wrap, int bits_size,
const void *codes, int codes_wrap, int codes_size,
uint32_t code_prefix, int n_prefix)
{
int i, j, k, n, table_size, table_index, nb, n1, index;
uint32_t code;
VLC_TYPE (*table)[2];
table_size = 1 << table_nb_bits;
table_index = alloc_table(vlc, table_size);
#ifdef DEBUG_VLC
printf("new table index=%d size=%d code_prefix=%x n=%d\n",
table_index, table_size, code_prefix, n_prefix);
#endif
if (table_index < 0)
return -1;
table = &vlc->table[table_index];
for(i=0;i<table_size;i++) {
table[i][1] = 0; //bits
table[i][0] = -1; //codes
}
/* first pass: map codes and compute auxillary table sizes */
for(i=0;i<nb_codes;i++) {
GET_DATA(n, bits, i, bits_wrap, bits_size);
GET_DATA(code, codes, i, codes_wrap, codes_size);
/* we accept tables with holes */
if (n <= 0)
continue;
#if defined(DEBUG_VLC) && 0
printf("i=%d n=%d code=0x%x\n", i, n, code);
#endif
/* if code matches the prefix, it is in the table */
n -= n_prefix;
if (n > 0 && (code >> n) == code_prefix) {
if (n <= table_nb_bits) {
/* no need to add another table */
j = (code << (table_nb_bits - n)) & (table_size - 1);
nb = 1 << (table_nb_bits - n);
for(k=0;k<nb;k++) {
#ifdef DEBUG_VLC
av_log(NULL, AV_LOG_DEBUG, "%4x: code=%d n=%d\n",
j, i, n);
#endif
if (table[j][1] /*bits*/ != 0) {
av_log(NULL, AV_LOG_ERROR, "incorrect codes\n");
return -1;
}
table[j][1] = n; //bits
table[j][0] = i; //code
j++;
}
} else {
n -= table_nb_bits;
j = (code >> n) & ((1 << table_nb_bits) - 1);
#ifdef DEBUG_VLC
printf("%4x: n=%d (subtable)\n",
j, n);
#endif
/* compute table size */
n1 = -table[j][1]; //bits
if (n > n1)
n1 = n;
table[j][1] = -n1; //bits
}
}
}
/* second pass : fill auxillary tables recursively */
for(i=0;i<table_size;i++) {
n = table[i][1]; //bits
if (n < 0) {
n = -n;
if (n > table_nb_bits) {
n = table_nb_bits;
table[i][1] = -n; //bits
}
index = build_table(vlc, n, nb_codes,
bits, bits_wrap, bits_size,
codes, codes_wrap, codes_size,
(code_prefix << table_nb_bits) | i,
n_prefix + table_nb_bits);
if (index < 0)
return -1;
/* note: realloc has been done, so reload tables */
table = &vlc->table[table_index];
table[i][0] = index; //code
}
}
return table_index;
}
| true | FFmpeg | 073c2593c9f0aa4445a6fc1b9b24e6e52a8cc2c1 |
4,014 | av_cold static int lavfi_read_header(AVFormatContext *avctx)
{
LavfiContext *lavfi = avctx->priv_data;
AVFilterInOut *input_links = NULL, *output_links = NULL, *inout;
AVFilter *buffersink, *abuffersink;
int *pix_fmts = create_all_formats(AV_PIX_FMT_NB);
enum AVMediaType type;
int ret = 0, i, n;
#define FAIL(ERR) { ret = ERR; goto end; }
if (!pix_fmts)
FAIL(AVERROR(ENOMEM));
avfilter_register_all();
buffersink = avfilter_get_by_name("ffbuffersink");
abuffersink = avfilter_get_by_name("ffabuffersink");
if (lavfi->graph_filename && lavfi->graph_str) {
av_log(avctx, AV_LOG_ERROR,
"Only one of the graph or graph_file options must be specified\n");
return AVERROR(EINVAL);
}
if (lavfi->graph_filename) {
uint8_t *file_buf, *graph_buf;
size_t file_bufsize;
ret = av_file_map(lavfi->graph_filename,
&file_buf, &file_bufsize, 0, avctx);
if (ret < 0)
return ret;
/* create a 0-terminated string based on the read file */
graph_buf = av_malloc(file_bufsize + 1);
if (!graph_buf) {
av_file_unmap(file_buf, file_bufsize);
return AVERROR(ENOMEM);
}
memcpy(graph_buf, file_buf, file_bufsize);
graph_buf[file_bufsize] = 0;
av_file_unmap(file_buf, file_bufsize);
lavfi->graph_str = graph_buf;
}
if (!lavfi->graph_str)
lavfi->graph_str = av_strdup(avctx->filename);
/* parse the graph, create a stream for each open output */
if (!(lavfi->graph = avfilter_graph_alloc()))
FAIL(AVERROR(ENOMEM));
if ((ret = avfilter_graph_parse(lavfi->graph, lavfi->graph_str,
&input_links, &output_links, avctx)) < 0)
FAIL(ret);
if (input_links) {
av_log(avctx, AV_LOG_ERROR,
"Open inputs in the filtergraph are not acceptable\n");
FAIL(AVERROR(EINVAL));
}
/* count the outputs */
for (n = 0, inout = output_links; inout; n++, inout = inout->next);
if (!(lavfi->sink_stream_map = av_malloc(sizeof(int) * n)))
FAIL(AVERROR(ENOMEM));
if (!(lavfi->sink_eof = av_mallocz(sizeof(int) * n)))
FAIL(AVERROR(ENOMEM));
if (!(lavfi->stream_sink_map = av_malloc(sizeof(int) * n)))
FAIL(AVERROR(ENOMEM));
for (i = 0; i < n; i++)
lavfi->stream_sink_map[i] = -1;
/* parse the output link names - they need to be of the form out0, out1, ...
* create a mapping between them and the streams */
for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
int stream_idx;
if (!strcmp(inout->name, "out"))
stream_idx = 0;
else if (sscanf(inout->name, "out%d\n", &stream_idx) != 1) {
av_log(avctx, AV_LOG_ERROR,
"Invalid outpad name '%s'\n", inout->name);
FAIL(AVERROR(EINVAL));
}
if ((unsigned)stream_idx >= n) {
av_log(avctx, AV_LOG_ERROR,
"Invalid index was specified in output '%s', "
"must be a non-negative value < %d\n",
inout->name, n);
FAIL(AVERROR(EINVAL));
}
/* is an audio or video output? */
type = inout->filter_ctx->output_pads[inout->pad_idx].type;
if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
av_log(avctx, AV_LOG_ERROR,
"Output '%s' is not a video or audio output, not yet supported\n", inout->name);
FAIL(AVERROR(EINVAL));
}
if (lavfi->stream_sink_map[stream_idx] != -1) {
av_log(avctx, AV_LOG_ERROR,
"An output with stream index %d was already specified\n",
stream_idx);
FAIL(AVERROR(EINVAL));
}
lavfi->sink_stream_map[i] = stream_idx;
lavfi->stream_sink_map[stream_idx] = i;
}
/* for each open output create a corresponding stream */
for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
AVStream *st;
if (!(st = avformat_new_stream(avctx, NULL)))
FAIL(AVERROR(ENOMEM));
st->id = i;
}
/* create a sink for each output and connect them to the graph */
lavfi->sinks = av_malloc(sizeof(AVFilterContext *) * avctx->nb_streams);
if (!lavfi->sinks)
FAIL(AVERROR(ENOMEM));
for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
AVFilterContext *sink;
type = inout->filter_ctx->output_pads[inout->pad_idx].type;
if (type == AVMEDIA_TYPE_VIDEO && ! buffersink ||
type == AVMEDIA_TYPE_AUDIO && ! abuffersink) {
av_log(avctx, AV_LOG_ERROR, "Missing required buffersink filter, aborting.\n");
FAIL(AVERROR_FILTER_NOT_FOUND);
}
if (type == AVMEDIA_TYPE_VIDEO) {
AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
buffersink_params->pixel_fmts = pix_fmts;
ret = avfilter_graph_create_filter(&sink, buffersink,
inout->name, NULL,
buffersink_params, lavfi->graph);
av_freep(&buffersink_params);
if (ret < 0)
goto end;
} else if (type == AVMEDIA_TYPE_AUDIO) {
enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_U8,
AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_S32,
AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_DBL, -1 };
AVABufferSinkParams *abuffersink_params = av_abuffersink_params_alloc();
abuffersink_params->sample_fmts = sample_fmts;
ret = avfilter_graph_create_filter(&sink, abuffersink,
inout->name, NULL,
abuffersink_params, lavfi->graph);
av_free(abuffersink_params);
if (ret < 0)
goto end;
}
lavfi->sinks[i] = sink;
if ((ret = avfilter_link(inout->filter_ctx, inout->pad_idx, sink, 0)) < 0)
FAIL(ret);
}
/* configure the graph */
if ((ret = avfilter_graph_config(lavfi->graph, avctx)) < 0)
FAIL(ret);
if (lavfi->dump_graph) {
char *dump = avfilter_graph_dump(lavfi->graph, lavfi->dump_graph);
fputs(dump, stderr);
fflush(stderr);
av_free(dump);
}
/* fill each stream with the information in the corresponding sink */
for (i = 0; i < avctx->nb_streams; i++) {
AVFilterLink *link = lavfi->sinks[lavfi->stream_sink_map[i]]->inputs[0];
AVStream *st = avctx->streams[i];
st->codec->codec_type = link->type;
avpriv_set_pts_info(st, 64, link->time_base.num, link->time_base.den);
if (link->type == AVMEDIA_TYPE_VIDEO) {
st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codec->pix_fmt = link->format;
st->codec->time_base = link->time_base;
st->codec->width = link->w;
st->codec->height = link->h;
st ->sample_aspect_ratio =
st->codec->sample_aspect_ratio = link->sample_aspect_ratio;
} else if (link->type == AVMEDIA_TYPE_AUDIO) {
st->codec->codec_id = av_get_pcm_codec(link->format, -1);
st->codec->channels = av_get_channel_layout_nb_channels(link->channel_layout);
st->codec->sample_fmt = link->format;
st->codec->sample_rate = link->sample_rate;
st->codec->time_base = link->time_base;
st->codec->channel_layout = link->channel_layout;
if (st->codec->codec_id == AV_CODEC_ID_NONE)
av_log(avctx, AV_LOG_ERROR,
"Could not find PCM codec for sample format %s.\n",
av_get_sample_fmt_name(link->format));
}
}
end:
av_free(pix_fmts);
avfilter_inout_free(&input_links);
avfilter_inout_free(&output_links);
if (ret < 0)
lavfi_read_close(avctx);
return ret;
}
| true | FFmpeg | b19bfd6c9f42588c7a172bb019e27696972b8d2c |
4,015 | static gboolean register_signal_handlers(void)
{
struct sigaction sigact, sigact_chld;
int ret;
memset(&sigact, 0, sizeof(struct sigaction));
sigact.sa_handler = quit_handler;
ret = sigaction(SIGINT, &sigact, NULL);
if (ret == -1) {
g_error("error configuring signal handler: %s", strerror(errno));
return false;
}
ret = sigaction(SIGTERM, &sigact, NULL);
if (ret == -1) {
g_error("error configuring signal handler: %s", strerror(errno));
return false;
}
memset(&sigact_chld, 0, sizeof(struct sigaction));
sigact_chld.sa_handler = child_handler;
sigact_chld.sa_flags = SA_NOCLDSTOP;
ret = sigaction(SIGCHLD, &sigact_chld, NULL);
if (ret == -1) {
g_error("error configuring signal handler: %s", strerror(errno));
}
return true;
}
| true | qemu | dc8764f06155a7b3e635e02281b747a9e292127e |
4,016 | static void sch_handle_start_func_virtual(SubchDev *sch)
{
PMCW *p = &sch->curr_status.pmcw;
SCSW *s = &sch->curr_status.scsw;
int path;
int ret;
bool suspend_allowed;
/* Path management: In our simple css, we always choose the only path. */
path = 0x80;
if (!(s->ctrl & SCSW_ACTL_SUSP)) {
/* Start Function triggered via ssch, i.e. we have an ORB */
ORB *orb = &sch->orb;
s->cstat = 0;
s->dstat = 0;
/* Look at the orb and try to execute the channel program. */
p->intparm = orb->intparm;
if (!(orb->lpm & path)) {
/* Generate a deferred cc 3 condition. */
s->flags |= SCSW_FLAGS_MASK_CC;
s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
s->ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
return;
}
sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT);
s->flags |= (sch->ccw_fmt_1) ? SCSW_FLAGS_MASK_FMT : 0;
sch->ccw_no_data_cnt = 0;
suspend_allowed = !!(orb->ctrl0 & ORB_CTRL0_MASK_SPND);
} else {
/* Start Function resumed via rsch */
s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND);
/* The channel program had been suspended before. */
suspend_allowed = true;
}
sch->last_cmd_valid = false;
do {
ret = css_interpret_ccw(sch, sch->channel_prog, suspend_allowed);
switch (ret) {
case -EAGAIN:
/* ccw chain, continue processing */
break;
case 0:
/* success */
s->ctrl &= ~SCSW_ACTL_START_PEND;
s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
SCSW_STCTL_STATUS_PEND;
s->dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END;
s->cpa = sch->channel_prog + 8;
break;
case -EIO:
/* I/O errors, status depends on specific devices */
break;
case -ENOSYS:
/* unsupported command, generate unit check (command reject) */
s->ctrl &= ~SCSW_ACTL_START_PEND;
s->dstat = SCSW_DSTAT_UNIT_CHECK;
/* Set sense bit 0 in ecw0. */
sch->sense_data[0] = 0x80;
s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
s->cpa = sch->channel_prog + 8;
break;
case -EFAULT:
/* memory problem, generate channel data check */
s->ctrl &= ~SCSW_ACTL_START_PEND;
s->cstat = SCSW_CSTAT_DATA_CHECK;
s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
s->cpa = sch->channel_prog + 8;
break;
case -EBUSY:
/* subchannel busy, generate deferred cc 1 */
s->flags &= ~SCSW_FLAGS_MASK_CC;
s->flags |= (1 << 8);
s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
s->ctrl |= SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
break;
case -EINPROGRESS:
/* channel program has been suspended */
s->ctrl &= ~SCSW_ACTL_START_PEND;
s->ctrl |= SCSW_ACTL_SUSP;
break;
default:
/* error, generate channel program check */
s->ctrl &= ~SCSW_ACTL_START_PEND;
s->cstat = SCSW_CSTAT_PROG_CHECK;
s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
s->cpa = sch->channel_prog + 8;
break;
}
} while (ret == -EAGAIN);
}
| true | qemu | 248b920df95a5e3df10c16be63b017653c7ba730 |
4,018 | static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
ShowWavesContext *showwaves = ctx->priv;
const int nb_samples = insamples->audio->nb_samples;
AVFilterBufferRef *outpicref = showwaves->outpicref;
int linesize = outpicref ? outpicref->linesize[0] : 0;
int16_t *p = (int16_t *)insamples->data[0];
int nb_channels = av_get_channel_layout_nb_channels(insamples->audio->channel_layout);
int i, j, h;
const int n = showwaves->n;
const int x = 255 / (nb_channels * n); /* multiplication factor, pre-computed to avoid in-loop divisions */
/* draw data in the buffer */
for (i = 0; i < nb_samples; i++) {
if (showwaves->buf_idx == 0 && showwaves->sample_count_mod == 0) {
showwaves->outpicref = outpicref =
ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN,
outlink->w, outlink->h);
outpicref->video->w = outlink->w;
outpicref->video->h = outlink->h;
outpicref->pts = insamples->pts +
av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels,
(AVRational){ 1, inlink->sample_rate },
outlink->time_base);
outlink->out_buf = outpicref;
linesize = outpicref->linesize[0];
memset(outpicref->data[0], 0, showwaves->h*linesize);
}
for (j = 0; j < nb_channels; j++) {
h = showwaves->h/2 - av_rescale(*p++, showwaves->h/2, MAX_INT16);
if (h >= 0 && h < outlink->h)
*(outpicref->data[0] + showwaves->buf_idx + h * linesize) += x;
}
showwaves->sample_count_mod++;
if (showwaves->sample_count_mod == n) {
showwaves->sample_count_mod = 0;
showwaves->buf_idx++;
}
if (showwaves->buf_idx == showwaves->w)
push_frame(outlink);
}
avfilter_unref_buffer(insamples);
return 0;
}
| true | FFmpeg | 7afd42d9f26be1e95f15cbcfa0e09308a6591036 |
4,019 | int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
{
int ret = 0;
AVDictionary *tmp = NULL;
if (avcodec_is_open(avctx))
return 0;
if ((!codec && !avctx->codec)) {
av_log(avctx, AV_LOG_ERROR, "No codec provided to avcodec_open2().\n");
return AVERROR(EINVAL);
if ((codec && avctx->codec && codec != avctx->codec)) {
av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, "
"but %s passed to avcodec_open2().\n", avctx->codec->name, codec->name);
return AVERROR(EINVAL);
if (!codec)
codec = avctx->codec;
if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE)
return AVERROR(EINVAL);
if (options)
av_dict_copy(&tmp, *options, 0);
/* If there is a user-supplied mutex locking routine, call it. */
if (!(codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE) && codec->init) {
if (lockmgr_cb) {
if ((*lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN))
return -1;
entangled_thread_counter++;
if (entangled_thread_counter != 1) {
av_log(avctx, AV_LOG_ERROR,
"Insufficient thread locking. At least %d threads are "
"calling avcodec_open2() at the same time right now.\n",
entangled_thread_counter);
ret = -1;
goto end;
avctx->internal = av_mallocz(sizeof(AVCodecInternal));
if (!avctx->internal) {
ret = AVERROR(ENOMEM);
goto end;
avctx->internal->pool = av_mallocz(sizeof(*avctx->internal->pool));
if (!avctx->internal->pool) {
ret = AVERROR(ENOMEM);
avctx->internal->to_free = av_frame_alloc();
if (!avctx->internal->to_free) {
ret = AVERROR(ENOMEM);
avctx->internal->buffer_frame = av_frame_alloc();
if (!avctx->internal->buffer_frame) {
ret = AVERROR(ENOMEM);
avctx->internal->buffer_pkt = av_packet_alloc();
if (!avctx->internal->buffer_pkt) {
ret = AVERROR(ENOMEM);
if (codec->priv_data_size > 0) {
if (!avctx->priv_data) {
avctx->priv_data = av_mallocz(codec->priv_data_size);
if (!avctx->priv_data) {
ret = AVERROR(ENOMEM);
goto end;
if (codec->priv_class) {
*(const AVClass **)avctx->priv_data = codec->priv_class;
av_opt_set_defaults(avctx->priv_data);
if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0)
} else {
avctx->priv_data = NULL;
if ((ret = av_opt_set_dict(avctx, &tmp)) < 0)
if (avctx->coded_width && avctx->coded_height && !avctx->width && !avctx->height)
ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height);
else if (avctx->width && avctx->height)
ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
if (ret < 0)
if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height)
&& ( av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx) < 0
|| av_image_check_size(avctx->width, avctx->height, 0, avctx) < 0)) {
av_log(avctx, AV_LOG_WARNING, "ignoring invalid width/height values\n");
ff_set_dimensions(avctx, 0, 0);
if (avctx->width > 0 && avctx->height > 0) {
if (av_image_check_sar(avctx->width, avctx->height,
avctx->sample_aspect_ratio) < 0) {
av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
avctx->sample_aspect_ratio.num,
avctx->sample_aspect_ratio.den);
avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
/* if the decoder init function was already called previously,
* free the already allocated subtitle_header before overwriting it */
if (av_codec_is_decoder(codec))
av_freep(&avctx->subtitle_header);
if (avctx->channels > FF_SANE_NB_CHANNELS) {
avctx->codec = codec;
if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) &&
avctx->codec_id == AV_CODEC_ID_NONE) {
avctx->codec_type = codec->type;
avctx->codec_id = codec->id;
if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type
&& avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) {
av_log(avctx, AV_LOG_ERROR, "codec type or id mismatches\n");
avctx->frame_number = 0;
if ((avctx->codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) &&
avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
ret = AVERROR_EXPERIMENTAL;
if (avctx->codec_type == AVMEDIA_TYPE_AUDIO &&
(!avctx->time_base.num || !avctx->time_base.den)) {
avctx->time_base.num = 1;
avctx->time_base.den = avctx->sample_rate;
if (HAVE_THREADS) {
ret = ff_thread_init(avctx);
if (ret < 0) {
if (!HAVE_THREADS && !(codec->capabilities & AV_CODEC_CAP_AUTO_THREADS))
avctx->thread_count = 1;
if (av_codec_is_encoder(avctx->codec)) {
int i;
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame) {
ret = AVERROR(ENOMEM);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (avctx->codec->sample_fmts) {
for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
break;
if (avctx->channels == 1 &&
av_get_planar_sample_fmt(avctx->sample_fmt) ==
av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) {
avctx->sample_fmt = avctx->codec->sample_fmts[i];
break;
if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
av_log(avctx, AV_LOG_ERROR, "Specified sample_fmt is not supported.\n");
if (avctx->codec->pix_fmts) {
for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
break;
if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE) {
av_log(avctx, AV_LOG_ERROR, "Specified pix_fmt is not supported\n");
if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P ||
avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P ||
avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P ||
avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P)
avctx->color_range = AVCOL_RANGE_JPEG;
if (avctx->codec->supported_samplerates) {
for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
break;
if (avctx->codec->supported_samplerates[i] == 0) {
av_log(avctx, AV_LOG_ERROR, "Specified sample_rate is not supported\n");
if (avctx->codec->channel_layouts) {
if (!avctx->channel_layout) {
av_log(avctx, AV_LOG_WARNING, "channel_layout not specified\n");
} else {
for (i = 0; avctx->codec->channel_layouts[i] != 0; i++)
if (avctx->channel_layout == avctx->codec->channel_layouts[i])
break;
if (avctx->codec->channel_layouts[i] == 0) {
av_log(avctx, AV_LOG_ERROR, "Specified channel_layout is not supported\n");
if (avctx->channel_layout && avctx->channels) {
if (av_get_channel_layout_nb_channels(avctx->channel_layout) != avctx->channels) {
av_log(avctx, AV_LOG_ERROR, "channel layout does not match number of channels\n");
} else if (avctx->channel_layout) {
avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
if (!avctx->rc_initial_buffer_occupancy)
avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3 / 4;
if (avctx->ticks_per_frame &&
avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
av_log(avctx, AV_LOG_ERROR,
"ticks_per_frame %d too large for the timebase %d/%d.",
avctx->ticks_per_frame,
avctx->time_base.num,
avctx->time_base.den);
if (avctx->hw_frames_ctx) {
AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
if (frames_ctx->format != avctx->pix_fmt) {
av_log(avctx, AV_LOG_ERROR,
"Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n");
if (avctx->codec->init && !(avctx->active_thread_type & FF_THREAD_FRAME)) {
ret = avctx->codec->init(avctx);
if (ret < 0) {
#if FF_API_AUDIOENC_DELAY
if (av_codec_is_encoder(avctx->codec))
avctx->delay = avctx->initial_padding;
#endif
if (av_codec_is_decoder(avctx->codec)) {
/* validate channel layout from the decoder */
if (avctx->channel_layout) {
int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
if (!avctx->channels)
avctx->channels = channels;
else if (channels != avctx->channels) {
av_log(avctx, AV_LOG_WARNING,
"channel layout does not match number of channels\n");
avctx->channel_layout = 0;
if (avctx->channels && avctx->channels < 0 ||
avctx->channels > FF_SANE_NB_CHANNELS) {
#if FF_API_AVCTX_TIMEBASE
if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
avctx->time_base = av_inv_q(avctx->framerate);
#endif
end:
if (!(codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE) && codec->init) {
entangled_thread_counter--;
/* Release any user-supplied mutex. */
if (lockmgr_cb) {
(*lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE);
if (options) {
av_dict_free(options);
*options = tmp;
return ret;
free_and_end:
if (avctx->codec &&
(avctx->codec->caps_internal & FF_CODEC_CAP_INIT_CLEANUP))
avctx->codec->close(avctx);
if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
av_opt_free(avctx->priv_data);
av_opt_free(avctx);
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
av_frame_free(&avctx->coded_frame);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
av_dict_free(&tmp);
av_freep(&avctx->priv_data);
if (avctx->internal) {
av_frame_free(&avctx->internal->to_free);
av_freep(&avctx->internal->pool);
av_freep(&avctx->internal);
avctx->codec = NULL;
goto end; | true | FFmpeg | e62ff72fc1052273deb708ba715f73e5187281d4 |
4,020 | DECLARE_LOOP_FILTER(mmxext)
DECLARE_LOOP_FILTER(sse2)
DECLARE_LOOP_FILTER(ssse3)
DECLARE_LOOP_FILTER(sse4)
#endif /* HAVE_YASM */
#define VP8_LUMA_MC_FUNC(IDX, SIZE, OPT) \
c->put_vp8_epel_pixels_tab[IDX][0][2] = ff_put_vp8_epel ## SIZE ## _h6_ ## OPT; \
c->put_vp8_epel_pixels_tab[IDX][2][0] = ff_put_vp8_epel ## SIZE ## _v6_ ## OPT; \
c->put_vp8_epel_pixels_tab[IDX][2][2] = ff_put_vp8_epel ## SIZE ## _h6v6_ ## OPT
#define VP8_MC_FUNC(IDX, SIZE, OPT) \
c->put_vp8_epel_pixels_tab[IDX][0][1] = ff_put_vp8_epel ## SIZE ## _h4_ ## OPT; \
c->put_vp8_epel_pixels_tab[IDX][1][0] = ff_put_vp8_epel ## SIZE ## _v4_ ## OPT; \
c->put_vp8_epel_pixels_tab[IDX][1][1] = ff_put_vp8_epel ## SIZE ## _h4v4_ ## OPT; \
c->put_vp8_epel_pixels_tab[IDX][1][2] = ff_put_vp8_epel ## SIZE ## _h6v4_ ## OPT; \
c->put_vp8_epel_pixels_tab[IDX][2][1] = ff_put_vp8_epel ## SIZE ## _h4v6_ ## OPT; \
VP8_LUMA_MC_FUNC(IDX, SIZE, OPT)
#define VP8_BILINEAR_MC_FUNC(IDX, SIZE, OPT) \
c->put_vp8_bilinear_pixels_tab[IDX][0][1] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \
c->put_vp8_bilinear_pixels_tab[IDX][0][2] = ff_put_vp8_bilinear ## SIZE ## _h_ ## OPT; \
c->put_vp8_bilinear_pixels_tab[IDX][1][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \
c->put_vp8_bilinear_pixels_tab[IDX][1][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
c->put_vp8_bilinear_pixels_tab[IDX][1][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
c->put_vp8_bilinear_pixels_tab[IDX][2][0] = ff_put_vp8_bilinear ## SIZE ## _v_ ## OPT; \
c->put_vp8_bilinear_pixels_tab[IDX][2][1] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT; \
c->put_vp8_bilinear_pixels_tab[IDX][2][2] = ff_put_vp8_bilinear ## SIZE ## _hv_ ## OPT
av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
{
#if HAVE_YASM
int mm_flags = av_get_cpu_flags();
if (mm_flags & AV_CPU_FLAG_MMX) {
c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx;
c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
#if ARCH_X86_32
c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx;
c->vp8_idct_add = ff_vp8_idct_add_mmx;
c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_mmx;
c->put_vp8_epel_pixels_tab[0][0][0] =
c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_mmx;
#endif
c->put_vp8_epel_pixels_tab[1][0][0] =
c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_mmx;
#if ARCH_X86_32
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmx;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmx;
c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmx;
c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmx;
c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmx;
c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmx;
c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmx;
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmx;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmx;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmx;
#endif
}
/* note that 4-tap width=16 functions are missing because w=16
* is only used for luma, and luma is always a copy or sixtap. */
if (mm_flags & AV_CPU_FLAG_MMXEXT) {
VP8_MC_FUNC(2, 4, mmxext);
VP8_BILINEAR_MC_FUNC(2, 4, mmxext);
#if ARCH_X86_32
VP8_LUMA_MC_FUNC(0, 16, mmxext);
VP8_MC_FUNC(1, 8, mmxext);
VP8_BILINEAR_MC_FUNC(0, 16, mmxext);
VP8_BILINEAR_MC_FUNC(1, 8, mmxext);
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_mmxext;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_mmxext;
c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_mmxext;
c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_mmxext;
c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_mmxext;
c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_mmxext;
c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_mmxext;
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_mmxext;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_mmxext;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext;
#endif
}
if (mm_flags & AV_CPU_FLAG_SSE) {
c->vp8_idct_add = ff_vp8_idct_add_sse;
c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse;
c->put_vp8_epel_pixels_tab[0][0][0] =
c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse;
}
if (mm_flags & (AV_CPU_FLAG_SSE2|AV_CPU_FLAG_SSE2SLOW)) {
VP8_LUMA_MC_FUNC(0, 16, sse2);
VP8_MC_FUNC(1, 8, sse2);
VP8_BILINEAR_MC_FUNC(0, 16, sse2);
VP8_BILINEAR_MC_FUNC(1, 8, sse2);
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_sse2;
#if ARCH_X86_64 || HAVE_ALIGNED_STACK
c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_sse2;
c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_sse2;
c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_sse2;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_sse2;
#endif
}
if (mm_flags & AV_CPU_FLAG_SSE2) {
c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2;
#if ARCH_X86_64 || HAVE_ALIGNED_STACK
c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_sse2;
c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_sse2;
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse2;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse2;
#endif
}
if (mm_flags & AV_CPU_FLAG_SSSE3) {
VP8_LUMA_MC_FUNC(0, 16, ssse3);
VP8_MC_FUNC(1, 8, ssse3);
VP8_MC_FUNC(2, 4, ssse3);
VP8_BILINEAR_MC_FUNC(0, 16, ssse3);
VP8_BILINEAR_MC_FUNC(1, 8, ssse3);
VP8_BILINEAR_MC_FUNC(2, 4, ssse3);
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter_simple_ssse3;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_ssse3;
#if ARCH_X86_64 || HAVE_ALIGNED_STACK
c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16y_inner_ssse3;
c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16y_inner_ssse3;
c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_ssse3;
c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_ssse3;
c->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16y_mbedge_ssse3;
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_ssse3;
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_ssse3;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_ssse3;
#endif
}
if (mm_flags & AV_CPU_FLAG_SSE4) {
c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse4;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse4;
#if ARCH_X86_64 || HAVE_ALIGNED_STACK
c->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16y_mbedge_sse4;
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse4;
#endif
}
#endif /* HAVE_YASM */
}
| false | FFmpeg | 6f40e9f070f7a6ccf745561409ddbcc2be5e47e5 |
4,021 | static av_always_inline int get_dst_color_err(PaletteUseContext *s,
uint32_t c, int *er, int *eg, int *eb,
const enum color_search_method search_method)
{
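/* Map the 32-bit ARGB color to its closest palette entry and return its index;
 * er/eg/eb receive the per-channel quantization error for the caller to diffuse. */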
const uint8_t a = c >> 24 & 0xff;
const uint8_t r = c >> 16 & 0xff;
const uint8_t g = c >> 8 & 0xff;
const uint8_t b = c & 0xff;
const int dstx = color_get(s, c, a, r, g, b, search_method);
const uint32_t dstc = s->palette[dstx];
*er = r - (dstc >> 16 & 0xff);
*eg = g - (dstc >> 8 & 0xff);
*eb = b - (dstc & 0xff);
return dstx;
}
| false | FFmpeg | 237ccd8a165d2128e8c6bcb14c8c6c3e793cfe05 |
4,022 | static inline void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int is_signed){
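/* Range-code the value v: a zero/non-zero flag, e unary exponent bits for e = log2(|v|),
 * the e mantissa bits, and (if is_signed) a sign bit, each with its own adaptive context in state[]. */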
int i;
if(v){
const int a= FFABS(v);
const int e= av_log2(a);
put_rac(c, state+0, 0);
assert(e<=9);
for(i=0; i<e; i++){
put_rac(c, state+1+i, 1); //1..10
}
put_rac(c, state+1+i, 0);
for(i=e-1; i>=0; i--){
put_rac(c, state+22+i, (a>>i)&1); //22..31
}
if(is_signed)
put_rac(c, state+11 + e, v < 0); //11..21
}else{
put_rac(c, state+0, 1);
}
}
| false | FFmpeg | 68f8d33becbd73b4d0aa277f472a6e8e72ea6849 |
4,023 | static void rgb24_to_rgb555(AVPicture *dst, AVPicture *src,
int width, int height)
{
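/* Pack each 24-bit RGB pixel into a 16-bit 1:5:5:5 word with the top bit forced to 1,
 * honouring the source and destination line padding. */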
const unsigned char *p;
unsigned char *q;
int r, g, b, dst_wrap, src_wrap;
int x, y;
p = src->data[0];
src_wrap = src->linesize[0] - 3 * width;
q = dst->data[0];
dst_wrap = dst->linesize[0] - 2 * width;
for(y=0;y<height;y++) {
for(x=0;x<width;x++) {
r = p[0];
g = p[1];
b = p[2];
((unsigned short *)q)[0] =
((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | 0x8000;
q += 2;
p += 3;
}
p += src_wrap;
q += dst_wrap;
}
}
| false | FFmpeg | 7e7e59409294af9caa63808e56c5cc824c98b4fc |
4,024 | static void avc_wgt_8width_msa(uint8_t *data,
int32_t stride,
int32_t height,
int32_t log2_denom,
int32_t src_weight,
int32_t offset_in)
{
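/* H.264 explicit weighted prediction on an 8-pixel-wide block using MIPS MSA vectors:
 * multiply by src_weight, add the pre-rounded offset, clamp negatives to zero,
 * shift right by log2_denom and saturate to 8 bits before packing back, four rows per iteration. */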
uint8_t cnt;
v16u8 zero = { 0 };
v16u8 src0, src1, src2, src3;
v8u16 src0_r, src1_r, src2_r, src3_r;
v8u16 temp0, temp1, temp2, temp3;
v8u16 wgt, denom, offset;
offset_in <<= (log2_denom);
if (log2_denom) {
offset_in += (1 << (log2_denom - 1));
}
wgt = (v8u16) __msa_fill_h(src_weight);
offset = (v8u16) __msa_fill_h(offset_in);
denom = (v8u16) __msa_fill_h(log2_denom);
for (cnt = height / 4; cnt--;) {
LOAD_4VECS_UB(data, stride, src0, src1, src2, src3);
ILVR_B_4VECS_UH(src0, src1, src2, src3, zero, zero, zero, zero,
src0_r, src1_r, src2_r, src3_r);
temp0 = wgt * src0_r;
temp1 = wgt * src1_r;
temp2 = wgt * src2_r;
temp3 = wgt * src3_r;
ADDS_S_H_4VECS_UH(temp0, offset, temp1, offset,
temp2, offset, temp3, offset,
temp0, temp1, temp2, temp3);
MAXI_S_H_4VECS_UH(temp0, temp1, temp2, temp3, 0);
SRL_H_4VECS_UH(temp0, temp1, temp2, temp3,
temp0, temp1, temp2, temp3, denom);
SAT_U_H_4VECS_UH(temp0, temp1, temp2, temp3, 7);
PCKEV_B_STORE_8_BYTES_4(temp0, temp1, temp2, temp3, data, stride);
data += (4 * stride);
}
}
| false | FFmpeg | bcd7bf7eeb09a395cc01698842d1b8be9af483fc |
4,027 | static void opt_frame_pad_left(const char *arg)
{
frame_padleft = atoi(arg);
if (frame_padleft < 0) {
fprintf(stderr, "Incorrect left pad size\n");
av_exit(1);
}
}
| false | FFmpeg | 0c22311b56e66115675c4a96e4c78547886a4171 |
4,028 | ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
{
int name_size = 8; /* "CORE" or "QEMU" rounded */
size_t elf_note_size = 0;
int note_head_size;
const NoteFuncDesc *nf;
assert(class == ELFCLASS64);
assert(machine == EM_S390);
note_head_size = sizeof(Elf64_Nhdr);
for (nf = note_func; nf->note_contents_func; nf++) {
elf_note_size = elf_note_size + note_head_size + name_size +
nf->contents_size;
}
return (elf_note_size) * nr_cpus;
}
| true | qemu | 5f706fdc164b20b48254eadf7bd413edace34499 |
4,031 | static void add_query_tests(QmpSchema *schema)
{
SchemaInfoList *tail;
SchemaInfo *si, *arg_type, *ret_type;
const char *test_name;
/* Test the query-like commands */
for (tail = schema->list; tail; tail = tail->next) {
si = tail->value;
if (si->meta_type != SCHEMA_META_TYPE_COMMAND) {
continue;
}
if (query_is_blacklisted(si->name)) {
continue;
}
arg_type = qmp_schema_lookup(schema, si->u.command.arg_type);
if (object_type_has_mandatory_members(arg_type)) {
continue;
}
ret_type = qmp_schema_lookup(schema, si->u.command.ret_type);
if (ret_type->meta_type == SCHEMA_META_TYPE_OBJECT
&& !ret_type->u.object.members) {
continue;
}
test_name = g_strdup_printf("qmp/%s", si->name);
qtest_add_data_func(test_name, si->name, test_query);
}
}
| true | qemu | e313d5cec564a9b708bad1bb44c291530a5a4935 |
4,032 | static void cpu_handle_guest_debug(CPUState *env)
{
gdb_set_stop_cpu(env);
qemu_system_debug_request();
#ifdef CONFIG_IOTHREAD
env->stopped = 1;
#endif
}
| true | qemu | 12d4536f7d911b6d87a766ad7300482ea663cea2 |
4,033 | vmxnet3_io_bar1_write(void *opaque,
hwaddr addr,
uint64_t val,
unsigned size)
{
VMXNET3State *s = opaque;
switch (addr) {
/* Vmxnet3 Revision Report Selection */
case VMXNET3_REG_VRRS:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_VRRS] = %" PRIx64 ", size %d",
val, size);
break;
/* UPT Version Report Selection */
case VMXNET3_REG_UVRS:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_UVRS] = %" PRIx64 ", size %d",
val, size);
break;
/* Driver Shared Address Low */
case VMXNET3_REG_DSAL:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAL] = %" PRIx64 ", size %d",
val, size);
/*
* Guest driver will first write the low part of the shared
* memory address. We save it to temp variable and set the
* shared address only after we get the high part
*/
if (val == 0) {
s->device_active = false;
}
s->temp_shared_guest_driver_memory = val;
s->drv_shmem = 0;
break;
/* Driver Shared Address High */
case VMXNET3_REG_DSAH:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAH] = %" PRIx64 ", size %d",
val, size);
/*
* Set the shared memory between guest driver and device.
* We already should have low address part.
*/
s->drv_shmem = s->temp_shared_guest_driver_memory | (val << 32);
break;
/* Command */
case VMXNET3_REG_CMD:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_CMD] = %" PRIx64 ", size %d",
val, size);
vmxnet3_handle_command(s, val);
break;
/* MAC Address Low */
case VMXNET3_REG_MACL:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACL] = %" PRIx64 ", size %d",
val, size);
s->temp_mac = val;
break;
/* MAC Address High */
case VMXNET3_REG_MACH:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACH] = %" PRIx64 ", size %d",
val, size);
vmxnet3_set_variable_mac(s, val, s->temp_mac);
break;
/* Interrupt Cause Register */
case VMXNET3_REG_ICR:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_ICR] = %" PRIx64 ", size %d",
val, size);
g_assert_not_reached();
break;
/* Event Cause Register */
case VMXNET3_REG_ECR:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_ECR] = %" PRIx64 ", size %d",
val, size);
vmxnet3_ack_events(s, val);
break;
default:
VMW_CBPRN("Unknown Write to BAR1 [%" PRIx64 "] = %" PRIx64 ", size %d",
addr, val, size);
break;
}
}
| true | qemu | aa4a3dce1c88ed51b616806b8214b7c8428b7470 |
4,035 | static int64_t alloc_block(BlockDriverState* bs, int64_t offset)
{
BDRVVPCState *s = bs->opaque;
int64_t bat_offset;
uint32_t index, bat_value;
int ret;
uint8_t bitmap[s->bitmap_size];
/* Check if sector_num is valid */
if ((offset < 0) || (offset > bs->total_sectors * BDRV_SECTOR_SIZE)) {
return -EINVAL;
}
/* Write entry into in-memory BAT */
index = offset / s->block_size;
assert(s->pagetable[index] == 0xFFFFFFFF);
s->pagetable[index] = s->free_data_block_offset / 512;
/* Initialize the block's bitmap */
memset(bitmap, 0xff, s->bitmap_size);
ret = bdrv_pwrite_sync(bs->file, s->free_data_block_offset, bitmap,
s->bitmap_size);
if (ret < 0) {
return ret;
}
/* Write new footer (the old one will be overwritten) */
s->free_data_block_offset += s->block_size + s->bitmap_size;
ret = rewrite_footer(bs);
if (ret < 0)
goto fail;
/* Write BAT entry to disk */
bat_offset = s->bat_offset + (4 * index);
bat_value = cpu_to_be32(s->pagetable[index]);
ret = bdrv_pwrite_sync(bs->file, bat_offset, &bat_value, 4);
if (ret < 0)
goto fail;
return get_image_offset(bs, offset, false);
fail:
s->free_data_block_offset -= (s->block_size + s->bitmap_size);
return ret;
}
| true | qemu | cfc87e00c22ab4ea0262c9771c803ed03d754001 |
4,036 | static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
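/* Convert 32-bit pixels to 16-bit 5:6:5 with red and blue swapped; the MMX path
 * handles 4 pixels per iteration, the scalar loop covers the remainder (see the endianness FIXME below). */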
const uint8_t *s = src;
const uint8_t *end;
#ifdef HAVE_MMX
const uint8_t *mm_end;
#endif
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
#ifdef HAVE_MMX
__asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm __volatile(
"movq %0, %%mm7\n\t"
"movq %1, %%mm6\n\t"
::"m"(red_16mask),"m"(green_16mask));
mm_end = end - 15;
while(s < mm_end)
{
__asm __volatile(
PREFETCH" 32%1\n\t"
"movd %1, %%mm0\n\t"
"movd 4%1, %%mm3\n\t"
"punpckldq 8%1, %%mm0\n\t"
"punpckldq 12%1, %%mm3\n\t"
"movq %%mm0, %%mm1\n\t"
"movq %%mm0, %%mm2\n\t"
"movq %%mm3, %%mm4\n\t"
"movq %%mm3, %%mm5\n\t"
"psllq $8, %%mm0\n\t"
"psllq $8, %%mm3\n\t"
"pand %%mm7, %%mm0\n\t"
"pand %%mm7, %%mm3\n\t"
"psrlq $5, %%mm1\n\t"
"psrlq $5, %%mm4\n\t"
"pand %%mm6, %%mm1\n\t"
"pand %%mm6, %%mm4\n\t"
"psrlq $19, %%mm2\n\t"
"psrlq $19, %%mm5\n\t"
"pand %2, %%mm2\n\t"
"pand %2, %%mm5\n\t"
"por %%mm1, %%mm0\n\t"
"por %%mm4, %%mm3\n\t"
"por %%mm2, %%mm0\n\t"
"por %%mm5, %%mm3\n\t"
"psllq $16, %%mm3\n\t"
"por %%mm3, %%mm0\n\t"
MOVNTQ" %%mm0, %0\n\t"
:"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
d += 4;
s += 16;
}
__asm __volatile(SFENCE:::"memory");
__asm __volatile(EMMS:::"memory");
#endif
while(s < end)
{
// FIXME on bigendian
const int src= *s; s += 4;
*d++ = ((src&0xF8)<<8) + ((src&0xFC00)>>5) + ((src&0xF80000)>>19);
}
}
| true | FFmpeg | 7f526efd17973ec6d2204f7a47b6923e2be31363 |
4,037 | int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
{
int mb_xy;
int partition_count;
unsigned int mb_type, cbp;
int dct8x8_allowed= h->ps.pps->transform_8x8_mode;
int decode_chroma = h->ps.sps->chroma_format_idc == 1 || h->ps.sps->chroma_format_idc == 2;
const int pixel_shift = h->pixel_shift;
mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride;
ff_tlog(h->avctx, "pic:%d mb:%d/%d\n", h->poc.frame_num, sl->mb_x, sl->mb_y);
cbp = 0; /* avoid warning. FIXME: find a solution without slowing
down the code */
if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
if (sl->mb_skip_run == -1)
sl->mb_skip_run = get_ue_golomb_long(&sl->gb);
if (sl->mb_skip_run--) {
if (FRAME_MBAFF(h) && (sl->mb_y & 1) == 0) {
if (sl->mb_skip_run == 0)
sl->mb_mbaff = sl->mb_field_decoding_flag = get_bits1(&sl->gb);
}
decode_mb_skip(h, sl);
return 0;
}
}
if (FRAME_MBAFF(h)) {
if ((sl->mb_y & 1) == 0)
sl->mb_mbaff = sl->mb_field_decoding_flag = get_bits1(&sl->gb);
}
sl->prev_mb_skipped = 0;
mb_type= get_ue_golomb(&sl->gb);
if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
if(mb_type < 23){
partition_count = ff_h264_b_mb_type_info[mb_type].partition_count;
mb_type = ff_h264_b_mb_type_info[mb_type].type;
}else{
mb_type -= 23;
goto decode_intra_mb;
}
} else if (sl->slice_type_nos == AV_PICTURE_TYPE_P) {
if(mb_type < 5){
partition_count = ff_h264_p_mb_type_info[mb_type].partition_count;
mb_type = ff_h264_p_mb_type_info[mb_type].type;
}else{
mb_type -= 5;
goto decode_intra_mb;
}
}else{
av_assert2(sl->slice_type_nos == AV_PICTURE_TYPE_I);
if (sl->slice_type == AV_PICTURE_TYPE_SI && mb_type)
mb_type--;
decode_intra_mb:
if(mb_type > 25){
av_log(h->avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_picture_type_char(sl->slice_type), sl->mb_x, sl->mb_y);
return -1;
}
partition_count=0;
cbp = ff_h264_i_mb_type_info[mb_type].cbp;
sl->intra16x16_pred_mode = ff_h264_i_mb_type_info[mb_type].pred_mode;
mb_type = ff_h264_i_mb_type_info[mb_type].type;
}
if (MB_FIELD(sl))
mb_type |= MB_TYPE_INTERLACED;
h->slice_table[mb_xy] = sl->slice_num;
if(IS_INTRA_PCM(mb_type)){
const int mb_size = ff_h264_mb_sizes[h->ps.sps->chroma_format_idc] *
h->ps.sps->bit_depth_luma;
// We assume these blocks are very rare so we do not optimize it.
sl->intra_pcm_ptr = align_get_bits(&sl->gb);
if (get_bits_left(&sl->gb) < mb_size) {
av_log(h->avctx, AV_LOG_ERROR, "Not enough data for an intra PCM block.\n");
return AVERROR_INVALIDDATA;
}
skip_bits_long(&sl->gb, mb_size);
// In deblocking, the quantizer is 0
h->cur_pic.qscale_table[mb_xy] = 0;
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 48);
h->cur_pic.mb_type[mb_xy] = mb_type;
return 0;
}
fill_decode_neighbors(h, sl, mb_type);
fill_decode_caches(h, sl, mb_type);
//mb_pred
if(IS_INTRA(mb_type)){
int pred_mode;
// init_top_left_availability(h);
if(IS_INTRA4x4(mb_type)){
int i;
int di = 1;
if(dct8x8_allowed && get_bits1(&sl->gb)){
mb_type |= MB_TYPE_8x8DCT;
di = 4;
}
// fill_intra4x4_pred_table(h);
for(i=0; i<16; i+=di){
int mode = pred_intra_mode(h, sl, i);
if(!get_bits1(&sl->gb)){
const int rem_mode= get_bits(&sl->gb, 3);
mode = rem_mode + (rem_mode >= mode);
}
if(di==4)
fill_rectangle(&sl->intra4x4_pred_mode_cache[ scan8[i] ], 2, 2, 8, mode, 1);
else
sl->intra4x4_pred_mode_cache[scan8[i]] = mode;
}
write_back_intra_pred_mode(h, sl);
if (ff_h264_check_intra4x4_pred_mode(sl->intra4x4_pred_mode_cache, h->avctx,
sl->top_samples_available, sl->left_samples_available) < 0)
return -1;
}else{
sl->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h->avctx, sl->top_samples_available,
sl->left_samples_available, sl->intra16x16_pred_mode, 0);
if (sl->intra16x16_pred_mode < 0)
return -1;
}
if(decode_chroma){
pred_mode= ff_h264_check_intra_pred_mode(h->avctx, sl->top_samples_available,
sl->left_samples_available, get_ue_golomb_31(&sl->gb), 1);
if(pred_mode < 0)
return -1;
sl->chroma_pred_mode = pred_mode;
} else {
sl->chroma_pred_mode = DC_128_PRED8x8;
}
}else if(partition_count==4){
int i, j, sub_partition_count[4], list, ref[2][4];
if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
for(i=0; i<4; i++){
sl->sub_mb_type[i]= get_ue_golomb_31(&sl->gb);
if(sl->sub_mb_type[i] >=13){
av_log(h->avctx, AV_LOG_ERROR, "B sub_mb_type %u out of range at %d %d\n", sl->sub_mb_type[i], sl->mb_x, sl->mb_y);
return -1;
}
sub_partition_count[i] = ff_h264_b_sub_mb_type_info[sl->sub_mb_type[i]].partition_count;
sl->sub_mb_type[i] = ff_h264_b_sub_mb_type_info[sl->sub_mb_type[i]].type;
}
if( IS_DIRECT(sl->sub_mb_type[0]|sl->sub_mb_type[1]|sl->sub_mb_type[2]|sl->sub_mb_type[3])) {
ff_h264_pred_direct_motion(h, sl, &mb_type);
sl->ref_cache[0][scan8[4]] =
sl->ref_cache[1][scan8[4]] =
sl->ref_cache[0][scan8[12]] =
sl->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE;
}
}else{
av_assert2(sl->slice_type_nos == AV_PICTURE_TYPE_P); //FIXME SP correct ?
for(i=0; i<4; i++){
sl->sub_mb_type[i]= get_ue_golomb_31(&sl->gb);
if(sl->sub_mb_type[i] >=4){
av_log(h->avctx, AV_LOG_ERROR, "P sub_mb_type %u out of range at %d %d\n", sl->sub_mb_type[i], sl->mb_x, sl->mb_y);
return -1;
}
sub_partition_count[i] = ff_h264_p_sub_mb_type_info[sl->sub_mb_type[i]].partition_count;
sl->sub_mb_type[i] = ff_h264_p_sub_mb_type_info[sl->sub_mb_type[i]].type;
}
}
for (list = 0; list < sl->list_count; list++) {
int ref_count = IS_REF0(mb_type) ? 1 : sl->ref_count[list] << MB_MBAFF(sl);
for(i=0; i<4; i++){
if(IS_DIRECT(sl->sub_mb_type[i])) continue;
if(IS_DIR(sl->sub_mb_type[i], 0, list)){
unsigned int tmp;
if(ref_count == 1){
tmp= 0;
}else if(ref_count == 2){
tmp= get_bits1(&sl->gb)^1;
}else{
tmp= get_ue_golomb_31(&sl->gb);
if(tmp>=ref_count){
av_log(h->avctx, AV_LOG_ERROR, "ref %u overflow\n", tmp);
return -1;
}
}
ref[list][i]= tmp;
}else{
//FIXME
ref[list][i] = -1;
}
}
}
if(dct8x8_allowed)
dct8x8_allowed = get_dct8x8_allowed(h, sl);
for (list = 0; list < sl->list_count; list++) {
for(i=0; i<4; i++){
if(IS_DIRECT(sl->sub_mb_type[i])) {
sl->ref_cache[list][ scan8[4*i] ] = sl->ref_cache[list][ scan8[4*i]+1 ];
continue;
}
sl->ref_cache[list][ scan8[4*i] ]=sl->ref_cache[list][ scan8[4*i]+1 ]=
sl->ref_cache[list][ scan8[4*i]+8 ]=sl->ref_cache[list][ scan8[4*i]+9 ]= ref[list][i];
if(IS_DIR(sl->sub_mb_type[i], 0, list)){
const int sub_mb_type= sl->sub_mb_type[i];
const int block_width= (sub_mb_type & (MB_TYPE_16x16|MB_TYPE_16x8)) ? 2 : 1;
for(j=0; j<sub_partition_count[i]; j++){
int mx, my;
const int index= 4*i + block_width*j;
int16_t (* mv_cache)[2]= &sl->mv_cache[list][ scan8[index] ];
pred_motion(h, sl, index, block_width, list, sl->ref_cache[list][ scan8[index] ], &mx, &my);
mx += get_se_golomb(&sl->gb);
my += get_se_golomb(&sl->gb);
ff_tlog(h->avctx, "final mv:%d %d\n", mx, my);
if(IS_SUB_8X8(sub_mb_type)){
mv_cache[ 1 ][0]=
mv_cache[ 8 ][0]= mv_cache[ 9 ][0]= mx;
mv_cache[ 1 ][1]=
mv_cache[ 8 ][1]= mv_cache[ 9 ][1]= my;
}else if(IS_SUB_8X4(sub_mb_type)){
mv_cache[ 1 ][0]= mx;
mv_cache[ 1 ][1]= my;
}else if(IS_SUB_4X8(sub_mb_type)){
mv_cache[ 8 ][0]= mx;
mv_cache[ 8 ][1]= my;
}
mv_cache[ 0 ][0]= mx;
mv_cache[ 0 ][1]= my;
}
}else{
uint32_t *p= (uint32_t *)&sl->mv_cache[list][ scan8[4*i] ][0];
p[0] = p[1]=
p[8] = p[9]= 0;
}
}
}
}else if(IS_DIRECT(mb_type)){
ff_h264_pred_direct_motion(h, sl, &mb_type);
dct8x8_allowed &= h->ps.sps->direct_8x8_inference_flag;
}else{
int list, mx, my, i;
//FIXME we should set ref_idx_l? to 0 if we use that later ...
if(IS_16X16(mb_type)){
for (list = 0; list < sl->list_count; list++) {
unsigned int val;
if(IS_DIR(mb_type, 0, list)){
unsigned rc = sl->ref_count[list] << MB_MBAFF(sl);
if (rc == 1) {
val= 0;
} else if (rc == 2) {
val= get_bits1(&sl->gb)^1;
}else{
val= get_ue_golomb_31(&sl->gb);
if (val >= rc) {
av_log(h->avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
return -1;
}
}
fill_rectangle(&sl->ref_cache[list][ scan8[0] ], 4, 4, 8, val, 1);
}
}
for (list = 0; list < sl->list_count; list++) {
if(IS_DIR(mb_type, 0, list)){
pred_motion(h, sl, 0, 4, list, sl->ref_cache[list][ scan8[0] ], &mx, &my);
mx += get_se_golomb(&sl->gb);
my += get_se_golomb(&sl->gb);
ff_tlog(h->avctx, "final mv:%d %d\n", mx, my);
fill_rectangle(sl->mv_cache[list][ scan8[0] ], 4, 4, 8, pack16to32(mx,my), 4);
}
}
}
else if(IS_16X8(mb_type)){
for (list = 0; list < sl->list_count; list++) {
for(i=0; i<2; i++){
unsigned int val;
if(IS_DIR(mb_type, i, list)){
unsigned rc = sl->ref_count[list] << MB_MBAFF(sl);
if (rc == 1) {
val= 0;
} else if (rc == 2) {
val= get_bits1(&sl->gb)^1;
}else{
val= get_ue_golomb_31(&sl->gb);
if (val >= rc) {
av_log(h->avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
return -1;
}
}
}else
val= LIST_NOT_USED&0xFF;
fill_rectangle(&sl->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 1);
}
}
for (list = 0; list < sl->list_count; list++) {
for(i=0; i<2; i++){
unsigned int val;
if(IS_DIR(mb_type, i, list)){
pred_16x8_motion(h, sl, 8*i, list, sl->ref_cache[list][scan8[0] + 16*i], &mx, &my);
mx += get_se_golomb(&sl->gb);
my += get_se_golomb(&sl->gb);
ff_tlog(h->avctx, "final mv:%d %d\n", mx, my);
val= pack16to32(mx,my);
}else
val=0;
fill_rectangle(sl->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 4);
}
}
}else{
av_assert2(IS_8X16(mb_type));
for (list = 0; list < sl->list_count; list++) {
for(i=0; i<2; i++){
unsigned int val;
if(IS_DIR(mb_type, i, list)){ //FIXME optimize
unsigned rc = sl->ref_count[list] << MB_MBAFF(sl);
if (rc == 1) {
val= 0;
} else if (rc == 2) {
val= get_bits1(&sl->gb)^1;
}else{
val= get_ue_golomb_31(&sl->gb);
if (val >= rc) {
av_log(h->avctx, AV_LOG_ERROR, "ref %u overflow\n", val);
return -1;
}
}
}else
val= LIST_NOT_USED&0xFF;
fill_rectangle(&sl->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 1);
}
}
for (list = 0; list < sl->list_count; list++) {
for(i=0; i<2; i++){
unsigned int val;
if(IS_DIR(mb_type, i, list)){
pred_8x16_motion(h, sl, i*4, list, sl->ref_cache[list][ scan8[0] + 2*i ], &mx, &my);
mx += get_se_golomb(&sl->gb);
my += get_se_golomb(&sl->gb);
ff_tlog(h->avctx, "final mv:%d %d\n", mx, my);
val= pack16to32(mx,my);
}else
val=0;
fill_rectangle(sl->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 4);
}
}
}
}
if(IS_INTER(mb_type))
write_back_motion(h, sl, mb_type);
if(!IS_INTRA16x16(mb_type)){
cbp= get_ue_golomb(&sl->gb);
if(decode_chroma){
if(cbp > 47){
av_log(h->avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, sl->mb_x, sl->mb_y);
return -1;
}
if (IS_INTRA4x4(mb_type))
cbp = ff_h264_golomb_to_intra4x4_cbp[cbp];
else
cbp = ff_h264_golomb_to_inter_cbp[cbp];
}else{
if(cbp > 15){
av_log(h->avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, sl->mb_x, sl->mb_y);
return -1;
}
if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp_gray[cbp];
else cbp= golomb_to_inter_cbp_gray[cbp];
}
} else {
if (!decode_chroma && cbp>15) {
av_log(h->avctx, AV_LOG_ERROR, "gray chroma\n");
return AVERROR_INVALIDDATA;
}
}
if(dct8x8_allowed && (cbp&15) && !IS_INTRA(mb_type)){
mb_type |= MB_TYPE_8x8DCT*get_bits1(&sl->gb);
}
sl->cbp=
h->cbp_table[mb_xy]= cbp;
h->cur_pic.mb_type[mb_xy] = mb_type;
if(cbp || IS_INTRA16x16(mb_type)){
int i4x4, i8x8, chroma_idx;
int dquant;
int ret;
GetBitContext *gb = &sl->gb;
const uint8_t *scan, *scan8x8;
const int max_qp = 51 + 6 * (h->ps.sps->bit_depth_luma - 8);
if(IS_INTERLACED(mb_type)){
scan8x8 = sl->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0;
scan = sl->qscale ? h->field_scan : h->field_scan_q0;
}else{
scan8x8 = sl->qscale ? h->zigzag_scan8x8_cavlc : h->zigzag_scan8x8_cavlc_q0;
scan = sl->qscale ? h->zigzag_scan : h->zigzag_scan_q0;
}
dquant= get_se_golomb(&sl->gb);
sl->qscale += dquant;
if (((unsigned)sl->qscale) > max_qp){
if (sl->qscale < 0) sl->qscale += max_qp + 1;
else sl->qscale -= max_qp+1;
if (((unsigned)sl->qscale) > max_qp){
av_log(h->avctx, AV_LOG_ERROR, "dquant out of range (%d) at %d %d\n", dquant, sl->mb_x, sl->mb_y);
return -1;
}
}
sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
if ((ret = decode_luma_residual(h, sl, gb, scan, scan8x8, pixel_shift, mb_type, cbp, 0)) < 0 ) {
return -1;
}
h->cbp_table[mb_xy] |= ret << 12;
if (CHROMA444(h)) {
if (decode_luma_residual(h, sl, gb, scan, scan8x8, pixel_shift, mb_type, cbp, 1) < 0 ) {
return -1;
}
if (decode_luma_residual(h, sl, gb, scan, scan8x8, pixel_shift, mb_type, cbp, 2) < 0 ) {
return -1;
}
} else {
const int num_c8x8 = h->ps.sps->chroma_format_idc;
if(cbp&0x30){
for(chroma_idx=0; chroma_idx<2; chroma_idx++)
if (decode_residual(h, sl, gb, sl->mb + ((256 + 16*16*chroma_idx) << pixel_shift),
CHROMA_DC_BLOCK_INDEX + chroma_idx,
CHROMA422(h) ? ff_h264_chroma422_dc_scan : ff_h264_chroma_dc_scan,
NULL, 4 * num_c8x8) < 0) {
return -1;
}
}
if(cbp&0x20){
for(chroma_idx=0; chroma_idx<2; chroma_idx++){
const uint32_t *qmul = h->ps.pps->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
int16_t *mb = sl->mb + (16*(16 + 16*chroma_idx) << pixel_shift);
for (i8x8 = 0; i8x8<num_c8x8; i8x8++) {
for (i4x4 = 0; i4x4 < 4; i4x4++) {
const int index = 16 + 16*chroma_idx + 8*i8x8 + i4x4;
if (decode_residual(h, sl, gb, mb, index, scan + 1, qmul, 15) < 0)
return -1;
mb += 16 << pixel_shift;
}
}
}
}else{
fill_rectangle(&sl->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
fill_rectangle(&sl->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
}
}
}else{
fill_rectangle(&sl->non_zero_count_cache[scan8[ 0]], 4, 4, 8, 0, 1);
fill_rectangle(&sl->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
fill_rectangle(&sl->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
}
h->cur_pic.qscale_table[mb_xy] = sl->qscale;
write_back_non_zero_count(h, sl);
return 0;
}
| true | FFmpeg | fc8cff96ed45dfdb91ed03e9942845f28be0e770 |
4,038 | static void client_close(NBDClient *client)
{
if (client->closing) {
return;
}
client->closing = true;
/* Force requests to finish. They will drop their own references,
* then we'll close the socket and free the NBDClient.
*/
qio_channel_shutdown(client->ioc, QIO_CHANNEL_SHUTDOWN_BOTH,
NULL);
/* Also tell the client, so that they release their reference. */
if (client->close) {
client->close(client);
}
}
| true | qemu | 0c9390d978cbf61e8f16c9f580fa96b305c43568 |
4,039 | static int init_tile(Jpeg2000DecoderContext *s, int tileno)
{
int compno;
int tilex = tileno % s->numXtiles;
int tiley = tileno / s->numXtiles;
Jpeg2000Tile *tile = s->tile + tileno;
if (!tile->comp)
return AVERROR(ENOMEM);
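/* Clip the tile against the image area, then derive per-component coordinates,
 * taking chroma subsampling (cdx/cdy) and the resolution reduction factor into account. */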
tile->coord[0][0] = FFMAX(tilex * s->tile_width + s->tile_offset_x, s->image_offset_x);
tile->coord[0][1] = FFMIN((tilex + 1) * s->tile_width + s->tile_offset_x, s->width);
tile->coord[1][0] = FFMAX(tiley * s->tile_height + s->tile_offset_y, s->image_offset_y);
tile->coord[1][1] = FFMIN((tiley + 1) * s->tile_height + s->tile_offset_y, s->height);
for (compno = 0; compno < s->ncomponents; compno++) {
Jpeg2000Component *comp = tile->comp + compno;
Jpeg2000CodingStyle *codsty = tile->codsty + compno;
Jpeg2000QuantStyle *qntsty = tile->qntsty + compno;
int ret; // global bandno
comp->coord_o[0][0] = tile->coord[0][0];
comp->coord_o[0][1] = tile->coord[0][1];
comp->coord_o[1][0] = tile->coord[1][0];
comp->coord_o[1][1] = tile->coord[1][1];
if (compno) {
comp->coord_o[0][0] /= s->cdx[compno];
comp->coord_o[0][1] /= s->cdx[compno];
comp->coord_o[1][0] /= s->cdy[compno];
comp->coord_o[1][1] /= s->cdy[compno];
}
comp->coord[0][0] = ff_jpeg2000_ceildivpow2(comp->coord_o[0][0], s->reduction_factor);
comp->coord[0][1] = ff_jpeg2000_ceildivpow2(comp->coord_o[0][1], s->reduction_factor);
comp->coord[1][0] = ff_jpeg2000_ceildivpow2(comp->coord_o[1][0], s->reduction_factor);
comp->coord[1][1] = ff_jpeg2000_ceildivpow2(comp->coord_o[1][1], s->reduction_factor);
if (ret = ff_jpeg2000_init_component(comp, codsty, qntsty,
s->cbps[compno], s->cdx[compno],
s->cdy[compno], s->avctx))
return ret;
}
return 0;
}
| false | FFmpeg | 43492ff3ab68a343c1264801baa1d5a02de10167 |
4,040 | void ff_avg_h264_qpel8_mc11_msa(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
avc_luma_hv_qrt_and_aver_dst_8x8_msa(src - 2,
src - (stride * 2),
stride, dst, stride);
}
| false | FFmpeg | 1181d93231e9b807965724587d363c1cfd5a1d0d |
4,042 | int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
H264ParamSets *ps, int bit_length)
{
AVBufferRef *pps_buf;
const SPS *sps;
unsigned int pps_id = get_ue_golomb(gb);
PPS *pps;
int qp_bd_offset;
int bits_left;
int ret;
if (pps_id >= MAX_PPS_COUNT) {
av_log(avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id);
return AVERROR_INVALIDDATA;
}
pps_buf = av_buffer_allocz(sizeof(*pps));
if (!pps_buf)
return AVERROR(ENOMEM);
pps = (PPS*)pps_buf->data;
pps->data_size = gb->buffer_end - gb->buffer;
if (pps->data_size > sizeof(pps->data)) {
av_log(avctx, AV_LOG_WARNING, "Truncating likely oversized PPS "
"(%"SIZE_SPECIFIER" > %"SIZE_SPECIFIER")\n",
pps->data_size, sizeof(pps->data));
pps->data_size = sizeof(pps->data);
}
memcpy(pps->data, gb->buffer, pps->data_size);
pps->sps_id = get_ue_golomb_31(gb);
if ((unsigned)pps->sps_id >= MAX_SPS_COUNT ||
!ps->sps_list[pps->sps_id]) {
av_log(avctx, AV_LOG_ERROR, "sps_id %u out of range\n", pps->sps_id);
sps = (const SPS*)ps->sps_list[pps->sps_id]->data;
if (sps->bit_depth_luma > 14) {
av_log(avctx, AV_LOG_ERROR,
"Invalid luma bit depth=%d\n",
sps->bit_depth_luma);
ret = AVERROR_INVALIDDATA;
goto fail;
} else if (sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13) {
av_log(avctx, AV_LOG_ERROR,
"Unimplemented luma bit depth=%d\n",
sps->bit_depth_luma);
ret = AVERROR_PATCHWELCOME;
goto fail;
}
pps->cabac = get_bits1(gb);
pps->pic_order_present = get_bits1(gb);
pps->slice_group_count = get_ue_golomb(gb) + 1;
if (pps->slice_group_count > 1) {
pps->mb_slice_group_map_type = get_ue_golomb(gb);
av_log(avctx, AV_LOG_ERROR, "FMO not supported\n");
switch (pps->mb_slice_group_map_type) {
case 0:
#if 0
| for (i = 0; i <= num_slice_groups_minus1; i++) | | |
| run_length[i] |1 |ue(v) |
#endif
break;
case 2:
#if 0
| for (i = 0; i < num_slice_groups_minus1; i++) { | | |
| top_left_mb[i] |1 |ue(v) |
| bottom_right_mb[i] |1 |ue(v) |
| } | | |
#endif
break;
case 3:
case 4:
case 5:
#if 0
| slice_group_change_direction_flag |1 |u(1) |
| slice_group_change_rate_minus1 |1 |ue(v) |
#endif
break;
case 6:
#if 0
| slice_group_id_cnt_minus1 |1 |ue(v) |
| for (i = 0; i <= slice_group_id_cnt_minus1; i++)| | |
| slice_group_id[i] |1 |u(v) |
#endif
break;
}
}
pps->ref_count[0] = get_ue_golomb(gb) + 1;
pps->ref_count[1] = get_ue_golomb(gb) + 1;
if (pps->ref_count[0] - 1 > 32 - 1 || pps->ref_count[1] - 1 > 32 - 1) {
av_log(avctx, AV_LOG_ERROR, "reference overflow (pps)\n");
qp_bd_offset = 6 * (sps->bit_depth_luma - 8);
pps->weighted_pred = get_bits1(gb);
pps->weighted_bipred_idc = get_bits(gb, 2);
pps->init_qp = get_se_golomb(gb) + 26 + qp_bd_offset;
pps->init_qs = get_se_golomb(gb) + 26 + qp_bd_offset;
pps->chroma_qp_index_offset[0] = get_se_golomb(gb);
if (pps->chroma_qp_index_offset[0] < -12 || pps->chroma_qp_index_offset[0] > 12) {
ret = AVERROR_INVALIDDATA;
goto fail;
}
pps->deblocking_filter_parameters_present = get_bits1(gb);
pps->constrained_intra_pred = get_bits1(gb);
pps->redundant_pic_cnt_present = get_bits1(gb);
pps->transform_8x8_mode = 0;
memcpy(pps->scaling_matrix4, sps->scaling_matrix4,
sizeof(pps->scaling_matrix4));
memcpy(pps->scaling_matrix8, sps->scaling_matrix8,
sizeof(pps->scaling_matrix8));
bits_left = bit_length - get_bits_count(gb);
if (bits_left > 0 && more_rbsp_data_in_pps(sps, avctx)) {
pps->transform_8x8_mode = get_bits1(gb);
decode_scaling_matrices(gb, sps, pps, 0,
pps->scaling_matrix4, pps->scaling_matrix8);
// second_chroma_qp_index_offset
pps->chroma_qp_index_offset[1] = get_se_golomb(gb);
} else {
pps->chroma_qp_index_offset[1] = pps->chroma_qp_index_offset[0];
}
build_qp_table(pps, 0, pps->chroma_qp_index_offset[0],
sps->bit_depth_luma);
build_qp_table(pps, 1, pps->chroma_qp_index_offset[1],
sps->bit_depth_luma);
init_dequant_tables(pps, sps);
if (pps->chroma_qp_index_offset[0] != pps->chroma_qp_index_offset[1])
pps->chroma_qp_diff = 1;
if (avctx->debug & FF_DEBUG_PICT_INFO) {
av_log(avctx, AV_LOG_DEBUG,
"pps:%u sps:%u %s slice_groups:%d ref:%u/%u %s qp:%d/%d/%d/%d %s %s %s %s\n",
pps_id, pps->sps_id,
pps->cabac ? "CABAC" : "CAVLC",
pps->slice_group_count,
pps->ref_count[0], pps->ref_count[1],
pps->weighted_pred ? "weighted" : "",
pps->init_qp, pps->init_qs, pps->chroma_qp_index_offset[0], pps->chroma_qp_index_offset[1],
pps->deblocking_filter_parameters_present ? "LPAR" : "",
pps->constrained_intra_pred ? "CONSTR" : "",
pps->redundant_pic_cnt_present ? "REDU" : "",
pps->transform_8x8_mode ? "8x8DCT" : "");
remove_pps(ps, pps_id);
ps->pps_list[pps_id] = pps_buf;
return 0;
fail:
av_buffer_unref(&pps_buf);
return ret;
}
| true | FFmpeg | 9568b2e425f127031ddc91dd78cb9b9f2cae206d |
4,043 | static int vmdk_read(BlockDriverState *bs, int64_t sector_num,
uint8_t *buf, int nb_sectors)
{
BDRVVmdkState *s = bs->opaque;
int index_in_cluster, n, ret;
uint64_t cluster_offset;
while (nb_sectors > 0) {
cluster_offset = get_cluster_offset(bs, sector_num << 9, 0);
index_in_cluster = sector_num % s->cluster_sectors;
n = s->cluster_sectors - index_in_cluster;
if (n > nb_sectors)
n = nb_sectors;
if (!cluster_offset) {
// try to read from parent image, if exist
if (s->hd->backing_hd) {
if (!vmdk_is_cid_valid(bs))
return -1;
ret = bdrv_read(s->hd->backing_hd, sector_num, buf, n);
if (ret < 0)
return -1;
} else {
memset(buf, 0, 512 * n);
}
} else {
if(bdrv_pread(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512) != n * 512)
return -1;
}
nb_sectors -= n;
sector_num += n;
buf += n * 512;
}
return 0;
}
| true | qemu | 630530a6529bc3da9ab8aead7053dc753cb9ac77 |
4,044 | int32_t ff_mlp_pack_output(int32_t lossless_check_data,
uint16_t blockpos,
int32_t (*sample_buffer)[MAX_CHANNELS],
void *data,
uint8_t *ch_assign,
int8_t *output_shift,
uint8_t max_matrix_channel,
int is32)
{
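/* Interleave the decoded samples into the caller's 16- or 32-bit output buffer in output
 * channel order (ch_assign), applying per-channel output shifts and folding each sample
 * into the lossless check value that is returned. */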
unsigned int i, out_ch = 0;
int32_t *data_32 = data;
int16_t *data_16 = data;
for (i = 0; i < blockpos; i++) {
for (out_ch = 0; out_ch <= max_matrix_channel; out_ch++) {
int mat_ch = ch_assign[out_ch];
int32_t sample = sample_buffer[i][mat_ch]
<< output_shift[mat_ch];
lossless_check_data ^= (sample & 0xffffff) << mat_ch;
if (is32)
*data_32++ = sample << 8;
else
*data_16++ = sample >> 8;
}
}
return lossless_check_data;
}
| true | FFmpeg | 74dc728a2c2cc353da20cdc09b8cdfbbe14b7be8 |