label (int64: 0-1) | func1 (string: 23-97k chars) | id (int64: 0-27.3k) |
---|---|---|
0 | QInt *qint_from_int(int64_t value) { QInt *qi; qi = g_malloc(sizeof(*qi)); qi->value = value; QOBJECT_INIT(qi, &qint_type); return qi; } | 3,045 |
0 | int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors) { Coroutine *co; DiscardCo rwco = { .bs = bs, .sector_num = sector_num, .nb_sectors = nb_sectors, .ret = NOT_DONE, }; if (qemu_in_coroutine()) { /* Fast-path if already in coroutine context */ bdrv_discard_co_entry(&rwco); } else { co = qemu_coroutine_create(bdrv_discard_co_entry); qemu_coroutine_enter(co, &rwco); while (rwco.ret == NOT_DONE) { qemu_aio_wait(); } } return rwco.ret; } | 3,046 |
0 | static int tusb6010_init(SysBusDevice *dev) { TUSBState *s = FROM_SYSBUS(TUSBState, dev); qemu_irq *musb_irqs; int i; s->otg_timer = qemu_new_timer_ns(vm_clock, tusb_otg_tick, s); s->pwr_timer = qemu_new_timer_ns(vm_clock, tusb_power_tick, s); memory_region_init_io(&s->iomem[1], &tusb_async_ops, s, "tusb-async", UINT32_MAX); sysbus_init_mmio_region(dev, &s->iomem[0]); sysbus_init_mmio_region(dev, &s->iomem[1]); sysbus_init_irq(dev, &s->irq); qdev_init_gpio_in(&dev->qdev, tusb6010_irq, musb_irq_max + 1); musb_irqs = g_new0(qemu_irq, musb_irq_max); for (i = 0; i < musb_irq_max; i++) { musb_irqs[i] = qdev_get_gpio_in(&dev->qdev, i + 1); } s->musb = musb_init(musb_irqs); return 0; } | 3,048 |
0 | void qpci_msix_disable(QPCIDevice *dev) { uint8_t addr; uint16_t val; g_assert(dev->msix_enabled); addr = qpci_find_capability(dev, PCI_CAP_ID_MSIX); g_assert_cmphex(addr, !=, 0); val = qpci_config_readw(dev, addr + PCI_MSIX_FLAGS); qpci_config_writew(dev, addr + PCI_MSIX_FLAGS, val & ~PCI_MSIX_FLAGS_ENABLE); qpci_iounmap(dev, dev->msix_table_bar); qpci_iounmap(dev, dev->msix_pba_bar); dev->msix_enabled = 0; dev->msix_table_off = 0; dev->msix_pba_off = 0; } | 3,049 |
0 | void vnc_display_open(const char *id, Error **errp) { VncDisplay *vs = vnc_display_find(id); QemuOpts *opts = qemu_opts_find(&qemu_vnc_opts, id); QemuOpts *sopts, *wsopts; const char *share, *device_id; QemuConsole *con; bool password = false; bool reverse = false; const char *vnc; const char *has_to; char *h; bool has_ipv4 = false; bool has_ipv6 = false; const char *websocket; bool tls = false, x509 = false; #ifdef CONFIG_VNC_TLS const char *path; #endif bool sasl = false; #ifdef CONFIG_VNC_SASL int saslErr; #endif #if defined(CONFIG_VNC_TLS) || defined(CONFIG_VNC_SASL) int acl = 0; #endif int lock_key_sync = 1; if (!vs) { error_setg(errp, "VNC display not active"); return; } vnc_display_close(vs); if (!opts) { return; } vnc = qemu_opt_get(opts, "vnc"); if (!vnc || strcmp(vnc, "none") == 0) { return; } sopts = qemu_opts_create(&socket_optslist, NULL, 0, &error_abort); wsopts = qemu_opts_create(&socket_optslist, NULL, 0, &error_abort); h = strrchr(vnc, ':'); if (h) { char *host = g_strndup(vnc, h - vnc); qemu_opt_set(sopts, "host", host, &error_abort); qemu_opt_set(wsopts, "host", host, &error_abort); qemu_opt_set(sopts, "port", h+1, &error_abort); g_free(host); } else { error_setg(errp, "no vnc port specified"); goto fail; } has_to = qemu_opt_get(opts, "to"); has_ipv4 = qemu_opt_get_bool(opts, "ipv4", false); has_ipv6 = qemu_opt_get_bool(opts, "ipv6", false); if (has_to) { qemu_opt_set(sopts, "to", has_to, &error_abort); qemu_opt_set(wsopts, "to", has_to, &error_abort); } if (has_ipv4) { qemu_opt_set(sopts, "ipv4", "on", &error_abort); qemu_opt_set(wsopts, "ipv4", "on", &error_abort); } if (has_ipv6) { qemu_opt_set(sopts, "ipv6", "on", &error_abort); qemu_opt_set(wsopts, "ipv6", "on", &error_abort); } password = qemu_opt_get_bool(opts, "password", false); if (password && fips_get_state()) { error_setg(errp, "VNC password auth disabled due to FIPS mode, " "consider using the VeNCrypt or SASL authentication " "methods as an alternative"); goto fail; } reverse = qemu_opt_get_bool(opts, "reverse", false); lock_key_sync = qemu_opt_get_bool(opts, "lock-key-sync", true); sasl = qemu_opt_get_bool(opts, "sasl", false); #ifndef CONFIG_VNC_SASL if (sasl) { error_setg(errp, "VNC SASL auth requires cyrus-sasl support"); goto fail; } #endif /* CONFIG_VNC_SASL */ tls = qemu_opt_get_bool(opts, "tls", false); #ifdef CONFIG_VNC_TLS path = qemu_opt_get(opts, "x509"); if (!path) { path = qemu_opt_get(opts, "x509verify"); if (path) { vs->tls.x509verify = true; } } if (path) { x509 = true; if (vnc_tls_set_x509_creds_dir(vs, path) < 0) { error_setg(errp, "Failed to find x509 certificates/keys in %s", path); goto fail; } } #else /* ! CONFIG_VNC_TLS */ if (tls) { error_setg(errp, "VNC TLS auth requires gnutls support"); goto fail; } #endif /* ! 
CONFIG_VNC_TLS */ #if defined(CONFIG_VNC_TLS) || defined(CONFIG_VNC_SASL) acl = qemu_opt_get_bool(opts, "acl", false); #endif share = qemu_opt_get(opts, "share"); if (share) { if (strcmp(share, "ignore") == 0) { vs->share_policy = VNC_SHARE_POLICY_IGNORE; } else if (strcmp(share, "allow-exclusive") == 0) { vs->share_policy = VNC_SHARE_POLICY_ALLOW_EXCLUSIVE; } else if (strcmp(share, "force-shared") == 0) { vs->share_policy = VNC_SHARE_POLICY_FORCE_SHARED; } else { error_setg(errp, "unknown vnc share= option"); goto fail; } } else { vs->share_policy = VNC_SHARE_POLICY_ALLOW_EXCLUSIVE; } vs->connections_limit = qemu_opt_get_number(opts, "connections", 32); websocket = qemu_opt_get(opts, "websocket"); if (websocket) { #ifdef CONFIG_VNC_WS vs->ws_enabled = true; qemu_opt_set(wsopts, "port", websocket, &error_abort); #else /* ! CONFIG_VNC_WS */ error_setg(errp, "Websockets protocol requires gnutls support"); goto fail; #endif /* ! CONFIG_VNC_WS */ } #ifdef CONFIG_VNC_JPEG vs->lossy = qemu_opt_get_bool(opts, "lossy", false); #endif vs->non_adaptive = qemu_opt_get_bool(opts, "non-adaptive", false); /* adaptive updates are only used with tight encoding and * if lossy updates are enabled so we can disable all the * calculations otherwise */ if (!vs->lossy) { vs->non_adaptive = true; } #ifdef CONFIG_VNC_TLS if (acl && x509 && vs->tls.x509verify) { char *aclname; if (strcmp(vs->id, "default") == 0) { aclname = g_strdup("vnc.x509dname"); } else { aclname = g_strdup_printf("vnc.%s.x509dname", vs->id); } vs->tls.acl = qemu_acl_init(aclname); if (!vs->tls.acl) { fprintf(stderr, "Failed to create x509 dname ACL\n"); exit(1); } g_free(aclname); } #endif #ifdef CONFIG_VNC_SASL if (acl && sasl) { char *aclname; if (strcmp(vs->id, "default") == 0) { aclname = g_strdup("vnc.username"); } else { aclname = g_strdup_printf("vnc.%s.username", vs->id); } vs->sasl.acl = qemu_acl_init(aclname); if (!vs->sasl.acl) { fprintf(stderr, "Failed to create username ACL\n"); exit(1); } g_free(aclname); } #endif vnc_display_setup_auth(vs, password, sasl, tls, x509); #ifdef CONFIG_VNC_SASL if ((saslErr = sasl_server_init(NULL, "qemu")) != SASL_OK) { error_setg(errp, "Failed to initialize SASL auth: %s", sasl_errstring(saslErr, NULL, NULL)); goto fail; } #endif vs->lock_key_sync = lock_key_sync; device_id = qemu_opt_get(opts, "display"); if (device_id) { DeviceState *dev; int head = qemu_opt_get_number(opts, "head", 0); dev = qdev_find_recursive(sysbus_get_default(), device_id); if (dev == NULL) { error_setg(errp, "Device '%s' not found", device_id); goto fail; } con = qemu_console_lookup_by_device(dev, head); if (con == NULL) { error_setg(errp, "Device %s is not bound to a QemuConsole", device_id); goto fail; } } else { con = NULL; } if (con != vs->dcl.con) { unregister_displaychangelistener(&vs->dcl); vs->dcl.con = con; register_displaychangelistener(&vs->dcl); } if (reverse) { /* connect to viewer */ int csock; vs->lsock = -1; #ifdef CONFIG_VNC_WS vs->lwebsock = -1; #endif if (strncmp(vnc, "unix:", 5) == 0) { csock = unix_connect(vnc+5, errp); } else { csock = inet_connect(vnc, errp); } if (csock < 0) { goto fail; } vnc_connect(vs, csock, false, false); } else { /* listen for connects */ if (strncmp(vnc, "unix:", 5) == 0) { vs->lsock = unix_listen(vnc+5, NULL, 0, errp); vs->is_unix = true; } else { vs->lsock = inet_listen_opts(sopts, 5900, errp); if (vs->lsock < 0) { goto fail; } #ifdef CONFIG_VNC_WS if (vs->ws_enabled) { vs->lwebsock = inet_listen_opts(wsopts, 0, errp); if (vs->lwebsock < 0) { if (vs->lsock != -1) { 
close(vs->lsock); vs->lsock = -1; } goto fail; } } #endif /* CONFIG_VNC_WS */ } vs->enabled = true; qemu_set_fd_handler2(vs->lsock, NULL, vnc_listen_regular_read, NULL, vs); #ifdef CONFIG_VNC_WS if (vs->ws_enabled) { qemu_set_fd_handler2(vs->lwebsock, NULL, vnc_listen_websocket_read, NULL, vs); } #endif /* CONFIG_VNC_WS */ } qemu_opts_del(sopts); qemu_opts_del(wsopts); return; fail: qemu_opts_del(sopts); qemu_opts_del(wsopts); vs->enabled = false; #ifdef CONFIG_VNC_WS vs->ws_enabled = false; #endif /* CONFIG_VNC_WS */ } | 3,050 |
0 | static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, int64_t offset, int count) { int ret; BDRVQcow2State *s = bs->opaque; if (!QEMU_IS_ALIGNED(offset | count, s->cluster_size)) { assert(count < s->cluster_size); /* Ignore partial clusters, except for the special case of the * complete partial cluster at the end of an unaligned file */ if (!QEMU_IS_ALIGNED(offset, s->cluster_size) || offset + count != bs->total_sectors * BDRV_SECTOR_SIZE) { return -ENOTSUP; } } qemu_co_mutex_lock(&s->lock); ret = qcow2_discard_clusters(bs, offset, count >> BDRV_SECTOR_BITS, QCOW2_DISCARD_REQUEST, false); qemu_co_mutex_unlock(&s->lock); return ret; } | 3,051 |
0 | static int nbd_handle_list(NBDClient *client, uint32_t length) { int csock; NBDExport *exp; csock = client->sock; if (length) { if (drop_sync(csock, length) != length) { return -EIO; } return nbd_send_rep(csock, NBD_REP_ERR_INVALID, NBD_OPT_LIST); } /* For each export, send a NBD_REP_SERVER reply. */ QTAILQ_FOREACH(exp, &exports, next) { if (nbd_send_rep_list(csock, exp)) { return -EINVAL; } } /* Finish with a NBD_REP_ACK. */ return nbd_send_rep(csock, NBD_REP_ACK, NBD_OPT_LIST); } | 3,052 |
0 | static void acpi_build_update(void *build_opaque, uint32_t offset) { AcpiBuildState *build_state = build_opaque; AcpiBuildTables tables; /* No state to update or already patched? Nothing to do. */ if (!build_state || build_state->patched) { return; } build_state->patched = 1; acpi_build_tables_init(&tables); acpi_build(build_state->guest_info, &tables); assert(acpi_data_len(tables.table_data) == build_state->table_size); /* Make sure RAM size is correct - in case it got changed by migration */ qemu_ram_resize(build_state->table_ram, build_state->table_size, &error_abort); memcpy(qemu_get_ram_ptr(build_state->table_ram), tables.table_data->data, build_state->table_size); memcpy(build_state->rsdp, tables.rsdp->data, acpi_data_len(tables.rsdp)); memcpy(qemu_get_ram_ptr(build_state->linker_ram), tables.linker->data, build_state->linker_size); cpu_physical_memory_set_dirty_range_nocode(build_state->table_ram, build_state->table_size); acpi_build_tables_cleanup(&tables, true); } | 3,053 |
0 | static void disas_cond_select(DisasContext *s, uint32_t insn) { unsigned int sf, else_inv, rm, cond, else_inc, rn, rd; TCGv_i64 tcg_rd, tcg_src; if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) { /* S == 1 or op2<1> == 1 */ unallocated_encoding(s); return; } sf = extract32(insn, 31, 1); else_inv = extract32(insn, 30, 1); rm = extract32(insn, 16, 5); cond = extract32(insn, 12, 4); else_inc = extract32(insn, 10, 1); rn = extract32(insn, 5, 5); rd = extract32(insn, 0, 5); if (rd == 31) { /* silly no-op write; until we use movcond we must special-case * this to avoid a dead temporary across basic blocks. */ return; } tcg_rd = cpu_reg(s, rd); if (cond >= 0x0e) { /* condition "always" */ tcg_src = read_cpu_reg(s, rn, sf); tcg_gen_mov_i64(tcg_rd, tcg_src); } else { /* OPTME: we could use movcond here, at the cost of duplicating * a lot of the arm_gen_test_cc() logic. */ int label_match = gen_new_label(); int label_continue = gen_new_label(); arm_gen_test_cc(cond, label_match); /* nomatch: */ tcg_src = cpu_reg(s, rm); if (else_inv && else_inc) { tcg_gen_neg_i64(tcg_rd, tcg_src); } else if (else_inv) { tcg_gen_not_i64(tcg_rd, tcg_src); } else if (else_inc) { tcg_gen_addi_i64(tcg_rd, tcg_src, 1); } else { tcg_gen_mov_i64(tcg_rd, tcg_src); } if (!sf) { tcg_gen_ext32u_i64(tcg_rd, tcg_rd); } tcg_gen_br(label_continue); /* match: */ gen_set_label(label_match); tcg_src = read_cpu_reg(s, rn, sf); tcg_gen_mov_i64(tcg_rd, tcg_src); /* continue: */ gen_set_label(label_continue); } } | 3,055 |
0 | static FWCfgState *bochs_bios_init(AddressSpace *as, PCMachineState *pcms) { FWCfgState *fw_cfg; uint64_t *numa_fw_cfg; int i; const CPUArchIdList *cpus; MachineClass *mc = MACHINE_GET_CLASS(pcms); fw_cfg = fw_cfg_init_io_dma(FW_CFG_IO_BASE, FW_CFG_IO_BASE + 4, as); fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus); /* FW_CFG_MAX_CPUS is a bit confusing/problematic on x86: * * For machine types prior to 1.8, SeaBIOS needs FW_CFG_MAX_CPUS for * building MPTable, ACPI MADT, ACPI CPU hotplug and ACPI SRAT table, * that tables are based on xAPIC ID and QEMU<->SeaBIOS interface * for CPU hotplug also uses APIC ID and not "CPU index". * This means that FW_CFG_MAX_CPUS is not the "maximum number of CPUs", * but the "limit to the APIC ID values SeaBIOS may see". * * So for compatibility reasons with old BIOSes we are stuck with * "etc/max-cpus" actually being apic_id_limit */ fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)pcms->apic_id_limit); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_ACPI_TABLES, acpi_tables, acpi_tables_len); fw_cfg_add_i32(fw_cfg, FW_CFG_IRQ0_OVERRIDE, kvm_allows_irq0_override()); fw_cfg_add_bytes(fw_cfg, FW_CFG_E820_TABLE, &e820_reserve, sizeof(e820_reserve)); fw_cfg_add_file(fw_cfg, "etc/e820", e820_table, sizeof(struct e820_entry) * e820_entries); fw_cfg_add_bytes(fw_cfg, FW_CFG_HPET, &hpet_cfg, sizeof(hpet_cfg)); /* allocate memory for the NUMA channel: one (64bit) word for the number * of nodes, one word for each VCPU->node and one word for each node to * hold the amount of memory. */ numa_fw_cfg = g_new0(uint64_t, 1 + pcms->apic_id_limit + nb_numa_nodes); numa_fw_cfg[0] = cpu_to_le64(nb_numa_nodes); cpus = mc->possible_cpu_arch_ids(MACHINE(pcms)); for (i = 0; i < cpus->len; i++) { unsigned int apic_id = cpus->cpus[i].arch_id; assert(apic_id < pcms->apic_id_limit); if (cpus->cpus[i].props.has_node_id) { numa_fw_cfg[apic_id + 1] = cpu_to_le64(cpus->cpus[i].props.node_id); } } for (i = 0; i < nb_numa_nodes; i++) { numa_fw_cfg[pcms->apic_id_limit + 1 + i] = cpu_to_le64(numa_info[i].node_mem); } fw_cfg_add_bytes(fw_cfg, FW_CFG_NUMA, numa_fw_cfg, (1 + pcms->apic_id_limit + nb_numa_nodes) * sizeof(*numa_fw_cfg)); return fw_cfg; } | 3,056 |
0 | static void init_timers(void) { init_get_clock(); rt_clock = qemu_new_clock(QEMU_TIMER_REALTIME); vm_clock = qemu_new_clock(QEMU_TIMER_VIRTUAL); } | 3,057 |
0 | void qemu_spice_destroy_primary_surface(SimpleSpiceDisplay *ssd, uint32_t id, qxl_async_io async) { if (async != QXL_SYNC) { #if SPICE_INTERFACE_QXL_MINOR >= 1 spice_qxl_destroy_primary_surface_async(&ssd->qxl, id, 0); #else abort(); #endif } else { ssd->worker->destroy_primary_surface(ssd->worker, id); } } | 3,058 |
0 | int virtio_blk_handle_scsi_req(VirtIOBlock *blk, VirtQueueElement *elem) { int status = VIRTIO_BLK_S_OK; struct virtio_scsi_inhdr *scsi = NULL; VirtIODevice *vdev = VIRTIO_DEVICE(blk); #ifdef __linux__ int i; struct sg_io_hdr hdr; #endif /* * We require at least one output segment each for the virtio_blk_outhdr * and the SCSI command block. * * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr * and the sense buffer pointer in the input segments. */ if (elem->out_num < 2 || elem->in_num < 3) { status = VIRTIO_BLK_S_IOERR; goto fail; } /* * The scsi inhdr is placed in the second-to-last input segment, just * before the regular inhdr. */ scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base; if (!blk->conf.scsi) { status = VIRTIO_BLK_S_UNSUPP; goto fail; } /* * No support for bidirection commands yet. */ if (elem->out_num > 2 && elem->in_num > 3) { status = VIRTIO_BLK_S_UNSUPP; goto fail; } #ifdef __linux__ memset(&hdr, 0, sizeof(struct sg_io_hdr)); hdr.interface_id = 'S'; hdr.cmd_len = elem->out_sg[1].iov_len; hdr.cmdp = elem->out_sg[1].iov_base; hdr.dxfer_len = 0; if (elem->out_num > 2) { /* * If there are more than the minimally required 2 output segments * there is write payload starting from the third iovec. */ hdr.dxfer_direction = SG_DXFER_TO_DEV; hdr.iovec_count = elem->out_num - 2; for (i = 0; i < hdr.iovec_count; i++) hdr.dxfer_len += elem->out_sg[i + 2].iov_len; hdr.dxferp = elem->out_sg + 2; } else if (elem->in_num > 3) { /* * If we have more than 3 input segments the guest wants to actually * read data. */ hdr.dxfer_direction = SG_DXFER_FROM_DEV; hdr.iovec_count = elem->in_num - 3; for (i = 0; i < hdr.iovec_count; i++) hdr.dxfer_len += elem->in_sg[i].iov_len; hdr.dxferp = elem->in_sg; } else { /* * Some SCSI commands don't actually transfer any data. */ hdr.dxfer_direction = SG_DXFER_NONE; } hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base; hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len; status = bdrv_ioctl(blk->bs, SG_IO, &hdr); if (status) { status = VIRTIO_BLK_S_UNSUPP; goto fail; } /* * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi) * clear the masked_status field [hence status gets cleared too, see * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED * status has occurred. However they do set DRIVER_SENSE in driver_status * field. Also a (sb_len_wr > 0) indicates there is a sense buffer. */ if (hdr.status == 0 && hdr.sb_len_wr > 0) { hdr.status = CHECK_CONDITION; } virtio_stl_p(vdev, &scsi->errors, hdr.status | (hdr.msg_status << 8) | (hdr.host_status << 16) | (hdr.driver_status << 24)); virtio_stl_p(vdev, &scsi->residual, hdr.resid); virtio_stl_p(vdev, &scsi->sense_len, hdr.sb_len_wr); virtio_stl_p(vdev, &scsi->data_len, hdr.dxfer_len); return status; #else abort(); #endif fail: /* Just put anything nonzero so that the ioctl fails in the guest. */ if (scsi) { virtio_stl_p(vdev, &scsi->errors, 255); } return status; } | 3,060 |
0 | static void test_visitor_out_null(TestOutputVisitorData *data, const void *unused) { QObject *arg; QDict *qdict; QObject *nil; visit_start_struct(data->ov, NULL, NULL, 0, &error_abort); visit_type_null(data->ov, "a", &error_abort); visit_check_struct(data->ov, &error_abort); visit_end_struct(data->ov, NULL); arg = visitor_get(data); g_assert(qobject_type(arg) == QTYPE_QDICT); qdict = qobject_to_qdict(arg); g_assert_cmpint(qdict_size(qdict), ==, 1); nil = qdict_get(qdict, "a"); g_assert(nil); g_assert(qobject_type(nil) == QTYPE_QNULL); } | 3,062 |
0 | struct vhost_net *vhost_net_init(VhostNetOptions *options) { int r; bool backend_kernel = options->backend_type == VHOST_BACKEND_TYPE_KERNEL; struct vhost_net *net = g_malloc(sizeof *net); if (!options->net_backend) { fprintf(stderr, "vhost-net requires net backend to be setup\n"); goto fail; } if (backend_kernel) { r = vhost_net_get_fd(options->net_backend); if (r < 0) { goto fail; } net->dev.backend_features = qemu_has_vnet_hdr(options->net_backend) ? 0 : (1 << VHOST_NET_F_VIRTIO_NET_HDR); net->backend = r; } else { net->dev.backend_features = 0; net->backend = -1; } net->nc = options->net_backend; net->dev.nvqs = 2; net->dev.vqs = net->vqs; r = vhost_dev_init(&net->dev, options->opaque, options->backend_type, options->force); if (r < 0) { goto fail; } if (!qemu_has_vnet_hdr_len(options->net_backend, sizeof(struct virtio_net_hdr_mrg_rxbuf))) { net->dev.features &= ~(1 << VIRTIO_NET_F_MRG_RXBUF); } if (backend_kernel) { if (~net->dev.features & net->dev.backend_features) { fprintf(stderr, "vhost lacks feature mask %" PRIu64 " for backend\n", (uint64_t)(~net->dev.features & net->dev.backend_features)); vhost_dev_cleanup(&net->dev); goto fail; } } /* Set sane init value. Override when guest acks. */ vhost_net_ack_features(net, 0); return net; fail: g_free(net); return NULL; } | 3,063 |
0 | static void lsi_do_dma(LSIState *s, int out) { uint32_t count; target_phys_addr_t addr; if (!s->current_dma_len) { /* Wait until data is available. */ DPRINTF("DMA no data available\n"); return; } count = s->dbc; if (count > s->current_dma_len) count = s->current_dma_len; addr = s->dnad; if (lsi_dma_40bit(s)) addr |= ((uint64_t)s->dnad64 << 32); else if (s->sbms) addr |= ((uint64_t)s->sbms << 32); DPRINTF("DMA addr=0x" TARGET_FMT_plx " len=%d\n", addr, count); s->csbc += count; s->dnad += count; s->dbc -= count; if (s->dma_buf == NULL) { s->dma_buf = s->current_dev->get_buf(s->current_dev, s->current_tag); } /* ??? Set SFBR to first data byte. */ if (out) { cpu_physical_memory_read(addr, s->dma_buf, count); } else { cpu_physical_memory_write(addr, s->dma_buf, count); } s->current_dma_len -= count; if (s->current_dma_len == 0) { s->dma_buf = NULL; if (out) { /* Write the data. */ s->current_dev->write_data(s->current_dev, s->current_tag); } else { /* Request any remaining data. */ s->current_dev->read_data(s->current_dev, s->current_tag); } } else { s->dma_buf += count; lsi_resume_script(s); } } | 3,064 |
0 | static inline uint64_t ucf64_dtoi(float64 d) { union { uint64_t i; float64 d; } v; v.d = d; return v.i; } | 3,066 |
0 | static ssize_t handle_aiocb_ioctl(RawPosixAIOData *aiocb) { int ret; ret = ioctl(aiocb->aio_fildes, aiocb->aio_ioctl_cmd, aiocb->aio_ioctl_buf); if (ret == -1) { return -errno; } /* * This looks weird, but the aio code only considers a request * successful if it has written the full number of bytes. * * Now we overload aio_nbytes as aio_ioctl_cmd for the ioctl command, * so in fact we return the ioctl command here to make posix_aio_read() * happy.. */ return aiocb->aio_nbytes; } | 3,067 |
0 | static int dump_init(DumpState *s, int fd, bool has_format, DumpGuestMemoryFormat format, bool paging, bool has_filter, int64_t begin, int64_t length, Error **errp) { CPUState *cpu; int nr_cpus; Error *err = NULL; int ret; /* kdump-compressed is conflict with paging and filter */ if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) { assert(!paging && !has_filter); } if (runstate_is_running()) { vm_stop(RUN_STATE_SAVE_VM); s->resume = true; } else { s->resume = false; } /* If we use KVM, we should synchronize the registers before we get dump * info or physmap info. */ cpu_synchronize_all_states(); nr_cpus = 0; CPU_FOREACH(cpu) { nr_cpus++; } s->fd = fd; s->has_filter = has_filter; s->begin = begin; s->length = length; guest_phys_blocks_init(&s->guest_phys_blocks); guest_phys_blocks_append(&s->guest_phys_blocks); s->start = get_start_block(s); if (s->start == -1) { error_set(errp, QERR_INVALID_PARAMETER, "begin"); goto cleanup; } /* get dump info: endian, class and architecture. * If the target architecture is not supported, cpu_get_dump_info() will * return -1. */ ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks); if (ret < 0) { error_set(errp, QERR_UNSUPPORTED); goto cleanup; } s->note_size = cpu_get_note_size(s->dump_info.d_class, s->dump_info.d_machine, nr_cpus); if (s->note_size < 0) { error_set(errp, QERR_UNSUPPORTED); goto cleanup; } /* get memory mapping */ memory_mapping_list_init(&s->list); if (paging) { qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err); if (err != NULL) { error_propagate(errp, err); goto cleanup; } } else { qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks); } s->nr_cpus = nr_cpus; s->page_size = TARGET_PAGE_SIZE; get_max_mapnr(s); uint64_t tmp; tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), s->page_size); s->len_dump_bitmap = tmp * s->page_size; /* init for kdump-compressed format */ if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) { switch (format) { case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB: s->flag_compress = DUMP_DH_COMPRESSED_ZLIB; break; case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO: s->flag_compress = DUMP_DH_COMPRESSED_LZO; break; case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY: s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY; break; default: s->flag_compress = 0; } return 0; } if (s->has_filter) { memory_mapping_filter(&s->list, s->begin, s->length); } /* * calculate phdr_num * * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow */ s->phdr_num = 1; /* PT_NOTE */ if (s->list.num < UINT16_MAX - 2) { s->phdr_num += s->list.num; s->have_section = false; } else { s->have_section = true; s->phdr_num = PN_XNUM; s->sh_info = 1; /* PT_NOTE */ /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */ if (s->list.num <= UINT32_MAX - 1) { s->sh_info += s->list.num; } else { s->sh_info = UINT32_MAX; } } if (s->dump_info.d_class == ELFCLASS64) { if (s->have_section) { s->memory_offset = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info + sizeof(Elf64_Shdr) + s->note_size; } else { s->memory_offset = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->phdr_num + s->note_size; } } else { if (s->have_section) { s->memory_offset = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info + sizeof(Elf32_Shdr) + s->note_size; } else { s->memory_offset = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->phdr_num + s->note_size; } } return 0; cleanup: guest_phys_blocks_free(&s->guest_phys_blocks); if (s->resume) { vm_start(); } return -1; } | 3,068 |
1 | static void test_wait_event_notifier_noflush(void) { EventNotifierTestData data = { .n = 0 }; EventNotifierTestData dummy = { .n = 0, .active = 1 }; event_notifier_init(&data.e, false); aio_set_event_notifier(ctx, &data.e, event_ready_cb, NULL); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 0); /* Until there is an active descriptor, aio_poll may or may not call * event_ready_cb. Still, it must not block. */ event_notifier_set(&data.e); g_assert(!aio_poll(ctx, true)); data.n = 0; /* An active event notifier forces aio_poll to look at EventNotifiers. */ event_notifier_init(&dummy.e, false); aio_set_event_notifier(ctx, &dummy.e, event_ready_cb, event_active_cb); event_notifier_set(&data.e); g_assert(aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 1); g_assert(aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 1); event_notifier_set(&data.e); g_assert(aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 2); g_assert(aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 2); event_notifier_set(&dummy.e); wait_for_aio(); g_assert_cmpint(data.n, ==, 2); g_assert_cmpint(dummy.n, ==, 1); g_assert_cmpint(dummy.active, ==, 0); aio_set_event_notifier(ctx, &dummy.e, NULL, NULL); event_notifier_cleanup(&dummy.e); aio_set_event_notifier(ctx, &data.e, NULL, NULL); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 2); event_notifier_cleanup(&data.e); } | 3,069 |
1 | static void celt_search_for_intensity(OpusPsyContext *s, CeltFrame *f) { int i, best_band = CELT_MAX_BANDS - 1; float dist, best_dist = FLT_MAX; /* TODO: fix, make some heuristic up here using the lambda value */ float end_band = 0; for (i = f->end_band; i >= end_band; i--) { f->intensity_stereo = i; bands_dist(s, f, &dist); if (best_dist > dist) { best_dist = dist; best_band = i; } } f->intensity_stereo = best_band; s->avg_is_band = (s->avg_is_band + f->intensity_stereo)/2.0f; } | 3,072 |
1 | unsigned int qemu_get_be32(QEMUFile *f) { unsigned int v; v = qemu_get_byte(f) << 24; v |= qemu_get_byte(f) << 16; v |= qemu_get_byte(f) << 8; v |= qemu_get_byte(f); return v; } | 3,073 |
1 | static int usage(int ret) { fprintf(stderr, "dump (up to maxpkts) AVPackets as they are demuxed by libavformat.\n"); fprintf(stderr, "each packet is dumped in its own file named like `basename file.ext`_$PKTNUM_$STREAMINDEX_$STAMP_$SIZE_$FLAGS.bin\n"); fprintf(stderr, "pktdumper file [maxpkts]\n"); return ret; } | 3,074 |
1 | static void ide_dma_cb(void *opaque, int ret) { IDEState *s = opaque; int n; int64_t sector_num; bool stay_active = false; if (ret == -ECANCELED) { return; } if (ret < 0) { int op = IDE_RETRY_DMA; if (s->dma_cmd == IDE_DMA_READ) op |= IDE_RETRY_READ; else if (s->dma_cmd == IDE_DMA_TRIM) op |= IDE_RETRY_TRIM; if (ide_handle_rw_error(s, -ret, op)) { return; } } n = s->io_buffer_size >> 9; if (n > s->nsector) { /* The PRDs were longer than needed for this request. Shorten them so * we don't get a negative remainder. The Active bit must remain set * after the request completes. */ n = s->nsector; stay_active = true; } sector_num = ide_get_sector(s); if (n > 0) { assert(s->io_buffer_size == s->sg.size); dma_buf_commit(s, s->io_buffer_size); sector_num += n; ide_set_sector(s, sector_num); s->nsector -= n; } /* end of transfer ? */ if (s->nsector == 0) { s->status = READY_STAT | SEEK_STAT; ide_set_irq(s->bus); goto eot; } /* launch next transfer */ n = s->nsector; s->io_buffer_index = 0; s->io_buffer_size = n * 512; if (s->bus->dma->ops->prepare_buf(s->bus->dma, ide_cmd_is_read(s)) < 512) { /* The PRDs were too short. Reset the Active bit, but don't raise an * interrupt. */ s->status = READY_STAT | SEEK_STAT; dma_buf_commit(s, 0); goto eot; } #ifdef DEBUG_AIO printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, cmd_cmd=%d\n", sector_num, n, s->dma_cmd); #endif if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) && !ide_sect_range_ok(s, sector_num, n)) { ide_dma_error(s); return; } switch (s->dma_cmd) { case IDE_DMA_READ: s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, sector_num, ide_dma_cb, s); break; case IDE_DMA_WRITE: s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, sector_num, ide_dma_cb, s); break; case IDE_DMA_TRIM: s->bus->dma->aiocb = dma_blk_io(s->blk, &s->sg, sector_num, ide_issue_trim, ide_dma_cb, s, DMA_DIRECTION_TO_DEVICE); break; } return; eot: if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) { block_acct_done(blk_get_stats(s->blk), &s->acct); } ide_set_inactive(s, stay_active); } | 3,075 |
1 | static av_noinline void emulated_edge_mc_sse(uint8_t *buf, ptrdiff_t buf_stride, const uint8_t *src, ptrdiff_t src_stride, int block_w, int block_h, int src_x, int src_y, int w, int h) { emulated_edge_mc(buf, buf_stride, src, src_stride, block_w, block_h, src_x, src_y, w, h, vfixtbl_sse, &ff_emu_edge_vvar_sse, hfixtbl_sse, #if ARCH_X86_64 &ff_emu_edge_hvar_sse #else &ff_emu_edge_hvar_mmx #endif ); } | 3,077 |
0 | static int yuv4_read_header(AVFormatContext *s) { char header[MAX_YUV4_HEADER + 10]; // Include headroom for // the longest option char *tokstart, *tokend, *header_end; int i; AVIOContext *pb = s->pb; int width = -1, height = -1, raten = 0, rated = 0, aspectn = 0, aspectd = 0; enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE, alt_pix_fmt = AV_PIX_FMT_NONE; enum AVChromaLocation chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED; AVStream *st; enum AVFieldOrder field_order; for (i = 0; i < MAX_YUV4_HEADER; i++) { header[i] = avio_r8(pb); if (header[i] == '\n') { header[i + 1] = 0x20; // Add a space after last option. // Makes parsing "444" vs "444alpha" easier. header[i + 2] = 0; break; } } if (i == MAX_YUV4_HEADER) return -1; if (strncmp(header, Y4M_MAGIC, strlen(Y4M_MAGIC))) return -1; header_end = &header[i + 1]; // Include space for (tokstart = &header[strlen(Y4M_MAGIC) + 1]; tokstart < header_end; tokstart++) { if (*tokstart == 0x20) continue; switch (*tokstart++) { case 'W': // Width. Required. width = strtol(tokstart, &tokend, 10); tokstart = tokend; break; case 'H': // Height. Required. height = strtol(tokstart, &tokend, 10); tokstart = tokend; break; case 'C': // Color space if (strncmp("420jpeg", tokstart, 7) == 0) { pix_fmt = AV_PIX_FMT_YUV420P; chroma_sample_location = AVCHROMA_LOC_CENTER; } else if (strncmp("420mpeg2", tokstart, 8) == 0) { pix_fmt = AV_PIX_FMT_YUV420P; chroma_sample_location = AVCHROMA_LOC_LEFT; } else if (strncmp("420paldv", tokstart, 8) == 0) { pix_fmt = AV_PIX_FMT_YUV420P; chroma_sample_location = AVCHROMA_LOC_TOPLEFT; } else if (strncmp("420", tokstart, 3) == 0) { pix_fmt = AV_PIX_FMT_YUV420P; chroma_sample_location = AVCHROMA_LOC_CENTER; } else if (strncmp("411", tokstart, 3) == 0) pix_fmt = AV_PIX_FMT_YUV411P; else if (strncmp("422", tokstart, 3) == 0) pix_fmt = AV_PIX_FMT_YUV422P; else if (strncmp("444alpha", tokstart, 8) == 0 ) { av_log(s, AV_LOG_ERROR, "Cannot handle 4:4:4:4 " "YUV4MPEG stream.\n"); return -1; } else if (strncmp("444", tokstart, 3) == 0) pix_fmt = AV_PIX_FMT_YUV444P; else if (strncmp("mono", tokstart, 4) == 0) { pix_fmt = AV_PIX_FMT_GRAY8; } else { av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains an unknown " "pixel format.\n"); return -1; } while (tokstart < header_end && *tokstart != 0x20) tokstart++; break; case 'I': // Interlace type switch (*tokstart++){ case '?': field_order = AV_FIELD_UNKNOWN; break; case 'p': field_order = AV_FIELD_PROGRESSIVE; break; case 't': field_order = AV_FIELD_TT; break; case 'b': field_order = AV_FIELD_BB; break; case 'm': av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains mixed " "interlaced and non-interlaced frames.\n"); return -1; default: av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n"); return -1; } break; case 'F': // Frame rate sscanf(tokstart, "%d:%d", &raten, &rated); // 0:0 if unknown while (tokstart < header_end && *tokstart != 0x20) tokstart++; break; case 'A': // Pixel aspect sscanf(tokstart, "%d:%d", &aspectn, &aspectd); // 0:0 if unknown while (tokstart < header_end && *tokstart != 0x20) tokstart++; break; case 'X': // Vendor extensions if (strncmp("YSCSS=", tokstart, 6) == 0) { // Older nonstandard pixel format representation tokstart += 6; if (strncmp("420JPEG", tokstart, 7) == 0) alt_pix_fmt = AV_PIX_FMT_YUV420P; else if (strncmp("420MPEG2", tokstart, 8) == 0) alt_pix_fmt = AV_PIX_FMT_YUV420P; else if (strncmp("420PALDV", tokstart, 8) == 0) alt_pix_fmt = AV_PIX_FMT_YUV420P; else if (strncmp("411", tokstart, 3) == 0) alt_pix_fmt = AV_PIX_FMT_YUV411P; else if (strncmp("422", 
tokstart, 3) == 0) alt_pix_fmt = AV_PIX_FMT_YUV422P; else if (strncmp("444", tokstart, 3) == 0) alt_pix_fmt = AV_PIX_FMT_YUV444P; } while (tokstart < header_end && *tokstart != 0x20) tokstart++; break; } } if (width == -1 || height == -1) { av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n"); return -1; } if (pix_fmt == AV_PIX_FMT_NONE) { if (alt_pix_fmt == AV_PIX_FMT_NONE) pix_fmt = AV_PIX_FMT_YUV420P; else pix_fmt = alt_pix_fmt; } if (raten <= 0 || rated <= 0) { // Frame rate unknown raten = 25; rated = 1; } if (aspectn == 0 && aspectd == 0) { // Pixel aspect unknown aspectd = 1; } st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->codec->width = width; st->codec->height = height; av_reduce(&raten, &rated, raten, rated, (1UL << 31) - 1); avpriv_set_pts_info(st, 64, rated, raten); st->avg_frame_rate = av_inv_q(st->time_base); st->codec->pix_fmt = pix_fmt; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = AV_CODEC_ID_RAWVIDEO; st->sample_aspect_ratio = (AVRational){ aspectn, aspectd }; st->codec->chroma_sample_location = chroma_sample_location; st->codec->field_order = field_order; return 0; } | 3,078 |
0 | static int bmp_decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { BMPContext *s = avctx->priv_data; AVFrame *picture = data; AVFrame *p = &s->picture; unsigned int fsize, hsize; int width, height; unsigned int depth; BiCompression comp; unsigned int ihsize; int i, j, n, linesize; uint32_t rgb[3]; uint8_t *ptr; int dsize; const uint8_t *buf0 = buf; if(buf_size < 14){ av_log(avctx, AV_LOG_ERROR, "buf size too small (%d)\n", buf_size); return -1; } if(bytestream_get_byte(&buf) != 'B' || bytestream_get_byte(&buf) != 'M') { av_log(avctx, AV_LOG_ERROR, "bad magic number\n"); return -1; } fsize = bytestream_get_le32(&buf); if(buf_size < fsize){ av_log(avctx, AV_LOG_ERROR, "not enough data (%d < %d)\n", buf_size, fsize); return -1; } buf += 2; /* reserved1 */ buf += 2; /* reserved2 */ hsize = bytestream_get_le32(&buf); /* header size */ if(fsize <= hsize){ av_log(avctx, AV_LOG_ERROR, "declared file size is less than header size (%d < %d)\n", fsize, hsize); return -1; } ihsize = bytestream_get_le32(&buf); /* more header size */ if(ihsize + 14 > hsize){ av_log(avctx, AV_LOG_ERROR, "invalid header size %d\n", hsize); return -1; } switch(ihsize){ case 40: // windib v3 case 64: // OS/2 v2 case 108: // windib v4 case 124: // windib v5 width = bytestream_get_le32(&buf); height = bytestream_get_le32(&buf); break; case 12: // OS/2 v1 width = bytestream_get_le16(&buf); height = bytestream_get_le16(&buf); break; default: av_log(avctx, AV_LOG_ERROR, "unsupported BMP file, patch welcome\n"); return -1; } if(bytestream_get_le16(&buf) != 1){ /* planes */ av_log(avctx, AV_LOG_ERROR, "invalid BMP header\n"); return -1; } depth = bytestream_get_le16(&buf); if(ihsize == 40) comp = bytestream_get_le32(&buf); else comp = BMP_RGB; if(comp != BMP_RGB && comp != BMP_BITFIELDS && comp != BMP_RLE4 && comp != BMP_RLE8){ av_log(avctx, AV_LOG_ERROR, "BMP coding %d not supported\n", comp); return -1; } if(comp == BMP_BITFIELDS){ buf += 20; rgb[0] = bytestream_get_le32(&buf); rgb[1] = bytestream_get_le32(&buf); rgb[2] = bytestream_get_le32(&buf); } avctx->width = width; avctx->height = height > 0? height: -height; avctx->pix_fmt = PIX_FMT_NONE; switch(depth){ case 32: if(comp == BMP_BITFIELDS){ rgb[0] = (rgb[0] >> 15) & 3; rgb[1] = (rgb[1] >> 15) & 3; rgb[2] = (rgb[2] >> 15) & 3; if(rgb[0] + rgb[1] + rgb[2] != 3 || rgb[0] == rgb[1] || rgb[0] == rgb[2] || rgb[1] == rgb[2]){ break; } } else { rgb[0] = 2; rgb[1] = 1; rgb[2] = 0; } avctx->pix_fmt = PIX_FMT_BGR24; break; case 24: avctx->pix_fmt = PIX_FMT_BGR24; break; case 16: if(comp == BMP_RGB) avctx->pix_fmt = PIX_FMT_RGB555; if(comp == BMP_BITFIELDS) avctx->pix_fmt = rgb[1] == 0x07E0 ? 
PIX_FMT_RGB565 : PIX_FMT_RGB555; break; case 8: if(hsize - ihsize - 14 > 0) avctx->pix_fmt = PIX_FMT_PAL8; else avctx->pix_fmt = PIX_FMT_GRAY8; break; case 4: if(hsize - ihsize - 14 > 0){ avctx->pix_fmt = PIX_FMT_PAL8; }else{ av_log(avctx, AV_LOG_ERROR, "Unknown palette for 16-colour BMP\n"); return -1; } break; case 1: avctx->pix_fmt = PIX_FMT_MONOBLACK; break; default: av_log(avctx, AV_LOG_ERROR, "depth %d not supported\n", depth); return -1; } if(avctx->pix_fmt == PIX_FMT_NONE){ av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n"); return -1; } if(p->data[0]) avctx->release_buffer(avctx, p); p->reference = 0; if(avctx->get_buffer(avctx, p) < 0){ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } p->pict_type = FF_I_TYPE; p->key_frame = 1; buf = buf0 + hsize; dsize = buf_size - hsize; /* Line size in file multiple of 4 */ n = ((avctx->width * depth) / 8 + 3) & ~3; if(n * avctx->height > dsize && comp != BMP_RLE4 && comp != BMP_RLE8){ av_log(avctx, AV_LOG_ERROR, "not enough data (%d < %d)\n", dsize, n * avctx->height); return -1; } // RLE may skip decoding some picture areas, so blank picture before decoding if(comp == BMP_RLE4 || comp == BMP_RLE8) memset(p->data[0], 0, avctx->height * p->linesize[0]); if(depth == 4 || depth == 8) memset(p->data[1], 0, 1024); if(height > 0){ ptr = p->data[0] + (avctx->height - 1) * p->linesize[0]; linesize = -p->linesize[0]; } else { ptr = p->data[0]; linesize = p->linesize[0]; } if(avctx->pix_fmt == PIX_FMT_PAL8){ buf = buf0 + 14 + ihsize; //palette location if((hsize-ihsize-14)>>depth < 4){ // OS/2 bitmap, 3 bytes per palette entry for(i = 0; i < (1 << depth); i++) ((uint32_t*)p->data[1])[i] = bytestream_get_le24(&buf); }else{ for(i = 0; i < (1 << depth); i++) ((uint32_t*)p->data[1])[i] = bytestream_get_le32(&buf); } buf = buf0 + hsize; } if(comp == BMP_RLE4 || comp == BMP_RLE8){ ff_msrle_decode(avctx, p, depth, buf, dsize); }else{ switch(depth){ case 1: for(i = 0; i < avctx->height; i++){ memcpy(ptr, buf, n); buf += n; ptr += linesize; } break; case 4: for(i = 0; i < avctx->height; i++){ int j; for(j = 0; j < n; j++){ ptr[j*2+0] = (buf[j] >> 4) & 0xF; ptr[j*2+1] = buf[j] & 0xF; } buf += n; ptr += linesize; } break; case 8: for(i = 0; i < avctx->height; i++){ memcpy(ptr, buf, avctx->width); buf += n; ptr += linesize; } break; case 24: for(i = 0; i < avctx->height; i++){ memcpy(ptr, buf, avctx->width*(depth>>3)); buf += n; ptr += linesize; } break; case 16: for(i = 0; i < avctx->height; i++){ const uint16_t *src = (const uint16_t *) buf; uint16_t *dst = (uint16_t *) ptr; for(j = 0; j < avctx->width; j++) *dst++ = le2me_16(*src++); buf += n; ptr += linesize; } break; case 32: for(i = 0; i < avctx->height; i++){ const uint8_t *src = buf; uint8_t *dst = ptr; for(j = 0; j < avctx->width; j++){ dst[0] = src[rgb[2]]; dst[1] = src[rgb[1]]; dst[2] = src[rgb[0]]; dst += 3; src += 4; } buf += n; ptr += linesize; } break; default: av_log(avctx, AV_LOG_ERROR, "BMP decoder is broken\n"); return -1; } } *picture = s->picture; *data_size = sizeof(AVPicture); return buf_size; } | 3,079 |
1 | static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu, int smt_threads) { int i, ret = 0; uint32_t servers_prop[smt_threads]; uint32_t gservers_prop[smt_threads * 2]; int index = ppc_get_vcpu_dt_id(cpu); if (cpu->cpu_version) { ret = fdt_setprop(fdt, offset, "cpu-version", &cpu->cpu_version, sizeof(cpu->cpu_version)); if (ret < 0) { return ret; } } /* Build interrupt servers and gservers properties */ for (i = 0; i < smt_threads; i++) { servers_prop[i] = cpu_to_be32(index + i); /* Hack, direct the group queues back to cpu 0 */ gservers_prop[i*2] = cpu_to_be32(index + i); gservers_prop[i*2 + 1] = 0; } ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s", servers_prop, sizeof(servers_prop)); if (ret < 0) { return ret; } ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s", gservers_prop, sizeof(gservers_prop)); return ret; } | 3,080 |
1 | static int net_socket_mcast_create(struct sockaddr_in *mcastaddr) { struct ip_mreq imr; int fd; int val, ret; if (!IN_MULTICAST(ntohl(mcastaddr->sin_addr.s_addr))) { fprintf(stderr, "qemu: error: specified mcastaddr \"%s\" (0x%08x) does not contain a multicast address\n", inet_ntoa(mcastaddr->sin_addr), (int)ntohl(mcastaddr->sin_addr.s_addr)); return -1; } fd = socket(PF_INET, SOCK_DGRAM, 0); if (fd < 0) { perror("socket(PF_INET, SOCK_DGRAM)"); return -1; } val = 1; ret=setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (const char *)&val, sizeof(val)); if (ret < 0) { perror("setsockopt(SOL_SOCKET, SO_REUSEADDR)"); goto fail; } ret = bind(fd, (struct sockaddr *)mcastaddr, sizeof(*mcastaddr)); if (ret < 0) { perror("bind"); goto fail; } /* Add host to multicast group */ imr.imr_multiaddr = mcastaddr->sin_addr; imr.imr_interface.s_addr = htonl(INADDR_ANY); ret = setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, (const char *)&imr, sizeof(struct ip_mreq)); if (ret < 0) { perror("setsockopt(IP_ADD_MEMBERSHIP)"); goto fail; } /* Force mcast msgs to loopback (eg. several QEMUs in same host */ val = 1; ret=setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, (const char *)&val, sizeof(val)); if (ret < 0) { perror("setsockopt(SOL_IP, IP_MULTICAST_LOOP)"); goto fail; } socket_set_nonblock(fd); return fd; fail: if (fd >= 0) closesocket(fd); return -1; } | 3,081 |
1 | static void packet_id_queue_add(struct PacketIdQueue *q, uint64_t id) { USBRedirDevice *dev = q->dev; struct PacketIdQueueEntry *e; DPRINTF("adding packet id %"PRIu64" to %s queue\n", id, q->name); e = g_malloc0(sizeof(struct PacketIdQueueEntry)); e->id = id; QTAILQ_INSERT_TAIL(&q->head, e, next); q->size++; } | 3,083 |
1 | static void xtensa_cpu_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); CPUClass *cc = CPU_CLASS(oc); XtensaCPUClass *xcc = XTENSA_CPU_CLASS(cc); xcc->parent_realize = dc->realize; dc->realize = xtensa_cpu_realizefn; xcc->parent_reset = cc->reset; cc->reset = xtensa_cpu_reset; cc->class_by_name = xtensa_cpu_class_by_name; cc->has_work = xtensa_cpu_has_work; cc->do_interrupt = xtensa_cpu_do_interrupt; cc->cpu_exec_interrupt = xtensa_cpu_exec_interrupt; cc->dump_state = xtensa_cpu_dump_state; cc->set_pc = xtensa_cpu_set_pc; cc->gdb_read_register = xtensa_cpu_gdb_read_register; cc->gdb_write_register = xtensa_cpu_gdb_write_register; cc->gdb_stop_before_watchpoint = true; #ifndef CONFIG_USER_ONLY cc->do_unaligned_access = xtensa_cpu_do_unaligned_access; cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug; cc->do_unassigned_access = xtensa_cpu_do_unassigned_access; #endif cc->debug_excp_handler = xtensa_breakpoint_handler; dc->vmsd = &vmstate_xtensa_cpu; /* * Reason: xtensa_cpu_initfn() calls cpu_exec_init(), which saves * the object in cpus -> dangling pointer after final * object_unref(). */ dc->cannot_destroy_with_object_finalize_yet = true; } | 3,084 |
1 | static void fdt_add_gic_node(const VirtBoardInfo *vbi) { uint32_t gic_phandle; gic_phandle = qemu_fdt_alloc_phandle(vbi->fdt); qemu_fdt_setprop_cell(vbi->fdt, "/", "interrupt-parent", gic_phandle); qemu_fdt_add_subnode(vbi->fdt, "/intc"); /* 'cortex-a15-gic' means 'GIC v2' */ qemu_fdt_setprop_string(vbi->fdt, "/intc", "compatible", "arm,cortex-a15-gic"); qemu_fdt_setprop_cell(vbi->fdt, "/intc", "#interrupt-cells", 3); qemu_fdt_setprop(vbi->fdt, "/intc", "interrupt-controller", NULL, 0); qemu_fdt_setprop_sized_cells(vbi->fdt, "/intc", "reg", 2, vbi->memmap[VIRT_GIC_DIST].base, 2, vbi->memmap[VIRT_GIC_DIST].size, 2, vbi->memmap[VIRT_GIC_CPU].base, 2, vbi->memmap[VIRT_GIC_CPU].size); qemu_fdt_setprop_cell(vbi->fdt, "/intc", "phandle", gic_phandle); } | 3,085 |
0 | static inline void downmix_3f_to_mono(float *samples) { int i; for (i = 0; i < 256; i++) { samples[i] += (samples[i + 256] + samples[i + 512]); samples[i + 256] = samples[i + 512] = 0; } } | 3,086 |
1 | static int mov_read_stsd(MOVContext *c, AVIOContext *pb, MOVAtom atom) { AVStream *st; MOVStreamContext *sc; int ret, entries; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams - 1]; sc = st->priv_data; avio_r8(pb); /* version */ avio_rb24(pb); /* flags */ entries = avio_rb32(pb); if (entries <= 0) { av_log(c->fc, AV_LOG_ERROR, "invalid STSD entries %d\n", entries); return AVERROR_INVALIDDATA; } if (sc->extradata) { av_log(c->fc, AV_LOG_ERROR, "Duplicate stsd found in this track.\n"); return AVERROR_INVALIDDATA; } /* Prepare space for hosting multiple extradata. */ sc->extradata = av_mallocz_array(entries, sizeof(*sc->extradata)); if (!sc->extradata) return AVERROR(ENOMEM); sc->extradata_size = av_mallocz_array(entries, sizeof(*sc->extradata_size)); if (!sc->extradata_size) { ret = AVERROR(ENOMEM); goto fail; } ret = ff_mov_read_stsd_entries(c, pb, entries); if (ret < 0) goto fail; sc->stsd_count = entries; /* Restore back the primary extradata. */ av_freep(&st->codecpar->extradata); st->codecpar->extradata_size = sc->extradata_size[0]; if (sc->extradata_size[0]) { st->codecpar->extradata = av_mallocz(sc->extradata_size[0] + AV_INPUT_BUFFER_PADDING_SIZE); if (!st->codecpar->extradata) return AVERROR(ENOMEM); memcpy(st->codecpar->extradata, sc->extradata[0], sc->extradata_size[0]); } return mov_finalize_stsd_codec(c, pb, st, sc); fail: av_freep(&sc->extradata); av_freep(&sc->extradata_size); return ret; } | 3,087 |
1 | static void i6300esb_restart_timer(I6300State *d, int stage) { int64_t timeout; if (!d->enabled) return; d->stage = stage; if (d->stage <= 1) timeout = d->timer1_preload; else timeout = d->timer2_preload; if (d->clock_scale == CLOCK_SCALE_1KHZ) timeout <<= 15; else timeout <<= 5; /* Get the timeout in units of ticks_per_sec. */ timeout = get_ticks_per_sec() * timeout / 33000000; i6300esb_debug("stage %d, timeout %" PRIi64 "\n", d->stage, timeout); timer_mod(d->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + timeout); } | 3,088 |
1 | static void uhci_queue_fill(UHCIQueue *q, UHCI_TD *td) { uint32_t int_mask = 0; uint32_t plink = td->link; UHCI_TD ptd; int ret; while (is_valid(plink)) { uhci_read_td(q->uhci, &ptd, plink); if (!(ptd.ctrl & TD_CTRL_ACTIVE)) { break; } if (uhci_queue_token(&ptd) != q->token) { break; } trace_usb_uhci_td_queue(plink & ~0xf, ptd.ctrl, ptd.token); ret = uhci_handle_td(q->uhci, q, &ptd, plink, &int_mask); if (ret == TD_RESULT_ASYNC_CONT) { break; } assert(ret == TD_RESULT_ASYNC_START); assert(int_mask == 0); plink = ptd.link; } usb_device_flush_ep_queue(q->ep->dev, q->ep); } | 3,089 |
1 | static int hls_write_trailer(struct AVFormatContext *s) { HLSContext *hls = s->priv_data; AVFormatContext *oc = hls->avf; av_write_trailer(oc); hls->size = avio_tell(hls->avf->pb) - hls->start_pos; avio_closep(&oc->pb); avformat_free_context(oc); av_free(hls->basename); hls_append_segment(hls, hls->duration, hls->start_pos, hls->size); hls_window(s, 1); hls_free_segments(hls); avio_close(hls->pb); return 0; } | 3,090 |
1 | static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx) { uint16_t num_heads = vring_avail_idx(vq) - idx; /* Check it isn't doing very strange things with descriptor numbers. */ if (num_heads > vq->vring.num) { error_report("Guest moved used index from %u to %u", idx, vring_avail_idx(vq)); exit(1); } return num_heads; } | 3,091 |
1 | static void xhci_port_write(void *ptr, hwaddr reg, uint64_t val, unsigned size) { XHCIPort *port = ptr; uint32_t portsc; trace_usb_xhci_port_write(port->portnr, reg, val); switch (reg) { case 0x00: /* PORTSC */ portsc = port->portsc; /* write-1-to-clear bits*/ portsc &= ~(val & (PORTSC_CSC|PORTSC_PEC|PORTSC_WRC|PORTSC_OCC| PORTSC_PRC|PORTSC_PLC|PORTSC_CEC)); if (val & PORTSC_LWS) { /* overwrite PLS only when LWS=1 */ uint32_t pls = get_field(val, PORTSC_PLS); set_field(&portsc, pls, PORTSC_PLS); trace_usb_xhci_port_link(port->portnr, pls); } /* read/write bits */ portsc &= ~(PORTSC_PP|PORTSC_WCE|PORTSC_WDE|PORTSC_WOE); portsc |= (val & (PORTSC_PP|PORTSC_WCE|PORTSC_WDE|PORTSC_WOE)); port->portsc = portsc; /* write-1-to-start bits */ if (val & PORTSC_PR) { xhci_port_reset(port); } break; case 0x04: /* PORTPMSC */ case 0x08: /* PORTLI */ default: trace_usb_xhci_unimplemented("port write", reg); } } | 3,092 |
1 | static void nbd_restart_write(void *opaque) { BlockDriverState *bs = opaque; qemu_coroutine_enter(nbd_get_client_session(bs)->send_coroutine, NULL); } | 3,093 |
1 | static int rtp_write_header(AVFormatContext *s1) { RTPMuxContext *s = s1->priv_data; int max_packet_size, n; AVStream *st; if (s1->nb_streams != 1) return -1; st = s1->streams[0]; if (!is_supported(st->codec->codec_id)) { av_log(s1, AV_LOG_ERROR, "Unsupported codec %x\n", st->codec->codec_id); return -1; } if (s->payload_type < 0) s->payload_type = ff_rtp_get_payload_type(s1, st->codec); s->base_timestamp = av_get_random_seed(); s->timestamp = s->base_timestamp; s->cur_timestamp = 0; s->ssrc = av_get_random_seed(); s->first_packet = 1; s->first_rtcp_ntp_time = ff_ntp_time(); if (s1->start_time_realtime) /* Round the NTP time to whole milliseconds. */ s->first_rtcp_ntp_time = (s1->start_time_realtime / 1000) * 1000 + NTP_OFFSET_US; max_packet_size = s1->pb->max_packet_size; if (max_packet_size <= 12) return AVERROR(EIO); s->buf = av_malloc(max_packet_size); if (s->buf == NULL) { return AVERROR(ENOMEM); } s->max_payload_size = max_packet_size - 12; s->max_frames_per_packet = 0; if (s1->max_delay) { if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { if (st->codec->frame_size == 0) { av_log(s1, AV_LOG_ERROR, "Cannot respect max delay: frame size = 0\n"); } else { s->max_frames_per_packet = av_rescale_rnd(s1->max_delay, st->codec->sample_rate, AV_TIME_BASE * st->codec->frame_size, AV_ROUND_DOWN); } } if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { /* FIXME: We should round down here... */ s->max_frames_per_packet = av_rescale_q(s1->max_delay, (AVRational){1, 1000000}, st->codec->time_base); } } avpriv_set_pts_info(st, 32, 1, 90000); switch(st->codec->codec_id) { case CODEC_ID_MP2: case CODEC_ID_MP3: s->buf_ptr = s->buf + 4; break; case CODEC_ID_MPEG1VIDEO: case CODEC_ID_MPEG2VIDEO: break; case CODEC_ID_MPEG2TS: n = s->max_payload_size / TS_PACKET_SIZE; if (n < 1) n = 1; s->max_payload_size = n * TS_PACKET_SIZE; s->buf_ptr = s->buf; break; case CODEC_ID_H264: /* check for H.264 MP4 syntax */ if (st->codec->extradata_size > 4 && st->codec->extradata[0] == 1) { s->nal_length_size = (st->codec->extradata[4] & 0x03) + 1; } break; case CODEC_ID_VORBIS: case CODEC_ID_THEORA: if (!s->max_frames_per_packet) s->max_frames_per_packet = 15; s->max_frames_per_packet = av_clip(s->max_frames_per_packet, 1, 15); s->max_payload_size -= 6; // ident+frag+tdt/vdt+pkt_num+pkt_length s->num_frames = 0; goto defaultcase; case CODEC_ID_VP8: av_log(s1, AV_LOG_ERROR, "RTP VP8 payload implementation is " "incompatible with the latest spec drafts.\n"); break; case CODEC_ID_ADPCM_G722: /* Due to a historical error, the clock rate for G722 in RTP is * 8000, even if the sample rate is 16000. See RFC 3551. */ avpriv_set_pts_info(st, 32, 1, 8000); break; case CODEC_ID_AMR_NB: case CODEC_ID_AMR_WB: if (!s->max_frames_per_packet) s->max_frames_per_packet = 12; if (st->codec->codec_id == CODEC_ID_AMR_NB) n = 31; else n = 61; /* max_header_toc_size + the largest AMR payload must fit */ if (1 + s->max_frames_per_packet + n > s->max_payload_size) { av_log(s1, AV_LOG_ERROR, "RTP max payload size too small for AMR\n"); return -1; } if (st->codec->channels != 1) { av_log(s1, AV_LOG_ERROR, "Only mono is supported\n"); return -1; } case CODEC_ID_AAC: s->num_frames = 0; default: defaultcase: if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { avpriv_set_pts_info(st, 32, 1, st->codec->sample_rate); } s->buf_ptr = s->buf; break; } return 0; } | 3,094 |
1 | PPC_OP(mulhwu) { T0 = ((uint64_t)T0 * (uint64_t)T1) >> 32; RETURN(); } | 3,095 |
1 | static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q, AVPacket *avpkt) { mfxVideoParam param = { { 0 } }; mfxBitstream bs = { { { 0 } } }; int ret; enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV, AV_PIX_FMT_NV12, AV_PIX_FMT_NONE }; ret = ff_get_format(avctx, pix_fmts); if (ret < 0) return ret; avctx->pix_fmt = ret; q->iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY; if (avctx->hwaccel_context) { AVQSVContext *qsv = avctx->hwaccel_context; q->session = qsv->session; q->iopattern = qsv->iopattern; q->ext_buffers = qsv->ext_buffers; q->nb_ext_buffers = qsv->nb_ext_buffers; } if (!q->session) { if (!q->internal_qs.session) { ret = ff_qsv_init_internal_session(avctx, &q->internal_qs, q->load_plugins); if (ret < 0) return ret; } q->session = q->internal_qs.session; } if (avpkt->size) { bs.Data = avpkt->data; bs.DataLength = avpkt->size; bs.MaxLength = bs.DataLength; bs.TimeStamp = avpkt->pts; } else return AVERROR_INVALIDDATA; ret = ff_qsv_codec_id_to_mfx(avctx->codec_id); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Unsupported codec_id %08x\n", avctx->codec_id); return ret; } param.mfx.CodecId = ret; ret = MFXVideoDECODE_DecodeHeader(q->session, &bs, ¶m); if (MFX_ERR_MORE_DATA==ret) { /* this code means that header not found so we return packet size to skip a current packet */ return avpkt->size; } else if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Decode header error %d\n", ret); return ff_qsv_error(ret); } param.IOPattern = q->iopattern; param.AsyncDepth = q->async_depth; param.ExtParam = q->ext_buffers; param.NumExtParam = q->nb_ext_buffers; param.mfx.FrameInfo.BitDepthLuma = 8; param.mfx.FrameInfo.BitDepthChroma = 8; ret = MFXVideoDECODE_Init(q->session, ¶m); if (ret < 0) { if (MFX_ERR_INVALID_VIDEO_PARAM==ret) { av_log(avctx, AV_LOG_ERROR, "Error initializing the MFX video decoder, unsupported video\n"); } else { av_log(avctx, AV_LOG_ERROR, "Error initializing the MFX video decoder %d\n", ret); } return ff_qsv_error(ret); } avctx->profile = param.mfx.CodecProfile; avctx->level = param.mfx.CodecLevel; avctx->coded_width = param.mfx.FrameInfo.Width; avctx->coded_height = param.mfx.FrameInfo.Height; avctx->width = param.mfx.FrameInfo.CropW - param.mfx.FrameInfo.CropX; avctx->height = param.mfx.FrameInfo.CropH - param.mfx.FrameInfo.CropY; /* maximum decoder latency should be not exceed max DPB size for h.264 and HEVC which is 16 for both cases. So weare pre-allocating fifo big enough for 17 elements: */ if (!q->async_fifo) { q->async_fifo = av_fifo_alloc((1 + 16) * (sizeof(mfxSyncPoint*) + sizeof(QSVFrame*))); if (!q->async_fifo) return AVERROR(ENOMEM); } if (!q->input_fifo) { q->input_fifo = av_fifo_alloc(1024*16); if (!q->input_fifo) return AVERROR(ENOMEM); } if (!q->pkt_fifo) { q->pkt_fifo = av_fifo_alloc( sizeof(AVPacket) * (1 + 16) ); if (!q->pkt_fifo) return AVERROR(ENOMEM); } q->engine_ready = 1; return 0; } | 3,096 |
1 | static int compat_read(AVFilterContext *ctx, AVFilterBufferRef **pbuf, int nb_samples, int flags) { AVFilterBufferRef *buf; AVFrame *frame; int ret; if (!pbuf) return ff_poll_frame(ctx->inputs[0]); frame = av_frame_alloc(); if (!frame) return AVERROR(ENOMEM); if (!nb_samples) ret = av_buffersink_get_frame_flags(ctx, frame, flags); else ret = av_buffersink_get_samples(ctx, frame, nb_samples); if (ret < 0) goto fail; if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) { buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize, AV_PERM_READ, frame->width, frame->height, frame->format); } else { buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data, frame->linesize[0], AV_PERM_READ, frame->nb_samples, frame->format, frame->channel_layout); } if (!buf) { ret = AVERROR(ENOMEM); goto fail; } avfilter_copy_frame_props(buf, frame); buf->buf->priv = frame; buf->buf->free = compat_free_buffer; *pbuf = buf; return 0; fail: av_frame_free(&frame); return ret; } | 3,097 |
1 | yuv2yuvX16_c_template(const int16_t *lumFilter, const int32_t **lumSrc, int lumFilterSize, const int16_t *chrFilter, const int32_t **chrUSrc, const int32_t **chrVSrc, int chrFilterSize, const int32_t **alpSrc, uint16_t *dest[4], int dstW, int chrDstW, int big_endian, int output_bits) { //FIXME Optimize (just quickly written not optimized..) int i; int dword= output_bits == 16; uint16_t *yDest = dest[0], *uDest = dest[1], *vDest = dest[2], *aDest = CONFIG_SWSCALE_ALPHA ? dest[3] : NULL; int shift = 11 + 4*dword + 16 - output_bits; #define output_pixel(pos, val) \ if (big_endian) { \ if (output_bits == 16) { \ AV_WB16(pos, av_clip_uint16(val >> shift)); \ } else { \ AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \ } \ } else { \ if (output_bits == 16) { \ AV_WL16(pos, av_clip_uint16(val >> shift)); \ } else { \ AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \ } \ } for (i = 0; i < dstW; i++) { int val = 1 << (26-output_bits + 4*dword); int j; for (j = 0; j < lumFilterSize; j++) val += (dword ? lumSrc[j][i] : ((int16_t**)lumSrc)[j][i]) * lumFilter[j]; output_pixel(&yDest[i], val); } if (uDest) { for (i = 0; i < chrDstW; i++) { int u = 1 << (26-output_bits + 4*dword); int v = 1 << (26-output_bits + 4*dword); int j; for (j = 0; j < chrFilterSize; j++) { u += (dword ? chrUSrc[j][i] : ((int16_t**)chrUSrc)[j][i]) * chrFilter[j]; v += (dword ? chrVSrc[j][i] : ((int16_t**)chrVSrc)[j][i]) * chrFilter[j]; } output_pixel(&uDest[i], u); output_pixel(&vDest[i], v); } } if (CONFIG_SWSCALE_ALPHA && aDest) { for (i = 0; i < dstW; i++) { int val = 1 << (26-output_bits + 4*dword); int j; for (j = 0; j < lumFilterSize; j++) val += (dword ? alpSrc[j][i] : ((int16_t**)alpSrc)[j][i]) * lumFilter[j]; output_pixel(&aDest[i], val); } } #undef output_pixel } | 3,099 |
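The row above (id 3,099) clips each filtered accumulator to the output bit depth inside its output_pixel() macro. A standalone sketch of that shift-then-clip step, with illustrative shift values rather than the exact swscale ones:

```c
/* Sketch of the output_pixel() idea above: accumulate in a wide integer,
 * shift down, then clip to the target bit depth. Shift values here are
 * illustrative, not the exact swscale constants. */
#include <stdint.h>
#include <stdio.h>

static int clip_uintp2(int v, int bits)
{
    int max = (1 << bits) - 1;
    if (v < 0)   return 0;
    if (v > max) return max;
    return v;
}

static int scale_output(int64_t acc, int shift, int output_bits)
{
    return clip_uintp2((int)(acc >> shift), output_bits);
}

int main(void)
{
    /* a 10-bit output sample computed from a wide accumulator */
    int64_t acc = (int64_t)812 << 17;
    printf("%d\n", scale_output(acc, 17, 10));  /* prints 812            */
    printf("%d\n", scale_output(-5, 0, 10));    /* clipped up to 0       */
    printf("%d\n", scale_output(5000, 0, 10));  /* clipped down to 1023  */
    return 0;
}
```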
1 | void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) { CPUState *cpu; PageDesc *p; uint32_t h; tb_page_addr_t phys_pc; assert_tb_locked(); atomic_set(&tb->cflags, tb->cflags | CF_INVALID); /* remove the TB from the hash list */ phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->cflags & CF_HASH_MASK, tb->trace_vcpu_dstate); qht_remove(&tb_ctx.htable, tb, h); /* remove the TB from the page list */ if (tb->page_addr[0] != page_addr) { p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); tb_page_remove(&p->first_tb, tb); invalidate_page_bitmap(p); } if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) { p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); tb_page_remove(&p->first_tb, tb); invalidate_page_bitmap(p); } /* remove the TB from the hash list */ h = tb_jmp_cache_hash_func(tb->pc); CPU_FOREACH(cpu) { if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) { atomic_set(&cpu->tb_jmp_cache[h], NULL); } } /* suppress this TB from the two jump lists */ tb_remove_from_jmp_list(tb, 0); tb_remove_from_jmp_list(tb, 1); /* suppress any remaining jumps to this TB */ tb_jmp_unlink(tb); tb_ctx.tb_phys_invalidate_count++; } | 3,100 |
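The row above (id 3,100) unlinks a translation block from a hash table, two per-page lists, and a per-CPU jump cache. A minimal standalone sketch of the pointer-to-pointer unlink pattern that list helpers such as tb_page_remove() typically rely on; the struct and function names here are invented for illustration:

```c
/* Removing a node from an intrusive, NULL-terminated singly linked list
 * via a pointer-to-pointer walk: no "prev" pointer is needed. */
#include <stdio.h>

struct node {                 /* hypothetical element type */
    int value;
    struct node *next;
};

static void list_remove(struct node **head, struct node *target)
{
    struct node **pp = head;

    while (*pp) {
        if (*pp == target) {
            *pp = target->next;   /* unlink in place */
            target->next = NULL;
            return;
        }
        pp = &(*pp)->next;
    }
}

int main(void)
{
    struct node a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };
    struct node *head = &a;
    a.next = &b;
    b.next = &c;

    list_remove(&head, &b);       /* drop the middle element */

    for (struct node *n = head; n; n = n->next)
        printf("%d ", n->value);  /* prints: 1 3 */
    printf("\n");
    return 0;
}
```

The pointer-to-pointer form handles removal of the head element and of interior elements with the same code path, which is why intrusive-list code in C tends to prefer it.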
1 | iscsi_synccache10_cb(struct iscsi_context *iscsi, int status, void *command_data, void *opaque) { IscsiAIOCB *acb = opaque; if (acb->canceled != 0) { qemu_aio_release(acb); scsi_free_scsi_task(acb->task); acb->task = NULL; return; } acb->status = 0; if (status < 0) { error_report("Failed to sync10 data on iSCSI lun. %s", iscsi_get_error(iscsi)); acb->status = -EIO; } iscsi_schedule_bh(acb); scsi_free_scsi_task(acb->task); acb->task = NULL; } | 3,101 |
1 | static void spectral_to_sample(AACContext *ac) { int i, type; void (*imdct_and_window)(AACContext *ac, SingleChannelElement *sce); switch (ac->oc[1].m4ac.object_type) { case AOT_ER_AAC_LD: imdct_and_window = imdct_and_windowing_ld; break; case AOT_ER_AAC_ELD: imdct_and_window = imdct_and_windowing_eld; break; default: imdct_and_window = ac->imdct_and_windowing; } for (type = 3; type >= 0; type--) { for (i = 0; i < MAX_ELEM_ID; i++) { ChannelElement *che = ac->che[type][i]; if (che && che->present) { if (type <= TYPE_CPE) apply_channel_coupling(ac, che, type, i, BEFORE_TNS, AAC_RENAME(apply_dependent_coupling)); if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) { if (che->ch[0].ics.predictor_present) { if (che->ch[0].ics.ltp.present) ac->apply_ltp(ac, &che->ch[0]); if (che->ch[1].ics.ltp.present && type == TYPE_CPE) ac->apply_ltp(ac, &che->ch[1]); } } if (che->ch[0].tns.present) ac->apply_tns(che->ch[0].coeffs, &che->ch[0].tns, &che->ch[0].ics, 1); if (che->ch[1].tns.present) ac->apply_tns(che->ch[1].coeffs, &che->ch[1].tns, &che->ch[1].ics, 1); if (type <= TYPE_CPE) apply_channel_coupling(ac, che, type, i, BETWEEN_TNS_AND_IMDCT, AAC_RENAME(apply_dependent_coupling)); if (type != TYPE_CCE || che->coup.coupling_point == AFTER_IMDCT) { imdct_and_window(ac, &che->ch[0]); if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) ac->update_ltp(ac, &che->ch[0]); if (type == TYPE_CPE) { imdct_and_window(ac, &che->ch[1]); if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) ac->update_ltp(ac, &che->ch[1]); } if (ac->oc[1].m4ac.sbr > 0) { AAC_RENAME(ff_sbr_apply)(ac, &che->sbr, type, che->ch[0].ret, che->ch[1].ret); } } if (type <= TYPE_CCE) apply_channel_coupling(ac, che, type, i, AFTER_IMDCT, AAC_RENAME(apply_independent_coupling)); #if USE_FIXED { int j; /* preparation for resampler */ for(j = 0; j<2048; j++){ che->ch[0].ret[j] = (int32_t)av_clipl_int32((int64_t)che->ch[0].ret[j]<<7)+0x8000; che->ch[1].ret[j] = (int32_t)av_clipl_int32((int64_t)che->ch[1].ret[j]<<7)+0x8000; } } #endif /* USE_FIXED */ che->present = 0; } else if (che) { av_log(ac->avctx, AV_LOG_VERBOSE, "ChannelElement %d.%d missing \n", type, i); } } } } | 3,102 |
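The row above (id 3,102) picks an IMDCT/windowing routine once, based on the object type, and then calls it through the imdct_and_window function pointer. A small self-contained sketch of that dispatch pattern, with made-up object types and handlers:

```c
/* Pick a per-object-type transform routine once, then use the same call
 * site for every channel. Types and handler names are invented. */
#include <stdio.h>

enum object_type { OT_DEFAULT, OT_LD, OT_ELD };

static void window_default(int ch) { printf("default windowing, ch %d\n", ch); }
static void window_ld(int ch)      { printf("LD windowing, ch %d\n", ch); }
static void window_eld(int ch)     { printf("ELD windowing, ch %d\n", ch); }

int main(void)
{
    enum object_type ot = OT_ELD;
    void (*imdct_and_window)(int ch);

    switch (ot) {
    case OT_LD:  imdct_and_window = window_ld;  break;
    case OT_ELD: imdct_and_window = window_eld; break;
    default:     imdct_and_window = window_default;
    }

    for (int ch = 0; ch < 2; ch++)
        imdct_and_window(ch);   /* same call regardless of object type */
    return 0;
}
```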
1 | void qmp_guest_set_user_password(const char *username, const char *password, bool crypted, Error **errp) { NET_API_STATUS nas; char *rawpasswddata = NULL; size_t rawpasswdlen; wchar_t *user, *wpass; USER_INFO_1003 pi1003 = { 0, }; if (crypted) { error_setg(errp, QERR_UNSUPPORTED); return; } rawpasswddata = (char *)qbase64_decode(password, -1, &rawpasswdlen, errp); if (!rawpasswddata) { return; } rawpasswddata = g_renew(char, rawpasswddata, rawpasswdlen + 1); rawpasswddata[rawpasswdlen] = '\0'; user = g_utf8_to_utf16(username, -1, NULL, NULL, NULL); wpass = g_utf8_to_utf16(rawpasswddata, -1, NULL, NULL, NULL); pi1003.usri1003_password = wpass; nas = NetUserSetInfo(NULL, user, 1003, (LPBYTE)&pi1003, NULL); if (nas != NERR_Success) { gchar *msg = get_net_error_message(nas); error_setg(errp, "failed to set password: %s", msg); g_free(msg); } g_free(user); g_free(wpass); g_free(rawpasswddata); } | 3,103 |
1 | static inline void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst, long dstWidth, const uint8_t *src, int srcW, int xInc) { int32_t *filterPos = c->hLumFilterPos; int16_t *filter = c->hLumFilter; int canMMX2BeUsed = c->canMMX2BeUsed; void *mmx2FilterCode= c->lumMmx2FilterCode; int i; #if defined(PIC) DECLARE_ALIGNED(8, uint64_t, ebxsave); #endif __asm__ volatile( #if defined(PIC) "mov %%"REG_b", %5 \n\t" #endif "pxor %%mm7, %%mm7 \n\t" "mov %0, %%"REG_c" \n\t" "mov %1, %%"REG_D" \n\t" "mov %2, %%"REG_d" \n\t" "mov %3, %%"REG_b" \n\t" "xor %%"REG_a", %%"REG_a" \n\t" // i PREFETCH" (%%"REG_c") \n\t" PREFETCH" 32(%%"REG_c") \n\t" PREFETCH" 64(%%"REG_c") \n\t" #if ARCH_X86_64 #define CALL_MMX2_FILTER_CODE \ "movl (%%"REG_b"), %%esi \n\t"\ "call *%4 \n\t"\ "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\ "add %%"REG_S", %%"REG_c" \n\t"\ "add %%"REG_a", %%"REG_D" \n\t"\ "xor %%"REG_a", %%"REG_a" \n\t"\ #else #define CALL_MMX2_FILTER_CODE \ "movl (%%"REG_b"), %%esi \n\t"\ "call *%4 \n\t"\ "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\ "add %%"REG_a", %%"REG_D" \n\t"\ "xor %%"REG_a", %%"REG_a" \n\t"\ #endif /* ARCH_X86_64 */ CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE CALL_MMX2_FILTER_CODE #if defined(PIC) "mov %5, %%"REG_b" \n\t" #endif :: "m" (src), "m" (dst), "m" (filter), "m" (filterPos), "m" (mmx2FilterCode) #if defined(PIC) ,"m" (ebxsave) #endif : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D #if !defined(PIC) ,"%"REG_b #endif ); for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128; } | 3,104 |
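The row above (id 3,104) is the MMX2 fast horizontal luma scaler; source positions advance in 16.16 fixed point (xInc) and the tail loop fills edge pixels with src[srcW-1]*128. A plain-C sketch of that kind of fixed-point horizontal scaling, using simple linear interpolation rather than the exact filter the assembly implements:

```c
/* 16.16 fixed-point horizontal scaling with linear interpolation, scaled to
 * 7 extra bits of precision to match the "src[srcW-1]*128" edge fill above.
 * This is an illustrative sketch, not the swscale-generated filter. */
#include <stdint.h>
#include <stdio.h>

static void hscale_c(int16_t *dst, int dstW,
                     const uint8_t *src, int srcW, int xInc)
{
    for (int i = 0; i < dstW; i++) {
        int pos  = (i * xInc) >> 16;          /* integer source position */
        int frac = (i * xInc) & 0xffff;       /* 16-bit fractional part  */

        if (pos >= srcW - 1) {                /* clamp at the right edge */
            dst[i] = src[srcW - 1] * 128;
        } else {
            int a = src[pos], b = src[pos + 1];
            dst[i] = (int16_t)((a * (65536 - frac) + b * frac) >> 9);
        }
    }
}

int main(void)
{
    const uint8_t src[4] = { 0, 64, 128, 255 };
    int16_t dst[8];

    hscale_c(dst, 8, src, 4, (4 << 16) / 8);  /* upscale 4 -> 8 samples */
    for (int i = 0; i < 8; i++)
        printf("%d ", dst[i]);
    printf("\n");
    return 0;
}
```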
1 | static uint64_t imx_serial_read(void *opaque, hwaddr offset, unsigned size) { IMXSerialState *s = (IMXSerialState *)opaque; uint32_t c; DPRINTF("read(offset=%x)\n", offset >> 2); switch (offset >> 2) { case 0x0: /* URXD */ c = s->readbuff; if (!(s->uts1 & UTS1_RXEMPTY)) { /* Character is valid */ c |= URXD_CHARRDY; s->usr1 &= ~USR1_RRDY; s->usr2 &= ~USR2_RDR; s->uts1 |= UTS1_RXEMPTY; imx_update(s); qemu_chr_accept_input(s->chr); } return c; case 0x20: /* UCR1 */ return s->ucr1; case 0x21: /* UCR2 */ return s->ucr2; case 0x25: /* USR1 */ return s->usr1; case 0x26: /* USR2 */ return s->usr2; case 0x2A: /* BRM Modulator */ return s->ubmr; case 0x2B: /* Baud Rate Count */ return s->ubrc; case 0x2d: /* Test register */ return s->uts1; case 0x24: /* UFCR */ return s->ufcr; case 0x2c: return s->onems; case 0x22: /* UCR3 */ return s->ucr3; case 0x23: /* UCR4 */ case 0x29: /* BRM Incremental */ return 0x0; /* TODO */ default: IPRINTF("%s: bad offset: 0x%x\n", __func__, (int)offset); return 0; } } | 3,107 |
0 | static int display_end_segment(AVCodecContext *avctx, void *data, const uint8_t *buf, int buf_size) { AVSubtitle *sub = data; PGSSubContext *ctx = avctx->priv_data; /* * The end display time is a timeout value and is only reached * if the next subtitle is later than the timeout or the subtitle has * not been cleared by a subsequent empty display command. */ memset(sub, 0, sizeof(*sub)); // Blank if last object_number was 0. // Note that this may be wrong for more complex subtitles. if (!ctx->presentation.object_number) return 1; sub->start_display_time = 0; sub->end_display_time = 20000; sub->format = 0; sub->rects = av_mallocz(sizeof(*sub->rects)); sub->rects[0] = av_mallocz(sizeof(*sub->rects[0])); sub->num_rects = 1; sub->rects[0]->x = ctx->presentation.x; sub->rects[0]->y = ctx->presentation.y; sub->rects[0]->w = ctx->picture.w; sub->rects[0]->h = ctx->picture.h; sub->rects[0]->type = SUBTITLE_BITMAP; /* Process bitmap */ sub->rects[0]->pict.linesize[0] = ctx->picture.w; if (ctx->picture.rle) { if (ctx->picture.rle_remaining_len) av_log(avctx, AV_LOG_ERROR, "RLE data length %u is %u bytes shorter than expected\n", ctx->picture.rle_data_len, ctx->picture.rle_remaining_len); if(decode_rle(avctx, sub, ctx->picture.rle, ctx->picture.rle_data_len) < 0) return 0; } /* Allocate memory for colors */ sub->rects[0]->nb_colors = 256; sub->rects[0]->pict.data[1] = av_mallocz(AVPALETTE_SIZE); memcpy(sub->rects[0]->pict.data[1], ctx->clut, sub->rects[0]->nb_colors * sizeof(uint32_t)); return 1; } | 3,108
0 | static int mov_write_tkhd_tag(AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track, AVStream *st) { int64_t duration = av_rescale_rnd(track->track_duration, MOV_TIMESCALE, track->timescale, AV_ROUND_UP); int version = duration < INT32_MAX ? 0 : 1; int flags = MOV_TKHD_FLAG_IN_MOVIE; int rotation = 0; int group = 0; uint32_t *display_matrix = NULL; int display_matrix_size, i; if (st) { if (mov->per_stream_grouping) group = st->index; else group = st->codec->codec_type; display_matrix = (uint32_t*)av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, &display_matrix_size); if (display_matrix && display_matrix_size < 9 * sizeof(*display_matrix)) display_matrix = NULL; } if (track->flags & MOV_TRACK_ENABLED) flags |= MOV_TKHD_FLAG_ENABLED; if (track->mode == MODE_ISM) version = 1; (version == 1) ? avio_wb32(pb, 104) : avio_wb32(pb, 92); /* size */ ffio_wfourcc(pb, "tkhd"); avio_w8(pb, version); avio_wb24(pb, flags); if (version == 1) { avio_wb64(pb, track->time); avio_wb64(pb, track->time); } else { avio_wb32(pb, track->time); /* creation time */ avio_wb32(pb, track->time); /* modification time */ } avio_wb32(pb, track->track_id); /* track-id */ avio_wb32(pb, 0); /* reserved */ if (!track->entry && mov->mode == MODE_ISM) (version == 1) ? avio_wb64(pb, UINT64_C(0xffffffffffffffff)) : avio_wb32(pb, 0xffffffff); else if (!track->entry) (version == 1) ? avio_wb64(pb, 0) : avio_wb32(pb, 0); else (version == 1) ? avio_wb64(pb, duration) : avio_wb32(pb, duration); avio_wb32(pb, 0); /* reserved */ avio_wb32(pb, 0); /* reserved */ avio_wb16(pb, 0); /* layer */ avio_wb16(pb, group); /* alternate group) */ /* Volume, only for audio */ if (track->enc->codec_type == AVMEDIA_TYPE_AUDIO) avio_wb16(pb, 0x0100); else avio_wb16(pb, 0); avio_wb16(pb, 0); /* reserved */ /* Matrix structure */ if (st && st->metadata) { AVDictionaryEntry *rot = av_dict_get(st->metadata, "rotate", NULL, 0); rotation = (rot && rot->value) ? atoi(rot->value) : 0; } if (display_matrix) { for (i = 0; i < 9; i++) avio_wb32(pb, display_matrix[i]); } else if (rotation == 90) { write_matrix(pb, 0, 1, -1, 0, track->enc->height, 0); } else if (rotation == 180) { write_matrix(pb, -1, 0, 0, -1, track->enc->width, track->enc->height); } else if (rotation == 270) { write_matrix(pb, 0, -1, 1, 0, 0, track->enc->width); } else { write_matrix(pb, 1, 0, 0, 1, 0, 0); } /* Track width and height, for visual only */ if (st && (track->enc->codec_type == AVMEDIA_TYPE_VIDEO || track->enc->codec_type == AVMEDIA_TYPE_SUBTITLE)) { if (track->mode == MODE_MOV) { avio_wb32(pb, track->enc->width << 16); avio_wb32(pb, track->height << 16); } else { int64_t track_width_1616 = av_rescale(st->sample_aspect_ratio.num, track->enc->width * 0x10000LL, st->sample_aspect_ratio.den); if (!track_width_1616 || track->height != track->enc->height) track_width_1616 = track->enc->width * 0x10000; avio_wb32(pb, track_width_1616); avio_wb32(pb, track->height * 0x10000); } } else { avio_wb32(pb, 0); avio_wb32(pb, 0); } return 0x5c; } | 3,109 |
0 | static int decode_type1(GetByteContext *gb, PutByteContext *pb) { unsigned opcode, len; int high = 0; int i, pos; while (bytestream2_get_bytes_left(gb) > 0) { GetByteContext gbc; while (bytestream2_get_bytes_left(gb) > 0) { while (bytestream2_get_bytes_left(gb) > 0) { opcode = bytestream2_get_byte(gb); high = opcode >= 0x20; if (high) break; if (opcode) break; opcode = bytestream2_get_byte(gb); if (opcode < 0xF8) { opcode = opcode + 32; break; } i = opcode - 0xF8; if (i) { len = 256; do { len *= 2; --i; } while (i); } else { len = 280; } do { bytestream2_put_le32(pb, bytestream2_get_le32(gb)); bytestream2_put_le32(pb, bytestream2_get_le32(gb)); len -= 8; } while (len && bytestream2_get_bytes_left(gb) > 0); } if (!high) { do { bytestream2_put_byte(pb, bytestream2_get_byte(gb)); --opcode; } while (opcode && bytestream2_get_bytes_left(gb) > 0); while (bytestream2_get_bytes_left(gb) > 0) { GetByteContext gbc; opcode = bytestream2_get_byte(gb); if (opcode >= 0x20) break; bytestream2_init(&gbc, pb->buffer_start, pb->buffer_end - pb->buffer_start); pos = -(opcode | 32 * bytestream2_get_byte(gb)) - 1; bytestream2_seek(&gbc, bytestream2_tell_p(pb) + pos, SEEK_SET); bytestream2_put_byte(pb, bytestream2_get_byte(&gbc)); bytestream2_put_byte(pb, bytestream2_get_byte(&gbc)); bytestream2_put_byte(pb, bytestream2_get_byte(&gbc)); bytestream2_put_byte(pb, bytestream2_get_byte(gb)); } } high = 0; if (opcode < 0x40) break; bytestream2_init(&gbc, pb->buffer_start, pb->buffer_end - pb->buffer_start); pos = (-((opcode & 0x1F) | 32 * bytestream2_get_byte(gb)) - 1); bytestream2_seek(&gbc, bytestream2_tell_p(pb) + pos, SEEK_SET); bytestream2_put_byte(pb, bytestream2_get_byte(&gbc)); bytestream2_put_byte(pb, bytestream2_get_byte(&gbc)); len = (opcode >> 5) - 1; do { bytestream2_put_byte(pb, bytestream2_get_byte(&gbc)); --len; } while (len && bytestream2_get_bytes_left(&gbc) > 0); } len = opcode & 0x1F; if (!len) { if (!bytestream2_peek_byte(gb)) { do { bytestream2_skip(gb, 1); len += 255; } while (!bytestream2_peek_byte(gb) && bytestream2_get_bytes_left(gb) > 0); } len += bytestream2_get_byte(gb) + 31; } pos = -bytestream2_get_byte(gb); bytestream2_init(&gbc, pb->buffer_start, pb->buffer_end - pb->buffer_start); bytestream2_seek(&gbc, bytestream2_tell_p(pb) + pos - (bytestream2_get_byte(gb) << 8), SEEK_SET); if (bytestream2_tell_p(pb) == bytestream2_tell(&gbc)) break; if (len < 5 || bytestream2_tell_p(pb) - bytestream2_tell(&gbc) < 4) { bytestream2_put_byte(pb, bytestream2_get_byte(&gbc)); bytestream2_put_byte(pb, bytestream2_get_byte(&gbc)); bytestream2_put_byte(pb, bytestream2_get_byte(&gbc)); do { bytestream2_put_byte(pb, bytestream2_get_byte(&gbc)); --len; } while (len && bytestream2_get_bytes_left(&gbc) > 0); } else { bytestream2_put_le32(pb, bytestream2_get_le32(&gbc)); len--; do { bytestream2_put_byte(pb, bytestream2_get_byte(&gbc)); len--; } while (len && bytestream2_get_bytes_left(&gbc) > 0); } } return 0; } | 3,110 |
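The row above (id 3,110) repeatedly copies data from earlier positions in the output buffer (back references). A standalone sketch of that core LZ-style copy, unrelated to the exact Type-1 bitstream layout; byte-by-byte copying is what lets overlapping matches (distance shorter than length) replicate recent output:

```c
/* Minimal LZ back-reference copy: copy `length` bytes from `distance`
 * bytes behind the current write position, one byte at a time. */
#include <stdio.h>
#include <string.h>

static size_t lz_copy(unsigned char *out, size_t out_pos,
                      size_t distance, size_t length)
{
    for (size_t i = 0; i < length; i++)
        out[out_pos + i] = out[out_pos + i - distance];
    return out_pos + length;
}

int main(void)
{
    unsigned char buf[32];
    size_t pos;

    memcpy(buf, "ab", 2);
    pos = 2;
    pos = lz_copy(buf, pos, 2, 6);   /* overlapping match: repeats "ab" */
    buf[pos] = '\0';
    printf("%s\n", buf);             /* prints: abababab */
    return 0;
}
```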
0 | void ff_avg_h264_qpel8_mc33_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_hv_qrt_and_aver_dst_8x8_msa(src + stride - 2, src - (stride * 2) + sizeof(uint8_t), stride, dst, stride); } | 3,111 |
0 | static int check_recording_time(OutputStream *ost) { OutputFile *of = output_files[ost->file_index]; if (of->recording_time != INT64_MAX && av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time, AV_TIME_BASE_Q) >= 0) { ost->is_past_recording_time = 1; return 0; } return 1; } | 3,112 |
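The row above (id 3,112) uses av_compare_ts() to compare a stream timestamp against a recording limit expressed in AV_TIME_BASE_Q. A standalone sketch of that cross-timebase comparison; unlike the library helper, this version does not guard against 64-bit overflow:

```c
/* Compare two timestamps given in different time bases by
 * cross-multiplying in 64-bit arithmetic. */
#include <stdint.h>
#include <stdio.h>

struct rational { int num, den; };

/* returns -1 if a is earlier, 0 if equal, 1 if a is later */
static int compare_ts(int64_t a, struct rational tb_a,
                      int64_t b, struct rational tb_b)
{
    int64_t lhs = a * tb_a.num * (int64_t)tb_b.den;
    int64_t rhs = b * tb_b.num * (int64_t)tb_a.den;
    return (lhs > rhs) - (lhs < rhs);
}

int main(void)
{
    struct rational stream_tb = { 1, 90000 };   /* 90 kHz stream clock  */
    struct rational usec_tb   = { 1, 1000000 }; /* microsecond clock    */

    /* 450000 ticks at 90 kHz = 5 s; compare against 5,000,000 us = 5 s */
    printf("%d\n", compare_ts(450000, stream_tb, 5000000, usec_tb)); /* 0 */
    printf("%d\n", compare_ts(450001, stream_tb, 5000000, usec_tb)); /* 1 */
    return 0;
}
```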
1 | static void chs_filter_band_data(DCAXllDecoder *s, DCAXllChSet *c, int band) { DCAXllBand *b = &c->bands[band]; int nsamples = s->nframesamples; int i, j, k; // Inverse adaptive or fixed prediction for (i = 0; i < c->nchannels; i++) { int32_t *buf = b->msb_sample_buffer[i]; int order = b->adapt_pred_order[i]; if (order > 0) { int coeff[DCA_XLL_ADAPT_PRED_ORDER_MAX]; // Conversion from reflection coefficients to direct form coefficients for (j = 0; j < order; j++) { int rc = b->adapt_refl_coeff[i][j]; for (k = 0; k < (j + 1) / 2; k++) { int tmp1 = coeff[ k ]; int tmp2 = coeff[j - k - 1]; coeff[ k ] = tmp1 + mul16(rc, tmp2); coeff[j - k - 1] = tmp2 + mul16(rc, tmp1); } coeff[j] = rc; } // Inverse adaptive prediction for (j = 0; j < nsamples - order; j++) { int64_t err = 0; for (k = 0; k < order; k++) err += (int64_t)buf[j + k] * coeff[order - k - 1]; buf[j + k] -= (SUINT)clip23(norm16(err)); } } else { // Inverse fixed coefficient prediction for (j = 0; j < b->fixed_pred_order[i]; j++) for (k = 1; k < nsamples; k++) buf[k] += buf[k - 1]; } } // Inverse pairwise channel decorrellation if (b->decor_enabled) { int32_t *tmp[DCA_XLL_CHANNELS_MAX]; for (i = 0; i < c->nchannels / 2; i++) { int coeff = b->decor_coeff[i]; if (coeff) { s->dcadsp->decor(b->msb_sample_buffer[i * 2 + 1], b->msb_sample_buffer[i * 2 ], coeff, nsamples); } } // Reorder channel pointers to the original order for (i = 0; i < c->nchannels; i++) tmp[i] = b->msb_sample_buffer[i]; for (i = 0; i < c->nchannels; i++) b->msb_sample_buffer[b->orig_order[i]] = tmp[i]; } // Map output channel pointers for frequency band 0 if (c->nfreqbands == 1) for (i = 0; i < c->nchannels; i++) s->output_samples[c->ch_remap[i]] = b->msb_sample_buffer[i]; } | 3,113 |
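The row above (id 3,113) converts reflection coefficients to direct-form predictor coefficients before running the inverse adaptive prediction. A floating-point sketch of that step-up recursion; the row performs the same in-place symmetric update in fixed point via mul16():

```c
/* Reflection (PARCOR) coefficients to direct-form predictor coefficients,
 * in doubles for readability. */
#include <stdio.h>

#define MAX_ORDER 16

static void refl_to_direct(const double *refl, double *coeff, int order)
{
    for (int j = 0; j < order; j++) {
        double rc = refl[j];
        /* symmetric in-place update of the first j coefficients */
        for (int k = 0; k < (j + 1) / 2; k++) {
            double tmp1 = coeff[k];
            double tmp2 = coeff[j - k - 1];
            coeff[k]         = tmp1 + rc * tmp2;
            coeff[j - k - 1] = tmp2 + rc * tmp1;
        }
        coeff[j] = rc;
    }
}

int main(void)
{
    double refl[3] = { 0.5, -0.25, 0.125 };
    double coeff[MAX_ORDER];

    refl_to_direct(refl, coeff, 3);
    for (int j = 0; j < 3; j++)
        printf("a[%d] = %f\n", j, coeff[j]);
    return 0;
}
```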
1 | void ff_ivi_recompose53(const IVIPlaneDesc *plane, uint8_t *dst, const int dst_pitch, const int num_bands) { int x, y, indx; int32_t p0, p1, p2, p3, tmp0, tmp1, tmp2; int32_t b0_1, b0_2, b1_1, b1_2, b1_3, b2_1, b2_2, b2_3, b2_4, b2_5, b2_6; int32_t b3_1, b3_2, b3_3, b3_4, b3_5, b3_6, b3_7, b3_8, b3_9; int32_t pitch, back_pitch; const IDWTELEM *b0_ptr, *b1_ptr, *b2_ptr, *b3_ptr; /* all bands should have the same pitch */ pitch = plane->bands[0].pitch; /* pixels at the position "y-1" will be set to pixels at the "y" for the 1st iteration */ back_pitch = 0; /* get pointers to the wavelet bands */ b0_ptr = plane->bands[0].buf; b1_ptr = plane->bands[1].buf; b2_ptr = plane->bands[2].buf; b3_ptr = plane->bands[3].buf; for (y = 0; y < plane->height; y += 2) { /* load storage variables with values */ if (num_bands > 0) { b0_1 = b0_ptr[0]; b0_2 = b0_ptr[pitch]; } if (num_bands > 1) { b1_1 = b1_ptr[back_pitch]; b1_2 = b1_ptr[0]; b1_3 = b1_1 - b1_2*6 + b1_ptr[pitch]; } if (num_bands > 2) { b2_2 = b2_ptr[0]; // b2[x, y ] b2_3 = b2_2; // b2[x+1,y ] = b2[x,y] b2_5 = b2_ptr[pitch]; // b2[x ,y+1] b2_6 = b2_5; // b2[x+1,y+1] = b2[x,y+1] } if (num_bands > 3) { b3_2 = b3_ptr[back_pitch]; // b3[x ,y-1] b3_3 = b3_2; // b3[x+1,y-1] = b3[x ,y-1] b3_5 = b3_ptr[0]; // b3[x ,y ] b3_6 = b3_5; // b3[x+1,y ] = b3[x ,y ] b3_8 = b3_2 - b3_5*6 + b3_ptr[pitch]; b3_9 = b3_8; } for (x = 0, indx = 0; x < plane->width; x+=2, indx++) { /* some values calculated in the previous iterations can */ /* be reused in the next ones, so do appropriate copying */ b2_1 = b2_2; // b2[x-1,y ] = b2[x, y ] b2_2 = b2_3; // b2[x ,y ] = b2[x+1,y ] b2_4 = b2_5; // b2[x-1,y+1] = b2[x ,y+1] b2_5 = b2_6; // b2[x ,y+1] = b2[x+1,y+1] b3_1 = b3_2; // b3[x-1,y-1] = b3[x ,y-1] b3_2 = b3_3; // b3[x ,y-1] = b3[x+1,y-1] b3_4 = b3_5; // b3[x-1,y ] = b3[x ,y ] b3_5 = b3_6; // b3[x ,y ] = b3[x+1,y ] b3_7 = b3_8; // vert_HPF(x-1) b3_8 = b3_9; // vert_HPF(x ) p0 = p1 = p2 = p3 = 0; /* process the LL-band by applying LPF both vertically and horizontally */ if (num_bands > 0) { tmp0 = b0_1; tmp2 = b0_2; b0_1 = b0_ptr[indx+1]; b0_2 = b0_ptr[pitch+indx+1]; tmp1 = tmp0 + b0_1; p0 = tmp0 << 4; p1 = tmp1 << 3; p2 = (tmp0 + tmp2) << 3; p3 = (tmp1 + tmp2 + b0_2) << 2; } /* process the HL-band by applying HPF vertically and LPF horizontally */ if (num_bands > 1) { tmp0 = b1_2; tmp1 = b1_1; b1_2 = b1_ptr[indx+1]; b1_1 = b1_ptr[back_pitch+indx+1]; tmp2 = tmp1 - tmp0*6 + b1_3; b1_3 = b1_1 - b1_2*6 + b1_ptr[pitch+indx+1]; p0 += (tmp0 + tmp1) << 3; p1 += (tmp0 + tmp1 + b1_1 + b1_2) << 2; p2 += tmp2 << 2; p3 += (tmp2 + b1_3) << 1; } /* process the LH-band by applying LPF vertically and HPF horizontally */ if (num_bands > 2) { b2_3 = b2_ptr[indx+1]; b2_6 = b2_ptr[pitch+indx+1]; tmp0 = b2_1 + b2_2; tmp1 = b2_1 - b2_2*6 + b2_3; p0 += tmp0 << 3; p1 += tmp1 << 2; p2 += (tmp0 + b2_4 + b2_5) << 2; p3 += (tmp1 + b2_4 - b2_5*6 + b2_6) << 1; } /* process the HH-band by applying HPF both vertically and horizontally */ if (num_bands > 3) { b3_6 = b3_ptr[indx+1]; // b3[x+1,y ] b3_3 = b3_ptr[back_pitch+indx+1]; // b3[x+1,y-1] tmp0 = b3_1 + b3_4; tmp1 = b3_2 + b3_5; tmp2 = b3_3 + b3_6; b3_9 = b3_3 - b3_6*6 + b3_ptr[pitch+indx+1]; p0 += (tmp0 + tmp1) << 2; p1 += (tmp0 - tmp1*6 + tmp2) << 1; p2 += (b3_7 + b3_8) << 1; p3 += b3_7 - b3_8*6 + b3_9; } /* output four pixels */ dst[x] = av_clip_uint8((p0 >> 6) + 128); dst[x+1] = av_clip_uint8((p1 >> 6) + 128); dst[dst_pitch+x] = av_clip_uint8((p2 >> 6) + 128); dst[dst_pitch+x+1] = av_clip_uint8((p3 >> 6) + 128); }// for x dst += dst_pitch << 1; 
back_pitch = -pitch; b0_ptr += pitch; b1_ptr += pitch; b2_ptr += pitch; b3_ptr += pitch; } } | 3,114 |
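The rows above (id 3,114) recompose an image from four wavelet bands with an unrolled 2-D filter-bank formulation. For reference, a generic 1-D inverse 5/3 lifting step, a related but different way of expressing such a recomposition; it is not the exact filter used above:

```c
/* Generic 1-D inverse 5/3 wavelet step in lifting form, with simplified
 * symmetric extension at the boundaries. */
#include <stdio.h>

/* low[]  : n approximation coefficients
 * high[] : n detail coefficients
 * out[]  : 2*n reconstructed samples */
static void inverse_53_1d(const int *low, const int *high, int *out, int n)
{
    /* undo the update step: even samples */
    for (int i = 0; i < n; i++) {
        int hm1 = high[i > 0 ? i - 1 : 0];
        out[2 * i] = low[i] - ((hm1 + high[i] + 2) >> 2);
    }
    /* undo the predict step: odd samples */
    for (int i = 0; i < n; i++) {
        int ep1 = out[2 * (i + 1 < n ? i + 1 : i)];
        out[2 * i + 1] = high[i] + ((out[2 * i] + ep1) >> 1);
    }
}

int main(void)
{
    int low[4]  = { 10, 12, 14, 16 };
    int high[4] = { 0, 0, 0, 0 };
    int out[8];

    inverse_53_1d(low, high, out, 4);
    for (int i = 0; i < 8; i++)
        printf("%d ", out[i]);   /* smooth ramp: all details are zero */
    printf("\n");
    return 0;
}
```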
1 | static void megasas_mmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { MegasasState *s = opaque; PCIDevice *pci_dev = PCI_DEVICE(s); uint64_t frame_addr; uint32_t frame_count; int i; switch (addr) { case MFI_IDB: trace_megasas_mmio_writel("MFI_IDB", val); if (val & MFI_FWINIT_ABORT) { /* Abort all pending cmds */ for (i = 0; i < s->fw_cmds; i++) { megasas_abort_command(&s->frames[i]); } } if (val & MFI_FWINIT_READY) { /* move to FW READY */ megasas_soft_reset(s); } if (val & MFI_FWINIT_MFIMODE) { /* discard MFIs */ } if (val & MFI_FWINIT_STOP_ADP) { /* Terminal error, stop processing */ s->fw_state = MFI_FWSTATE_FAULT; } break; case MFI_OMSK: trace_megasas_mmio_writel("MFI_OMSK", val); s->intr_mask = val; if (!megasas_intr_enabled(s) && !msi_enabled(pci_dev) && !msix_enabled(pci_dev)) { trace_megasas_irq_lower(); pci_irq_deassert(pci_dev); } if (megasas_intr_enabled(s)) { if (msix_enabled(pci_dev)) { trace_megasas_msix_enabled(0); } else if (msi_enabled(pci_dev)) { trace_megasas_msi_enabled(0); } else { trace_megasas_intr_enabled(); } } else { trace_megasas_intr_disabled(); megasas_soft_reset(s); } break; case MFI_ODCR0: trace_megasas_mmio_writel("MFI_ODCR0", val); s->doorbell = 0; if (megasas_intr_enabled(s)) { if (!msix_enabled(pci_dev) && !msi_enabled(pci_dev)) { trace_megasas_irq_lower(); pci_irq_deassert(pci_dev); } } break; case MFI_IQPH: trace_megasas_mmio_writel("MFI_IQPH", val); /* Received high 32 bits of a 64 bit MFI frame address */ s->frame_hi = val; break; case MFI_IQPL: trace_megasas_mmio_writel("MFI_IQPL", val); /* Received low 32 bits of a 64 bit MFI frame address */ /* Fallthrough */ case MFI_IQP: if (addr == MFI_IQP) { trace_megasas_mmio_writel("MFI_IQP", val); /* Received 64 bit MFI frame address */ s->frame_hi = 0; } frame_addr = (val & ~0x1F); /* Add possible 64 bit offset */ frame_addr |= ((uint64_t)s->frame_hi << 32); s->frame_hi = 0; frame_count = (val >> 1) & 0xF; megasas_handle_frame(s, frame_addr, frame_count); break; case MFI_SEQ: trace_megasas_mmio_writel("MFI_SEQ", val); /* Magic sequence to start ADP reset */ if (adp_reset_seq[s->adp_reset] == val) { s->adp_reset++; } else { s->adp_reset = 0; s->diag = 0; } if (s->adp_reset == 6) { s->diag = MFI_DIAG_WRITE_ENABLE; } break; case MFI_DIAG: trace_megasas_mmio_writel("MFI_DIAG", val); /* ADP reset */ if ((s->diag & MFI_DIAG_WRITE_ENABLE) && (val & MFI_DIAG_RESET_ADP)) { s->diag |= MFI_DIAG_RESET_ADP; megasas_soft_reset(s); s->adp_reset = 0; s->diag = 0; } break; default: trace_megasas_mmio_invalid_writel(addr, val); break; } } | 3,115 |
1 | static void decode_mode(AVCodecContext *ctx) { static const uint8_t left_ctx[N_BS_SIZES] = { 0x0, 0x8, 0x0, 0x8, 0xc, 0x8, 0xc, 0xe, 0xc, 0xe, 0xf, 0xe, 0xf }; static const uint8_t above_ctx[N_BS_SIZES] = { 0x0, 0x0, 0x8, 0x8, 0x8, 0xc, 0xc, 0xc, 0xe, 0xe, 0xe, 0xf, 0xf }; static const uint8_t max_tx_for_bl_bp[N_BS_SIZES] = { TX_32X32, TX_32X32, TX_32X32, TX_32X32, TX_16X16, TX_16X16, TX_16X16, TX_8X8, TX_8X8, TX_8X8, TX_4X4, TX_4X4, TX_4X4 }; VP9Context *s = ctx->priv_data; VP9Block *b = s->b; int row = s->row, col = s->col, row7 = s->row7; enum TxfmMode max_tx = max_tx_for_bl_bp[b->bs]; int w4 = FFMIN(s->cols - col, bwh_tab[1][b->bs][0]); int h4 = FFMIN(s->rows - row, bwh_tab[1][b->bs][1]), y; int have_a = row > 0, have_l = col > s->tiling.tile_col_start; if (!s->segmentation.enabled) { b->seg_id = 0; } else if (s->keyframe || s->intraonly) { b->seg_id = s->segmentation.update_map ? vp8_rac_get_tree(&s->c, vp9_segmentation_tree, s->prob.seg) : 0; } else if (!s->segmentation.update_map || (s->segmentation.temporal && vp56_rac_get_prob_branchy(&s->c, s->prob.segpred[s->above_segpred_ctx[col] + s->left_segpred_ctx[row7]]))) { int pred = 8, x; uint8_t *refsegmap = s->frames[LAST_FRAME].segmentation_map; if (!s->last_uses_2pass) ff_thread_await_progress(&s->frames[LAST_FRAME].tf, row >> 3, 0); for (y = 0; y < h4; y++) for (x = 0; x < w4; x++) pred = FFMIN(pred, refsegmap[(y + row) * 8 * s->sb_cols + x + col]); av_assert1(pred < 8); b->seg_id = pred; memset(&s->above_segpred_ctx[col], 1, w4); memset(&s->left_segpred_ctx[row7], 1, h4); } else { b->seg_id = vp8_rac_get_tree(&s->c, vp9_segmentation_tree, s->prob.seg); memset(&s->above_segpred_ctx[col], 0, w4); memset(&s->left_segpred_ctx[row7], 0, h4); } if ((s->segmentation.enabled && s->segmentation.update_map) || s->keyframe) { uint8_t *segmap = s->frames[CUR_FRAME].segmentation_map; for (y = 0; y < h4; y++) memset(&segmap[(y + row) * 8 * s->sb_cols + col], b->seg_id, w4); } b->skip = s->segmentation.enabled && s->segmentation.feat[b->seg_id].skip_enabled; if (!b->skip) { int c = s->left_skip_ctx[row7] + s->above_skip_ctx[col]; b->skip = vp56_rac_get_prob(&s->c, s->prob.p.skip[c]); s->counts.skip[c][b->skip]++; } if (s->keyframe || s->intraonly) { b->intra = 1; } else if (s->segmentation.feat[b->seg_id].ref_enabled) { b->intra = !s->segmentation.feat[b->seg_id].ref_val; } else { int c, bit; if (have_a && have_l) { c = s->above_intra_ctx[col] + s->left_intra_ctx[row7]; c += (c == 2); } else { c = have_a ? 2 * s->above_intra_ctx[col] : have_l ? 2 * s->left_intra_ctx[row7] : 0; } bit = vp56_rac_get_prob(&s->c, s->prob.p.intra[c]); s->counts.intra[c][bit]++; b->intra = !bit; } if ((b->intra || !b->skip) && s->txfmmode == TX_SWITCHABLE) { int c; if (have_a) { if (have_l) { c = (s->above_skip_ctx[col] ? max_tx : s->above_txfm_ctx[col]) + (s->left_skip_ctx[row7] ? max_tx : s->left_txfm_ctx[row7]) > max_tx; } else { c = s->above_skip_ctx[col] ? 1 : (s->above_txfm_ctx[col] * 2 > max_tx); } } else if (have_l) { c = s->left_skip_ctx[row7] ? 
1 : (s->left_txfm_ctx[row7] * 2 > max_tx); } else { c = 1; } switch (max_tx) { case TX_32X32: b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][0]); if (b->tx) { b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][1]); if (b->tx == 2) b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][2]); } s->counts.tx32p[c][b->tx]++; break; case TX_16X16: b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx16p[c][0]); if (b->tx) b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx16p[c][1]); s->counts.tx16p[c][b->tx]++; break; case TX_8X8: b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx8p[c]); s->counts.tx8p[c][b->tx]++; break; case TX_4X4: b->tx = TX_4X4; break; } } else { b->tx = FFMIN(max_tx, s->txfmmode); } if (s->keyframe || s->intraonly) { uint8_t *a = &s->above_mode_ctx[col * 2]; uint8_t *l = &s->left_mode_ctx[(row7) << 1]; b->comp = 0; if (b->bs > BS_8x8) { // FIXME the memory storage intermediates here aren't really // necessary, they're just there to make the code slightly // simpler for now b->mode[0] = a[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree, vp9_default_kf_ymode_probs[a[0]][l[0]]); if (b->bs != BS_8x4) { b->mode[1] = vp8_rac_get_tree(&s->c, vp9_intramode_tree, vp9_default_kf_ymode_probs[a[1]][b->mode[0]]); l[0] = a[1] = b->mode[1]; } else { l[0] = a[1] = b->mode[1] = b->mode[0]; } if (b->bs != BS_4x8) { b->mode[2] = a[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree, vp9_default_kf_ymode_probs[a[0]][l[1]]); if (b->bs != BS_8x4) { b->mode[3] = vp8_rac_get_tree(&s->c, vp9_intramode_tree, vp9_default_kf_ymode_probs[a[1]][b->mode[2]]); l[1] = a[1] = b->mode[3]; } else { l[1] = a[1] = b->mode[3] = b->mode[2]; } } else { b->mode[2] = b->mode[0]; l[1] = a[1] = b->mode[3] = b->mode[1]; } } else { b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree, vp9_default_kf_ymode_probs[*a][*l]); b->mode[3] = b->mode[2] = b->mode[1] = b->mode[0]; // FIXME this can probably be optimized memset(a, b->mode[0], bwh_tab[0][b->bs][0]); memset(l, b->mode[0], bwh_tab[0][b->bs][1]); } b->uvmode = vp8_rac_get_tree(&s->c, vp9_intramode_tree, vp9_default_kf_uvmode_probs[b->mode[3]]); } else if (b->intra) { b->comp = 0; if (b->bs > BS_8x8) { b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree, s->prob.p.y_mode[0]); s->counts.y_mode[0][b->mode[0]]++; if (b->bs != BS_8x4) { b->mode[1] = vp8_rac_get_tree(&s->c, vp9_intramode_tree, s->prob.p.y_mode[0]); s->counts.y_mode[0][b->mode[1]]++; } else { b->mode[1] = b->mode[0]; } if (b->bs != BS_4x8) { b->mode[2] = vp8_rac_get_tree(&s->c, vp9_intramode_tree, s->prob.p.y_mode[0]); s->counts.y_mode[0][b->mode[2]]++; if (b->bs != BS_8x4) { b->mode[3] = vp8_rac_get_tree(&s->c, vp9_intramode_tree, s->prob.p.y_mode[0]); s->counts.y_mode[0][b->mode[3]]++; } else { b->mode[3] = b->mode[2]; } } else { b->mode[2] = b->mode[0]; b->mode[3] = b->mode[1]; } } else { static const uint8_t size_group[10] = { 3, 3, 3, 3, 2, 2, 2, 1, 1, 1 }; int sz = size_group[b->bs]; b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree, s->prob.p.y_mode[sz]); b->mode[1] = b->mode[2] = b->mode[3] = b->mode[0]; s->counts.y_mode[sz][b->mode[3]]++; } b->uvmode = vp8_rac_get_tree(&s->c, vp9_intramode_tree, s->prob.p.uv_mode[b->mode[3]]); s->counts.uv_mode[b->mode[3]][b->uvmode]++; } else { static const uint8_t inter_mode_ctx_lut[14][14] = { { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }, { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }, { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }, { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }, { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }, { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }, 
{ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }, { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }, { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }, { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }, { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 }, { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 }, { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 3 }, { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 4 }, }; if (s->segmentation.feat[b->seg_id].ref_enabled) { av_assert2(s->segmentation.feat[b->seg_id].ref_val != 0); b->comp = 0; b->ref[0] = s->segmentation.feat[b->seg_id].ref_val - 1; } else { // read comp_pred flag if (s->comppredmode != PRED_SWITCHABLE) { b->comp = s->comppredmode == PRED_COMPREF; } else { int c; // FIXME add intra as ref=0xff (or -1) to make these easier? if (have_a) { if (have_l) { if (s->above_comp_ctx[col] && s->left_comp_ctx[row7]) { c = 4; } else if (s->above_comp_ctx[col]) { c = 2 + (s->left_intra_ctx[row7] || s->left_ref_ctx[row7] == s->fixcompref); } else if (s->left_comp_ctx[row7]) { c = 2 + (s->above_intra_ctx[col] || s->above_ref_ctx[col] == s->fixcompref); } else { c = (!s->above_intra_ctx[col] && s->above_ref_ctx[col] == s->fixcompref) ^ (!s->left_intra_ctx[row7] && s->left_ref_ctx[row & 7] == s->fixcompref); } } else { c = s->above_comp_ctx[col] ? 3 : (!s->above_intra_ctx[col] && s->above_ref_ctx[col] == s->fixcompref); } } else if (have_l) { c = s->left_comp_ctx[row7] ? 3 : (!s->left_intra_ctx[row7] && s->left_ref_ctx[row7] == s->fixcompref); } else { c = 1; } b->comp = vp56_rac_get_prob(&s->c, s->prob.p.comp[c]); s->counts.comp[c][b->comp]++; } // read actual references // FIXME probably cache a few variables here to prevent repetitive // memory accesses below if (b->comp) /* two references */ { int fix_idx = s->signbias[s->fixcompref], var_idx = !fix_idx, c, bit; b->ref[fix_idx] = s->fixcompref; // FIXME can this codeblob be replaced by some sort of LUT? if (have_a) { if (have_l) { if (s->above_intra_ctx[col]) { if (s->left_intra_ctx[row7]) { c = 2; } else { c = 1 + 2 * (s->left_ref_ctx[row7] != s->varcompref[1]); } } else if (s->left_intra_ctx[row7]) { c = 1 + 2 * (s->above_ref_ctx[col] != s->varcompref[1]); } else { int refl = s->left_ref_ctx[row7], refa = s->above_ref_ctx[col]; if (refl == refa && refa == s->varcompref[1]) { c = 0; } else if (!s->left_comp_ctx[row7] && !s->above_comp_ctx[col]) { if ((refa == s->fixcompref && refl == s->varcompref[0]) || (refl == s->fixcompref && refa == s->varcompref[0])) { c = 4; } else { c = (refa == refl) ? 3 : 1; } } else if (!s->left_comp_ctx[row7]) { if (refa == s->varcompref[1] && refl != s->varcompref[1]) { c = 1; } else { c = (refl == s->varcompref[1] && refa != s->varcompref[1]) ? 2 : 4; } } else if (!s->above_comp_ctx[col]) { if (refl == s->varcompref[1] && refa != s->varcompref[1]) { c = 1; } else { c = (refa == s->varcompref[1] && refl != s->varcompref[1]) ? 2 : 4; } } else { c = (refl == refa) ? 
4 : 2; } } } else { if (s->above_intra_ctx[col]) { c = 2; } else if (s->above_comp_ctx[col]) { c = 4 * (s->above_ref_ctx[col] != s->varcompref[1]); } else { c = 3 * (s->above_ref_ctx[col] != s->varcompref[1]); } } } else if (have_l) { if (s->left_intra_ctx[row7]) { c = 2; } else if (s->left_comp_ctx[row7]) { c = 4 * (s->left_ref_ctx[row7] != s->varcompref[1]); } else { c = 3 * (s->left_ref_ctx[row7] != s->varcompref[1]); } } else { c = 2; } bit = vp56_rac_get_prob(&s->c, s->prob.p.comp_ref[c]); b->ref[var_idx] = s->varcompref[bit]; s->counts.comp_ref[c][bit]++; } else /* single reference */ { int bit, c; if (have_a && !s->above_intra_ctx[col]) { if (have_l && !s->left_intra_ctx[row7]) { if (s->left_comp_ctx[row7]) { if (s->above_comp_ctx[col]) { c = 1 + (!s->fixcompref || !s->left_ref_ctx[row7] || !s->above_ref_ctx[col]); } else { c = (3 * !s->above_ref_ctx[col]) + (!s->fixcompref || !s->left_ref_ctx[row7]); } } else if (s->above_comp_ctx[col]) { c = (3 * !s->left_ref_ctx[row7]) + (!s->fixcompref || !s->above_ref_ctx[col]); } else { c = 2 * !s->left_ref_ctx[row7] + 2 * !s->above_ref_ctx[col]; } } else if (s->above_intra_ctx[col]) { c = 2; } else if (s->above_comp_ctx[col]) { c = 1 + (!s->fixcompref || !s->above_ref_ctx[col]); } else { c = 4 * (!s->above_ref_ctx[col]); } } else if (have_l && !s->left_intra_ctx[row7]) { if (s->left_intra_ctx[row7]) { c = 2; } else if (s->left_comp_ctx[row7]) { c = 1 + (!s->fixcompref || !s->left_ref_ctx[row7]); } else { c = 4 * (!s->left_ref_ctx[row7]); } } else { c = 2; } bit = vp56_rac_get_prob(&s->c, s->prob.p.single_ref[c][0]); s->counts.single_ref[c][0][bit]++; if (!bit) { b->ref[0] = 0; } else { // FIXME can this codeblob be replaced by some sort of LUT? if (have_a) { if (have_l) { if (s->left_intra_ctx[row7]) { if (s->above_intra_ctx[col]) { c = 2; } else if (s->above_comp_ctx[col]) { c = 1 + 2 * (s->fixcompref == 1 || s->above_ref_ctx[col] == 1); } else if (!s->above_ref_ctx[col]) { c = 3; } else { c = 4 * (s->above_ref_ctx[col] == 1); } } else if (s->above_intra_ctx[col]) { if (s->left_intra_ctx[row7]) { c = 2; } else if (s->left_comp_ctx[row7]) { c = 1 + 2 * (s->fixcompref == 1 || s->left_ref_ctx[row7] == 1); } else if (!s->left_ref_ctx[row7]) { c = 3; } else { c = 4 * (s->left_ref_ctx[row7] == 1); } } else if (s->above_comp_ctx[col]) { if (s->left_comp_ctx[row7]) { if (s->left_ref_ctx[row7] == s->above_ref_ctx[col]) { c = 3 * (s->fixcompref == 1 || s->left_ref_ctx[row7] == 1); } else { c = 2; } } else if (!s->left_ref_ctx[row7]) { c = 1 + 2 * (s->fixcompref == 1 || s->above_ref_ctx[col] == 1); } else { c = 3 * (s->left_ref_ctx[row7] == 1) + (s->fixcompref == 1 || s->above_ref_ctx[col] == 1); } } else if (s->left_comp_ctx[row7]) { if (!s->above_ref_ctx[col]) { c = 1 + 2 * (s->fixcompref == 1 || s->left_ref_ctx[row7] == 1); } else { c = 3 * (s->above_ref_ctx[col] == 1) + (s->fixcompref == 1 || s->left_ref_ctx[row7] == 1); } } else if (!s->above_ref_ctx[col]) { if (!s->left_ref_ctx[row7]) { c = 3; } else { c = 4 * (s->left_ref_ctx[row7] == 1); } } else if (!s->left_ref_ctx[row7]) { c = 4 * (s->above_ref_ctx[col] == 1); } else { c = 2 * (s->left_ref_ctx[row7] == 1) + 2 * (s->above_ref_ctx[col] == 1); } } else { if (s->above_intra_ctx[col] || (!s->above_comp_ctx[col] && !s->above_ref_ctx[col])) { c = 2; } else if (s->above_comp_ctx[col]) { c = 3 * (s->fixcompref == 1 || s->above_ref_ctx[col] == 1); } else { c = 4 * (s->above_ref_ctx[col] == 1); } } } else if (have_l) { if (s->left_intra_ctx[row7] || (!s->left_comp_ctx[row7] && !s->left_ref_ctx[row7])) 
{ c = 2; } else if (s->left_comp_ctx[row7]) { c = 3 * (s->fixcompref == 1 || s->left_ref_ctx[row7] == 1); } else { c = 4 * (s->left_ref_ctx[row7] == 1); } } else { c = 2; } bit = vp56_rac_get_prob(&s->c, s->prob.p.single_ref[c][1]); s->counts.single_ref[c][1][bit]++; b->ref[0] = 1 + bit; } } } if (b->bs <= BS_8x8) { if (s->segmentation.feat[b->seg_id].skip_enabled) { b->mode[0] = b->mode[1] = b->mode[2] = b->mode[3] = ZEROMV; } else { static const uint8_t off[10] = { 3, 0, 0, 1, 0, 0, 0, 0, 0, 0 }; // FIXME this needs to use the LUT tables from find_ref_mvs // because not all are -1,0/0,-1 int c = inter_mode_ctx_lut[s->above_mode_ctx[col + off[b->bs]]] [s->left_mode_ctx[row7 + off[b->bs]]]; b->mode[0] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree, s->prob.p.mv_mode[c]); b->mode[1] = b->mode[2] = b->mode[3] = b->mode[0]; s->counts.mv_mode[c][b->mode[0] - 10]++; } } if (s->filtermode == FILTER_SWITCHABLE) { int c; if (have_a && s->above_mode_ctx[col] >= NEARESTMV) { if (have_l && s->left_mode_ctx[row7] >= NEARESTMV) { c = s->above_filter_ctx[col] == s->left_filter_ctx[row7] ? s->left_filter_ctx[row7] : 3; } else { c = s->above_filter_ctx[col]; } } else if (have_l && s->left_mode_ctx[row7] >= NEARESTMV) { c = s->left_filter_ctx[row7]; } else { c = 3; } b->filter = vp8_rac_get_tree(&s->c, vp9_filter_tree, s->prob.p.filter[c]); s->counts.filter[c][b->filter]++; } else { b->filter = s->filtermode; } if (b->bs > BS_8x8) { int c = inter_mode_ctx_lut[s->above_mode_ctx[col]][s->left_mode_ctx[row7]]; b->mode[0] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree, s->prob.p.mv_mode[c]); s->counts.mv_mode[c][b->mode[0] - 10]++; fill_mv(s, b->mv[0], b->mode[0], 0); if (b->bs != BS_8x4) { b->mode[1] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree, s->prob.p.mv_mode[c]); s->counts.mv_mode[c][b->mode[1] - 10]++; fill_mv(s, b->mv[1], b->mode[1], 1); } else { b->mode[1] = b->mode[0]; AV_COPY32(&b->mv[1][0], &b->mv[0][0]); AV_COPY32(&b->mv[1][1], &b->mv[0][1]); } if (b->bs != BS_4x8) { b->mode[2] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree, s->prob.p.mv_mode[c]); s->counts.mv_mode[c][b->mode[2] - 10]++; fill_mv(s, b->mv[2], b->mode[2], 2); if (b->bs != BS_8x4) { b->mode[3] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree, s->prob.p.mv_mode[c]); s->counts.mv_mode[c][b->mode[3] - 10]++; fill_mv(s, b->mv[3], b->mode[3], 3); } else { b->mode[3] = b->mode[2]; AV_COPY32(&b->mv[3][0], &b->mv[2][0]); AV_COPY32(&b->mv[3][1], &b->mv[2][1]); } } else { b->mode[2] = b->mode[0]; AV_COPY32(&b->mv[2][0], &b->mv[0][0]); AV_COPY32(&b->mv[2][1], &b->mv[0][1]); b->mode[3] = b->mode[1]; AV_COPY32(&b->mv[3][0], &b->mv[1][0]); AV_COPY32(&b->mv[3][1], &b->mv[1][1]); } } else { fill_mv(s, b->mv[0], b->mode[0], -1); AV_COPY32(&b->mv[1][0], &b->mv[0][0]); AV_COPY32(&b->mv[2][0], &b->mv[0][0]); AV_COPY32(&b->mv[3][0], &b->mv[0][0]); AV_COPY32(&b->mv[1][1], &b->mv[0][1]); AV_COPY32(&b->mv[2][1], &b->mv[0][1]); AV_COPY32(&b->mv[3][1], &b->mv[0][1]); } } // FIXME this can probably be optimized memset(&s->above_skip_ctx[col], b->skip, w4); memset(&s->left_skip_ctx[row7], b->skip, h4); memset(&s->above_txfm_ctx[col], b->tx, w4); memset(&s->left_txfm_ctx[row7], b->tx, h4); memset(&s->above_partition_ctx[col], above_ctx[b->bs], w4); memset(&s->left_partition_ctx[row7], left_ctx[b->bs], h4); if (!s->keyframe && !s->intraonly) { memset(&s->above_intra_ctx[col], b->intra, w4); memset(&s->left_intra_ctx[row7], b->intra, h4); memset(&s->above_comp_ctx[col], b->comp, w4); memset(&s->left_comp_ctx[row7], b->comp, h4); memset(&s->above_mode_ctx[col], 
b->mode[3], w4); memset(&s->left_mode_ctx[row7], b->mode[3], h4); if (s->filtermode == FILTER_SWITCHABLE && !b->intra ) { memset(&s->above_filter_ctx[col], b->filter, w4); memset(&s->left_filter_ctx[row7], b->filter, h4); b->filter = vp9_filter_lut[b->filter]; } if (b->bs > BS_8x8) { int mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]); AV_COPY32(&s->left_mv_ctx[row7 * 2 + 0][0], &b->mv[1][0]); AV_COPY32(&s->left_mv_ctx[row7 * 2 + 0][1], &b->mv[1][1]); AV_WN32A(&s->left_mv_ctx[row7 * 2 + 1][0], mv0); AV_WN32A(&s->left_mv_ctx[row7 * 2 + 1][1], mv1); AV_COPY32(&s->above_mv_ctx[col * 2 + 0][0], &b->mv[2][0]); AV_COPY32(&s->above_mv_ctx[col * 2 + 0][1], &b->mv[2][1]); AV_WN32A(&s->above_mv_ctx[col * 2 + 1][0], mv0); AV_WN32A(&s->above_mv_ctx[col * 2 + 1][1], mv1); } else { int n, mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]); for (n = 0; n < w4 * 2; n++) { AV_WN32A(&s->above_mv_ctx[col * 2 + n][0], mv0); AV_WN32A(&s->above_mv_ctx[col * 2 + n][1], mv1); } for (n = 0; n < h4 * 2; n++) { AV_WN32A(&s->left_mv_ctx[row7 * 2 + n][0], mv0); AV_WN32A(&s->left_mv_ctx[row7 * 2 + n][1], mv1); } } if (!b->intra) { // FIXME write 0xff or -1 if intra, so we can use this // as a direct check in above branches int vref = b->ref[b->comp ? s->signbias[s->varcompref[0]] : 0]; memset(&s->above_ref_ctx[col], vref, w4); memset(&s->left_ref_ctx[row7], vref, h4); } } // FIXME kinda ugly for (y = 0; y < h4; y++) { int x, o = (row + y) * s->sb_cols * 8 + col; struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[o]; if (b->intra) { for (x = 0; x < w4; x++) { mv[x].ref[0] = mv[x].ref[1] = -1; } } else if (b->comp) { for (x = 0; x < w4; x++) { mv[x].ref[0] = b->ref[0]; mv[x].ref[1] = b->ref[1]; AV_COPY32(&mv[x].mv[0], &b->mv[3][0]); AV_COPY32(&mv[x].mv[1], &b->mv[3][1]); } } else { for (x = 0; x < w4; x++) { mv[x].ref[0] = b->ref[0]; mv[x].ref[1] = -1; AV_COPY32(&mv[x].mv[0], &b->mv[3][0]); } } } } | 3,116 |
1 | int do_subchannel_work_virtual(SubchDev *sch) { SCSW *s = &sch->curr_status.scsw; if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) { sch_handle_clear_func(sch); } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) { sch_handle_halt_func(sch); } else if (s->ctrl & SCSW_FCTL_START_FUNC) { /* Triggered by both ssch and rsch. */ sch_handle_start_func_virtual(sch); } css_inject_io_interrupt(sch); return 0; } | 3,118 |
1 | static int seg_write_header(AVFormatContext *s) { SegmentContext *seg = s->priv_data; AVFormatContext *oc = NULL; int ret, i; seg->segment_count = 0; if (!seg->write_header_trailer) seg->individual_header_trailer = 0; if (seg->time_str && seg->times_str) { av_log(s, AV_LOG_ERROR, "segment_time and segment_times options are mutually exclusive, select just one of them\n"); return AVERROR(EINVAL); } if ((seg->list_flags & SEGMENT_LIST_FLAG_LIVE) && seg->times_str) { av_log(s, AV_LOG_ERROR, "segment_flags +live and segment_times options are mutually exclusive:" "specify -segment_time if you want a live-friendly list\n"); return AVERROR(EINVAL); } if (seg->times_str) { if ((ret = parse_times(s, &seg->times, &seg->nb_times, seg->times_str)) < 0) return ret; } else { /* set default value if not specified */ if (!seg->time_str) seg->time_str = av_strdup("2"); if ((ret = av_parse_time(&seg->time, seg->time_str, 1)) < 0) { av_log(s, AV_LOG_ERROR, "Invalid time duration specification '%s' for segment_time option\n", seg->time_str); return ret; } } if (seg->time_delta_str) { if ((ret = av_parse_time(&seg->time_delta, seg->time_delta_str, 1)) < 0) { av_log(s, AV_LOG_ERROR, "Invalid time duration specification '%s' for delta option\n", seg->time_delta_str); return ret; } } if (seg->list) { if (seg->list_type == LIST_TYPE_UNDEFINED) { if (av_match_ext(seg->list, "csv" )) seg->list_type = LIST_TYPE_CSV; else if (av_match_ext(seg->list, "ext" )) seg->list_type = LIST_TYPE_EXT; else if (av_match_ext(seg->list, "m3u8")) seg->list_type = LIST_TYPE_M3U8; else seg->list_type = LIST_TYPE_FLAT; } if ((ret = segment_list_open(s)) < 0) goto fail; } if (seg->list_type == LIST_TYPE_EXT) av_log(s, AV_LOG_WARNING, "'ext' list type option is deprecated in favor of 'csv'\n"); for (i = 0; i < s->nb_streams; i++) seg->has_video += (s->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO); if (seg->has_video > 1) av_log(s, AV_LOG_WARNING, "More than a single video stream present, " "expect issues decoding it.\n"); seg->oformat = av_guess_format(seg->format, s->filename, NULL); if (!seg->oformat) { ret = AVERROR_MUXER_NOT_FOUND; goto fail; } if (seg->oformat->flags & AVFMT_NOFILE) { av_log(s, AV_LOG_ERROR, "format %s not supported.\n", oc->oformat->name); ret = AVERROR(EINVAL); goto fail; } if ((ret = segment_mux_init(s)) < 0) goto fail; oc = seg->avf; if (av_get_frame_filename(oc->filename, sizeof(oc->filename), s->filename, seg->segment_idx++) < 0) { ret = AVERROR(EINVAL); goto fail; } seg->segment_count++; if (seg->write_header_trailer) { if ((ret = avio_open2(&oc->pb, oc->filename, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL)) < 0) goto fail; } else { if ((ret = open_null_ctx(&oc->pb)) < 0) goto fail; } if ((ret = avformat_write_header(oc, NULL)) < 0) { avio_close(oc->pb); goto fail; } if (oc->avoid_negative_ts > 0 && s->avoid_negative_ts < 0) s->avoid_negative_ts = 1; if (!seg->write_header_trailer) { close_null_ctx(oc->pb); if ((ret = avio_open2(&oc->pb, oc->filename, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL)) < 0) goto fail; } fail: if (ret) { if (seg->list) segment_list_close(s); if (seg->avf) avformat_free_context(seg->avf); } return ret; } | 3,119 |
1 | void av_parser_close(AVCodecParserContext *s) { if(s){ if (s->parser->parser_close) { ff_lock_avcodec(NULL); s->parser->parser_close(s); ff_unlock_avcodec(); } av_free(s->priv_data); av_free(s); } } | 3,120 |
1 | static uint64_t openpic_msi_read(void *opaque, hwaddr addr, unsigned size) { OpenPICState *opp = opaque; uint64_t r = 0; int i, srs; DPRINTF("%s: addr " TARGET_FMT_plx "\n", __func__, addr); if (addr & 0xF) { return -1; } srs = addr >> 4; switch (addr) { case 0x00: case 0x10: case 0x20: case 0x30: case 0x40: case 0x50: case 0x60: case 0x70: /* MSIRs */ r = opp->msi[srs].msir; /* Clear on read */ opp->msi[srs].msir = 0; break; case 0x120: /* MSISR */ for (i = 0; i < MAX_MSI; i++) { r |= (opp->msi[i].msir ? 1 : 0) << i; } break; } return r; } | 3,121 |
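The row above (id 3,121) implements clear-on-read semantics for the MSIR registers. A tiny standalone sketch of that register behaviour, with an invented register struct:

```c
/* Clear-on-read status register: reading returns the pending bits and
 * acknowledges them in the same access. */
#include <stdint.h>
#include <stdio.h>

struct msi_reg {
    uint32_t msir;   /* pending message bits */
};

static uint32_t msir_read(struct msi_reg *r)
{
    uint32_t val = r->msir;
    r->msir = 0;            /* side effect: the read clears the bits */
    return val;
}

static void msir_set(struct msi_reg *r, int bit)
{
    r->msir |= 1u << bit;
}

int main(void)
{
    struct msi_reg r = { 0 };

    msir_set(&r, 3);
    msir_set(&r, 5);
    printf("first read:  0x%x\n", msir_read(&r));  /* 0x28              */
    printf("second read: 0x%x\n", msir_read(&r));  /* 0x0, already clear */
    return 0;
}
```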
1 | static inline int wv_unpack_stereo(WavpackFrameContext *s, GetBitContext *gb, void *dst, const int type) { int i, j, count = 0; int last, t; int A, B, L, L2, R, R2; int pos = s->pos; uint32_t crc = s->sc.crc; uint32_t crc_extra_bits = s->extra_sc.crc; int16_t *dst16 = dst; int32_t *dst32 = dst; float *dstfl = dst; const int channel_pad = s->avctx->channels - 2; if(s->samples_left == s->samples) s->one = s->zero = s->zeroes = 0; do{ L = wv_get_value(s, gb, 0, &last); if(last) break; R = wv_get_value(s, gb, 1, &last); if(last) break; for(i = 0; i < s->terms; i++){ t = s->decorr[i].value; if(t > 0){ if(t > 8){ if(t & 1){ A = 2 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]; B = 2 * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1]; }else{ A = (3 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1; B = (3 * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1]) >> 1; } s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0]; s->decorr[i].samplesB[1] = s->decorr[i].samplesB[0]; j = 0; }else{ A = s->decorr[i].samplesA[pos]; B = s->decorr[i].samplesB[pos]; j = (pos + t) & 7; } if(type != AV_SAMPLE_FMT_S16){ L2 = L + ((s->decorr[i].weightA * (int64_t)A + 512) >> 10); R2 = R + ((s->decorr[i].weightB * (int64_t)B + 512) >> 10); }else{ L2 = L + ((s->decorr[i].weightA * A + 512) >> 10); R2 = R + ((s->decorr[i].weightB * B + 512) >> 10); } if(A && L) s->decorr[i].weightA -= ((((L ^ A) >> 30) & 2) - 1) * s->decorr[i].delta; if(B && R) s->decorr[i].weightB -= ((((R ^ B) >> 30) & 2) - 1) * s->decorr[i].delta; s->decorr[i].samplesA[j] = L = L2; s->decorr[i].samplesB[j] = R = R2; }else if(t == -1){ if(type != AV_SAMPLE_FMT_S16) L2 = L + ((s->decorr[i].weightA * (int64_t)s->decorr[i].samplesA[0] + 512) >> 10); else L2 = L + ((s->decorr[i].weightA * s->decorr[i].samplesA[0] + 512) >> 10); UPDATE_WEIGHT_CLIP(s->decorr[i].weightA, s->decorr[i].delta, s->decorr[i].samplesA[0], L); L = L2; if(type != AV_SAMPLE_FMT_S16) R2 = R + ((s->decorr[i].weightB * (int64_t)L2 + 512) >> 10); else R2 = R + ((s->decorr[i].weightB * L2 + 512) >> 10); UPDATE_WEIGHT_CLIP(s->decorr[i].weightB, s->decorr[i].delta, L2, R); R = R2; s->decorr[i].samplesA[0] = R; }else{ if(type != AV_SAMPLE_FMT_S16) R2 = R + ((s->decorr[i].weightB * (int64_t)s->decorr[i].samplesB[0] + 512) >> 10); else R2 = R + ((s->decorr[i].weightB * s->decorr[i].samplesB[0] + 512) >> 10); UPDATE_WEIGHT_CLIP(s->decorr[i].weightB, s->decorr[i].delta, s->decorr[i].samplesB[0], R); R = R2; if(t == -3){ R2 = s->decorr[i].samplesA[0]; s->decorr[i].samplesA[0] = R; } if(type != AV_SAMPLE_FMT_S16) L2 = L + ((s->decorr[i].weightA * (int64_t)R2 + 512) >> 10); else L2 = L + ((s->decorr[i].weightA * R2 + 512) >> 10); UPDATE_WEIGHT_CLIP(s->decorr[i].weightA, s->decorr[i].delta, R2, L); L = L2; s->decorr[i].samplesB[0] = L; } } pos = (pos + 1) & 7; if(s->joint) L += (R -= (L >> 1)); crc = (crc * 3 + L) * 3 + R; if(type == AV_SAMPLE_FMT_FLT){ *dstfl++ = wv_get_value_float(s, &crc_extra_bits, L); *dstfl++ = wv_get_value_float(s, &crc_extra_bits, R); dstfl += channel_pad; } else if(type == AV_SAMPLE_FMT_S32){ *dst32++ = wv_get_value_integer(s, &crc_extra_bits, L); *dst32++ = wv_get_value_integer(s, &crc_extra_bits, R); dst32 += channel_pad; } else { *dst16++ = wv_get_value_integer(s, &crc_extra_bits, L); *dst16++ = wv_get_value_integer(s, &crc_extra_bits, R); dst16 += channel_pad; } count++; }while(!last && count < s->max_samples); s->samples_left -= count; if(!s->samples_left){ wv_reset_saved_context(s); if(crc != s->CRC){ av_log(s->avctx, AV_LOG_ERROR, "CRC 
error\n"); return -1; } if(s->got_extra_bits && crc_extra_bits != s->crc_extra_bits){ av_log(s->avctx, AV_LOG_ERROR, "Extra bits CRC error\n"); return -1; } }else{ s->pos = pos; s->sc.crc = crc; s->sc.bits_used = get_bits_count(&s->gb); if(s->got_extra_bits){ s->extra_sc.crc = crc_extra_bits; s->extra_sc.bits_used = get_bits_count(&s->gb_extra_bits); } } return count * 2; } | 3,122 |
1 | static int get_mmu_address(CPUState * env, target_ulong * physical, int *prot, target_ulong address, int rw, int access_type) { int use_asid, is_code, n; tlb_t *matching = NULL; use_asid = (env->mmucr & MMUCR_SV) == 0 && (env->sr & SR_MD) == 0; is_code = env->pc == address; /* Hack */ /* Use a hack to find if this is an instruction or data access */ if (env->pc == address && !(rw & PAGE_WRITE)) { n = find_itlb_entry(env, address, use_asid, 1); if (n >= 0) { matching = &env->itlb[n]; if ((env->sr & SR_MD) & !(matching->pr & 2)) n = MMU_ITLB_VIOLATION; else *prot = PAGE_READ; } } else { n = find_utlb_entry(env, address, use_asid); if (n >= 0) { matching = &env->utlb[n]; switch ((matching->pr << 1) | ((env->sr & SR_MD) ? 1 : 0)) { case 0: /* 000 */ case 2: /* 010 */ n = (rw & PAGE_WRITE) ? MMU_DTLB_VIOLATION_WRITE : MMU_DTLB_VIOLATION_READ; break; case 1: /* 001 */ case 4: /* 100 */ case 5: /* 101 */ if (rw & PAGE_WRITE) n = MMU_DTLB_VIOLATION_WRITE; else *prot = PAGE_READ; break; case 3: /* 011 */ case 6: /* 110 */ case 7: /* 111 */ *prot = rw & (PAGE_READ | PAGE_WRITE); break; } } else if (n == MMU_DTLB_MISS) { n = (rw & PAGE_WRITE) ? MMU_DTLB_MISS_WRITE : MMU_DTLB_MISS_READ; } } if (n >= 0) { *physical = ((matching->ppn << 10) & ~(matching->size - 1)) | (address & (matching->size - 1)); if ((rw & PAGE_WRITE) & !matching->d) n = MMU_DTLB_INITIAL_WRITE; else n = MMU_OK; } return n; } | 3,123 |
0 | static inline void mix_2f_1r_to_mono(AC3DecodeContext *ctx) { int i; float (*output)[256] = ctx->audio_block.block_output; for (i = 0; i < 256; i++) output[1][i] += (output[2][i] + output[3][i]); memset(output[2], 0, sizeof(output[2])); memset(output[3], 0, sizeof(output[3])); } | 3,125 |
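The row above (id 3,125) folds channels 2 and 3 into output channel 1 and zeroes them. A standalone sketch of that fold-and-clear downmix pattern, with a shortened block size and no gain handling:

```c
/* Fold all channels above index 1 into channel 1, then clear them. */
#include <stdio.h>
#include <string.h>

#define BLOCK 4   /* samples per channel (256 in the decoder above) */

static void mix_to_front(float out[][BLOCK], int nb_channels)
{
    for (int ch = 2; ch < nb_channels; ch++) {
        for (int i = 0; i < BLOCK; i++)
            out[1][i] += out[ch][i];
        memset(out[ch], 0, sizeof(out[ch]));
    }
}

int main(void)
{
    float out[4][BLOCK] = {
        { 0 },                      /* channel 0 (unused here) */
        { 0.1f, 0.1f, 0.1f, 0.1f }, /* mix target              */
        { 0.2f, 0.2f, 0.2f, 0.2f },
        { 0.3f, 0.3f, 0.3f, 0.3f },
    };

    mix_to_front(out, 4);
    for (int i = 0; i < BLOCK; i++)
        printf("%.1f ", out[1][i]);   /* prints 0.6 four times */
    printf("\n");
    return 0;
}
```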
0 | void mjpeg_picture_header(MpegEncContext *s) { put_marker(&s->pb, SOI); jpeg_table_header(s); put_marker(&s->pb, SOF0); put_bits(&s->pb, 16, 17); put_bits(&s->pb, 8, 8); /* 8 bits/component */ put_bits(&s->pb, 16, s->height); put_bits(&s->pb, 16, s->width); put_bits(&s->pb, 8, 3); /* 3 components */ /* Y component */ put_bits(&s->pb, 8, 1); /* component number */ put_bits(&s->pb, 4, 2); /* H factor */ put_bits(&s->pb, 4, 2); /* V factor */ put_bits(&s->pb, 8, 0); /* select matrix */ /* Cb component */ put_bits(&s->pb, 8, 2); /* component number */ put_bits(&s->pb, 4, 1); /* H factor */ put_bits(&s->pb, 4, 1); /* V factor */ put_bits(&s->pb, 8, 0); /* select matrix */ /* Cr component */ put_bits(&s->pb, 8, 3); /* component number */ put_bits(&s->pb, 4, 1); /* H factor */ put_bits(&s->pb, 4, 1); /* V factor */ put_bits(&s->pb, 8, 0); /* select matrix */ /* scan header */ put_marker(&s->pb, SOS); put_bits(&s->pb, 16, 12); /* length */ put_bits(&s->pb, 8, 3); /* 3 components */ /* Y component */ put_bits(&s->pb, 8, 1); /* index */ put_bits(&s->pb, 4, 0); /* DC huffman table index */ put_bits(&s->pb, 4, 0); /* AC huffman table index */ /* Cb component */ put_bits(&s->pb, 8, 2); /* index */ put_bits(&s->pb, 4, 1); /* DC huffman table index */ put_bits(&s->pb, 4, 1); /* AC huffman table index */ /* Cr component */ put_bits(&s->pb, 8, 3); /* index */ put_bits(&s->pb, 4, 1); /* DC huffman table index */ put_bits(&s->pb, 4, 1); /* AC huffman table index */ put_bits(&s->pb, 8, 0); /* Ss (not used) */ put_bits(&s->pb, 8, 63); /* Se (not used) */ put_bits(&s->pb, 8, 0); /* (not used) */ } | 3,126 |
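The row above (id 3,126) emits the picture header one field at a time with put_bits()/put_marker(). A minimal MSB-first bit writer of the kind such helpers are built on; this sketch is simplified (no overflow checks, n limited to 24 bits, partial bytes are not flushed):

```c
/* MSB-first bit writer: bits accumulate in a 32-bit cache and are written
 * out one byte at a time once 8 or more are pending. */
#include <stdint.h>
#include <stdio.h>

struct bitwriter {
    uint8_t *buf;
    size_t   pos;       /* bytes written                 */
    uint32_t cache;     /* bits waiting to be flushed    */
    int      bits;      /* number of valid bits in cache */
};

static void put_bits(struct bitwriter *w, int n, uint32_t value)
{
    /* valid for n <= 24 here */
    w->cache = (w->cache << n) | (value & ((1u << n) - 1));
    w->bits += n;
    while (w->bits >= 8) {
        w->bits -= 8;
        w->buf[w->pos++] = (uint8_t)(w->cache >> w->bits);
    }
}

int main(void)
{
    uint8_t buf[16];
    struct bitwriter w = { buf, 0, 0, 0 };

    put_bits(&w, 16, 0xFFD8);   /* an SOI-style marker  */
    put_bits(&w, 8, 8);         /* 8 bits per component */
    put_bits(&w, 4, 2);         /* H factor             */
    put_bits(&w, 4, 2);         /* V factor             */

    for (size_t i = 0; i < w.pos; i++)
        printf("%02X ", buf[i]);    /* FF D8 08 22 */
    printf("\n");
    return 0;
}
```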
1 | void qmp_xen_save_devices_state(const char *filename, Error **errp) { QEMUFile *f; QIOChannelFile *ioc; int saved_vm_running; int ret; saved_vm_running = runstate_is_running(); vm_stop(RUN_STATE_SAVE_VM); global_state_store_running(); ioc = qio_channel_file_new_path(filename, O_WRONLY | O_CREAT, 0660, errp); if (!ioc) { goto the_end; } qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-save-state"); f = qemu_fopen_channel_output(QIO_CHANNEL(ioc)); ret = qemu_save_device_state(f); qemu_fclose(f); if (ret < 0) { error_setg(errp, QERR_IO_ERROR); } the_end: if (saved_vm_running) { vm_start(); } } | 3,127 |
1 | static int get_physical_address(CPUState *env, target_phys_addr_t *physical, int *prot, int *access_index, target_ulong address, int rw, int mmu_idx) { int access_perms = 0; target_phys_addr_t pde_ptr; uint32_t pde; int error_code = 0, is_dirty, is_user; unsigned long page_offset; is_user = mmu_idx == MMU_USER_IDX; if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */ // Boot mode: instruction fetches are taken from PROM if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) { *physical = env->prom_addr | (address & 0x7ffffULL); *prot = PAGE_READ | PAGE_EXEC; return 0; } *physical = address; *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return 0; } *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user? 0 : 1); *physical = 0xffffffffffff0000ULL; /* SPARC reference MMU table walk: Context table->L1->L2->PTE */ /* Context base + context number */ pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2); pde = ldl_phys(pde_ptr); /* Ctx pde */ switch (pde & PTE_ENTRYTYPE_MASK) { default: case 0: /* Invalid */ return 1 << 2; case 2: /* L0 PTE, maybe should not happen? */ case 3: /* Reserved */ return 4 << 2; case 1: /* L0 PDE */ pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4); pde = ldl_phys(pde_ptr); switch (pde & PTE_ENTRYTYPE_MASK) { default: case 0: /* Invalid */ return (1 << 8) | (1 << 2); case 3: /* Reserved */ return (1 << 8) | (4 << 2); case 1: /* L1 PDE */ pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4); pde = ldl_phys(pde_ptr); switch (pde & PTE_ENTRYTYPE_MASK) { default: case 0: /* Invalid */ return (2 << 8) | (1 << 2); case 3: /* Reserved */ return (2 << 8) | (4 << 2); case 1: /* L2 PDE */ pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4); pde = ldl_phys(pde_ptr); switch (pde & PTE_ENTRYTYPE_MASK) { default: case 0: /* Invalid */ return (3 << 8) | (1 << 2); case 1: /* PDE, should not happen */ case 3: /* Reserved */ return (3 << 8) | (4 << 2); case 2: /* L3 PTE */ page_offset = (address & TARGET_PAGE_MASK) & (TARGET_PAGE_SIZE - 1); } break; case 2: /* L2 PTE */ page_offset = address & 0x3ffff; } break; case 2: /* L1 PTE */ page_offset = address & 0xffffff; } } /* update page modified and dirty bits */ is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK); if (!(pde & PG_ACCESSED_MASK) || is_dirty) { pde |= PG_ACCESSED_MASK; if (is_dirty) pde |= PG_MODIFIED_MASK; stl_phys_notdirty(pde_ptr, pde); } /* check access */ access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT; error_code = access_table[*access_index][access_perms]; if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) return error_code; /* the page can be put in the TLB */ *prot = perm_table[is_user][access_perms]; if (!(pde & PG_MODIFIED_MASK)) { /* only set write access if already dirty... otherwise wait for dirty access */ *prot &= ~PAGE_WRITE; } /* Even if large ptes, we map only one 4KB page in the cache to avoid filling it too fast */ *physical = ((target_phys_addr_t)(pde & PTE_ADDR_MASK) << 4) + page_offset; return error_code; } | 3,128 |
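The row above (id 3,128) walks a multi-level page table (context table, then L1, L2, L3 PTE) to translate a virtual address. A toy two-level walk with an invented entry layout and 4 KB pages, just to show the index and offset arithmetic:

```c
/* Toy two-level page-table walk; entry layouts are made up:
 * bit 0 = valid, remaining bits = table index or physical frame number. */
#include <stdint.h>
#include <stdio.h>

#define ENTRIES     1024
#define PAGE_SHIFT  12
#define PTE_VALID   1u

static uint32_t l1_table[ENTRIES];          /* points to L2 tables      */
static uint32_t l2_tables[2][ENTRIES];      /* points to physical frames */

static int translate(uint32_t vaddr, uint32_t *paddr)
{
    uint32_t l1_idx = (vaddr >> 22) & 0x3ff;
    uint32_t l2_idx = (vaddr >> PAGE_SHIFT) & 0x3ff;
    uint32_t offset = vaddr & 0xfff;

    uint32_t pde = l1_table[l1_idx];
    if (!(pde & PTE_VALID))
        return -1;                          /* fault: no L2 table     */

    uint32_t pte = l2_tables[pde >> 1][l2_idx];
    if (!(pte & PTE_VALID))
        return -1;                          /* fault: page not mapped */

    *paddr = ((pte >> 1) << PAGE_SHIFT) | offset;
    return 0;
}

int main(void)
{
    /* map the page at virtual 0x00401000 to physical frame 0x80 */
    l1_table[1] = (0 << 1) | PTE_VALID;        /* L1 entry 1 -> L2 table 0 */
    l2_tables[0][1] = (0x80 << 1) | PTE_VALID; /* L2 entry 1 -> frame 0x80 */

    uint32_t pa;
    if (translate(0x00401234, &pa) == 0)
        printf("phys = 0x%08x\n", pa);         /* 0x00080234 */
    else
        printf("fault\n");
    return 0;
}
```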
1 | static void pci_bridge_region_cleanup(PCIBridge *br) { PCIBus *parent = br->dev.bus; pci_bridge_cleanup_alias(&br->alias_io, parent->address_space_io); pci_bridge_cleanup_alias(&br->alias_mem, parent->address_space_mem); pci_bridge_cleanup_alias(&br->alias_pref_mem, parent->address_space_mem); } | 3,129 |
1 | static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret) { IDEState *s = opaque; int data_offset, n; if (ret < 0) { ide_atapi_io_error(s, ret); goto eot; } if (s->io_buffer_size > 0) { /* * For a cdrom read sector command (s->lba != -1), * adjust the lba for the next s->io_buffer_size chunk * and dma the current chunk. * For a command != read (s->lba == -1), just transfer * the reply data. */ if (s->lba != -1) { if (s->cd_sector_size == 2352) { n = 1; cd_data_to_raw(s->io_buffer, s->lba); } else { n = s->io_buffer_size >> 11; } s->lba += n; } s->packet_transfer_size -= s->io_buffer_size; if (s->bus->dma->ops->rw_buf(s->bus->dma, 1) == 0) goto eot; } if (s->packet_transfer_size <= 0) { s->status = READY_STAT | SEEK_STAT; s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD; ide_set_irq(s->bus); goto eot; } s->io_buffer_index = 0; if (s->cd_sector_size == 2352) { n = 1; s->io_buffer_size = s->cd_sector_size; data_offset = 16; } else { n = s->packet_transfer_size >> 11; if (n > (IDE_DMA_BUF_SECTORS / 4)) n = (IDE_DMA_BUF_SECTORS / 4); s->io_buffer_size = n * 2048; data_offset = 0; } #ifdef DEBUG_AIO printf("aio_read_cd: lba=%u n=%d\n", s->lba, n); #endif s->bus->dma->iov.iov_base = (void *)(s->io_buffer + data_offset); s->bus->dma->iov.iov_len = n * 4 * 512; qemu_iovec_init_external(&s->bus->dma->qiov, &s->bus->dma->iov, 1); s->bus->dma->aiocb = blk_aio_readv(s->blk, (int64_t)s->lba << 2, &s->bus->dma->qiov, n * 4, ide_atapi_cmd_read_dma_cb, s); return; eot: block_acct_done(blk_get_stats(s->blk), &s->acct); ide_set_inactive(s, false); } | 3,131 |
1 | static int vfio_ccw_handle_request(ORB *orb, SCSW *scsw, void *data) { S390CCWDevice *cdev = data; VFIOCCWDevice *vcdev = DO_UPCAST(VFIOCCWDevice, cdev, cdev); struct ccw_io_region *region = vcdev->io_region; int ret; QEMU_BUILD_BUG_ON(sizeof(region->orb_area) != sizeof(ORB)); QEMU_BUILD_BUG_ON(sizeof(region->scsw_area) != sizeof(SCSW)); QEMU_BUILD_BUG_ON(sizeof(region->irb_area) != sizeof(IRB)); memset(region, 0, sizeof(*region)); memcpy(region->orb_area, orb, sizeof(ORB)); memcpy(region->scsw_area, scsw, sizeof(SCSW)); again: ret = pwrite(vcdev->vdev.fd, region, vcdev->io_region_size, vcdev->io_region_offset); if (ret != vcdev->io_region_size) { if (errno == EAGAIN) { goto again; } error_report("vfio-ccw: write I/O region failed with errno=%d", errno); return -errno; } return region->ret_code; } | 3,132
1 | static void sdl_audio_callback(void *opaque, Uint8 *stream, int len) { VideoState *is = opaque; int audio_size, len1, silence = 0; audio_callback_time = av_gettime_relative(); while (len > 0) { if (is->audio_buf_index >= is->audio_buf_size) { audio_size = audio_decode_frame(is); if (audio_size < 0) { /* if error, just output silence */ silence = 1; is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size; } else { if (is->show_mode != SHOW_MODE_VIDEO) update_sample_display(is, (int16_t *)is->audio_buf, audio_size); is->audio_buf_size = audio_size; } is->audio_buf_index = 0; } len1 = is->audio_buf_size - is->audio_buf_index; if (len1 > len) len1 = len; if (!is->muted && !silence && is->audio_volume == SDL_MIX_MAXVOLUME) memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1); else { memset(stream, 0, len1); if (!is->muted && !silence) SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume); } len -= len1; stream += len1; is->audio_buf_index += len1; } is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index; /* Let's assume the audio driver that is used by SDL has two periods. */ if (!isnan(is->audio_clock)) { set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0); sync_clock_to_slave(&is->extclk, &is->audclk); } } | 3,133 |
1 | static int wmavoice_decode_packet(AVCodecContext *ctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { WMAVoiceContext *s = ctx->priv_data; GetBitContext *gb = &s->gb; int size, res, pos; /* Packets are sometimes a multiple of ctx->block_align, with a packet * header at each ctx->block_align bytes. However, FFmpeg's ASF demuxer * feeds us ASF packets, which may concatenate multiple "codec" packets * in a single "muxer" packet, so we artificially emulate that by * capping the packet size at ctx->block_align. */ for (size = avpkt->size; size > ctx->block_align; size -= ctx->block_align); init_get_bits(&s->gb, avpkt->data, size << 3); /* size == ctx->block_align is used to indicate whether we are dealing with * a new packet or a packet of which we already read the packet header * previously. */ if (!(size % ctx->block_align)) { // new packet header if (!size) { s->spillover_nbits = 0; s->nb_superframes = 0; } else { if ((res = parse_packet_header(s)) < 0) return res; s->nb_superframes = res; } /* If the packet header specifies a s->spillover_nbits, then we want * to push out all data of the previous packet (+ spillover) before * continuing to parse new superframes in the current packet. */ if (s->sframe_cache_size > 0) { int cnt = get_bits_count(gb); copy_bits(&s->pb, avpkt->data, size, gb, s->spillover_nbits); flush_put_bits(&s->pb); s->sframe_cache_size += s->spillover_nbits; if ((res = synth_superframe(ctx, data, got_frame_ptr)) == 0 && *got_frame_ptr) { cnt += s->spillover_nbits; s->skip_bits_next = cnt & 7; res = cnt >> 3; if (res > avpkt->size) { av_log(ctx, AV_LOG_ERROR, "Trying to skip %d bytes in packet of size %d\n", res, avpkt->size); return AVERROR_INVALIDDATA; } return res; } else skip_bits_long (gb, s->spillover_nbits - cnt + get_bits_count(gb)); // resync } else if (s->spillover_nbits) { skip_bits_long(gb, s->spillover_nbits); // resync } } else if (s->skip_bits_next) skip_bits(gb, s->skip_bits_next); /* Try parsing superframes in current packet */ s->sframe_cache_size = 0; s->skip_bits_next = 0; pos = get_bits_left(gb); if (s->nb_superframes-- == 0) { *got_frame_ptr = 0; return size; } else if (s->nb_superframes > 0) { if ((res = synth_superframe(ctx, data, got_frame_ptr)) < 0) { return res; } else if (*got_frame_ptr) { int cnt = get_bits_count(gb); s->skip_bits_next = cnt & 7; res = cnt >> 3; if (res > avpkt->size) { av_log(ctx, AV_LOG_ERROR, "Trying to skip %d bytes in packet of size %d\n", res, avpkt->size); return AVERROR_INVALIDDATA; } return res; } } else if ((s->sframe_cache_size = pos) > 0) { /* ... cache it for spillover in next packet */ init_put_bits(&s->pb, s->sframe_cache, SFRAME_CACHE_MAXSIZE); copy_bits(&s->pb, avpkt->data, size, gb, s->sframe_cache_size); // FIXME bad - just copy bytes as whole and add use the // skip_bits_next field } return size; } | 3,134 |
1 | static void ppc_core99_init (ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { CPUState *env = NULL; char *filename; qemu_irq *pic, **openpic_irqs; int unin_memory; int linux_boot, i; ram_addr_t ram_offset, bios_offset; target_phys_addr_t kernel_base, initrd_base, cmdline_base = 0; long kernel_size, initrd_size; PCIBus *pci_bus; MacIONVRAMState *nvr; int bios_size; MemoryRegion *pic_mem, *dbdma_mem, *cuda_mem, *escc_mem; MemoryRegion *ide_mem[3]; int ppc_boot_device; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; void *fw_cfg; void *dbdma; int machine_arch; linux_boot = (kernel_filename != NULL); /* init CPUs */ if (cpu_model == NULL) #ifdef TARGET_PPC64 cpu_model = "970fx"; #else cpu_model = "G4"; #endif for (i = 0; i < smp_cpus; i++) { env = cpu_init(cpu_model); if (!env) { fprintf(stderr, "Unable to find PowerPC CPU definition\n"); exit(1); } /* Set time-base frequency to 100 Mhz */ cpu_ppc_tb_init(env, 100UL * 1000UL * 1000UL); qemu_register_reset((QEMUResetHandler*)&cpu_reset, env); } /* allocate RAM */ ram_offset = qemu_ram_alloc(NULL, "ppc_core99.ram", ram_size); cpu_register_physical_memory(0, ram_size, ram_offset); /* allocate and load BIOS */ bios_offset = qemu_ram_alloc(NULL, "ppc_core99.bios", BIOS_SIZE); if (bios_name == NULL) bios_name = PROM_FILENAME; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); cpu_register_physical_memory(PROM_ADDR, BIOS_SIZE, bios_offset | IO_MEM_ROM); /* Load OpenBIOS (ELF) */ if (filename) { bios_size = load_elf(filename, NULL, NULL, NULL, NULL, NULL, 1, ELF_MACHINE, 0); g_free(filename); } else { bios_size = -1; } if (bios_size < 0 || bios_size > BIOS_SIZE) { hw_error("qemu: could not load PowerPC bios '%s'\n", bios_name); exit(1); } if (linux_boot) { uint64_t lowaddr = 0; int bswap_needed; #ifdef BSWAP_NEEDED bswap_needed = 1; #else bswap_needed = 0; #endif kernel_base = KERNEL_LOAD_ADDR; kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, NULL, &lowaddr, NULL, 1, ELF_MACHINE, 0); if (kernel_size < 0) kernel_size = load_aout(kernel_filename, kernel_base, ram_size - kernel_base, bswap_needed, TARGET_PAGE_SIZE); if (kernel_size < 0) kernel_size = load_image_targphys(kernel_filename, kernel_base, ram_size - kernel_base); if (kernel_size < 0) { hw_error("qemu: could not load kernel '%s'\n", kernel_filename); exit(1); } /* load initrd */ if (initrd_filename) { initrd_base = round_page(kernel_base + kernel_size + KERNEL_GAP); initrd_size = load_image_targphys(initrd_filename, initrd_base, ram_size - initrd_base); if (initrd_size < 0) { hw_error("qemu: could not load initial ram disk '%s'\n", initrd_filename); exit(1); } cmdline_base = round_page(initrd_base + initrd_size); } else { initrd_base = 0; initrd_size = 0; cmdline_base = round_page(kernel_base + kernel_size + KERNEL_GAP); } ppc_boot_device = 'm'; } else { kernel_base = 0; kernel_size = 0; initrd_base = 0; initrd_size = 0; ppc_boot_device = '\0'; /* We consider that NewWorld PowerMac never have any floppy drive * For now, OHW cannot boot from the network. 
*/ for (i = 0; boot_device[i] != '\0'; i++) { if (boot_device[i] >= 'c' && boot_device[i] <= 'f') { ppc_boot_device = boot_device[i]; break; } } if (ppc_boot_device == '\0') { fprintf(stderr, "No valid boot device for Mac99 machine\n"); exit(1); } } isa_mem_base = 0x80000000; /* Register 8 MB of ISA IO space */ isa_mmio_init(0xf2000000, 0x00800000); /* UniN init */ unin_memory = cpu_register_io_memory(unin_read, unin_write, NULL, DEVICE_NATIVE_ENDIAN); cpu_register_physical_memory(0xf8000000, 0x00001000, unin_memory); openpic_irqs = g_malloc0(smp_cpus * sizeof(qemu_irq *)); openpic_irqs[0] = g_malloc0(smp_cpus * sizeof(qemu_irq) * OPENPIC_OUTPUT_NB); for (i = 0; i < smp_cpus; i++) { /* Mac99 IRQ connection between OpenPIC outputs pins * and PowerPC input pins */ switch (PPC_INPUT(env)) { case PPC_FLAGS_INPUT_6xx: openpic_irqs[i] = openpic_irqs[0] + (i * OPENPIC_OUTPUT_NB); openpic_irqs[i][OPENPIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; openpic_irqs[i][OPENPIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; openpic_irqs[i][OPENPIC_OUTPUT_MCK] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_MCP]; /* Not connected ? */ openpic_irqs[i][OPENPIC_OUTPUT_DEBUG] = NULL; /* Check this */ openpic_irqs[i][OPENPIC_OUTPUT_RESET] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_HRESET]; break; #if defined(TARGET_PPC64) case PPC_FLAGS_INPUT_970: openpic_irqs[i] = openpic_irqs[0] + (i * OPENPIC_OUTPUT_NB); openpic_irqs[i][OPENPIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT]; openpic_irqs[i][OPENPIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT]; openpic_irqs[i][OPENPIC_OUTPUT_MCK] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_MCP]; /* Not connected ? */ openpic_irqs[i][OPENPIC_OUTPUT_DEBUG] = NULL; /* Check this */ openpic_irqs[i][OPENPIC_OUTPUT_RESET] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_HRESET]; break; #endif /* defined(TARGET_PPC64) */ default: hw_error("Bus model not supported on mac99 machine\n"); exit(1); } } pic = openpic_init(NULL, &pic_mem, smp_cpus, openpic_irqs, NULL); if (PPC_INPUT(env) == PPC_FLAGS_INPUT_970) { /* 970 gets a U3 bus */ pci_bus = pci_pmac_u3_init(pic, get_system_memory(), get_system_io()); machine_arch = ARCH_MAC99_U3; } else { pci_bus = pci_pmac_init(pic, get_system_memory(), get_system_io()); machine_arch = ARCH_MAC99; } /* init basic PC hardware */ pci_vga_init(pci_bus); escc_mem = escc_init(0x80013000, pic[0x25], pic[0x24], serial_hds[0], serial_hds[1], ESCC_CLOCK, 4); for(i = 0; i < nb_nics; i++) pci_nic_init_nofail(&nd_table[i], "ne2k_pci", NULL); ide_drive_get(hd, MAX_IDE_BUS); dbdma = DBDMA_init(&dbdma_mem); /* We only emulate 2 out of 3 IDE controllers for now */ ide_mem[0] = NULL; ide_mem[1] = pmac_ide_init(hd, pic[0x0d], dbdma, 0x16, pic[0x02]); ide_mem[2] = pmac_ide_init(&hd[MAX_IDE_DEVS], pic[0x0e], dbdma, 0x1a, pic[0x02]); /* cuda also initialize ADB */ if (machine_arch == ARCH_MAC99_U3) { usb_enabled = 1; } cuda_init(&cuda_mem, pic[0x19]); adb_kbd_init(&adb_bus); adb_mouse_init(&adb_bus); macio_init(pci_bus, PCI_DEVICE_ID_APPLE_UNI_N_KEYL, 0, pic_mem, dbdma_mem, cuda_mem, NULL, 3, ide_mem, escc_mem); if (usb_enabled) { usb_ohci_init_pci(pci_bus, -1); } /* U3 needs to use USB for input because Linux doesn't support via-cuda on PPC64 */ if (machine_arch == ARCH_MAC99_U3) { usbdevice_create("keyboard"); usbdevice_create("mouse"); } if (graphic_depth != 15 && graphic_depth != 32 && graphic_depth != 8) graphic_depth = 15; /* The NewWorld NVRAM is not located in the MacIO device */ nvr = 
macio_nvram_init(0x2000, 1); pmac_format_nvram_partition(nvr, 0x2000); macio_nvram_setup_bar(nvr, get_system_memory(), 0xFFF04000); /* No PCI init: the BIOS will do it */ fw_cfg = fw_cfg_init(0, 0, CFG_ADDR, CFG_ADDR + 2); fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, machine_arch); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); if (kernel_cmdline) { fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, cmdline_base); pstrcpy_targphys("cmdline", cmdline_base, TARGET_PAGE_SIZE, kernel_cmdline); } else { fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, 0); } fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_base); fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ppc_boot_device); fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_WIDTH, graphic_width); fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_HEIGHT, graphic_height); fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_DEPTH, graphic_depth); fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled()); if (kvm_enabled()) { #ifdef CONFIG_KVM uint8_t *hypercall; fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, kvmppc_get_tbfreq()); hypercall = g_malloc(16); kvmppc_get_hypercall(env, hypercall, 16); fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16); fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid()); #endif } else { fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, get_ticks_per_sec()); } qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); } | 3,135 |
1 | void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len, int is_write, target_phys_addr_t access_len) { if (buffer != bounce.buffer) { if (is_write) { unsigned long addr1 = (uint8_t *)buffer - phys_ram_base; while (access_len) { unsigned l; l = TARGET_PAGE_SIZE; if (l > access_len) l = access_len; if (!cpu_physical_memory_is_dirty(addr1)) { /* invalidate code */ tb_invalidate_phys_page_range(addr1, addr1 + l, 0); /* set dirty bit */ phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= (0xff & ~CODE_DIRTY_FLAG); } addr1 += l; access_len -= l; } } return; } if (is_write) { cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len); } qemu_free(bounce.buffer); bounce.buffer = NULL; } | 3,136 |
1 | static int mov_read_glbl(MOVContext *c, AVIOContext *pb, MOVAtom atom) { AVStream *st; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; if ((uint64_t)atom.size > (1<<30)) return AVERROR_INVALIDDATA; if (atom.size >= 10) { // Broken files created by legacy versions of libavformat will // wrap a whole fiel atom inside of a glbl atom. unsigned size = avio_rb32(pb); unsigned type = avio_rl32(pb); avio_seek(pb, -8, SEEK_CUR); if (type == MKTAG('f','i','e','l') && size == atom.size) return mov_read_default(c, pb, atom); } av_free(st->codec->extradata); st->codec->extradata = av_mallocz(atom.size + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); st->codec->extradata_size = atom.size; avio_read(pb, st->codec->extradata, atom.size); return 0; } | 3,137 |
1 | void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) { mm_flags = mm_support(); if (avctx->dsp_mask) { if (avctx->dsp_mask & FF_MM_FORCE) mm_flags |= (avctx->dsp_mask & 0xffff); else mm_flags &= ~(avctx->dsp_mask & 0xffff); } #if 0 av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:"); if (mm_flags & MM_MMX) av_log(avctx, AV_LOG_INFO, " mmx"); if (mm_flags & MM_MMXEXT) av_log(avctx, AV_LOG_INFO, " mmxext"); if (mm_flags & MM_3DNOW) av_log(avctx, AV_LOG_INFO, " 3dnow"); if (mm_flags & MM_SSE) av_log(avctx, AV_LOG_INFO, " sse"); if (mm_flags & MM_SSE2) av_log(avctx, AV_LOG_INFO, " sse2"); av_log(avctx, AV_LOG_INFO, "\n"); #endif if (mm_flags & MM_MMX) { const int idct_algo= avctx->idct_algo; #ifdef CONFIG_ENCODERS const int dct_algo = avctx->dct_algo; if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){ if(mm_flags & MM_SSE2){ c->fdct = ff_fdct_sse2; }else if(mm_flags & MM_MMXEXT){ c->fdct = ff_fdct_mmx2; }else{ c->fdct = ff_fdct_mmx; } } #endif //CONFIG_ENCODERS if(avctx->lowres==0){ if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){ c->idct_put= ff_simple_idct_put_mmx; c->idct_add= ff_simple_idct_add_mmx; c->idct = ff_simple_idct_mmx; c->idct_permutation_type= FF_SIMPLE_IDCT_PERM; #ifdef CONFIG_GPL }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){ if(mm_flags & MM_MMXEXT){ c->idct_put= ff_libmpeg2mmx2_idct_put; c->idct_add= ff_libmpeg2mmx2_idct_add; c->idct = ff_mmxext_idct; }else{ c->idct_put= ff_libmpeg2mmx_idct_put; c->idct_add= ff_libmpeg2mmx_idct_add; c->idct = ff_mmx_idct; } c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM; #endif }else if((ENABLE_VP3_DECODER || ENABLE_VP5_DECODER || ENABLE_VP6_DECODER) && idct_algo==FF_IDCT_VP3 && avctx->codec->id!=CODEC_ID_THEORA && !(avctx->flags & CODEC_FLAG_BITEXACT)){ if(mm_flags & MM_SSE2){ c->idct_put= ff_vp3_idct_put_sse2; c->idct_add= ff_vp3_idct_add_sse2; c->idct = ff_vp3_idct_sse2; c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; }else{ ff_vp3_dsp_init_mmx(); c->idct_put= ff_vp3_idct_put_mmx; c->idct_add= ff_vp3_idct_add_mmx; c->idct = ff_vp3_idct_mmx; c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM; } }else if(idct_algo==FF_IDCT_CAVS){ c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; }else if(idct_algo==FF_IDCT_XVIDMMX){ if(mm_flags & MM_MMXEXT){ c->idct_put= ff_idct_xvid_mmx2_put; c->idct_add= ff_idct_xvid_mmx2_add; c->idct = ff_idct_xvid_mmx2; }else{ c->idct_put= ff_idct_xvid_mmx_put; c->idct_add= ff_idct_xvid_mmx_add; c->idct = ff_idct_xvid_mmx; } } } #ifdef CONFIG_ENCODERS c->get_pixels = get_pixels_mmx; c->diff_pixels = diff_pixels_mmx; #endif //CONFIG_ENCODERS c->put_pixels_clamped = put_pixels_clamped_mmx; c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx; c->add_pixels_clamped = add_pixels_clamped_mmx; c->clear_blocks = clear_blocks_mmx; #ifdef CONFIG_ENCODERS c->pix_sum = pix_sum16_mmx; #endif //CONFIG_ENCODERS c->put_pixels_tab[0][0] = put_pixels16_mmx; c->put_pixels_tab[0][1] = put_pixels16_x2_mmx; c->put_pixels_tab[0][2] = put_pixels16_y2_mmx; c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx; c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx; c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx; c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx; c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx; c->avg_pixels_tab[0][0] = avg_pixels16_mmx; c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx; c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx; c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx; c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx; c->avg_no_rnd_pixels_tab[0][1] = 
avg_no_rnd_pixels16_x2_mmx; c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx; c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx; c->put_pixels_tab[1][0] = put_pixels8_mmx; c->put_pixels_tab[1][1] = put_pixels8_x2_mmx; c->put_pixels_tab[1][2] = put_pixels8_y2_mmx; c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx; c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx; c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx; c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx; c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx; c->avg_pixels_tab[1][0] = avg_pixels8_mmx; c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx; c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx; c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx; c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx; c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx; c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx; c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx; c->gmc= gmc_mmx; c->add_bytes= add_bytes_mmx; #ifdef CONFIG_ENCODERS c->diff_bytes= diff_bytes_mmx; c->sum_abs_dctelem= sum_abs_dctelem_mmx; c->hadamard8_diff[0]= hadamard8_diff16_mmx; c->hadamard8_diff[1]= hadamard8_diff_mmx; c->pix_norm1 = pix_norm1_mmx; c->sse[0] = (mm_flags & MM_SSE2) ? sse16_sse2 : sse16_mmx; c->sse[1] = sse8_mmx; c->vsad[4]= vsad_intra16_mmx; c->nsse[0] = nsse16_mmx; c->nsse[1] = nsse8_mmx; if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->vsad[0] = vsad16_mmx; } if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->try_8x8basis= try_8x8basis_mmx; } c->add_8x8basis= add_8x8basis_mmx; c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx; #endif //CONFIG_ENCODERS if (ENABLE_ANY_H263) { c->h263_v_loop_filter= h263_v_loop_filter_mmx; c->h263_h_loop_filter= h263_h_loop_filter_mmx; } c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx; c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx; c->h264_idct_dc_add= c->h264_idct_add= ff_h264_idct_add_mmx; c->h264_idct8_dc_add= c->h264_idct8_add= ff_h264_idct8_add_mmx; if (mm_flags & MM_MMXEXT) { c->prefetch = prefetch_mmx2; c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2; c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2; c->avg_pixels_tab[0][0] = avg_pixels16_mmx2; c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2; c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2; c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2; c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2; c->avg_pixels_tab[1][0] = avg_pixels8_mmx2; c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2; c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2; #ifdef CONFIG_ENCODERS c->sum_abs_dctelem= sum_abs_dctelem_mmx2; c->hadamard8_diff[0]= hadamard8_diff16_mmx2; c->hadamard8_diff[1]= hadamard8_diff_mmx2; c->vsad[4]= vsad_intra16_mmx2; #endif //CONFIG_ENCODERS c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2; c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2; if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2; c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2; c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2; c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2; c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2; c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2; #ifdef CONFIG_ENCODERS c->vsad[0] = vsad16_mmx2; #endif //CONFIG_ENCODERS } #if 1 SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2) 
SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2) SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2) #endif //FIXME 3dnow too #define dspfunc(PFX, IDX, NUM) \ c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_mmx2; \ c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_mmx2; \ c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_mmx2; \ c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_mmx2; \ c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_mmx2; \ c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_mmx2; \ c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_mmx2; \ c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_mmx2; \ c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_mmx2; \ c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_mmx2; \ c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_mmx2; \ c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_mmx2; \ c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_mmx2; \ c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_mmx2; \ c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_mmx2; \ c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_mmx2 dspfunc(put_h264_qpel, 0, 16); dspfunc(put_h264_qpel, 1, 8); dspfunc(put_h264_qpel, 2, 4); dspfunc(avg_h264_qpel, 0, 16); dspfunc(avg_h264_qpel, 1, 8); dspfunc(avg_h264_qpel, 2, 4); dspfunc(put_2tap_qpel, 0, 16); dspfunc(put_2tap_qpel, 1, 8); dspfunc(avg_2tap_qpel, 0, 16); dspfunc(avg_2tap_qpel, 1, 8); #undef dspfunc c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2; c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2; c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2; c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2; c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2; c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2; c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2; c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2; c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2; c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2; 
c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2; c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2; c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2; c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2; c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2; c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2; c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2; c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2; c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2; c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2; c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2; c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2; c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2; c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2; c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2; c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2; c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2; #ifdef CONFIG_CAVS_DECODER ff_cavsdsp_init_mmx2(c, avctx); #endif #ifdef CONFIG_ENCODERS c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2; #endif //CONFIG_ENCODERS } else if (mm_flags & MM_3DNOW) { c->prefetch = prefetch_3dnow; c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow; c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow; c->avg_pixels_tab[0][0] = avg_pixels16_3dnow; c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow; c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow; c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow; c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow; c->avg_pixels_tab[1][0] = avg_pixels8_3dnow; c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow; c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow; if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow; c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow; c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow; c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow; c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow; c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow; } SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], 
qpel8_mc12_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow) SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow) #define dspfunc(PFX, IDX, NUM) \ c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_3dnow; \ c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_3dnow; \ c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_3dnow; \ c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_3dnow; \ c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_3dnow; \ c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_3dnow; \ c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_3dnow; \ c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_3dnow; \ c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_3dnow; \ c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_3dnow; \ c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_3dnow; \ c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_3dnow; \ c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_3dnow; \ c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_3dnow; \ c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_3dnow; \ c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_3dnow dspfunc(put_h264_qpel, 0, 16); dspfunc(put_h264_qpel, 1, 8); dspfunc(put_h264_qpel, 2, 4); dspfunc(avg_h264_qpel, 0, 16); dspfunc(avg_h264_qpel, 1, 8); dspfunc(avg_h264_qpel, 2, 4); dspfunc(put_2tap_qpel, 0, 16); dspfunc(put_2tap_qpel, 1, 8); dspfunc(avg_2tap_qpel, 0, 16); dspfunc(avg_2tap_qpel, 1, 8); c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow; c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow; } #ifdef CONFIG_ENCODERS if(mm_flags & MM_SSE2){ c->sum_abs_dctelem= sum_abs_dctelem_sse2; c->hadamard8_diff[0]= hadamard8_diff16_sse2; c->hadamard8_diff[1]= hadamard8_diff_sse2; } #ifdef HAVE_SSSE3 if(mm_flags & MM_SSSE3){ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->try_8x8basis= try_8x8basis_ssse3; } c->add_8x8basis= add_8x8basis_ssse3; c->sum_abs_dctelem= sum_abs_dctelem_ssse3; c->hadamard8_diff[0]= hadamard8_diff16_ssse3; c->hadamard8_diff[1]= hadamard8_diff_ssse3; } #endif #endif #ifdef CONFIG_SNOW_DECODER #if 0 if(mm_flags & MM_SSE2){ c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2; c->vertical_compose97i = ff_snow_vertical_compose97i_sse2; c->inner_add_yblock = ff_snow_inner_add_yblock_sse2; } else{ c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx; c->vertical_compose97i = ff_snow_vertical_compose97i_mmx; c->inner_add_yblock = ff_snow_inner_add_yblock_mmx; } #endif #endif if(mm_flags & MM_3DNOW){ #ifdef CONFIG_ENCODERS if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->try_8x8basis= try_8x8basis_3dnow; } c->add_8x8basis= add_8x8basis_3dnow; #endif //CONFIG_ENCODERS c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow; c->vector_fmul = vector_fmul_3dnow; if(!(avctx->flags & CODEC_FLAG_BITEXACT)) c->float_to_int16 = float_to_int16_3dnow; } if(mm_flags & MM_3DNOWEXT) c->vector_fmul_reverse = vector_fmul_reverse_3dnow2; if(mm_flags & MM_SSE){ c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse; c->vector_fmul = vector_fmul_sse; c->float_to_int16 = float_to_int16_sse; c->vector_fmul_reverse = vector_fmul_reverse_sse; c->vector_fmul_add_add = vector_fmul_add_add_sse; } if(mm_flags & MM_3DNOW) c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse } #ifdef CONFIG_ENCODERS 
dsputil_init_pix_mmx(c, avctx); #endif //CONFIG_ENCODERS #if 0 // for speed testing get_pixels = just_return; put_pixels_clamped = just_return; add_pixels_clamped = just_return; pix_abs16x16 = just_return; pix_abs16x16_x2 = just_return; pix_abs16x16_y2 = just_return; pix_abs16x16_xy2 = just_return; put_pixels_tab[0] = just_return; put_pixels_tab[1] = just_return; put_pixels_tab[2] = just_return; put_pixels_tab[3] = just_return; put_no_rnd_pixels_tab[0] = just_return; put_no_rnd_pixels_tab[1] = just_return; put_no_rnd_pixels_tab[2] = just_return; put_no_rnd_pixels_tab[3] = just_return; avg_pixels_tab[0] = just_return; avg_pixels_tab[1] = just_return; avg_pixels_tab[2] = just_return; avg_pixels_tab[3] = just_return; avg_no_rnd_pixels_tab[0] = just_return; avg_no_rnd_pixels_tab[1] = just_return; avg_no_rnd_pixels_tab[2] = just_return; avg_no_rnd_pixels_tab[3] = just_return; //av_fdct = just_return; //ff_idct = just_return; #endif } | 3,138 |
1 | static int nppscale_resize(AVFilterContext *ctx, NPPScaleStageContext *stage, AVFrame *out, AVFrame *in) { NPPScaleContext *s = ctx->priv; NppStatus err; int i; for (i = 0; i < FF_ARRAY_ELEMS(in->data) && in->data[i]; i++) { int iw = stage->planes_in[i].width; int ih = stage->planes_in[i].height; int ow = stage->planes_out[i].width; int oh = stage->planes_out[i].height; err = nppiResizeSqrPixel_8u_C1R(in->data[i], (NppiSize){ iw, ih }, in->linesize[i], (NppiRect){ 0, 0, iw, ih }, out->data[i], out->linesize[i], (NppiRect){ 0, 0, ow, oh }, (double)ow / iw, (double)oh / ih, 0.0, 0.0, s->interp_algo); if (err != NPP_SUCCESS) { av_log(ctx, AV_LOG_ERROR, "NPP resize error: %d\n", err); return AVERROR_UNKNOWN; } } return 0; } | 3,139 |
1 | static int init_report(const char *env) { const char *filename_template = "%p-%t.log"; char *key, *val; int ret, count = 0; time_t now; struct tm *tm; AVBPrint filename; if (report_file) /* already opened */ return 0; time(&now); tm = localtime(&now); while (env && *env) { if ((ret = av_opt_get_key_value(&env, "=", ":", 0, &key, &val)) < 0) { if (count) av_log(NULL, AV_LOG_ERROR, "Failed to parse FFREPORT environment variable: %s\n", av_err2str(ret)); break; } if (*env) env++; count++; if (!strcmp(key, "file")) { filename_template = val; val = NULL; } else { av_log(NULL, AV_LOG_ERROR, "Unknown key '%s' in FFREPORT\n", key); } av_free(val); av_free(key); } av_bprint_init(&filename, 0, 1); expand_filename_template(&filename, filename_template, tm); if (!av_bprint_is_complete(&filename)) { av_log(NULL, AV_LOG_ERROR, "Out of memory building report file name\n"); return AVERROR(ENOMEM); } report_file = fopen(filename.str, "w"); if (!report_file) { av_log(NULL, AV_LOG_ERROR, "Failed to open report \"%s\": %s\n", filename.str, strerror(errno)); return AVERROR(errno); } av_log_set_callback(log_callback_report); av_log(NULL, AV_LOG_INFO, "%s started on %04d-%02d-%02d at %02d:%02d:%02d\n" "Report written to \"%s\"\n", program_name, tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec, filename.str); av_log_set_level(FFMAX(av_log_get_level(), AV_LOG_VERBOSE)); av_bprint_finalize(&filename, NULL); return 0; } | 3,140 |
1 | void apic_deliver_pic_intr(DeviceState *dev, int level) { APICCommonState *s = APIC_COMMON(dev); if (level) { apic_local_deliver(s, APIC_LVT_LINT0); } else { uint32_t lvt = s->lvt[APIC_LVT_LINT0]; switch ((lvt >> 8) & 7) { case APIC_DM_FIXED: if (!(lvt & APIC_LVT_LEVEL_TRIGGER)) break; apic_reset_bit(s->irr, lvt & 0xff); /* fall through */ case APIC_DM_EXTINT: cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_HARD); break; } } } | 3,141 |
1 | void rgb15tobgr16(const uint8_t *src, uint8_t *dst, unsigned int src_size) { unsigned i; unsigned num_pixels = src_size >> 1; for(i=0; i<num_pixels; i++) { unsigned b,g,r; register uint16_t rgb; rgb = src[2*i]; r = rgb&0x1F; g = (rgb&0x3E0)>>5; b = (rgb&0x7C00)>>10; dst[2*i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11); } } | 3,142 |
1 | static void vnc_connect(VncDisplay *vd, int csock, int skipauth) { VncState *vs = g_malloc0(sizeof(VncState)); int i; vs->csock = csock; if (skipauth) { vs->auth = VNC_AUTH_NONE; #ifdef CONFIG_VNC_TLS vs->subauth = VNC_AUTH_INVALID; #endif } else { vs->auth = vd->auth; #ifdef CONFIG_VNC_TLS vs->subauth = vd->subauth; #endif } vs->lossy_rect = g_malloc0(VNC_STAT_ROWS * sizeof (*vs->lossy_rect)); for (i = 0; i < VNC_STAT_ROWS; ++i) { vs->lossy_rect[i] = g_malloc0(VNC_STAT_COLS * sizeof (uint8_t)); } VNC_DEBUG("New client on socket %d\n", csock); dcl->idle = 0; socket_set_nonblock(vs->csock); qemu_set_fd_handler2(vs->csock, NULL, vnc_client_read, NULL, vs); vnc_client_cache_addr(vs); vnc_qmp_event(vs, QEVENT_VNC_CONNECTED); vnc_set_share_mode(vs, VNC_SHARE_MODE_CONNECTING); vs->vd = vd; vs->ds = vd->ds; vs->last_x = -1; vs->last_y = -1; vs->as.freq = 44100; vs->as.nchannels = 2; vs->as.fmt = AUD_FMT_S16; vs->as.endianness = 0; #ifdef CONFIG_VNC_THREAD qemu_mutex_init(&vs->output_mutex); #endif QTAILQ_INSERT_HEAD(&vd->clients, vs, next); vga_hw_update(); vnc_write(vs, "RFB 003.008\n", 12); vnc_flush(vs); vnc_read_when(vs, protocol_version, 12); reset_keys(vs); if (vs->vd->lock_key_sync) vs->led = qemu_add_led_event_handler(kbd_leds, vs); vs->mouse_mode_notifier.notify = check_pointer_type_change; qemu_add_mouse_mode_change_notifier(&vs->mouse_mode_notifier); vnc_init_timer(vd); /* vs might be free()ed here */ } | 3,143 |
1 | void pcnet_h_reset(void *opaque) { PCNetState *s = opaque; int i; uint16_t checksum; /* Initialize the PROM */ memcpy(s->prom, s->conf.macaddr.a, 6); s->prom[12] = s->prom[13] = 0x00; s->prom[14] = s->prom[15] = 0x57; for (i = 0,checksum = 0; i < 16; i++) checksum += s->prom[i]; *(uint16_t *)&s->prom[12] = cpu_to_le16(checksum); s->bcr[BCR_MSRDA] = 0x0005; s->bcr[BCR_MSWRA] = 0x0005; s->bcr[BCR_MC ] = 0x0002; s->bcr[BCR_LNKST] = 0x00c0; s->bcr[BCR_LED1 ] = 0x0084; s->bcr[BCR_LED2 ] = 0x0088; s->bcr[BCR_LED3 ] = 0x0090; s->bcr[BCR_FDC ] = 0x0000; s->bcr[BCR_BSBC ] = 0x9001; s->bcr[BCR_EECAS] = 0x0002; s->bcr[BCR_SWS ] = 0x0200; s->bcr[BCR_PLAT ] = 0xff06; pcnet_s_reset(s); pcnet_update_irq(s); pcnet_poll_timer(s); } | 3,144 |
0 | void avformat_close_input(AVFormatContext **ps) { AVFormatContext *s = *ps; AVIOContext *pb = (s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ? NULL : s->pb; flush_packet_queue(s); if (s->iformat->read_close) s->iformat->read_close(s); avformat_free_context(s); *ps = NULL; if (pb) avio_close(pb); } | 3,145 |
1 | static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { H264Context *h = avctx->priv_data; MpegEncContext *s = &h->s; AVFrame *pict = data; int buf_index; s->flags= avctx->flags; s->flags2= avctx->flags2; /* no supplementary picture */ if (buf_size == 0) { return 0; } if(s->flags&CODEC_FLAG_TRUNCATED){ int next= find_frame_end(h, buf, buf_size); if( ff_combine_frame(&s->parse_context, next, &buf, &buf_size) < 0 ) return buf_size; //printf("next:%d buf_size:%d last_index:%d\n", next, buf_size, s->parse_context.last_index); } if(h->is_avc && !h->got_avcC) { int i, cnt, nalsize; unsigned char *p = avctx->extradata; if(avctx->extradata_size < 7) { av_log(avctx, AV_LOG_ERROR, "avcC too short\n"); return -1; } if(*p != 1) { av_log(avctx, AV_LOG_ERROR, "Unknown avcC version %d\n", *p); return -1; } /* sps and pps in the avcC always have length coded with 2 bytes, so put a fake nal_length_size = 2 while parsing them */ h->nal_length_size = 2; // Decode sps from avcC cnt = *(p+5) & 0x1f; // Number of sps p += 6; for (i = 0; i < cnt; i++) { nalsize = BE_16(p) + 2; if(decode_nal_units(h, p, nalsize) < 0) { av_log(avctx, AV_LOG_ERROR, "Decoding sps %d from avcC failed\n", i); return -1; } p += nalsize; } // Decode pps from avcC cnt = *(p++); // Number of pps for (i = 0; i < cnt; i++) { nalsize = BE_16(p) + 2; if(decode_nal_units(h, p, nalsize) != nalsize) { av_log(avctx, AV_LOG_ERROR, "Decoding pps %d from avcC failed\n", i); return -1; } p += nalsize; } // Now store right nal length size, that will be use to parse all other nals h->nal_length_size = ((*(((char*)(avctx->extradata))+4))&0x03)+1; // Do not reparse avcC h->got_avcC = 1; } if(!h->is_avc && s->avctx->extradata_size && s->picture_number==0){ if(decode_nal_units(h, s->avctx->extradata, s->avctx->extradata_size) < 0) return -1; } buf_index=decode_nal_units(h, buf, buf_size); if(buf_index < 0) return -1; //FIXME do something with unavailable reference frames // if(ret==FRAME_SKIPPED) return get_consumed_bytes(s, buf_index, buf_size); if(!s->current_picture_ptr){ av_log(h->s.avctx, AV_LOG_DEBUG, "error, NO frame\n"); return -1; } { Picture *out = s->current_picture_ptr; #if 0 //decode order *data_size = sizeof(AVFrame); #else /* Sort B-frames into display order */ Picture *cur = s->current_picture_ptr; Picture *prev = h->delayed_output_pic; int out_idx = 0; int pics = 0; int out_of_order; int cross_idr = 0; int dropped_frame = 0; int i; if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames < h->sps.num_reorder_frames){ s->avctx->has_b_frames = h->sps.num_reorder_frames; s->low_delay = 0; } while(h->delayed_pic[pics]) pics++; h->delayed_pic[pics++] = cur; if(cur->reference == 0) cur->reference = 1; for(i=0; h->delayed_pic[i]; i++) if(h->delayed_pic[i]->key_frame || h->delayed_pic[i]->poc==0) cross_idr = 1; out = h->delayed_pic[0]; for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame; i++) if(h->delayed_pic[i]->poc < out->poc){ out = h->delayed_pic[i]; out_idx = i; } out_of_order = !cross_idr && prev && out->poc < prev->poc; if(prev && pics <= s->avctx->has_b_frames) out = prev; else if((out_of_order && pics-1 == s->avctx->has_b_frames && pics < 15) || (s->low_delay && ((!cross_idr && prev && out->poc > prev->poc + 2) || cur->pict_type == B_TYPE))) { s->low_delay = 0; s->avctx->has_b_frames++; out = prev; } else if(out_of_order) out = prev; if(out_of_order || pics > s->avctx->has_b_frames){ dropped_frame = (out != h->delayed_pic[out_idx]); for(i=out_idx; h->delayed_pic[i]; i++) 
h->delayed_pic[i] = h->delayed_pic[i+1]; } if(prev == out && !dropped_frame) *data_size = 0; else *data_size = sizeof(AVFrame); if(prev && prev != out && prev->reference == 1) prev->reference = 0; h->delayed_output_pic = out; #endif if(out) *pict= *(AVFrame*)out; else av_log(avctx, AV_LOG_DEBUG, "no picture\n"); } assert(pict->data[0] || !*data_size); ff_print_debug_info(s, pict); //printf("out %d\n", (int)pict->data[0]); #if 0 //? /* Return the Picture timestamp as the frame number */ /* we substract 1 because it is added on utils.c */ avctx->frame_number = s->picture_number - 1; #endif return get_consumed_bytes(s, buf_index, buf_size); } | 3,146 |
1 | int av_open_input_file(AVFormatContext **ic_ptr, const char *filename, AVInputFormat *fmt, int buf_size, AVFormatParameters *ap) { int err, probe_size; AVProbeData probe_data, *pd = &probe_data; ByteIOContext *pb = NULL; pd->filename = ""; if (filename) pd->filename = filename; pd->buf = NULL; pd->buf_size = 0; if (!fmt) { /* guess format if no file can be opened */ fmt = av_probe_input_format(pd, 0); } /* Do not open file if the format does not need it. XXX: specific hack needed to handle RTSP/TCP */ if (!fmt || !(fmt->flags & AVFMT_NOFILE)) { /* if no file needed do not try to open one */ if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) { goto fail; } if (buf_size > 0) { url_setbufsize(pb, buf_size); } for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){ int score= probe_size < PROBE_BUF_MAX ? AVPROBE_SCORE_MAX/4 : 0; /* read probe data */ pd->buf= av_realloc(pd->buf, probe_size + AVPROBE_PADDING_SIZE); pd->buf_size = get_buffer(pb, pd->buf, probe_size); memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE); if (url_fseek(pb, 0, SEEK_SET) < 0) { url_fclose(pb); if (url_fopen(&pb, filename, URL_RDONLY) < 0) { pb = NULL; err = AVERROR(EIO); goto fail; } } /* guess file format */ fmt = av_probe_input_format2(pd, 1, &score); } av_freep(&pd->buf); } /* if still no format found, error */ if (!fmt) { err = AVERROR_NOFMT; goto fail; } /* check filename in case an image number is expected */ if (fmt->flags & AVFMT_NEEDNUMBER) { if (!av_filename_number_test(filename)) { err = AVERROR_NUMEXPECTED; goto fail; } } err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap); if (err) goto fail; return 0; fail: av_freep(&pd->buf); if (pb) url_fclose(pb); *ic_ptr = NULL; return err; } | 3,147 |
1 | static void digic_load_rom(DigicBoardState *s, hwaddr addr, hwaddr max_size, const char *def_filename) { target_long rom_size; const char *filename; if (qtest_enabled()) { /* qtest runs no code so don't attempt a ROM load which * could fail and result in a spurious test failure. */ return; } if (bios_name) { filename = bios_name; } else { filename = def_filename; } if (filename) { char *fn = qemu_find_file(QEMU_FILE_TYPE_BIOS, filename); if (!fn) { error_report("Couldn't find rom image '%s'.", filename); exit(1); } rom_size = load_image_targphys(fn, addr, max_size); if (rom_size < 0 || rom_size > max_size) { error_report("Couldn't load rom image '%s'.", filename); exit(1); } } } | 3,148 |
0 | static int hevc_parse_slice_header(AVCodecParserContext *s, H2645NAL *nal, AVCodecContext *avctx) { HEVCParserContext *ctx = s->priv_data; GetBitContext *gb = &nal->gb; HEVCPPS *pps; HEVCSPS *sps; unsigned int pps_id; get_bits1(gb); // first slice in pic if (IS_IRAP_NAL(nal)) get_bits1(gb); // no output of prior pics pps_id = get_ue_golomb_long(gb); if (pps_id >= HEVC_MAX_PPS_COUNT || !ctx->ps.pps_list[pps_id]) { av_log(avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", pps_id); return AVERROR_INVALIDDATA; } pps = (HEVCPPS*)ctx->ps.pps_list[pps_id]->data; sps = (HEVCSPS*)ctx->ps.sps_list[pps->sps_id]->data; /* export the stream parameters */ s->coded_width = sps->width; s->coded_height = sps->height; s->width = sps->output_width; s->height = sps->output_height; s->format = sps->pix_fmt; avctx->profile = sps->ptl.general_ptl.profile_idc; avctx->level = sps->ptl.general_ptl.level_idc; /* ignore the rest for now*/ return 0; } | 3,149 |
1 | void helper_store_msr(CPUPPCState *env, target_ulong val) { uint32_t excp = hreg_store_msr(env, val, 0); if (excp != 0) { CPUState *cs = CPU(ppc_env_get_cpu(env)); cs->interrupt_request |= CPU_INTERRUPT_EXITTB; raise_exception(env, excp); } } | 3,150 |
1 | static int do_qsv_decode(AVCodecContext *avctx, QSVContext *q, AVFrame *frame, int *got_frame, AVPacket *avpkt) { QSVFrame *out_frame; mfxFrameSurface1 *insurf; mfxFrameSurface1 *outsurf; mfxSyncPoint *sync; mfxBitstream bs = { { { 0 } } }; int ret; int n_out_frames; int buffered = 0; int flush = !avpkt->size || q->reinit_pending; if (!q->engine_ready) { ret = qsv_decode_init(avctx, q, avpkt); if (ret) return ret; } if (!flush) { if (av_fifo_size(q->input_fifo)) { /* we have got rest of previous packet into buffer */ if (av_fifo_space(q->input_fifo) < avpkt->size) { ret = av_fifo_grow(q->input_fifo, avpkt->size); if (ret < 0) return ret; } av_fifo_generic_write(q->input_fifo, avpkt->data, avpkt->size, NULL); bs.Data = q->input_fifo->rptr; bs.DataLength = av_fifo_size(q->input_fifo); buffered = 1; } else { bs.Data = avpkt->data; bs.DataLength = avpkt->size; } bs.MaxLength = bs.DataLength; bs.TimeStamp = avpkt->pts; } sync = av_mallocz(sizeof(*sync)); if (!sync) { av_freep(&sync); return AVERROR(ENOMEM); } while (1) { ret = get_surface(avctx, q, &insurf); if (ret < 0) return ret; do { ret = MFXVideoDECODE_DecodeFrameAsync(q->session, flush ? NULL : &bs, insurf, &outsurf, sync); if (ret != MFX_WRN_DEVICE_BUSY) break; av_usleep(500); } while (1); if (MFX_WRN_VIDEO_PARAM_CHANGED==ret) { /* TODO: handle here minor sequence header changing */ } else if (MFX_ERR_INCOMPATIBLE_VIDEO_PARAM==ret) { av_fifo_reset(q->input_fifo); flush = q->reinit_pending = 1; continue; } if (*sync) { QSVFrame *out_frame = find_frame(q, outsurf); if (!out_frame) { av_freep(&sync); av_log(avctx, AV_LOG_ERROR, "The returned surface does not correspond to any frame\n"); return AVERROR_BUG; } out_frame->queued = 1; av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL); av_fifo_generic_write(q->async_fifo, &sync, sizeof(sync), NULL); continue; } else { av_freep(&sync); } if (MFX_ERR_MORE_SURFACE != ret && ret < 0) break; } /* make sure we do not enter an infinite loop if the SDK * did not consume any data and did not return anything */ if (!*sync && !bs.DataOffset && !flush) { av_log(avctx, AV_LOG_WARNING, "A decode call did not consume any data\n"); bs.DataOffset = avpkt->size; } if (buffered) { qsv_fifo_relocate(q->input_fifo, bs.DataOffset); } else if (bs.DataOffset!=avpkt->size) { /* some data of packet was not consumed. store it to local buffer */ av_fifo_generic_write(q->input_fifo, avpkt->data+bs.DataOffset, avpkt->size - bs.DataOffset, NULL); } if (MFX_ERR_MORE_DATA!=ret && ret < 0) { av_freep(&sync); av_log(avctx, AV_LOG_ERROR, "Error %d during QSV decoding.\n", ret); return ff_qsv_error(ret); } n_out_frames = av_fifo_size(q->async_fifo) / (sizeof(out_frame)+sizeof(sync)); if (n_out_frames > q->async_depth || (flush && n_out_frames) ) { AVFrame *src_frame; av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL); av_fifo_generic_read(q->async_fifo, &sync, sizeof(sync), NULL); out_frame->queued = 0; do { ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000); } while (ret == MFX_WRN_IN_EXECUTION); av_freep(&sync); src_frame = out_frame->frame; ret = av_frame_ref(frame, src_frame); if (ret < 0) return ret; outsurf = out_frame->surface; frame->pkt_pts = frame->pts = outsurf->Data.TimeStamp; frame->repeat_pict = outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 : outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 : outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 
1 : 0; frame->top_field_first = outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF; frame->interlaced_frame = !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE); *got_frame = 1; } return avpkt->size; } | 3,151 |
1 | static int local_rename(FsContext *ctx, const char *oldpath, const char *newpath) { int err; char *buffer, *buffer1; if (ctx->export_flags & V9FS_SM_MAPPED_FILE) { err = local_create_mapped_attr_dir(ctx, newpath); if (err < 0) { return err; } /* rename the .virtfs_metadata files */ buffer = local_mapped_attr_path(ctx, oldpath); buffer1 = local_mapped_attr_path(ctx, newpath); err = rename(buffer, buffer1); g_free(buffer); g_free(buffer1); if (err < 0 && errno != ENOENT) { return err; } } buffer = rpath(ctx, oldpath); buffer1 = rpath(ctx, newpath); err = rename(buffer, buffer1); g_free(buffer); g_free(buffer1); return err; } | 3,152 |
0 | static int16_t long_term_filter(DSPContext *dsp, int pitch_delay_int, const int16_t* residual, int16_t *residual_filt, int subframe_size) { int i, k, tmp, tmp2; int sum; int L_temp0; int L_temp1; int64_t L64_temp0; int64_t L64_temp1; int16_t shift; int corr_int_num, corr_int_den; int ener; int16_t sh_ener; int16_t gain_num,gain_den; //selected signal's gain numerator and denominator int16_t sh_gain_num, sh_gain_den; int gain_num_square; int16_t gain_long_num,gain_long_den; //filtered through long interpolation filter signal's gain numerator and denominator int16_t sh_gain_long_num, sh_gain_long_den; int16_t best_delay_int, best_delay_frac; int16_t delayed_signal_offset; int lt_filt_factor_a, lt_filt_factor_b; int16_t * selected_signal; const int16_t * selected_signal_const; //Necessary to avoid compiler warning int16_t sig_scaled[SUBFRAME_SIZE + RES_PREV_DATA_SIZE]; int16_t delayed_signal[ANALYZED_FRAC_DELAYS][SUBFRAME_SIZE+1]; int corr_den[ANALYZED_FRAC_DELAYS][2]; tmp = 0; for(i=0; i<subframe_size + RES_PREV_DATA_SIZE; i++) tmp |= FFABS(residual[i]); if(!tmp) shift = 3; else shift = av_log2(tmp) - 11; if (shift > 0) for (i = 0; i < subframe_size + RES_PREV_DATA_SIZE; i++) sig_scaled[i] = residual[i] >> shift; else for (i = 0; i < subframe_size + RES_PREV_DATA_SIZE; i++) sig_scaled[i] = residual[i] << -shift; /* Start of best delay searching code */ gain_num = 0; ener = dsp->scalarproduct_int16(sig_scaled + RES_PREV_DATA_SIZE, sig_scaled + RES_PREV_DATA_SIZE, subframe_size); if (ener) { sh_ener = FFMAX(av_log2(ener) - 14, 0); ener >>= sh_ener; /* Search for best pitch delay. sum{ r(n) * r(k,n) ] }^2 R'(k)^2 := ------------------------- sum{ r(k,n) * r(k,n) } R(T) := sum{ r(n) * r(n-T) ] } where r(n-T) is integer delayed signal with delay T r(k,n) is non-integer delayed signal with integer delay best_delay and fractional delay k */ /* Find integer delay best_delay which maximizes correlation R(T). This is also equals to numerator of R'(0), since the fine search (second step) is done with 1/8 precision around best_delay. */ corr_int_num = 0; best_delay_int = pitch_delay_int - 1; for (i = pitch_delay_int - 1; i <= pitch_delay_int + 1; i++) { sum = dsp->scalarproduct_int16(sig_scaled + RES_PREV_DATA_SIZE, sig_scaled + RES_PREV_DATA_SIZE - i, subframe_size); if (sum > corr_int_num) { corr_int_num = sum; best_delay_int = i; } } if (corr_int_num) { /* Compute denominator of pseudo-normalized correlation R'(0). */ corr_int_den = dsp->scalarproduct_int16(sig_scaled - best_delay_int + RES_PREV_DATA_SIZE, sig_scaled - best_delay_int + RES_PREV_DATA_SIZE, subframe_size); /* Compute signals with non-integer delay k (with 1/8 precision), where k is in [0;6] range. Entire delay is qual to best_delay+(k+1)/8 This is archieved by applying an interpolation filter of legth 33 to source signal. */ for (k = 0; k < ANALYZED_FRAC_DELAYS; k++) { ff_acelp_interpolate(&delayed_signal[k][0], &sig_scaled[RES_PREV_DATA_SIZE - best_delay_int], ff_g729_interp_filt_short, ANALYZED_FRAC_DELAYS+1, 8 - k - 1, SHORT_INT_FILT_LEN, subframe_size + 1); } /* Compute denominator of pseudo-normalized correlation R'(k). corr_den[k][0] is square root of R'(k) denominator, for int(T) == int(T0) corr_den[k][1] is square root of R'(k) denominator, for int(T) == int(T0)+1 Also compute maximum value of above denominators over all k. 
*/ tmp = corr_int_den; for (k = 0; k < ANALYZED_FRAC_DELAYS; k++) { sum = dsp->scalarproduct_int16(&delayed_signal[k][1], &delayed_signal[k][1], subframe_size - 1); corr_den[k][0] = sum + delayed_signal[k][0 ] * delayed_signal[k][0 ]; corr_den[k][1] = sum + delayed_signal[k][subframe_size] * delayed_signal[k][subframe_size]; tmp = FFMAX3(tmp, corr_den[k][0], corr_den[k][1]); } sh_gain_den = av_log2(tmp) - 14; if (sh_gain_den >= 0) { sh_gain_num = FFMAX(sh_gain_den, sh_ener); /* Loop through all k and find delay that maximizes R'(k) correlation. Search is done in [int(T0)-1; intT(0)+1] range with 1/8 precision. */ delayed_signal_offset = 1; best_delay_frac = 0; gain_den = corr_int_den >> sh_gain_den; gain_num = corr_int_num >> sh_gain_num; gain_num_square = gain_num * gain_num; for (k = 0; k < ANALYZED_FRAC_DELAYS; k++) { for (i = 0; i < 2; i++) { int16_t gain_num_short, gain_den_short; int gain_num_short_square; /* Compute numerator of pseudo-normalized correlation R'(k). */ sum = dsp->scalarproduct_int16(&delayed_signal[k][i], sig_scaled + RES_PREV_DATA_SIZE, subframe_size); gain_num_short = FFMAX(sum >> sh_gain_num, 0); /* gain_num_short_square gain_num_square R'(T)^2 = -----------------------, max R'(T)^2= -------------- den gain_den */ gain_num_short_square = gain_num_short * gain_num_short; gain_den_short = corr_den[k][i] >> sh_gain_den; tmp = MULL(gain_num_short_square, gain_den, FRAC_BITS); tmp2 = MULL(gain_num_square, gain_den_short, FRAC_BITS); // R'(T)^2 > max R'(T)^2 if (tmp > tmp2) { gain_num = gain_num_short; gain_den = gain_den_short; gain_num_square = gain_num_short_square; delayed_signal_offset = i; best_delay_frac = k + 1; } } } /* R'(T)^2 2 * --------- < 1 R(0) */ L64_temp0 = (int64_t)gain_num_square << ((sh_gain_num << 1) + 1); L64_temp1 = ((int64_t)gain_den * ener) << (sh_gain_den + sh_ener); if (L64_temp0 < L64_temp1) gain_num = 0; } // if(sh_gain_den >= 0) } // if(corr_int_num) } // if(ener) /* End of best delay searching code */ if (!gain_num) { memcpy(residual_filt, residual + RES_PREV_DATA_SIZE, subframe_size * sizeof(int16_t)); /* Long-term prediction gain is less than 3dB. Long-term postfilter is disabled. */ return 0; } if (best_delay_frac) { /* Recompute delayed signal with an interpolation filter of length 129. */ ff_acelp_interpolate(residual_filt, &sig_scaled[RES_PREV_DATA_SIZE - best_delay_int + delayed_signal_offset], ff_g729_interp_filt_long, ANALYZED_FRAC_DELAYS + 1, 8 - best_delay_frac, LONG_INT_FILT_LEN, subframe_size + 1); /* Compute R'(k) correlation's numerator. */ sum = dsp->scalarproduct_int16(residual_filt, sig_scaled + RES_PREV_DATA_SIZE, subframe_size); if (sum < 0) { gain_long_num = 0; sh_gain_long_num = 0; } else { tmp = FFMAX(av_log2(sum) - 14, 0); sum >>= tmp; gain_long_num = sum; sh_gain_long_num = tmp; } /* Compute R'(k) correlation's denominator. */ sum = dsp->scalarproduct_int16(residual_filt, residual_filt, subframe_size); tmp = FFMAX(av_log2(sum) - 14, 0); sum >>= tmp; gain_long_den = sum; sh_gain_long_den = tmp; /* Select between original and delayed signal. Delayed signal will be selected if it increases R'(k) correlation. */ L_temp0 = gain_num * gain_num; L_temp0 = MULL(L_temp0, gain_long_den, FRAC_BITS); L_temp1 = gain_long_num * gain_long_num; L_temp1 = MULL(L_temp1, gain_den, FRAC_BITS); tmp = ((sh_gain_long_num - sh_gain_num) << 1) - (sh_gain_long_den - sh_gain_den); if (tmp > 0) L_temp0 >>= tmp; else L_temp1 >>= -tmp; /* Check if longer filter increases the values of R'(k). */ if (L_temp1 > L_temp0) { /* Select long filter. 
*/ selected_signal = residual_filt; gain_num = gain_long_num; gain_den = gain_long_den; sh_gain_num = sh_gain_long_num; sh_gain_den = sh_gain_long_den; } else /* Select short filter. */ selected_signal = &delayed_signal[best_delay_frac-1][delayed_signal_offset]; /* Rescale selected signal to original value. */ if (shift > 0) for (i = 0; i < subframe_size; i++) selected_signal[i] <<= shift; else for (i = 0; i < subframe_size; i++) selected_signal[i] >>= -shift; /* necessary to avoid compiler warning */ selected_signal_const = selected_signal; } // if(best_delay_frac) else selected_signal_const = residual + RES_PREV_DATA_SIZE - (best_delay_int + 1 - delayed_signal_offset); #ifdef G729_BITEXACT tmp = sh_gain_num - sh_gain_den; if (tmp > 0) gain_den >>= tmp; else gain_num >>= -tmp; if (gain_num > gain_den) lt_filt_factor_a = MIN_LT_FILT_FACTOR_A; else { gain_num >>= 2; gain_den >>= 1; lt_filt_factor_a = (gain_den << 15) / (gain_den + gain_num); } #else L64_temp0 = ((int64_t)gain_num) << (sh_gain_num - 1); L64_temp1 = ((int64_t)gain_den) << sh_gain_den; lt_filt_factor_a = FFMAX((L64_temp1 << 15) / (L64_temp1 + L64_temp0), MIN_LT_FILT_FACTOR_A); #endif /* Filter through selected filter. */ lt_filt_factor_b = 32767 - lt_filt_factor_a + 1; ff_acelp_weighted_vector_sum(residual_filt, residual + RES_PREV_DATA_SIZE, selected_signal_const, lt_filt_factor_a, lt_filt_factor_b, 1<<14, 15, subframe_size); // Long-term prediction gain is larger than 3dB. return 1; } | 3,154 |
1 | void pc_dimm_memory_plug(DeviceState *dev, MemoryHotplugState *hpms, MemoryRegion *mr, uint64_t align, bool gap, Error **errp) { int slot; MachineState *machine = MACHINE(qdev_get_machine()); PCDIMMDevice *dimm = PC_DIMM(dev); Error *local_err = NULL; uint64_t existing_dimms_capacity = 0; uint64_t addr; addr = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP, &local_err); if (local_err) { addr = pc_dimm_get_free_addr(hpms->base, memory_region_size(&hpms->mr), !addr ? NULL : &addr, align, gap, memory_region_size(mr), &local_err); if (local_err) { existing_dimms_capacity = pc_existing_dimms_capacity(&local_err); if (local_err) { if (existing_dimms_capacity + memory_region_size(mr) > machine->maxram_size - machine->ram_size) { error_setg(&local_err, "not enough space, currently 0x%" PRIx64 " in use of total hot pluggable 0x" RAM_ADDR_FMT, existing_dimms_capacity, machine->maxram_size - machine->ram_size); object_property_set_int(OBJECT(dev), addr, PC_DIMM_ADDR_PROP, &local_err); if (local_err) { trace_mhp_pc_dimm_assigned_address(addr); slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP, &local_err); if (local_err) { slot = pc_dimm_get_free_slot(slot == PC_DIMM_UNASSIGNED_SLOT ? NULL : &slot, machine->ram_slots, &local_err); if (local_err) { object_property_set_int(OBJECT(dev), slot, PC_DIMM_SLOT_PROP, &local_err); if (local_err) { trace_mhp_pc_dimm_assigned_slot(slot); if (kvm_enabled() && !kvm_has_free_slot(machine)) { error_setg(&local_err, "hypervisor has no free memory slots left"); memory_region_add_subregion(&hpms->mr, addr - hpms->base, mr); vmstate_register_ram(mr, dev); numa_set_mem_node_id(addr, memory_region_size(mr), dimm->node); out: error_propagate(errp, local_err); | 3,155 |
1 | static int64_t asf_read_pts(AVFormatContext *s, int64_t *ppos, int stream_index) { ASFContext *asf = s->priv_data; AVPacket pkt1, *pkt = &pkt1; int64_t pos= *ppos; int64_t pts; // ensure we are on the packet boundary assert(pos % asf->packet_size == 0); url_fseek(&s->pb, pos + s->data_offset, SEEK_SET); do{ pos= url_ftell(&s->pb) - s->data_offset; asf_reset_header(s); if (av_read_frame(s, pkt) < 0) return AV_NOPTS_VALUE; pts= pkt->pts; av_free_packet(pkt); }while(pkt->stream_index != stream_index); *ppos= pos; return pts; } | 3,156
1 | static void serial_reset(void *opaque) { SerialState *s = opaque; s->divider = 0; s->rbr = 0; s->ier = 0; s->iir = UART_IIR_NO_INT; s->lcr = 0; s->mcr = 0; s->lsr = UART_LSR_TEMT | UART_LSR_THRE; s->msr = UART_MSR_DCD | UART_MSR_DSR | UART_MSR_CTS; s->scr = 0; s->thr_ipending = 0; s->last_break_enable = 0; qemu_irq_lower(s->irq); } | 3,157 |
1 | static void decode_micromips32_opc (CPUMIPSState *env, DisasContext *ctx, uint16_t insn_hw1, int *is_branch) { int32_t offset; uint16_t insn; int rt, rs, rd, rr; int16_t imm; uint32_t op, minor, mips32_op; uint32_t cond, fmt, cc; insn = lduw_code(ctx->pc + 2); ctx->opcode = (ctx->opcode << 16) | insn; rt = (ctx->opcode >> 21) & 0x1f; rs = (ctx->opcode >> 16) & 0x1f; rd = (ctx->opcode >> 11) & 0x1f; rr = (ctx->opcode >> 6) & 0x1f; imm = (int16_t) ctx->opcode; op = (ctx->opcode >> 26) & 0x3f; switch (op) { case POOL32A: minor = ctx->opcode & 0x3f; switch (minor) { case 0x00: minor = (ctx->opcode >> 6) & 0xf; switch (minor) { case SLL32: mips32_op = OPC_SLL; goto do_shifti; case SRA: mips32_op = OPC_SRA; goto do_shifti; case SRL32: mips32_op = OPC_SRL; goto do_shifti; case ROTR: mips32_op = OPC_ROTR; do_shifti: gen_shift_imm(env, ctx, mips32_op, rt, rs, rd); break; default: goto pool32a_invalid; } break; case 0x10: minor = (ctx->opcode >> 6) & 0xf; switch (minor) { /* Arithmetic */ case ADD: mips32_op = OPC_ADD; goto do_arith; case ADDU32: mips32_op = OPC_ADDU; goto do_arith; case SUB: mips32_op = OPC_SUB; goto do_arith; case SUBU32: mips32_op = OPC_SUBU; goto do_arith; case MUL: mips32_op = OPC_MUL; do_arith: gen_arith(env, ctx, mips32_op, rd, rs, rt); break; /* Shifts */ case SLLV: mips32_op = OPC_SLLV; goto do_shift; case SRLV: mips32_op = OPC_SRLV; goto do_shift; case SRAV: mips32_op = OPC_SRAV; goto do_shift; case ROTRV: mips32_op = OPC_ROTRV; do_shift: gen_shift(env, ctx, mips32_op, rd, rs, rt); break; /* Logical operations */ case AND: mips32_op = OPC_AND; goto do_logic; case OR32: mips32_op = OPC_OR; goto do_logic; case NOR: mips32_op = OPC_NOR; goto do_logic; case XOR32: mips32_op = OPC_XOR; do_logic: gen_logic(env, mips32_op, rd, rs, rt); break; /* Set less than */ case SLT: mips32_op = OPC_SLT; goto do_slt; case SLTU: mips32_op = OPC_SLTU; do_slt: gen_slt(env, mips32_op, rd, rs, rt); break; default: goto pool32a_invalid; } break; case 0x18: minor = (ctx->opcode >> 6) & 0xf; switch (minor) { /* Conditional moves */ case MOVN: mips32_op = OPC_MOVN; goto do_cmov; case MOVZ: mips32_op = OPC_MOVZ; do_cmov: gen_cond_move(env, mips32_op, rd, rs, rt); break; case LWXS: gen_ldxs(ctx, rs, rt, rd); break; default: goto pool32a_invalid; } break; case INS: gen_bitops(ctx, OPC_INS, rt, rs, rr, rd); return; case EXT: gen_bitops(ctx, OPC_EXT, rt, rs, rr, rd); return; case POOL32AXF: gen_pool32axf(env, ctx, rt, rs, is_branch); break; case 0x07: generate_exception(ctx, EXCP_BREAK); break; default: pool32a_invalid: MIPS_INVAL("pool32a"); generate_exception(ctx, EXCP_RI); break; } break; case POOL32B: minor = (ctx->opcode >> 12) & 0xf; switch (minor) { case CACHE: /* Treat as no-op. */ break; case LWC2: case SWC2: /* COP2: Not implemented. 
*/ generate_exception_err(ctx, EXCP_CpU, 2); break; case LWP: case SWP: #ifdef TARGET_MIPS64 case LDP: case SDP: #endif gen_ldst_pair(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12)); break; case LWM32: case SWM32: #ifdef TARGET_MIPS64 case LDM: case SDM: #endif gen_ldst_multiple(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12)); break; default: MIPS_INVAL("pool32b"); generate_exception(ctx, EXCP_RI); break; } break; case POOL32F: if (env->CP0_Config1 & (1 << CP0C1_FP)) { minor = ctx->opcode & 0x3f; check_cp1_enabled(ctx); switch (minor) { case ALNV_PS: mips32_op = OPC_ALNV_PS; goto do_madd; case MADD_S: mips32_op = OPC_MADD_S; goto do_madd; case MADD_D: mips32_op = OPC_MADD_D; goto do_madd; case MADD_PS: mips32_op = OPC_MADD_PS; goto do_madd; case MSUB_S: mips32_op = OPC_MSUB_S; goto do_madd; case MSUB_D: mips32_op = OPC_MSUB_D; goto do_madd; case MSUB_PS: mips32_op = OPC_MSUB_PS; goto do_madd; case NMADD_S: mips32_op = OPC_NMADD_S; goto do_madd; case NMADD_D: mips32_op = OPC_NMADD_D; goto do_madd; case NMADD_PS: mips32_op = OPC_NMADD_PS; goto do_madd; case NMSUB_S: mips32_op = OPC_NMSUB_S; goto do_madd; case NMSUB_D: mips32_op = OPC_NMSUB_D; goto do_madd; case NMSUB_PS: mips32_op = OPC_NMSUB_PS; do_madd: gen_flt3_arith(ctx, mips32_op, rd, rr, rs, rt); break; case CABS_COND_FMT: cond = (ctx->opcode >> 6) & 0xf; cc = (ctx->opcode >> 13) & 0x7; fmt = (ctx->opcode >> 10) & 0x3; switch (fmt) { case 0x0: gen_cmpabs_s(ctx, cond, rt, rs, cc); break; case 0x1: gen_cmpabs_d(ctx, cond, rt, rs, cc); break; case 0x2: gen_cmpabs_ps(ctx, cond, rt, rs, cc); break; default: goto pool32f_invalid; } break; case C_COND_FMT: cond = (ctx->opcode >> 6) & 0xf; cc = (ctx->opcode >> 13) & 0x7; fmt = (ctx->opcode >> 10) & 0x3; switch (fmt) { case 0x0: gen_cmp_s(ctx, cond, rt, rs, cc); break; case 0x1: gen_cmp_d(ctx, cond, rt, rs, cc); break; case 0x2: gen_cmp_ps(ctx, cond, rt, rs, cc); break; default: goto pool32f_invalid; } break; case POOL32FXF: gen_pool32fxf(env, ctx, rt, rs); break; case 0x00: /* PLL foo */ switch ((ctx->opcode >> 6) & 0x7) { case PLL_PS: mips32_op = OPC_PLL_PS; goto do_ps; case PLU_PS: mips32_op = OPC_PLU_PS; goto do_ps; case PUL_PS: mips32_op = OPC_PUL_PS; goto do_ps; case PUU_PS: mips32_op = OPC_PUU_PS; goto do_ps; case CVT_PS_S: mips32_op = OPC_CVT_PS_S; do_ps: gen_farith(ctx, mips32_op, rt, rs, rd, 0); break; default: goto pool32f_invalid; } break; case 0x08: /* [LS][WDU]XC1 */ switch ((ctx->opcode >> 6) & 0x7) { case LWXC1: mips32_op = OPC_LWXC1; goto do_ldst_cp1; case SWXC1: mips32_op = OPC_SWXC1; goto do_ldst_cp1; case LDXC1: mips32_op = OPC_LDXC1; goto do_ldst_cp1; case SDXC1: mips32_op = OPC_SDXC1; goto do_ldst_cp1; case LUXC1: mips32_op = OPC_LUXC1; goto do_ldst_cp1; case SUXC1: mips32_op = OPC_SUXC1; do_ldst_cp1: gen_flt3_ldst(ctx, mips32_op, rd, rd, rt, rs); break; default: goto pool32f_invalid; } break; case 0x18: /* 3D insns */ fmt = (ctx->opcode >> 9) & 0x3; switch ((ctx->opcode >> 6) & 0x7) { case RSQRT2_FMT: switch (fmt) { case FMT_SDPS_S: mips32_op = OPC_RSQRT2_S; goto do_3d; case FMT_SDPS_D: mips32_op = OPC_RSQRT2_D; goto do_3d; case FMT_SDPS_PS: mips32_op = OPC_RSQRT2_PS; goto do_3d; default: goto pool32f_invalid; } break; case RECIP2_FMT: switch (fmt) { case FMT_SDPS_S: mips32_op = OPC_RECIP2_S; goto do_3d; case FMT_SDPS_D: mips32_op = OPC_RECIP2_D; goto do_3d; case FMT_SDPS_PS: mips32_op = OPC_RECIP2_PS; goto do_3d; default: goto pool32f_invalid; } break; case ADDR_PS: mips32_op = OPC_ADDR_PS; goto do_3d; case MULR_PS: mips32_op = OPC_MULR_PS; do_3d: gen_farith(ctx, mips32_op, 
rt, rs, rd, 0); break; default: goto pool32f_invalid; } break; case 0x20: /* MOV[FT].fmt and PREFX */ cc = (ctx->opcode >> 13) & 0x7; fmt = (ctx->opcode >> 9) & 0x3; switch ((ctx->opcode >> 6) & 0x7) { case MOVF_FMT: switch (fmt) { case FMT_SDPS_S: gen_movcf_s(rs, rt, cc, 0); break; case FMT_SDPS_D: gen_movcf_d(ctx, rs, rt, cc, 0); break; case FMT_SDPS_PS: gen_movcf_ps(rs, rt, cc, 0); break; default: goto pool32f_invalid; } break; case MOVT_FMT: switch (fmt) { case FMT_SDPS_S: gen_movcf_s(rs, rt, cc, 1); break; case FMT_SDPS_D: gen_movcf_d(ctx, rs, rt, cc, 1); break; case FMT_SDPS_PS: gen_movcf_ps(rs, rt, cc, 1); break; default: goto pool32f_invalid; } break; case PREFX: break; default: goto pool32f_invalid; } break; #define FINSN_3ARG_SDPS(prfx) \ switch ((ctx->opcode >> 8) & 0x3) { \ case FMT_SDPS_S: \ mips32_op = OPC_##prfx##_S; \ goto do_fpop; \ case FMT_SDPS_D: \ mips32_op = OPC_##prfx##_D; \ goto do_fpop; \ case FMT_SDPS_PS: \ mips32_op = OPC_##prfx##_PS; \ goto do_fpop; \ default: \ goto pool32f_invalid; \ } case 0x30: /* regular FP ops */ switch ((ctx->opcode >> 6) & 0x3) { case ADD_FMT: FINSN_3ARG_SDPS(ADD); break; case SUB_FMT: FINSN_3ARG_SDPS(SUB); break; case MUL_FMT: FINSN_3ARG_SDPS(MUL); break; case DIV_FMT: fmt = (ctx->opcode >> 8) & 0x3; if (fmt == 1) { mips32_op = OPC_DIV_D; } else if (fmt == 0) { mips32_op = OPC_DIV_S; } else { goto pool32f_invalid; } goto do_fpop; default: goto pool32f_invalid; } break; case 0x38: /* cmovs */ switch ((ctx->opcode >> 6) & 0x3) { case MOVN_FMT: FINSN_3ARG_SDPS(MOVN); break; case MOVZ_FMT: FINSN_3ARG_SDPS(MOVZ); break; default: goto pool32f_invalid; } break; do_fpop: gen_farith(ctx, mips32_op, rt, rs, rd, 0); break; default: pool32f_invalid: MIPS_INVAL("pool32f"); generate_exception(ctx, EXCP_RI); break; } } else { generate_exception_err(ctx, EXCP_CpU, 1); } break; case POOL32I: minor = (ctx->opcode >> 21) & 0x1f; switch (minor) { case BLTZ: mips32_op = OPC_BLTZ; goto do_branch; case BLTZAL: mips32_op = OPC_BLTZAL; goto do_branch; case BLTZALS: mips32_op = OPC_BLTZALS; goto do_branch; case BGEZ: mips32_op = OPC_BGEZ; goto do_branch; case BGEZAL: mips32_op = OPC_BGEZAL; goto do_branch; case BGEZALS: mips32_op = OPC_BGEZALS; goto do_branch; case BLEZ: mips32_op = OPC_BLEZ; goto do_branch; case BGTZ: mips32_op = OPC_BGTZ; do_branch: gen_compute_branch(ctx, mips32_op, 4, rs, -1, imm << 1); *is_branch = 1; break; /* Traps */ case TLTI: mips32_op = OPC_TLTI; goto do_trapi; case TGEI: mips32_op = OPC_TGEI; goto do_trapi; case TLTIU: mips32_op = OPC_TLTIU; goto do_trapi; case TGEIU: mips32_op = OPC_TGEIU; goto do_trapi; case TNEI: mips32_op = OPC_TNEI; goto do_trapi; case TEQI: mips32_op = OPC_TEQI; do_trapi: gen_trap(ctx, mips32_op, rs, -1, imm); break; case BNEZC: case BEQZC: gen_compute_branch(ctx, minor == BNEZC ? OPC_BNE : OPC_BEQ, 4, rs, 0, imm << 1); /* Compact branches don't have a delay slot, so just let the normal delay slot handling take us to the branch target. */ break; case LUI: gen_logic_imm(env, OPC_LUI, rs, -1, imm); break; case SYNCI: break; case BC2F: case BC2T: /* COP2: Not implemented. */ generate_exception_err(ctx, EXCP_CpU, 2); break; case BC1F: mips32_op = (ctx->opcode & (1 << 16)) ? OPC_BC1FANY2 : OPC_BC1F; goto do_cp1branch; case BC1T: mips32_op = (ctx->opcode & (1 << 16)) ? 
OPC_BC1TANY2 : OPC_BC1T; goto do_cp1branch; case BC1ANY4F: mips32_op = OPC_BC1FANY4; goto do_cp1mips3d; case BC1ANY4T: mips32_op = OPC_BC1TANY4; do_cp1mips3d: check_cop1x(ctx); check_insn(env, ctx, ASE_MIPS3D); /* Fall through */ do_cp1branch: gen_compute_branch1(env, ctx, mips32_op, (ctx->opcode >> 18) & 0x7, imm << 1); *is_branch = 1; break; case BPOSGE64: case BPOSGE32: /* MIPS DSP: not implemented */ /* Fall through */ default: MIPS_INVAL("pool32i"); generate_exception(ctx, EXCP_RI); break; } break; case POOL32C: minor = (ctx->opcode >> 12) & 0xf; switch (minor) { case LWL: mips32_op = OPC_LWL; goto do_ld_lr; case SWL: mips32_op = OPC_SWL; goto do_st_lr; case LWR: mips32_op = OPC_LWR; goto do_ld_lr; case SWR: mips32_op = OPC_SWR; goto do_st_lr; #if defined(TARGET_MIPS64) case LDL: mips32_op = OPC_LDL; goto do_ld_lr; case SDL: mips32_op = OPC_SDL; goto do_st_lr; case LDR: mips32_op = OPC_LDR; goto do_ld_lr; case SDR: mips32_op = OPC_SDR; goto do_st_lr; case LWU: mips32_op = OPC_LWU; goto do_ld_lr; case LLD: mips32_op = OPC_LLD; goto do_ld_lr; #endif case LL: mips32_op = OPC_LL; goto do_ld_lr; do_ld_lr: gen_ld(env, ctx, mips32_op, rt, rs, SIMM(ctx->opcode, 0, 12)); break; do_st_lr: gen_st(ctx, mips32_op, rt, rs, SIMM(ctx->opcode, 0, 12)); break; case SC: gen_st_cond(ctx, OPC_SC, rt, rs, SIMM(ctx->opcode, 0, 12)); break; #if defined(TARGET_MIPS64) case SCD: gen_st_cond(ctx, OPC_SCD, rt, rs, SIMM(ctx->opcode, 0, 12)); break; #endif case PREF: /* Treat as no-op */ break; default: MIPS_INVAL("pool32c"); generate_exception(ctx, EXCP_RI); break; } break; case ADDI32: mips32_op = OPC_ADDI; goto do_addi; case ADDIU32: mips32_op = OPC_ADDIU; do_addi: gen_arith_imm(env, ctx, mips32_op, rt, rs, imm); break; /* Logical operations */ case ORI32: mips32_op = OPC_ORI; goto do_logici; case XORI32: mips32_op = OPC_XORI; goto do_logici; case ANDI32: mips32_op = OPC_ANDI; do_logici: gen_logic_imm(env, mips32_op, rt, rs, imm); break; /* Set less than immediate */ case SLTI32: mips32_op = OPC_SLTI; goto do_slti; case SLTIU32: mips32_op = OPC_SLTIU; do_slti: gen_slt_imm(env, mips32_op, rt, rs, imm); break; case JALX32: offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2; gen_compute_branch(ctx, OPC_JALX, 4, rt, rs, offset); *is_branch = 1; break; case JALS32: offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 1; gen_compute_branch(ctx, OPC_JALS, 4, rt, rs, offset); *is_branch = 1; break; case BEQ32: gen_compute_branch(ctx, OPC_BEQ, 4, rt, rs, imm << 1); *is_branch = 1; break; case BNE32: gen_compute_branch(ctx, OPC_BNE, 4, rt, rs, imm << 1); *is_branch = 1; break; case J32: gen_compute_branch(ctx, OPC_J, 4, rt, rs, (int32_t)(ctx->opcode & 0x3FFFFFF) << 1); *is_branch = 1; break; case JAL32: gen_compute_branch(ctx, OPC_JAL, 4, rt, rs, (int32_t)(ctx->opcode & 0x3FFFFFF) << 1); *is_branch = 1; break; /* Floating point (COP1) */ case LWC132: mips32_op = OPC_LWC1; goto do_cop1; case LDC132: mips32_op = OPC_LDC1; goto do_cop1; case SWC132: mips32_op = OPC_SWC1; goto do_cop1; case SDC132: mips32_op = OPC_SDC1; do_cop1: gen_cop1_ldst(env, ctx, mips32_op, rt, rs, imm); break; case ADDIUPC: { int reg = mmreg(ZIMM(ctx->opcode, 23, 3)); int offset = SIMM(ctx->opcode, 0, 23) << 2; gen_addiupc(ctx, reg, offset, 0, 0); } break; /* Loads and stores */ case LB32: mips32_op = OPC_LB; goto do_ld; case LBU32: mips32_op = OPC_LBU; goto do_ld; case LH32: mips32_op = OPC_LH; goto do_ld; case LHU32: mips32_op = OPC_LHU; goto do_ld; case LW32: mips32_op = OPC_LW; goto do_ld; #ifdef TARGET_MIPS64 case LD32: mips32_op = OPC_LD; goto do_ld; 
case SD32: mips32_op = OPC_SD; goto do_st; #endif case SB32: mips32_op = OPC_SB; goto do_st; case SH32: mips32_op = OPC_SH; goto do_st; case SW32: mips32_op = OPC_SW; goto do_st; do_ld: gen_ld(env, ctx, mips32_op, rt, rs, imm); break; do_st: gen_st(ctx, mips32_op, rt, rs, imm); break; default: generate_exception(ctx, EXCP_RI); break; } } | 3,159 |
0 | static int xmv_process_packet_header(AVFormatContext *s) { XMVDemuxContext *xmv = s->priv_data; AVIOContext *pb = s->pb; uint8_t data[8]; uint16_t audio_track; uint64_t data_offset; /* Next packet size */ xmv->next_packet_size = avio_rl32(pb); /* Packet video header */ if (avio_read(pb, data, 8) != 8) return AVERROR(EIO); xmv->video.data_size = AV_RL32(data) & 0x007FFFFF; xmv->video.current_frame = 0; xmv->video.frame_count = (AV_RL32(data) >> 23) & 0xFF; xmv->video.has_extradata = (data[3] & 0x80) != 0; /* Adding the audio data sizes and the video data size keeps you 4 bytes * short for every audio track. But as playing around with XMV files with * ADPCM audio showed, taking the extra 4 bytes from the audio data gives * you either completely distorted audio or click (when skipping the * remaining 68 bytes of the ADPCM block). Subtracting 4 bytes for every * audio track from the video data works at least for the audio. Probably * some alignment thing? * The video data has (always?) lots of padding, so it should work out... */ xmv->video.data_size -= xmv->audio_track_count * 4; xmv->current_stream = 0; if (!xmv->video.frame_count) { xmv->video.frame_count = 1; xmv->current_stream = xmv->stream_count > 1; } /* Packet audio header */ for (audio_track = 0; audio_track < xmv->audio_track_count; audio_track++) { XMVAudioPacket *packet = &xmv->audio[audio_track]; if (avio_read(pb, data, 4) != 4) return AVERROR(EIO); packet->data_size = AV_RL32(data) & 0x007FFFFF; if ((packet->data_size == 0) && (audio_track != 0)) /* This happens when I create an XMV with several identical audio * streams. From the size calculations, duplicating the previous * stream's size works out, but the track data itself is silent. * Maybe this should also redirect the offset to the previous track? */ packet->data_size = xmv->audio[audio_track - 1].data_size; /* Carve up the audio data in frame_count slices */ packet->frame_size = packet->data_size / xmv->video.frame_count; packet->frame_size -= packet->frame_size % packet->block_align; } /* Packet data offsets */ data_offset = avio_tell(pb); xmv->video.data_offset = data_offset; data_offset += xmv->video.data_size; for (audio_track = 0; audio_track < xmv->audio_track_count; audio_track++) { xmv->audio[audio_track].data_offset = data_offset; data_offset += xmv->audio[audio_track].data_size; } /* Video frames header */ /* Read new video extra data */ if (xmv->video.data_size > 0) { if (xmv->video.has_extradata) { xmv_read_extradata(xmv->video.extradata, pb); xmv->video.data_size -= 4; xmv->video.data_offset += 4; if (xmv->video.stream_index >= 0) { AVStream *vst = s->streams[xmv->video.stream_index]; av_assert0(xmv->video.stream_index < s->nb_streams); if (vst->codec->extradata_size < 4) { av_freep(&vst->codec->extradata); ff_alloc_extradata(vst->codec, 4); } memcpy(vst->codec->extradata, xmv->video.extradata, 4); } } } return 0; } | 3,163 |
0 | static av_cold int qtrle_decode_init(AVCodecContext *avctx) { QtrleContext *s = avctx->priv_data; s->avctx = avctx; switch (avctx->bits_per_coded_sample) { case 1: case 33: avctx->pix_fmt = AV_PIX_FMT_MONOWHITE; break; case 2: case 4: case 8: case 34: case 36: case 40: avctx->pix_fmt = AV_PIX_FMT_PAL8; break; case 16: avctx->pix_fmt = AV_PIX_FMT_RGB555; break; case 24: avctx->pix_fmt = AV_PIX_FMT_RGB24; break; case 32: avctx->pix_fmt = AV_PIX_FMT_RGB32; break; default: av_log (avctx, AV_LOG_ERROR, "Unsupported colorspace: %d bits/sample?\n", avctx->bits_per_coded_sample); return AVERROR_INVALIDDATA; } s->frame.data[0] = NULL; return 0; } | 3,164 |