label (int64: 0–1) | func1 (string, lengths 23–97k) | id (int64: 0–27.3k) |
---|---|---|
0 | static void ide_atapi_cmd_read_dma(IDEState *s, int lba, int nb_sectors, int sector_size) { s->lba = lba; s->packet_transfer_size = nb_sectors * sector_size; s->io_buffer_index = 0; s->io_buffer_size = 0; s->cd_sector_size = sector_size; block_acct_start(bdrv_get_stats(s->bs), &s->acct, s->packet_transfer_size, BLOCK_ACCT_READ); /* XXX: check if BUSY_STAT should be set */ s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT; ide_start_dma(s, ide_atapi_cmd_read_dma_cb); } | 23,358 |
0 | void qed_acquire(BDRVQEDState *s) { aio_context_acquire(bdrv_get_aio_context(s->bs)); } | 23,359 |
0 | static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf, RDMAControlHeader *head) { int ret = 0; RDMAWorkRequestData *wr = &rdma->wr_data[RDMA_WRID_CONTROL]; struct ibv_send_wr *bad_wr; struct ibv_sge sge = { .addr = (uint64_t)(wr->control), .length = head->len + sizeof(RDMAControlHeader), .lkey = wr->control_mr->lkey, }; struct ibv_send_wr send_wr = { .wr_id = RDMA_WRID_SEND_CONTROL, .opcode = IBV_WR_SEND, .send_flags = IBV_SEND_SIGNALED, .sg_list = &sge, .num_sge = 1, }; DDDPRINTF("CONTROL: sending %s..\n", control_desc[head->type]); /* * We don't actually need to do a memcpy() in here if we used * the "sge" properly, but since we're only sending control messages * (not RAM in a performance-critical path), then its OK for now. * * The copy makes the RDMAControlHeader simpler to manipulate * for the time being. */ assert(head->len <= RDMA_CONTROL_MAX_BUFFER - sizeof(*head)); memcpy(wr->control, head, sizeof(RDMAControlHeader)); control_to_network((void *) wr->control); if (buf) { memcpy(wr->control + sizeof(RDMAControlHeader), buf, head->len); } if (ibv_post_send(rdma->qp, &send_wr, &bad_wr)) { return -1; } if (ret < 0) { fprintf(stderr, "Failed to use post IB SEND for control!\n"); return ret; } ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_SEND_CONTROL); if (ret < 0) { fprintf(stderr, "rdma migration: send polling control error!\n"); } return ret; } | 23,360 |
0 | static int hdev_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVRawState *s = bs->opaque; Error *local_err = NULL; int ret; #if defined(__APPLE__) && defined(__MACH__) const char *filename = qdict_get_str(options, "filename"); if (strstart(filename, "/dev/cdrom", NULL)) { kern_return_t kernResult; io_iterator_t mediaIterator; char bsdPath[ MAXPATHLEN ]; int fd; kernResult = FindEjectableCDMedia( &mediaIterator ); kernResult = GetBSDPath(mediaIterator, bsdPath, sizeof(bsdPath), flags); if ( bsdPath[ 0 ] != '\0' ) { strcat(bsdPath,"s0"); /* some CDs don't have a partition 0 */ fd = qemu_open(bsdPath, O_RDONLY | O_BINARY | O_LARGEFILE); if (fd < 0) { bsdPath[strlen(bsdPath)-1] = '1'; } else { qemu_close(fd); } filename = bsdPath; qdict_put(options, "filename", qstring_from_str(filename)); } if ( mediaIterator ) IOObjectRelease( mediaIterator ); } #endif s->type = FTYPE_FILE; ret = raw_open_common(bs, options, flags, 0, &local_err); if (ret < 0) { if (local_err) { error_propagate(errp, local_err); } return ret; } /* Since this does ioctl the device must be already opened */ bs->sg = hdev_is_sg(bs); if (flags & BDRV_O_RDWR) { ret = check_hdev_writable(s); if (ret < 0) { raw_close(bs); error_setg_errno(errp, -ret, "The device is not writable"); return ret; } } return ret; } | 23,362 |
0 | static void pc_init1(MachineState *machine, int pci_enabled, int kvmclock_enabled) { PCMachineState *pc_machine = PC_MACHINE(machine); MemoryRegion *system_memory = get_system_memory(); MemoryRegion *system_io = get_system_io(); int i; ram_addr_t below_4g_mem_size, above_4g_mem_size; PCIBus *pci_bus; ISABus *isa_bus; PCII440FXState *i440fx_state; int piix3_devfn = -1; qemu_irq *cpu_irq; qemu_irq *gsi; qemu_irq *i8259; qemu_irq *smi_irq; GSIState *gsi_state; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; BusState *idebus[MAX_IDE_BUS]; ISADevice *rtc_state; ISADevice *floppy; MemoryRegion *ram_memory; MemoryRegion *pci_memory; MemoryRegion *rom_memory; DeviceState *icc_bridge; FWCfgState *fw_cfg = NULL; PcGuestInfo *guest_info; /* Check whether RAM fits below 4G (leaving 1/2 GByte for IO memory). * If it doesn't, we need to split it in chunks below and above 4G. * In any case, try to make sure that guest addresses aligned at * 1G boundaries get mapped to host addresses aligned at 1G boundaries. * For old machine types, use whatever split we used historically to avoid * breaking migration. */ if (machine->ram_size >= 0xe0000000) { ram_addr_t lowmem = gigabyte_align ? 0xc0000000 : 0xe0000000; above_4g_mem_size = machine->ram_size - lowmem; below_4g_mem_size = lowmem; } else { above_4g_mem_size = 0; below_4g_mem_size = machine->ram_size; } if (xen_enabled() && xen_hvm_init(&below_4g_mem_size, &above_4g_mem_size, &ram_memory) != 0) { fprintf(stderr, "xen hardware virtual machine initialisation failed\n"); exit(1); } icc_bridge = qdev_create(NULL, TYPE_ICC_BRIDGE); object_property_add_child(qdev_get_machine(), "icc-bridge", OBJECT(icc_bridge), NULL); pc_cpus_init(machine->cpu_model, icc_bridge); if (kvm_enabled() && kvmclock_enabled) { kvmclock_create(); } if (pci_enabled) { pci_memory = g_new(MemoryRegion, 1); memory_region_init(pci_memory, NULL, "pci", UINT64_MAX); rom_memory = pci_memory; } else { pci_memory = NULL; rom_memory = system_memory; } guest_info = pc_guest_info_init(below_4g_mem_size, above_4g_mem_size); guest_info->has_acpi_build = has_acpi_build; guest_info->has_pci_info = has_pci_info; guest_info->isapc_ram_fw = !pci_enabled; guest_info->has_reserved_memory = has_reserved_memory; if (smbios_defaults) { MachineClass *mc = MACHINE_GET_CLASS(machine); /* These values are guest ABI, do not change */ smbios_set_defaults("QEMU", "Standard PC (i440FX + PIIX, 1996)", mc->name, smbios_legacy_mode); } /* allocate ram and load rom/bios */ if (!xen_enabled()) { fw_cfg = pc_memory_init(machine, system_memory, below_4g_mem_size, above_4g_mem_size, rom_memory, &ram_memory, guest_info); } gsi_state = g_malloc0(sizeof(*gsi_state)); if (kvm_irqchip_in_kernel()) { kvm_pc_setup_irq_routing(pci_enabled); gsi = qemu_allocate_irqs(kvm_pc_gsi_handler, gsi_state, GSI_NUM_PINS); } else { gsi = qemu_allocate_irqs(gsi_handler, gsi_state, GSI_NUM_PINS); } if (pci_enabled) { pci_bus = i440fx_init(&i440fx_state, &piix3_devfn, &isa_bus, gsi, system_memory, system_io, machine->ram_size, below_4g_mem_size, above_4g_mem_size, pci_memory, ram_memory); } else { pci_bus = NULL; i440fx_state = NULL; isa_bus = isa_bus_new(NULL, system_io); no_hpet = 1; } isa_bus_irqs(isa_bus, gsi); if (kvm_irqchip_in_kernel()) { i8259 = kvm_i8259_init(isa_bus); } else if (xen_enabled()) { i8259 = xen_interrupt_controller_init(); } else { cpu_irq = pc_allocate_cpu_irq(); i8259 = i8259_init(isa_bus, cpu_irq[0]); } for (i = 0; i < ISA_NUM_IRQS; i++) { gsi_state->i8259_irq[i] = i8259[i]; } if (pci_enabled) { ioapic_init_gsi(gsi_state, 
"i440fx"); } qdev_init_nofail(icc_bridge); pc_register_ferr_irq(gsi[13]); pc_vga_init(isa_bus, pci_enabled ? pci_bus : NULL); /* init basic PC hardware */ pc_basic_device_init(isa_bus, gsi, &rtc_state, &floppy, xen_enabled(), 0x4); pc_nic_init(isa_bus, pci_bus); ide_drive_get(hd, MAX_IDE_BUS); if (pci_enabled) { PCIDevice *dev; if (xen_enabled()) { dev = pci_piix3_xen_ide_init(pci_bus, hd, piix3_devfn + 1); } else { dev = pci_piix3_ide_init(pci_bus, hd, piix3_devfn + 1); } idebus[0] = qdev_get_child_bus(&dev->qdev, "ide.0"); idebus[1] = qdev_get_child_bus(&dev->qdev, "ide.1"); } else { for(i = 0; i < MAX_IDE_BUS; i++) { ISADevice *dev; char busname[] = "ide.0"; dev = isa_ide_init(isa_bus, ide_iobase[i], ide_iobase2[i], ide_irq[i], hd[MAX_IDE_DEVS * i], hd[MAX_IDE_DEVS * i + 1]); /* * The ide bus name is ide.0 for the first bus and ide.1 for the * second one. */ busname[4] = '0' + i; idebus[i] = qdev_get_child_bus(DEVICE(dev), busname); } } pc_cmos_init(below_4g_mem_size, above_4g_mem_size, machine->boot_order, floppy, idebus[0], idebus[1], rtc_state); if (pci_enabled && usb_enabled(false)) { pci_create_simple(pci_bus, piix3_devfn + 2, "piix3-usb-uhci"); } if (pci_enabled && acpi_enabled) { DeviceState *piix4_pm; I2CBus *smbus; smi_irq = qemu_allocate_irqs(pc_acpi_smi_interrupt, first_cpu, 1); /* TODO: Populate SPD eeprom data. */ smbus = piix4_pm_init(pci_bus, piix3_devfn + 3, 0xb100, gsi[9], *smi_irq, kvm_enabled(), fw_cfg, &piix4_pm); smbus_eeprom_init(smbus, 8, NULL, 0); object_property_add_link(OBJECT(machine), PC_MACHINE_ACPI_DEVICE_PROP, TYPE_HOTPLUG_HANDLER, (Object **)&pc_machine->acpi_dev, object_property_allow_set_link, OBJ_PROP_LINK_UNREF_ON_RELEASE, &error_abort); object_property_set_link(OBJECT(machine), OBJECT(piix4_pm), PC_MACHINE_ACPI_DEVICE_PROP, &error_abort); } if (pci_enabled) { pc_pci_device_init(pci_bus); } } | 23,363 |
0 | static void qpci_spapr_io_writel(QPCIBus *bus, void *addr, uint32_t value) { QPCIBusSPAPR *s = container_of(bus, QPCIBusSPAPR, bus); uint64_t port = (uintptr_t)addr; value = bswap32(value); if (port < s->pio.size) { writel(s->pio_cpu_base + port, value); } else { writel(s->mmio_cpu_base + port, value); } } | 23,364 |
0 | static int get_char(GDBState *s) { uint8_t ch; int ret; for(;;) { ret = qemu_recv(s->fd, &ch, 1, 0); if (ret < 0) { if (errno == ECONNRESET) s->fd = -1; if (errno != EINTR && errno != EAGAIN) return -1; } else if (ret == 0) { close(s->fd); s->fd = -1; return -1; } else { break; } } return ch; } | 23,367 |
0 | static void gen_ldda_asi(DisasContext *dc, TCGv hi, TCGv addr, int insn, int rd) { TCGv_i32 r_asi, r_rd; r_asi = gen_get_asi(dc, insn); r_rd = tcg_const_i32(rd); gen_helper_ldda_asi(cpu_env, addr, r_asi, r_rd); tcg_temp_free_i32(r_rd); tcg_temp_free_i32(r_asi); } | 23,369 |
0 | static int kvm_sclp_service_call(CPUS390XState *env, struct kvm_run *run, uint16_t ipbh0) { uint32_t sccb; uint64_t code; int r = 0; cpu_synchronize_state(env); sccb = env->regs[ipbh0 & 0xf]; code = env->regs[(ipbh0 & 0xf0) >> 4]; r = sclp_service_call(env, sccb, code); if (r) { setcc(env, 3); } return 0; } | 23,370 |
0 | void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl, char *line, int line_size, int *print_prefix) { AVBPrint part[4]; format_line(ptr, level, fmt, vl, part, print_prefix, NULL); snprintf(line, line_size, "%s%s%s%s", part[0].str, part[1].str, part[2].str, part[3].str); av_bprint_finalize(part+3, NULL); } | 23,372 |
0 | void do_raise_exception_err (uint32_t exception, int error_code) { #if 0 printf("Raise exception %3x code : %d\n", exception, error_code); #endif switch (exception) { case EXCP_PROGRAM: if (error_code == EXCP_FP && msr_fe0 == 0 && msr_fe1 == 0) return; break; default: break; } env->exception_index = exception; env->error_code = error_code; cpu_loop_exit(); } | 23,375 |
0 | static AddressSpace *virtio_pci_get_dma_as(DeviceState *d) { VirtIOPCIProxy *proxy = VIRTIO_PCI(d); PCIDevice *dev = &proxy->pci_dev; return pci_get_address_space(dev); } | 23,376 |
0 | static int pte32_check (mmu_ctx_t *ctx, target_ulong pte0, target_ulong pte1, int h, int rw) { return _pte_check(ctx, 0, pte0, pte1, h, rw); } | 23,377 |
0 | static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, strList **missing_feats) { X86CPU *xc; FeatureWord w; Error *err = NULL; strList **next = missing_feats; if (xcc->kvm_required && !kvm_enabled()) { strList *new = g_new0(strList, 1); new->value = g_strdup("kvm");; *missing_feats = new; return; } xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc)))); x86_cpu_load_features(xc, &err); if (err) { /* Errors at x86_cpu_load_features should never happen, * but in case it does, just report the model as not * runnable at all using the "type" property. */ strList *new = g_new0(strList, 1); new->value = g_strdup("type"); *next = new; next = &new->next; } x86_cpu_filter_features(xc); for (w = 0; w < FEATURE_WORDS; w++) { uint32_t filtered = xc->filtered_features[w]; int i; for (i = 0; i < 32; i++) { if (filtered & (1UL << i)) { strList *new = g_new0(strList, 1); new->value = g_strdup(x86_cpu_feature_name(w, i)); *next = new; next = &new->next; } } } object_unref(OBJECT(xc)); } | 23,378 |
0 | int bdrv_flush(BlockDriverState *bs) { Coroutine *co; RwCo rwco = { .bs = bs, .ret = NOT_DONE, }; if (qemu_in_coroutine()) { /* Fast-path if already in coroutine context */ bdrv_flush_co_entry(&rwco); } else { AioContext *aio_context = bdrv_get_aio_context(bs); co = qemu_coroutine_create(bdrv_flush_co_entry); qemu_coroutine_enter(co, &rwco); while (rwco.ret == NOT_DONE) { aio_poll(aio_context, true); } } return rwco.ret; } | 23,380 |
0 | void qmp_nbd_server_start(SocketAddressLegacy *addr, bool has_tls_creds, const char *tls_creds, Error **errp) { if (nbd_server) { error_setg(errp, "NBD server already running"); return; } nbd_server = g_new0(NBDServerData, 1); nbd_server->watch = -1; nbd_server->listen_ioc = qio_channel_socket_new(); qio_channel_set_name(QIO_CHANNEL(nbd_server->listen_ioc), "nbd-listener"); if (qio_channel_socket_listen_sync( nbd_server->listen_ioc, addr, errp) < 0) { goto error; } if (has_tls_creds) { nbd_server->tlscreds = nbd_get_tls_creds(tls_creds, errp); if (!nbd_server->tlscreds) { goto error; } /* TODO SOCKET_ADDRESS_LEGACY_KIND_FD where fd has AF_INET or AF_INET6 */ if (addr->type != SOCKET_ADDRESS_LEGACY_KIND_INET) { error_setg(errp, "TLS is only supported with IPv4/IPv6"); goto error; } } nbd_server->watch = qio_channel_add_watch( QIO_CHANNEL(nbd_server->listen_ioc), G_IO_IN, nbd_accept, NULL, NULL); return; error: nbd_server_free(nbd_server); nbd_server = NULL; } | 23,381 |
0 | vubr_backend_recv_cb(int sock, void *ctx) { VubrDev *vubr = (VubrDev *) ctx; VuDev *dev = &vubr->vudev; VuVirtq *vq = vu_get_queue(dev, 0); VuVirtqElement *elem = NULL; struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE]; struct virtio_net_hdr_mrg_rxbuf mhdr; unsigned mhdr_cnt = 0; int hdrlen = vubr->hdrlen; int i = 0; struct virtio_net_hdr hdr = { .flags = 0, .gso_type = VIRTIO_NET_HDR_GSO_NONE }; DPRINT("\n\n *** IN UDP RECEIVE CALLBACK ***\n\n"); DPRINT(" hdrlen = %d\n", hdrlen); if (!vu_queue_enabled(dev, vq) || !vu_queue_started(dev, vq) || !vu_queue_avail_bytes(dev, vq, hdrlen, 0)) { DPRINT("Got UDP packet, but no available descriptors on RX virtq.\n"); return; } do { struct iovec *sg; ssize_t ret, total = 0; unsigned int num; elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement)); if (!elem) { break; } if (elem->in_num < 1) { fprintf(stderr, "virtio-net contains no in buffers\n"); break; } sg = elem->in_sg; num = elem->in_num; if (i == 0) { if (hdrlen == 12) { mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg), sg, elem->in_num, offsetof(typeof(mhdr), num_buffers), sizeof(mhdr.num_buffers)); } iov_from_buf(sg, elem->in_num, 0, &hdr, sizeof hdr); total += hdrlen; ret = iov_discard_front(&sg, &num, hdrlen); assert(ret == hdrlen); } struct msghdr msg = { .msg_name = (struct sockaddr *) &vubr->backend_udp_dest, .msg_namelen = sizeof(struct sockaddr_in), .msg_iov = sg, .msg_iovlen = elem->in_num, .msg_flags = MSG_DONTWAIT, }; do { ret = recvmsg(vubr->backend_udp_sock, &msg, 0); } while (ret == -1 && (errno == EINTR)); if (i == 0) { iov_restore_front(elem->in_sg, sg, hdrlen); } if (ret == -1) { if (errno == EWOULDBLOCK) { vu_queue_rewind(dev, vq, 1); break; } vubr_die("recvmsg()"); } total += ret; iov_truncate(elem->in_sg, elem->in_num, total); vu_queue_fill(dev, vq, elem, total, i++); free(elem); elem = NULL; } while (false); /* could loop if DONTWAIT worked? */ if (mhdr_cnt) { mhdr.num_buffers = i; iov_from_buf(mhdr_sg, mhdr_cnt, 0, &mhdr.num_buffers, sizeof mhdr.num_buffers); } vu_queue_flush(dev, vq, i); vu_queue_notify(dev, vq); free(elem); } | 23,382 |
0 | int ff_j2k_init_component(Jpeg2000Component *comp, Jpeg2000CodingStyle *codsty, Jpeg2000QuantStyle *qntsty, int cbps, int dx, int dy, AVCodecContext *avctx) { uint8_t log2_band_prec_width, log2_band_prec_height; int reslevelno, bandno, gbandno = 0, ret, i, j, csize = 1; if (ret=ff_jpeg2000_dwt_init(&comp->dwt, comp->coord, codsty->nreslevels2decode-1, codsty->transform == FF_DWT53 ? FF_DWT53 : FF_DWT97_INT)) return ret; for (i = 0; i < 2; i++) csize *= comp->coord[i][1] - comp->coord[i][0]; comp->data = av_malloc_array(csize, sizeof(*comp->data)); if (!comp->data) return AVERROR(ENOMEM); comp->reslevel = av_malloc_array(codsty->nreslevels, sizeof(*comp->reslevel)); if (!comp->reslevel) return AVERROR(ENOMEM); /* LOOP on resolution levels */ for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++) { int declvl = codsty->nreslevels - reslevelno; // N_L -r see ISO/IEC 15444-1:2002 B.5 Jpeg2000ResLevel *reslevel = comp->reslevel + reslevelno; /* Compute borders for each resolution level. * Computation of trx_0, trx_1, try_0 and try_1. * see ISO/IEC 15444-1:2002 eq. B.5 and B-14 */ for (i = 0; i < 2; i++) for (j = 0; j < 2; j++) reslevel->coord[i][j] = ff_jpeg2000_ceildivpow2(comp->coord_o[i][j], declvl - 1); // update precincts size: 2^n value reslevel->log2_prec_width = codsty->log2_prec_widths[reslevelno]; reslevel->log2_prec_height = codsty->log2_prec_heights[reslevelno]; /* Number of bands for each resolution level */ if (reslevelno == 0) reslevel->nbands = 1; else reslevel->nbands = 3; /* Number of precincts wich span the tile for resolution level reslevelno * see B.6 in ISO/IEC 15444-1:2002 eq. B-16 * num_precincts_x = |- trx_1 / 2 ^ log2_prec_width) -| - (trx_0 / 2 ^ log2_prec_width) * num_precincts_y = |- try_1 / 2 ^ log2_prec_width) -| - (try_0 / 2 ^ log2_prec_width) * for Dcinema profiles in JPEG 2000 * num_precincts_x = |- trx_1 / 2 ^ log2_prec_width) -| * num_precincts_y = |- try_1 / 2 ^ log2_prec_width) -| */ if (reslevel->coord[0][1] == reslevel->coord[0][0]) reslevel->num_precincts_x = 0; else reslevel->num_precincts_x = ff_jpeg2000_ceildivpow2(reslevel->coord[0][1], reslevel->log2_prec_width) - (reslevel->coord[0][0] >> reslevel->log2_prec_width); if (reslevel->coord[1][1] == reslevel->coord[1][0]) reslevel->num_precincts_y = 0; else reslevel->num_precincts_y = ff_jpeg2000_ceildivpow2(reslevel->coord[1][1], reslevel->log2_prec_height) - (reslevel->coord[1][0] >> reslevel->log2_prec_height); reslevel->band = av_malloc_array(reslevel->nbands, sizeof(*reslevel->band)); if (!reslevel->band) return AVERROR(ENOMEM); for (bandno = 0; bandno < reslevel->nbands; bandno++, gbandno++) { Jpeg2000Band *band = reslevel->band + bandno; int cblkno, precno; int nb_precincts; /* TODO: Implementation of quantization step not finished, * see ISO/IEC 15444-1:2002 E.1 and A.6.4. */ switch (qntsty->quantsty) { uint8_t gain; int numbps; case JPEG2000_QSTY_NONE: /* TODO: to verify. No quantization in this case */ band->f_stepsize = 1; break; case JPEG2000_QSTY_SI: /*TODO: Compute formula to implement. */ numbps = cbps + lut_gain[codsty->transform][bandno + (reslevelno > 0)]; band->f_stepsize = SHL(2048 + qntsty->mant[gbandno], 2 + numbps - qntsty->expn[gbandno]); break; case JPEG2000_QSTY_SE: /* Exponent quantization step. * Formula: * delta_b = 2 ^ (R_b - expn_b) * (1 + (mant_b / 2 ^ 11)) * R_b = R_I + log2 (gain_b ) * see ISO/IEC 15444-1:2002 E.1.1 eqn. E-3 and E-4 */ /* TODO/WARN: value of log2 (gain_b ) not taken into account * but it works (compared to OpenJPEG). Why? 
* Further investigation needed. */ gain = cbps; band->f_stepsize = pow(2.0, gain - qntsty->expn[gbandno]); band->f_stepsize *= (qntsty->mant[gbandno] / 2048.0 + 1.0); break; default: band->f_stepsize = 0; av_log(avctx, AV_LOG_ERROR, "Unknown quantization format\n"); break; } /* FIXME: In openjepg code stespize = stepsize * 0.5. Why? * If not set output of entropic decoder is not correct. */ if (!av_codec_is_encoder(avctx->codec)) band->f_stepsize *= 0.5; band->i_stepsize = band->f_stepsize * (1 << 16); /* computation of tbx_0, tbx_1, tby_0, tby_1 * see ISO/IEC 15444-1:2002 B.5 eq. B-15 and tbl B.1 * codeblock width and height is computed for * DCI JPEG 2000 codeblock_width = codeblock_width = 32 = 2 ^ 5 */ if (reslevelno == 0) { /* for reslevelno = 0, only one band, x0_b = y0_b = 0 */ for (i = 0; i < 2; i++) for (j = 0; j < 2; j++) band->coord[i][j] = ff_jpeg2000_ceildivpow2(comp->coord_o[i][j] - comp->coord_o[i][0], declvl - 1); log2_band_prec_width = reslevel->log2_prec_width; log2_band_prec_height = reslevel->log2_prec_height; /* see ISO/IEC 15444-1:2002 eq. B-17 and eq. B-15 */ band->log2_cblk_width = FFMIN(codsty->log2_cblk_width, reslevel->log2_prec_width); band->log2_cblk_height = FFMIN(codsty->log2_cblk_height, reslevel->log2_prec_height); } else { /* 3 bands x0_b = 1 y0_b = 0; x0_b = 0 y0_b = 1; x0_b = y0_b = 1 */ /* x0_b and y0_b are computed with ((bandno + 1 >> i) & 1) */ for (i = 0; i < 2; i++) for (j = 0; j < 2; j++) /* Formula example for tbx_0 = ceildiv((tcx_0 - 2 ^ (declvl - 1) * x0_b) / declvl) */ band->coord[i][j] = ff_jpeg2000_ceildivpow2(comp->coord_o[i][j] - comp->coord_o[i][0] - (((bandno + 1 >> i) & 1) << declvl - 1), declvl); /* TODO: Manage case of 3 band offsets here or * in coding/decoding function? */ /* see ISO/IEC 15444-1:2002 eq. B-17 and eq. B-15 */ band->log2_cblk_width = FFMIN(codsty->log2_cblk_width, reslevel->log2_prec_width - 1); band->log2_cblk_height = FFMIN(codsty->log2_cblk_height, reslevel->log2_prec_height - 1); log2_band_prec_width = reslevel->log2_prec_width - 1; log2_band_prec_height = reslevel->log2_prec_height - 1; } for (j = 0; j < 2; j++) band->coord[0][j] = ff_jpeg2000_ceildiv(band->coord[0][j], dx); for (j = 0; j < 2; j++) band->coord[1][j] = ff_jpeg2000_ceildiv(band->coord[1][j], dy); band->prec = av_malloc_array(reslevel->num_precincts_x * reslevel->num_precincts_y, sizeof(*band->prec)); if (!band->prec) return AVERROR(ENOMEM); nb_precincts = reslevel->num_precincts_x * reslevel->num_precincts_y; for (precno = 0; precno < nb_precincts; precno++) { Jpeg2000Prec *prec = band->prec + precno; /* TODO: Explain formula for JPEG200 DCINEMA. 
*/ /* TODO: Verify with previous count of codeblocks per band */ /* Compute P_x0 */ prec->coord[0][0] = (precno % reslevel->num_precincts_x) * (1 << log2_band_prec_width); prec->coord[0][0] = FFMAX(prec->coord[0][0], band->coord[0][0]); /* Compute P_y0 */ prec->coord[1][0] = (precno / reslevel->num_precincts_x) * (1 << log2_band_prec_height); prec->coord[1][0] = FFMAX(prec->coord[1][0], band->coord[1][0]); /* Compute P_x1 */ prec->coord[0][1] = prec->coord[0][0] + (1 << log2_band_prec_width); prec->coord[0][1] = FFMIN(prec->coord[0][1], band->coord[0][1]); /* Compute P_y1 */ prec->coord[1][1] = prec->coord[1][0] + (1 << log2_band_prec_height); prec->coord[1][1] = FFMIN(prec->coord[1][1], band->coord[1][1]); prec->nb_codeblocks_width = ff_jpeg2000_ceildivpow2(prec->coord[0][1] - prec->coord[0][0], band->log2_cblk_width); prec->nb_codeblocks_height = ff_jpeg2000_ceildivpow2(prec->coord[1][1] - prec->coord[1][0], band->log2_cblk_height); /* Tag trees initialization */ prec->cblkincl = ff_j2k_tag_tree_init(prec->nb_codeblocks_width, prec->nb_codeblocks_height); if (!prec->cblkincl) return AVERROR(ENOMEM); prec->zerobits = ff_j2k_tag_tree_init(prec->nb_codeblocks_width, prec->nb_codeblocks_height); if (!prec->zerobits) return AVERROR(ENOMEM); prec->cblk = av_malloc_array(prec->nb_codeblocks_width * prec->nb_codeblocks_height, sizeof(*prec->cblk)); if (!prec->cblk) return AVERROR(ENOMEM); for (cblkno = 0; cblkno < prec->nb_codeblocks_width * prec->nb_codeblocks_height; cblkno++) { Jpeg2000Cblk *cblk = prec->cblk + cblkno; uint16_t Cx0, Cy0; /* Compute coordinates of codeblocks */ /* Compute Cx0*/ Cx0 = (prec->coord[0][0] >> band->log2_cblk_width) << band->log2_cblk_width; Cx0 = Cx0 + ((cblkno % prec->nb_codeblocks_width) << band->log2_cblk_width); cblk->coord[0][0] = FFMAX(Cx0, prec->coord[0][0]); /* Compute Cy0*/ Cy0 = (prec->coord[1][0] >> band->log2_cblk_height) << band->log2_cblk_height; Cy0 = Cy0 + ((cblkno / prec->nb_codeblocks_width) << band->log2_cblk_height); cblk->coord[1][0] = FFMAX(Cy0, prec->coord[1][0]); /* Compute Cx1 */ cblk->coord[0][1] = FFMIN(Cx0 + (1 << band->log2_cblk_width), prec->coord[0][1]); /* Compute Cy1 */ cblk->coord[1][1] = FFMIN(Cy0 + (1 << band->log2_cblk_height), prec->coord[1][1]); if((bandno + !!reslevelno) & 1) { cblk->coord[0][0] += comp->reslevel[reslevelno-1].coord[0][1] - comp->reslevel[reslevelno-1].coord[0][0]; cblk->coord[0][1] += comp->reslevel[reslevelno-1].coord[0][1] - comp->reslevel[reslevelno-1].coord[0][0]; } if((bandno + !!reslevelno) & 2) { cblk->coord[1][0] += comp->reslevel[reslevelno-1].coord[1][1] - comp->reslevel[reslevelno-1].coord[1][0]; cblk->coord[1][1] += comp->reslevel[reslevelno-1].coord[1][1] - comp->reslevel[reslevelno-1].coord[1][0]; } cblk->zero = 0; cblk->lblock = 3; cblk->length = 0; cblk->lengthinc = 0; cblk->npasses = 0; } } } } return 0; } | 23,383 |
0 | static av_always_inline void blend_image_packed_rgb(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int main_has_alpha, int x, int y, int is_straight) { OverlayContext *s = ctx->priv; int i, imax, j, jmax; const int src_w = src->width; const int src_h = src->height; const int dst_w = dst->width; const int dst_h = dst->height; uint8_t alpha; ///< the amount of overlay to blend on to main const int dr = s->main_rgba_map[R]; const int dg = s->main_rgba_map[G]; const int db = s->main_rgba_map[B]; const int da = s->main_rgba_map[A]; const int dstep = s->main_pix_step[0]; const int sr = s->overlay_rgba_map[R]; const int sg = s->overlay_rgba_map[G]; const int sb = s->overlay_rgba_map[B]; const int sa = s->overlay_rgba_map[A]; const int sstep = s->overlay_pix_step[0]; uint8_t *S, *sp, *d, *dp; i = FFMAX(-y, 0); sp = src->data[0] + i * src->linesize[0]; dp = dst->data[0] + (y+i) * dst->linesize[0]; for (imax = FFMIN(-y + dst_h, src_h); i < imax; i++) { j = FFMAX(-x, 0); S = sp + j * sstep; d = dp + (x+j) * dstep; for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) { alpha = S[sa]; // if the main channel has an alpha channel, alpha has to be calculated // to create an un-premultiplied (straight) alpha value if (main_has_alpha && alpha != 0 && alpha != 255) { uint8_t alpha_d = d[da]; alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d); } switch (alpha) { case 0: break; case 255: d[dr] = S[sr]; d[dg] = S[sg]; d[db] = S[sb]; break; default: // main_value = main_value * (1 - alpha) + overlay_value * alpha // since alpha is in the range 0-255, the result must divided by 255 d[dr] = is_straight ? FAST_DIV255(d[dr] * (255 - alpha) + S[sr] * alpha) : FAST_DIV255(d[dr] * (255 - alpha) + S[sr]); d[dg] = is_straight ? FAST_DIV255(d[dg] * (255 - alpha) + S[sg] * alpha) : FAST_DIV255(d[dr] * (255 - alpha) + S[sr]); d[db] = is_straight ? FAST_DIV255(d[db] * (255 - alpha) + S[sb] * alpha) : FAST_DIV255(d[dr] * (255 - alpha) + S[sr]); } if (main_has_alpha) { switch (alpha) { case 0: break; case 255: d[da] = S[sa]; break; default: // apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha d[da] += FAST_DIV255((255 - d[da]) * S[sa]); } } d += dstep; S += sstep; } dp += dst->linesize[0]; sp += src->linesize[0]; } } | 23,385 |
1 | static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head, uint8_t *data, RDMAControlHeader *resp, int *resp_idx, int (*callback)(RDMAContext *rdma)) { int ret = 0; /* * Wait until the dest is ready before attempting to deliver the message * by waiting for a READY message. */ if (rdma->control_ready_expected) { RDMAControlHeader resp; ret = qemu_rdma_exchange_get_response(rdma, &resp, RDMA_CONTROL_READY, RDMA_WRID_READY); if (ret < 0) { return ret; } } /* * If the user is expecting a response, post a WR in anticipation of it. */ if (resp) { ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_DATA); if (ret) { fprintf(stderr, "rdma migration: error posting" " extra control recv for anticipated result!"); return ret; } } /* * Post a WR to replace the one we just consumed for the READY message. */ ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); if (ret) { fprintf(stderr, "rdma migration: error posting first control recv!"); return ret; } /* * Deliver the control message that was requested. */ ret = qemu_rdma_post_send_control(rdma, data, head); if (ret < 0) { fprintf(stderr, "Failed to send control buffer!\n"); return ret; } /* * If we're expecting a response, block and wait for it. */ if (resp) { if (callback) { DDPRINTF("Issuing callback before receiving response...\n"); ret = callback(rdma); if (ret < 0) { return ret; } } DDPRINTF("Waiting for response %s\n", control_desc[resp->type]); ret = qemu_rdma_exchange_get_response(rdma, resp, resp->type, RDMA_WRID_DATA); if (ret < 0) { return ret; } qemu_rdma_move_header(rdma, RDMA_WRID_DATA, resp); if (resp_idx) { *resp_idx = RDMA_WRID_DATA; } DDPRINTF("Response %s received.\n", control_desc[resp->type]); } rdma->control_ready_expected = 1; return 0; } | 23,387 |
1 | static int configure_accelerator(void) { const char *p = NULL; char buf[10]; int i, ret; bool accel_initialised = false; bool init_failed = false; QemuOptsList *list = qemu_find_opts("machine"); if (!QTAILQ_EMPTY(&list->head)) { p = qemu_opt_get(QTAILQ_FIRST(&list->head), "accel"); } if (p == NULL) { /* Use the default "accelerator", tcg */ p = "tcg"; } while (!accel_initialised && *p != '\0') { if (*p == ':') { p++; } p = get_opt_name(buf, sizeof (buf), p, ':'); for (i = 0; i < ARRAY_SIZE(accel_list); i++) { if (strcmp(accel_list[i].opt_name, buf) == 0) { if (!accel_list[i].available()) { printf("%s not supported for this target\n", accel_list[i].name); continue; } *(accel_list[i].allowed) = true; ret = accel_list[i].init(); if (ret < 0) { init_failed = true; fprintf(stderr, "failed to initialize %s: %s\n", accel_list[i].name, strerror(-ret)); *(accel_list[i].allowed) = false; } else { accel_initialised = true; } break; } } if (i == ARRAY_SIZE(accel_list)) { fprintf(stderr, "\"%s\" accelerator does not exist.\n", buf); } } if (!accel_initialised) { if (!init_failed) { fprintf(stderr, "No accelerator found!\n"); } exit(1); } if (init_failed) { fprintf(stderr, "Back to %s accelerator.\n", accel_list[i].name); } return !accel_initialised; } | 23,388 |
1 | void qemu_chr_add_handlers(CharDriverState *s, IOCanReadHandler *fd_can_read, IOReadHandler *fd_read, IOEventHandler *fd_event, void *opaque) { if (!opaque) { /* chr driver being released. */ s->assigned = 0; } s->chr_can_read = fd_can_read; s->chr_read = fd_read; s->chr_event = fd_event; s->handler_opaque = opaque; if (s->chr_update_read_handler) s->chr_update_read_handler(s); /* We're connecting to an already opened device, so let's make sure we also get the open event */ if (s->opened) { qemu_chr_generic_open(s); } } | 23,389 |
1 | static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, int16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN) { int code, i = ss, j, sign, val, run; int last = FFMIN(se, *last_nnz); OPEN_READER(re, &s->gb); if (*EOBRUN) { (*EOBRUN)--; } else { for (; ; i++) { UPDATE_CACHE(re, &s->gb); GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2); if (code & 0xF) { run = ((unsigned) code) >> 4; UPDATE_CACHE(re, &s->gb); val = SHOW_UBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); ZERO_RUN; j = s->scantable.permutated[i]; val--; block[j] = ((quant_matrix[j]^val) - val) << Al; if (i == se) { if (i > *last_nnz) *last_nnz = i; CLOSE_READER(re, &s->gb); return 0; } } else { run = ((unsigned) code) >> 4; if (run == 0xF) { ZERO_RUN; } else { val = run; run = (1 << run); if (val) { UPDATE_CACHE(re, &s->gb); run += SHOW_UBITS(re, &s->gb, val); LAST_SKIP_BITS(re, &s->gb, val); } *EOBRUN = run - 1; break; } } } if (i > *last_nnz) *last_nnz = i; } for (; i <= last; i++) { j = s->scantable.permutated[i]; if (block[j]) REFINE_BIT(j) } CLOSE_READER(re, &s->gb); return 0; } | 23,390 |
1 | int ff_mov_lang_to_iso639(int code, char *to) { int i; /* is it the mangled iso code? */ /* see http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt */ if (code > 138) { for (i = 2; i >= 0; i--) { to[i] = 0x60 + (code & 0x1f); code >>= 5; } return 1; } /* old fashion apple lang code */ if (code >= FF_ARRAY_ELEMS(mov_mdhd_language_map)) return 0; if (!mov_mdhd_language_map[code]) return 0; strncpy(to, mov_mdhd_language_map[code], 4); return 1; } | 23,391 |
1 | static void ohci_sysbus_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = ohci_realize_pxa; set_bit(DEVICE_CATEGORY_USB, dc->categories); dc->desc = "OHCI USB Controller"; dc->props = ohci_sysbus_properties; dc->reset = usb_ohci_reset_sysbus; } | 23,392 |
0 | av_cold void ff_vp3dsp_init_x86(VP3DSPContext *c, int flags) { #if HAVE_YASM int cpuflags = av_get_cpu_flags(); #if ARCH_X86_32 if (HAVE_MMX && cpuflags & AV_CPU_FLAG_MMX) { c->idct_put = ff_vp3_idct_put_mmx; c->idct_add = ff_vp3_idct_add_mmx; c->idct_perm = FF_PARTTRANS_IDCT_PERM; } #endif if (HAVE_MMXEXT && cpuflags & AV_CPU_FLAG_MMXEXT) { c->idct_dc_add = ff_vp3_idct_dc_add_mmx2; if (!(flags & CODEC_FLAG_BITEXACT)) { c->v_loop_filter = ff_vp3_v_loop_filter_mmx2; c->h_loop_filter = ff_vp3_h_loop_filter_mmx2; } } if (cpuflags & AV_CPU_FLAG_SSE2) { c->idct_put = ff_vp3_idct_put_sse2; c->idct_add = ff_vp3_idct_add_sse2; c->idct_perm = FF_TRANSPOSE_IDCT_PERM; } #endif } | 23,395 |
0 | void memory_region_init(MemoryRegion *mr, const char *name, uint64_t size) { mr->ops = NULL; mr->parent = NULL; mr->size = int128_make64(size); if (size == UINT64_MAX) { mr->size = int128_2_64(); } mr->addr = 0; mr->subpage = false; mr->enabled = true; mr->terminates = false; mr->ram = false; mr->romd_mode = true; mr->readonly = false; mr->rom_device = false; mr->destructor = memory_region_destructor_none; mr->priority = 0; mr->may_overlap = false; mr->alias = NULL; QTAILQ_INIT(&mr->subregions); memset(&mr->subregions_link, 0, sizeof mr->subregions_link); QTAILQ_INIT(&mr->coalesced); mr->name = g_strdup(name); mr->dirty_log_mask = 0; mr->ioeventfd_nb = 0; mr->ioeventfds = NULL; mr->flush_coalesced_mmio = false; } | 23,397 |
0 | float64 helper_sub_cmpf64(CPUM68KState *env, float64 src0, float64 src1) { /* ??? This may incorrectly raise exceptions. */ /* ??? Should flush denormals to zero. */ float64 res; res = float64_sub(src0, src1, &env->fp_status); if (float64_is_nan(res)) { /* +/-inf compares equal against itself, but sub returns nan. */ if (!float64_is_nan(src0) && !float64_is_nan(src1)) { res = 0; if (float64_lt_quiet(src0, res, &env->fp_status)) res = float64_chs(res); } } return res; } | 23,398 |
0 | bool write_list_to_kvmstate(ARMCPU *cpu) { CPUState *cs = CPU(cpu); int i; bool ok = true; for (i = 0; i < cpu->cpreg_array_len; i++) { struct kvm_one_reg r; uint64_t regidx = cpu->cpreg_indexes[i]; uint32_t v32; int ret; r.id = regidx; switch (regidx & KVM_REG_SIZE_MASK) { case KVM_REG_SIZE_U32: v32 = cpu->cpreg_values[i]; r.addr = (uintptr_t)&v32; break; case KVM_REG_SIZE_U64: r.addr = (uintptr_t)(cpu->cpreg_values + i); break; default: abort(); } ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r); if (ret) { /* We might fail for "unknown register" and also for * "you tried to set a register which is constant with * a different value from what it actually contains". */ ok = false; } } return ok; } | 23,399 |
0 | static void do_mchk_interrupt(CPUS390XState *env) { S390CPU *cpu = s390_env_get_cpu(env); uint64_t mask, addr; LowCore *lowcore; MchkQueue *q; int i; if (!(env->psw.mask & PSW_MASK_MCHECK)) { cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n"); } if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) { cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index); } q = &env->mchk_queue[env->mchk_index]; if (q->type != 1) { /* Don't know how to handle this... */ cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type); } if (!(env->cregs[14] & (1 << 28))) { /* CRW machine checks disabled */ return; } lowcore = cpu_map_lowcore(env); for (i = 0; i < 16; i++) { lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll); lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]); lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]); lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]); } lowcore->prefixreg_save_area = cpu_to_be32(env->psa); lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc); lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr); lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32); lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm); lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32); lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc); lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d); lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000); lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env)); lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr); mask = be64_to_cpu(lowcore->mcck_new_psw.mask); addr = be64_to_cpu(lowcore->mcck_new_psw.addr); cpu_unmap_lowcore(lowcore); env->mchk_index--; if (env->mchk_index == -1) { env->pending_int &= ~INTERRUPT_MCHK; } DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__, env->psw.mask, env->psw.addr); load_psw(env, mask, addr); } | 23,400 |
0 | static void bochs_bios_init(void) { void *fw_cfg; uint8_t *smbios_table; size_t smbios_len; uint64_t *numa_fw_cfg; int i, j; register_ioport_write(0x400, 1, 2, bochs_bios_write, NULL); register_ioport_write(0x401, 1, 2, bochs_bios_write, NULL); register_ioport_write(0x402, 1, 1, bochs_bios_write, NULL); register_ioport_write(0x403, 1, 1, bochs_bios_write, NULL); register_ioport_write(0x8900, 1, 1, bochs_bios_write, NULL); register_ioport_write(0x501, 1, 2, bochs_bios_write, NULL); register_ioport_write(0x502, 1, 2, bochs_bios_write, NULL); register_ioport_write(0x500, 1, 1, bochs_bios_write, NULL); register_ioport_write(0x503, 1, 1, bochs_bios_write, NULL); fw_cfg = fw_cfg_init(BIOS_CFG_IOPORT, BIOS_CFG_IOPORT + 1, 0, 0); fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_ACPI_TABLES, (uint8_t *)acpi_tables, acpi_tables_len); smbios_table = smbios_get_table(&smbios_len); if (smbios_table) fw_cfg_add_bytes(fw_cfg, FW_CFG_SMBIOS_ENTRIES, smbios_table, smbios_len); /* allocate memory for the NUMA channel: one (64bit) word for the number * of nodes, one word for each VCPU->node and one word for each node to * hold the amount of memory. */ numa_fw_cfg = qemu_mallocz((1 + smp_cpus + nb_numa_nodes) * 8); numa_fw_cfg[0] = cpu_to_le64(nb_numa_nodes); for (i = 0; i < smp_cpus; i++) { for (j = 0; j < nb_numa_nodes; j++) { if (node_cpumask[j] & (1 << i)) { numa_fw_cfg[i + 1] = cpu_to_le64(j); break; } } } for (i = 0; i < nb_numa_nodes; i++) { numa_fw_cfg[smp_cpus + 1 + i] = cpu_to_le64(node_mem[i]); } fw_cfg_add_bytes(fw_cfg, FW_CFG_NUMA, (uint8_t *)numa_fw_cfg, (1 + smp_cpus + nb_numa_nodes) * 8); } | 23,401 |
0 | check_host_key_hash(BDRVSSHState *s, const char *hash, int hash_type, size_t fingerprint_len) { const char *fingerprint; fingerprint = libssh2_hostkey_hash(s->session, hash_type); if (!fingerprint) { session_error_report(s, "failed to read remote host key"); return -EINVAL; } if(compare_fingerprint((unsigned char *) fingerprint, fingerprint_len, hash) != 0) { error_report("remote host key does not match host_key_check '%s'", hash); return -EPERM; } return 0; } | 23,402 |
0 | static uint64_t get_cluster_offset(BlockDriverState *bs, uint64_t offset, int allocate, int compressed_size, int n_start, int n_end) { BDRVQcowState *s = bs->opaque; int min_index, i, j, l1_index, l2_index; uint64_t l2_offset, *l2_table, cluster_offset, tmp; uint32_t min_count; int new_l2_table; l1_index = offset >> (s->l2_bits + s->cluster_bits); l2_offset = s->l1_table[l1_index]; new_l2_table = 0; if (!l2_offset) { if (!allocate) return 0; /* allocate a new l2 entry */ l2_offset = bdrv_getlength(bs->file->bs); /* round to cluster size */ l2_offset = (l2_offset + s->cluster_size - 1) & ~(s->cluster_size - 1); /* update the L1 entry */ s->l1_table[l1_index] = l2_offset; tmp = cpu_to_be64(l2_offset); if (bdrv_pwrite_sync(bs->file, s->l1_table_offset + l1_index * sizeof(tmp), &tmp, sizeof(tmp)) < 0) return 0; new_l2_table = 1; } for(i = 0; i < L2_CACHE_SIZE; i++) { if (l2_offset == s->l2_cache_offsets[i]) { /* increment the hit count */ if (++s->l2_cache_counts[i] == 0xffffffff) { for(j = 0; j < L2_CACHE_SIZE; j++) { s->l2_cache_counts[j] >>= 1; } } l2_table = s->l2_cache + (i << s->l2_bits); goto found; } } /* not found: load a new entry in the least used one */ min_index = 0; min_count = 0xffffffff; for(i = 0; i < L2_CACHE_SIZE; i++) { if (s->l2_cache_counts[i] < min_count) { min_count = s->l2_cache_counts[i]; min_index = i; } } l2_table = s->l2_cache + (min_index << s->l2_bits); if (new_l2_table) { memset(l2_table, 0, s->l2_size * sizeof(uint64_t)); if (bdrv_pwrite_sync(bs->file, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) < 0) return 0; } else { if (bdrv_pread(bs->file, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) != s->l2_size * sizeof(uint64_t)) return 0; } s->l2_cache_offsets[min_index] = l2_offset; s->l2_cache_counts[min_index] = 1; found: l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1); cluster_offset = be64_to_cpu(l2_table[l2_index]); if (!cluster_offset || ((cluster_offset & QCOW_OFLAG_COMPRESSED) && allocate == 1)) { if (!allocate) return 0; /* allocate a new cluster */ if ((cluster_offset & QCOW_OFLAG_COMPRESSED) && (n_end - n_start) < s->cluster_sectors) { /* if the cluster is already compressed, we must decompress it in the case it is not completely overwritten */ if (decompress_cluster(bs, cluster_offset) < 0) return 0; cluster_offset = bdrv_getlength(bs->file->bs); cluster_offset = (cluster_offset + s->cluster_size - 1) & ~(s->cluster_size - 1); /* write the cluster content */ if (bdrv_pwrite(bs->file, cluster_offset, s->cluster_cache, s->cluster_size) != s->cluster_size) return -1; } else { cluster_offset = bdrv_getlength(bs->file->bs); if (allocate == 1) { /* round to cluster size */ cluster_offset = (cluster_offset + s->cluster_size - 1) & ~(s->cluster_size - 1); bdrv_truncate(bs->file, cluster_offset + s->cluster_size, NULL); /* if encrypted, we must initialize the cluster content which won't be written */ if (bs->encrypted && (n_end - n_start) < s->cluster_sectors) { uint64_t start_sect; assert(s->cipher); start_sect = (offset & ~(s->cluster_size - 1)) >> 9; for(i = 0; i < s->cluster_sectors; i++) { if (i < n_start || i >= n_end) { Error *err = NULL; memset(s->cluster_data, 0x00, 512); if (encrypt_sectors(s, start_sect + i, s->cluster_data, 1, true, &err) < 0) { error_free(err); errno = EIO; return -1; } if (bdrv_pwrite(bs->file, cluster_offset + i * 512, s->cluster_data, 512) != 512) return -1; } } } } else if (allocate == 2) { cluster_offset |= QCOW_OFLAG_COMPRESSED | (uint64_t)compressed_size << (63 - s->cluster_bits); } } /* update L2 
table */ tmp = cpu_to_be64(cluster_offset); l2_table[l2_index] = tmp; if (bdrv_pwrite_sync(bs->file, l2_offset + l2_index * sizeof(tmp), &tmp, sizeof(tmp)) < 0) return 0; } return cluster_offset; } | 23,403 |
0 | static int decode_hrd(VC9Context *v, GetBitContext *gb) { int i, num; num = get_bits(gb, 5); if (v->hrd_rate || num != v->hrd_num_leaky_buckets) { av_freep(&v->hrd_rate); } if (!v->hrd_rate) v->hrd_rate = av_malloc(num); if (!v->hrd_rate) return -1; if (v->hrd_buffer || num != v->hrd_num_leaky_buckets) { av_freep(&v->hrd_buffer); } if (!v->hrd_buffer) v->hrd_buffer = av_malloc(num); if (!v->hrd_buffer) return -1; v->hrd_num_leaky_buckets = num; //exponent in base-2 for rate v->bit_rate_exponent = get_bits(gb, 4); //exponent in base-2 for buffer_size v->buffer_size_exponent = get_bits(gb, 4); for (i=0; i<num; i++) { //mantissae, ordered (if not, use a function ? v->hrd_rate[i] = get_bits(gb, 16); if (i && v->hrd_rate[i-1]>=v->hrd_rate[i]) { av_log(v, AV_LOG_ERROR, "HDR Rates aren't strictly increasing:" "%i vs %i\n", v->hrd_rate[i-1], v->hrd_rate[i]); return -1; } v->hrd_buffer[i] = get_bits(gb, 16); if (i && v->hrd_buffer[i-1]<v->hrd_buffer[i]) { av_log(v, AV_LOG_ERROR, "HDR Buffers aren't decreasing:" "%i vs %i\n", v->hrd_buffer[i-1], v->hrd_buffer[i]); return -1; } } return 0; } | 23,406 |
0 | static int handle_utimensat(FsContext *ctx, V9fsPath *fs_path, const struct timespec *buf) { int fd, ret; struct handle_data *data = (struct handle_data *)ctx->private; fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK); if (fd < 0) { return fd; } ret = futimens(fd, buf); close(fd); return ret; } | 23,407 |
0 | int kqemu_cpu_exec(CPUState *env) { struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state; int ret, cpl, i; #ifdef CONFIG_PROFILER int64_t ti; #endif #ifdef _WIN32 DWORD temp; #endif #ifdef CONFIG_PROFILER ti = profile_getclock(); #endif LOG_INT("kqemu: cpu_exec: enter\n"); LOG_INT_STATE(env); for(i = 0; i < CPU_NB_REGS; i++) kenv->regs[i] = env->regs[i]; kenv->eip = env->eip; kenv->eflags = env->eflags; for(i = 0; i < 6; i++) kqemu_load_seg(&kenv->segs[i], &env->segs[i]); kqemu_load_seg(&kenv->ldt, &env->ldt); kqemu_load_seg(&kenv->tr, &env->tr); kqemu_load_seg(&kenv->gdt, &env->gdt); kqemu_load_seg(&kenv->idt, &env->idt); kenv->cr0 = env->cr[0]; kenv->cr2 = env->cr[2]; kenv->cr3 = env->cr[3]; kenv->cr4 = env->cr[4]; kenv->a20_mask = env->a20_mask; kenv->efer = env->efer; kenv->tsc_offset = 0; kenv->star = env->star; kenv->sysenter_cs = env->sysenter_cs; kenv->sysenter_esp = env->sysenter_esp; kenv->sysenter_eip = env->sysenter_eip; #ifdef TARGET_X86_64 kenv->lstar = env->lstar; kenv->cstar = env->cstar; kenv->fmask = env->fmask; kenv->kernelgsbase = env->kernelgsbase; #endif if (env->dr[7] & 0xff) { kenv->dr7 = env->dr[7]; kenv->dr0 = env->dr[0]; kenv->dr1 = env->dr[1]; kenv->dr2 = env->dr[2]; kenv->dr3 = env->dr[3]; } else { kenv->dr7 = 0; } kenv->dr6 = env->dr[6]; cpl = (env->hflags & HF_CPL_MASK); kenv->cpl = cpl; kenv->nb_pages_to_flush = nb_pages_to_flush; kenv->user_only = (env->kqemu_enabled == 1); kenv->nb_ram_pages_to_update = nb_ram_pages_to_update; nb_ram_pages_to_update = 0; kenv->nb_modified_ram_pages = nb_modified_ram_pages; kqemu_reset_modified_ram_pages(); if (env->cpuid_features & CPUID_FXSR) restore_native_fp_fxrstor(env); else restore_native_fp_frstor(env); #ifdef _WIN32 if (DeviceIoControl(kqemu_fd, KQEMU_EXEC, kenv, sizeof(struct kqemu_cpu_state), kenv, sizeof(struct kqemu_cpu_state), &temp, NULL)) { ret = kenv->retval; } else { ret = -1; } #else ioctl(kqemu_fd, KQEMU_EXEC, kenv); ret = kenv->retval; #endif if (env->cpuid_features & CPUID_FXSR) save_native_fp_fxsave(env); else save_native_fp_fsave(env); for(i = 0; i < CPU_NB_REGS; i++) env->regs[i] = kenv->regs[i]; env->eip = kenv->eip; env->eflags = kenv->eflags; for(i = 0; i < 6; i++) kqemu_save_seg(&env->segs[i], &kenv->segs[i]); cpu_x86_set_cpl(env, kenv->cpl); kqemu_save_seg(&env->ldt, &kenv->ldt); env->cr[0] = kenv->cr0; env->cr[4] = kenv->cr4; env->cr[3] = kenv->cr3; env->cr[2] = kenv->cr2; env->dr[6] = kenv->dr6; #ifdef TARGET_X86_64 env->kernelgsbase = kenv->kernelgsbase; #endif /* flush pages as indicated by kqemu */ if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) { tlb_flush(env, 1); } else { for(i = 0; i < kenv->nb_pages_to_flush; i++) { tlb_flush_page(env, pages_to_flush[i]); } } nb_pages_to_flush = 0; #ifdef CONFIG_PROFILER kqemu_time += profile_getclock() - ti; kqemu_exec_count++; #endif if (kenv->nb_ram_pages_to_update > 0) { cpu_tlb_update_dirty(env); } if (kenv->nb_modified_ram_pages > 0) { for(i = 0; i < kenv->nb_modified_ram_pages; i++) { unsigned long addr; addr = modified_ram_pages[i]; tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0); } } /* restore the hidden flags */ { unsigned int new_hflags; #ifdef TARGET_X86_64 if ((env->hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { /* long mode */ new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; } else #endif { /* legacy / compatibility case */ new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) >> (DESC_B_SHIFT - HF_CS32_SHIFT); new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> (DESC_B_SHIFT - 
HF_SS32_SHIFT); if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || !(env->hflags & HF_CS32_MASK)) { /* XXX: try to avoid this test. The problem comes from the fact that is real mode or vm86 mode we only modify the 'base' and 'selector' fields of the segment cache to go faster. A solution may be to force addseg to one in translate-i386.c. */ new_hflags |= HF_ADDSEG_MASK; } else { new_hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; } } env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) | new_hflags; } /* update FPU flags */ env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) | ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)); if (env->cr[4] & CR4_OSFXSR_MASK) env->hflags |= HF_OSFXSR_MASK; else env->hflags &= ~HF_OSFXSR_MASK; LOG_INT("kqemu: kqemu_cpu_exec: ret=0x%x\n", ret); if (ret == KQEMU_RET_SYSCALL) { /* syscall instruction */ return do_syscall(env, kenv); } else if ((ret & 0xff00) == KQEMU_RET_INT) { env->exception_index = ret & 0xff; env->error_code = 0; env->exception_is_int = 1; env->exception_next_eip = kenv->next_eip; #ifdef CONFIG_PROFILER kqemu_ret_int_count++; #endif LOG_INT("kqemu: interrupt v=%02x:\n", env->exception_index); LOG_INT_STATE(env); return 1; } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) { env->exception_index = ret & 0xff; env->error_code = kenv->error_code; env->exception_is_int = 0; env->exception_next_eip = 0; #ifdef CONFIG_PROFILER kqemu_ret_excp_count++; #endif LOG_INT("kqemu: exception v=%02x e=%04x:\n", env->exception_index, env->error_code); LOG_INT_STATE(env); return 1; } else if (ret == KQEMU_RET_INTR) { #ifdef CONFIG_PROFILER kqemu_ret_intr_count++; #endif LOG_INT_STATE(env); return 0; } else if (ret == KQEMU_RET_SOFTMMU) { #ifdef CONFIG_PROFILER { unsigned long pc = env->eip + env->segs[R_CS].base; kqemu_record_pc(pc); } #endif LOG_INT_STATE(env); return 2; } else { cpu_dump_state(env, stderr, fprintf, 0); fprintf(stderr, "Unsupported return value: 0x%x\n", ret); exit(1); } return 0; } | 23,408 |
0 | static void cpu_handle_ioreq(void *opaque) { XenIOState *state = opaque; ioreq_t *req = cpu_get_ioreq(state); handle_buffered_iopage(state); if (req) { handle_ioreq(state, req); if (req->state != STATE_IOREQ_INPROCESS) { fprintf(stderr, "Badness in I/O request ... not in service?!: " "%x, ptr: %x, port: %"PRIx64", " "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size ", type: %"FMT_ioreq_size"\n", req->state, req->data_is_ptr, req->addr, req->data, req->count, req->size, req->type); destroy_hvm_domain(false); return; } xen_wmb(); /* Update ioreq contents /then/ update state. */ /* * We do this before we send the response so that the tools * have the opportunity to pick up on the reset before the * guest resumes and does a hlt with interrupts disabled which * causes Xen to powerdown the domain. */ if (runstate_is_running()) { if (qemu_shutdown_requested_get()) { destroy_hvm_domain(false); } if (qemu_reset_requested_get()) { qemu_system_reset(VMRESET_REPORT); destroy_hvm_domain(true); } } req->state = STATE_IORESP_READY; xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]); } } | 23,410 |
0 | int float64_is_nan( float64 a1 ) { float64u u; uint64_t a; u.f = a1; a = u.i; return ( LIT64( 0xFFF0000000000000 ) < (bits64) ( a<<1 ) ); } | 23,412 |
0 | uint32_t helper_efdctsf (uint64_t val) { CPU_DoubleU u; float64 tmp; u.ll = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float64_is_nan(u.d))) return 0; tmp = uint64_to_float64(1ULL << 32, &env->vec_status); u.d = float64_mul(u.d, tmp, &env->vec_status); return float64_to_int32(u.d, &env->vec_status); } | 23,413 |
0 | static void char_socket_class_init(ObjectClass *oc, void *data) { ChardevClass *cc = CHARDEV_CLASS(oc); cc->parse = qemu_chr_parse_socket; cc->open = qmp_chardev_open_socket; cc->chr_wait_connected = tcp_chr_wait_connected; cc->chr_write = tcp_chr_write; cc->chr_sync_read = tcp_chr_sync_read; cc->chr_disconnect = tcp_chr_disconnect; cc->get_msgfds = tcp_get_msgfds; cc->set_msgfds = tcp_set_msgfds; cc->chr_add_client = tcp_chr_add_client; cc->chr_add_watch = tcp_chr_add_watch; cc->chr_update_read_handler = tcp_chr_update_read_handler; object_class_property_add(oc, "addr", "SocketAddressLegacy", char_socket_get_addr, NULL, NULL, NULL, &error_abort); object_class_property_add_bool(oc, "connected", char_socket_get_connected, NULL, &error_abort); } | 23,414 |
0 | static void hb_regs_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { uint32_t *regs = opaque; if (offset == 0xf00) { if (value == 1 || value == 2) { qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); } else if (value == 3) { qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); } } regs[offset/4] = value; } | 23,415 |
0 | static bool fw_cfg_ctl_mem_valid(void *opaque, target_phys_addr_t addr, unsigned size, bool is_write) { return is_write && size == 2; } | 23,416 |
0 | static ssize_t net_socket_receive(NetClientState *nc, const uint8_t *buf, size_t size) { NetSocketState *s = DO_UPCAST(NetSocketState, nc, nc); uint32_t len; len = htonl(size); send_all(s->fd, (const uint8_t *)&len, sizeof(len)); return send_all(s->fd, buf, size); } | 23,418 |
0 | static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options, Error **errp) { SocketAddress *saddr = NULL; QDict *addr = NULL; QObject *crumpled_addr = NULL; Visitor *iv = NULL; Error *local_err = NULL; qdict_extract_subqdict(options, &addr, "server."); if (!qdict_size(addr)) { error_setg(errp, "NBD server address missing"); goto done; } crumpled_addr = qdict_crumple(addr, errp); if (!crumpled_addr) { goto done; } /* * FIXME .numeric, .to, .ipv4 or .ipv6 don't work with -drive * server.type=inet. .to doesn't matter, it's ignored anyway. * That's because when @options come from -blockdev or * blockdev_add, members are typed according to the QAPI schema, * but when they come from -drive, they're all QString. The * visitor expects the former. */ iv = qobject_input_visitor_new(crumpled_addr); visit_type_SocketAddress(iv, NULL, &saddr, &local_err); if (local_err) { error_propagate(errp, local_err); goto done; } done: QDECREF(addr); qobject_decref(crumpled_addr); visit_free(iv); return saddr; } | 23,419 |
0 | static int local_mkdir(FsContext *ctx, const char *path, mode_t mode) { return mkdir(rpath(ctx, path), mode); } | 23,420 |
0 | static int vnc_client_io_error(VncState *vs, int ret, int last_errno) { if (ret == 0 || ret == -1) { if (ret == -1) { switch (last_errno) { case EINTR: case EAGAIN: #ifdef _WIN32 case WSAEWOULDBLOCK: #endif return 0; default: break; } } VNC_DEBUG("Closing down client sock %d %d\n", ret, ret < 0 ? last_errno : 0); qemu_set_fd_handler2(vs->csock, NULL, NULL, NULL, NULL); closesocket(vs->csock); qemu_del_timer(vs->timer); qemu_free_timer(vs->timer); if (vs->input.buffer) qemu_free(vs->input.buffer); if (vs->output.buffer) qemu_free(vs->output.buffer); #ifdef CONFIG_VNC_TLS if (vs->tls_session) { gnutls_deinit(vs->tls_session); vs->tls_session = NULL; } #endif /* CONFIG_VNC_TLS */ audio_del(vs); VncState *p, *parent = NULL; for (p = vs->vd->clients; p != NULL; p = p->next) { if (p == vs) { if (parent) parent->next = p->next; else vs->vd->clients = p->next; break; } parent = p; } if (!vs->vd->clients) dcl->idle = 1; qemu_free(vs->old_data); qemu_free(vs); return 0; } return ret; } | 23,421 |
0 | static void qemu_event_increment(void) { /* Write 8 bytes to be compatible with eventfd. */ static const uint64_t val = 1; ssize_t ret; if (io_thread_fd == -1) return; do { ret = write(io_thread_fd, &val, sizeof(val)); } while (ret < 0 && errno == EINTR); /* EAGAIN is fine, a read must be pending. */ if (ret < 0 && errno != EAGAIN) { fprintf(stderr, "qemu_event_increment: write() filed: %s\n", strerror(errno)); exit (1); } } | 23,422 |
0 | static uint64_t mv88w8618_wlan_read(void *opaque, target_phys_addr_t offset, unsigned size) { switch (offset) { /* Workaround to allow loading the binary-only wlandrv.ko crap * from the original Freecom firmware. */ case MP_WLAN_MAGIC1: return ~3; case MP_WLAN_MAGIC2: return -1; default: return 0; } } | 23,423 |
0 | static av_cold int oggvorbis_encode_close(AVCodecContext *avccontext) { OggVorbisContext *context = avccontext->priv_data; /* ogg_packet op ; */ vorbis_analysis_wrote(&context->vd, 0); /* notify vorbisenc this is EOF */ vorbis_block_clear(&context->vb); vorbis_dsp_clear(&context->vd); vorbis_info_clear(&context->vi); av_freep(&avccontext->coded_frame); av_freep(&avccontext->extradata); return 0; } | 23,424 |
0 | static int dxtory_decode_v2_565(AVCodecContext *avctx, AVFrame *pic, const uint8_t *src, int src_size, int is_565) { GetByteContext gb; GetBitContext gb2; int nslices, slice, slice_height; uint32_t off, slice_size; uint8_t *dst; int ret; bytestream2_init(&gb, src, src_size); nslices = bytestream2_get_le16(&gb); off = FFALIGN(nslices * 4 + 2, 16); if (src_size < off) { av_log(avctx, AV_LOG_ERROR, "no slice data\n"); return AVERROR_INVALIDDATA; } if (!nslices || avctx->height % nslices) { avpriv_request_sample(avctx, "%d slices for %dx%d", nslices, avctx->width, avctx->height); return AVERROR_PATCHWELCOME; } slice_height = avctx->height / nslices; avctx->pix_fmt = AV_PIX_FMT_RGB24; if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) return ret; dst = pic->data[0]; for (slice = 0; slice < nslices; slice++) { slice_size = bytestream2_get_le32(&gb); ret = check_slice_size(avctx, src, src_size, slice_size, off); if (ret < 0) return ret; init_get_bits(&gb2, src + off + 16, (slice_size - 16) * 8); dx2_decode_slice_565(&gb2, avctx->width, slice_height, dst, pic->linesize[0], is_565); dst += pic->linesize[0] * slice_height; off += slice_size; } return 0; } | 23,427 |
1 | static void gen_icread(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); #else if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } /* interpreted as no-op */ #endif } | 23,428 |
1 | static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_size) { const uint8_t *s = src; const uint8_t *end; #ifdef HAVE_MMX const uint8_t *mm_end; #endif uint16_t *d = (uint16_t *)dst; end = s + src_size; #ifdef HAVE_MMX mm_end = end - 15; #if 1 //is faster only if multiplies are reasonable fast (FIXME figure out on which cpus this is faster, on Athlon its slightly faster) asm volatile( "movq %3, %%mm5 \n\t" "movq %4, %%mm6 \n\t" "movq %5, %%mm7 \n\t" "jmp 2f \n\t" ASMALIGN(4) "1: \n\t" PREFETCH" 32(%1) \n\t" "movd (%1), %%mm0 \n\t" "movd 4(%1), %%mm3 \n\t" "punpckldq 8(%1), %%mm0 \n\t" "punpckldq 12(%1), %%mm3 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm3, %%mm4 \n\t" "pand %%mm6, %%mm0 \n\t" "pand %%mm6, %%mm3 \n\t" "pmaddwd %%mm7, %%mm0 \n\t" "pmaddwd %%mm7, %%mm3 \n\t" "pand %%mm5, %%mm1 \n\t" "pand %%mm5, %%mm4 \n\t" "por %%mm1, %%mm0 \n\t" "por %%mm4, %%mm3 \n\t" "psrld $5, %%mm0 \n\t" "pslld $11, %%mm3 \n\t" "por %%mm3, %%mm0 \n\t" MOVNTQ" %%mm0, (%0) \n\t" "add $16, %1 \n\t" "add $8, %0 \n\t" "2: \n\t" "cmp %2, %1 \n\t" " jb 1b \n\t" : "+r" (d), "+r"(s) : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216) ); #else __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm __volatile( "movq %0, %%mm7\n\t" "movq %1, %%mm6\n\t" ::"m"(red_16mask),"m"(green_16mask)); while(s < mm_end) { __asm __volatile( PREFETCH" 32%1\n\t" "movd %1, %%mm0\n\t" "movd 4%1, %%mm3\n\t" "punpckldq 8%1, %%mm0\n\t" "punpckldq 12%1, %%mm3\n\t" "movq %%mm0, %%mm1\n\t" "movq %%mm0, %%mm2\n\t" "movq %%mm3, %%mm4\n\t" "movq %%mm3, %%mm5\n\t" "psrlq $3, %%mm0\n\t" "psrlq $3, %%mm3\n\t" "pand %2, %%mm0\n\t" "pand %2, %%mm3\n\t" "psrlq $5, %%mm1\n\t" "psrlq $5, %%mm4\n\t" "pand %%mm6, %%mm1\n\t" "pand %%mm6, %%mm4\n\t" "psrlq $8, %%mm2\n\t" "psrlq $8, %%mm5\n\t" "pand %%mm7, %%mm2\n\t" "pand %%mm7, %%mm5\n\t" "por %%mm1, %%mm0\n\t" "por %%mm4, %%mm3\n\t" "por %%mm2, %%mm0\n\t" "por %%mm5, %%mm3\n\t" "psllq $16, %%mm3\n\t" "por %%mm3, %%mm0\n\t" MOVNTQ" %%mm0, %0\n\t" :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory"); d += 4; s += 16; } #endif __asm __volatile(SFENCE:::"memory"); __asm __volatile(EMMS:::"memory"); #endif while(s < end) { register int rgb = *(uint32_t*)s; s += 4; *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8); } } | 23,433 |
1 | static void print_formats(AVFilterContext *filter_ctx) { int i, j; #define PRINT_FMTS(inout, outin, INOUT) \ for (i = 0; i < filter_ctx->nb_##inout##puts; i++) { \ if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_VIDEO) { \ AVFilterFormats *fmts = \ filter_ctx->inout##puts[i]->outin##_formats; \ for (j = 0; j < fmts->nb_formats; j++) \ if(av_get_pix_fmt_name(fmts->formats[j])) \ printf(#INOUT "PUT[%d] %s: fmt:%s\n", \ i, filter_ctx->filter->inout##puts[i].name, \ av_get_pix_fmt_name(fmts->formats[j])); \ } else if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_AUDIO) { \ AVFilterFormats *fmts; \ AVFilterChannelLayouts *layouts; \ \ fmts = filter_ctx->inout##puts[i]->outin##_formats; \ for (j = 0; j < fmts->nb_formats; j++) \ printf(#INOUT "PUT[%d] %s: fmt:%s\n", \ i, filter_ctx->filter->inout##puts[i].name, \ av_get_sample_fmt_name(fmts->formats[j])); \ \ layouts = filter_ctx->inout##puts[i]->outin##_channel_layouts; \ for (j = 0; j < layouts->nb_channel_layouts; j++) { \ char buf[256]; \ av_get_channel_layout_string(buf, sizeof(buf), -1, \ layouts->channel_layouts[j]); \ printf(#INOUT "PUT[%d] %s: chlayout:%s\n", \ i, filter_ctx->filter->inout##puts[i].name, buf); \ } \ } \ } \ PRINT_FMTS(in, out, IN); PRINT_FMTS(out, in, OUT); } | 23,435 |
1 | static void imx6_defer_clear_reset_bit(int cpuid, IMX6SRCState *s, unsigned long reset_shift) { struct SRCSCRResetInfo *ri; ri = g_malloc(sizeof(struct SRCSCRResetInfo)); ri->s = s; ri->reset_bit = reset_shift; async_run_on_cpu(arm_get_cpu_by_id(cpuid), imx6_clear_reset_bit, RUN_ON_CPU_HOST_PTR(ri)); } | 23,436 |
1 | static void fsl_imx6_realize(DeviceState *dev, Error **errp) { FslIMX6State *s = FSL_IMX6(dev); uint16_t i; Error *err = NULL; for (i = 0; i < smp_cpus; i++) { /* On uniprocessor, the CBAR is set to 0 */ if (smp_cpus > 1) { object_property_set_int(OBJECT(&s->cpu[i]), FSL_IMX6_A9MPCORE_ADDR, "reset-cbar", &error_abort); } /* All CPU but CPU 0 start in power off mode */ if (i) { object_property_set_bool(OBJECT(&s->cpu[i]), true, "start-powered-off", &error_abort); } object_property_set_bool(OBJECT(&s->cpu[i]), true, "realized", &err); if (err) { error_propagate(errp, err); return; } } object_property_set_int(OBJECT(&s->a9mpcore), smp_cpus, "num-cpu", &error_abort); object_property_set_int(OBJECT(&s->a9mpcore), FSL_IMX6_MAX_IRQ + GIC_INTERNAL, "num-irq", &error_abort); object_property_set_bool(OBJECT(&s->a9mpcore), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->a9mpcore), 0, FSL_IMX6_A9MPCORE_ADDR); for (i = 0; i < smp_cpus; i++) { sysbus_connect_irq(SYS_BUS_DEVICE(&s->a9mpcore), i, qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_IRQ)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->a9mpcore), i + smp_cpus, qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_FIQ)); } object_property_set_bool(OBJECT(&s->ccm), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->ccm), 0, FSL_IMX6_CCM_ADDR); object_property_set_bool(OBJECT(&s->src), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->src), 0, FSL_IMX6_SRC_ADDR); /* Initialize all UARTs */ for (i = 0; i < FSL_IMX6_NUM_UARTS; i++) { static const struct { hwaddr addr; unsigned int irq; } serial_table[FSL_IMX6_NUM_UARTS] = { { FSL_IMX6_UART1_ADDR, FSL_IMX6_UART1_IRQ }, { FSL_IMX6_UART2_ADDR, FSL_IMX6_UART2_IRQ }, { FSL_IMX6_UART3_ADDR, FSL_IMX6_UART3_IRQ }, { FSL_IMX6_UART4_ADDR, FSL_IMX6_UART4_IRQ }, { FSL_IMX6_UART5_ADDR, FSL_IMX6_UART5_IRQ }, }; if (i < MAX_SERIAL_PORTS) { Chardev *chr; chr = serial_hds[i]; if (!chr) { char *label = g_strdup_printf("imx6.uart%d", i + 1); chr = qemu_chr_new(label, "null"); g_free(label); serial_hds[i] = chr; } qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", chr); } object_property_set_bool(OBJECT(&s->uart[i]), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, serial_table[i].addr); sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, qdev_get_gpio_in(DEVICE(&s->a9mpcore), serial_table[i].irq)); } s->gpt.ccm = IMX_CCM(&s->ccm); object_property_set_bool(OBJECT(&s->gpt), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpt), 0, FSL_IMX6_GPT_ADDR); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpt), 0, qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_GPT_IRQ)); /* Initialize all EPIT timers */ for (i = 0; i < FSL_IMX6_NUM_EPITS; i++) { static const struct { hwaddr addr; unsigned int irq; } epit_table[FSL_IMX6_NUM_EPITS] = { { FSL_IMX6_EPIT1_ADDR, FSL_IMX6_EPIT1_IRQ }, { FSL_IMX6_EPIT2_ADDR, FSL_IMX6_EPIT2_IRQ }, }; s->epit[i].ccm = IMX_CCM(&s->ccm); object_property_set_bool(OBJECT(&s->epit[i]), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->epit[i]), 0, epit_table[i].addr); sysbus_connect_irq(SYS_BUS_DEVICE(&s->epit[i]), 0, qdev_get_gpio_in(DEVICE(&s->a9mpcore), epit_table[i].irq)); } /* Initialize all I2C */ for (i = 0; i < FSL_IMX6_NUM_I2CS; i++) { static const struct { hwaddr 
addr; unsigned int irq; } i2c_table[FSL_IMX6_NUM_I2CS] = { { FSL_IMX6_I2C1_ADDR, FSL_IMX6_I2C1_IRQ }, { FSL_IMX6_I2C2_ADDR, FSL_IMX6_I2C2_IRQ }, { FSL_IMX6_I2C3_ADDR, FSL_IMX6_I2C3_IRQ } }; object_property_set_bool(OBJECT(&s->i2c[i]), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->i2c[i]), 0, i2c_table[i].addr); sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c[i]), 0, qdev_get_gpio_in(DEVICE(&s->a9mpcore), i2c_table[i].irq)); } /* Initialize all GPIOs */ for (i = 0; i < FSL_IMX6_NUM_GPIOS; i++) { static const struct { hwaddr addr; unsigned int irq_low; unsigned int irq_high; } gpio_table[FSL_IMX6_NUM_GPIOS] = { { FSL_IMX6_GPIO1_ADDR, FSL_IMX6_GPIO1_LOW_IRQ, FSL_IMX6_GPIO1_HIGH_IRQ }, { FSL_IMX6_GPIO2_ADDR, FSL_IMX6_GPIO2_LOW_IRQ, FSL_IMX6_GPIO2_HIGH_IRQ }, { FSL_IMX6_GPIO3_ADDR, FSL_IMX6_GPIO3_LOW_IRQ, FSL_IMX6_GPIO3_HIGH_IRQ }, { FSL_IMX6_GPIO4_ADDR, FSL_IMX6_GPIO4_LOW_IRQ, FSL_IMX6_GPIO4_HIGH_IRQ }, { FSL_IMX6_GPIO5_ADDR, FSL_IMX6_GPIO5_LOW_IRQ, FSL_IMX6_GPIO5_HIGH_IRQ }, { FSL_IMX6_GPIO6_ADDR, FSL_IMX6_GPIO6_LOW_IRQ, FSL_IMX6_GPIO6_HIGH_IRQ }, { FSL_IMX6_GPIO7_ADDR, FSL_IMX6_GPIO7_LOW_IRQ, FSL_IMX6_GPIO7_HIGH_IRQ }, }; object_property_set_bool(OBJECT(&s->gpio[i]), true, "has-edge-sel", &error_abort); object_property_set_bool(OBJECT(&s->gpio[i]), true, "has-upper-pin-irq", &error_abort); object_property_set_bool(OBJECT(&s->gpio[i]), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->gpio[i]), 0, gpio_table[i].addr); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 0, qdev_get_gpio_in(DEVICE(&s->a9mpcore), gpio_table[i].irq_low)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio[i]), 1, qdev_get_gpio_in(DEVICE(&s->a9mpcore), gpio_table[i].irq_high)); } /* Initialize all SDHC */ for (i = 0; i < FSL_IMX6_NUM_ESDHCS; i++) { static const struct { hwaddr addr; unsigned int irq; } esdhc_table[FSL_IMX6_NUM_ESDHCS] = { { FSL_IMX6_uSDHC1_ADDR, FSL_IMX6_uSDHC1_IRQ }, { FSL_IMX6_uSDHC2_ADDR, FSL_IMX6_uSDHC2_IRQ }, { FSL_IMX6_uSDHC3_ADDR, FSL_IMX6_uSDHC3_IRQ }, { FSL_IMX6_uSDHC4_ADDR, FSL_IMX6_uSDHC4_IRQ }, }; object_property_set_bool(OBJECT(&s->esdhc[i]), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->esdhc[i]), 0, esdhc_table[i].addr); sysbus_connect_irq(SYS_BUS_DEVICE(&s->esdhc[i]), 0, qdev_get_gpio_in(DEVICE(&s->a9mpcore), esdhc_table[i].irq)); } /* Initialize all ECSPI */ for (i = 0; i < FSL_IMX6_NUM_ECSPIS; i++) { static const struct { hwaddr addr; unsigned int irq; } spi_table[FSL_IMX6_NUM_ECSPIS] = { { FSL_IMX6_eCSPI1_ADDR, FSL_IMX6_ECSPI1_IRQ }, { FSL_IMX6_eCSPI2_ADDR, FSL_IMX6_ECSPI2_IRQ }, { FSL_IMX6_eCSPI3_ADDR, FSL_IMX6_ECSPI3_IRQ }, { FSL_IMX6_eCSPI4_ADDR, FSL_IMX6_ECSPI4_IRQ }, { FSL_IMX6_eCSPI5_ADDR, FSL_IMX6_ECSPI5_IRQ }, }; /* Initialize the SPI */ object_property_set_bool(OBJECT(&s->spi[i]), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, spi_table[i].addr); sysbus_connect_irq(SYS_BUS_DEVICE(&s->spi[i]), 0, qdev_get_gpio_in(DEVICE(&s->a9mpcore), spi_table[i].irq)); } object_property_set_bool(OBJECT(&s->eth), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->eth), 0, FSL_IMX6_ENET_ADDR); sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth), 0, qdev_get_gpio_in(DEVICE(&s->a9mpcore), FSL_IMX6_ENET_MAC_IRQ)); sysbus_connect_irq(SYS_BUS_DEVICE(&s->eth), 1, qdev_get_gpio_in(DEVICE(&s->a9mpcore), 
FSL_IMX6_ENET_MAC_1588_IRQ)); /* ROM memory */ memory_region_init_rom_nomigrate(&s->rom, NULL, "imx6.rom", FSL_IMX6_ROM_SIZE, &err); if (err) { error_propagate(errp, err); return; } memory_region_add_subregion(get_system_memory(), FSL_IMX6_ROM_ADDR, &s->rom); /* CAAM memory */ memory_region_init_rom_nomigrate(&s->caam, NULL, "imx6.caam", FSL_IMX6_CAAM_MEM_SIZE, &err); if (err) { error_propagate(errp, err); return; } memory_region_add_subregion(get_system_memory(), FSL_IMX6_CAAM_MEM_ADDR, &s->caam); /* OCRAM memory */ memory_region_init_ram(&s->ocram, NULL, "imx6.ocram", FSL_IMX6_OCRAM_SIZE, &err); if (err) { error_propagate(errp, err); return; } memory_region_add_subregion(get_system_memory(), FSL_IMX6_OCRAM_ADDR, &s->ocram); /* internal OCRAM (256 KB) is aliased over 1 MB */ memory_region_init_alias(&s->ocram_alias, NULL, "imx6.ocram_alias", &s->ocram, 0, FSL_IMX6_OCRAM_ALIAS_SIZE); memory_region_add_subregion(get_system_memory(), FSL_IMX6_OCRAM_ALIAS_ADDR, &s->ocram_alias); } | 23,437 |
1 | static int hls_slice_data_wpp(HEVCContext *s, const uint8_t *nal, int length) { HEVCLocalContext *lc = s->HEVClc; int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int)); int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int)); int offset; int startheader, cmpt = 0; int i, j, res = 0; if (!s->sList[1]) { ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1); for (i = 1; i < s->threads_number; i++) { s->sList[i] = av_malloc(sizeof(HEVCContext)); memcpy(s->sList[i], s, sizeof(HEVCContext)); s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext)); s->sList[i]->HEVClc = s->HEVClcList[i]; offset = (lc->gb.index >> 3); for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < s->skipped_bytes; j++) { if (s->skipped_bytes_pos[j] >= offset && s->skipped_bytes_pos[j] < startheader) { startheader--; cmpt++; for (i = 1; i < s->sh.num_entry_point_offsets; i++) { offset += (s->sh.entry_point_offset[i - 1] - cmpt); for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[i]; j < s->skipped_bytes; j++) { if (s->skipped_bytes_pos[j] >= offset && s->skipped_bytes_pos[j] < startheader) { startheader--; cmpt++; s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt; s->sh.offset[i - 1] = offset; if (s->sh.num_entry_point_offsets != 0) { offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt; s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset; s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset; s->data = nal; for (i = 1; i < s->threads_number; i++) { s->sList[i]->HEVClc->first_qp_group = 1; s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y; memcpy(s->sList[i], s, sizeof(HEVCContext)); s->sList[i]->HEVClc = s->HEVClcList[i]; avpriv_atomic_int_set(&s->wpp_err, 0); ff_reset_entries(s->avctx); for (i = 0; i <= s->sh.num_entry_point_offsets; i++) { arg[i] = i; ret[i] = 0; if (s->pps->entropy_coding_sync_enabled_flag) s->avctx->execute2(s->avctx, (void *) hls_decode_entry_wpp, arg, ret, s->sh.num_entry_point_offsets + 1); for (i = 0; i <= s->sh.num_entry_point_offsets; i++) res += ret[i]; return res; | 23,438 |
1 | ssize_t v9fs_iov_vunmarshal(struct iovec *out_sg, int out_num, size_t offset, int bswap, const char *fmt, va_list ap) { int i; ssize_t copied = 0; size_t old_offset = offset; for (i = 0; fmt[i]; i++) { switch (fmt[i]) { case 'b': { uint8_t *valp = va_arg(ap, uint8_t *); copied = v9fs_unpack(valp, out_sg, out_num, offset, sizeof(*valp)); break; } case 'w': { uint16_t val, *valp; valp = va_arg(ap, uint16_t *); copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val)); if (bswap) { *valp = le16_to_cpu(val); } else { *valp = val; } break; } case 'd': { uint32_t val, *valp; valp = va_arg(ap, uint32_t *); copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val)); if (bswap) { *valp = le32_to_cpu(val); } else { *valp = val; } break; } case 'q': { uint64_t val, *valp; valp = va_arg(ap, uint64_t *); copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val)); if (bswap) { *valp = le64_to_cpu(val); } else { *valp = val; } break; } case 's': { V9fsString *str = va_arg(ap, V9fsString *); copied = v9fs_iov_unmarshal(out_sg, out_num, offset, bswap, "w", &str->size); if (copied > 0) { offset += copied; str->data = g_malloc(str->size + 1); copied = v9fs_unpack(str->data, out_sg, out_num, offset, str->size); if (copied > 0) { str->data[str->size] = 0; } else { v9fs_string_free(str); } } break; } case 'Q': { V9fsQID *qidp = va_arg(ap, V9fsQID *); copied = v9fs_iov_unmarshal(out_sg, out_num, offset, bswap, "bdq", &qidp->type, &qidp->version, &qidp->path); break; } case 'S': { V9fsStat *statp = va_arg(ap, V9fsStat *); copied = v9fs_iov_unmarshal(out_sg, out_num, offset, bswap, "wwdQdddqsssssddd", &statp->size, &statp->type, &statp->dev, &statp->qid, &statp->mode, &statp->atime, &statp->mtime, &statp->length, &statp->name, &statp->uid, &statp->gid, &statp->muid, &statp->extension, &statp->n_uid, &statp->n_gid, &statp->n_muid); break; } case 'I': { V9fsIattr *iattr = va_arg(ap, V9fsIattr *); copied = v9fs_iov_unmarshal(out_sg, out_num, offset, bswap, "ddddqqqqq", &iattr->valid, &iattr->mode, &iattr->uid, &iattr->gid, &iattr->size, &iattr->atime_sec, &iattr->atime_nsec, &iattr->mtime_sec, &iattr->mtime_nsec); break; } default: break; } if (copied < 0) { return copied; } offset += copied; } return offset - old_offset; } | 23,439 |
0 | static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int log2w, int log2h, int stride) { const int index = size2index[log2h][log2w]; const int h = 1 << log2h; int code = get_vlc2(&f->gb, block_type_vlc[1 - (f->version > 1)][index].table, BLOCK_TYPE_VLC_BITS, 1); uint16_t *start = (uint16_t *)f->last_picture.data[0]; uint16_t *end = start + stride * (f->avctx->height - h + 1) - (1 << log2w); av_assert2(code >= 0 && code <= 6); if (code == 0) { if (bytestream2_get_bytes_left(&f->g) < 1) { av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n"); return; } src += f->mv[bytestream2_get_byteu(&f->g)]; if (start > src || src > end) { av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n"); return; } mcdc(dst, src, log2w, h, stride, 1, 0); } else if (code == 1) { log2h--; decode_p_block(f, dst, src, log2w, log2h, stride); decode_p_block(f, dst + (stride << log2h), src + (stride << log2h), log2w, log2h, stride); } else if (code == 2) { log2w--; decode_p_block(f, dst , src, log2w, log2h, stride); decode_p_block(f, dst + (1 << log2w), src + (1 << log2w), log2w, log2h, stride); } else if (code == 3 && f->version < 2) { mcdc(dst, src, log2w, h, stride, 1, 0); } else if (code == 4) { if (bytestream2_get_bytes_left(&f->g) < 1) { av_log(f->avctx, AV_LOG_ERROR, "bytestream overread\n"); return; } src += f->mv[bytestream2_get_byteu(&f->g)]; if (start > src || src > end) { av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n"); return; } if (bytestream2_get_bytes_left(&f->g) < 2){ av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n"); return; } mcdc(dst, src, log2w, h, stride, 1, bytestream2_get_le16u(&f->g2)); } else if (code == 5) { if (bytestream2_get_bytes_left(&f->g) < 2) { av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n"); return; } mcdc(dst, src, log2w, h, stride, 0, bytestream2_get_le16u(&f->g2)); } else if (code == 6) { if (bytestream2_get_bytes_left(&f->g) < 4) { av_log(f->avctx, AV_LOG_ERROR, "wordstream overread\n"); return; } if (log2w) { dst[0] = bytestream2_get_le16u(&f->g2); dst[1] = bytestream2_get_le16u(&f->g2); } else { dst[0] = bytestream2_get_le16u(&f->g2); dst[stride] = bytestream2_get_le16u(&f->g2); } } } | 23,441 |
0 | static void generate_coupling_coordinates(AC3DecodeContext * ctx) { ac3_audio_block *ab = &ctx->audio_block; uint8_t exp, mstrcplco; int16_t mant; uint32_t cplbndstrc = (1 << ab->ncplsubnd) >> 1; int ch, bnd, sbnd; float cplco; if (ab->cplcoe) for (ch = 0; ch < ctx->bsi.nfchans; ch++) if (ab->cplcoe & (1 << ch)) { mstrcplco = 3 * ab->mstrcplco[ch]; sbnd = ab->cplbegf; for (bnd = 0; bnd < ab->ncplbnd; bnd++) { exp = ab->cplcoexp[ch][bnd]; if (exp == 15) mant = ab->cplcomant[ch][bnd] <<= 14; else mant = (ab->cplcomant[ch][bnd] | 0x10) << 13; cplco = to_float(exp + mstrcplco, mant); if (ctx->bsi.acmod == 0x02 && (ab->flags & AC3_AB_PHSFLGINU) && ch == 1 && (ab->phsflg & (1 << bnd))) cplco = -cplco; /* invert the right channel */ ab->cplco[ch][sbnd++] = cplco; while (cplbndstrc & ab->cplbndstrc) { cplbndstrc >>= 1; ab->cplco[ch][sbnd++] = cplco; } cplbndstrc >>= 1; } } } | 23,442 |
0 | static void usb_serial_event(void *opaque, int event) { USBSerialState *s = opaque; switch (event) { case CHR_EVENT_BREAK: s->event_trigger |= FTDI_BI; break; case CHR_EVENT_FOCUS: break; case CHR_EVENT_OPENED: usb_serial_reset(s); /* TODO: Reset USB port */ break; } } | 23,443 |
0 | static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip) { gen_update_cc_op(s); gen_jmp_im(cur_eip); gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno)); s->is_jmp = DISAS_TB_JUMP; } | 23,445 |
0 | uint32_t helper_efdctsiz (uint64_t val) { CPU_DoubleU u; u.ll = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float64_is_nan(u.d))) return 0; return float64_to_int32_round_to_zero(u.d, &env->vec_status); } | 23,446 |
0 | static inline uint32_t reloc_pc16_val(tcg_insn_unit *pc, tcg_insn_unit *target) { /* Let the compiler perform the right-shift as part of the arithmetic. */ ptrdiff_t disp = target - (pc + 1); assert(disp == (int16_t)disp); return disp & 0xffff; } | 23,447 |
0 | static int update_streams_from_subdemuxer(AVFormatContext *s, struct playlist *pls) { while (pls->n_main_streams < pls->ctx->nb_streams) { int ist_idx = pls->n_main_streams; AVStream *st = avformat_new_stream(s, NULL); AVStream *ist = pls->ctx->streams[ist_idx]; if (!st) return AVERROR(ENOMEM); st->id = pls->index; set_stream_info_from_input_stream(st, pls, ist); dynarray_add(&pls->main_streams, &pls->n_main_streams, st); add_stream_to_programs(s, pls, st); } return 0; } | 23,448 |
0 | static uint16_t pxb_bus_numa_node(PCIBus *bus) { PXBDev *pxb = PXB_DEV(bus->parent_dev); return pxb->numa_node; } | 23,449 |
0 | void ptimer_run(ptimer_state *s, int oneshot) { bool was_disabled = !s->enabled; if (was_disabled && s->period == 0) { fprintf(stderr, "Timer with period zero, disabling\n"); return; } s->enabled = oneshot ? 2 : 1; if (was_disabled) { s->next_event = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); ptimer_reload(s); } } | 23,450 |
0 | static uint16_t reloc_pc14_val(tcg_insn_unit *pc, tcg_insn_unit *target) { ptrdiff_t disp = tcg_ptr_byte_diff(target, pc); assert(disp == (int16_t) disp); return disp & 0xfffc; } | 23,451 |
0 | static void qemu_sgl_init_external(VirtIOSCSIReq *req, struct iovec *sg, hwaddr *addr, int num) { QEMUSGList *qsgl = &req->qsgl; qemu_sglist_init(qsgl, DEVICE(req->dev), num, &address_space_memory); while (num--) { qemu_sglist_add(qsgl, *(addr++), (sg++)->iov_len); } } | 23,452 |
0 | bool vring_setup(Vring *vring, VirtIODevice *vdev, int n) { hwaddr vring_addr = virtio_queue_get_ring_addr(vdev, n); hwaddr vring_size = virtio_queue_get_ring_size(vdev, n); void *vring_ptr; vring->broken = false; hostmem_init(&vring->hostmem); vring_ptr = hostmem_lookup(&vring->hostmem, vring_addr, vring_size, true); if (!vring_ptr) { error_report("Failed to map vring " "addr %#" HWADDR_PRIx " size %" HWADDR_PRIu, vring_addr, vring_size); vring->broken = true; return false; } vring_init(&vring->vr, virtio_queue_get_num(vdev, n), vring_ptr, 4096); vring->last_avail_idx = 0; vring->last_used_idx = 0; vring->signalled_used = 0; vring->signalled_used_valid = false; trace_vring_setup(virtio_queue_get_ring_addr(vdev, n), vring->vr.desc, vring->vr.avail, vring->vr.used); return true; } | 23,453 |
0 | static uint32_t drc_set_usable(sPAPRDRConnector *drc) { /* if there's no resource/device associated with the DRC, there's * no way for us to put it in an allocation state consistent with * being 'USABLE'. PAPR 2.7, 13.5.3.4 documents that this should * result in an RTAS return code of -3 / "no such indicator" */ if (!drc->dev) { return RTAS_OUT_NO_SUCH_INDICATOR; } if (drc->awaiting_release) { /* Don't allow the guest to move a device away from UNUSABLE * state when we want to unplug it */ return RTAS_OUT_NO_SUCH_INDICATOR; } drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_USABLE; return RTAS_OUT_SUCCESS; } | 23,454 |
0 | static uint32_t gt64120_readl (void *opaque, target_phys_addr_t addr) { GT64120State *s = opaque; uint32_t val; uint32_t saddr; val = 0; saddr = (addr & 0xfff) >> 2; switch (saddr) { /* CPU Configuration */ case GT_MULTI: /* Only one GT64xxx is present on the CPU bus, return the initial value */ val = s->regs[saddr]; break; /* CPU Error Report */ case GT_CPUERR_ADDRLO: case GT_CPUERR_ADDRHI: case GT_CPUERR_DATALO: case GT_CPUERR_DATAHI: case GT_CPUERR_PARITY: /* Emulated memory has no error, always return the initial values */ val = s->regs[saddr]; break; /* CPU Sync Barrier */ case GT_PCI0SYNC: case GT_PCI1SYNC: /* Reading those register should empty all FIFO on the PCI bus, which are not emulated. The return value should be a random value that should be ignored. */ val = 0xc000ffee; break; /* ECC */ case GT_ECC_ERRDATALO: case GT_ECC_ERRDATAHI: case GT_ECC_MEM: case GT_ECC_CALC: case GT_ECC_ERRADDR: /* Emulated memory has no error, always return the initial values */ val = s->regs[saddr]; break; case GT_CPU: case GT_SCS10LD: case GT_SCS10HD: case GT_SCS32LD: case GT_SCS32HD: case GT_CS20LD: case GT_CS20HD: case GT_CS3BOOTLD: case GT_CS3BOOTHD: case GT_SCS10AR: case GT_SCS32AR: case GT_CS20R: case GT_CS3BOOTR: case GT_PCI0IOLD: case GT_PCI0M0LD: case GT_PCI0M1LD: case GT_PCI1IOLD: case GT_PCI1M0LD: case GT_PCI1M1LD: case GT_PCI0IOHD: case GT_PCI0M0HD: case GT_PCI0M1HD: case GT_PCI1IOHD: case GT_PCI1M0HD: case GT_PCI1M1HD: case GT_PCI0IOREMAP: case GT_PCI0M0REMAP: case GT_PCI0M1REMAP: case GT_PCI1IOREMAP: case GT_PCI1M0REMAP: case GT_PCI1M1REMAP: case GT_ISD: val = s->regs[saddr]; break; case GT_PCI0_IACK: /* Read the IRQ number */ val = pic_read_irq(isa_pic); break; /* SDRAM and Device Address Decode */ case GT_SCS0LD: case GT_SCS0HD: case GT_SCS1LD: case GT_SCS1HD: case GT_SCS2LD: case GT_SCS2HD: case GT_SCS3LD: case GT_SCS3HD: case GT_CS0LD: case GT_CS0HD: case GT_CS1LD: case GT_CS1HD: case GT_CS2LD: case GT_CS2HD: case GT_CS3LD: case GT_CS3HD: case GT_BOOTLD: case GT_BOOTHD: case GT_ADERR: val = s->regs[saddr]; break; /* SDRAM Configuration */ case GT_SDRAM_CFG: case GT_SDRAM_OPMODE: case GT_SDRAM_BM: case GT_SDRAM_ADDRDECODE: val = s->regs[saddr]; break; /* SDRAM Parameters */ case GT_SDRAM_B0: case GT_SDRAM_B1: case GT_SDRAM_B2: case GT_SDRAM_B3: /* We don't simulate electrical parameters of the SDRAM. Just return the last written value. 
*/ val = s->regs[saddr]; break; /* Device Parameters */ case GT_DEV_B0: case GT_DEV_B1: case GT_DEV_B2: case GT_DEV_B3: case GT_DEV_BOOT: val = s->regs[saddr]; break; /* DMA Record */ case GT_DMA0_CNT: case GT_DMA1_CNT: case GT_DMA2_CNT: case GT_DMA3_CNT: case GT_DMA0_SA: case GT_DMA1_SA: case GT_DMA2_SA: case GT_DMA3_SA: case GT_DMA0_DA: case GT_DMA1_DA: case GT_DMA2_DA: case GT_DMA3_DA: case GT_DMA0_NEXT: case GT_DMA1_NEXT: case GT_DMA2_NEXT: case GT_DMA3_NEXT: case GT_DMA0_CUR: case GT_DMA1_CUR: case GT_DMA2_CUR: case GT_DMA3_CUR: val = s->regs[saddr]; break; /* DMA Channel Control */ case GT_DMA0_CTRL: case GT_DMA1_CTRL: case GT_DMA2_CTRL: case GT_DMA3_CTRL: val = s->regs[saddr]; break; /* DMA Arbiter */ case GT_DMA_ARB: val = s->regs[saddr]; break; /* Timer/Counter */ case GT_TC0: case GT_TC1: case GT_TC2: case GT_TC3: case GT_TC_CONTROL: val = s->regs[saddr]; break; /* PCI Internal */ case GT_PCI0_CFGADDR: val = s->pci->config_reg; break; case GT_PCI0_CFGDATA: if (!(s->pci->config_reg & (1u << 31))) val = 0xffffffff; else val = pci_host_data_readl(s->pci, 0); break; case GT_PCI0_CMD: case GT_PCI0_TOR: case GT_PCI0_BS_SCS10: case GT_PCI0_BS_SCS32: case GT_PCI0_BS_CS20: case GT_PCI0_BS_CS3BT: case GT_PCI1_IACK: case GT_PCI0_BARE: case GT_PCI0_PREFMBR: case GT_PCI0_SCS10_BAR: case GT_PCI0_SCS32_BAR: case GT_PCI0_CS20_BAR: case GT_PCI0_CS3BT_BAR: case GT_PCI0_SSCS10_BAR: case GT_PCI0_SSCS32_BAR: case GT_PCI0_SCS3BT_BAR: case GT_PCI1_CMD: case GT_PCI1_TOR: case GT_PCI1_BS_SCS10: case GT_PCI1_BS_SCS32: case GT_PCI1_BS_CS20: case GT_PCI1_BS_CS3BT: case GT_PCI1_BARE: case GT_PCI1_PREFMBR: case GT_PCI1_SCS10_BAR: case GT_PCI1_SCS32_BAR: case GT_PCI1_CS20_BAR: case GT_PCI1_CS3BT_BAR: case GT_PCI1_SSCS10_BAR: case GT_PCI1_SSCS32_BAR: case GT_PCI1_SCS3BT_BAR: case GT_PCI1_CFGADDR: case GT_PCI1_CFGDATA: val = s->regs[saddr]; break; /* Interrupts */ case GT_INTRCAUSE: val = s->regs[saddr]; dprintf("INTRCAUSE %x\n", val); break; case GT_INTRMASK: val = s->regs[saddr]; dprintf("INTRMASK %x\n", val); break; case GT_PCI0_ICMASK: val = s->regs[saddr]; dprintf("ICMASK %x\n", val); break; case GT_PCI0_SERR0MASK: val = s->regs[saddr]; dprintf("SERR0MASK %x\n", val); break; /* Reserved when only PCI_0 is configured. */ case GT_HINTRCAUSE: case GT_CPU_INTSEL: case GT_PCI0_INTSEL: case GT_HINTRMASK: case GT_PCI0_HICMASK: case GT_PCI1_SERR1MASK: val = s->regs[saddr]; break; default: val = s->regs[saddr]; dprintf ("Bad register offset 0x%x\n", (int)addr); break; } #ifdef TARGET_WORDS_BIGENDIAN val = bswap32(val); #endif return val; } | 23,456 |
0 | static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) { CPUX86State *env = &cpu->env; FeatureWord w; GList *l; Error *local_err = NULL; /*TODO: cpu->max_features incorrectly overwrites features * set using "feat=on|off". Once we fix this, we can convert * plus_features & minus_features to global properties * inside x86_cpu_parse_featurestr() too. */ if (cpu->max_features) { for (w = 0; w < FEATURE_WORDS; w++) { env->features[w] = x86_cpu_get_supported_feature_word(w, cpu->migratable); } } for (l = plus_features; l; l = l->next) { const char *prop = l->data; object_property_set_bool(OBJECT(cpu), true, prop, &local_err); if (local_err) { goto out; } } for (l = minus_features; l; l = l->next) { const char *prop = l->data; object_property_set_bool(OBJECT(cpu), false, prop, &local_err); if (local_err) { goto out; } } if (!kvm_enabled() || !cpu->expose_kvm) { env->features[FEAT_KVM] = 0; } x86_cpu_enable_xsave_components(cpu); /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */ x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX); if (cpu->full_cpuid_auto_level) { x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_SVM); x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); /* SVM requires CPUID[0x8000000A] */ if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); } } /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ if (env->cpuid_level == UINT32_MAX) { env->cpuid_level = env->cpuid_min_level; } if (env->cpuid_xlevel == UINT32_MAX) { env->cpuid_xlevel = env->cpuid_min_xlevel; } if (env->cpuid_xlevel2 == UINT32_MAX) { env->cpuid_xlevel2 = env->cpuid_min_xlevel2; } out: if (local_err != NULL) { error_propagate(errp, local_err); } } | 23,457 |
0 | static BusState *qbus_find_bus(DeviceState *dev, char *elem) { BusState *child; LIST_FOREACH(child, &dev->child_bus, sibling) { if (strcmp(child->name, elem) == 0) { return child; } } return NULL; } | 23,458 |
0 | static inline void h264_loop_filter_luma_c(uint8_t *pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0) { int i, d; for( i = 0; i < 4; i++ ) { if( tc0[i] < 0 ) { pix += 4*ystride; continue; } for( d = 0; d < 4; d++ ) { const int p0 = pix[-1*xstride]; const int p1 = pix[-2*xstride]; const int p2 = pix[-3*xstride]; const int q0 = pix[0]; const int q1 = pix[1*xstride]; const int q2 = pix[2*xstride]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { int tc = tc0[i]; int i_delta; if( FFABS( p2 - p0 ) < beta ) { if(tc0[i]) pix[-2*xstride] = p1 + av_clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] ); tc++; } if( FFABS( q2 - q0 ) < beta ) { if(tc0[i]) pix[ xstride] = q1 + av_clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] ); tc++; } i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); pix[-xstride] = av_clip_uint8( p0 + i_delta ); /* p0' */ pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */ } pix += ystride; } } } | 23,459 |
0 | static int open_f(BlockBackend *blk, int argc, char **argv) { int flags = BDRV_O_UNMAP; int readonly = 0; bool writethrough = true; int c; QemuOpts *qopts; QDict *opts; bool force_share = false; while ((c = getopt(argc, argv, "snro:kt:d:U")) != -1) { switch (c) { case 's': flags |= BDRV_O_SNAPSHOT; break; case 'n': flags |= BDRV_O_NOCACHE; writethrough = false; break; case 'r': readonly = 1; break; case 'k': flags |= BDRV_O_NATIVE_AIO; break; case 't': if (bdrv_parse_cache_mode(optarg, &flags, &writethrough) < 0) { error_report("Invalid cache option: %s", optarg); qemu_opts_reset(&empty_opts); return 0; } break; case 'd': if (bdrv_parse_discard_flags(optarg, &flags) < 0) { error_report("Invalid discard option: %s", optarg); qemu_opts_reset(&empty_opts); return 0; } break; case 'o': if (imageOpts) { printf("--image-opts and 'open -o' are mutually exclusive\n"); qemu_opts_reset(&empty_opts); return 0; } if (!qemu_opts_parse_noisily(&empty_opts, optarg, false)) { qemu_opts_reset(&empty_opts); return 0; } break; case 'U': force_share = true; break; default: qemu_opts_reset(&empty_opts); return qemuio_command_usage(&open_cmd); } } if (!readonly) { flags |= BDRV_O_RDWR; } if (imageOpts && (optind == argc - 1)) { if (!qemu_opts_parse_noisily(&empty_opts, argv[optind], false)) { qemu_opts_reset(&empty_opts); return 0; } optind++; } qopts = qemu_opts_find(&empty_opts, NULL); opts = qopts ? qemu_opts_to_qdict(qopts, NULL) : NULL; qemu_opts_reset(&empty_opts); if (optind == argc - 1) { openfile(argv[optind], flags, writethrough, force_share, opts); } else if (optind == argc) { openfile(NULL, flags, writethrough, force_share, opts); } else { QDECREF(opts); qemuio_command_usage(&open_cmd); } return 0; } | 23,460 |
0 | static bool iscsi_allocationmap_is_allocated(IscsiLun *iscsilun, int64_t sector_num, int nb_sectors) { unsigned long size; if (iscsilun->allocationmap == NULL) { return true; } size = DIV_ROUND_UP(sector_num + nb_sectors, iscsilun->cluster_sectors); return !(find_next_bit(iscsilun->allocationmap, size, sector_num / iscsilun->cluster_sectors) == size); } | 23,461 |
0 | static void disas_ldst_pair(DisasContext *s, uint32_t insn) { int rt = extract32(insn, 0, 5); int rn = extract32(insn, 5, 5); int rt2 = extract32(insn, 10, 5); int64_t offset = sextract32(insn, 15, 7); int index = extract32(insn, 23, 2); bool is_vector = extract32(insn, 26, 1); bool is_load = extract32(insn, 22, 1); int opc = extract32(insn, 30, 2); bool is_signed = false; bool postindex = false; bool wback = false; TCGv_i64 tcg_addr; /* calculated address */ int size; if (opc == 3) { unallocated_encoding(s); return; } if (is_vector) { size = 2 + opc; } else { size = 2 + extract32(opc, 1, 1); is_signed = extract32(opc, 0, 1); if (!is_load && is_signed) { unallocated_encoding(s); return; } } switch (index) { case 1: /* post-index */ postindex = true; wback = true; break; case 0: /* signed offset with "non-temporal" hint. Since we don't emulate * caches we don't care about hints to the cache system about * data access patterns, and handle this identically to plain * signed offset. */ if (is_signed) { /* There is no non-temporal-hint version of LDPSW */ unallocated_encoding(s); return; } postindex = false; break; case 2: /* signed offset, rn not updated */ postindex = false; break; case 3: /* pre-index */ postindex = false; wback = true; break; } if (is_vector && !fp_access_check(s)) { return; } offset <<= size; if (rn == 31) { gen_check_sp_alignment(s); } tcg_addr = read_cpu_reg_sp(s, rn, 1); if (!postindex) { tcg_gen_addi_i64(tcg_addr, tcg_addr, offset); } if (is_vector) { if (is_load) { do_fp_ld(s, rt, tcg_addr, size); } else { do_fp_st(s, rt, tcg_addr, size); } } else { TCGv_i64 tcg_rt = cpu_reg(s, rt); if (is_load) { do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false); } else { do_gpr_st(s, tcg_rt, tcg_addr, size); } } tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size); if (is_vector) { if (is_load) { do_fp_ld(s, rt2, tcg_addr, size); } else { do_fp_st(s, rt2, tcg_addr, size); } } else { TCGv_i64 tcg_rt2 = cpu_reg(s, rt2); if (is_load) { do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false); } else { do_gpr_st(s, tcg_rt2, tcg_addr, size); } } if (wback) { if (postindex) { tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size)); } else { tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size); } tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr); } } | 23,462 |
0 | static struct omap_rtc_s *omap_rtc_init(target_phys_addr_t base, qemu_irq *irq, omap_clk clk) { int iomemtype; struct omap_rtc_s *s = (struct omap_rtc_s *) qemu_mallocz(sizeof(struct omap_rtc_s)); s->irq = irq[0]; s->alarm = irq[1]; s->clk = qemu_new_timer(rt_clock, omap_rtc_tick, s); omap_rtc_reset(s); iomemtype = cpu_register_io_memory(omap_rtc_readfn, omap_rtc_writefn, s, DEVICE_NATIVE_ENDIAN); cpu_register_physical_memory(base, 0x800, iomemtype); return s; } | 23,463 |
0 | static int mov_write_udta_tag(ByteIOContext *pb, MOVContext* mov, AVFormatContext *s) { offset_t pos = url_ftell(pb); int i; put_be32(pb, 0); /* size */ put_tag(pb, "udta"); /* iTunes meta data */ mov_write_meta_tag(pb, mov, s); if(mov->mode == MODE_MOV){ // the title field breaks gtkpod with mp4 and my suspicion is that stuff isnt valid in mp4 /* Requirements */ for (i=0; i<MAX_STREAMS; i++) { if(mov->tracks[i].entry <= 0) continue; if (mov->tracks[i].enc->codec_id == CODEC_ID_AAC || mov->tracks[i].enc->codec_id == CODEC_ID_MPEG4) { mov_write_string_tag(pb, "\251req", "QuickTime 6.0 or greater", 0); break; } } mov_write_string_tag(pb, "\251nam", s->title , 0); mov_write_string_tag(pb, "\251aut", s->author , 0); mov_write_string_tag(pb, "\251alb", s->album , 0); mov_write_day_tag(pb, s->year, 0); if(mov->tracks[0].enc && !(mov->tracks[0].enc->flags & CODEC_FLAG_BITEXACT)) mov_write_string_tag(pb, "\251enc", LIBAVFORMAT_IDENT, 0); mov_write_string_tag(pb, "\251des", s->comment , 0); mov_write_string_tag(pb, "\251gen", s->genre , 0); } return updateSize(pb, pos); } | 23,464 |
0 | static void selfTest(uint8_t *ref[4], int refStride[4], int w, int h) { const int flags[] = { SWS_FAST_BILINEAR, SWS_BILINEAR, SWS_BICUBIC, SWS_X , SWS_POINT , SWS_AREA, 0 }; const int srcW = w; const int srcH = h; const int dstW[] = { srcW - srcW/3, srcW, srcW + srcW/3, 0 }; const int dstH[] = { srcH - srcH/3, srcH, srcH + srcH/3, 0 }; enum PixelFormat srcFormat, dstFormat; for (srcFormat = 0; srcFormat < PIX_FMT_NB; srcFormat++) { for (dstFormat = 0; dstFormat < PIX_FMT_NB; dstFormat++) { int i, j, k; int res = 0; printf("%s -> %s\n", sws_format_name(srcFormat), sws_format_name(dstFormat)); fflush(stdout); for (i = 0; dstW[i] && !res; i++) for (j = 0; dstH[j] && !res; j++) for (k = 0; flags[k] && !res; k++) res = doTest(ref, refStride, w, h, srcFormat, dstFormat, srcW, srcH, dstW[i], dstH[j], flags[k]); } } } | 23,466 |
0 | static void avc_biwgt_4width_msa(uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, int32_t height, int32_t log2_denom, int32_t src_weight, int32_t dst_weight, int32_t offset_in) { if (2 == height) { avc_biwgt_4x2_msa(src, src_stride, dst, dst_stride, log2_denom, src_weight, dst_weight, offset_in); } else { avc_biwgt_4x4multiple_msa(src, src_stride, dst, dst_stride, height, log2_denom, src_weight, dst_weight, offset_in); } } | 23,467 |
0 | static inline int decode_vui_parameters(GetBitContext *gb, AVCodecContext *avctx, SPS *sps) { int aspect_ratio_info_present_flag; unsigned int aspect_ratio_idc; aspect_ratio_info_present_flag = get_bits1(gb); if (aspect_ratio_info_present_flag) { aspect_ratio_idc = get_bits(gb, 8); if (aspect_ratio_idc == EXTENDED_SAR) { sps->sar.num = get_bits(gb, 16); sps->sar.den = get_bits(gb, 16); } else if (aspect_ratio_idc < FF_ARRAY_ELEMS(pixel_aspect)) { sps->sar = pixel_aspect[aspect_ratio_idc]; } else { av_log(avctx, AV_LOG_ERROR, "illegal aspect ratio\n"); return AVERROR_INVALIDDATA; } } else { sps->sar.num = sps->sar.den = 0; } if (get_bits1(gb)) /* overscan_info_present_flag */ get_bits1(gb); /* overscan_appropriate_flag */ sps->video_signal_type_present_flag = get_bits1(gb); if (sps->video_signal_type_present_flag) { get_bits(gb, 3); /* video_format */ sps->full_range = get_bits1(gb); /* video_full_range_flag */ sps->colour_description_present_flag = get_bits1(gb); if (sps->colour_description_present_flag) { sps->color_primaries = get_bits(gb, 8); /* colour_primaries */ sps->color_trc = get_bits(gb, 8); /* transfer_characteristics */ sps->colorspace = get_bits(gb, 8); /* matrix_coefficients */ if (sps->color_primaries >= AVCOL_PRI_NB) sps->color_primaries = AVCOL_PRI_UNSPECIFIED; if (sps->color_trc >= AVCOL_TRC_NB) sps->color_trc = AVCOL_TRC_UNSPECIFIED; if (sps->colorspace >= AVCOL_SPC_NB) sps->colorspace = AVCOL_SPC_UNSPECIFIED; } } /* chroma_location_info_present_flag */ if (get_bits1(gb)) { /* chroma_sample_location_type_top_field */ avctx->chroma_sample_location = get_ue_golomb(gb) + 1; get_ue_golomb(gb); /* chroma_sample_location_type_bottom_field */ } sps->timing_info_present_flag = get_bits1(gb); if (sps->timing_info_present_flag) { sps->num_units_in_tick = get_bits_long(gb, 32); sps->time_scale = get_bits_long(gb, 32); if (!sps->num_units_in_tick || !sps->time_scale) { av_log(avctx, AV_LOG_ERROR, "time_scale/num_units_in_tick invalid or unsupported (%"PRIu32"/%"PRIu32")\n", sps->time_scale, sps->num_units_in_tick); return AVERROR_INVALIDDATA; } sps->fixed_frame_rate_flag = get_bits1(gb); } sps->nal_hrd_parameters_present_flag = get_bits1(gb); if (sps->nal_hrd_parameters_present_flag) if (decode_hrd_parameters(gb, avctx, sps) < 0) return AVERROR_INVALIDDATA; sps->vcl_hrd_parameters_present_flag = get_bits1(gb); if (sps->vcl_hrd_parameters_present_flag) if (decode_hrd_parameters(gb, avctx, sps) < 0) return AVERROR_INVALIDDATA; if (sps->nal_hrd_parameters_present_flag || sps->vcl_hrd_parameters_present_flag) get_bits1(gb); /* low_delay_hrd_flag */ sps->pic_struct_present_flag = get_bits1(gb); sps->bitstream_restriction_flag = get_bits1(gb); if (sps->bitstream_restriction_flag) { get_bits1(gb); /* motion_vectors_over_pic_boundaries_flag */ get_ue_golomb(gb); /* max_bytes_per_pic_denom */ get_ue_golomb(gb); /* max_bits_per_mb_denom */ get_ue_golomb(gb); /* log2_max_mv_length_horizontal */ get_ue_golomb(gb); /* log2_max_mv_length_vertical */ sps->num_reorder_frames = get_ue_golomb(gb); get_ue_golomb(gb); /*max_dec_frame_buffering*/ if (get_bits_left(gb) < 0) { sps->num_reorder_frames = 0; sps->bitstream_restriction_flag = 0; } if (sps->num_reorder_frames > 16U /* max_dec_frame_buffering || max_dec_frame_buffering > 16 */) { av_log(avctx, AV_LOG_ERROR, "Clipping illegal num_reorder_frames %d\n", sps->num_reorder_frames); sps->num_reorder_frames = 16; return AVERROR_INVALIDDATA; } } if (get_bits_left(gb) < 0) { av_log(avctx, AV_LOG_ERROR, "Overread VUI by %d bits\n", 
-get_bits_left(gb)); return AVERROR_INVALIDDATA; } return 0; } | 23,468 |
0 | static int buffer_needs_copy(PadContext *s, AVFrame *frame, AVBufferRef *buf) { int planes[4] = { -1, -1, -1, -1}, *p = planes; int i, j; /* get all planes in this buffer */ for (i = 0; i < FF_ARRAY_ELEMS(planes) && frame->data[i]; i++) { if (av_frame_get_plane_buffer(frame, i) == buf) *p++ = i; } /* for each plane in this buffer, check that it can be padded without * going over buffer bounds or other planes */ for (i = 0; i < FF_ARRAY_ELEMS(planes) && planes[i] >= 0; i++) { int hsub = s->draw.hsub[planes[i]]; int vsub = s->draw.vsub[planes[i]]; uint8_t *start = frame->data[planes[i]]; uint8_t *end = start + (frame->height >> vsub) * frame->linesize[planes[i]]; /* amount of free space needed before the start and after the end * of the plane */ ptrdiff_t req_start = (s->x >> hsub) * s->draw.pixelstep[planes[i]] + (s->y >> vsub) * frame->linesize[planes[i]]; ptrdiff_t req_end = ((s->w - s->x - frame->width) >> hsub) * s->draw.pixelstep[planes[i]] + (s->y >> vsub) * frame->linesize[planes[i]]; if (frame->linesize[planes[i]] < (s->w >> hsub) * s->draw.pixelstep[planes[i]]) return 1; if (start - buf->data < req_start || (buf->data + buf->size) - end < req_end) return 1; for (j = 0; j < FF_ARRAY_ELEMS(planes) && planes[j] >= 0; j++) { int vsub1 = s->draw.vsub[planes[j]]; uint8_t *start1 = frame->data[planes[j]]; uint8_t *end1 = start1 + (frame->height >> vsub1) * frame->linesize[planes[j]]; if (i == j) continue; if (FFSIGN(start - end1) != FFSIGN(start - end1 - req_start) || FFSIGN(end - start1) != FFSIGN(end - start1 + req_end)) return 1; } } return 0; } | 23,469 |
1 | static uint64_t bonito_ldma_readl(void *opaque, hwaddr addr, unsigned size) { uint32_t val; PCIBonitoState *s = opaque; val = ((uint32_t *)(&s->bonldma))[addr/sizeof(uint32_t)]; return val; | 23,472 |
1 | static void integratorcp_init(ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { CPUState *env; ram_addr_t ram_offset; qemu_irq pic[32]; qemu_irq *cpu_pic; DeviceState *dev; int i; if (!cpu_model) cpu_model = "arm926"; env = cpu_init(cpu_model); if (!env) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } ram_offset = qemu_ram_alloc(ram_size); /* ??? On a real system the first 1Mb is mapped as SSRAM or boot flash. */ /* ??? RAM should repeat to fill physical memory space. */ /* SDRAM at address zero*/ cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM); /* And again at address 0x80000000 */ cpu_register_physical_memory(0x80000000, ram_size, ram_offset | IO_MEM_RAM); dev = qdev_create(NULL, "integrator_core"); qdev_prop_set_uint32(dev, "memsz", ram_size >> 20); qdev_init(dev); sysbus_mmio_map((SysBusDevice *)dev, 0, 0x10000000); cpu_pic = arm_pic_init_cpu(env); dev = sysbus_create_varargs("integrator_pic", 0x14000000, cpu_pic[ARM_PIC_CPU_IRQ], cpu_pic[ARM_PIC_CPU_FIQ], NULL); for (i = 0; i < 32; i++) { pic[i] = qdev_get_gpio_in(dev, i); } sysbus_create_simple("integrator_pic", 0xca000000, pic[26]); sysbus_create_varargs("integrator_pit", 0x13000000, pic[5], pic[6], pic[7], NULL); sysbus_create_simple("pl031", 0x15000000, pic[8]); sysbus_create_simple("pl011", 0x16000000, pic[1]); sysbus_create_simple("pl011", 0x17000000, pic[2]); icp_control_init(0xcb000000); sysbus_create_simple("pl050_keyboard", 0x18000000, pic[3]); sysbus_create_simple("pl050_mouse", 0x19000000, pic[4]); sysbus_create_varargs("pl181", 0x1c000000, pic[23], pic[24], NULL); if (nd_table[0].vlan) smc91c111_init(&nd_table[0], 0xc8000000, pic[27]); sysbus_create_simple("pl110", 0xc0000000, pic[22]); integrator_binfo.ram_size = ram_size; integrator_binfo.kernel_filename = kernel_filename; integrator_binfo.kernel_cmdline = kernel_cmdline; integrator_binfo.initrd_filename = initrd_filename; arm_load_kernel(env, &integrator_binfo); } | 23,473 |
1 | static void ich9_apm_ctrl_changed(uint32_t val, void *arg) { ICH9LPCState *lpc = arg; /* ACPI specs 3.0, 4.7.2.5 */ acpi_pm1_cnt_update(&lpc->pm.acpi_regs, val == ICH9_APM_ACPI_ENABLE, val == ICH9_APM_ACPI_DISABLE); if (val == ICH9_APM_ACPI_ENABLE || val == ICH9_APM_ACPI_DISABLE) { return; } /* SMI_EN = PMBASE + 30. SMI control and enable register */ if (lpc->pm.smi_en & ICH9_PMIO_SMI_EN_APMC_EN) { cpu_interrupt(current_cpu, CPU_INTERRUPT_SMI); } } | 23,474 |
1 | static uint32_t get_cluster_count_for_direntry(BDRVVVFATState* s, direntry_t* direntry, const char* path) { /* * This is a little bit tricky: * IF the guest OS just inserts a cluster into the file chain, * and leaves the rest alone, (i.e. the original file had clusters * 15 -> 16, but now has 15 -> 32 -> 16), then the following happens: * * - do_commit will write the cluster into the file at the given * offset, but * * - the cluster which is overwritten should be moved to a later * position in the file. * * I am not aware that any OS does something as braindead, but this * situation could happen anyway when not committing for a long time. * Just to be sure that this does not bite us, detect it, and copy the * contents of the clusters to-be-overwritten into the qcow. */ int copy_it = 0; int was_modified = 0; int32_t ret = 0; uint32_t cluster_num = begin_of_direntry(direntry); uint32_t offset = 0; int first_mapping_index = -1; mapping_t* mapping = NULL; const char* basename2 = NULL; vvfat_close_current_file(s); /* the root directory */ if (cluster_num == 0) return 0; /* write support */ if (s->qcow) { basename2 = get_basename(path); mapping = find_mapping_for_cluster(s, cluster_num); if (mapping) { const char* basename; assert(mapping->mode & MODE_DELETED); mapping->mode &= ~MODE_DELETED; basename = get_basename(mapping->path); assert(mapping->mode & MODE_NORMAL); /* rename */ if (strcmp(basename, basename2)) schedule_rename(s, cluster_num, strdup(path)); } else if (is_file(direntry)) /* new file */ schedule_new_file(s, strdup(path), cluster_num); else { assert(0); return 0; } } while(1) { if (s->qcow) { if (!copy_it && cluster_was_modified(s, cluster_num)) { if (mapping == NULL || mapping->begin > cluster_num || mapping->end <= cluster_num) mapping = find_mapping_for_cluster(s, cluster_num); if (mapping && (mapping->mode & MODE_DIRECTORY) == 0) { /* was modified in qcow */ if (offset != mapping->info.file.offset + s->cluster_size * (cluster_num - mapping->begin)) { /* offset of this cluster in file chain has changed */ assert(0); copy_it = 1; } else if (offset == 0) { const char* basename = get_basename(mapping->path); if (strcmp(basename, basename2)) copy_it = 1; first_mapping_index = array_index(&(s->mapping), mapping); } if (mapping->first_mapping_index != first_mapping_index && mapping->info.file.offset > 0) { assert(0); copy_it = 1; } /* need to write out? */ if (!was_modified && is_file(direntry)) { was_modified = 1; schedule_writeout(s, mapping->dir_index, offset); } } } if (copy_it) { int i, dummy; /* * This is horribly inefficient, but that is okay, since * it is rarely executed, if at all. */ int64_t offset = cluster2sector(s, cluster_num); vvfat_close_current_file(s); for (i = 0; i < s->sectors_per_cluster; i++) if (!s->qcow->drv->bdrv_is_allocated(s->qcow, offset + i, 1, &dummy)) { if (vvfat_read(s->bs, offset, s->cluster_buffer, 1)) return -1; if (s->qcow->drv->bdrv_write(s->qcow, offset, s->cluster_buffer, 1)) return -2; } } } ret++; if (s->used_clusters[cluster_num] & USED_ANY) return 0; s->used_clusters[cluster_num] = USED_FILE; cluster_num = modified_fat_get(s, cluster_num); if (fat_eof(s, cluster_num)) return ret; else if (cluster_num < 2 || cluster_num > s->max_fat_value - 16) return -1; offset += s->cluster_size; } } | 23,475 |
1 | void visit_type_str(Visitor *v, const char *name, char **obj, Error **errp) { v->type_str(v, name, obj, errp); } | 23,476 |
1 | static void decode_pitch_lag_high(int *lag_int, int *lag_frac, int pitch_index, uint8_t *base_lag_int, int subframe) { if (subframe == 0 || subframe == 2) { if (pitch_index < 376) { *lag_int = (pitch_index + 137) >> 2; *lag_frac = pitch_index - (*lag_int << 2) + 136; } else if (pitch_index < 440) { *lag_int = (pitch_index + 257 - 376) >> 1; *lag_frac = (pitch_index - (*lag_int << 1) + 256 - 376) << 1; /* the actual resolution is 1/2 but expressed as 1/4 */ } else { *lag_int = pitch_index - 280; *lag_frac = 0; } /* minimum lag for next subframe */ *base_lag_int = av_clip(*lag_int - 8 - (*lag_frac < 0), AMRWB_P_DELAY_MIN, AMRWB_P_DELAY_MAX - 15); // XXX: the spec states clearly that *base_lag_int should be // the nearest integer to *lag_int (minus 8), but the ref code // actually always uses its floor, I'm following the latter } else { *lag_int = (pitch_index + 1) >> 2; *lag_frac = pitch_index - (*lag_int << 2); *lag_int += *base_lag_int; } } | 23,477 |
1 | static bool invalid_qmp_mode(const Monitor *mon, const char *cmd, Error **errp) { bool is_cap = g_str_equal(cmd, "qmp_capabilities"); if (is_cap && mon->qmp.in_command_mode) { error_set(errp, ERROR_CLASS_COMMAND_NOT_FOUND, "Capabilities negotiation is already complete, command " "'%s' ignored", cmd); return true; } if (!is_cap && !mon->qmp.in_command_mode) { error_set(errp, ERROR_CLASS_COMMAND_NOT_FOUND, "Expecting capabilities negotiation with " "'qmp_capabilities' before command '%s'", cmd); return true; } return false; } | 23,478 |
1 | static void migration_completion(MigrationState *s, int current_active_state, bool *old_vm_running, int64_t *start_time) { int ret; if (s->state == MIGRATION_STATUS_ACTIVE) { qemu_mutex_lock_iothread(); *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); *old_vm_running = runstate_is_running(); ret = global_state_store(); if (!ret) { ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); if (ret >= 0) { ret = bdrv_inactivate_all(); } if (ret >= 0) { qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX); qemu_savevm_state_complete_precopy(s->to_dst_file, false); } } qemu_mutex_unlock_iothread(); if (ret < 0) { goto fail; } } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { trace_migration_completion_postcopy_end(); qemu_savevm_state_complete_postcopy(s->to_dst_file); trace_migration_completion_postcopy_end_after_complete(); } /* * If rp was opened we must clean up the thread before * cleaning everything else up (since if there are no failures * it will wait for the destination to send it's status in * a SHUT command). * Postcopy opens rp if enabled (even if it's not avtivated) */ if (migrate_postcopy_ram()) { int rp_error; trace_migration_completion_postcopy_end_before_rp(); rp_error = await_return_path_close_on_source(s); trace_migration_completion_postcopy_end_after_rp(rp_error); if (rp_error) { goto fail; } } if (qemu_file_get_error(s->to_dst_file)) { trace_migration_completion_file_err(); goto fail; } migrate_set_state(&s->state, current_active_state, MIGRATION_STATUS_COMPLETED); return; fail: migrate_set_state(&s->state, current_active_state, MIGRATION_STATUS_FAILED); } | 23,479 |
1 | static int transcode(AVFormatContext **output_files, int nb_output_files, InputFile *input_files, int nb_input_files, StreamMap *stream_maps, int nb_stream_maps) { int ret = 0, i, j, k, n, nb_ostreams = 0, step; AVFormatContext *is, *os; AVCodecContext *codec, *icodec; OutputStream *ost, **ost_table = NULL; InputStream *ist; char error[1024]; int key; int want_sdp = 1; uint8_t no_packet[MAX_FILES]={0}; int no_packet_count=0; int nb_frame_threshold[AVMEDIA_TYPE_NB]={0}; int nb_streams[AVMEDIA_TYPE_NB]={0}; if (rate_emu) for (i = 0; i < nb_input_streams; i++) input_streams[i].start = av_gettime(); /* output stream init */ nb_ostreams = 0; for(i=0;i<nb_output_files;i++) { os = output_files[i]; if (!os->nb_streams && !(os->oformat->flags & AVFMT_NOSTREAMS)) { av_dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, "Output file #%d does not contain any stream\n", i); ret = AVERROR(EINVAL); goto fail; } nb_ostreams += os->nb_streams; } if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) { fprintf(stderr, "Number of stream maps must match number of output streams\n"); ret = AVERROR(EINVAL); goto fail; } /* Sanity check the mapping args -- do the input files & streams exist? */ for(i=0;i<nb_stream_maps;i++) { int fi = stream_maps[i].file_index; int si = stream_maps[i].stream_index; if (fi < 0 || fi > nb_input_files - 1 || si < 0 || si > input_files[fi].ctx->nb_streams - 1) { fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si); ret = AVERROR(EINVAL); goto fail; } fi = stream_maps[i].sync_file_index; si = stream_maps[i].sync_stream_index; if (fi < 0 || fi > nb_input_files - 1 || si < 0 || si > input_files[fi].ctx->nb_streams - 1) { fprintf(stderr,"Could not find sync stream #%d.%d\n", fi, si); ret = AVERROR(EINVAL); goto fail; } } ost_table = av_mallocz(sizeof(OutputStream *) * nb_ostreams); if (!ost_table) goto fail; for(k=0;k<nb_output_files;k++) { os = output_files[k]; for(i=0;i<os->nb_streams;i++,n++) { nb_streams[os->streams[i]->codec->codec_type]++; } } for(step=1<<30; step; step>>=1){ int found_streams[AVMEDIA_TYPE_NB]={0}; for(j=0; j<AVMEDIA_TYPE_NB; j++) nb_frame_threshold[j] += step; for(j=0; j<nb_input_streams; j++) { int skip=0; ist = &input_streams[j]; if(opt_programid){ int pi,si; AVFormatContext *f= input_files[ ist->file_index ].ctx; skip=1; for(pi=0; pi<f->nb_programs; pi++){ AVProgram *p= f->programs[pi]; if(p->id == opt_programid) for(si=0; si<p->nb_stream_indexes; si++){ if(f->streams[ p->stream_index[si] ] == ist->st) skip=0; } } } if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip && nb_frame_threshold[ist->st->codec->codec_type] <= ist->st->codec_info_nb_frames){ found_streams[ist->st->codec->codec_type]++; } } for(j=0; j<AVMEDIA_TYPE_NB; j++) if(found_streams[j] < nb_streams[j]) nb_frame_threshold[j] -= step; } n = 0; for(k=0;k<nb_output_files;k++) { os = output_files[k]; for(i=0;i<os->nb_streams;i++,n++) { int found; ost = ost_table[n] = output_streams_for_file[k][i]; if (nb_stream_maps > 0) { ost->source_index = input_files[stream_maps[n].file_index].ist_index + stream_maps[n].stream_index; /* Sanity check that the stream types match */ if (input_streams[ost->source_index].st->codec->codec_type != ost->st->codec->codec_type) { int i= ost->file_index; av_dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n", stream_maps[n].file_index, stream_maps[n].stream_index, ost->file_index, ost->index); ffmpeg_exit(1); } } else { /* get corresponding 
input stream index : we select the first one with the right type */ found = 0; for (j = 0; j < nb_input_streams; j++) { int skip=0; ist = &input_streams[j]; if(opt_programid){ int pi,si; AVFormatContext *f = input_files[ist->file_index].ctx; skip=1; for(pi=0; pi<f->nb_programs; pi++){ AVProgram *p= f->programs[pi]; if(p->id == opt_programid) for(si=0; si<p->nb_stream_indexes; si++){ if(f->streams[ p->stream_index[si] ] == ist->st) skip=0; } } } if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip && ist->st->codec->codec_type == ost->st->codec->codec_type && nb_frame_threshold[ist->st->codec->codec_type] <= ist->st->codec_info_nb_frames) { ost->source_index = j; found = 1; break; } } if (!found) { if(! opt_programid) { /* try again and reuse existing stream */ for (j = 0; j < nb_input_streams; j++) { ist = &input_streams[j]; if ( ist->st->codec->codec_type == ost->st->codec->codec_type && ist->st->discard != AVDISCARD_ALL) { ost->source_index = j; found = 1; } } } if (!found) { int i= ost->file_index; av_dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n", ost->file_index, ost->index); ffmpeg_exit(1); } } } ist = &input_streams[ost->source_index]; ist->discard = 0; ost->sync_ist = (nb_stream_maps > 0) ? &input_streams[input_files[stream_maps[n].sync_file_index].ist_index + stream_maps[n].sync_stream_index] : ist; } } /* for each output stream, we compute the right encoding parameters */ for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; os = output_files[ost->file_index]; ist = &input_streams[ost->source_index]; codec = ost->st->codec; icodec = ist->st->codec; if (metadata_streams_autocopy) av_dict_copy(&ost->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE); ost->st->disposition = ist->st->disposition; codec->bits_per_raw_sample= icodec->bits_per_raw_sample; codec->chroma_sample_location = icodec->chroma_sample_location; if (ost->st->stream_copy) { uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE; if (extra_size > INT_MAX) goto fail; /* if stream_copy is selected, no need to decode or encode */ codec->codec_id = icodec->codec_id; codec->codec_type = icodec->codec_type; if(!codec->codec_tag){ if( !os->oformat->codec_tag || av_codec_get_id (os->oformat->codec_tag, icodec->codec_tag) == codec->codec_id || av_codec_get_tag(os->oformat->codec_tag, icodec->codec_id) <= 0) codec->codec_tag = icodec->codec_tag; } codec->bit_rate = icodec->bit_rate; codec->rc_max_rate = icodec->rc_max_rate; codec->rc_buffer_size = icodec->rc_buffer_size; codec->extradata= av_mallocz(extra_size); if (!codec->extradata) goto fail; memcpy(codec->extradata, icodec->extradata, icodec->extradata_size); codec->extradata_size= icodec->extradata_size; codec->time_base = ist->st->time_base; if(!strcmp(os->oformat->name, "avi")) { if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > 2*av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){ codec->time_base = icodec->time_base; codec->time_base.num *= icodec->ticks_per_frame; codec->time_base.den *= 2; } } else if(!(os->oformat->flags & AVFMT_VARIABLE_FPS)) { if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){ codec->time_base = icodec->time_base; codec->time_base.num *= icodec->ticks_per_frame; } } av_reduce(&codec->time_base.num, &codec->time_base.den, codec->time_base.num, codec->time_base.den, INT_MAX); 
switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: if(audio_volume != 256) { fprintf(stderr,"-acodec copy and -vol are incompatible (frames are not decoded)\n"); ffmpeg_exit(1); } codec->channel_layout = icodec->channel_layout; codec->sample_rate = icodec->sample_rate; codec->channels = icodec->channels; codec->frame_size = icodec->frame_size; codec->audio_service_type = icodec->audio_service_type; codec->block_align= icodec->block_align; if(codec->block_align == 1 && codec->codec_id == CODEC_ID_MP3) codec->block_align= 0; if(codec->codec_id == CODEC_ID_AC3) codec->block_align= 0; break; case AVMEDIA_TYPE_VIDEO: codec->pix_fmt = icodec->pix_fmt; codec->width = icodec->width; codec->height = icodec->height; codec->has_b_frames = icodec->has_b_frames; if (!codec->sample_aspect_ratio.num) { codec->sample_aspect_ratio = ost->st->sample_aspect_ratio = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio : ist->st->codec->sample_aspect_ratio.num ? ist->st->codec->sample_aspect_ratio : (AVRational){0, 1}; } break; case AVMEDIA_TYPE_SUBTITLE: codec->width = icodec->width; codec->height = icodec->height; break; case AVMEDIA_TYPE_DATA: break; default: abort(); } } else { if (!ost->enc) ost->enc = avcodec_find_encoder(ost->st->codec->codec_id); switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: ost->fifo= av_fifo_alloc(1024); if(!ost->fifo) goto fail; ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE,AV_SAMPLE_FMT_NONE); if (!codec->sample_rate) { codec->sample_rate = icodec->sample_rate; } choose_sample_rate(ost->st, ost->enc); codec->time_base = (AVRational){1, codec->sample_rate}; if (codec->sample_fmt == AV_SAMPLE_FMT_NONE) codec->sample_fmt = icodec->sample_fmt; choose_sample_fmt(ost->st, ost->enc); if (!codec->channels) { codec->channels = icodec->channels; codec->channel_layout = icodec->channel_layout; } if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels) codec->channel_layout = 0; ost->audio_resample = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1; icodec->request_channels = codec->channels; ist->decoding_needed = 1; ost->encoding_needed = 1; ost->resample_sample_fmt = icodec->sample_fmt; ost->resample_sample_rate = icodec->sample_rate; ost->resample_channels = icodec->channels; break; case AVMEDIA_TYPE_VIDEO: if (codec->pix_fmt == PIX_FMT_NONE) codec->pix_fmt = icodec->pix_fmt; choose_pixel_fmt(ost->st, ost->enc); if (ost->st->codec->pix_fmt == PIX_FMT_NONE) { fprintf(stderr, "Video pixel format is unknown, stream cannot be encoded\n"); ffmpeg_exit(1); } if (!codec->width || !codec->height) { codec->width = icodec->width; codec->height = icodec->height; } ost->video_resample = codec->width != icodec->width || codec->height != icodec->height || codec->pix_fmt != icodec->pix_fmt; if (ost->video_resample) { codec->bits_per_raw_sample= frame_bits_per_raw_sample; } ost->resample_height = icodec->height; ost->resample_width = icodec->width; ost->resample_pix_fmt= icodec->pix_fmt; ost->encoding_needed = 1; ist->decoding_needed = 1; if (!ost->frame_rate.num) ost->frame_rate = ist->st->r_frame_rate.num ? 
ist->st->r_frame_rate : (AVRational){25,1}; if (ost->enc && ost->enc->supported_framerates && !force_fps) { int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates); ost->frame_rate = ost->enc->supported_framerates[idx]; } codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num}; if( av_q2d(codec->time_base) < 0.001 && video_sync_method && (video_sync_method==1 || (video_sync_method<0 && !(os->oformat->flags & AVFMT_VARIABLE_FPS)))){ av_log(os, AV_LOG_WARNING, "Frame rate very high for a muxer not effciciently supporting it.\n" "Please consider specifiying a lower framerate, a different muxer or -vsync 2\n"); } #if CONFIG_AVFILTER if (configure_video_filters(ist, ost)) { fprintf(stderr, "Error opening filters!\n"); exit(1); } #endif break; case AVMEDIA_TYPE_SUBTITLE: ost->encoding_needed = 1; ist->decoding_needed = 1; break; default: abort(); break; } /* two pass mode */ if (ost->encoding_needed && codec->codec_id != CODEC_ID_H264 && (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) { char logfilename[1024]; FILE *f; snprintf(logfilename, sizeof(logfilename), "%s-%d.log", pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX, i); if (codec->flags & CODEC_FLAG_PASS1) { f = fopen(logfilename, "wb"); if (!f) { fprintf(stderr, "Cannot write log file '%s' for pass-1 encoding: %s\n", logfilename, strerror(errno)); ffmpeg_exit(1); } ost->logfile = f; } else { char *logbuffer; size_t logbuffer_size; if (read_file(logfilename, &logbuffer, &logbuffer_size) < 0) { fprintf(stderr, "Error reading log file '%s' for pass-2 encoding\n", logfilename); ffmpeg_exit(1); } codec->stats_in = logbuffer; } } } if(codec->codec_type == AVMEDIA_TYPE_VIDEO){ /* maximum video buffer size is 6-bytes per pixel, plus DPX header size */ int size= codec->width * codec->height; bit_buffer_size= FFMAX(bit_buffer_size, 6*size + 1664); } } if (!bit_buffer) bit_buffer = av_malloc(bit_buffer_size); if (!bit_buffer) { fprintf(stderr, "Cannot allocate %d bytes output buffer\n", bit_buffer_size); ret = AVERROR(ENOMEM); goto fail; } /* open each encoder */ for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; if (ost->encoding_needed) { AVCodec *codec = ost->enc; AVCodecContext *dec = input_streams[ost->source_index].st->codec; if (!codec) { snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d.%d", ost->st->codec->codec_id, ost->file_index, ost->index); ret = AVERROR(EINVAL); goto dump_format; } if (dec->subtitle_header) { ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size); if (!ost->st->codec->subtitle_header) { ret = AVERROR(ENOMEM); goto dump_format; } memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size); ost->st->codec->subtitle_header_size = dec->subtitle_header_size; } if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) { snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height", ost->file_index, ost->index); ret = AVERROR(EINVAL); goto dump_format; } assert_codec_experimental(ost->st->codec, 1); assert_avoptions(ost->opts); if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000) av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low." 
"It takes bits/s as argument, not kbits/s\n"); extra_size += ost->st->codec->extradata_size; } } /* open each decoder */ for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { AVCodec *codec = ist->dec; if (!codec) codec = avcodec_find_decoder(ist->st->codec->codec_id); if (!codec) { snprintf(error, sizeof(error), "Decoder (codec id %d) not found for input stream #%d.%d", ist->st->codec->codec_id, ist->file_index, ist->st->index); ret = AVERROR(EINVAL); goto dump_format; } if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) { snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d", ist->file_index, ist->st->index); ret = AVERROR(EINVAL); goto dump_format; } assert_codec_experimental(ist->st->codec, 0); assert_avoptions(ost->opts); //if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) // ist->st->codec->flags |= CODEC_FLAG_REPEAT_FIELD; } } /* init pts */ for (i = 0; i < nb_input_streams; i++) { AVStream *st; ist = &input_streams[i]; st= ist->st; ist->pts = st->avg_frame_rate.num ? - st->codec->has_b_frames*AV_TIME_BASE / av_q2d(st->avg_frame_rate) : 0; ist->next_pts = AV_NOPTS_VALUE; ist->is_start = 1; } /* set meta data information from input file if required */ for (i=0;i<nb_meta_data_maps;i++) { AVFormatContext *files[2]; AVDictionary **meta[2]; int j; #define METADATA_CHECK_INDEX(index, nb_elems, desc)\ if ((index) < 0 || (index) >= (nb_elems)) {\ snprintf(error, sizeof(error), "Invalid %s index %d while processing metadata maps\n",\ (desc), (index));\ ret = AVERROR(EINVAL);\ goto dump_format;\ } int out_file_index = meta_data_maps[i][0].file; int in_file_index = meta_data_maps[i][1].file; if (in_file_index < 0 || out_file_index < 0) continue; METADATA_CHECK_INDEX(out_file_index, nb_output_files, "output file") METADATA_CHECK_INDEX(in_file_index, nb_input_files, "input file") files[0] = output_files[out_file_index]; files[1] = input_files[in_file_index].ctx; for (j = 0; j < 2; j++) { MetadataMap *map = &meta_data_maps[i][j]; switch (map->type) { case 'g': meta[j] = &files[j]->metadata; break; case 's': METADATA_CHECK_INDEX(map->index, files[j]->nb_streams, "stream") meta[j] = &files[j]->streams[map->index]->metadata; break; case 'c': METADATA_CHECK_INDEX(map->index, files[j]->nb_chapters, "chapter") meta[j] = &files[j]->chapters[map->index]->metadata; break; case 'p': METADATA_CHECK_INDEX(map->index, files[j]->nb_programs, "program") meta[j] = &files[j]->programs[map->index]->metadata; break; } } av_dict_copy(meta[0], *meta[1], AV_DICT_DONT_OVERWRITE); } /* copy global metadata by default */ if (metadata_global_autocopy) { for (i = 0; i < nb_output_files; i++) av_dict_copy(&output_files[i]->metadata, input_files[0].ctx->metadata, AV_DICT_DONT_OVERWRITE); } /* copy chapters according to chapter maps */ for (i = 0; i < nb_chapter_maps; i++) { int infile = chapter_maps[i].in_file; int outfile = chapter_maps[i].out_file; if (infile < 0 || outfile < 0) continue; if (infile >= nb_input_files) { snprintf(error, sizeof(error), "Invalid input file index %d in chapter mapping.\n", infile); ret = AVERROR(EINVAL); goto dump_format; } if (outfile >= nb_output_files) { snprintf(error, sizeof(error), "Invalid output file index %d in chapter mapping.\n",outfile); ret = AVERROR(EINVAL); goto dump_format; } copy_chapters(infile, outfile); } /* copy chapters from the first input file that has them*/ if (!nb_chapter_maps) for (i = 0; i < nb_input_files; i++) { if (!input_files[i].ctx->nb_chapters) continue; for (j = 0; j < 
nb_output_files; j++) if ((ret = copy_chapters(i, j)) < 0) goto dump_format; break; } /* open files and write file headers */ for(i=0;i<nb_output_files;i++) { os = output_files[i]; if (avformat_write_header(os, &output_opts[i]) < 0) { snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i); ret = AVERROR(EINVAL); goto dump_format; } // assert_avoptions(output_opts[i]); if (strcmp(output_files[i]->oformat->name, "rtp")) { want_sdp = 0; } } dump_format: /* dump the file output parameters - cannot be done before in case of stream copy */ for(i=0;i<nb_output_files;i++) { av_dump_format(output_files[i], i, output_files[i]->filename, 1); } /* dump the stream mapping */ if (verbose >= 0) { fprintf(stderr, "Stream mapping:\n"); for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; fprintf(stderr, " Stream #%d.%d -> #%d.%d", input_streams[ost->source_index].file_index, input_streams[ost->source_index].st->index, ost->file_index, ost->index); if (ost->sync_ist != &input_streams[ost->source_index]) fprintf(stderr, " [sync #%d.%d]", ost->sync_ist->file_index, ost->sync_ist->st->index); fprintf(stderr, "\n"); } } if (ret) { fprintf(stderr, "%s\n", error); goto fail; } if (want_sdp) { print_sdp(output_files, nb_output_files); } if (!using_stdin) { if(verbose >= 0) fprintf(stderr, "Press [q] to stop, [?] for help\n"); avio_set_interrupt_cb(decode_interrupt_cb); } term_init(); timer_start = av_gettime(); for(; received_sigterm == 0;) { int file_index, ist_index; AVPacket pkt; double ipts_min; double opts_min; redo: ipts_min= 1e100; opts_min= 1e100; /* if 'q' pressed, exits */ if (!using_stdin) { if (q_pressed) break; /* read_key() returns 0 on EOF */ key = read_key(); if (key == 'q') break; if (key == '+') verbose++; if (key == '-') verbose--; if (key == 's') qp_hist ^= 1; if (key == 'h'){ if (do_hex_dump){ do_hex_dump = do_pkt_dump = 0; } else if(do_pkt_dump){ do_hex_dump = 1; } else do_pkt_dump = 1; av_log_set_level(AV_LOG_DEBUG); } if (key == 'd' || key == 'D'){ int debug=0; if(key == 'D') { debug = input_streams[0].st->codec->debug<<1; if(!debug) debug = 1; while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash debug += debug; }else scanf("%d", &debug); for(i=0;i<nb_input_streams;i++) { input_streams[i].st->codec->debug = debug; } for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; ost->st->codec->debug = debug; } if(debug) av_log_set_level(AV_LOG_DEBUG); fprintf(stderr,"debug=%d\n", debug); } if (key == '?'){ fprintf(stderr, "key function\n" "? 
show this help\n" "+ increase verbosity\n" "- decrease verbosity\n" "D cycle through available debug modes\n" "h dump packets/hex press to cycle through the 3 states\n" "q quit\n" "s Show QP histogram\n" ); } } /* select the stream that we must read now by looking at the smallest output pts */ file_index = -1; for(i=0;i<nb_ostreams;i++) { double ipts, opts; ost = ost_table[i]; os = output_files[ost->file_index]; ist = &input_streams[ost->source_index]; if(ist->is_past_recording_time || no_packet[ist->file_index]) continue; opts = ost->st->pts.val * av_q2d(ost->st->time_base); ipts = (double)ist->pts; if (!input_files[ist->file_index].eof_reached){ if(ipts < ipts_min) { ipts_min = ipts; if(input_sync ) file_index = ist->file_index; } if(opts < opts_min) { opts_min = opts; if(!input_sync) file_index = ist->file_index; } } if(ost->frame_number >= max_frames[ost->st->codec->codec_type]){ file_index= -1; break; } } /* if none, if is finished */ if (file_index < 0) { if(no_packet_count){ no_packet_count=0; memset(no_packet, 0, sizeof(no_packet)); usleep(10000); continue; } break; } /* finish if limit size exhausted */ if (limit_filesize != 0 && limit_filesize <= avio_tell(output_files[0]->pb)) break; /* read a frame from it and output it in the fifo */ is = input_files[file_index].ctx; ret= av_read_frame(is, &pkt); if(ret == AVERROR(EAGAIN)){ no_packet[file_index]=1; no_packet_count++; continue; } if (ret < 0) { input_files[file_index].eof_reached = 1; if (opt_shortest) break; else continue; } no_packet_count=0; memset(no_packet, 0, sizeof(no_packet)); if (do_pkt_dump) { av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump, is->streams[pkt.stream_index]); } /* the following test is needed in case new streams appear dynamically in stream : we ignore them */ if (pkt.stream_index >= input_files[file_index].ctx->nb_streams) goto discard_packet; ist_index = input_files[file_index].ist_index + pkt.stream_index; ist = &input_streams[ist_index]; if (ist->discard) goto discard_packet; if (pkt.dts != AV_NOPTS_VALUE) pkt.dts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (pkt.pts != AV_NOPTS_VALUE) pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (ist->ts_scale) { if(pkt.pts != AV_NOPTS_VALUE) pkt.pts *= ist->ts_scale; if(pkt.dts != AV_NOPTS_VALUE) pkt.dts *= ist->ts_scale; } // fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files[ist->file_index].ts_offset, ist->st->codec->codec_type); if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE && (is->iformat->flags & AVFMT_TS_DISCONT)) { int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q); int64_t delta= pkt_dts - ist->next_pts; if((FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE || pkt_dts+1<ist->pts)&& !copy_ts){ input_files[ist->file_index].ts_offset -= delta; if (verbose > 2) fprintf(stderr, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", delta, input_files[ist->file_index].ts_offset); pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); if(pkt.pts != AV_NOPTS_VALUE) pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); } } /* finish if recording time exhausted */ if (recording_time != INT64_MAX && (pkt.pts != AV_NOPTS_VALUE ? 
av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) : av_compare_ts(ist->pts, AV_TIME_BASE_Q, recording_time + start_time, (AVRational){1, 1000000}) )>= 0) { ist->is_past_recording_time = 1; goto discard_packet; } //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size); if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) { if (verbose >= 0) fprintf(stderr, "Error while decoding stream #%d.%d\n", ist->file_index, ist->st->index); if (exit_on_error) ffmpeg_exit(1); av_free_packet(&pkt); goto redo; } discard_packet: av_free_packet(&pkt); /* dump report by using the output first video and audio streams */ print_report(output_files, ost_table, nb_ostreams, 0); } /* at the end of stream, we must flush the decoder buffers */ for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { output_packet(ist, i, ost_table, nb_ostreams, NULL); } } term_exit(); /* write the trailer if needed and close file */ for(i=0;i<nb_output_files;i++) { os = output_files[i]; av_write_trailer(os); } /* dump report by using the first video and audio streams */ print_report(output_files, ost_table, nb_ostreams, 1); /* close each encoder */ for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; if (ost->encoding_needed) { av_freep(&ost->st->codec->stats_in); avcodec_close(ost->st->codec); } #if CONFIG_AVFILTER avfilter_graph_free(&ost->graph); #endif } /* close each decoder */ for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { avcodec_close(ist->st->codec); } } /* finished ! */ ret = 0; fail: av_freep(&bit_buffer); if (ost_table) { for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; if (ost) { if (ost->st->stream_copy) av_freep(&ost->st->codec->extradata); if (ost->logfile) { fclose(ost->logfile); ost->logfile = NULL; } av_fifo_free(ost->fifo); /* works even if fifo is not initialized but set to zero */ av_freep(&ost->st->codec->subtitle_header); av_free(ost->resample_frame.data[0]); av_free(ost->forced_kf_pts); if (ost->video_resample) sws_freeContext(ost->img_resample_ctx); if (ost->resample) audio_resample_close(ost->resample); if (ost->reformat_ctx) av_audio_convert_free(ost->reformat_ctx); av_dict_free(&ost->opts); av_free(ost); } } av_free(ost_table); } return ret; } | 23,480 |
1 | static int qemu_event_init(void) { int err; int fds[2]; err = pipe(fds); if (err == -1) return -errno; err = fcntl_setfl(fds[0], O_NONBLOCK); if (err < 0) goto fail; err = fcntl_setfl(fds[1], O_NONBLOCK); if (err < 0) goto fail; qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL, (void *)(unsigned long)fds[0]); io_thread_fd = fds[1]; return 0; fail: close(fds[0]); close(fds[1]); return err; } | 23,481 |
1 | static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet) { MpegEncContext *s = avctx->priv_data; AVFrame *pic; int i, ret; int chroma_h_shift, chroma_v_shift; av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift); //CODEC_FLAG_EMU_EDGE have to be cleared if(s->avctx->flags & CODEC_FLAG_EMU_EDGE) return AVERROR(EINVAL); if (avctx->height & 15) { av_log(avctx, AV_LOG_ERROR, "Height must be a multiple of 16, also note, " "if you have a AMV sample thats mod 16 != 0, please contact us\n"); return AVERROR(EINVAL); } pic = av_frame_clone(pic_arg); if (!pic) return AVERROR(ENOMEM); //picture should be flipped upside-down for(i=0; i < 3; i++) { int vsample = i ? 2 >> chroma_v_shift : 2; pic->data[i] += (pic->linesize[i] * (vsample * (8 * s->mb_height -((s->height/V_MAX)&7)) - 1 )); pic->linesize[i] *= -1; } ret = ff_MPV_encode_picture(avctx, pkt, pic, got_packet); av_frame_free(&pic); return ret; } | 23,482 |
0 | static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv val2, int insn, int rd) { TCGv val1 = gen_load_gpr(dc, rd); TCGv dst = gen_dest_gpr(dc, rd); TCGv_i32 r_asi = gen_get_asi(dc, insn); gen_helper_casx_asi(dst, cpu_env, addr, val1, val2, r_asi); tcg_temp_free_i32(r_asi); gen_store_gpr(dc, rd, dst); } | 23,484 |
0 | static void bt_vhci_add(int vlan_id) { struct bt_scatternet_s *vlan = qemu_find_bt_vlan(vlan_id); if (!vlan->slave) fprintf(stderr, "qemu: warning: adding a VHCI to " "an empty scatternet %i\n", vlan_id); bt_vhci_init(bt_new_hci(vlan)); } | 23,485 |
0 | e1000_autoneg_timer(void *opaque) { E1000State *s = opaque; if (!qemu_get_queue(s->nic)->link_down) { e1000_link_up(s); } s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE; DBGOUT(PHY, "Auto negotiation is completed\n"); } | 23,486 |
0 | static inline void halfpel_motion_search4(MpegEncContext * s, int *mx_ptr, int *my_ptr, int dmin, int xmin, int ymin, int xmax, int ymax, int pred_x, int pred_y, int block_x, int block_y, uint8_t *ref_picture) { UINT16 *mv_penalty= s->mv_penalty[s->f_code] + MAX_MV; // f_code of the prev frame const int quant= s->qscale; int pen_x, pen_y; int mx, my, mx1, my1, d, xx, yy, dminh; UINT8 *pix, *ptr; xx = 8 * block_x; yy = 8 * block_y; pix = s->new_picture[0] + (yy * s->linesize) + xx; mx = *mx_ptr; my = *my_ptr; ptr = ref_picture + ((yy+my) * s->linesize) + xx + mx; dminh = dmin; if (mx > xmin && mx < xmax && my > ymin && my < ymax) { mx= mx1= 2*mx; my= my1= 2*my; if(dmin < Z_THRESHOLD && mx==0 && my==0){ *mx_ptr = 0; *my_ptr = 0; return; } pen_x= pred_x + mx; pen_y= pred_y + my; ptr-= s->linesize; CHECK_HALF_MV4(xy2, -1, -1) CHECK_HALF_MV4(y2 , 0, -1) CHECK_HALF_MV4(xy2, +1, -1) ptr+= s->linesize; CHECK_HALF_MV4(x2 , -1, 0) CHECK_HALF_MV4(x2 , +1, 0) CHECK_HALF_MV4(xy2, -1, +1) CHECK_HALF_MV4(y2 , 0, +1) CHECK_HALF_MV4(xy2, +1, +1) }else{ mx*=2; my*=2; } *mx_ptr = mx; *my_ptr = my; } | 23,487 |
0 | void cpu_loop(CPUCRISState *env) { CPUState *cs = CPU(cris_env_get_cpu(env)); int trapnr, ret; target_siginfo_t info; while (1) { cpu_exec_start(cs); trapnr = cpu_cris_exec(cs); cpu_exec_end(cs); switch (trapnr) { case 0xaa: { info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; /* XXX: check env->error_code */ info.si_code = TARGET_SEGV_MAPERR; info._sifields._sigfault._addr = env->pregs[PR_EDA]; queue_signal(env, info.si_signo, &info); } break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; case EXCP_BREAK: ret = do_syscall(env, env->regs[9], env->regs[10], env->regs[11], env->regs[12], env->regs[13], env->pregs[7], env->pregs[11], 0, 0); env->regs[10] = ret; break; case EXCP_DEBUG: { int sig; sig = gdb_handlesig(cs, TARGET_SIGTRAP); if (sig) { info.si_signo = sig; info.si_errno = 0; info.si_code = TARGET_TRAP_BRKPT; queue_signal(env, info.si_signo, &info); } } break; default: printf ("Unhandled trap: 0x%x\n", trapnr); cpu_dump_state(cs, stderr, fprintf, 0); exit(EXIT_FAILURE); } process_pending_signals (env); } } | 23,489 |
0 | static void tlb_info_32(Monitor *mon, CPUState *env) { int l1, l2; uint32_t pgd, pde, pte; pgd = env->cr[3] & ~0xfff; for(l1 = 0; l1 < 1024; l1++) { cpu_physical_memory_read(pgd + l1 * 4, &pde, 4); pde = le32_to_cpu(pde); if (pde & PG_PRESENT_MASK) { if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { /* 4M pages */ print_pte(mon, (l1 << 22), pde, ~((1 << 21) - 1)); } else { for(l2 = 0; l2 < 1024; l2++) { cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4); pte = le32_to_cpu(pte); if (pte & PG_PRESENT_MASK) { print_pte(mon, (l1 << 22) + (l2 << 12), pte & ~PG_PSE_MASK, ~0xfff); } } } } } } | 23,490 |