label
int64
0
1
func1
stringlengths
23
97k
id
int64
0
27.3k
1
static void assert_codec_experimental(AVCodecContext *c, int encoder) { const char *codec_string = encoder ? "encoder" : "decoder"; AVCodec *codec; if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL && c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { av_log(NULL, AV_LOG_ERROR, "%s '%s' is experimental and might produce bad " "results.\nAdd '-strict experimental' if you want to use it.\n", codec_string, c->codec->name); codec = encoder ? avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id); if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL)) av_log(NULL, AV_LOG_ERROR, "Or use the non experimental %s '%s'.\n", codec_string, codec->name); ffmpeg_exit(1); } }
20,866
1
static void vnc_display_close(DisplayState *ds) { VncDisplay *vs = ds ? (VncDisplay *)ds->opaque : vnc_display; if (!vs) return; if (vs->display) { g_free(vs->display); vs->display = NULL; } if (vs->lsock != -1) { qemu_set_fd_handler2(vs->lsock, NULL, NULL, NULL, NULL); close(vs->lsock); vs->lsock = -1; } #ifdef CONFIG_VNC_WS g_free(vs->ws_display); vs->ws_display = NULL; if (vs->lwebsock != -1) { qemu_set_fd_handler2(vs->lwebsock, NULL, NULL, NULL, NULL); close(vs->lwebsock); vs->lwebsock = -1; } #endif /* CONFIG_VNC_WS */ vs->auth = VNC_AUTH_INVALID; #ifdef CONFIG_VNC_TLS vs->subauth = VNC_AUTH_INVALID; vs->tls.x509verify = 0; #endif }
20,867
1
static void device_set_realized(Object *obj, bool value, Error **errp) { DeviceState *dev = DEVICE(obj); DeviceClass *dc = DEVICE_GET_CLASS(dev); HotplugHandler *hotplug_ctrl; BusState *bus; Error *local_err = NULL; bool unattached_parent = false; static int unattached_count; int ret; if (dev->hotplugged && !dc->hotpluggable) { error_setg(errp, QERR_DEVICE_NO_HOTPLUG, object_get_typename(obj)); return; if (value && !dev->realized) { if (!obj->parent) { gchar *name = g_strdup_printf("device[%d]", unattached_count++); object_property_add_child(container_get(qdev_get_machine(), "/unattached"), name, obj, &error_abort); unattached_parent = true; g_free(name); hotplug_ctrl = qdev_get_hotplug_handler(dev); if (hotplug_ctrl) { hotplug_handler_pre_plug(hotplug_ctrl, dev, &local_err); if (local_err != NULL) { if (dc->realize) { dc->realize(dev, &local_err); if (local_err != NULL) { DEVICE_LISTENER_CALL(realize, Forward, dev); if (hotplug_ctrl) { hotplug_handler_plug(hotplug_ctrl, dev, &local_err); if (local_err != NULL) { goto post_realize_fail; if (qdev_get_vmsd(dev)) { if (vmstate_register_with_alias_id(dev, -1, qdev_get_vmsd(dev), dev, dev->instance_id_alias, dev->alias_required_for_version, &local_err) < 0) { goto post_realize_fail; QLIST_FOREACH(bus, &dev->child_bus, sibling) { object_property_set_bool(OBJECT(bus), true, "realized", &local_err); if (local_err != NULL) { goto child_realize_fail; if (dev->hotplugged) { device_reset(dev); dev->pending_deleted_event = false; } else if (!value && dev->realized) { Error **local_errp = NULL; QLIST_FOREACH(bus, &dev->child_bus, sibling) { local_errp = local_err ? NULL : &local_err; object_property_set_bool(OBJECT(bus), false, "realized", local_errp); if (qdev_get_vmsd(dev)) { vmstate_unregister(dev, qdev_get_vmsd(dev), dev); if (dc->unrealize) { local_errp = local_err ? NULL : &local_err; dc->unrealize(dev, local_errp); dev->pending_deleted_event = true; DEVICE_LISTENER_CALL(unrealize, Reverse, dev); if (local_err != NULL) { dev->realized = value; return; child_realize_fail: QLIST_FOREACH(bus, &dev->child_bus, sibling) { object_property_set_bool(OBJECT(bus), false, "realized", NULL); if (qdev_get_vmsd(dev)) { vmstate_unregister(dev, qdev_get_vmsd(dev), dev); post_realize_fail: if (dc->unrealize) { dc->unrealize(dev, NULL); fail: error_propagate(errp, local_err); if (unattached_parent) { object_unparent(OBJECT(dev)); unattached_count--;
20,868
1
void do_delvm(Monitor *mon, const QDict *qdict) { DriveInfo *dinfo; BlockDriverState *bs, *bs1; int ret; const char *name = qdict_get_str(qdict, "name"); bs = get_bs_snapshots(); if (!bs) { monitor_printf(mon, "No block device supports snapshots\n"); return; } QTAILQ_FOREACH(dinfo, &drives, next) { bs1 = dinfo->bdrv; if (bdrv_has_snapshot(bs1)) { ret = bdrv_snapshot_delete(bs1, name); if (ret < 0) { if (ret == -ENOTSUP) monitor_printf(mon, "Snapshots not supported on device '%s'\n", bdrv_get_device_name(bs1)); else monitor_printf(mon, "Error %d while deleting snapshot on " "'%s'\n", ret, bdrv_get_device_name(bs1)); } } } }
20,870
1
static void bit_prop_set(DeviceState *dev, Property *props, bool val) { uint32_t *p = qdev_get_prop_ptr(dev, props); uint32_t mask = qdev_get_prop_mask(props); if (val) *p |= ~mask; else *p &= ~mask; }
20,871
1
static void vmxnet3_adjust_by_guest_type(VMXNET3State *s) { struct Vmxnet3_GOSInfo gos; VMXNET3_READ_DRV_SHARED(s->drv_shmem, devRead.misc.driverInfo.gos, &gos, sizeof(gos)); s->rx_packets_compound = (gos.gosType == VMXNET3_GOS_TYPE_WIN) ? false : true; VMW_CFPRN("Guest type specifics: RXCOMPOUND: %d", s->rx_packets_compound); }
20,872
1
int av_grow_packet(AVPacket *pkt, int grow_by) { int new_size; av_assert0((unsigned)pkt->size <= INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE); if ((unsigned)grow_by > INT_MAX - (pkt->size + AV_INPUT_BUFFER_PADDING_SIZE)) return -1; new_size = pkt->size + grow_by + AV_INPUT_BUFFER_PADDING_SIZE; if (pkt->buf) { size_t data_offset; uint8_t *old_data = pkt->data; if (pkt->data == NULL) { data_offset = 0; pkt->data = pkt->buf->data; } else { data_offset = pkt->data - pkt->buf->data; if (data_offset > INT_MAX - new_size) return -1; } if (new_size + data_offset > pkt->buf->size) { int ret = av_buffer_realloc(&pkt->buf, new_size + data_offset); if (ret < 0) { pkt->data = old_data; return ret; } pkt->data = pkt->buf->data + data_offset; } } else { pkt->buf = av_buffer_alloc(new_size); if (!pkt->buf) return AVERROR(ENOMEM); memcpy(pkt->buf->data, pkt->data, pkt->size); pkt->data = pkt->buf->data; } pkt->size += grow_by; memset(pkt->data + pkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE); return 0; }
20,873
1
static inline void IRQ_resetbit(IRQ_queue_t *q, int n_IRQ) { q->pending--; reset_bit(q->queue, n_IRQ); }
20,874
1
static void *nbd_client_thread(void *arg) { char *device = arg; off_t size; size_t blocksize; uint32_t nbdflags; int fd, sock; int ret; pthread_t show_parts_thread; sock = unix_socket_outgoing(sockpath); if (sock < 0) { goto out; } ret = nbd_receive_negotiate(sock, NULL, &nbdflags, &size, &blocksize); if (ret < 0) { goto out; } fd = open(device, O_RDWR); if (fd < 0) { /* Linux-only, we can use %m in printf. */ fprintf(stderr, "Failed to open %s: %m", device); goto out; } ret = nbd_init(fd, sock, nbdflags, size, blocksize); if (ret < 0) { goto out; } /* update partition table */ pthread_create(&show_parts_thread, NULL, show_parts, device); if (verbose) { fprintf(stderr, "NBD device %s is now connected to %s\n", device, srcpath); } else { /* Close stderr so that the qemu-nbd process exits. */ dup2(STDOUT_FILENO, STDERR_FILENO); } ret = nbd_client(fd); if (ret) { goto out; } close(fd); kill(getpid(), SIGTERM); return (void *) EXIT_SUCCESS; out: kill(getpid(), SIGTERM); return (void *) EXIT_FAILURE; }
20,875
1
bool migration_in_setup(MigrationState *s) { return s->state == MIG_STATE_SETUP; }
20,876
1
static int64_t guest_file_handle_add(HANDLE fh, Error **errp) { GuestFileHandle *gfh; int64_t handle; handle = ga_get_fd_handle(ga_state, errp); if (handle < 0) { return -1; } gfh = g_malloc0(sizeof(GuestFileHandle)); gfh->id = handle; gfh->fh = fh; QTAILQ_INSERT_TAIL(&guest_file_state.filehandles, gfh, next); return handle; }
20,877
1
static void pm_reset(void *opaque) { ICH9LPCPMRegs *pm = opaque; ich9_pm_iospace_update(pm, 0); acpi_pm1_evt_reset(&pm->acpi_regs); acpi_pm1_cnt_reset(&pm->acpi_regs); acpi_pm_tmr_reset(&pm->acpi_regs); acpi_gpe_reset(&pm->acpi_regs); pm_update_sci(pm);
20,878
1
static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels, ptrdiff_t line_size) { int i; vec_u8 perm = vec_lvsl(0, pixels); const vec_u8 zero = (const vec_u8)vec_splat_u8(0); for (i = 0; i < 8; i++) { /* Read potentially unaligned pixels. * We're reading 16 pixels, and actually only want 8, * but we simply ignore the extras. */ vec_u8 pixl = vec_ld(0, pixels); vec_u8 pixr = vec_ld(7, pixels); vec_u8 bytes = vec_perm(pixl, pixr, perm); // Convert the bytes into shorts. vec_s16 shorts = (vec_s16)vec_mergeh(zero, bytes); // Save the data to the block, we assume the block is 16-byte aligned. vec_st(shorts, i * 16, (vec_s16 *)block); pixels += line_size; } }
20,880
1
static int spapr_phb_init(SysBusDevice *s) { sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(s); PCIHostState *phb = PCI_HOST_BRIDGE(s); char *namebuf; int i; PCIBus *bus; sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid); namebuf = alloca(strlen(sphb->dtbusname) + 32); /* Initialize memory regions */ sprintf(namebuf, "%s.mmio", sphb->dtbusname); memory_region_init(&sphb->memspace, namebuf, INT64_MAX); sprintf(namebuf, "%s.mmio-alias", sphb->dtbusname); memory_region_init_alias(&sphb->memwindow, namebuf, &sphb->memspace, SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size); memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr, &sphb->memwindow); /* On ppc, we only have MMIO no specific IO space from the CPU * perspective. In theory we ought to be able to embed the PCI IO * memory region direction in the system memory space. However, * if any of the IO BAR subregions use the old_portio mechanism, * that won't be processed properly unless accessed from the * system io address space. This hack to bounce things via * system_io works around the problem until all the users of * old_portion are updated */ sprintf(namebuf, "%s.io", sphb->dtbusname); memory_region_init(&sphb->iospace, namebuf, SPAPR_PCI_IO_WIN_SIZE); /* FIXME: fix to support multiple PHBs */ memory_region_add_subregion(get_system_io(), 0, &sphb->iospace); sprintf(namebuf, "%s.io-alias", sphb->dtbusname); memory_region_init_io(&sphb->iowindow, &spapr_io_ops, sphb, namebuf, SPAPR_PCI_IO_WIN_SIZE); memory_region_add_subregion(get_system_memory(), sphb->io_win_addr, &sphb->iowindow); /* As MSI/MSIX interrupts trigger by writing at MSI/MSIX vectors, * we need to allocate some memory to catch those writes coming * from msi_notify()/msix_notify() */ if (msi_supported) { sprintf(namebuf, "%s.msi", sphb->dtbusname); memory_region_init_io(&sphb->msiwindow, &spapr_msi_ops, sphb, namebuf, SPAPR_MSIX_MAX_DEVS * 0x10000); memory_region_add_subregion(get_system_memory(), sphb->msi_win_addr, &sphb->msiwindow); } bus = pci_register_bus(DEVICE(s), sphb->busname ? sphb->busname : sphb->dtbusname, pci_spapr_set_irq, pci_spapr_map_irq, sphb, &sphb->memspace, &sphb->iospace, PCI_DEVFN(0, 0), PCI_NUM_PINS); phb->bus = bus; sphb->dma_liobn = SPAPR_PCI_BASE_LIOBN | (pci_find_domain(bus) << 16); sphb->dma_window_start = 0; sphb->dma_window_size = 0x40000000; sphb->dma = spapr_tce_new_dma_context(sphb->dma_liobn, sphb->dma_window_size); pci_setup_iommu(bus, spapr_pci_dma_context_fn, sphb); QLIST_INSERT_HEAD(&spapr->phbs, sphb, list); /* Initialize the LSI table */ for (i = 0; i < PCI_NUM_PINS; i++) { uint32_t irq; irq = spapr_allocate_lsi(0); if (!irq) { return -1; } sphb->lsi_table[i].irq = irq; } return 0; }
20,881
1
static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry, uint16_t *val, uint16_t dev_value, uint16_t valid_mask) { XenPTRegInfo *reg = cfg_entry->reg; XenPTMSI *msi = s->msi; uint16_t writable_mask = 0; uint16_t throughable_mask = 0; uint16_t raw_val; /* Currently no support for multi-vector */ if (*val & PCI_MSI_FLAGS_QSIZE) { XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val); } /* modify emulate register */ writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask; cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask); msi->flags |= cfg_entry->data & ~PCI_MSI_FLAGS_ENABLE; /* create value for writing to I/O device register */ raw_val = *val; throughable_mask = ~reg->emu_mask & valid_mask; *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask); /* update MSI */ if (raw_val & PCI_MSI_FLAGS_ENABLE) { /* setup MSI pirq for the first time */ if (!msi->initialized) { /* Init physical one */ XEN_PT_LOG(&s->dev, "setup MSI\n"); if (xen_pt_msi_setup(s)) { /* We do not broadcast the error to the framework code, so * that MSI errors are contained in MSI emulation code and * QEMU can go on running. * Guest MSI would be actually not working. */ *val &= ~PCI_MSI_FLAGS_ENABLE; XEN_PT_WARN(&s->dev, "Can not map MSI.\n"); return 0; } if (xen_pt_msi_update(s)) { *val &= ~PCI_MSI_FLAGS_ENABLE; XEN_PT_WARN(&s->dev, "Can not bind MSI\n"); return 0; } msi->initialized = true; msi->mapped = true; } msi->flags |= PCI_MSI_FLAGS_ENABLE; } else { msi->flags &= ~PCI_MSI_FLAGS_ENABLE; } /* pass through MSI_ENABLE bit */ *val &= ~PCI_MSI_FLAGS_ENABLE; *val |= raw_val & PCI_MSI_FLAGS_ENABLE; return 0; }
20,882
1
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val) { #ifdef DEBUG_UNASSIGNED printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val); #endif #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 1, 0, 0, 2); #endif }
20,884
1
static int i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu, int max_insns) { DisasContext *dc = container_of(dcbase, DisasContext, base); CPUX86State *env = cpu->env_ptr; uint32_t flags = dc->base.tb->flags; target_ulong cs_base = dc->base.tb->cs_base; dc->pe = (flags >> HF_PE_SHIFT) & 1; dc->code32 = (flags >> HF_CS32_SHIFT) & 1; dc->ss32 = (flags >> HF_SS32_SHIFT) & 1; dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1; dc->f_st = 0; dc->vm86 = (flags >> VM_SHIFT) & 1; dc->cpl = (flags >> HF_CPL_SHIFT) & 3; dc->iopl = (flags >> IOPL_SHIFT) & 3; dc->tf = (flags >> TF_SHIFT) & 1; dc->cc_op = CC_OP_DYNAMIC; dc->cc_op_dirty = false; dc->cs_base = cs_base; dc->popl_esp_hack = 0; /* select memory access functions */ dc->mem_index = 0; #ifdef CONFIG_SOFTMMU dc->mem_index = cpu_mmu_index(env, false); #endif dc->cpuid_features = env->features[FEAT_1_EDX]; dc->cpuid_ext_features = env->features[FEAT_1_ECX]; dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX]; dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX]; dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX]; dc->cpuid_xsave_features = env->features[FEAT_XSAVE]; #ifdef TARGET_X86_64 dc->lma = (flags >> HF_LMA_SHIFT) & 1; dc->code64 = (flags >> HF_CS64_SHIFT) & 1; #endif dc->flags = flags; dc->jmp_opt = !(dc->tf || dc->base.singlestep_enabled || (flags & HF_INHIBIT_IRQ_MASK)); /* Do not optimize repz jumps at all in icount mode, because rep movsS instructions are execured with different paths in !repz_opt and repz_opt modes. The first one was used always except single step mode. And this setting disables jumps optimization and control paths become equivalent in run and single step modes. Now there will be no jump optimization for repz in record/replay modes and there will always be an additional step for ecx=0 when icount is enabled. */ dc->repz_opt = !dc->jmp_opt && !(dc->base.tb->cflags & CF_USE_ICOUNT); #if 0 /* check addseg logic */ if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32)) printf("ERROR addseg\n"); #endif cpu_T0 = tcg_temp_new(); cpu_T1 = tcg_temp_new(); cpu_A0 = tcg_temp_new(); cpu_tmp0 = tcg_temp_new(); cpu_tmp1_i64 = tcg_temp_new_i64(); cpu_tmp2_i32 = tcg_temp_new_i32(); cpu_tmp3_i32 = tcg_temp_new_i32(); cpu_tmp4 = tcg_temp_new(); cpu_ptr0 = tcg_temp_new_ptr(); cpu_ptr1 = tcg_temp_new_ptr(); cpu_cc_srcT = tcg_temp_local_new(); return max_insns; }
20,885
1
void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest, unsigned width, unsigned height, int src1Stride, int src2Stride, int dstStride){ unsigned h; for(h=0; h < height; h++) { unsigned w; #ifdef HAVE_MMX #ifdef HAVE_SSE2 asm( "xor %%"REG_a", %%"REG_a" \n\t" "1: \n\t" PREFETCH" 64(%1, %%"REG_a") \n\t" PREFETCH" 64(%2, %%"REG_a") \n\t" "movdqa (%1, %%"REG_a"), %%xmm0 \n\t" "movdqa (%1, %%"REG_a"), %%xmm1 \n\t" "movdqa (%2, %%"REG_a"), %%xmm2 \n\t" "punpcklbw %%xmm2, %%xmm0 \n\t" "punpckhbw %%xmm2, %%xmm1 \n\t" "movntdq %%xmm0, (%0, %%"REG_a", 2)\n\t" "movntdq %%xmm1, 16(%0, %%"REG_a", 2)\n\t" "add $16, %%"REG_a" \n\t" "cmp %3, %%"REG_a" \n\t" " jb 1b \n\t" ::"r"(dest), "r"(src1), "r"(src2), "r" ((long)width-15) : "memory", "%"REG_a"" ); #else asm( "xor %%"REG_a", %%"REG_a" \n\t" "1: \n\t" PREFETCH" 64(%1, %%"REG_a") \n\t" PREFETCH" 64(%2, %%"REG_a") \n\t" "movq (%1, %%"REG_a"), %%mm0 \n\t" "movq 8(%1, %%"REG_a"), %%mm2 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "movq (%2, %%"REG_a"), %%mm4 \n\t" "movq 8(%2, %%"REG_a"), %%mm5 \n\t" "punpcklbw %%mm4, %%mm0 \n\t" "punpckhbw %%mm4, %%mm1 \n\t" "punpcklbw %%mm5, %%mm2 \n\t" "punpckhbw %%mm5, %%mm3 \n\t" MOVNTQ" %%mm0, (%0, %%"REG_a", 2)\n\t" MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2)\n\t" MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2)\n\t" MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2)\n\t" "add $16, %%"REG_a" \n\t" "cmp %3, %%"REG_a" \n\t" " jb 1b \n\t" ::"r"(dest), "r"(src1), "r"(src2), "r" ((long)width-15) : "memory", "%"REG_a ); #endif for(w= (width&(~15)); w < width; w++) { dest[2*w+0] = src1[w]; dest[2*w+1] = src2[w]; } #else for(w=0; w < width; w++) { dest[2*w+0] = src1[w]; dest[2*w+1] = src2[w]; } #endif dest += dstStride; src1 += src1Stride; src2 += src2Stride; } #ifdef HAVE_MMX asm( EMMS" \n\t" SFENCE" \n\t" ::: "memory" ); #endif }
20,886
1
void OPPROTO op_check_subfo (void) { if (likely(!(((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) & ((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)))) { xer_ov = 0; } else { xer_ov = 1; xer_so = 1; } RETURN(); }
20,887
0
static int asf_write_packet(AVFormatContext *s, AVPacket *pkt) { ASFContext *asf = s->priv_data; ASFStream *stream; int64_t duration; AVCodecContext *codec; int64_t packet_st, pts; int start_sec, i; int flags = pkt->flags; codec = s->streams[pkt->stream_index]->codec; stream = &asf->streams[pkt->stream_index]; if (codec->codec_type == AVMEDIA_TYPE_AUDIO) flags &= ~AV_PKT_FLAG_KEY; pts = (pkt->pts != AV_NOPTS_VALUE) ? pkt->pts : pkt->dts; if (pts < 0) { av_log(s, AV_LOG_ERROR, "Negative dts not supported stream %d, dts %"PRId64"\n", pkt->stream_index, pts); return AVERROR(ENOSYS); } assert(pts != AV_NOPTS_VALUE); duration = pts * 10000; asf->duration = FFMAX(asf->duration, duration + pkt->duration * 10000); packet_st = asf->nb_packets; put_frame(s, stream, s->streams[pkt->stream_index], pkt->dts, pkt->data, pkt->size, flags); /* check index */ if ((!asf->is_streamed) && (flags & AV_PKT_FLAG_KEY)) { start_sec = (int)(duration / INT64_C(10000000)); if (start_sec != (int)(asf->last_indexed_pts / INT64_C(10000000))) { for (i = asf->nb_index_count; i < start_sec; i++) { if (i >= asf->nb_index_memory_alloc) { asf->nb_index_memory_alloc += ASF_INDEX_BLOCK; asf->index_ptr = (ASFIndex *)av_realloc(asf->index_ptr, sizeof(ASFIndex) * asf->nb_index_memory_alloc); } // store asf->index_ptr[i].packet_number = (uint32_t)packet_st; asf->index_ptr[i].packet_count = (uint16_t)(asf->nb_packets - packet_st); asf->maximum_packet = FFMAX(asf->maximum_packet, (uint16_t)(asf->nb_packets - packet_st)); } asf->nb_index_count = start_sec; asf->last_indexed_pts = duration; } } return 0; }
20,888
0
int avpriv_snprintf(char *restrict s, size_t n, const char *restrict fmt, ...) { va_list ap; int ret; va_start(ap, fmt); ret = avpriv_vsnprintf(s, n, fmt, ap); va_end(ap); return ret; }
20,889
0
static void virtio_pci_device_plugged(DeviceState *d, Error **errp) { VirtIOPCIProxy *proxy = VIRTIO_PCI(d); VirtioBusState *bus = &proxy->bus; bool legacy = virtio_pci_legacy(proxy); bool modern; bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; uint8_t *config; uint32_t size; VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); /* * Virtio capabilities present without * VIRTIO_F_VERSION_1 confuses guests */ if (!proxy->ignore_backend_features && !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) { virtio_pci_disable_modern(proxy); if (!legacy) { error_setg(errp, "Device doesn't support modern mode, and legacy" " mode is disabled"); error_append_hint(errp, "Set disable-legacy to off\n"); return; } } modern = virtio_pci_modern(proxy); config = proxy->pci_dev.config; if (proxy->class_code) { pci_config_set_class(config, proxy->class_code); } if (legacy) { if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) { error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by" "neither legacy nor transitional device."); return ; } /* legacy and transitional */ pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID, pci_get_word(config + PCI_VENDOR_ID)); pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus)); } else { /* pure virtio-1.0 */ pci_set_word(config + PCI_VENDOR_ID, PCI_VENDOR_ID_REDHAT_QUMRANET); pci_set_word(config + PCI_DEVICE_ID, 0x1040 + virtio_bus_get_vdev_id(bus)); pci_config_set_revision(config, 1); } config[PCI_INTERRUPT_PIN] = 1; if (modern) { struct virtio_pci_cap cap = { .cap_len = sizeof cap, }; struct virtio_pci_notify_cap notify = { .cap.cap_len = sizeof notify, .notify_off_multiplier = cpu_to_le32(virtio_pci_queue_mem_mult(proxy)), }; struct virtio_pci_cfg_cap cfg = { .cap.cap_len = sizeof cfg, .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG, }; struct virtio_pci_notify_cap notify_pio = { .cap.cap_len = sizeof notify, .notify_off_multiplier = cpu_to_le32(0x0), }; struct virtio_pci_cfg_cap *cfg_mask; virtio_pci_modern_regions_init(proxy); virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap); virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap); virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap); virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap); if (modern_pio) { memory_region_init(&proxy->io_bar, OBJECT(proxy), "virtio-pci-io", 0x4); pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx, PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar); virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio, &notify_pio.cap); } pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx, PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64, &proxy->modern_bar); proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap); cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap); pci_set_byte(&cfg_mask->cap.bar, ~0x0); pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0); pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0); pci_set_long(cfg_mask->pci_cfg_data, ~0x0); } if (proxy->nvectors) { int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, proxy->msix_bar_idx, NULL); if (err) { /* Notice when a system that supports MSIx can't initialize it */ if (err != -ENOTSUP) { error_report("unable to init msix vectors to %" PRIu32, proxy->nvectors); } proxy->nvectors = 0; } } proxy->pci_dev.config_write = virtio_write_config; proxy->pci_dev.config_read = virtio_read_config; if (legacy) { size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + virtio_bus_get_vdev_config_len(bus); size = pow2ceil(size); memory_region_init_io(&proxy->bar, OBJECT(proxy), &virtio_pci_config_ops, proxy, "virtio-pci", size); pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx, PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar); } }
20,890
0
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb) { DECLARE_BITMAP(compacted, nodes_nb); if (d->phys_map.skip) { phys_page_compact(&d->phys_map, d->nodes, compacted); } }
20,891
0
int avpriv_adx_decode_header(AVCodecContext *avctx, const uint8_t *buf, int bufsize, int *header_size, int *coeff) { int offset, cutoff; if (bufsize < 24) return AVERROR_INVALIDDATA; if (AV_RB16(buf) != 0x8000) return AVERROR_INVALIDDATA; offset = AV_RB16(buf + 2) + 4; /* if copyright string is within the provided data, validate it */ if (bufsize >= offset && memcmp(buf + offset - 6, "(c)CRI", 6)) return AVERROR_INVALIDDATA; /* check for encoding=3 block_size=18, sample_size=4 */ if (buf[4] != 3 || buf[5] != 18 || buf[6] != 4) { avpriv_request_sample(avctx, "Support for this ADX format"); return AVERROR_PATCHWELCOME; } /* channels */ avctx->channels = buf[7]; if (avctx->channels <= 0 || avctx->channels > 2) return AVERROR_INVALIDDATA; /* sample rate */ avctx->sample_rate = AV_RB32(buf + 8); if (avctx->sample_rate < 1 || avctx->sample_rate > INT_MAX / (avctx->channels * BLOCK_SIZE * 8)) return AVERROR_INVALIDDATA; /* bit rate */ avctx->bit_rate = avctx->sample_rate * avctx->channels * BLOCK_SIZE * 8 / BLOCK_SAMPLES; /* LPC coefficients */ if (coeff) { cutoff = AV_RB16(buf + 16); ff_adx_calculate_coeffs(cutoff, avctx->sample_rate, COEFF_BITS, coeff); } *header_size = offset; return 0; }
20,892
0
int walk_memory_regions(void *priv, walk_memory_regions_fn fn) { struct walk_memory_regions_data data; uintptr_t i; data.fn = fn; data.priv = priv; data.start = -1ul; data.prot = 0; for (i = 0; i < V_L1_SIZE; i++) { int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT, V_L1_SHIFT / L2_BITS - 1, l1_map + i); if (rc != 0) { return rc; } } return walk_memory_regions_end(&data, 0, 0); }
20,893
0
static int es1370_initfn (PCIDevice *dev) { ES1370State *s = DO_UPCAST (ES1370State, dev, dev); uint8_t *c = s->dev.config; pci_config_set_vendor_id (c, PCI_VENDOR_ID_ENSONIQ); pci_config_set_device_id (c, PCI_DEVICE_ID_ENSONIQ_ES1370); c[PCI_STATUS + 1] = PCI_STATUS_DEVSEL_SLOW >> 8; pci_config_set_class (c, PCI_CLASS_MULTIMEDIA_AUDIO); #if 1 c[PCI_SUBSYSTEM_VENDOR_ID] = 0x42; c[PCI_SUBSYSTEM_VENDOR_ID + 1] = 0x49; c[PCI_SUBSYSTEM_ID] = 0x4c; c[PCI_SUBSYSTEM_ID + 1] = 0x4c; #else c[PCI_SUBSYSTEM_VENDOR_ID] = 0x74; c[PCI_SUBSYSTEM_VENDOR_ID + 1] = 0x12; c[PCI_SUBSYSTEM_ID] = 0x71; c[PCI_SUBSYSTEM_ID + 1] = 0x13; c[PCI_CAPABILITY_LIST] = 0xdc; c[PCI_INTERRUPT_LINE] = 10; c[0xdc] = 0x00; #endif /* TODO: RST# value should be 0. */ c[PCI_INTERRUPT_PIN] = 1; c[PCI_MIN_GNT] = 0x0c; c[PCI_MAX_LAT] = 0x80; pci_register_bar (&s->dev, 0, 256, PCI_BASE_ADDRESS_SPACE_IO, es1370_map); qemu_register_reset (es1370_on_reset, s); AUD_register_card ("es1370", &s->card); es1370_reset (s); return 0; }
20,894
0
int qemu_can_send_packet(VLANClientState *sender) { VLANState *vlan = sender->vlan; VLANClientState *vc; for (vc = vlan->first_client; vc != NULL; vc = vc->next) { if (vc == sender) { continue; } /* no can_receive() handler, they can always receive */ if (!vc->can_receive || vc->can_receive(vc->opaque)) { return 1; } } return 0; }
20,896
0
static uint32_t apic_mem_readw(void *opaque, target_phys_addr_t addr) { return 0; }
20,897
0
static int ppc_hash64_pp_check(int key, int pp, bool nx) { int access; /* Compute access rights */ /* When pp is 4, 5 or 7, the result is undefined. Set it to noaccess */ access = 0; if (key == 0) { switch (pp) { case 0x0: case 0x1: case 0x2: access |= PAGE_WRITE; /* No break here */ case 0x3: case 0x6: access |= PAGE_READ; break; } } else { switch (pp) { case 0x0: case 0x6: access = 0; break; case 0x1: case 0x3: access = PAGE_READ; break; case 0x2: access = PAGE_READ | PAGE_WRITE; break; } } if (!nx) { access |= PAGE_EXEC; } return access; }
20,900
0
long do_rt_sigreturn(CPUAlphaState *env) { abi_ulong frame_addr = env->ir[IR_A0]; struct target_rt_sigframe *frame; sigset_t set; if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { goto badframe; } target_to_host_sigset(&set, &frame->uc.tuc_sigmask); sigprocmask(SIG_SETMASK, &set, NULL); if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { goto badframe; } if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, uc.tuc_stack), 0, env->ir[IR_SP]) == -EFAULT) { goto badframe; } unlock_user_struct(frame, frame_addr, 0); return env->ir[IR_V0]; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); }
20,901
0
static int tcx_init1(SysBusDevice *dev) { TCXState *s = FROM_SYSBUS(TCXState, dev); ram_addr_t vram_offset = 0; int size; uint8_t *vram_base; memory_region_init_ram(&s->vram_mem, "tcx.vram", s->vram_size * (1 + 4 + 4)); vmstate_register_ram_global(&s->vram_mem); vram_base = memory_region_get_ram_ptr(&s->vram_mem); /* 8-bit plane */ s->vram = vram_base; size = s->vram_size; memory_region_init_alias(&s->vram_8bit, "tcx.vram.8bit", &s->vram_mem, vram_offset, size); sysbus_init_mmio(dev, &s->vram_8bit); vram_offset += size; vram_base += size; /* DAC */ memory_region_init_io(&s->dac, &tcx_dac_ops, s, "tcx.dac", TCX_DAC_NREGS); sysbus_init_mmio(dev, &s->dac); /* TEC (dummy) */ memory_region_init_io(&s->tec, &dummy_ops, s, "tcx.tec", TCX_TEC_NREGS); sysbus_init_mmio(dev, &s->tec); /* THC: NetBSD writes here even with 8-bit display: dummy */ memory_region_init_io(&s->thc24, &dummy_ops, s, "tcx.thc24", TCX_THC_NREGS_24); sysbus_init_mmio(dev, &s->thc24); if (s->depth == 24) { /* 24-bit plane */ size = s->vram_size * 4; s->vram24 = (uint32_t *)vram_base; s->vram24_offset = vram_offset; memory_region_init_alias(&s->vram_24bit, "tcx.vram.24bit", &s->vram_mem, vram_offset, size); sysbus_init_mmio(dev, &s->vram_24bit); vram_offset += size; vram_base += size; /* Control plane */ size = s->vram_size * 4; s->cplane = (uint32_t *)vram_base; s->cplane_offset = vram_offset; memory_region_init_alias(&s->vram_cplane, "tcx.vram.cplane", &s->vram_mem, vram_offset, size); sysbus_init_mmio(dev, &s->vram_cplane); s->con = graphic_console_init(tcx24_update_display, tcx24_invalidate_display, tcx24_screen_dump, NULL, s); } else { /* THC 8 bit (dummy) */ memory_region_init_io(&s->thc8, &dummy_ops, s, "tcx.thc8", TCX_THC_NREGS_8); sysbus_init_mmio(dev, &s->thc8); s->con = graphic_console_init(tcx_update_display, tcx_invalidate_display, tcx_screen_dump, NULL, s); } qemu_console_resize(s->con, s->width, s->height); return 0; }
20,904
0
static QObject *qmp_input_get_object(QmpInputVisitor *qiv, const char *name, bool consume) { StackObject *tos; QObject *qobj; QObject *ret; if (!qiv->nb_stack) { /* Starting at root, name is ignored. */ return qiv->root; } /* We are in a container; find the next element. */ tos = &qiv->stack[qiv->nb_stack - 1]; qobj = tos->obj; assert(qobj); if (qobject_type(qobj) == QTYPE_QDICT) { assert(name); ret = qdict_get(qobject_to_qdict(qobj), name); if (tos->h && consume && ret) { bool removed = g_hash_table_remove(tos->h, name); assert(removed); } } else { assert(qobject_type(qobj) == QTYPE_QLIST); assert(!name); ret = qlist_entry_obj(tos->entry); if (consume) { tos->entry = qlist_next(tos->entry); } } return ret; }
20,905
0
int qemu_add_polling_cb(PollingFunc *func, void *opaque) { PollingEntry **ppe, *pe; pe = g_malloc0(sizeof(PollingEntry)); pe->func = func; pe->opaque = opaque; for(ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next); *ppe = pe; return 0; }
20,907
0
static void xtensa_ml605_init(MachineState *machine) { static const LxBoardDesc ml605_board = { .flash_base = 0xf8000000, .flash_size = 0x01000000, .flash_sector_size = 0x20000, .sram_size = 0x2000000, }; lx_init(&ml605_board, machine); }
20,909
0
START_TEST(qstring_from_str_test) { QString *qstring; const char *str = "QEMU"; qstring = qstring_from_str(str); fail_unless(qstring != NULL); fail_unless(qstring->base.refcnt == 1); fail_unless(strcmp(str, qstring->string) == 0); fail_unless(qobject_type(QOBJECT(qstring)) == QTYPE_QSTRING); // destroy doesn't exit yet g_free(qstring->string); g_free(qstring); }
20,911
0
static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp) { VirtIOVGA *vvga = VIRTIO_VGA(vpci_dev); VirtIOGPU *g = &vvga->vdev; VGACommonState *vga = &vvga->vga; Error *err = NULL; uint32_t offset; int i; /* init vga compat bits */ vga->vram_size_mb = 8; vga_common_init(vga, OBJECT(vpci_dev), false); vga_init(vga, OBJECT(vpci_dev), pci_address_space(&vpci_dev->pci_dev), pci_address_space_io(&vpci_dev->pci_dev), true); pci_register_bar(&vpci_dev->pci_dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &vga->vram); /* * Configure virtio bar and regions * * We use bar #2 for the mmio regions, to be compatible with stdvga. * virtio regions are moved to the end of bar #2, to make room for * the stdvga mmio registers at the start of bar #2. */ vpci_dev->modern_mem_bar = 2; vpci_dev->msix_bar = 4; offset = memory_region_size(&vpci_dev->modern_bar); offset -= vpci_dev->notify.size; vpci_dev->notify.offset = offset; offset -= vpci_dev->device.size; vpci_dev->device.offset = offset; offset -= vpci_dev->isr.size; vpci_dev->isr.offset = offset; offset -= vpci_dev->common.size; vpci_dev->common.offset = offset; /* init virtio bits */ qdev_set_parent_bus(DEVICE(g), BUS(&vpci_dev->bus)); /* force virtio-1.0 */ vpci_dev->flags &= ~VIRTIO_PCI_FLAG_DISABLE_MODERN; vpci_dev->flags |= VIRTIO_PCI_FLAG_DISABLE_LEGACY; object_property_set_bool(OBJECT(g), true, "realized", &err); if (err) { error_propagate(errp, err); return; } /* add stdvga mmio regions */ pci_std_vga_mmio_region_init(vga, &vpci_dev->modern_bar, vvga->vga_mrs, true); vga->con = g->scanout[0].con; graphic_console_set_hwops(vga->con, &virtio_vga_ops, vvga); for (i = 0; i < g->conf.max_outputs; i++) { object_property_set_link(OBJECT(g->scanout[i].con), OBJECT(vpci_dev), "device", errp); } }
20,912
0
static int virtio_read_many(ulong sector, void *load_addr, int sec_num) { struct virtio_blk_outhdr out_hdr; u8 status; /* Tell the host we want to read */ out_hdr.type = VIRTIO_BLK_T_IN; out_hdr.ioprio = 99; out_hdr.sector = sector; vring_send_buf(&block, &out_hdr, sizeof(out_hdr), VRING_DESC_F_NEXT); /* This is where we want to receive data */ vring_send_buf(&block, load_addr, SECTOR_SIZE * sec_num, VRING_DESC_F_WRITE | VRING_HIDDEN_IS_CHAIN | VRING_DESC_F_NEXT); /* status field */ vring_send_buf(&block, &status, sizeof(u8), VRING_DESC_F_WRITE | VRING_HIDDEN_IS_CHAIN); /* Now we can tell the host to read */ vring_wait_reply(&block, 0); drain_irqs(block.schid); return status; }
20,913
0
static int filter_frame(AVFilterLink *link, AVFrame *in) { AVFilterContext *ctx = link->dst; AVFilterLink *outlink = ctx->outputs[0]; ColorSpaceContext *s = ctx->priv; // FIXME if yuv2yuv_passthrough, don't get a new buffer but use the // input one if it is writable *OR* the actual literal values of in_* // and out_* are identical (not just their respective properties) AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h); int res; ptrdiff_t rgb_stride = FFALIGN(in->width * sizeof(int16_t), 32); unsigned rgb_sz = rgb_stride * in->height; struct ThreadData td; if (!out) { av_frame_free(&in); return AVERROR(ENOMEM); } av_frame_copy_props(out, in); out->color_primaries = s->user_prm == AVCOL_PRI_UNSPECIFIED ? default_prm[FFMIN(s->user_all, CS_NB)] : s->user_prm; if (s->user_trc == AVCOL_TRC_UNSPECIFIED) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format); out->color_trc = default_trc[FFMIN(s->user_all, CS_NB)]; if (out->color_trc == AVCOL_TRC_BT2020_10 && desc && desc->comp[0].depth >= 12) out->color_trc = AVCOL_TRC_BT2020_12; } else { out->color_trc = s->user_trc; } out->colorspace = s->user_csp == AVCOL_SPC_UNSPECIFIED ? default_csp[FFMIN(s->user_all, CS_NB)] : s->user_csp; out->color_range = s->user_rng == AVCOL_RANGE_UNSPECIFIED ? in->color_range : s->user_rng; if (rgb_sz != s->rgb_sz) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(out->format); int uvw = in->width >> desc->log2_chroma_w; av_freep(&s->rgb[0]); av_freep(&s->rgb[1]); av_freep(&s->rgb[2]); s->rgb_sz = 0; av_freep(&s->dither_scratch_base[0][0]); av_freep(&s->dither_scratch_base[0][1]); av_freep(&s->dither_scratch_base[1][0]); av_freep(&s->dither_scratch_base[1][1]); av_freep(&s->dither_scratch_base[2][0]); av_freep(&s->dither_scratch_base[2][1]); s->rgb[0] = av_malloc(rgb_sz); s->rgb[1] = av_malloc(rgb_sz); s->rgb[2] = av_malloc(rgb_sz); s->dither_scratch_base[0][0] = av_malloc(sizeof(*s->dither_scratch_base[0][0]) * (in->width + 4)); s->dither_scratch_base[0][1] = av_malloc(sizeof(*s->dither_scratch_base[0][1]) * (in->width + 4)); s->dither_scratch_base[1][0] = av_malloc(sizeof(*s->dither_scratch_base[1][0]) * (uvw + 4)); s->dither_scratch_base[1][1] = av_malloc(sizeof(*s->dither_scratch_base[1][1]) * (uvw + 4)); s->dither_scratch_base[2][0] = av_malloc(sizeof(*s->dither_scratch_base[2][0]) * (uvw + 4)); s->dither_scratch_base[2][1] = av_malloc(sizeof(*s->dither_scratch_base[2][1]) * (uvw + 4)); s->dither_scratch[0][0] = &s->dither_scratch_base[0][0][1]; s->dither_scratch[0][1] = &s->dither_scratch_base[0][1][1]; s->dither_scratch[1][0] = &s->dither_scratch_base[1][0][1]; s->dither_scratch[1][1] = &s->dither_scratch_base[1][1][1]; s->dither_scratch[2][0] = &s->dither_scratch_base[2][0][1]; s->dither_scratch[2][1] = &s->dither_scratch_base[2][1][1]; if (!s->rgb[0] || !s->rgb[1] || !s->rgb[2] || !s->dither_scratch_base[0][0] || !s->dither_scratch_base[0][1] || !s->dither_scratch_base[1][0] || !s->dither_scratch_base[1][1] || !s->dither_scratch_base[2][0] || !s->dither_scratch_base[2][1]) { uninit(ctx); return AVERROR(ENOMEM); } s->rgb_sz = rgb_sz; } res = create_filtergraph(ctx, in, out); if (res < 0) return res; s->rgb_stride = rgb_stride / sizeof(int16_t); td.in = in; td.out = out; td.in_linesize[0] = in->linesize[0]; td.in_linesize[1] = in->linesize[1]; td.in_linesize[2] = in->linesize[2]; td.out_linesize[0] = out->linesize[0]; td.out_linesize[1] = out->linesize[1]; td.out_linesize[2] = out->linesize[2]; td.in_ss_h = av_pix_fmt_desc_get(in->format)->log2_chroma_h; td.out_ss_h = av_pix_fmt_desc_get(out->format)->log2_chroma_h; if (s->yuv2yuv_passthrough) { res = av_frame_copy(out, in); if (res < 0) return res; } else { ctx->internal->execute(ctx, convert, &td, NULL, FFMIN((in->height + 1) >> 1, ctx->graph->nb_threads)); } av_frame_free(&in); return ff_filter_frame(outlink, out); }
20,914
0
static void do_subtitle_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, AVSubtitle *sub, int64_t pts) { static uint8_t *subtitle_out = NULL; int subtitle_out_max_size = 1024 * 1024; int subtitle_out_size, nb, i; AVCodecContext *enc; AVPacket pkt; if (pts == AV_NOPTS_VALUE) { av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n"); if (exit_on_error) exit_program(1); return; } enc = ost->enc_ctx; if (!subtitle_out) { subtitle_out = av_malloc(subtitle_out_max_size); } /* Note: DVB subtitle need one packet to draw them and one other packet to clear them */ /* XXX: signal it in the codec context ? */ if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) nb = 2; else nb = 1; for (i = 0; i < nb; i++) { ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base); if (!check_recording_time(ost)) return; sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q); // start_display_time is required to be 0 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q); sub->end_display_time -= sub->start_display_time; sub->start_display_time = 0; ost->frames_encoded++; subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out, subtitle_out_max_size, sub); if (subtitle_out_size < 0) { av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n"); exit_program(1); } av_init_packet(&pkt); pkt.data = subtitle_out; pkt.size = subtitle_out_size; pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base); if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) { /* XXX: the pts correction is handled here. Maybe handling it in the codec would be better */ if (i == 0) pkt.pts += 90 * sub->start_display_time; else pkt.pts += 90 * sub->end_display_time; } output_packet(s, &pkt, ost); } }
20,915
1
static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, long width, long height, long lumStride, long chromStride, long srcStride) { long y; const long chromWidth= width>>1; for(y=0; y<height; y+=2) { #ifdef HAVE_MMX asm volatile( "xor %%"REG_a", %%"REG_a" \n\t" "pcmpeqw %%mm7, %%mm7 \n\t" "psrlw $8, %%mm7 \n\t" // FF,00,FF,00... ASMALIGN(4) "1: \n\t" PREFETCH" 64(%0, %%"REG_a", 4) \n\t" "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0) "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4) "movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0) "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4) "psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0) "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4) "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0) "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4) "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0) "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0) MOVNTQ" %%mm2, (%1, %%"REG_a", 2)\n\t" "movq 16(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(8) "movq 24(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(12) "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8) "movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12) "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8) "psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12) "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8) "pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12) "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8) "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8) MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2)\n\t" "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0) "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8) "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0) "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8) "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0) "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8) "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0) "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0) MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t" MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t" "add $8, %%"REG_a" \n\t" "cmp %4, %%"REG_a" \n\t" " jb 1b \n\t" ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth) : "memory", "%"REG_a ); ydst += lumStride; src += srcStride; asm volatile( "xor %%"REG_a", %%"REG_a" \n\t" ASMALIGN(4) "1: \n\t" PREFETCH" 64(%0, %%"REG_a", 4) \n\t" "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0) "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4) "movq 16(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(8) "movq 24(%0, %%"REG_a", 4), %%mm3\n\t" // YUYV YUYV(12) "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0) "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4) "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8) "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12) "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0) "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8) MOVNTQ" %%mm0, (%1, %%"REG_a", 2)\n\t" MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2)\n\t" "add $8, %%"REG_a" \n\t" "cmp %4, %%"REG_a" \n\t" " jb 1b \n\t" ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth) : "memory", "%"REG_a ); #else long i; for(i=0; i<chromWidth; i++) { ydst[2*i+0] = src[4*i+0]; udst[i] = src[4*i+1]; ydst[2*i+1] = src[4*i+2]; vdst[i] = src[4*i+3]; } ydst += lumStride; src += srcStride; for(i=0; i<chromWidth; i++) { ydst[2*i+0] = src[4*i+0]; ydst[2*i+1] = src[4*i+2]; } #endif udst += chromStride; vdst += chromStride; ydst += lumStride; src += srcStride; } #ifdef HAVE_MMX asm volatile( EMMS" \n\t" SFENCE" \n\t" :::"memory"); #endif }
20,919
1
static void tcx_update_display(void *opaque) { TCXState *ts = opaque; ram_addr_t page, page_min, page_max; int y, y_start, dd, ds; uint8_t *d, *s; void (*f)(TCXState *s1, uint8_t *dst, const uint8_t *src, int width); if (ts->ds->depth == 0) return; page = ts->vram_offset; y_start = -1; page_min = 0xffffffff; page_max = 0; d = ts->ds->data; s = ts->vram; dd = ts->ds->linesize; ds = 1024; switch (ts->ds->depth) { case 32: f = tcx_draw_line32; break; case 15: case 16: f = tcx_draw_line16; break; default: case 8: f = tcx_draw_line8; break; case 0: return; } for(y = 0; y < ts->height; y += 4, page += TARGET_PAGE_SIZE) { if (cpu_physical_memory_get_dirty(page, VGA_DIRTY_FLAG)) { if (y_start < 0) y_start = y; if (page < page_min) page_min = page; if (page > page_max) page_max = page; f(ts, d, s, ts->width); d += dd; s += ds; f(ts, d, s, ts->width); d += dd; s += ds; f(ts, d, s, ts->width); d += dd; s += ds; f(ts, d, s, ts->width); d += dd; s += ds; } else { if (y_start >= 0) { /* flush to display */ dpy_update(ts->ds, 0, y_start, ts->width, y - y_start); y_start = -1; } d += dd * 4; s += ds * 4; } } if (y_start >= 0) { /* flush to display */ dpy_update(ts->ds, 0, y_start, ts->width, y - y_start); } /* reset modified pages */ if (page_min <= page_max) { cpu_physical_memory_reset_dirty(page_min, page_max + TARGET_PAGE_SIZE, VGA_DIRTY_FLAG); } }
20,920
1
static int bamboo_load_device_tree(hwaddr addr, uint32_t ramsize, hwaddr initrd_base, hwaddr initrd_size, const char *kernel_cmdline) { int ret = -1; uint32_t mem_reg_property[] = { 0, 0, cpu_to_be32(ramsize) }; char *filename; int fdt_size; void *fdt; uint32_t tb_freq = 400000000; uint32_t clock_freq = 400000000; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE); if (!filename) { goto out; } fdt = load_device_tree(filename, &fdt_size); g_free(filename); if (fdt == NULL) { goto out; } /* Manipulate device tree in memory. */ ret = qemu_devtree_setprop(fdt, "/memory", "reg", mem_reg_property, sizeof(mem_reg_property)); if (ret < 0) fprintf(stderr, "couldn't set /memory/reg\n"); ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-start", initrd_base); if (ret < 0) fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n"); ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-end", (initrd_base + initrd_size)); if (ret < 0) fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n"); ret = qemu_devtree_setprop_string(fdt, "/chosen", "bootargs", kernel_cmdline); if (ret < 0) fprintf(stderr, "couldn't set /chosen/bootargs\n"); /* Copy data from the host device tree into the guest. Since the guest can * directly access the timebase without host involvement, we must expose * the correct frequencies. */ if (kvm_enabled()) { tb_freq = kvmppc_get_tbfreq(); clock_freq = kvmppc_get_clockfreq(); } qemu_devtree_setprop_cell(fdt, "/cpus/cpu@0", "clock-frequency", clock_freq); qemu_devtree_setprop_cell(fdt, "/cpus/cpu@0", "timebase-frequency", tb_freq); ret = rom_add_blob_fixed(BINARY_DEVICE_TREE_FILE, fdt, fdt_size, addr); g_free(fdt); out: return ret; }
20,921
1
static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size) { VP56RangeCoder *c = &s->c; int part1_size, hscale, vscale, i, j, ret; int width = s->avctx->width; int height = s->avctx->height; s->profile = (buf[0]>>1) & 7; if (s->profile > 1) { avpriv_request_sample(s->avctx, "Unknown profile %d", s->profile); s->keyframe = !(buf[0] & 1); s->invisible = 0; part1_size = AV_RL24(buf) >> 4; buf += 4 - s->profile; buf_size -= 4 - s->profile; memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab)); ff_vp56_init_range_decoder(c, buf, part1_size); buf += part1_size; buf_size -= part1_size; /* A. Dimension information (keyframes only) */ if (s->keyframe) { width = vp8_rac_get_uint(c, 12); height = vp8_rac_get_uint(c, 12); hscale = vp8_rac_get_uint(c, 2); vscale = vp8_rac_get_uint(c, 2); if (hscale || vscale) avpriv_request_sample(s->avctx, "Upscaling"); s->update_golden = s->update_altref = VP56_FRAME_CURRENT; vp78_reset_probability_tables(s); memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter, sizeof(s->prob->pred16x16)); memcpy(s->prob->pred8x8c , vp8_pred8x8c_prob_inter , sizeof(s->prob->pred8x8c)); for (i = 0; i < 2; i++) memcpy(s->prob->mvc[i], vp7_mv_default_prob[i], sizeof(vp7_mv_default_prob[i])); memset(&s->segmentation, 0, sizeof(s->segmentation)); memset(&s->lf_delta, 0, sizeof(s->lf_delta)); memcpy(s->prob[0].scan, zigzag_scan, sizeof(s->prob[0].scan)); if (s->keyframe || s->profile > 0) memset(s->inter_dc_pred, 0 , sizeof(s->inter_dc_pred)); /* B. Decoding information for all four macroblock-level features */ for (i = 0; i < 4; i++) { s->feature_enabled[i] = vp8_rac_get(c); if (s->feature_enabled[i]) { s->feature_present_prob[i] = vp8_rac_get_uint(c, 8); for (j = 0; j < 3; j++) s->feature_index_prob[i][j] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255; if (vp7_feature_value_size[i]) for (j = 0; j < 4; j++) s->feature_value[i][j] = vp8_rac_get(c) ? vp8_rac_get_uint(c, vp7_feature_value_size[s->profile][i]) : 0; s->segmentation.enabled = 0; s->segmentation.update_map = 0; s->lf_delta.enabled = 0; s->num_coeff_partitions = 1; ff_vp56_init_range_decoder(&s->coeff_partition[0], buf, buf_size); if (!s->macroblocks_base || /* first frame */ width != s->avctx->width || height != s->avctx->height || (width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) { if ((ret = update_dimensions(s, width, height)) < 0) return ret; /* C. Dequantization indices */ vp7_get_quants(s); /* D. Golden frame update flag (a Flag) for interframes only */ if (!s->keyframe) { s->update_golden = vp8_rac_get(c) ? VP56_FRAME_CURRENT : VP56_FRAME_NONE; s->sign_bias[VP56_FRAME_GOLDEN] = 0; s->update_last = 1; s->update_probabilities = 1; s->fade_present = 1; if (s->profile > 0) { s->update_probabilities = vp8_rac_get(c); if (!s->update_probabilities) s->prob[1] = s->prob[0]; if (!s->keyframe) s->fade_present = vp8_rac_get(c); /* E. Fading information for previous frame */ if (s->fade_present && vp8_rac_get(c)) { int alpha = (int8_t)vp8_rac_get_uint(c, 8); int beta = (int8_t)vp8_rac_get_uint(c, 8); if (!s->keyframe && (alpha || beta)) { /* preserve the golden frame */ if (s->framep[VP56_FRAME_GOLDEN] == s->framep[VP56_FRAME_PREVIOUS]) { AVFrame *gold = s->framep[VP56_FRAME_GOLDEN]->tf.f; AVFrame *prev; int i, j; s->framep[VP56_FRAME_PREVIOUS] = vp8_find_free_buffer(s); if ((ret = vp8_alloc_frame(s, s->framep[VP56_FRAME_PREVIOUS], 1)) < 0) return ret; prev = s->framep[VP56_FRAME_PREVIOUS]->tf.f; fade(prev->data[0], prev->linesize[0], gold->data[0], gold->linesize[0], s->mb_width * 16, s->mb_height * 16, alpha, beta); for (j = 1; j < 3; j++) for (i = 0; i < s->mb_height * 8; i++) memcpy(prev->data[j] + i * prev->linesize[j], gold->data[j] + i * gold->linesize[j], s->mb_width * 8); } else { AVFrame *prev = s->framep[VP56_FRAME_PREVIOUS]->tf.f; fade(prev->data[0], prev->linesize[0], prev->data[0], prev->linesize[0], s->mb_width * 16, s->mb_height * 16, alpha, beta); /* F. Loop filter type */ if (!s->profile) s->filter.simple = vp8_rac_get(c); /* G. DCT coefficient ordering specification */ if (vp8_rac_get(c)) for (i = 1; i < 16; i++) s->prob[0].scan[i] = zigzag_scan[vp8_rac_get_uint(c, 4)]; /* H. Loop filter levels */ if (s->profile > 0) s->filter.simple = vp8_rac_get(c); s->filter.level = vp8_rac_get_uint(c, 6); s->filter.sharpness = vp8_rac_get_uint(c, 3); /* I. DCT coefficient probability update; 13.3 Token Probability Updates */ vp78_update_probability_tables(s); s->mbskip_enabled = 0; /* J. The remaining frame header data occurs ONLY FOR INTERFRAMES */ if (!s->keyframe) { s->prob->intra = vp8_rac_get_uint(c, 8); s->prob->last = vp8_rac_get_uint(c, 8); vp78_update_pred16x16_pred8x8_mvc_probabilities(s); return 0;
20,922
1
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address, uint32_t val, int len) { pci_default_write_config(pci_dev, address, val, len); msix_write_config(pci_dev, address, val, len); }
20,923
1
static av_cold int theora_decode_init(AVCodecContext *avctx) { Vp3DecodeContext *s = avctx->priv_data; GetBitContext gb; int ptype; const uint8_t *header_start[3]; int header_len[3]; int i; avctx->pix_fmt = AV_PIX_FMT_YUV420P; s->theora = 1; if (!avctx->extradata_size) { av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n"); return -1; } if (avpriv_split_xiph_headers(avctx->extradata, avctx->extradata_size, 42, header_start, header_len) < 0) { av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n"); return -1; } for (i = 0; i < 3; i++) { if (header_len[i] <= 0) continue; init_get_bits8(&gb, header_start[i], header_len[i]); ptype = get_bits(&gb, 8); if (!(ptype & 0x80)) { av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n"); // return -1; } // FIXME: Check for this as well. skip_bits_long(&gb, 6 * 8); /* "theora" */ switch (ptype) { case 0x80: if (theora_decode_header(avctx, &gb) < 0) return -1; break; case 0x81: // FIXME: is this needed? it breaks sometimes // theora_decode_comments(avctx, gb); break; case 0x82: if (theora_decode_tables(avctx, &gb)) return -1; break; default: av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype & ~0x80); break; } if (ptype != 0x81 && 8 * header_len[i] != get_bits_count(&gb)) av_log(avctx, AV_LOG_WARNING, "%d bits left in packet %X\n", 8 * header_len[i] - get_bits_count(&gb), ptype); if (s->theora < 0x030200) break; } return vp3_decode_init(avctx); }
20,924
1
uint64_t helper_sublv(CPUAlphaState *env, uint64_t op1, uint64_t op2) { uint32_t res; res = op1 - op2; if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) { arith_excp(env, GETPC(), EXC_M_IOV, 0); } return res; }
20,925
0
static int decode_mb_info(IVI45DecContext *ctx, IVIBandDesc *band, IVITile *tile, AVCodecContext *avctx) { int x, y, mv_x, mv_y, mv_delta, offs, mb_offset, mv_scale, blks_per_mb; IVIMbInfo *mb, *ref_mb; int row_offset = band->mb_size * band->pitch; mb = tile->mbs; ref_mb = tile->ref_mbs; offs = tile->ypos * band->pitch + tile->xpos; if (!ref_mb && ((band->qdelta_present && band->inherit_qdelta) || band->inherit_mv)) return AVERROR_INVALIDDATA; if (tile->num_MBs != IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)) { av_log(avctx, AV_LOG_ERROR, "Allocated tile size %d mismatches parameters %d\n", tile->num_MBs, IVI_MBs_PER_TILE(tile->width, tile->height, band->mb_size)); return AVERROR_INVALIDDATA; } /* scale factor for motion vectors */ mv_scale = (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3); mv_x = mv_y = 0; for (y = tile->ypos; y < (tile->ypos + tile->height); y += band->mb_size) { mb_offset = offs; for (x = tile->xpos; x < (tile->xpos + tile->width); x += band->mb_size) { mb->xpos = x; mb->ypos = y; mb->buf_offs = mb_offset; if (get_bits1(&ctx->gb)) { if (ctx->frame_type == FRAMETYPE_INTRA) { av_log(avctx, AV_LOG_ERROR, "Empty macroblock in an INTRA picture!\n"); return -1; } mb->type = 1; /* empty macroblocks are always INTER */ mb->cbp = 0; /* all blocks are empty */ mb->q_delta = 0; if (!band->plane && !band->band_num && (ctx->frame_flags & 8)) { mb->q_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mb->q_delta = IVI_TOSIGNED(mb->q_delta); } mb->mv_x = mb->mv_y = 0; /* no motion vector coded */ if (band->inherit_mv){ /* motion vector inheritance */ if (mv_scale) { mb->mv_x = ivi_scale_mv(ref_mb->mv_x, mv_scale); mb->mv_y = ivi_scale_mv(ref_mb->mv_y, mv_scale); } else { mb->mv_x = ref_mb->mv_x; mb->mv_y = ref_mb->mv_y; } } } else { if (band->inherit_mv) { mb->type = ref_mb->type; /* copy mb_type from corresponding reference mb */ } else if (ctx->frame_type == FRAMETYPE_INTRA) { mb->type = 0; /* mb_type is always INTRA for intra-frames */ } else { mb->type = get_bits1(&ctx->gb); } blks_per_mb = band->mb_size != band->blk_size ? 4 : 1; mb->cbp = get_bits(&ctx->gb, blks_per_mb); mb->q_delta = 0; if (band->qdelta_present) { if (band->inherit_qdelta) { if (ref_mb) mb->q_delta = ref_mb->q_delta; } else if (mb->cbp || (!band->plane && !band->band_num && (ctx->frame_flags & 8))) { mb->q_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mb->q_delta = IVI_TOSIGNED(mb->q_delta); } } if (!mb->type) { mb->mv_x = mb->mv_y = 0; /* there is no motion vector in intra-macroblocks */ } else { if (band->inherit_mv){ /* motion vector inheritance */ if (mv_scale) { mb->mv_x = ivi_scale_mv(ref_mb->mv_x, mv_scale); mb->mv_y = ivi_scale_mv(ref_mb->mv_y, mv_scale); } else { mb->mv_x = ref_mb->mv_x; mb->mv_y = ref_mb->mv_y; } } else { /* decode motion vector deltas */ mv_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mv_y += IVI_TOSIGNED(mv_delta); mv_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mv_x += IVI_TOSIGNED(mv_delta); mb->mv_x = mv_x; mb->mv_y = mv_y; if (mv_x < 0 || mv_y < 0) { av_log(avctx, AV_LOG_ERROR, "Invalid MV %d %d\n", mv_x, mv_y); mb->mv_x = mb->mv_y = 0; return AVERROR_INVALIDDATA; } } } } mb++; if (ref_mb) ref_mb++; mb_offset += band->mb_size; } offs += row_offset; } align_get_bits(&ctx->gb); return 0; }
20,926
0
static int huffman_decode(MPADecodeContext *s, GranuleDef *g, int16_t *exponents, int end_pos) { int s_index; int linbits, code, x, y, l, v, i, j, k, pos; int last_pos; VLC *vlc; /* low frequencies (called big values) */ s_index = 0; for(i=0;i<3;i++) { j = g->region_size[i]; if (j == 0) continue; /* select vlc table */ k = g->table_select[i]; l = mpa_huff_data[k][0]; linbits = mpa_huff_data[k][1]; vlc = &huff_vlc[l]; if(!l){ memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*j); s_index += 2*j; continue; } /* read huffcode and compute each couple */ for(;j>0;j--) { int exponent; if (get_bits_count(&s->gb) >= end_pos) break; y = get_vlc2(&s->gb, vlc->table, 7, 3); if(!y){ g->sb_hybrid[s_index ] = g->sb_hybrid[s_index+1] = 0; s_index += 2; continue; } x = y >> 4; y = y & 0x0f; exponent= exponents[s_index]; dprintf("region=%d n=%d x=%d y=%d exp=%d\n", i, g->region_size[i] - j, x, y, exponent); if (x) { #if 0 if (x == 15) x += get_bitsz(&s->gb, linbits); v = l3_unscale(x, exponent); #else if (x < 15){ v = expval_table[ exponent + 400 ][ x ]; // v = expval_table[ (exponent&3) + 400 ][ x ] >> FFMIN(0 - (exponent>>2), 31); }else{ x += get_bitsz(&s->gb, linbits); v = l3_unscale(x, exponent); } #endif if (get_bits1(&s->gb)) v = -v; } else { v = 0; } g->sb_hybrid[s_index++] = v; if (y) { #if 0 if (y == 15) y += get_bitsz(&s->gb, linbits); v = l3_unscale(y, exponent); #else if (y < 15){ v = expval_table[ exponent + 400 ][ y ]; }else{ y += get_bitsz(&s->gb, linbits); v = l3_unscale(y, exponent); } #endif if (get_bits1(&s->gb)) v = -v; } else { v = 0; } g->sb_hybrid[s_index++] = v; } } /* high frequencies */ vlc = &huff_quad_vlc[g->count1table_select]; last_pos=0; while (s_index <= 572) { pos = get_bits_count(&s->gb); if (pos >= end_pos) { if (pos > end_pos && last_pos){ /* some encoders generate an incorrect size for this part. We must go back into the data */ s_index -= 4; init_get_bits(&s->gb, s->gb.buffer + 4*(last_pos>>5), s->gb.size_in_bits - (last_pos&(~31))); skip_bits(&s->gb, last_pos&31); } break; } last_pos= pos; code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1); dprintf("t=%d code=%d\n", g->count1table_select, code); g->sb_hybrid[s_index+0]= g->sb_hybrid[s_index+1]= g->sb_hybrid[s_index+2]= g->sb_hybrid[s_index+3]= 0; while(code){ const static int idxtab[16]={3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0}; int pos= s_index+idxtab[code]; code ^= 8>>idxtab[code]; v = exp_table[ exponents[pos] + 400]; if(get_bits1(&s->gb)) v = -v; g->sb_hybrid[pos] = v; } s_index+=4; } memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*(576 - s_index)); return 0; }
20,927
0
static int vma_add_mapping(struct mm_struct *mm, abi_ulong start, abi_ulong end, abi_ulong flags) { struct vm_area_struct *vma; if ((vma = qemu_mallocz(sizeof (*vma))) == NULL) return (-1); vma->vma_start = start; vma->vma_end = end; vma->vma_flags = flags; TAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link); mm->mm_count++; return (0); }
20,928
0
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr, abi_ulong target_addr, socklen_t len) { const socklen_t unix_maxlen = sizeof (struct sockaddr_un); sa_family_t sa_family; struct target_sockaddr *target_saddr; if (fd_trans_target_to_host_addr(fd)) { return fd_trans_target_to_host_addr(fd)(addr, target_addr, len); } target_saddr = lock_user(VERIFY_READ, target_addr, len, 1); if (!target_saddr) return -TARGET_EFAULT; sa_family = tswap16(target_saddr->sa_family); /* Oops. The caller might send a incomplete sun_path; sun_path * must be terminated by \0 (see the manual page), but * unfortunately it is quite common to specify sockaddr_un * length as "strlen(x->sun_path)" while it should be * "strlen(...) + 1". We'll fix that here if needed. * Linux kernel has a similar feature. */ if (sa_family == AF_UNIX) { if (len < unix_maxlen && len > 0) { char *cp = (char*)target_saddr; if ( cp[len-1] && !cp[len] ) len++; } if (len > unix_maxlen) len = unix_maxlen; } memcpy(addr, target_saddr, len); addr->sa_family = sa_family; if (sa_family == AF_PACKET) { struct target_sockaddr_ll *lladdr; lladdr = (struct target_sockaddr_ll *)addr; lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex); lladdr->sll_hatype = tswap16(lladdr->sll_hatype); } unlock_user(target_saddr, target_addr, 0); return 0; }
20,930
0
int kvm_arch_put_registers(CPUState *cpu, int level) { X86CPU *x86_cpu = X86_CPU(cpu); int ret; assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu)); if (level >= KVM_PUT_RESET_STATE) { ret = kvm_put_msr_feature_control(x86_cpu); if (ret < 0) { return ret; } } if (level == KVM_PUT_FULL_STATE) { /* We don't check for kvm_arch_set_tsc_khz() errors here, * because TSC frequency mismatch shouldn't abort migration, * unless the user explicitly asked for a more strict TSC * setting (e.g. using an explicit "tsc-freq" option). */ kvm_arch_set_tsc_khz(cpu); } ret = kvm_getput_regs(x86_cpu, 1); if (ret < 0) { return ret; } ret = kvm_put_xsave(x86_cpu); if (ret < 0) { return ret; } ret = kvm_put_xcrs(x86_cpu); if (ret < 0) { return ret; } ret = kvm_put_sregs(x86_cpu); if (ret < 0) { return ret; } /* must be before kvm_put_msrs */ ret = kvm_inject_mce_oldstyle(x86_cpu); if (ret < 0) { return ret; } ret = kvm_put_msrs(x86_cpu, level); if (ret < 0) { return ret; } if (level >= KVM_PUT_RESET_STATE) { ret = kvm_put_mp_state(x86_cpu); if (ret < 0) { return ret; } } ret = kvm_put_tscdeadline_msr(x86_cpu); if (ret < 0) { return ret; } ret = kvm_put_vcpu_events(x86_cpu, level); if (ret < 0) { return ret; } ret = kvm_put_debugregs(x86_cpu); if (ret < 0) { return ret; } /* must be last */ ret = kvm_guest_debug_workarounds(x86_cpu); if (ret < 0) { return ret; } return 0; }
20,931
0
void ff_proresdsp_x86_init(ProresDSPContext *dsp) { #if ARCH_X86_64 && HAVE_YASM int flags = av_get_cpu_flags(); if (flags & AV_CPU_FLAG_SSE2) { dsp->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM; dsp->idct_put = ff_prores_idct_put_10_sse2; } if (flags & AV_CPU_FLAG_SSE4) { dsp->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM; dsp->idct_put = ff_prores_idct_put_10_sse4; } #if HAVE_AVX if (flags & AV_CPU_FLAG_AVX) { dsp->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM; dsp->idct_put = ff_prores_idct_put_10_avx; } #endif /* HAVE_AVX */ #endif /* ARCH_X86_64 && HAVE_YASM */ }
20,932
0
void virtio_scsi_set_iothread(VirtIOSCSI *s, IOThread *iothread) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(s))); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); assert(!s->ctx); s->ctx = iothread_get_aio_context(vs->conf.iothread); /* Don't try if transport does not support notifiers. */ if (!k->set_guest_notifiers || !k->ioeventfd_assign) { fprintf(stderr, "virtio-scsi: Failed to set iothread " "(transport does not support notifiers)"); exit(1); } }
20,933
0
void OPPROTO op_decl_ECX(void) { ECX = (uint32_t)(ECX - 1); }
20,934
0
static void qapi_dealloc_type_str(Visitor *v, char **obj, const char *name, Error **errp) { g_free(*obj); }
20,935
0
static inline void pxa2xx_rtc_int_update(PXA2xxState *s) { qemu_set_irq(s->pic[PXA2XX_PIC_RTCALARM], !!(s->rtsr & 0x2553)); }
20,936
0
static void kvm_s390_enable_cmma(KVMState *s) { int rc; struct kvm_device_attr attr = { .group = KVM_S390_VM_MEM_CTRL, .attr = KVM_S390_VM_MEM_ENABLE_CMMA, }; if (kvm_s390_check_enable_cmma(s) || kvm_s390_check_clear_cmma(s)) { return; } rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr); if (!rc) { qemu_register_reset(kvm_s390_clear_cmma_callback, s); } trace_kvm_enable_cmma(rc); }
20,937
0
int kvm_arch_init_vcpu(CPUX86State *env) { struct { struct kvm_cpuid2 cpuid; struct kvm_cpuid_entry2 entries[100]; } QEMU_PACKED cpuid_data; KVMState *s = env->kvm_state; uint32_t limit, i, j, cpuid_i; uint32_t unused; struct kvm_cpuid_entry2 *c; uint32_t signature[3]; int r; env->cpuid_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX); j = env->cpuid_ext_features & CPUID_EXT_TSC_DEADLINE_TIMER; env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX); if (j && kvm_irqchip_in_kernel() && kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) { env->cpuid_ext_features |= CPUID_EXT_TSC_DEADLINE_TIMER; } env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX); env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_ECX); env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(s, 0x8000000A, 0, R_EDX); cpuid_i = 0; /* Paravirtualization CPUIDs */ c = &cpuid_data.entries[cpuid_i++]; memset(c, 0, sizeof(*c)); c->function = KVM_CPUID_SIGNATURE; if (!hyperv_enabled()) { memcpy(signature, "KVMKVMKVM\0\0\0", 12); c->eax = 0; } else { memcpy(signature, "Microsoft Hv", 12); c->eax = HYPERV_CPUID_MIN; } c->ebx = signature[0]; c->ecx = signature[1]; c->edx = signature[2]; c = &cpuid_data.entries[cpuid_i++]; memset(c, 0, sizeof(*c)); c->function = KVM_CPUID_FEATURES; c->eax = env->cpuid_kvm_features & kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX); if (hyperv_enabled()) { memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12); c->eax = signature[0]; c = &cpuid_data.entries[cpuid_i++]; memset(c, 0, sizeof(*c)); c->function = HYPERV_CPUID_VERSION; c->eax = 0x00001bbc; c->ebx = 0x00060001; c = &cpuid_data.entries[cpuid_i++]; memset(c, 0, sizeof(*c)); c->function = HYPERV_CPUID_FEATURES; if (hyperv_relaxed_timing_enabled()) { c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE; } if (hyperv_vapic_recommended()) { c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE; c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE; } c = &cpuid_data.entries[cpuid_i++]; memset(c, 0, sizeof(*c)); c->function = HYPERV_CPUID_ENLIGHTMENT_INFO; if (hyperv_relaxed_timing_enabled()) { c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; } if (hyperv_vapic_recommended()) { c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; } c->ebx = hyperv_get_spinlock_retries(); c = &cpuid_data.entries[cpuid_i++]; memset(c, 0, sizeof(*c)); c->function = HYPERV_CPUID_IMPLEMENT_LIMITS; c->eax = 0x40; c->ebx = 0x40; c = &cpuid_data.entries[cpuid_i++]; memset(c, 0, sizeof(*c)); c->function = KVM_CPUID_SIGNATURE_NEXT; memcpy(signature, "KVMKVMKVM\0\0\0", 12); c->eax = 0; c->ebx = signature[0]; c->ecx = signature[1]; c->edx = signature[2]; } has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF); has_msr_pv_eoi_en = c->eax & (1 << KVM_FEATURE_PV_EOI); cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused); for (i = 0; i <= limit; i++) { c = &cpuid_data.entries[cpuid_i++]; switch (i) { case 2: { /* Keep reading function 2 till all the input is received */ int times; c->function = i; c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC | KVM_CPUID_FLAG_STATE_READ_NEXT; cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); times = c->eax & 0xff; for (j = 1; j < times; ++j) { c = &cpuid_data.entries[cpuid_i++]; c->function = i; c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC; cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); } break; } case 4: case 0xb: case 0xd: for (j = 0; ; j++) { if (i == 0xd && j == 64) { break; } c->function = i; c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; c->index = j; cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx); if (i == 4 && c->eax == 0) { break; } if (i == 0xb && !(c->ecx & 0xff00)) { break; } if (i == 0xd && c->eax == 0) { continue; } c = &cpuid_data.entries[cpuid_i++]; } break; default: c->function = i; c->flags = 0; cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); break; } } cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused); for (i = 0x80000000; i <= limit; i++) { c = &cpuid_data.entries[cpuid_i++]; c->function = i; c->flags = 0; cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); } /* Call Centaur's CPUID instructions they are supported. */ if (env->cpuid_xlevel2 > 0) { env->cpuid_ext4_features &= kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX); cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused); for (i = 0xC0000000; i <= limit; i++) { c = &cpuid_data.entries[cpuid_i++]; c->function = i; c->flags = 0; cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); } } cpuid_data.cpuid.nent = cpuid_i; if (((env->cpuid_version >> 8)&0xF) >= 6 && (env->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA) && kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) { uint64_t mcg_cap; int banks; int ret; ret = kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks); if (ret < 0) { fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret)); return ret; } if (banks > MCE_BANKS_DEF) { banks = MCE_BANKS_DEF; } mcg_cap &= MCE_CAP_DEF; mcg_cap |= banks; ret = kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, &mcg_cap); if (ret < 0) { fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret)); return ret; } env->mcg_cap = mcg_cap; } qemu_add_vm_change_state_handler(cpu_update_state, env); cpuid_data.cpuid.padding = 0; r = kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data); if (r) { return r; } r = kvm_check_extension(env->kvm_state, KVM_CAP_TSC_CONTROL); if (r && env->tsc_khz) { r = kvm_vcpu_ioctl(env, KVM_SET_TSC_KHZ, env->tsc_khz); if (r < 0) { fprintf(stderr, "KVM_SET_TSC_KHZ failed\n"); return r; } } if (kvm_has_xsave()) { env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave)); } return 0; }
20,939
0
void qmp_blockdev_snapshot(const char *node, const char *overlay, Error **errp) { BlockdevSnapshot snapshot_data = { .node = (char *) node, .overlay = (char *) overlay }; TransactionAction action = { .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT, .u.blockdev_snapshot = &snapshot_data, }; blockdev_do_action(&action, errp); }
20,940
0
CPUX86State *cpu_x86_init_user(const char *cpu_model) { Error *error = NULL; X86CPU *cpu; cpu = cpu_x86_create(cpu_model, NULL, &error); if (error) { goto out; } object_property_set_bool(OBJECT(cpu), true, "realized", &error); out: if (error) { error_report("%s", error_get_pretty(error)); error_free(error); if (cpu != NULL) { object_unref(OBJECT(cpu)); } return NULL; } return &cpu->env; }
20,941
0
static int nbd_negotiate_options(NBDClient *client, uint16_t myflags, Error **errp) { uint32_t flags; bool fixedNewstyle = false; bool no_zeroes = false; /* Client sends: [ 0 .. 3] client flags Then we loop until NBD_OPT_EXPORT_NAME or NBD_OPT_GO: [ 0 .. 7] NBD_OPTS_MAGIC [ 8 .. 11] NBD option [12 .. 15] Data length ... Rest of request [ 0 .. 7] NBD_OPTS_MAGIC [ 8 .. 11] Second NBD option [12 .. 15] Data length ... Rest of request */ if (nbd_read(client->ioc, &flags, sizeof(flags), errp) < 0) { error_prepend(errp, "read failed: "); return -EIO; } be32_to_cpus(&flags); trace_nbd_negotiate_options_flags(flags); if (flags & NBD_FLAG_C_FIXED_NEWSTYLE) { fixedNewstyle = true; flags &= ~NBD_FLAG_C_FIXED_NEWSTYLE; } if (flags & NBD_FLAG_C_NO_ZEROES) { no_zeroes = true; flags &= ~NBD_FLAG_C_NO_ZEROES; } if (flags != 0) { error_setg(errp, "Unknown client flags 0x%" PRIx32 " received", flags); return -EINVAL; } while (1) { int ret; uint32_t option, length; uint64_t magic; if (nbd_read(client->ioc, &magic, sizeof(magic), errp) < 0) { error_prepend(errp, "read failed: "); return -EINVAL; } magic = be64_to_cpu(magic); trace_nbd_negotiate_options_check_magic(magic); if (magic != NBD_OPTS_MAGIC) { error_setg(errp, "Bad magic received"); return -EINVAL; } if (nbd_read(client->ioc, &option, sizeof(option), errp) < 0) { error_prepend(errp, "read failed: "); return -EINVAL; } option = be32_to_cpu(option); if (nbd_read(client->ioc, &length, sizeof(length), errp) < 0) { error_prepend(errp, "read failed: "); return -EINVAL; } length = be32_to_cpu(length); trace_nbd_negotiate_options_check_option(option, nbd_opt_lookup(option)); if (client->tlscreds && client->ioc == (QIOChannel *)client->sioc) { QIOChannel *tioc; if (!fixedNewstyle) { error_setg(errp, "Unsupported option 0x%" PRIx32, option); return -EINVAL; } switch (option) { case NBD_OPT_STARTTLS: tioc = nbd_negotiate_handle_starttls(client, length, errp); if (!tioc) { return -EIO; } object_unref(OBJECT(client->ioc)); client->ioc = QIO_CHANNEL(tioc); break; case NBD_OPT_EXPORT_NAME: /* No way to return an error to client, so drop connection */ error_setg(errp, "Option 0x%x not permitted before TLS", option); return -EINVAL; default: if (nbd_drop(client->ioc, length, errp) < 0) { return -EIO; } ret = nbd_negotiate_send_rep_err(client->ioc, NBD_REP_ERR_TLS_REQD, option, errp, "Option 0x%" PRIx32 "not permitted before TLS", option); if (ret < 0) { return ret; } /* Let the client keep trying, unless they asked to * quit. In this mode, we've already sent an error, so * we can't ack the abort. */ if (option == NBD_OPT_ABORT) { return 1; } break; } } else if (fixedNewstyle) { switch (option) { case NBD_OPT_LIST: ret = nbd_negotiate_handle_list(client, length, errp); if (ret < 0) { return ret; } break; case NBD_OPT_ABORT: /* NBD spec says we must try to reply before * disconnecting, but that we must also tolerate * guests that don't wait for our reply. */ nbd_negotiate_send_rep(client->ioc, NBD_REP_ACK, option, NULL); return 1; case NBD_OPT_EXPORT_NAME: return nbd_negotiate_handle_export_name(client, length, myflags, no_zeroes, errp); case NBD_OPT_INFO: case NBD_OPT_GO: ret = nbd_negotiate_handle_info(client, length, option, myflags, errp); if (ret == 1) { assert(option == NBD_OPT_GO); return 0; } if (ret) { return ret; } break; case NBD_OPT_STARTTLS: if (nbd_drop(client->ioc, length, errp) < 0) { return -EIO; } if (client->tlscreds) { ret = nbd_negotiate_send_rep_err(client->ioc, NBD_REP_ERR_INVALID, option, errp, "TLS already enabled"); } else { ret = nbd_negotiate_send_rep_err(client->ioc, NBD_REP_ERR_POLICY, option, errp, "TLS not configured"); } if (ret < 0) { return ret; } break; default: if (nbd_drop(client->ioc, length, errp) < 0) { return -EIO; } ret = nbd_negotiate_send_rep_err(client->ioc, NBD_REP_ERR_UNSUP, option, errp, "Unsupported option 0x%" PRIx32 " (%s)", option, nbd_opt_lookup(option)); if (ret < 0) { return ret; } break; } } else { /* * If broken new-style we should drop the connection * for anything except NBD_OPT_EXPORT_NAME */ switch (option) { case NBD_OPT_EXPORT_NAME: return nbd_negotiate_handle_export_name(client, length, myflags, no_zeroes, errp); default: error_setg(errp, "Unsupported option 0x%" PRIx32 " (%s)", option, nbd_opt_lookup(option)); return -EINVAL; } } } }
20,942
0
static void print_all_libs_info(int flags, int level) { PRINT_LIB_INFO(avutil, AVUTIL, flags, level); PRINT_LIB_INFO(avcodec, AVCODEC, flags, level); PRINT_LIB_INFO(avformat, AVFORMAT, flags, level); PRINT_LIB_INFO(avdevice, AVDEVICE, flags, level); PRINT_LIB_INFO(avfilter, AVFILTER, flags, level); PRINT_LIB_INFO(avresample, AVRESAMPLE, flags, level); PRINT_LIB_INFO(swscale, SWSCALE, flags, level); PRINT_LIB_INFO(swresample,SWRESAMPLE, flags, level); #if CONFIG_POSTPROC PRINT_LIB_INFO(postproc, POSTPROC, flags, level); #endif }
20,943
0
static uint32_t sd_wpbits(SDState *sd, uint64_t addr) { uint32_t i, wpnum; uint32_t ret = 0; wpnum = addr >> (HWBLOCK_SHIFT + SECTOR_SHIFT + WPGROUP_SHIFT); for (i = 0; i < 32; i ++, wpnum ++, addr += WPGROUP_SIZE) if (addr < sd->size && sd->wp_groups[wpnum]) ret |= (1 << i); return ret; }
20,944
0
static TileExcp decode_x0(DisasContext *dc, tilegx_bundle_bits bundle) { unsigned opc = get_Opcode_X0(bundle); unsigned dest = get_Dest_X0(bundle); unsigned srca = get_SrcA_X0(bundle); unsigned ext, srcb, bfs, bfe; int imm; switch (opc) { case RRR_0_OPCODE_X0: ext = get_RRROpcodeExtension_X0(bundle); if (ext == UNARY_RRR_0_OPCODE_X0) { ext = get_UnaryOpcodeExtension_X0(bundle); return gen_rr_opcode(dc, OE(opc, ext, X0), dest, srca); } srcb = get_SrcB_X0(bundle); return gen_rrr_opcode(dc, OE(opc, ext, X0), dest, srca, srcb); case SHIFT_OPCODE_X0: ext = get_ShiftOpcodeExtension_X0(bundle); imm = get_ShAmt_X0(bundle); return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm); case IMM8_OPCODE_X0: ext = get_Imm8OpcodeExtension_X0(bundle); imm = (int8_t)get_Imm8_X0(bundle); return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm); case BF_OPCODE_X0: ext = get_BFOpcodeExtension_X0(bundle); bfs = get_BFStart_X0(bundle); bfe = get_BFEnd_X0(bundle); return gen_bf_opcode_x0(dc, ext, dest, srca, bfs, bfe); case ADDLI_OPCODE_X0: case SHL16INSLI_OPCODE_X0: case ADDXLI_OPCODE_X0: imm = (int16_t)get_Imm16_X0(bundle); return gen_rri_opcode(dc, OE(opc, 0, X0), dest, srca, imm); default: return TILEGX_EXCP_OPCODE_UNIMPLEMENTED; } }
20,946
0
static int alloc_f(int argc, char **argv) { int64_t offset; int nb_sectors, remaining; char s1[64]; int num, sum_alloc; int ret; offset = cvtnum(argv[1]); if (offset & 0x1ff) { printf("offset %" PRId64 " is not sector aligned\n", offset); return 0; } if (argc == 3) { nb_sectors = cvtnum(argv[2]); } else { nb_sectors = 1; } remaining = nb_sectors; sum_alloc = 0; while (remaining) { ret = bdrv_is_allocated(bs, offset >> 9, nb_sectors, &num); remaining -= num; if (ret) { sum_alloc += num; } } cvtstr(offset, s1, sizeof(s1)); printf("%d/%d sectors allocated at offset %s\n", sum_alloc, nb_sectors, s1); return 0; }
20,947
0
static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret) { assert(r->req.aiocb == NULL); if (r->req.io_canceled) { scsi_req_cancel_complete(&r->req); goto done; } if (ret < 0) { if (scsi_handle_rw_error(r, -ret, false)) { goto done; } } r->sector += r->sector_count; r->sector_count = 0; if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { scsi_write_do_fua(r); return; } else { scsi_req_complete(&r->req, GOOD); } done: scsi_req_unref(&r->req); }
20,948
0
static void usb_keyboard_class_initfn(ObjectClass *klass, void *data) { USBDeviceClass *uc = USB_DEVICE_CLASS(klass); uc->init = usb_keyboard_initfn; uc->product_desc = "QEMU USB Keyboard"; uc->usb_desc = &desc_keyboard; uc->handle_packet = usb_generic_handle_packet; uc->handle_reset = usb_hid_handle_reset; uc->handle_control = usb_hid_handle_control; uc->handle_data = usb_hid_handle_data; uc->handle_destroy = usb_hid_handle_destroy; }
20,949
0
static void omap_sti_fifo_write(void *opaque, target_phys_addr_t addr, uint32_t value) { struct omap_sti_s *s = (struct omap_sti_s *) opaque; int offset = addr - s->channel_base; int ch = offset >> 6; uint8_t byte = value; if (ch == STI_TRACE_CONTROL_CHANNEL) { /* Flush channel <i>value</i>. */ qemu_chr_write(s->chr, "\r", 1); } else if (ch == STI_TRACE_CONSOLE_CHANNEL || 1) { if (value == 0xc0 || value == 0xc3) { /* Open channel <i>ch</i>. */ } else if (value == 0x00) qemu_chr_write(s->chr, "\n", 1); else qemu_chr_write(s->chr, &byte, 1); } }
20,950
0
static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) { static const S390Opcode ni_insns[4] = { RI_NILL, RI_NILH, RI_NIHL, RI_NIHH }; static const S390Opcode nif_insns[2] = { RIL_NILF, RIL_NIHF }; uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull); int i; /* Look for the zero-extensions. */ if ((val & valid) == 0xffffffff) { tgen_ext32u(s, dest, dest); return; } if (facilities & FACILITY_EXT_IMM) { if ((val & valid) == 0xff) { tgen_ext8u(s, TCG_TYPE_I64, dest, dest); return; } if ((val & valid) == 0xffff) { tgen_ext16u(s, TCG_TYPE_I64, dest, dest); return; } } /* Try all 32-bit insns that can perform it in one go. */ for (i = 0; i < 4; i++) { tcg_target_ulong mask = ~(0xffffull << i*16); if (((val | ~valid) & mask) == mask) { tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16); return; } } /* Try all 48-bit insns that can perform it in one go. */ if (facilities & FACILITY_EXT_IMM) { for (i = 0; i < 2; i++) { tcg_target_ulong mask = ~(0xffffffffull << i*32); if (((val | ~valid) & mask) == mask) { tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32); return; } } } if ((facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) { tgen_andi_risbg(s, dest, dest, val); return; } /* Fall back to loading the constant. */ tcg_out_movi(s, type, TCG_TMP0, val); if (type == TCG_TYPE_I32) { tcg_out_insn(s, RR, NR, dest, TCG_TMP0); } else { tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0); } }
20,951
0
static void nand_realize(DeviceState *dev, Error **errp) { int pagesize; NANDFlashState *s = NAND(dev); s->buswidth = nand_flash_ids[s->chip_id].width >> 3; s->size = nand_flash_ids[s->chip_id].size << 20; if (nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) { s->page_shift = 11; s->erase_shift = 6; } else { s->page_shift = nand_flash_ids[s->chip_id].page_shift; s->erase_shift = nand_flash_ids[s->chip_id].erase_shift; } switch (1 << s->page_shift) { case 256: nand_init_256(s); break; case 512: nand_init_512(s); break; case 2048: nand_init_2048(s); break; default: error_setg(errp, "Unsupported NAND block size %#x\n", 1 << s->page_shift); return; } pagesize = 1 << s->oob_shift; s->mem_oob = 1; if (s->bdrv) { if (bdrv_is_read_only(s->bdrv)) { error_setg(errp, "Can't use a read-only drive"); return; } if (bdrv_getlength(s->bdrv) >= (s->pages << s->page_shift) + (s->pages << s->oob_shift)) { pagesize = 0; s->mem_oob = 0; } } else { pagesize += 1 << s->page_shift; } if (pagesize) { s->storage = (uint8_t *) memset(g_malloc(s->pages * pagesize), 0xff, s->pages * pagesize); } /* Give s->ioaddr a sane value in case we save state before it is used. */ s->ioaddr = s->io; }
20,952
0
static inline void gen_intermediate_code_internal(ARMCPU *cpu, TranslationBlock *tb, bool search_pc) { CPUState *cs = CPU(cpu); CPUARMState *env = &cpu->env; DisasContext dc1, *dc = &dc1; CPUBreakpoint *bp; int j, lj; target_ulong pc_start; target_ulong next_page_start; int num_insns; int max_insns; /* generate intermediate code */ /* The A64 decoder has its own top level loop, because it doesn't need * the A32/T32 complexity to do with conditional execution/IT blocks/etc. */ if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) { gen_intermediate_code_internal_a64(cpu, tb, search_pc); return; } pc_start = tb->pc; dc->tb = tb; dc->is_jmp = DISAS_NEXT; dc->pc = pc_start; dc->singlestep_enabled = cs->singlestep_enabled; dc->condjmp = 0; dc->aarch64 = 0; dc->el3_is_aa64 = arm_el_is_aa64(env, 3); dc->thumb = ARM_TBFLAG_THUMB(tb->flags); dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags); dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1; dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4; dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags); dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx); #if !defined(CONFIG_USER_ONLY) dc->user = (dc->current_el == 0); #endif dc->ns = ARM_TBFLAG_NS(tb->flags); dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags); dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags); dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags); dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags); dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags); dc->cp_regs = cpu->cp_regs; dc->features = env->features; /* Single step state. The code-generation logic here is: * SS_ACTIVE == 0: * generate code with no special handling for single-stepping (except * that anything that can make us go to SS_ACTIVE == 1 must end the TB; * this happens anyway because those changes are all system register or * PSTATE writes). * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending) * emit code for one insn * emit code to clear PSTATE.SS * emit code to generate software step exception for completed step * end TB (as usual for having generated an exception) * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending) * emit code to generate a software step exception * end the TB */ dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags); dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags); dc->is_ldex = false; dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */ cpu_F0s = tcg_temp_new_i32(); cpu_F1s = tcg_temp_new_i32(); cpu_F0d = tcg_temp_new_i64(); cpu_F1d = tcg_temp_new_i64(); cpu_V0 = cpu_F0d; cpu_V1 = cpu_F1d; /* FIXME: cpu_M0 can probably be the same as cpu_V0. */ cpu_M0 = tcg_temp_new_i64(); next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; lj = -1; num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) max_insns = CF_COUNT_MASK; gen_tb_start(tb); tcg_clear_temp_count(); /* A note on handling of the condexec (IT) bits: * * We want to avoid the overhead of having to write the updated condexec * bits back to the CPUARMState for every instruction in an IT block. So: * (1) if the condexec bits are not already zero then we write * zero back into the CPUARMState now. This avoids complications trying * to do it at the end of the block. (For example if we don't do this * it's hard to identify whether we can safely skip writing condexec * at the end of the TB, which we definitely want to do for the case * where a TB doesn't do anything with the IT state at all.) * (2) if we are going to leave the TB then we call gen_set_condexec() * which will write the correct value into CPUARMState if zero is wrong. * This is done both for leaving the TB at the end, and for leaving * it because of an exception we know will happen, which is done in * gen_exception_insn(). The latter is necessary because we need to * leave the TB with the PC/IT state just prior to execution of the * instruction which caused the exception. * (3) if we leave the TB unexpectedly (eg a data abort on a load) * then the CPUARMState will be wrong and we need to reset it. * This is handled in the same way as restoration of the * PC in these situations: we will be called again with search_pc=1 * and generate a mapping of the condexec bits for each PC in * gen_opc_condexec_bits[]. restore_state_to_opc() then uses * this to restore the condexec bits. * * Note that there are no instructions which can read the condexec * bits, and none which can write non-static values to them, so * we don't need to care about whether CPUARMState is correct in the * middle of a TB. */ /* Reset the conditional execution bits immediately. This avoids complications trying to do it at the end of the block. */ if (dc->condexec_mask || dc->condexec_cond) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); store_cpu_field(tmp, condexec_bits); } do { #ifdef CONFIG_USER_ONLY /* Intercept jump to the magic kernel page. */ if (dc->pc >= 0xffff0000) { /* We always get here via a jump, so know we are not in a conditional execution block. */ gen_exception_internal(EXCP_KERNEL_TRAP); dc->is_jmp = DISAS_UPDATE; break; } #else if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) { /* We always get here via a jump, so know we are not in a conditional execution block. */ gen_exception_internal(EXCP_EXCEPTION_EXIT); dc->is_jmp = DISAS_UPDATE; break; } #endif if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { if (bp->pc == dc->pc) { gen_exception_internal_insn(dc, 0, EXCP_DEBUG); /* Advance PC so that clearing the breakpoint will invalidate this TB. */ dc->pc += 2; goto done_generating; } } } if (search_pc) { j = tcg_op_buf_count(); if (lj < j) { lj++; while (lj < j) tcg_ctx.gen_opc_instr_start[lj++] = 0; } tcg_ctx.gen_opc_pc[lj] = dc->pc; gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1); tcg_ctx.gen_opc_instr_start[lj] = 1; tcg_ctx.gen_opc_icount[lj] = num_insns; } if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) gen_io_start(); if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { tcg_gen_debug_insn_start(dc->pc); } if (dc->ss_active && !dc->pstate_ss) { /* Singlestep state is Active-pending. * If we're in this state at the start of a TB then either * a) we just took an exception to an EL which is being debugged * and this is the first insn in the exception handler * b) debug exceptions were masked and we just unmasked them * without changing EL (eg by clearing PSTATE.D) * In either case we're going to take a swstep exception in the * "did not step an insn" case, and so the syndrome ISV and EX * bits should be zero. */ assert(num_insns == 0); gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0), default_exception_el(dc)); goto done_generating; } if (dc->thumb) { disas_thumb_insn(env, dc); if (dc->condexec_mask) { dc->condexec_cond = (dc->condexec_cond & 0xe) | ((dc->condexec_mask >> 4) & 1); dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f; if (dc->condexec_mask == 0) { dc->condexec_cond = 0; } } } else { unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code); dc->pc += 4; disas_arm_insn(dc, insn); } if (dc->condjmp && !dc->is_jmp) { gen_set_label(dc->condlabel); dc->condjmp = 0; } if (tcg_check_temp_count()) { fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n", dc->pc); } /* Translation stops when a conditional branch is encountered. * Otherwise the subsequent code could get translated several times. * Also stop translation when a page boundary is reached. This * ensures prefetch aborts occur at the right place. */ num_insns ++; } while (!dc->is_jmp && !tcg_op_buf_full() && !cs->singlestep_enabled && !singlestep && !dc->ss_active && dc->pc < next_page_start && num_insns < max_insns); if (tb->cflags & CF_LAST_IO) { if (dc->condjmp) { /* FIXME: This can theoretically happen with self-modifying code. */ cpu_abort(cs, "IO on conditional branch instruction"); } gen_io_end(); } /* At this stage dc->condjmp will only be set when the skipped instruction was a conditional branch or trap, and the PC has already been written. */ if (unlikely(cs->singlestep_enabled || dc->ss_active)) { /* Make sure the pc is updated, and raise a debug exception. */ if (dc->condjmp) { gen_set_condexec(dc); if (dc->is_jmp == DISAS_SWI) { gen_ss_advance(dc); gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb), default_exception_el(dc)); } else if (dc->is_jmp == DISAS_HVC) { gen_ss_advance(dc); gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); } else if (dc->is_jmp == DISAS_SMC) { gen_ss_advance(dc); gen_exception(EXCP_SMC, syn_aa32_smc(), 3); } else if (dc->ss_active) { gen_step_complete_exception(dc); } else { gen_exception_internal(EXCP_DEBUG); } gen_set_label(dc->condlabel); } if (dc->condjmp || !dc->is_jmp) { gen_set_pc_im(dc, dc->pc); dc->condjmp = 0; } gen_set_condexec(dc); if (dc->is_jmp == DISAS_SWI && !dc->condjmp) { gen_ss_advance(dc); gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb), default_exception_el(dc)); } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) { gen_ss_advance(dc); gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) { gen_ss_advance(dc); gen_exception(EXCP_SMC, syn_aa32_smc(), 3); } else if (dc->ss_active) { gen_step_complete_exception(dc); } else { /* FIXME: Single stepping a WFI insn will not halt the CPU. */ gen_exception_internal(EXCP_DEBUG); } } else { /* While branches must always occur at the end of an IT block, there are a few other things that can cause us to terminate the TB in the middle of an IT block: - Exception generating instructions (bkpt, swi, undefined). - Page boundaries. - Hardware watchpoints. Hardware breakpoints have already been handled and skip this code. */ gen_set_condexec(dc); switch(dc->is_jmp) { case DISAS_NEXT: gen_goto_tb(dc, 1, dc->pc); break; default: case DISAS_JUMP: case DISAS_UPDATE: /* indicate that the hash table must be used to find the next TB */ tcg_gen_exit_tb(0); break; case DISAS_TB_JUMP: /* nothing more to generate */ break; case DISAS_WFI: gen_helper_wfi(cpu_env); break; case DISAS_WFE: gen_helper_wfe(cpu_env); break; case DISAS_SWI: gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb), default_exception_el(dc)); break; case DISAS_HVC: gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2); break; case DISAS_SMC: gen_exception(EXCP_SMC, syn_aa32_smc(), 3); break; } if (dc->condjmp) { gen_set_label(dc->condlabel); gen_set_condexec(dc); gen_goto_tb(dc, 1, dc->pc); dc->condjmp = 0; } } done_generating: gen_tb_end(tb, num_insns); #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { qemu_log("----------------\n"); qemu_log("IN: %s\n", lookup_symbol(pc_start)); log_target_disas(env, pc_start, dc->pc - pc_start, dc->thumb | (dc->bswap_code << 1)); qemu_log("\n"); } #endif if (search_pc) { j = tcg_op_buf_count(); lj++; while (lj <= j) tcg_ctx.gen_opc_instr_start[lj++] = 0; } else { tb->size = dc->pc - pc_start; tb->icount = num_insns; } }
20,953
0
av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; static int done = 0; int i; MVTable *mv; ff_h263_decode_init(avctx); common_init(s); if (!done) { done = 1; for(i=0;i<NB_RL_TABLES;i++) { init_rl(&rl_table[i], static_rl_table_store[i]); } INIT_VLC_RL(rl_table[0], 642); INIT_VLC_RL(rl_table[1], 1104); INIT_VLC_RL(rl_table[2], 554); INIT_VLC_RL(rl_table[3], 940); INIT_VLC_RL(rl_table[4], 962); INIT_VLC_RL(rl_table[5], 554); mv = &mv_tables[0]; INIT_VLC_STATIC(&mv->vlc, MV_VLC_BITS, mv->n + 1, mv->table_mv_bits, 1, 1, mv->table_mv_code, 2, 2, 3714); mv = &mv_tables[1]; INIT_VLC_STATIC(&mv->vlc, MV_VLC_BITS, mv->n + 1, mv->table_mv_bits, 1, 1, mv->table_mv_code, 2, 2, 2694); INIT_VLC_STATIC(&ff_msmp4_dc_luma_vlc[0], DC_VLC_BITS, 120, &ff_table0_dc_lum[0][1], 8, 4, &ff_table0_dc_lum[0][0], 8, 4, 1158); INIT_VLC_STATIC(&ff_msmp4_dc_chroma_vlc[0], DC_VLC_BITS, 120, &ff_table0_dc_chroma[0][1], 8, 4, &ff_table0_dc_chroma[0][0], 8, 4, 1118); INIT_VLC_STATIC(&ff_msmp4_dc_luma_vlc[1], DC_VLC_BITS, 120, &ff_table1_dc_lum[0][1], 8, 4, &ff_table1_dc_lum[0][0], 8, 4, 1476); INIT_VLC_STATIC(&ff_msmp4_dc_chroma_vlc[1], DC_VLC_BITS, 120, &ff_table1_dc_chroma[0][1], 8, 4, &ff_table1_dc_chroma[0][0], 8, 4, 1216); INIT_VLC_STATIC(&v2_dc_lum_vlc, DC_VLC_BITS, 512, &v2_dc_lum_table[0][1], 8, 4, &v2_dc_lum_table[0][0], 8, 4, 1472); INIT_VLC_STATIC(&v2_dc_chroma_vlc, DC_VLC_BITS, 512, &v2_dc_chroma_table[0][1], 8, 4, &v2_dc_chroma_table[0][0], 8, 4, 1506); INIT_VLC_STATIC(&v2_intra_cbpc_vlc, V2_INTRA_CBPC_VLC_BITS, 4, &v2_intra_cbpc[0][1], 2, 1, &v2_intra_cbpc[0][0], 2, 1, 8); INIT_VLC_STATIC(&v2_mb_type_vlc, V2_MB_TYPE_VLC_BITS, 8, &v2_mb_type[0][1], 2, 1, &v2_mb_type[0][0], 2, 1, 128); INIT_VLC_STATIC(&v2_mv_vlc, V2_MV_VLC_BITS, 33, &mvtab[0][1], 2, 1, &mvtab[0][0], 2, 1, 538); INIT_VLC_STATIC(&ff_mb_non_intra_vlc[0], MB_NON_INTRA_VLC_BITS, 128, &wmv2_inter_table[0][0][1], 8, 4, &wmv2_inter_table[0][0][0], 8, 4, 1636); INIT_VLC_STATIC(&ff_mb_non_intra_vlc[1], MB_NON_INTRA_VLC_BITS, 128, &wmv2_inter_table[1][0][1], 8, 4, &wmv2_inter_table[1][0][0], 8, 4, 2648); INIT_VLC_STATIC(&ff_mb_non_intra_vlc[2], MB_NON_INTRA_VLC_BITS, 128, &wmv2_inter_table[2][0][1], 8, 4, &wmv2_inter_table[2][0][0], 8, 4, 1532); INIT_VLC_STATIC(&ff_mb_non_intra_vlc[3], MB_NON_INTRA_VLC_BITS, 128, &wmv2_inter_table[3][0][1], 8, 4, &wmv2_inter_table[3][0][0], 8, 4, 2488); INIT_VLC_STATIC(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64, &ff_msmp4_mb_i_table[0][1], 4, 2, &ff_msmp4_mb_i_table[0][0], 4, 2, 536); INIT_VLC_STATIC(&ff_inter_intra_vlc, INTER_INTRA_VLC_BITS, 4, &table_inter_intra[0][1], 2, 1, &table_inter_intra[0][0], 2, 1, 8); } switch(s->msmpeg4_version){ case 1: case 2: s->decode_mb= msmpeg4v12_decode_mb; break; case 3: case 4: s->decode_mb= msmpeg4v34_decode_mb; break; case 5: if (CONFIG_WMV2_DECODER) s->decode_mb= ff_wmv2_decode_mb; case 6: //FIXME + TODO VC1 decode mb break; } s->slice_height= s->mb_height; //to avoid 1/0 if the first frame is not a keyframe return 0; }
20,954
0
PCIBus *pci_grackle_init(uint32_t base, qemu_irq *pic, MemoryRegion *address_space_mem, MemoryRegion *address_space_io) { DeviceState *dev; SysBusDevice *s; PCIHostState *phb; GrackleState *d; dev = qdev_create(NULL, TYPE_GRACKLE_PCI_HOST_BRIDGE); qdev_init_nofail(dev); s = SYS_BUS_DEVICE(dev); phb = PCI_HOST_BRIDGE(dev); d = GRACKLE_PCI_HOST_BRIDGE(dev); memory_region_init(&d->pci_mmio, OBJECT(s), "pci-mmio", 0x100000000ULL); memory_region_init_alias(&d->pci_hole, OBJECT(s), "pci-hole", &d->pci_mmio, 0x80000000ULL, 0x7e000000ULL); memory_region_add_subregion(address_space_mem, 0x80000000ULL, &d->pci_hole); phb->bus = pci_register_bus(dev, "pci", pci_grackle_set_irq, pci_grackle_map_irq, pic, &d->pci_mmio, address_space_io, 0, 4, TYPE_PCI_BUS); pci_create_simple(phb->bus, 0, "grackle"); sysbus_mmio_map(s, 0, base); sysbus_mmio_map(s, 1, base + 0x00200000); return phb->bus; }
20,955
0
CPUAlphaState * cpu_alpha_init (const char *cpu_model) { CPUAlphaState *env; int implver, amask, i, max; env = qemu_mallocz(sizeof(CPUAlphaState)); cpu_exec_init(env); alpha_translate_init(); tlb_flush(env, 1); /* Default to ev67; no reason not to emulate insns by default. */ implver = IMPLVER_21264; amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH); max = ARRAY_SIZE(cpu_defs); for (i = 0; i < max; i++) { if (strcmp (cpu_model, cpu_defs[i].name) == 0) { implver = cpu_defs[i].implver; amask = cpu_defs[i].amask; break; } } env->implver = implver; env->amask = amask; env->ps = 0x1F00; #if defined (CONFIG_USER_ONLY) env->ps |= 1 << 3; cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD | FPCR_UNFD | FPCR_INED | FPCR_DNOD)); #endif pal_init(env); /* Initialize IPR */ #if defined (CONFIG_USER_ONLY) env->ipr[IPR_EXC_ADDR] = 0; env->ipr[IPR_EXC_SUM] = 0; env->ipr[IPR_EXC_MASK] = 0; #else { uint64_t hwpcb; hwpcb = env->ipr[IPR_PCBB]; env->ipr[IPR_ASN] = 0; env->ipr[IPR_ASTEN] = 0; env->ipr[IPR_ASTSR] = 0; env->ipr[IPR_DATFX] = 0; /* XXX: fix this */ // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8); // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0); // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16); // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24); env->ipr[IPR_FEN] = 0; env->ipr[IPR_IPL] = 31; env->ipr[IPR_MCES] = 0; env->ipr[IPR_PERFMON] = 0; /* Implementation specific */ // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32); env->ipr[IPR_SISR] = 0; env->ipr[IPR_VIRBND] = -1ULL; } #endif qemu_init_vcpu(env); return env; }
20,956
0
static int match_group_separator(const OptionGroupDef *groups, const char *opt) { const OptionGroupDef *p = groups; while (p->name) { if (p->sep && !strcmp(p->sep, opt)) return p - groups; p++; } return -1; }
20,959
0
static void evolve(AVFilterContext *ctx) { LifeContext *life = ctx->priv; int i, j; uint8_t *oldbuf = life->buf[ life->buf_idx]; uint8_t *newbuf = life->buf[!life->buf_idx]; enum { NW, N, NE, W, E, SW, S, SE }; /* evolve the grid */ for (i = 0; i < life->h; i++) { for (j = 0; j < life->w; j++) { int pos[8][2], n, alive, cell; if (life->stitch) { pos[NW][0] = (i-1) < 0 ? life->h-1 : i-1; pos[NW][1] = (j-1) < 0 ? life->w-1 : j-1; pos[N ][0] = (i-1) < 0 ? life->h-1 : i-1; pos[N ][1] = j ; pos[NE][0] = (i-1) < 0 ? life->h-1 : i-1; pos[NE][1] = (j+1) == life->w ? 0 : j+1; pos[W ][0] = i ; pos[W ][1] = (j-1) < 0 ? life->w-1 : j-1; pos[E ][0] = i ; pos[E ][1] = (j+1) == life->w ? 0 : j+1; pos[SW][0] = (i+1) == life->h ? 0 : i+1; pos[SW][1] = (j-1) < 0 ? life->w-1 : j-1; pos[S ][0] = (i+1) == life->h ? 0 : i+1; pos[S ][1] = j ; pos[SE][0] = (i+1) == life->h ? 0 : i+1; pos[SE][1] = (j+1) == life->w ? 0 : j+1; } else { pos[NW][0] = (i-1) < 0 ? -1 : i-1; pos[NW][1] = (j-1) < 0 ? -1 : j-1; pos[N ][0] = (i-1) < 0 ? -1 : i-1; pos[N ][1] = j ; pos[NE][0] = (i-1) < 0 ? -1 : i-1; pos[NE][1] = (j+1) == life->w ? -1 : j+1; pos[W ][0] = i ; pos[W ][1] = (j-1) < 0 ? -1 : j-1; pos[E ][0] = i ; pos[E ][1] = (j+1) == life->w ? -1 : j+1; pos[SW][0] = (i+1) == life->h ? -1 : i+1; pos[SW][1] = (j-1) < 0 ? -1 : j-1; pos[S ][0] = (i+1) == life->h ? -1 : i+1; pos[S ][1] = j ; pos[SE][0] = (i+1) == life->h ? -1 : i+1; pos[SE][1] = (j+1) == life->w ? -1 : j+1; } /* compute the number of live neighbor cells */ n = (pos[NW][0] == -1 || pos[NW][1] == -1 ? 0 : oldbuf[pos[NW][0]*life->w + pos[NW][1]] == ALIVE_CELL) + (pos[N ][0] == -1 || pos[N ][1] == -1 ? 0 : oldbuf[pos[N ][0]*life->w + pos[N ][1]] == ALIVE_CELL) + (pos[NE][0] == -1 || pos[NE][1] == -1 ? 0 : oldbuf[pos[NE][0]*life->w + pos[NE][1]] == ALIVE_CELL) + (pos[W ][0] == -1 || pos[W ][1] == -1 ? 0 : oldbuf[pos[W ][0]*life->w + pos[W ][1]] == ALIVE_CELL) + (pos[E ][0] == -1 || pos[E ][1] == -1 ? 0 : oldbuf[pos[E ][0]*life->w + pos[E ][1]] == ALIVE_CELL) + (pos[SW][0] == -1 || pos[SW][1] == -1 ? 0 : oldbuf[pos[SW][0]*life->w + pos[SW][1]] == ALIVE_CELL) + (pos[S ][0] == -1 || pos[S ][1] == -1 ? 0 : oldbuf[pos[S ][0]*life->w + pos[S ][1]] == ALIVE_CELL) + (pos[SE][0] == -1 || pos[SE][1] == -1 ? 0 : oldbuf[pos[SE][0]*life->w + pos[SE][1]] == ALIVE_CELL); cell = oldbuf[i*life->w + j]; alive = 1<<n & (cell == ALIVE_CELL ? life->stay_rule : life->born_rule); if (alive) *newbuf = ALIVE_CELL; // new cell is alive else if (cell) *newbuf = cell - 1; // new cell is dead and in the process of mold else *newbuf = 0; // new cell is definitely dead av_dlog(ctx, "i:%d j:%d live_neighbors:%d cell:%d -> cell:%d\n", i, j, n, cell, *newbuf); newbuf++; } } life->buf_idx = !life->buf_idx; }
20,960
0
void ppm_save(const char *filename, struct DisplaySurface *ds, Error **errp) { int width = pixman_image_get_width(ds->image); int height = pixman_image_get_height(ds->image); FILE *f; int y; int ret; pixman_image_t *linebuf; trace_ppm_save(filename, ds); f = fopen(filename, "wb"); if (!f) { error_setg(errp, "failed to open file '%s': %s", filename, strerror(errno)); return; } ret = fprintf(f, "P6\n%d %d\n%d\n", width, height, 255); if (ret < 0) { linebuf = NULL; goto write_err; } linebuf = qemu_pixman_linebuf_create(PIXMAN_BE_r8g8b8, width); for (y = 0; y < height; y++) { qemu_pixman_linebuf_fill(linebuf, ds->image, width, 0, y); clearerr(f); ret = fwrite(pixman_image_get_data(linebuf), 1, pixman_image_get_stride(linebuf), f); (void)ret; if (ferror(f)) { goto write_err; } } out: qemu_pixman_image_unref(linebuf); fclose(f); return; write_err: error_setg(errp, "failed to write to file '%s': %s", filename, strerror(errno)); unlink(filename); goto out; }
20,962
0
static TranslationBlock *tb_find_physical(CPUState *cpu, target_ulong pc, target_ulong cs_base, uint32_t flags) { CPUArchState *env = (CPUArchState *)cpu->env_ptr; TranslationBlock *tb, **tb_hash_head, **ptb1; unsigned int h; tb_page_addr_t phys_pc, phys_page1; tcg_ctx.tb_ctx.tb_invalidated_flag = 0; /* find translated block using physical mappings */ phys_pc = get_page_addr_code(env, pc); phys_page1 = phys_pc & TARGET_PAGE_MASK; h = tb_phys_hash_func(phys_pc); /* Start at head of the hash entry */ ptb1 = tb_hash_head = &tcg_ctx.tb_ctx.tb_phys_hash[h]; tb = *ptb1; while (tb) { if (tb->pc == pc && tb->page_addr[0] == phys_page1 && tb->cs_base == cs_base && tb->flags == flags) { if (tb->page_addr[1] == -1) { /* done, we have a match */ break; } else { /* check next page if needed */ target_ulong virt_page2 = (pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; tb_page_addr_t phys_page2 = get_page_addr_code(env, virt_page2); if (tb->page_addr[1] == phys_page2) { break; } } } ptb1 = &tb->phys_hash_next; tb = *ptb1; } if (tb) { /* Move the TB to the head of the list */ *ptb1 = tb->phys_hash_next; tb->phys_hash_next = *tb_hash_head; *tb_hash_head = tb; } return tb; }
20,964
0
static int mxf_get_sorted_table_segments(MXFContext *mxf, int *nb_sorted_segments, MXFIndexTableSegment ***sorted_segments) { int i, j, nb_segments = 0; MXFIndexTableSegment **unsorted_segments; int last_body_sid = -1, last_index_sid = -1, last_index_start = -1; /* count number of segments, allocate arrays and copy unsorted segments */ for (i = 0; i < mxf->metadata_sets_count; i++) if (mxf->metadata_sets[i]->type == IndexTableSegment) nb_segments++; if (!nb_segments) return AVERROR_INVALIDDATA; *sorted_segments = av_mallocz(nb_segments * sizeof(**sorted_segments)); unsorted_segments = av_mallocz(nb_segments * sizeof(*unsorted_segments)); if (!sorted_segments || !unsorted_segments) { av_freep(sorted_segments); av_free(unsorted_segments); return AVERROR(ENOMEM); } for (i = j = 0; i < mxf->metadata_sets_count; i++) if (mxf->metadata_sets[i]->type == IndexTableSegment) unsorted_segments[j++] = (MXFIndexTableSegment*)mxf->metadata_sets[i]; *nb_sorted_segments = 0; /* sort segments by {BodySID, IndexSID, IndexStartPosition}, remove duplicates while we're at it */ for (i = 0; i < nb_segments; i++) { int best = -1, best_body_sid = -1, best_index_sid = -1, best_index_start = -1; for (j = 0; j < nb_segments; j++) { MXFIndexTableSegment *s = unsorted_segments[j]; /* Require larger BosySID, IndexSID or IndexStartPosition then the previous entry. This removes duplicates. * We want the smallest values for the keys than what we currently have, unless this is the first such entry this time around. */ if ((i == 0 || s->body_sid > last_body_sid || s->index_sid > last_index_sid || s->index_start_position > last_index_start) && (best == -1 || s->body_sid < best_body_sid || s->index_sid < best_index_sid || s->index_start_position < best_index_start)) { best = j; best_body_sid = s->body_sid; best_index_sid = s->index_sid; best_index_start = s->index_start_position; } } /* no suitable entry found -> we're done */ if (best == -1) break; (*sorted_segments)[(*nb_sorted_segments)++] = unsorted_segments[best]; last_body_sid = best_body_sid; last_index_sid = best_index_sid; last_index_start = best_index_start; } av_free(unsorted_segments); return 0; }
20,965
0
static uint32_t slavio_led_mem_reads(void *opaque, target_phys_addr_t addr) { MiscState *s = opaque; uint32_t ret = 0, saddr; saddr = addr & LED_MAXADDR; switch (saddr) { case 0: ret = s->leds; break; default: break; } MISC_DPRINTF("Read diagnostic LED reg 0x" TARGET_FMT_plx " = %x\n", addr, ret); return ret; }
20,966
0
static int proxy_init(FsContext *ctx) { V9fsProxy *proxy = g_malloc(sizeof(V9fsProxy)); int sock_id; if (ctx->export_flags & V9FS_PROXY_SOCK_NAME) { sock_id = connect_namedsocket(ctx->fs_root); } else { sock_id = atoi(ctx->fs_root); if (sock_id < 0) { fprintf(stderr, "socket descriptor not initialized\n"); } } if (sock_id < 0) { g_free(proxy); return -1; } g_free(ctx->fs_root); ctx->fs_root = NULL; proxy->in_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ); proxy->in_iovec.iov_len = PROXY_MAX_IO_SZ + PROXY_HDR_SZ; proxy->out_iovec.iov_base = g_malloc(PROXY_MAX_IO_SZ + PROXY_HDR_SZ); proxy->out_iovec.iov_len = PROXY_MAX_IO_SZ + PROXY_HDR_SZ; ctx->private = proxy; proxy->sockfd = sock_id; qemu_mutex_init(&proxy->mutex); ctx->export_flags |= V9FS_PATHNAME_FSCONTEXT; ctx->exops.get_st_gen = proxy_ioc_getversion; return 0; }
20,967
0
static void scsi_flush_complete(void * opaque, int ret) { SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); if (r->req.aiocb != NULL) { r->req.aiocb = NULL; bdrv_acct_done(s->qdev.conf.bs, &r->acct); } if (ret < 0) { if (scsi_handle_rw_error(r, -ret)) { goto done; } } scsi_req_complete(&r->req, GOOD); done: if (!r->req.io_canceled) { scsi_req_unref(&r->req); } }
20,968
0
void fork_end(int child) { mmap_fork_end(child); if (child) { CPUState *cpu, *next_cpu; /* Child processes created by fork() only have a single thread. Discard information about the parent threads. */ CPU_FOREACH_SAFE(cpu, next_cpu) { if (cpu != thread_cpu) { QTAILQ_REMOVE(&cpus, thread_cpu, node); } } pending_cpus = 0; pthread_mutex_init(&exclusive_lock, NULL); pthread_mutex_init(&cpu_list_mutex, NULL); pthread_cond_init(&exclusive_cond, NULL); pthread_cond_init(&exclusive_resume, NULL); pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL); gdbserver_fork(thread_cpu); } else { pthread_mutex_unlock(&exclusive_lock); pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock); } }
20,969
0
static uint32_t rtas_set_allocation_state(uint32_t idx, uint32_t state) { sPAPRDRConnector *drc = spapr_drc_by_index(idx); sPAPRDRConnectorClass *drck; if (!drc) { return RTAS_OUT_PARAM_ERROR; } drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); return drck->set_allocation_state(drc, state); }
20,971
0
static void nic_save(QEMUFile * f, void *opaque) { EEPRO100State *s = opaque; int i; pci_device_save(&s->dev, f); qemu_put_8s(f, &s->rxcr); qemu_put_8s(f, &s->cmd); qemu_put_be32s(f, &s->start); qemu_put_be32s(f, &s->stop); qemu_put_8s(f, &s->boundary); qemu_put_8s(f, &s->tsr); qemu_put_8s(f, &s->tpsr); qemu_put_be16s(f, &s->tcnt); qemu_put_be16s(f, &s->rcnt); qemu_put_be32s(f, &s->rsar); qemu_put_8s(f, &s->rsr); qemu_put_8s(f, &s->isr); qemu_put_8s(f, &s->dcfg); qemu_put_8s(f, &s->imr); qemu_put_buffer(f, s->phys, 6); qemu_put_8s(f, &s->curpag); qemu_put_buffer(f, s->mult, 8); qemu_put_buffer(f, s->mem, sizeof(s->mem)); /* Save all members of struct between scv_stat and mem. */ qemu_put_8s(f, &s->scb_stat); qemu_put_8s(f, &s->int_stat); for (i = 0; i < 3; i++) { qemu_put_be32s(f, &s->region[i]); } qemu_put_buffer(f, s->macaddr, 6); for (i = 0; i < 19; i++) { qemu_put_be32s(f, &s->statcounter[i]); } for (i = 0; i < 32; i++) { qemu_put_be16s(f, &s->mdimem[i]); } /* The eeprom should be saved and restored by its own routines. */ qemu_put_be32s(f, &s->device); qemu_put_be32s(f, &s->pointer); qemu_put_be32s(f, &s->cu_base); qemu_put_be32s(f, &s->cu_offset); qemu_put_be32s(f, &s->ru_base); qemu_put_be32s(f, &s->ru_offset); qemu_put_be32s(f, &s->statsaddr); /* Save epro100_stats_t statistics. */ qemu_put_be32s(f, &s->statistics.tx_good_frames); qemu_put_be32s(f, &s->statistics.tx_max_collisions); qemu_put_be32s(f, &s->statistics.tx_late_collisions); qemu_put_be32s(f, &s->statistics.tx_underruns); qemu_put_be32s(f, &s->statistics.tx_lost_crs); qemu_put_be32s(f, &s->statistics.tx_deferred); qemu_put_be32s(f, &s->statistics.tx_single_collisions); qemu_put_be32s(f, &s->statistics.tx_multiple_collisions); qemu_put_be32s(f, &s->statistics.tx_total_collisions); qemu_put_be32s(f, &s->statistics.rx_good_frames); qemu_put_be32s(f, &s->statistics.rx_crc_errors); qemu_put_be32s(f, &s->statistics.rx_alignment_errors); qemu_put_be32s(f, &s->statistics.rx_resource_errors); qemu_put_be32s(f, &s->statistics.rx_overrun_errors); qemu_put_be32s(f, &s->statistics.rx_cdt_errors); qemu_put_be32s(f, &s->statistics.rx_short_frame_errors); qemu_put_be32s(f, &s->statistics.fc_xmt_pause); qemu_put_be32s(f, &s->statistics.fc_rcv_pause); qemu_put_be32s(f, &s->statistics.fc_rcv_unsupported); qemu_put_be16s(f, &s->statistics.xmt_tco_frames); qemu_put_be16s(f, &s->statistics.rcv_tco_frames); qemu_put_be32s(f, &s->statistics.complete); #if 0 qemu_put_be16s(f, &s->status); #endif /* Configuration bytes. */ qemu_put_buffer(f, s->configuration, sizeof(s->configuration)); }
20,972
0
static void terrier_init(int ram_size, int vga_ram_size, int boot_device, DisplayState *ds, const char **fd_filename, int snapshot, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { spitz_common_init(ram_size, vga_ram_size, ds, kernel_filename, kernel_cmdline, initrd_filename, terrier, 0x33f); }
20,973
0
int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request, unsigned int index, unsigned int n, bool flush) { int ret = -EINPROGRESS; qed_write_l2_table(s, request, index, n, flush, qed_sync_cb, &ret); while (ret == -EINPROGRESS) { aio_poll(bdrv_get_aio_context(s->bs), true); } return ret; }
20,974
0
static void encode_picture(MpegEncContext *s, int picture_number) { int mb_x, mb_y, pdif = 0; int i; int bits; MpegEncContext best_s, backup_s; uint8_t bit_buf[2][3000]; uint8_t bit_buf2[2][3000]; uint8_t bit_buf_tex[2][3000]; PutBitContext pb[2], pb2[2], tex_pb[2]; for(i=0; i<2; i++){ init_put_bits(&pb [i], bit_buf [i], 3000, NULL, NULL); init_put_bits(&pb2 [i], bit_buf2 [i], 3000, NULL, NULL); init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000, NULL, NULL); } s->picture_number = picture_number; s->block_wrap[0]= s->block_wrap[1]= s->block_wrap[2]= s->block_wrap[3]= s->mb_width*2 + 2; s->block_wrap[4]= s->block_wrap[5]= s->mb_width + 2; /* Reset the average MB variance */ s->current_picture.mb_var_sum = 0; s->current_picture.mc_mb_var_sum = 0; #ifdef CONFIG_RISKY /* we need to initialize some time vars before we can encode b-frames */ // RAL: Condition added for MPEG1VIDEO if (s->codec_id == CODEC_ID_MPEG1VIDEO || (s->h263_pred && !s->h263_msmpeg4)) ff_set_mpeg4_time(s, s->picture_number); #endif s->scene_change_score=0; s->qscale= (int)(s->frame_qscale + 0.5); //FIXME qscale / ... stuff for ME ratedistoration if(s->msmpeg4_version){ if(s->pict_type==I_TYPE) s->no_rounding=1; else if(s->flipflop_rounding) s->no_rounding ^= 1; }else if(s->out_format == FMT_H263){ if(s->pict_type==I_TYPE) s->no_rounding=0; else if(s->pict_type!=B_TYPE) s->no_rounding ^= 1; } /* Estimate motion for every MB */ s->mb_intra=0; //for the rate distoration & bit compare functions if(s->pict_type != I_TYPE){ if(s->pict_type != B_TYPE){ if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){ s->me.pre_pass=1; s->me.dia_size= s->avctx->pre_dia_size; for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) { for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) { s->mb_x = mb_x; s->mb_y = mb_y; ff_pre_estimate_p_frame_motion(s, mb_x, mb_y); } } s->me.pre_pass=0; } } s->me.dia_size= s->avctx->dia_size; for(mb_y=0; mb_y < s->mb_height; mb_y++) { s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1; s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1); s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1; s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2); for(mb_x=0; mb_x < s->mb_width; mb_x++) { s->mb_x = mb_x; s->mb_y = mb_y; s->block_index[0]+=2; s->block_index[1]+=2; s->block_index[2]+=2; s->block_index[3]+=2; /* compute motion vector & mb_type and store in context */ if(s->pict_type==B_TYPE) ff_estimate_b_frame_motion(s, mb_x, mb_y); else ff_estimate_p_frame_motion(s, mb_x, mb_y); } } }else /* if(s->pict_type == I_TYPE) */{ /* I-Frame */ //FIXME do we need to zero them? memset(s->motion_val[0], 0, sizeof(int16_t)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2); memset(s->p_mv_table , 0, sizeof(int16_t)*(s->mb_width+2)*(s->mb_height+2)*2); memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_width*s->mb_height); if(!s->fixed_qscale){ /* finding spatial complexity for I-frame rate control */ for(mb_y=0; mb_y < s->mb_height; mb_y++) { for(mb_x=0; mb_x < s->mb_width; mb_x++) { int xx = mb_x * 16; int yy = mb_y * 16; uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx; int varc; int sum = s->dsp.pix_sum(pix, s->linesize); varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8; s->current_picture.mb_var [s->mb_width * mb_y + mb_x] = varc; s->current_picture.mb_mean[s->mb_width * mb_y + mb_x] = (sum+128)>>8; s->current_picture.mb_var_sum += varc; } } } } emms_c(); if(s->scene_change_score > 0 && s->pict_type == P_TYPE){ s->pict_type= I_TYPE; memset(s->mb_type , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_width*s->mb_height); //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum); } if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) { s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER); // RAL: Next call moved into that bloc ff_fix_long_p_mvs(s); } // RAL: All this bloc changed if(s->pict_type==B_TYPE){ int a, b; a = ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD); b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, MB_TYPE_BIDIR); s->f_code = FFMAX(a, b); a = ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD); b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, MB_TYPE_BIDIR); s->b_code = FFMAX(a, b); ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD); ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD); ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR); ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR); } if (s->fixed_qscale) s->frame_qscale = s->current_picture.quality; else s->frame_qscale = ff_rate_estimate_qscale(s); if(s->adaptive_quant){ #ifdef CONFIG_RISKY switch(s->codec_id){ case CODEC_ID_MPEG4: ff_clean_mpeg4_qscales(s); break; case CODEC_ID_H263: case CODEC_ID_H263P: ff_clean_h263_qscales(s); break; } #endif s->qscale= s->current_picture.qscale_table[0]; }else s->qscale= (int)(s->frame_qscale + 0.5); if (s->out_format == FMT_MJPEG) { /* for mjpeg, we do include qscale in the matrix */ s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0]; for(i=1;i<64;i++){ int j= s->idct_permutation[i]; s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3); } convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16, s->q_intra_matrix16_bias, s->intra_matrix, s->intra_quant_bias, 8, 8); } //FIXME var duplication s->current_picture.key_frame= s->pict_type == I_TYPE; s->current_picture.pict_type= s->pict_type; if(s->current_picture.key_frame) s->picture_in_gop_number=0; s->last_bits= get_bit_count(&s->pb); switch(s->out_format) { case FMT_MJPEG: mjpeg_picture_header(s); break; #ifdef CONFIG_RISKY case FMT_H263: if (s->codec_id == CODEC_ID_WMV2) ff_wmv2_encode_picture_header(s, picture_number); else if (s->h263_msmpeg4) msmpeg4_encode_picture_header(s, picture_number); else if (s->h263_pred) mpeg4_encode_picture_header(s, picture_number); else if (s->h263_rv10) rv10_encode_picture_header(s, picture_number); else h263_encode_picture_header(s, picture_number); break; #endif case FMT_MPEG1: mpeg1_encode_picture_header(s, picture_number); break; } bits= get_bit_count(&s->pb); s->header_bits= bits - s->last_bits; s->last_bits= bits; s->mv_bits=0; s->misc_bits=0; s->i_tex_bits=0; s->p_tex_bits=0; s->i_count=0; s->f_count=0; s->b_count=0; s->skip_count=0; for(i=0; i<3; i++){ /* init last dc values */ /* note: quant matrix value (8) is implied here */ s->last_dc[i] = 128; s->current_picture.error[i] = 0; } s->mb_incr = 1; s->last_mv[0][0][0] = 0; s->last_mv[0][0][1] = 0; s->last_mv[1][0][0] = 0; s->last_mv[1][0][1] = 0; s->last_mv_dir = 0; #ifdef CONFIG_RISKY if (s->codec_id==CODEC_ID_H263 || s->codec_id==CODEC_ID_H263P) s->gob_index = ff_h263_get_gob_height(s); if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame) ff_mpeg4_init_partitions(s); #endif s->resync_mb_x=0; s->resync_mb_y=0; s->first_slice_line = 1; s->ptr_lastgob = s->pb.buf; s->ptr_last_mb_line = s->pb.buf; for(mb_y=0; mb_y < s->mb_height; mb_y++) { s->y_dc_scale= s->y_dc_scale_table[ s->qscale ]; s->c_dc_scale= s->c_dc_scale_table[ s->qscale ]; s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1; s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1); s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1; s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2); s->block_index[4]= s->block_wrap[4]*(mb_y + 1) + s->block_wrap[0]*(s->mb_height*2 + 2); s->block_index[5]= s->block_wrap[4]*(mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2); for(mb_x=0; mb_x < s->mb_width; mb_x++) { int mb_type= s->mb_type[mb_y * s->mb_width + mb_x]; const int xy= (mb_y+1) * (s->mb_width+2) + mb_x + 1; // int d; int dmin=10000000; s->mb_x = mb_x; s->mb_y = mb_y; s->block_index[0]+=2; s->block_index[1]+=2; s->block_index[2]+=2; s->block_index[3]+=2; s->block_index[4]++; s->block_index[5]++; /* write gob / video packet header */ #ifdef CONFIG_RISKY if(s->rtp_mode){ int current_packet_size, is_gob_start; current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob; is_gob_start=0; if(s->codec_id==CODEC_ID_MPEG4){ if(current_packet_size + s->mb_line_avgsize/s->mb_width >= s->rtp_payload_size && s->mb_y + s->mb_x>0){ if(s->partitioned_frame){ ff_mpeg4_merge_partitions(s); ff_mpeg4_init_partitions(s); } ff_mpeg4_encode_video_packet_header(s); if(s->flags&CODEC_FLAG_PASS1){ int bits= get_bit_count(&s->pb); s->misc_bits+= bits - s->last_bits; s->last_bits= bits; } ff_mpeg4_clean_buffers(s); is_gob_start=1; } }else{ if(current_packet_size + s->mb_line_avgsize*s->gob_index >= s->rtp_payload_size && s->mb_x==0 && s->mb_y>0 && s->mb_y%s->gob_index==0){ h263_encode_gob_header(s, mb_y); is_gob_start=1; } } if(is_gob_start){ s->ptr_lastgob = pbBufPtr(&s->pb); s->first_slice_line=1; s->resync_mb_x=mb_x; s->resync_mb_y=mb_y; } } #endif if( (s->resync_mb_x == s->mb_x) && s->resync_mb_y+1 == s->mb_y){ s->first_slice_line=0; } if(mb_type & (mb_type-1)){ // more than 1 MB type possible int next_block=0; int pb_bits_count, pb2_bits_count, tex_pb_bits_count; copy_context_before_encode(&backup_s, s, -1); backup_s.pb= s->pb; best_s.data_partitioning= s->data_partitioning; best_s.partitioned_frame= s->partitioned_frame; if(s->data_partitioning){ backup_s.pb2= s->pb2; backup_s.tex_pb= s->tex_pb; } if(mb_type&MB_TYPE_INTER){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; s->mv[0][0][0] = s->p_mv_table[xy][0]; s->mv[0][0][1] = s->p_mv_table[xy][1]; encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb, &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); } if(mb_type&MB_TYPE_INTER4V){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_8X8; s->mb_intra= 0; for(i=0; i<4; i++){ s->mv[0][i][0] = s->motion_val[s->block_index[i]][0]; s->mv[0][i][1] = s->motion_val[s->block_index[i]][1]; } encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); } if(mb_type&MB_TYPE_FORWARD){ s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; s->mv[0][0][0] = s->b_forw_mv_table[xy][0]; s->mv[0][0][1] = s->b_forw_mv_table[xy][1]; encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb, &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]); } if(mb_type&MB_TYPE_BACKWARD){ s->mv_dir = MV_DIR_BACKWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; s->mv[1][0][0] = s->b_back_mv_table[xy][0]; s->mv[1][0][1] = s->b_back_mv_table[xy][1]; encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb, &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]); } if(mb_type&MB_TYPE_BIDIR){ s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; s->mv_type = MV_TYPE_16X16; s->mb_intra= 0; s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0]; s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1]; s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0]; s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1]; encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); } if(mb_type&MB_TYPE_DIRECT){ int mx= s->b_direct_mv_table[xy][0]; int my= s->b_direct_mv_table[xy][1]; s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; s->mb_intra= 0; #ifdef CONFIG_RISKY ff_mpeg4_set_direct_mv(s, mx, my); #endif encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb, &dmin, &next_block, mx, my); } if(mb_type&MB_TYPE_INTRA){ s->mv_dir = 0; s->mv_type = MV_TYPE_16X16; s->mb_intra= 1; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb, &dmin, &next_block, 0, 0); /* force cleaning of ac/dc pred stuff if needed ... */ if(s->h263_pred || s->h263_aic) s->mbintra_table[mb_x + mb_y*s->mb_width]=1; } copy_context_after_encode(s, &best_s, -1); pb_bits_count= get_bit_count(&s->pb); flush_put_bits(&s->pb); ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count); s->pb= backup_s.pb; if(s->data_partitioning){ pb2_bits_count= get_bit_count(&s->pb2); flush_put_bits(&s->pb2); ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count); s->pb2= backup_s.pb2; tex_pb_bits_count= get_bit_count(&s->tex_pb); flush_put_bits(&s->tex_pb); ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count); s->tex_pb= backup_s.tex_pb; } s->last_bits= get_bit_count(&s->pb); } else { int motion_x, motion_y; int intra_score; int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_width]; if(!(s->flags&CODEC_FLAG_HQ) && s->pict_type==P_TYPE){ /* get luma score */ if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){ intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_width]<<8) - 500; //FIXME dont scale it down so we dont have to fix it }else{ uint8_t *dest_y; int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_width]; //FIXME mean*= 0x01010101; dest_y = s->new_picture.data[0] + (mb_y * 16 * s->linesize ) + mb_x * 16; for(i=0; i<16; i++){ *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 0]) = mean; *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 4]) = mean; *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 8]) = mean; *(uint32_t*)(&s->me.scratchpad[i*s->linesize+12]) = mean; } s->mb_intra=1; intra_score= s->dsp.mb_cmp[0](s, s->me.scratchpad, dest_y, s->linesize); /* printf("intra:%7d inter:%7d var:%7d mc_var.%7d\n", intra_score>>8, inter_score>>8, s->current_picture.mb_var[mb_x + mb_y*s->mb_width], s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_width]);*/ } /* get chroma score */ if(s->avctx->mb_cmp&FF_CMP_CHROMA){ int i; s->mb_intra=1; for(i=1; i<3; i++){ uint8_t *dest_c; int mean; if(s->out_format == FMT_H263){ mean= (s->dc_val[i][mb_x + (mb_y+1)*(s->mb_width+2)] + 4)>>3; //FIXME not exact but simple ;) }else{ mean= (s->last_dc[i] + 4)>>3; } dest_c = s->new_picture.data[i] + (mb_y * 8 * (s->uvlinesize)) + mb_x * 8; mean*= 0x01010101; for(i=0; i<8; i++){ *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 0]) = mean; *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 4]) = mean; } intra_score+= s->dsp.mb_cmp[1](s, s->me.scratchpad, dest_c, s->uvlinesize); } } /* bias */ switch(s->avctx->mb_cmp&0xFF){ default: case FF_CMP_SAD: intra_score+= 32*s->qscale; break; case FF_CMP_SSE: intra_score+= 24*s->qscale*s->qscale; break; case FF_CMP_SATD: intra_score+= 96*s->qscale; break; case FF_CMP_DCT: intra_score+= 48*s->qscale; break; case FF_CMP_BIT: intra_score+= 16; break; case FF_CMP_PSNR: case FF_CMP_RD: intra_score+= (s->qscale*s->qscale*109*8 + 64)>>7; break; } if(intra_score < inter_score) mb_type= MB_TYPE_INTRA; } s->mv_type=MV_TYPE_16X16; // only one MB-Type possible switch(mb_type){ case MB_TYPE_INTRA: s->mv_dir = 0; s->mb_intra= 1; motion_x= s->mv[0][0][0] = 0; motion_y= s->mv[0][0][1] = 0; break; case MB_TYPE_INTER: s->mv_dir = MV_DIR_FORWARD; s->mb_intra= 0; motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0]; motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1]; break; case MB_TYPE_INTER4V: s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_8X8; s->mb_intra= 0; for(i=0; i<4; i++){ s->mv[0][i][0] = s->motion_val[s->block_index[i]][0]; s->mv[0][i][1] = s->motion_val[s->block_index[i]][1]; } motion_x= motion_y= 0; break; case MB_TYPE_DIRECT: s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT; s->mb_intra= 0; motion_x=s->b_direct_mv_table[xy][0]; motion_y=s->b_direct_mv_table[xy][1]; #ifdef CONFIG_RISKY ff_mpeg4_set_direct_mv(s, motion_x, motion_y); #endif break; case MB_TYPE_BIDIR: s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; s->mb_intra= 0; motion_x=0; motion_y=0; s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0]; s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1]; s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0]; s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1]; break; case MB_TYPE_BACKWARD: s->mv_dir = MV_DIR_BACKWARD; s->mb_intra= 0; motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0]; motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1]; break; case MB_TYPE_FORWARD: s->mv_dir = MV_DIR_FORWARD; s->mb_intra= 0; motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0]; motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1]; // printf(" %d %d ", motion_x, motion_y); break; default: motion_x=motion_y=0; //gcc warning fix printf("illegal MB type\n"); } encode_mb(s, motion_x, motion_y); // RAL: Update last macrobloc type s->last_mv_dir = s->mv_dir; } /* clean the MV table in IPS frames for direct mode in B frames */ if(s->mb_intra /* && I,P,S_TYPE */){ s->p_mv_table[xy][0]=0; s->p_mv_table[xy][1]=0; } MPV_decode_mb(s, s->block); if(s->flags&CODEC_FLAG_PSNR){ int w= 16; int h= 16; if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16; if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16; s->current_picture.error[0] += sse( s, s->new_picture .data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->current_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, w, h, s->linesize); s->current_picture.error[1] += sse( s, s->new_picture .data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8, s->current_picture.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8, w>>1, h>>1, s->uvlinesize); s->current_picture.error[2] += sse( s, s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8, s->current_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8, w>>1, h>>1, s->uvlinesize); } //printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_width, get_bit_count(&s->pb)); } /* Obtain average mb_row size for RTP */ if (s->rtp_mode) { if (mb_y==0) s->mb_line_avgsize = pbBufPtr(&s->pb) - s->ptr_last_mb_line; else { s->mb_line_avgsize = (s->mb_line_avgsize + pbBufPtr(&s->pb) - s->ptr_last_mb_line) >> 1; } s->ptr_last_mb_line = pbBufPtr(&s->pb); } } emms_c(); #ifdef CONFIG_RISKY if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame) ff_mpeg4_merge_partitions(s); if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE) msmpeg4_encode_ext_header(s); if(s->codec_id==CODEC_ID_MPEG4) ff_mpeg4_stuffing(&s->pb); #endif //if (s->gob_number) // fprintf(stderr,"\nNumber of GOB: %d", s->gob_number); /* Send the last GOB if RTP */ if (s->rtp_mode) { flush_put_bits(&s->pb); pdif = pbBufPtr(&s->pb) - s->ptr_lastgob; /* Call the RTP callback to send the last GOB */ if (s->rtp_callback) s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number); s->ptr_lastgob = pbBufPtr(&s->pb); //fprintf(stderr,"\nGOB: %2d size: %d (last)", s->gob_number, pdif); } }
20,976
0
static void *spapr_create_fdt_skel(hwaddr initrd_base, hwaddr initrd_size, hwaddr kernel_size, bool little_endian, const char *kernel_cmdline, uint32_t epow_irq) { void *fdt; uint32_t start_prop = cpu_to_be32(initrd_base); uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size); GString *hypertas = g_string_sized_new(256); GString *qemu_hypertas = g_string_sized_new(256); uint32_t refpoints[] = {cpu_to_be32(0x4), cpu_to_be32(0x4)}; uint32_t interrupt_server_ranges_prop[] = {0, cpu_to_be32(max_cpus)}; unsigned char vec5[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x80}; char *buf; add_str(hypertas, "hcall-pft"); add_str(hypertas, "hcall-term"); add_str(hypertas, "hcall-dabr"); add_str(hypertas, "hcall-interrupt"); add_str(hypertas, "hcall-tce"); add_str(hypertas, "hcall-vio"); add_str(hypertas, "hcall-splpar"); add_str(hypertas, "hcall-bulk"); add_str(hypertas, "hcall-set-mode"); add_str(hypertas, "hcall-sprg0"); add_str(hypertas, "hcall-copy"); add_str(hypertas, "hcall-debug"); add_str(qemu_hypertas, "hcall-memop1"); fdt = g_malloc0(FDT_MAX_SIZE); _FDT((fdt_create(fdt, FDT_MAX_SIZE))); if (kernel_size) { _FDT((fdt_add_reservemap_entry(fdt, KERNEL_LOAD_ADDR, kernel_size))); } if (initrd_size) { _FDT((fdt_add_reservemap_entry(fdt, initrd_base, initrd_size))); } _FDT((fdt_finish_reservemap(fdt))); /* Root node */ _FDT((fdt_begin_node(fdt, ""))); _FDT((fdt_property_string(fdt, "device_type", "chrp"))); _FDT((fdt_property_string(fdt, "model", "IBM pSeries (emulated by qemu)"))); _FDT((fdt_property_string(fdt, "compatible", "qemu,pseries"))); /* * Add info to guest to indentify which host is it being run on * and what is the uuid of the guest */ if (kvmppc_get_host_model(&buf)) { _FDT((fdt_property_string(fdt, "host-model", buf))); g_free(buf); } if (kvmppc_get_host_serial(&buf)) { _FDT((fdt_property_string(fdt, "host-serial", buf))); g_free(buf); } buf = g_strdup_printf(UUID_FMT, qemu_uuid[0], qemu_uuid[1], qemu_uuid[2], qemu_uuid[3], qemu_uuid[4], qemu_uuid[5], qemu_uuid[6], qemu_uuid[7], qemu_uuid[8], qemu_uuid[9], qemu_uuid[10], qemu_uuid[11], qemu_uuid[12], qemu_uuid[13], qemu_uuid[14], qemu_uuid[15]); _FDT((fdt_property_string(fdt, "vm,uuid", buf))); if (qemu_uuid_set) { _FDT((fdt_property_string(fdt, "system-id", buf))); } g_free(buf); if (qemu_get_vm_name()) { _FDT((fdt_property_string(fdt, "ibm,partition-name", qemu_get_vm_name()))); } _FDT((fdt_property_cell(fdt, "#address-cells", 0x2))); _FDT((fdt_property_cell(fdt, "#size-cells", 0x2))); /* /chosen */ _FDT((fdt_begin_node(fdt, "chosen"))); /* Set Form1_affinity */ _FDT((fdt_property(fdt, "ibm,architecture-vec-5", vec5, sizeof(vec5)))); _FDT((fdt_property_string(fdt, "bootargs", kernel_cmdline))); _FDT((fdt_property(fdt, "linux,initrd-start", &start_prop, sizeof(start_prop)))); _FDT((fdt_property(fdt, "linux,initrd-end", &end_prop, sizeof(end_prop)))); if (kernel_size) { uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR), cpu_to_be64(kernel_size) }; _FDT((fdt_property(fdt, "qemu,boot-kernel", &kprop, sizeof(kprop)))); if (little_endian) { _FDT((fdt_property(fdt, "qemu,boot-kernel-le", NULL, 0))); } } if (boot_menu) { _FDT((fdt_property_cell(fdt, "qemu,boot-menu", boot_menu))); } _FDT((fdt_property_cell(fdt, "qemu,graphic-width", graphic_width))); _FDT((fdt_property_cell(fdt, "qemu,graphic-height", graphic_height))); _FDT((fdt_property_cell(fdt, "qemu,graphic-depth", graphic_depth))); _FDT((fdt_end_node(fdt))); /* RTAS */ _FDT((fdt_begin_node(fdt, "rtas"))); if (!kvm_enabled() || kvmppc_spapr_use_multitce()) { add_str(hypertas, "hcall-multi-tce"); } _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas->str, hypertas->len))); g_string_free(hypertas, TRUE); _FDT((fdt_property(fdt, "qemu,hypertas-functions", qemu_hypertas->str, qemu_hypertas->len))); g_string_free(qemu_hypertas, TRUE); _FDT((fdt_property(fdt, "ibm,associativity-reference-points", refpoints, sizeof(refpoints)))); _FDT((fdt_property_cell(fdt, "rtas-error-log-max", RTAS_ERROR_LOG_MAX))); _FDT((fdt_property_cell(fdt, "rtas-event-scan-rate", RTAS_EVENT_SCAN_RATE))); if (msi_nonbroken) { _FDT((fdt_property(fdt, "ibm,change-msix-capable", NULL, 0))); } /* * According to PAPR, rtas ibm,os-term does not guarantee a return * back to the guest cpu. * * While an additional ibm,extended-os-term property indicates that * rtas call return will always occur. Set this property. */ _FDT((fdt_property(fdt, "ibm,extended-os-term", NULL, 0))); _FDT((fdt_end_node(fdt))); /* interrupt controller */ _FDT((fdt_begin_node(fdt, "interrupt-controller"))); _FDT((fdt_property_string(fdt, "device_type", "PowerPC-External-Interrupt-Presentation"))); _FDT((fdt_property_string(fdt, "compatible", "IBM,ppc-xicp"))); _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0))); _FDT((fdt_property(fdt, "ibm,interrupt-server-ranges", interrupt_server_ranges_prop, sizeof(interrupt_server_ranges_prop)))); _FDT((fdt_property_cell(fdt, "#interrupt-cells", 2))); _FDT((fdt_property_cell(fdt, "linux,phandle", PHANDLE_XICP))); _FDT((fdt_property_cell(fdt, "phandle", PHANDLE_XICP))); _FDT((fdt_end_node(fdt))); /* vdevice */ _FDT((fdt_begin_node(fdt, "vdevice"))); _FDT((fdt_property_string(fdt, "device_type", "vdevice"))); _FDT((fdt_property_string(fdt, "compatible", "IBM,vdevice"))); _FDT((fdt_property_cell(fdt, "#address-cells", 0x1))); _FDT((fdt_property_cell(fdt, "#size-cells", 0x0))); _FDT((fdt_property_cell(fdt, "#interrupt-cells", 0x2))); _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0))); _FDT((fdt_end_node(fdt))); /* event-sources */ spapr_events_fdt_skel(fdt, epow_irq); /* /hypervisor node */ if (kvm_enabled()) { uint8_t hypercall[16]; /* indicate KVM hypercall interface */ _FDT((fdt_begin_node(fdt, "hypervisor"))); _FDT((fdt_property_string(fdt, "compatible", "linux,kvm"))); if (kvmppc_has_cap_fixup_hcalls()) { /* * Older KVM versions with older guest kernels were broken with the * magic page, don't allow the guest to map it. */ if (!kvmppc_get_hypercall(first_cpu->env_ptr, hypercall, sizeof(hypercall))) { _FDT((fdt_property(fdt, "hcall-instructions", hypercall, sizeof(hypercall)))); } } _FDT((fdt_end_node(fdt))); } _FDT((fdt_end_node(fdt))); /* close root node */ _FDT((fdt_finish(fdt))); return fdt; }
20,977
0
static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp, QEMUTimer *timer, void (*raise_excp)(PowerPCCPU *), uint32_t decr, uint32_t value, int is_excp) { CPUPPCState *env = &cpu->env; ppc_tb_t *tb_env = env->tb_env; uint64_t now, next; LOG_TB("%s: %08" PRIx32 " => %08" PRIx32 "\n", __func__, decr, value); if (kvm_enabled()) { /* KVM handles decrementer exceptions, we don't need our own timer */ return; } now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); next = now + muldiv64(value, get_ticks_per_sec(), tb_env->decr_freq); if (is_excp) { next += *nextp - now; } if (next == now) { next++; } *nextp = next; /* Adjust timer */ timer_mod(timer, next); /* If we set a negative value and the decrementer was positive, raise an * exception. */ if ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && (value & 0x80000000) && !(decr & 0x80000000)) { (*raise_excp)(cpu); } }
20,978
0
static void qxl_spice_destroy_surfaces(PCIQXLDevice *qxl, qxl_async_io async) { if (async) { #if SPICE_INTERFACE_QXL_MINOR < 1 abort(); #else spice_qxl_destroy_surfaces_async(&qxl->ssd.qxl, 0); #endif } else { qxl->ssd.worker->destroy_surfaces(qxl->ssd.worker); qxl_spice_destroy_surfaces_complete(qxl); } }
20,979
0
static inline void gen_neon_mull(TCGv dest, TCGv a, TCGv b, int size, int u) { TCGv tmp; switch ((size << 1) | u) { case 0: gen_helper_neon_mull_s8(dest, a, b); break; case 1: gen_helper_neon_mull_u8(dest, a, b); break; case 2: gen_helper_neon_mull_s16(dest, a, b); break; case 3: gen_helper_neon_mull_u16(dest, a, b); break; case 4: tmp = gen_muls_i64_i32(a, b); tcg_gen_mov_i64(dest, tmp); break; case 5: tmp = gen_mulu_i64_i32(a, b); tcg_gen_mov_i64(dest, tmp); break; default: abort(); } if (size < 2) { dead_tmp(b); dead_tmp(a); } }
20,980
0
static void imx_timerp_write(void *opaque, target_phys_addr_t offset, uint64_t value, unsigned size) { IMXTimerPState *s = (IMXTimerPState *)opaque; DPRINTF("p-write(offset=%x, value = %x)\n", (unsigned int)offset >> 2, (unsigned int)value); switch (offset >> 2) { case 0: /* CR */ if (value & CR_SWR) { imx_timerp_reset(&s->busdev.qdev); value &= ~CR_SWR; } s->cr = value & 0x03ffffff; set_timerp_freq(s); if (s->freq && (s->cr & CR_EN)) { if (!(s->cr & CR_ENMOD)) { ptimer_set_count(s->timer, s->lr); } ptimer_run(s->timer, 0); } else { ptimer_stop(s->timer); } break; case 1: /* SR - ACK*/ s->int_level = 0; imx_timerp_update(s); break; case 2: /* LR - set ticks */ s->lr = value; ptimer_set_limit(s->timer, value, !!(s->cr & CR_IOVW)); break; case 3: /* CMP */ s->cmp = value; if (value) { IPRINTF( "Values for EPIT comparison other than zero not supported\n" ); } break; default: IPRINTF("imx_timerp_write: Bad offset %x\n", (int)offset >> 2); } }
20,982
0
static int nbd_co_send_reply(NBDRequest *req, struct nbd_reply *reply, int len) { NBDClient *client = req->client; int csock = client->sock; int rc, ret; qemu_co_mutex_lock(&client->send_lock); qemu_set_fd_handler2(csock, NULL, nbd_read, nbd_restart_write, client); client->send_coroutine = qemu_coroutine_self(); if (!len) { rc = nbd_send_reply(csock, reply); if (rc == -1) { rc = -errno; } } else { socket_set_cork(csock, 1); rc = nbd_send_reply(csock, reply); if (rc != -1) { ret = qemu_co_send(csock, req->data, len); if (ret != len) { errno = EIO; rc = -1; } } if (rc == -1) { rc = -errno; } socket_set_cork(csock, 0); } client->send_coroutine = NULL; qemu_set_fd_handler2(csock, NULL, nbd_read, NULL, client); qemu_co_mutex_unlock(&client->send_lock); return rc; }
20,983
0
static uint32_t get_features(VirtIODevice *vdev, uint32_t features) { VirtIOSerial *vser; vser = VIRTIO_SERIAL(vdev); if (vser->bus.max_nr_ports > 1) { features |= (1 << VIRTIO_CONSOLE_F_MULTIPORT); } return features; }
20,984
0
static int proxy_fsync(FsContext *ctx, int fid_type, V9fsFidOpenState *fs, int datasync) { int fd; if (fid_type == P9_FID_DIR) { fd = dirfd(fs->dir); } else { fd = fs->fd; } if (datasync) { return qemu_fdatasync(fd); } else { return fsync(fd); } }
20,985
0
static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, uint32_t *value, uint32_t valid_mask) { XenPTRegInfo *reg = cfg_entry->reg; uint32_t valid_emu_mask = 0; uint32_t bar_emu_mask = 0; int index; /* get BAR index */ index = xen_pt_bar_offset_to_index(reg->offset); if (index < 0 || index >= PCI_NUM_REGIONS - 1) { XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index); return -1; } /* use fixed-up value from kernel sysfs */ *value = base_address_with_flags(&s->real_device.io_regions[index]); /* set emulate mask depend on BAR flag */ switch (s->bases[index].bar_flag) { case XEN_PT_BAR_FLAG_MEM: bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK; break; case XEN_PT_BAR_FLAG_IO: bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK; break; case XEN_PT_BAR_FLAG_UPPER: bar_emu_mask = XEN_PT_BAR_ALLF; break; default: break; } /* emulate BAR */ valid_emu_mask = bar_emu_mask & valid_mask; *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); return 0; }
20,986
0
uint32_t ssi_transfer(SSIBus *bus, uint32_t val) { DeviceState *dev; SSISlave *slave; dev = LIST_FIRST(&bus->qbus.children); if (!dev) { return 0; } slave = SSI_SLAVE_FROM_QDEV(dev); return slave->info->transfer(slave, val); }
20,988
0
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base, int64_t sector_num, int nb_sectors, int *pnum) { BlockDriverState *intermediate; int ret, n = nb_sectors; intermediate = top; while (intermediate && intermediate != base) { int pnum_inter; ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors, &pnum_inter); if (ret < 0) { return ret; } else if (ret) { *pnum = pnum_inter; return 1; } /* * [sector_num, nb_sectors] is unallocated on top but intermediate * might have * * [sector_num+x, nr_sectors] allocated. */ if (n > pnum_inter && (intermediate == top || sector_num + pnum_inter < intermediate->total_sectors)) { n = pnum_inter; } intermediate = intermediate->backing_hd; } *pnum = n; return 0; }
20,989
0
static void do_info_kqemu(Monitor *mon) { #ifdef CONFIG_KQEMU CPUState *env; int val; val = 0; env = mon_get_cpu(); if (!env) { monitor_printf(mon, "No cpu initialized yet"); return; } val = env->kqemu_enabled; monitor_printf(mon, "kqemu support: "); switch(val) { default: case 0: monitor_printf(mon, "disabled\n"); break; case 1: monitor_printf(mon, "enabled for user code\n"); break; case 2: monitor_printf(mon, "enabled for user and kernel code\n"); break; } #else monitor_printf(mon, "kqemu support: not compiled\n"); #endif }
20,990