Columns (each record lists one value per column, in this order):
  id          int32    values 0 to 27.3k
  func        string   lengths 26 to 142k characters
  target      bool     2 classes
  project     string   2 values (FFmpeg, qemu)
  commit_id   string   length 40 (full git commit hash)
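This shape matches a code-defect corpus in the Devign/CodeXGLUE style: each record pairs one C function with a boolean label, the source project, and the git commit the function was taken from. Assuming the records are stored one JSON object per line with exactly the five columns above (the path "functions.jsonl" and the JSON-lines storage format are assumptions, not stated by this dump), a minimal loading-and-tallying sketch could look like:

    import json
    from collections import Counter

    # Minimal sketch: one JSON object per line, carrying the five columns
    # described above (id, func, target, project, commit_id).
    def load_records(path):
        with open(path, encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line:
                    yield json.loads(line)

    counts = Counter()
    for rec in load_records("functions.jsonl"):
        # target is one of two boolean classes; project is "FFmpeg" or "qemu".
        counts[(rec["project"], rec["target"])] += 1

    for (project, target), n in sorted(counts.items()):
        print(f"{project}\ttarget={target}\tcount={n}")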
id: 24628
void ff_put_h264_qpel8_mc13_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_hv_qrt_8w_msa(src + stride - 2, src - (stride * 2), stride, dst, stride, 8); }
target: false | project: FFmpeg | commit_id: 2aab7c2dfaca4386c38e5d565cd2bf73096bcc86
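Read against the column list, a record like the one above unpacks into five fields. As an illustration only (the func body is abbreviated here; the other values are copied verbatim from the record):

    record = {
        "id": 24628,
        "func": "void ff_put_h264_qpel8_mc13_msa(uint8_t *dst, "
                "const uint8_t *src, ptrdiff_t stride) { ... }",  # abbreviated
        "target": False,
        "project": "FFmpeg",
        "commit_id": "2aab7c2dfaca4386c38e5d565cd2bf73096bcc86",
    }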
id: 24630
static void kvm_arm_gic_get(GICState *s) { uint32_t reg; int i; int cpu; if (!kvm_arm_gic_can_save_restore(s)) { DPRINTF("Cannot get kernel gic state, no kernel interface"); return; } /***************************************************************** * Distributor State */ /* GICD_CTLR -> s->enabled */ kvm_gicd_access(s, 0x0, 0, &reg, false); s->enabled = reg & 1; /* Sanity checking on GICD_TYPER -> s->num_irq, s->num_cpu */ kvm_gicd_access(s, 0x4, 0, &reg, false); s->num_irq = ((reg & 0x1f) + 1) * 32; s->num_cpu = ((reg & 0xe0) >> 5) + 1; if (s->num_irq > GIC_MAXIRQ) { fprintf(stderr, "Too many IRQs reported from the kernel: %d\n", s->num_irq); abort(); } /* GICD_IIDR -> ? */ kvm_gicd_access(s, 0x8, 0, &reg, false); /* Verify no GROUP 1 interrupts configured in the kernel */ for_each_irq_reg(i, s->num_irq, 1) { kvm_gicd_access(s, 0x80 + (i * 4), 0, &reg, false); if (reg != 0) { fprintf(stderr, "Unsupported GICD_IGROUPRn value: %08x\n", reg); abort(); } } /* Clear all the IRQ settings */ for (i = 0; i < s->num_irq; i++) { memset(&s->irq_state[i], 0, sizeof(s->irq_state[0])); } /* GICD_ISENABLERn -> irq_state[n].enabled */ kvm_dist_get(s, 0x100, 1, s->num_irq, translate_enabled); /* GICD_ISPENDRn -> irq_state[n].pending + irq_state[n].level */ kvm_dist_get(s, 0x200, 1, s->num_irq, translate_pending); /* GICD_ISACTIVERn -> irq_state[n].active */ kvm_dist_get(s, 0x300, 1, s->num_irq, translate_active); /* GICD_ICFRn -> irq_state[n].trigger */ kvm_dist_get(s, 0xc00, 2, s->num_irq, translate_trigger); /* GICD_IPRIORITYRn -> s->priorityX[irq] */ kvm_dist_get(s, 0x400, 8, s->num_irq, translate_priority); /* GICD_ITARGETSRn -> s->irq_target[irq] */ kvm_dist_get(s, 0x800, 8, s->num_irq, translate_targets); /* GICD_CPENDSGIRn -> s->sgi_pending */ kvm_dist_get(s, 0xf10, 8, GIC_NR_SGIS, translate_sgisource); /***************************************************************** * CPU Interface(s) State */ for (cpu = 0; cpu < s->num_cpu; cpu++) { /* GICC_CTLR -> s->cpu_enabled[cpu] */ kvm_gicc_access(s, 0x00, cpu, &reg, false); s->cpu_enabled[cpu] = (reg & 1); /* GICC_PMR -> s->priority_mask[cpu] */ kvm_gicc_access(s, 0x04, cpu, &reg, false); s->priority_mask[cpu] = (reg & 0xff); /* GICC_BPR -> s->bpr[cpu] */ kvm_gicc_access(s, 0x08, cpu, &reg, false); s->bpr[cpu] = (reg & 0x7); /* GICC_ABPR -> s->abpr[cpu] */ kvm_gicc_access(s, 0x1c, cpu, &reg, false); s->abpr[cpu] = (reg & 0x7); /* GICC_APRn -> s->apr[n][cpu] */ for (i = 0; i < 4; i++) { kvm_gicc_access(s, 0xd0 + i * 4, cpu, &reg, false); s->apr[i][cpu] = reg; } } }
target: true | project: qemu | commit_id: eb8b9530b0c618d4f2e728eae10d89239d35b0c0
id: 24633
static void xvid_idct_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block) { ff_xvid_idct(block); ff_add_pixels_clamped(block, dest, line_size); }
target: true | project: FFmpeg | commit_id: 32baeafeee4f8446c2c3720b9223ad2166ca9d30
id: 24634
static void print_syscall_ret_addr(const struct syscallname *name, abi_long ret) { char *errstr = NULL; if (ret == -1) { errstr = target_strerror(errno); } if ((ret == -1) && errstr) { gemu_log(" = -1 errno=%d (%s)\n", errno, errstr); } else { gemu_log(" = 0x" TARGET_ABI_FMT_lx "\n", ret); } }
target: true | project: qemu | commit_id: 2a7e12455c1d388e41f4c8d2231fb48a968792cd
id: 24636
static int rv30_decode_mb_info(RV34DecContext *r) { static const int rv30_p_types[6] = { RV34_MB_SKIP, RV34_MB_P_16x16, RV34_MB_P_8x8, -1, RV34_MB_TYPE_INTRA, RV34_MB_TYPE_INTRA16x16 }; static const int rv30_b_types[6] = { RV34_MB_SKIP, RV34_MB_B_DIRECT, RV34_MB_B_FORWARD, RV34_MB_B_BACKWARD, RV34_MB_TYPE_INTRA, RV34_MB_TYPE_INTRA16x16 }; MpegEncContext *s = &r->s; GetBitContext *gb = &s->gb; int code = svq3_get_ue_golomb(gb); if(code > 11){ av_log(s->avctx, AV_LOG_ERROR, "Incorrect MB type code\n"); return -1; } if(code > 5){ av_log(s->avctx, AV_LOG_ERROR, "dquant needed\n"); code -= 6; } if(s->pict_type != AV_PICTURE_TYPE_B) return rv30_p_types[code]; else return rv30_b_types[code]; }
target: true | project: FFmpeg | commit_id: 18d1d5886bb78e4d0e11a2a0193fda765e05805d
id: 24637
static void send_framebuffer_update_hextile(VncState *vs, int x, int y, int w, int h) { int i, j; int has_fg, has_bg; uint8_t *last_fg, *last_bg; vnc_framebuffer_update(vs, x, y, w, h, 5); last_fg = (uint8_t *) malloc(vs->depth); last_bg = (uint8_t *) malloc(vs->depth); has_fg = has_bg = 0; for (j = y; j < (y + h); j += 16) { for (i = x; i < (x + w); i += 16) { vs->send_hextile_tile(vs, i, j, MIN(16, x + w - i), MIN(16, y + h - j), last_bg, last_fg, &has_bg, &has_fg); } } free(last_fg); free(last_bg); }
target: true | project: qemu | commit_id: 6cec5487990bf3f1f22b3fcb871978255e92ae0d
id: 24638
static av_cold int j2kenc_init(AVCodecContext *avctx) { int i, ret; Jpeg2000EncoderContext *s = avctx->priv_data; Jpeg2000CodingStyle *codsty = &s->codsty; Jpeg2000QuantStyle *qntsty = &s->qntsty; s->avctx = avctx; av_log(s->avctx, AV_LOG_DEBUG, "init\n"); // defaults: // TODO: implement setting non-standard precinct size memset(codsty->log2_prec_widths , 15, sizeof(codsty->log2_prec_widths )); memset(codsty->log2_prec_heights, 15, sizeof(codsty->log2_prec_heights)); codsty->nreslevels2decode= codsty->nreslevels = 7; codsty->log2_cblk_width = 4; codsty->log2_cblk_height = 4; codsty->transform = avctx->prediction_method ? FF_DWT53 : FF_DWT97_INT; qntsty->nguardbits = 1; s->tile_width = 256; s->tile_height = 256; if (codsty->transform == FF_DWT53) qntsty->quantsty = JPEG2000_QSTY_NONE; else qntsty->quantsty = JPEG2000_QSTY_SE; s->width = avctx->width; s->height = avctx->height; for (i = 0; i < 3; i++) s->cbps[i] = 8; if (avctx->pix_fmt == AV_PIX_FMT_RGB24){ s->ncomponents = 3; } else if (avctx->pix_fmt == AV_PIX_FMT_GRAY8){ s->ncomponents = 1; } else{ // planar YUV s->planar = 1; s->ncomponents = 3; avcodec_get_chroma_sub_sample(avctx->pix_fmt, s->chroma_shift, s->chroma_shift + 1); } ff_jpeg2000_init_tier1_luts(); ff_mqc_init_context_tables(); init_luts(); init_quantization(s); if (ret=init_tiles(s)) return ret; av_log(s->avctx, AV_LOG_DEBUG, "after init\n"); return 0; }
target: true | project: FFmpeg | commit_id: 2580bae54a45d6aaf85ddc5e780389e7e90b2c86
id: 24639
static void ppc405_ocm_init(CPUPPCState *env) { ppc405_ocm_t *ocm; ocm = g_malloc0(sizeof(ppc405_ocm_t)); /* XXX: Size is 4096 or 0x04000000 */ memory_region_init_ram(&ocm->isarc_ram, NULL, "ppc405.ocm", 4096, &error_abort); vmstate_register_ram_global(&ocm->isarc_ram); memory_region_init_alias(&ocm->dsarc_ram, NULL, "ppc405.dsarc", &ocm->isarc_ram, 0, 4096); qemu_register_reset(&ocm_reset, ocm); ppc_dcr_register(env, OCM0_ISARC, ocm, &dcr_read_ocm, &dcr_write_ocm); ppc_dcr_register(env, OCM0_ISACNTL, ocm, &dcr_read_ocm, &dcr_write_ocm); ppc_dcr_register(env, OCM0_DSARC, ocm, &dcr_read_ocm, &dcr_write_ocm); ppc_dcr_register(env, OCM0_DSACNTL, ocm, &dcr_read_ocm, &dcr_write_ocm); }
target: true | project: qemu | commit_id: f8ed85ac992c48814d916d5df4d44f9a971c5de4
id: 24640
static int serial_load(QEMUFile *f, void *opaque, int version_id) { SerialState *s = opaque; if(version_id > 2) return -EINVAL; if (version_id >= 2) qemu_get_be16s(f, &s->divider); else s->divider = qemu_get_byte(f); qemu_get_8s(f,&s->rbr); qemu_get_8s(f,&s->ier); qemu_get_8s(f,&s->iir); qemu_get_8s(f,&s->lcr); qemu_get_8s(f,&s->mcr); qemu_get_8s(f,&s->lsr); qemu_get_8s(f,&s->msr); qemu_get_8s(f,&s->scr); return 0; }
target: true | project: qemu | commit_id: 81174dae3f9189519cd60c7b79e91c291b021bbe
id: 24641
void ff_release_unused_pictures(MpegEncContext *s, int remove_current) { int i; /* release non reference frames */ for(i=0; i<s->picture_count; i++){ if(s->picture[i].data[0] && !s->picture[i].reference && s->picture[i].owner2 == s && (remove_current || &s->picture[i] != s->current_picture_ptr) /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){ free_frame_buffer(s, &s->picture[i]); } } }
target: true | project: FFmpeg | commit_id: 0424e052f83adc422d8a746e3cdc5ab6bc28679e
id: 24642
static inline void vmsvga_update_rect(struct vmsvga_state_s *s, int x, int y, int w, int h) { #ifndef DIRECT_VRAM int line = h; int bypl = s->bypp * s->width; int width = s->bypp * w; int start = s->bypp * x + bypl * y; uint8_t *src = s->vram + start; uint8_t *dst = s->ds->data + start; for (; line > 0; line --, src += bypl, dst += bypl) memcpy(dst, src, width); #endif dpy_update(s->ds, x, y, w, h); }
target: true | project: qemu | commit_id: a8fbaf96e0791d72078d22b75c5f3c1f1d1ee45d
id: 24643
static int os_host_main_loop_wait(uint32_t timeout) { GMainContext *context = g_main_context_default(); int select_ret, g_poll_ret, ret, i; PollingEntry *pe; WaitObjects *w = &wait_objects; gint poll_timeout; static struct timeval tv0; /* XXX: need to suppress polling by better using win32 events */ ret = 0; for (pe = first_polling_entry; pe != NULL; pe = pe->next) { ret |= pe->func(pe->opaque); } if (ret != 0) { return ret; } g_main_context_prepare(context, &max_priority); n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout, poll_fds, ARRAY_SIZE(poll_fds)); g_assert(n_poll_fds <= ARRAY_SIZE(poll_fds)); for (i = 0; i < w->num; i++) { poll_fds[n_poll_fds + i].fd = (DWORD_PTR)w->events[i]; poll_fds[n_poll_fds + i].events = G_IO_IN; } if (poll_timeout < 0 || timeout < poll_timeout) { poll_timeout = timeout; } qemu_mutex_unlock_iothread(); g_poll_ret = g_poll(poll_fds, n_poll_fds + w->num, poll_timeout); qemu_mutex_lock_iothread(); if (g_poll_ret > 0) { for (i = 0; i < w->num; i++) { w->revents[i] = poll_fds[n_poll_fds + i].revents; } for (i = 0; i < w->num; i++) { if (w->revents[i] && w->func[i]) { w->func[i](w->opaque[i]); } } } if (g_main_context_check(context, max_priority, poll_fds, n_poll_fds)) { g_main_context_dispatch(context); } /* Call select after g_poll to avoid a useless iteration and therefore * improve socket latency. */ if (nfds >= 0) { select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0); if (select_ret != 0) { timeout = 0; } } return select_ret || g_poll_ret; }
target: true | project: qemu | commit_id: 134a03e0b3d34b01b68107104c525c3bff1211d4
id: 24645
static void *spapr_create_fdt_skel(const char *cpu_model, target_phys_addr_t rma_size, target_phys_addr_t initrd_base, target_phys_addr_t initrd_size, const char *boot_device, const char *kernel_cmdline, long hash_shift) { void *fdt; CPUState *env; uint64_t mem_reg_property[2]; uint32_t start_prop = cpu_to_be32(initrd_base); uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size); uint32_t pft_size_prop[] = {0, cpu_to_be32(hash_shift)}; char hypertas_prop[] = "hcall-pft\0hcall-term\0hcall-dabr\0hcall-interrupt" "\0hcall-tce\0hcall-vio\0hcall-splpar\0hcall-bulk"; uint32_t interrupt_server_ranges_prop[] = {0, cpu_to_be32(smp_cpus)}; int i; char *modelname; int smt = kvmppc_smt_threads(); unsigned char vec5[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x80}; uint32_t refpoints[] = {cpu_to_be32(0x4), cpu_to_be32(0x4)}; uint32_t associativity[] = {cpu_to_be32(0x4), cpu_to_be32(0x0), cpu_to_be32(0x0), cpu_to_be32(0x0), cpu_to_be32(0x0)}; char mem_name[32]; target_phys_addr_t node0_size, mem_start; #define _FDT(exp) \ do { \ int ret = (exp); \ if (ret < 0) { \ fprintf(stderr, "qemu: error creating device tree: %s: %s\n", \ #exp, fdt_strerror(ret)); \ exit(1); \ } \ } while (0) fdt = g_malloc0(FDT_MAX_SIZE); _FDT((fdt_create(fdt, FDT_MAX_SIZE))); _FDT((fdt_finish_reservemap(fdt))); /* Root node */ _FDT((fdt_begin_node(fdt, ""))); _FDT((fdt_property_string(fdt, "device_type", "chrp"))); _FDT((fdt_property_string(fdt, "model", "IBM pSeries (emulated by qemu)"))); _FDT((fdt_property_cell(fdt, "#address-cells", 0x2))); _FDT((fdt_property_cell(fdt, "#size-cells", 0x2))); /* /chosen */ _FDT((fdt_begin_node(fdt, "chosen"))); /* Set Form1_affinity */ _FDT((fdt_property(fdt, "ibm,architecture-vec-5", vec5, sizeof(vec5)))); _FDT((fdt_property_string(fdt, "bootargs", kernel_cmdline))); _FDT((fdt_property(fdt, "linux,initrd-start", &start_prop, sizeof(start_prop)))); _FDT((fdt_property(fdt, "linux,initrd-end", &end_prop, sizeof(end_prop)))); _FDT((fdt_property_string(fdt, "qemu,boot-device", boot_device))); /* * Because we don't always invoke any firmware, we can't rely on * that to do BAR allocation. Long term, we should probably do * that ourselves, but for now, this setting (plus advertising the * current BARs as 0) causes sufficiently recent kernels to to the * BAR assignment themselves */ _FDT((fdt_property_cell(fdt, "linux,pci-probe-only", 0))); _FDT((fdt_end_node(fdt))); /* memory node(s) */ node0_size = (nb_numa_nodes > 1) ? 
node_mem[0] : ram_size; if (rma_size > node0_size) { rma_size = node0_size; } /* RMA */ mem_reg_property[0] = 0; mem_reg_property[1] = cpu_to_be64(rma_size); _FDT((fdt_begin_node(fdt, "memory@0"))); _FDT((fdt_property_string(fdt, "device_type", "memory"))); _FDT((fdt_property(fdt, "reg", mem_reg_property, sizeof(mem_reg_property)))); _FDT((fdt_property(fdt, "ibm,associativity", associativity, sizeof(associativity)))); _FDT((fdt_end_node(fdt))); /* RAM: Node 0 */ if (node0_size > rma_size) { mem_reg_property[0] = cpu_to_be64(rma_size); mem_reg_property[1] = cpu_to_be64(node0_size - rma_size); sprintf(mem_name, "memory@" TARGET_FMT_lx, rma_size); _FDT((fdt_begin_node(fdt, mem_name))); _FDT((fdt_property_string(fdt, "device_type", "memory"))); _FDT((fdt_property(fdt, "reg", mem_reg_property, sizeof(mem_reg_property)))); _FDT((fdt_property(fdt, "ibm,associativity", associativity, sizeof(associativity)))); _FDT((fdt_end_node(fdt))); } /* RAM: Node 1 and beyond */ mem_start = node0_size; for (i = 1; i < nb_numa_nodes; i++) { mem_reg_property[0] = cpu_to_be64(mem_start); mem_reg_property[1] = cpu_to_be64(node_mem[i]); associativity[3] = associativity[4] = cpu_to_be32(i); sprintf(mem_name, "memory@" TARGET_FMT_lx, mem_start); _FDT((fdt_begin_node(fdt, mem_name))); _FDT((fdt_property_string(fdt, "device_type", "memory"))); _FDT((fdt_property(fdt, "reg", mem_reg_property, sizeof(mem_reg_property)))); _FDT((fdt_property(fdt, "ibm,associativity", associativity, sizeof(associativity)))); _FDT((fdt_end_node(fdt))); mem_start += node_mem[i]; } /* cpus */ _FDT((fdt_begin_node(fdt, "cpus"))); _FDT((fdt_property_cell(fdt, "#address-cells", 0x1))); _FDT((fdt_property_cell(fdt, "#size-cells", 0x0))); modelname = g_strdup(cpu_model); for (i = 0; i < strlen(modelname); i++) { modelname[i] = toupper(modelname[i]); } /* This is needed during FDT finalization */ spapr->cpu_model = g_strdup(modelname); for (env = first_cpu; env != NULL; env = env->next_cpu) { int index = env->cpu_index; uint32_t servers_prop[smp_threads]; uint32_t gservers_prop[smp_threads * 2]; char *nodename; uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40), 0xffffffff, 0xffffffff}; uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq() : TIMEBASE_FREQ; uint32_t cpufreq = kvm_enabled() ? 
kvmppc_get_clockfreq() : 1000000000; if ((index % smt) != 0) { continue; } if (asprintf(&nodename, "%s@%x", modelname, index) < 0) { fprintf(stderr, "Allocation failure\n"); exit(1); } _FDT((fdt_begin_node(fdt, nodename))); free(nodename); _FDT((fdt_property_cell(fdt, "reg", index))); _FDT((fdt_property_string(fdt, "device_type", "cpu"))); _FDT((fdt_property_cell(fdt, "cpu-version", env->spr[SPR_PVR]))); _FDT((fdt_property_cell(fdt, "dcache-block-size", env->dcache_line_size))); _FDT((fdt_property_cell(fdt, "icache-block-size", env->icache_line_size))); _FDT((fdt_property_cell(fdt, "timebase-frequency", tbfreq))); _FDT((fdt_property_cell(fdt, "clock-frequency", cpufreq))); _FDT((fdt_property_cell(fdt, "ibm,slb-size", env->slb_nr))); _FDT((fdt_property(fdt, "ibm,pft-size", pft_size_prop, sizeof(pft_size_prop)))); _FDT((fdt_property_string(fdt, "status", "okay"))); _FDT((fdt_property(fdt, "64-bit", NULL, 0))); /* Build interrupt servers and gservers properties */ for (i = 0; i < smp_threads; i++) { servers_prop[i] = cpu_to_be32(index + i); /* Hack, direct the group queues back to cpu 0 */ gservers_prop[i*2] = cpu_to_be32(index + i); gservers_prop[i*2 + 1] = 0; } _FDT((fdt_property(fdt, "ibm,ppc-interrupt-server#s", servers_prop, sizeof(servers_prop)))); _FDT((fdt_property(fdt, "ibm,ppc-interrupt-gserver#s", gservers_prop, sizeof(gservers_prop)))); if (env->mmu_model & POWERPC_MMU_1TSEG) { _FDT((fdt_property(fdt, "ibm,processor-segment-sizes", segs, sizeof(segs)))); } /* Advertise VMX/VSX (vector extensions) if available * 0 / no property == no vector extensions * 1 == VMX / Altivec available * 2 == VSX available */ if (env->insns_flags & PPC_ALTIVEC) { uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 2 : 1; _FDT((fdt_property_cell(fdt, "ibm,vmx", vmx))); } /* Advertise DFP (Decimal Floating Point) if available * 0 / no property == no DFP * 1 == DFP available */ if (env->insns_flags2 & PPC2_DFP) { _FDT((fdt_property_cell(fdt, "ibm,dfp", 1))); } _FDT((fdt_end_node(fdt))); } g_free(modelname); _FDT((fdt_end_node(fdt))); /* RTAS */ _FDT((fdt_begin_node(fdt, "rtas"))); _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas_prop, sizeof(hypertas_prop)))); _FDT((fdt_property(fdt, "ibm,associativity-reference-points", refpoints, sizeof(refpoints)))); _FDT((fdt_end_node(fdt))); /* interrupt controller */ _FDT((fdt_begin_node(fdt, "interrupt-controller"))); _FDT((fdt_property_string(fdt, "device_type", "PowerPC-External-Interrupt-Presentation"))); _FDT((fdt_property_string(fdt, "compatible", "IBM,ppc-xicp"))); _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0))); _FDT((fdt_property(fdt, "ibm,interrupt-server-ranges", interrupt_server_ranges_prop, sizeof(interrupt_server_ranges_prop)))); _FDT((fdt_property_cell(fdt, "#interrupt-cells", 2))); _FDT((fdt_property_cell(fdt, "linux,phandle", PHANDLE_XICP))); _FDT((fdt_property_cell(fdt, "phandle", PHANDLE_XICP))); _FDT((fdt_end_node(fdt))); /* vdevice */ _FDT((fdt_begin_node(fdt, "vdevice"))); _FDT((fdt_property_string(fdt, "device_type", "vdevice"))); _FDT((fdt_property_string(fdt, "compatible", "IBM,vdevice"))); _FDT((fdt_property_cell(fdt, "#address-cells", 0x1))); _FDT((fdt_property_cell(fdt, "#size-cells", 0x0))); _FDT((fdt_property_cell(fdt, "#interrupt-cells", 0x2))); _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0))); _FDT((fdt_end_node(fdt))); _FDT((fdt_end_node(fdt))); /* close root node */ _FDT((fdt_finish(fdt))); return fdt; }
target: true | project: qemu | commit_id: 4d8d5467cd6e324fb49ae97b9d5dcee3973d9a19
id: 24646
static int mov_read_stsc(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom) { AVStream *st = c->fc->streams[c->fc->nb_streams-1]; MOVStreamContext *sc = (MOVStreamContext *)st->priv_data; int entries, i; print_atom("stsc", atom); get_byte(pb); /* version */ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */ entries = get_be32(pb); #ifdef DEBUG av_log(NULL, AV_LOG_DEBUG, "track[%i].stsc.entries = %i\n", c->fc->nb_streams-1, entries); #endif sc->sample_to_chunk_sz = entries; sc->sample_to_chunk = (MOV_sample_to_chunk_tbl*) av_malloc(entries * sizeof(MOV_sample_to_chunk_tbl)); if (!sc->sample_to_chunk) return -1; for(i=0; i<entries; i++) { sc->sample_to_chunk[i].first = get_be32(pb); sc->sample_to_chunk[i].count = get_be32(pb); sc->sample_to_chunk[i].id = get_be32(pb); #ifdef DEBUG /* av_log(NULL, AV_LOG_DEBUG, "sample_to_chunk first=%ld count=%ld, id=%ld\n", sc->sample_to_chunk[i].first, sc->sample_to_chunk[i].count, sc->sample_to_chunk[i].id); */ #endif } return 0; }
target: true | project: FFmpeg | commit_id: 568e18b15e2ddf494fd8926707d34ca08c8edce5
id: 24647
av_cold int ff_MPV_common_init(MpegEncContext *s) { int i; int nb_slices = (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_SLICE) ? s->avctx->thread_count : 1; if (s->encoding && s->avctx->slices) nb_slices = s->avctx->slices; if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence) s->mb_height = (s->height + 31) / 32 * 2; else s->mb_height = (s->height + 15) / 16; if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) { av_log(s->avctx, AV_LOG_ERROR, "decoding to AV_PIX_FMT_NONE is not supported.\n"); return -1; } if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) { int max_slices; if (s->mb_height) max_slices = FFMIN(MAX_THREADS, s->mb_height); else max_slices = MAX_THREADS; av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d)," " reducing to %d\n", nb_slices, max_slices); nb_slices = max_slices; } if ((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx)) return -1; ff_dct_common_init(s); s->flags = s->avctx->flags; s->flags2 = s->avctx->flags2; /* set chroma shifts */ av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift); /* convert fourcc to upper case */ s->codec_tag = avpriv_toupper4(s->avctx->codec_tag); s->stream_codec_tag = avpriv_toupper4(s->avctx->stream_codec_tag); FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, MAX_PICTURE_COUNT * sizeof(Picture), fail); for (i = 0; i < MAX_PICTURE_COUNT; i++) { av_frame_unref(&s->picture[i].f); } memset(&s->next_picture, 0, sizeof(s->next_picture)); memset(&s->last_picture, 0, sizeof(s->last_picture)); memset(&s->current_picture, 0, sizeof(s->current_picture)); av_frame_unref(&s->next_picture.f); av_frame_unref(&s->last_picture.f); av_frame_unref(&s->current_picture.f); if (s->width && s->height) { if (init_context_frame(s)) goto fail; s->parse_context.state = -1; } s->context_initialized = 1; s->thread_context[0] = s; if (s->width && s->height) { if (nb_slices > 1) { for (i = 1; i < nb_slices; i++) { s->thread_context[i] = av_malloc(sizeof(MpegEncContext)); memcpy(s->thread_context[i], s, sizeof(MpegEncContext)); } for (i = 0; i < nb_slices; i++) { if (init_duplicate_context(s->thread_context[i]) < 0) goto fail; s->thread_context[i]->start_mb_y = (s->mb_height * (i) + nb_slices / 2) / nb_slices; s->thread_context[i]->end_mb_y = (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices; } } else { if (init_duplicate_context(s) < 0) goto fail; s->start_mb_y = 0; s->end_mb_y = s->mb_height; } s->slice_context_count = nb_slices; } return 0; fail: ff_MPV_common_end(s); return -1; }
target: true | project: FFmpeg | commit_id: f6774f905fb3cfdc319523ac640be30b14c1bc55
id: 24652
static void handle_control_message(VirtIOSerial *vser, void *buf) { struct VirtIOSerialPort *port; struct virtio_console_control cpkt, *gcpkt; uint8_t *buffer; size_t buffer_len; gcpkt = buf; cpkt.event = lduw_p(&gcpkt->event); cpkt.value = lduw_p(&gcpkt->value); port = find_port_by_id(vser, ldl_p(&gcpkt->id)); if (!port && cpkt.event != VIRTIO_CONSOLE_DEVICE_READY) return; switch(cpkt.event) { case VIRTIO_CONSOLE_DEVICE_READY: if (!cpkt.value) { error_report("virtio-serial-bus: Guest failure in adding device %s\n", vser->bus->qbus.name); break; } /* * The device is up, we can now tell the device about all the * ports we have here. */ QTAILQ_FOREACH(port, &vser->ports, next) { send_control_event(port, VIRTIO_CONSOLE_PORT_ADD, 1); } break; case VIRTIO_CONSOLE_PORT_READY: if (!cpkt.value) { error_report("virtio-serial-bus: Guest failure in adding port %u for device %s\n", port->id, vser->bus->qbus.name); break; } /* * Now that we know the guest asked for the port name, we're * sure the guest has initialised whatever state is necessary * for this port. Now's a good time to let the guest know if * this port is a console port so that the guest can hook it * up to hvc. */ if (port->is_console) { send_control_event(port, VIRTIO_CONSOLE_CONSOLE_PORT, 1); } if (port->name) { stw_p(&cpkt.event, VIRTIO_CONSOLE_PORT_NAME); stw_p(&cpkt.value, 1); buffer_len = sizeof(cpkt) + strlen(port->name) + 1; buffer = qemu_malloc(buffer_len); memcpy(buffer, &cpkt, sizeof(cpkt)); memcpy(buffer + sizeof(cpkt), port->name, strlen(port->name)); buffer[buffer_len - 1] = 0; send_control_msg(port, buffer, buffer_len); qemu_free(buffer); } if (port->host_connected) { send_control_event(port, VIRTIO_CONSOLE_PORT_OPEN, 1); } /* * When the guest has asked us for this information it means * the guest is all setup and has its virtqueues * initialised. If some app is interested in knowing about * this event, let it know. */ if (port->info->guest_ready) { port->info->guest_ready(port); } break; case VIRTIO_CONSOLE_PORT_OPEN: port->guest_connected = cpkt.value; if (cpkt.value && port->info->guest_open) { /* Send the guest opened notification if an app is interested */ port->info->guest_open(port); } if (!cpkt.value && port->info->guest_close) { /* Send the guest closed notification if an app is interested */ port->info->guest_close(port); } break; } }
target: true | project: qemu | commit_id: e61da14d60ba1cceacad8396adcb9662c7f690af
id: 24653
static int blend_frames(AVFilterContext *ctx, int interpolate) { FrameRateContext *s = ctx->priv; AVFilterLink *outlink = ctx->outputs[0]; double interpolate_scene_score = 0; if ((s->flags & FRAMERATE_FLAG_SCD)) { if (s->score >= 0.0) interpolate_scene_score = s->score; else interpolate_scene_score = s->score = get_scene_score(ctx, s->f0, s->f1); ff_dlog(ctx, "blend_frames() interpolate scene score:%f\n", interpolate_scene_score); } // decide if the shot-change detection allows us to blend two frames if (interpolate_scene_score < s->scene_score) { ThreadData td; td.copy_src1 = s->f0; td.copy_src2 = s->f1; td.src2_factor = interpolate; td.src1_factor = s->max - td.src2_factor; // get work-space for output frame s->work = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!s->work) return AVERROR(ENOMEM); av_frame_copy_props(s->work, s->f0); ff_dlog(ctx, "blend_frames() INTERPOLATE to create work frame\n"); ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx))); return 1; } return 0; }
target: true | project: FFmpeg | commit_id: 2cbe6bac0337939f023bd1c37a9c455e6d535f3a
id: 24656
static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs) { unsigned flags = rhs->flags; lhs->selector = rhs->selector; lhs->base = rhs->base; lhs->limit = rhs->limit; lhs->type = (flags >> DESC_TYPE_SHIFT) & 15; lhs->present = (flags & DESC_P_MASK) != 0; lhs->dpl = rhs->selector & 3; lhs->db = (flags >> DESC_B_SHIFT) & 1; lhs->s = (flags & DESC_S_MASK) != 0; lhs->l = (flags >> DESC_L_SHIFT) & 1; lhs->g = (flags & DESC_G_MASK) != 0; lhs->avl = (flags & DESC_AVL_MASK) != 0; lhs->unusable = 0; }
target: true | project: qemu | commit_id: acaa75507b34f7b588924a09c76c6848d209e08c
id: 24657
static int parse_bintree(Indeo3DecodeContext *ctx, AVCodecContext *avctx, Plane *plane, int code, Cell *ref_cell, const int depth, const int strip_width) { Cell curr_cell; int bytes_used; if (depth <= 0) { av_log(avctx, AV_LOG_ERROR, "Stack overflow (corrupted binary tree)!\n"); return AVERROR_INVALIDDATA; // unwind recursion } curr_cell = *ref_cell; // clone parent cell if (code == H_SPLIT) { SPLIT_CELL(ref_cell->height, curr_cell.height); ref_cell->ypos += curr_cell.height; ref_cell->height -= curr_cell.height; } else if (code == V_SPLIT) { if (curr_cell.width > strip_width) { /* split strip */ curr_cell.width = (curr_cell.width <= (strip_width << 1) ? 1 : 2) * strip_width; } else SPLIT_CELL(ref_cell->width, curr_cell.width); ref_cell->xpos += curr_cell.width; ref_cell->width -= curr_cell.width; } while (get_bits_left(&ctx->gb) >= 2) { /* loop until return */ RESYNC_BITSTREAM; switch (code = get_bits(&ctx->gb, 2)) { case H_SPLIT: case V_SPLIT: if (parse_bintree(ctx, avctx, plane, code, &curr_cell, depth - 1, strip_width)) return AVERROR_INVALIDDATA; break; case INTRA_NULL: if (!curr_cell.tree) { /* MC tree INTRA code */ curr_cell.mv_ptr = 0; /* mark the current strip as INTRA */ curr_cell.tree = 1; /* enter the VQ tree */ } else { /* VQ tree NULL code */ RESYNC_BITSTREAM; code = get_bits(&ctx->gb, 2); if (code >= 2) { av_log(avctx, AV_LOG_ERROR, "Invalid VQ_NULL code: %d\n", code); return AVERROR_INVALIDDATA; } if (code == 1) av_log(avctx, AV_LOG_ERROR, "SkipCell procedure not implemented yet!\n"); CHECK_CELL if (!curr_cell.mv_ptr) return AVERROR_INVALIDDATA; copy_cell(ctx, plane, &curr_cell); return 0; } break; case INTER_DATA: if (!curr_cell.tree) { /* MC tree INTER code */ /* get motion vector index and setup the pointer to the mv set */ if (!ctx->need_resync) ctx->next_cell_data = &ctx->gb.buffer[(get_bits_count(&ctx->gb) + 7) >> 3]; curr_cell.mv_ptr = &ctx->mc_vectors[*(ctx->next_cell_data++) << 1]; curr_cell.tree = 1; /* enter the VQ tree */ UPDATE_BITPOS(8); } else { /* VQ tree DATA code */ if (!ctx->need_resync) ctx->next_cell_data = &ctx->gb.buffer[(get_bits_count(&ctx->gb) + 7) >> 3]; CHECK_CELL bytes_used = decode_cell(ctx, avctx, plane, &curr_cell, ctx->next_cell_data, ctx->last_byte); if (bytes_used < 0) return AVERROR_INVALIDDATA; UPDATE_BITPOS(bytes_used << 3); ctx->next_cell_data += bytes_used; return 0; } break; } }//while return AVERROR_INVALIDDATA; }
target: true | project: FFmpeg | commit_id: 134aaa79f7f1ce1df64afc7d10d2b3de77df7b08
id: 24658
static int parse_chunks(AVFormatContext *s, int mode, int64_t seekts, int *len_ptr) { WtvContext *wtv = s->priv_data; ByteIOContext *pb = wtv->pb; while (!url_feof(pb)) { ff_asf_guid g; int len, sid, consumed; ff_get_guid(pb, &g); len = get_le32(pb); if (len < 32) break; sid = get_le32(pb) & 0x7FFF; url_fskip(pb, 8); consumed = 32; if (!ff_guidcmp(g, stream_guid)) { if (ff_find_stream_index(s, sid) < 0) { ff_asf_guid mediatype, subtype, formattype; int size; consumed += 20; url_fskip(pb, 16); if (get_le32(pb)) { url_fskip(pb, 8); ff_get_guid(pb, &mediatype); ff_get_guid(pb, &subtype); url_fskip(pb, 12); ff_get_guid(pb, &formattype); size = get_le32(pb); parse_media_type(s, 0, sid, mediatype, subtype, formattype, size); consumed += 72 + size; } } } else if (!ff_guidcmp(g, stream2_guid)) { int stream_index = ff_find_stream_index(s, sid); if (stream_index >= 0 && !((WtvStream*)s->streams[stream_index]->priv_data)->seen_data) { ff_asf_guid mediatype, subtype, formattype; int size; url_fskip(pb, 12); ff_get_guid(pb, &mediatype); ff_get_guid(pb, &subtype); url_fskip(pb, 12); ff_get_guid(pb, &formattype); size = get_le32(pb); parse_media_type(s, s->streams[stream_index], sid, mediatype, subtype, formattype, size); consumed += 76 + size; } } else if (!ff_guidcmp(g, EVENTID_AudioDescriptorSpanningEvent) || !ff_guidcmp(g, EVENTID_CtxADescriptorSpanningEvent) || !ff_guidcmp(g, EVENTID_CSDescriptorSpanningEvent) || !ff_guidcmp(g, EVENTID_StreamIDSpanningEvent) || !ff_guidcmp(g, EVENTID_SubtitleSpanningEvent) || !ff_guidcmp(g, EVENTID_TeletextSpanningEvent)) { int stream_index = ff_find_stream_index(s, sid); if (stream_index >= 0) { AVStream *st = s->streams[stream_index]; uint8_t buf[258]; const uint8_t *pbuf = buf; int buf_size; url_fskip(pb, 8); consumed += 8; if (!ff_guidcmp(g, EVENTID_CtxADescriptorSpanningEvent) || !ff_guidcmp(g, EVENTID_CSDescriptorSpanningEvent)) { url_fskip(pb, 6); consumed += 6; } buf_size = FFMIN(len - consumed, sizeof(buf)); get_buffer(pb, buf, buf_size); consumed += buf_size; ff_parse_mpeg2_descriptor(s, st, 0, &pbuf, buf + buf_size, 0, 0, 0, 0); } } else if (!ff_guidcmp(g, EVENTID_DVBScramblingControlSpanningEvent)) { int stream_index = ff_find_stream_index(s, sid); if (stream_index >= 0) { url_fskip(pb, 12); if (get_le32(pb)) av_log(s, AV_LOG_WARNING, "DVB scrambled stream detected (st:%d), decoding will likely fail\n", stream_index); consumed += 16; } } else if (!ff_guidcmp(g, EVENTID_LanguageSpanningEvent)) { int stream_index = ff_find_stream_index(s, sid); if (stream_index >= 0) { AVStream *st = s->streams[stream_index]; uint8_t language[4]; url_fskip(pb, 12); get_buffer(pb, language, 3); if (language[0]) { language[3] = 0; av_metadata_set2(&st->metadata, "language", language, 0); } consumed += 15; } } else if (!ff_guidcmp(g, timestamp_guid)) { int stream_index = ff_find_stream_index(s, sid); if (stream_index >= 0) { url_fskip(pb, 8); wtv->pts = get_le64(pb); consumed += 16; if (wtv->pts == -1) wtv->pts = AV_NOPTS_VALUE; else { wtv->last_valid_pts = wtv->pts; if (wtv->epoch == AV_NOPTS_VALUE || wtv->pts < wtv->epoch) wtv->epoch = wtv->pts; if (mode == SEEK_TO_PTS && wtv->pts >= seekts) { #define WTV_PAD8(x) (((x) + 7) & ~7) url_fskip(pb, WTV_PAD8(len) - consumed); return 0; } } } } else if (!ff_guidcmp(g, data_guid)) { int stream_index = ff_find_stream_index(s, sid); if (mode == SEEK_TO_DATA && stream_index >= 0) { WtvStream *wst = s->streams[stream_index]->priv_data; wst->seen_data = 1; if (len_ptr) { *len_ptr = len; } return stream_index; } } else if ( 
!ff_guidcmp(g, /* DSATTRIB_CAPTURE_STREAMTIME */ (const ff_asf_guid){0x14,0x56,0x1A,0x0C,0xCD,0x30,0x40,0x4F,0xBC,0xBF,0xD0,0x3E,0x52,0x30,0x62,0x07}) || !ff_guidcmp(g, /* DSATTRIB_PicSampleSeq */ (const ff_asf_guid){0x02,0xAE,0x5B,0x2F,0x8F,0x7B,0x60,0x4F,0x82,0xD6,0xE4,0xEA,0x2F,0x1F,0x4C,0x99}) || !ff_guidcmp(g, /* DSATTRIB_TRANSPORT_PROPERTIES */ (const ff_asf_guid){0x12,0xF6,0x22,0xB6,0xAD,0x47,0x71,0x46,0xAD,0x6C,0x05,0xA9,0x8E,0x65,0xDE,0x3A}) || !ff_guidcmp(g, /* dvr_ms_vid_frame_rep_data */ (const ff_asf_guid){0xCC,0x32,0x64,0xDD,0x29,0xE2,0xDB,0x40,0x80,0xF6,0xD2,0x63,0x28,0xD2,0x76,0x1F}) || !ff_guidcmp(g, /* EVENTID_AudioTypeSpanningEvent */ (const ff_asf_guid){0xBE,0xBF,0x1C,0x50,0x49,0xB8,0xCE,0x42,0x9B,0xE9,0x3D,0xB8,0x69,0xFB,0x82,0xB3}) || !ff_guidcmp(g, /* EVENTID_ChannelChangeSpanningEvent */ (const ff_asf_guid){0xE5,0xC5,0x67,0x90,0x5C,0x4C,0x05,0x42,0x86,0xC8,0x7A,0xFE,0x20,0xFE,0x1E,0xFA}) || !ff_guidcmp(g, /* EVENTID_ChannelInfoSpanningEvent */ (const ff_asf_guid){0x80,0x6D,0xF3,0x41,0x32,0x41,0xC2,0x4C,0xB1,0x21,0x01,0xA4,0x32,0x19,0xD8,0x1B}) || !ff_guidcmp(g, /* EVENTID_ChannelTypeSpanningEvent */ (const ff_asf_guid){0x51,0x1D,0xAB,0x72,0xD2,0x87,0x9B,0x48,0xBA,0x11,0x0E,0x08,0xDC,0x21,0x02,0x43}) || !ff_guidcmp(g, /* EVENTID_PIDListSpanningEvent */ (const ff_asf_guid){0x65,0x8F,0xFC,0x47,0xBB,0xE2,0x34,0x46,0x9C,0xEF,0xFD,0xBF,0xE6,0x26,0x1D,0x5C}) || !ff_guidcmp(g, /* EVENTID_SignalAndServiceStatusSpanningEvent */ (const ff_asf_guid){0xCB,0xC5,0x68,0x80,0x04,0x3C,0x2B,0x49,0xB4,0x7D,0x03,0x08,0x82,0x0D,0xCE,0x51}) || !ff_guidcmp(g, /* EVENTID_StreamTypeSpanningEvent */ (const ff_asf_guid){0xBC,0x2E,0xAF,0x82,0xA6,0x30,0x64,0x42,0xA8,0x0B,0xAD,0x2E,0x13,0x72,0xAC,0x60}) || !ff_guidcmp(g, (const ff_asf_guid){0x1E,0xBE,0xC3,0xC5,0x43,0x92,0xDC,0x11,0x85,0xE5,0x00,0x12,0x3F,0x6F,0x73,0xB9}) || !ff_guidcmp(g, (const ff_asf_guid){0x3B,0x86,0xA2,0xB1,0xEB,0x1E,0xC3,0x44,0x8C,0x88,0x1C,0xA3,0xFF,0xE3,0xE7,0x6A}) || !ff_guidcmp(g, (const ff_asf_guid){0x4E,0x7F,0x4C,0x5B,0xC4,0xD0,0x38,0x4B,0xA8,0x3E,0x21,0x7F,0x7B,0xBF,0x52,0xE7}) || !ff_guidcmp(g, (const ff_asf_guid){0x63,0x36,0xEB,0xFE,0xA1,0x7E,0xD9,0x11,0x83,0x08,0x00,0x07,0xE9,0x5E,0xAD,0x8D}) || !ff_guidcmp(g, (const ff_asf_guid){0x70,0xE9,0xF1,0xF8,0x89,0xA4,0x4C,0x4D,0x83,0x73,0xB8,0x12,0xE0,0xD5,0xF8,0x1E}) || !ff_guidcmp(g, (const ff_asf_guid){0x96,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D}) || !ff_guidcmp(g, (const ff_asf_guid){0x97,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D}) || !ff_guidcmp(g, (const ff_asf_guid){0xA1,0xC3,0xD2,0xC2,0x7E,0x9A,0xDA,0x11,0x8B,0xF7,0x00,0x07,0xE9,0x5E,0xAD,0x8D})) { //ignore known guids } else av_log(s, AV_LOG_WARNING, "unsupported chunk:"PRI_GUID"\n", ARG_GUID(g)); url_fskip(pb, WTV_PAD8(len) - consumed); } return AVERROR_EOF; }
target: false | project: FFmpeg | commit_id: e4f85b849913794395bb03dfc09546cd41b10882
id: 24660
static void ac3_decode_transform_coeffs_ch(AC3DecodeContext *s, int ch_index, mant_groups *m) { int start_freq = s->start_freq[ch_index]; int end_freq = s->end_freq[ch_index]; uint8_t *baps = s->bap[ch_index]; int8_t *exps = s->dexps[ch_index]; int32_t *coeffs = s->fixed_coeffs[ch_index]; int dither = (ch_index == CPL_CH) || s->dither_flag[ch_index]; GetBitContext *gbc = &s->gbc; int freq; for (freq = start_freq; freq < end_freq; freq++) { int bap = baps[freq]; int mantissa; switch (bap) { case 0: /* random noise with approximate range of -0.707 to 0.707 */ if (dither) mantissa = (((av_lfg_get(&s->dith_state)>>8)*181)>>8) - 5931008; else mantissa = 0; break; case 1: if (m->b1) { m->b1--; mantissa = m->b1_mant[m->b1]; } else { int bits = get_bits(gbc, 5); mantissa = b1_mantissas[bits][0]; m->b1_mant[1] = b1_mantissas[bits][1]; m->b1_mant[0] = b1_mantissas[bits][2]; m->b1 = 2; } break; case 2: if (m->b2) { m->b2--; mantissa = m->b2_mant[m->b2]; } else { int bits = get_bits(gbc, 7); mantissa = b2_mantissas[bits][0]; m->b2_mant[1] = b2_mantissas[bits][1]; m->b2_mant[0] = b2_mantissas[bits][2]; m->b2 = 2; } break; case 3: mantissa = b3_mantissas[get_bits(gbc, 3)]; break; case 4: if (m->b4) { m->b4 = 0; mantissa = m->b4_mant; } else { int bits = get_bits(gbc, 7); mantissa = b4_mantissas[bits][0]; m->b4_mant = b4_mantissas[bits][1]; m->b4 = 1; } break; case 5: mantissa = b5_mantissas[get_bits(gbc, 4)]; break; default: /* 6 to 15 */ /* Shift mantissa and sign-extend it. */ mantissa = get_sbits(gbc, quantization_tab[bap]); mantissa <<= 24 - quantization_tab[bap]; break; } coeffs[freq] = mantissa >> exps[freq]; } }
target: true | project: FFmpeg | commit_id: 4782c4284fa3856a9b6910fe5ff6e4fb1c65b58c
id: 24661
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size, bool exact_size) { BDRVQcowState *s = bs->opaque; int new_l1_size2, ret, i; uint64_t *new_l1_table; int64_t old_l1_table_offset, old_l1_size; int64_t new_l1_table_offset, new_l1_size; uint8_t data[12]; if (min_size <= s->l1_size) return 0; /* Do a sanity check on min_size before trying to calculate new_l1_size * (this prevents overflows during the while loop for the calculation of * new_l1_size) */ if (min_size > INT_MAX / sizeof(uint64_t)) { return -EFBIG; } if (exact_size) { new_l1_size = min_size; } else { /* Bump size up to reduce the number of times we have to grow */ new_l1_size = s->l1_size; if (new_l1_size == 0) { new_l1_size = 1; } while (min_size > new_l1_size) { new_l1_size = (new_l1_size * 3 + 1) / 2; } } if (new_l1_size > INT_MAX / sizeof(uint64_t)) { return -EFBIG; } #ifdef DEBUG_ALLOC2 fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n", s->l1_size, new_l1_size); #endif new_l1_size2 = sizeof(uint64_t) * new_l1_size; new_l1_table = g_malloc0(align_offset(new_l1_size2, 512)); memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t)); /* write new table (align to cluster) */ BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE); new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2); if (new_l1_table_offset < 0) { g_free(new_l1_table); return new_l1_table_offset; } ret = qcow2_cache_flush(bs, s->refcount_block_cache); if (ret < 0) { goto fail; } /* the L1 position has not yet been updated, so these clusters must * indeed be completely free */ ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset, new_l1_size2); if (ret < 0) { goto fail; } BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE); for(i = 0; i < s->l1_size; i++) new_l1_table[i] = cpu_to_be64(new_l1_table[i]); ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table, new_l1_size2); if (ret < 0) goto fail; for(i = 0; i < s->l1_size; i++) new_l1_table[i] = be64_to_cpu(new_l1_table[i]); /* set new table */ BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE); cpu_to_be32w((uint32_t*)data, new_l1_size); stq_be_p(data + 4, new_l1_table_offset); ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), data,sizeof(data)); if (ret < 0) { goto fail; } g_free(s->l1_table); old_l1_table_offset = s->l1_table_offset; s->l1_table_offset = new_l1_table_offset; s->l1_table = new_l1_table; old_l1_size = s->l1_size; s->l1_size = new_l1_size; qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t), QCOW2_DISCARD_OTHER); return 0; fail: g_free(new_l1_table); qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2, QCOW2_DISCARD_OTHER); return ret; }
target: true | project: qemu | commit_id: de82815db1c89da058b7fb941dab137d6d9ab738
id: 24662
static void r2d_init(ram_addr_t ram_size, const char *boot_device, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { CPUState *env; struct SH7750State *s; ram_addr_t sdram_addr; qemu_irq *irq; PCIBus *pci; DriveInfo *dinfo; int i; if (!cpu_model) cpu_model = "SH7751R"; env = cpu_init(cpu_model); if (!env) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } /* Allocate memory space */ sdram_addr = qemu_ram_alloc(SDRAM_SIZE); cpu_register_physical_memory(SDRAM_BASE, SDRAM_SIZE, sdram_addr); /* Register peripherals */ s = sh7750_init(env); irq = r2d_fpga_init(0x04000000, sh7750_irl(s)); pci = sh_pci_register_bus(r2d_pci_set_irq, r2d_pci_map_irq, irq, 0, 4); sm501_init(0x10000000, SM501_VRAM_SIZE, irq[SM501], serial_hds[2]); /* onboard CF (True IDE mode, Master only). */ if ((dinfo = drive_get(IF_IDE, 0, 0)) != NULL) mmio_ide_init(0x14001000, 0x1400080c, irq[CF_IDE], 1, dinfo, NULL); /* NIC: rtl8139 on-board, and 2 slots. */ for (i = 0; i < nb_nics; i++) pci_nic_init(&nd_table[i], "rtl8139", i==0 ? "2" : NULL); /* Todo: register on board registers */ if (kernel_filename) { int kernel_size; /* initialization which should be done by firmware */ stl_phys(SH7750_BCR1, 1<<3); /* cs3 SDRAM */ stw_phys(SH7750_BCR2, 3<<(3*2)); /* cs3 32bit */ if (kernel_cmdline) { kernel_size = load_image_targphys(kernel_filename, SDRAM_BASE + LINUX_LOAD_OFFSET, SDRAM_SIZE - LINUX_LOAD_OFFSET); env->pc = (SDRAM_BASE + LINUX_LOAD_OFFSET) | 0xa0000000; pstrcpy_targphys(SDRAM_BASE + 0x10100, 256, kernel_cmdline); } else { kernel_size = load_image_targphys(kernel_filename, SDRAM_BASE, SDRAM_SIZE); env->pc = SDRAM_BASE | 0xa0000000; /* Start from P2 area */ } if (kernel_size < 0) { fprintf(stderr, "qemu: could not load kernel '%s'\n", kernel_filename); exit(1); } } }
target: true | project: qemu | commit_id: 07caea315a85ebfe90851f9c2e4ef3fdd24117b5
id: 24663
void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) { #if !defined(FLUSH_ALL_TLBS) PowerPCCPU *cpu = ppc_env_get_cpu(env); CPUState *cs; addr &= TARGET_PAGE_MASK; switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: ppc6xx_tlb_invalidate_virt(env, addr, 0); if (env->id_tlbs == 1) { ppc6xx_tlb_invalidate_virt(env, addr, 1); } break; case POWERPC_MMU_32B: case POWERPC_MMU_601: /* tlbie invalidate TLBs for all segments */ addr &= ~((target_ulong)-1ULL << 28); cs = CPU(cpu); /* XXX: this case should be optimized, * giving a mask to tlb_flush_page */ /* This is broken, some CPUs invalidate a whole congruence * class on an even smaller subset of bits and some OSes take * advantage of this. Just blow the whole thing away. */ #if 0 tlb_flush_page(cs, addr | (0x0 << 28)); tlb_flush_page(cs, addr | (0x1 << 28)); tlb_flush_page(cs, addr | (0x2 << 28)); tlb_flush_page(cs, addr | (0x3 << 28)); tlb_flush_page(cs, addr | (0x4 << 28)); tlb_flush_page(cs, addr | (0x5 << 28)); tlb_flush_page(cs, addr | (0x6 << 28)); tlb_flush_page(cs, addr | (0x7 << 28)); tlb_flush_page(cs, addr | (0x8 << 28)); tlb_flush_page(cs, addr | (0x9 << 28)); tlb_flush_page(cs, addr | (0xA << 28)); tlb_flush_page(cs, addr | (0xB << 28)); tlb_flush_page(cs, addr | (0xC << 28)); tlb_flush_page(cs, addr | (0xD << 28)); tlb_flush_page(cs, addr | (0xE << 28)); tlb_flush_page(cs, addr | (0xF << 28)); #endif break; #if defined(TARGET_PPC64) case POWERPC_MMU_64B: case POWERPC_MMU_2_03: case POWERPC_MMU_2_06: case POWERPC_MMU_2_06a: case POWERPC_MMU_2_07: case POWERPC_MMU_2_07a: /* tlbie invalidate TLBs for all segments */ /* XXX: given the fact that there are too many segments to invalidate, * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU, * we just invalidate all TLBs */ env->tlb_need_flush = 1; break; #endif /* defined(TARGET_PPC64) */ default: /* Should never reach here with other MMU models */ assert(0); } #else ppc_tlb_invalidate_all(env); #endif }
target: true | project: qemu | commit_id: 3dcfb74fd4e4ab31508c80e6965a0cd477510234
id: 24664
static void draw_bar(TestSourceContext *test, const uint8_t color[4], unsigned x, unsigned y, unsigned w, unsigned h, AVFrame *frame) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); uint8_t *p, *p0; int plane; x = FFMIN(x, test->w - 1); y = FFMIN(y, test->h - 1); w = FFMIN(w, test->w - x); h = FFMIN(h, test->h - y); av_assert0(x + w <= test->w); av_assert0(y + h <= test->h); for (plane = 0; frame->data[plane]; plane++) { const int c = color[plane]; const int linesize = frame->linesize[plane]; int i, px, py, pw, ph; if (plane == 1 || plane == 2) { px = x >> desc->log2_chroma_w; pw = w >> desc->log2_chroma_w; py = y >> desc->log2_chroma_h; ph = h >> desc->log2_chroma_h; } else { px = x; pw = w; py = y; ph = h; } p0 = p = frame->data[plane] + py * linesize + px; memset(p, c, pw); p += linesize; for (i = 1; i < ph; i++, p += linesize) memcpy(p, p0, pw); } }
target: true | project: FFmpeg | commit_id: 7a7ca3cc2f43e7a7b61fdad8200b365ff0977bd2
id: 24665
static int qpa_init_in (HWVoiceIn *hw, struct audsettings *as) { int error; static pa_sample_spec ss; struct audsettings obt_as = *as; PAVoiceIn *pa = (PAVoiceIn *) hw; ss.format = audfmt_to_pa (as->fmt, as->endianness); ss.channels = as->nchannels; ss.rate = as->freq; obt_as.fmt = pa_to_audfmt (ss.format, &obt_as.endianness); pa->s = pa_simple_new ( conf.server, "qemu", PA_STREAM_RECORD, conf.source, "pcm.capture", &ss, NULL, /* channel map */ NULL, /* buffering attributes */ &error ); if (!pa->s) { qpa_logerr (error, "pa_simple_new for capture failed\n"); goto fail1; } audio_pcm_init_info (&hw->info, &obt_as); hw->samples = conf.samples; pa->pcm_buf = audio_calloc (AUDIO_FUNC, hw->samples, 1 << hw->info.shift); pa->wpos = hw->wpos; if (!pa->pcm_buf) { dolog ("Could not allocate buffer (%d bytes)\n", hw->samples << hw->info.shift); goto fail2; } if (audio_pt_init (&pa->pt, qpa_thread_in, hw, AUDIO_CAP, AUDIO_FUNC)) { goto fail3; } return 0; fail3: g_free (pa->pcm_buf); pa->pcm_buf = NULL; fail2: pa_simple_free (pa->s); pa->s = NULL; fail1: return -1; }
target: true | project: qemu | commit_id: ea9ebc2ce69198f7aca4b43652824c5d621ac978
id: 24666
static inline int64_t gb_get_v(GetBitContext *gb) { int64_t v = 0; int bits = 0; while(get_bits1(gb) && bits < 64-7){ v <<= 7; v |= get_bits(gb, 7); bits += 7; } v <<= 7; v |= get_bits(gb, 7); return v; }
target: true | project: FFmpeg | commit_id: 05e161952954acf247e0fd1fdef00559675c4d4d
id: 24667
int qed_check(BDRVQEDState *s, BdrvCheckResult *result, bool fix) { QEDCheck check = { .s = s, .result = result, .nclusters = qed_bytes_to_clusters(s, s->file_size), .request = { .l2_table = NULL }, .fix = fix, }; int ret; check.used_clusters = g_try_malloc0(((check.nclusters + 31) / 32) * sizeof(check.used_clusters[0])); if (check.nclusters && check.used_clusters == NULL) { return -ENOMEM; } check.result->bfi.total_clusters = (s->header.image_size + s->header.cluster_size - 1) / s->header.cluster_size; ret = qed_check_l1_table(&check, s->l1_table); if (ret == 0) { /* Only check for leaks if entire image was scanned successfully */ qed_check_for_leaks(&check); if (fix) { qed_check_mark_clean(s, result); } } g_free(check.used_clusters); return ret; }
target: true | project: qemu | commit_id: 02c4f26b1517d9e403ec10d6f6ca3c0276d19e43
id: 24669
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, VLC *table, int coeff_index, int plane, int eob_run) { int i, j = 0; int token; int zero_run = 0; DCTELEM coeff = 0; int bits_to_get; int blocks_ended; int coeff_i = 0; int num_coeffs = s->num_coded_frags[plane][coeff_index]; int16_t *dct_tokens = s->dct_tokens[plane][coeff_index]; /* local references to structure members to avoid repeated deferences */ int *coded_fragment_list = s->coded_fragment_list[plane]; Vp3Fragment *all_fragments = s->all_fragments; VLC_TYPE (*vlc_table)[2] = table->table; if (num_coeffs < 0) av_log(s->avctx, AV_LOG_ERROR, "Invalid number of coefficents at level %d\n", coeff_index); if (eob_run > num_coeffs) { coeff_i = blocks_ended = num_coeffs; eob_run -= num_coeffs; } else { coeff_i = blocks_ended = eob_run; eob_run = 0; } // insert fake EOB token to cover the split between planes or zzi if (blocks_ended) dct_tokens[j++] = blocks_ended << 2; while (coeff_i < num_coeffs && get_bits_left(gb) > 0) { /* decode a VLC into a token */ token = get_vlc2(gb, vlc_table, 11, 3); /* use the token to get a zero run, a coefficient, and an eob run */ if (token <= 6) { eob_run = eob_run_base[token]; if (eob_run_get_bits[token]) eob_run += get_bits(gb, eob_run_get_bits[token]); // record only the number of blocks ended in this plane, // any spill will be recorded in the next plane. if (eob_run > num_coeffs - coeff_i) { dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i); blocks_ended += num_coeffs - coeff_i; eob_run -= num_coeffs - coeff_i; coeff_i = num_coeffs; } else { dct_tokens[j++] = TOKEN_EOB(eob_run); blocks_ended += eob_run; coeff_i += eob_run; eob_run = 0; } } else { bits_to_get = coeff_get_bits[token]; if (bits_to_get) bits_to_get = get_bits(gb, bits_to_get); coeff = coeff_tables[token][bits_to_get]; zero_run = zero_run_base[token]; if (zero_run_get_bits[token]) zero_run += get_bits(gb, zero_run_get_bits[token]); if (zero_run) { dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run); } else { // Save DC into the fragment structure. DC prediction is // done in raster order, so the actual DC can't be in with // other tokens. We still need the token in dct_tokens[] // however, or else the structure collapses on itself. if (!coeff_index) all_fragments[coded_fragment_list[coeff_i]].dc = coeff; dct_tokens[j++] = TOKEN_COEFF(coeff); } if (coeff_index + zero_run > 64) { av_log(s->avctx, AV_LOG_DEBUG, "Invalid zero run of %d with" " %d coeffs left\n", zero_run, 64-coeff_index); zero_run = 64 - coeff_index; } // zero runs code multiple coefficients, // so don't try to decode coeffs for those higher levels for (i = coeff_index+1; i <= coeff_index+zero_run; i++) s->num_coded_frags[plane][i]--; coeff_i++; } } if (blocks_ended > s->num_coded_frags[plane][coeff_index]) av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n"); // decrement the number of blocks that have higher coeffecients for each // EOB run at this level if (blocks_ended) for (i = coeff_index+1; i < 64; i++) s->num_coded_frags[plane][i] -= blocks_ended; // setup the next buffer if (plane < 2) s->dct_tokens[plane+1][coeff_index] = dct_tokens + j; else if (coeff_index < 63) s->dct_tokens[0][coeff_index+1] = dct_tokens + j; return eob_run; }
target: true | project: FFmpeg | commit_id: 8370e426e42f2e4b9d14a1fb8107ecfe5163ce7f
id: 24670
int ff_copy_whitelists(AVFormatContext *dst, AVFormatContext *src) { av_assert0(!dst->codec_whitelist && !dst->format_whitelist); dst-> codec_whitelist = av_strdup(src->codec_whitelist); dst->format_whitelist = av_strdup(src->format_whitelist); if ( (src-> codec_whitelist && !dst-> codec_whitelist) || (src->format_whitelist && !dst->format_whitelist)) { av_log(dst, AV_LOG_ERROR, "Failed to duplicate whitelist\n"); return AVERROR(ENOMEM); } return 0; }
target: false | project: FFmpeg | commit_id: 1dba8371d93cf1c83bcd5c432d921905206a60f3
id: 24671
static void init_dequant4_coeff_table(H264Context *h){ int i,j,q,x; const int transpose = (h->h264dsp.h264_idct_add != ff_h264_idct_add_c); //FIXME ugly for(i=0; i<6; i++ ){ h->dequant4_coeff[i] = h->dequant4_buffer[i]; for(j=0; j<i; j++){ if(!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i], 16*sizeof(uint8_t))){ h->dequant4_coeff[i] = h->dequant4_buffer[j]; break; } } if(j<i) continue; for(q=0; q<52; q++){ int shift = div6[q] + 2; int idx = rem6[q]; for(x=0; x<16; x++) h->dequant4_coeff[i][q][transpose ? (x>>2)|((x<<2)&0xF) : x] = ((uint32_t)dequant4_coeff_init[idx][(x&1) + ((x>>2)&1)] * h->pps.scaling_matrix4[i][x]) << shift; } } }
target: false | project: FFmpeg | commit_id: ca32f7f2083f9ededd1d9964ed065e0ad07a01e0
id: 24672
static int mpeg_mux_write_packet(AVFormatContext *ctx, AVPacket *pkt) { MpegMuxContext *s = ctx->priv_data; int stream_index= pkt->stream_index; int size= pkt->size; uint8_t *buf= pkt->data; AVStream *st = ctx->streams[stream_index]; StreamInfo *stream = st->priv_data; int64_t pts, dts, new_start_pts, new_start_dts; int len, avail_size; //XXX/FIXME this is and always was broken // compute_pts_dts(st, &pts, &dts, pkt->pts); pts= pkt->pts; dts= pkt->dts; if(s->is_svcd) { /* offset pts and dts slightly into the future to be able to do the compatibility fix below.*/ pts = (pts + 2) & ((1LL << 33) - 1); dts = (dts + 2) & ((1LL << 33) - 1); if (stream->packet_number == 0 && dts == pts) /* For the very first packet we want to force the DTS to be included. This increases compatibility with lots of DVD players. Since the MPEG-2 standard mandates that DTS is only written when it is different from PTS we have to move it slightly into the past.*/ dts = (dts - 2) & ((1LL << 33) - 1); } if(s->is_vcd) { /* We have to offset the PTS, so that it is consistent with the SCR. SCR starts at 36000, but the first two packs contain only padding and the first pack from the other stream, respectively, may also have been written before. So the real data starts at SCR 36000+3*1200. */ pts = (pts + 36000 + 3600) & ((1LL << 33) - 1); dts = (dts + 36000 + 3600) & ((1LL << 33) - 1); } #if 0 update_scr(ctx,stream_index,pts); printf("%d: pts=%0.3f dts=%0.3f scr=%0.3f\n", stream_index, pts / 90000.0, dts / 90000.0, s->last_scr / 90000.0); #endif /* we assume here that pts != AV_NOPTS_VALUE */ new_start_pts = stream->start_pts; new_start_dts = stream->start_dts; if (stream->start_pts == AV_NOPTS_VALUE) { new_start_pts = pts; new_start_dts = dts; } avail_size = get_packet_payload_size(ctx, stream_index, new_start_pts, new_start_dts); if (stream->buffer_ptr >= avail_size) { update_scr(ctx,stream_index,stream->start_pts); /* unlikely case: outputing the pts or dts increase the packet size so that we cannot write the start of the next packet. In this case, we must flush the current packet with padding. Note: this always happens for the first audio and video packet in a VCD file, since they do not carry any data.*/ flush_packet(ctx, stream_index, stream->start_pts, stream->start_dts, s->last_scr); stream->buffer_ptr = 0; } stream->start_pts = new_start_pts; stream->start_dts = new_start_dts; stream->nb_frames++; if (stream->frame_start_offset == 0) stream->frame_start_offset = stream->buffer_ptr; while (size > 0) { avail_size = get_packet_payload_size(ctx, stream_index, stream->start_pts, stream->start_dts); len = avail_size - stream->buffer_ptr; if (len > size) len = size; memcpy(stream->buffer + stream->buffer_ptr, buf, len); stream->buffer_ptr += len; buf += len; size -= len; if (stream->buffer_ptr >= avail_size) { update_scr(ctx,stream_index,stream->start_pts); /* if packet full, we send it now */ flush_packet(ctx, stream_index, stream->start_pts, stream->start_dts, s->last_scr); stream->buffer_ptr = 0; if (s->is_vcd) { /* Write one or more padding sectors, if necessary, to reach the constant overall bitrate.*/ int vcd_pad_bytes; while((vcd_pad_bytes = get_vcd_padding_size(ctx,stream->start_pts) ) >= s->packet_size) put_vcd_padding_sector(ctx); } /* Make sure only the FIRST pes packet for this frame has a timestamp */ stream->start_pts = AV_NOPTS_VALUE; stream->start_dts = AV_NOPTS_VALUE; } } return 0; }
target: false | project: FFmpeg | commit_id: 3c895fc098f7637f6d5ec3a9d6766e724a8b9e41
id: 24674
SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur, float lumaSharpen, float chromaSharpen, float chromaHShift, float chromaVShift, int verbose) { SwsFilter *filter = av_malloc(sizeof(SwsFilter)); if (!filter) return NULL; if (lumaGBlur != 0.0) { filter->lumH = sws_getGaussianVec(lumaGBlur, 3.0); filter->lumV = sws_getGaussianVec(lumaGBlur, 3.0); } else { filter->lumH = sws_getIdentityVec(); filter->lumV = sws_getIdentityVec(); } if (chromaGBlur != 0.0) { filter->chrH = sws_getGaussianVec(chromaGBlur, 3.0); filter->chrV = sws_getGaussianVec(chromaGBlur, 3.0); } else { filter->chrH = sws_getIdentityVec(); filter->chrV = sws_getIdentityVec(); } if (!filter->lumH || !filter->lumV || !filter->chrH || !filter->chrV) { sws_freeVec(filter->lumH); sws_freeVec(filter->lumV); sws_freeVec(filter->chrH); sws_freeVec(filter->chrV); av_freep(&filter); return NULL; } if (chromaSharpen != 0.0) { SwsVector *id = sws_getIdentityVec(); sws_scaleVec(filter->chrH, -chromaSharpen); sws_scaleVec(filter->chrV, -chromaSharpen); sws_addVec(filter->chrH, id); sws_addVec(filter->chrV, id); sws_freeVec(id); } if (lumaSharpen != 0.0) { SwsVector *id = sws_getIdentityVec(); sws_scaleVec(filter->lumH, -lumaSharpen); sws_scaleVec(filter->lumV, -lumaSharpen); sws_addVec(filter->lumH, id); sws_addVec(filter->lumV, id); sws_freeVec(id); } if (chromaHShift != 0.0) sws_shiftVec(filter->chrH, (int)(chromaHShift + 0.5)); if (chromaVShift != 0.0) sws_shiftVec(filter->chrV, (int)(chromaVShift + 0.5)); sws_normalizeVec(filter->chrH, 1.0); sws_normalizeVec(filter->chrV, 1.0); sws_normalizeVec(filter->lumH, 1.0); sws_normalizeVec(filter->lumV, 1.0); if (verbose) sws_printVec2(filter->chrH, NULL, AV_LOG_DEBUG); if (verbose) sws_printVec2(filter->lumH, NULL, AV_LOG_DEBUG); return filter; }
target: false | project: FFmpeg | commit_id: 7ebb3022297aa00afda6800105684b8303f2608e
id: 24675
static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MPADecodeContext *s = avctx->priv_data; uint32_t header; int out_size; OUT_INT *out_samples = data; if (buf_size < HEADER_SIZE) return AVERROR_INVALIDDATA; header = AV_RB32(buf); if (ff_mpa_check_header(header) < 0) { av_log(avctx, AV_LOG_ERROR, "Header missing\n"); return AVERROR_INVALIDDATA; } if (avpriv_mpegaudio_decode_header((MPADecodeHeader *)s, header) == 1) { /* free format: prepare to compute frame size */ s->frame_size = -1; return AVERROR_INVALIDDATA; } /* update codec info */ avctx->channels = s->nb_channels; avctx->channel_layout = s->nb_channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO; if (!avctx->bit_rate) avctx->bit_rate = s->bit_rate; avctx->sub_id = s->layer; if (*data_size < 1152 * avctx->channels * sizeof(OUT_INT)) return AVERROR(EINVAL); *data_size = 0; if (s->frame_size <= 0 || s->frame_size > buf_size) { av_log(avctx, AV_LOG_ERROR, "incomplete frame\n"); return AVERROR_INVALIDDATA; } else if (s->frame_size < buf_size) { av_log(avctx, AV_LOG_ERROR, "incorrect frame size\n"); buf_size= s->frame_size; } out_size = mp_decode_frame(s, out_samples, buf, buf_size); if (out_size >= 0) { *data_size = out_size; avctx->sample_rate = s->sample_rate; //FIXME maybe move the other codec info stuff from above here too } else { av_log(avctx, AV_LOG_ERROR, "Error while decoding MPEG audio frame.\n"); /* Only return an error if the bad frame makes up the whole packet. If there is more data in the packet, just consume the bad frame instead of returning an error, which would discard the whole packet. */ if (buf_size == avpkt->size) return out_size; } s->frame_size = 0; return buf_size; }
false
FFmpeg
e2e6c8799b3c4a61b8be36c84c5e5e15c49a31cd
24,676
int load_flt_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                    struct image_info * info)
{
    struct lib_info libinfo[MAX_SHARED_LIBS];
    abi_ulong p = bprm->p;
    abi_ulong stack_len;
    abi_ulong start_addr;
    abi_ulong sp;
    int res;
    int i, j;

    memset(libinfo, 0, sizeof(libinfo));
    /*
     * We have to add the size of our arguments to our stack size
     * otherwise it's too easy for users to create stack overflows
     * by passing in a huge argument list.  And yes, we have to be
     * pedantic and include space for the argv/envp array as it may have
     * a lot of entries.
     */
    stack_len = 0;
    for (i = 0; i < bprm->argc; ++i) {
        /* the argv strings */
        stack_len += strlen(bprm->argv[i]);
    }
    for (i = 0; i < bprm->envc; ++i) {
        /* the envp strings */
        stack_len += strlen(bprm->envp[i]);
    }
    stack_len += (bprm->argc + 1) * 4; /* the argv array */
    stack_len += (bprm->envc + 1) * 4; /* the envp array */

    res = load_flat_file(bprm, libinfo, 0, &stack_len);
    if (res > (unsigned long)-4096)
        return res;

    /* Update data segment pointers for all libraries */
    for (i=0; i<MAX_SHARED_LIBS; i++) {
        if (libinfo[i].loaded) {
            abi_ulong p;
            p = libinfo[i].start_data;
            for (j=0; j<MAX_SHARED_LIBS; j++) {
                p -= 4;
                /* FIXME - handle put_user() failures */
                if (put_user_ual(libinfo[j].loaded
                                 ? libinfo[j].start_data
                                 : UNLOADED_LIB, p))
                    return -EFAULT;
            }
        }
    }

    p = ((libinfo[0].start_brk + stack_len + 3) & ~3) - 4;
    DBG_FLT("p=%x\n", (int)p);

    /* Copy argv/envp. */
    p = copy_strings(p, bprm->envc, bprm->envp);
    p = copy_strings(p, bprm->argc, bprm->argv);

    /* Align stack. */
    sp = p & ~(abi_ulong)(sizeof(abi_ulong) - 1);

    /* Enforce final stack alignment of 16 bytes.  This is sufficient
       for all current targets, and excess alignment is harmless.  */
    stack_len = bprm->envc + bprm->argc + 2;
    stack_len += 3; /* argc, argv, argp */
    stack_len *= sizeof(abi_ulong);
    if ((sp + stack_len) & 15)
        sp -= 16 - ((sp + stack_len) & 15);
    sp = loader_build_argptr(bprm->envc, bprm->argc, sp, p, 1);

    /* Fake some return addresses to ensure the call chain will
     * initialise library in order for us.  We are required to call
     * lib 1 first, then 2, ... and finally the main program (id 0).
     */
    start_addr = libinfo[0].entry;

#ifdef CONFIG_BINFMT_SHARED_FLAT
#error here
    for (i = MAX_SHARED_LIBS-1; i>0; i--) {
        if (libinfo[i].loaded) {
            /* Push previous first to call address */
            --sp;
            if (put_user_ual(start_addr, sp))
                return -EFAULT;
            start_addr = libinfo[i].entry;
        }
    }
#endif

    /* Stash our initial stack pointer into the mm structure */
    info->start_code = libinfo[0].start_code;
    info->end_code = libinfo[0].start_code = libinfo[0].text_len;
    info->start_data = libinfo[0].start_data;
    info->end_data = libinfo[0].end_data;
    info->start_brk = libinfo[0].start_brk;
    info->start_stack = sp;
    info->stack_limit = libinfo[0].start_brk;
    info->entry = start_addr;
    info->code_offset = info->start_code;
    info->data_offset = info->start_data - libinfo[0].text_len;

    DBG_FLT("start_thread(entry=0x%x, start_stack=0x%x)\n",
            (int)info->entry, (int)info->start_stack);

    return 0;
}
false
qemu
c3109ba1b109f84929abbfe0462d910d5aa8617c
24,678
static always_inline void gen_farith2 (void *helper, int rb, int rc)
{
    if (unlikely(rc == 31))
        return;

    if (rb != 31)
        tcg_gen_helper_1_1(helper, cpu_fir[rc], cpu_fir[rb]);
    else {
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_helper_1_1(helper, cpu_fir[rc], tmp);
        tcg_temp_free(tmp);
    }
}
false
qemu
a7812ae412311d7d47f8aa85656faadac9d64b56
24,679
static void qio_channel_socket_connect_worker(QIOTask *task,
                                              gpointer opaque)
{
    QIOChannelSocket *ioc = QIO_CHANNEL_SOCKET(qio_task_get_source(task));
    SocketAddress *addr = opaque;
    Error *err = NULL;

    qio_channel_socket_connect_sync(ioc, addr, &err);

    qio_task_set_error(task, err);
}
false
qemu
dfd100f242370886bb6732f70f1f7cbd8eb9fedc
24,680
void pxa27x_timer_init(target_phys_addr_t base,
                       qemu_irq *irqs, qemu_irq irq4)
{
    pxa2xx_timer_info *s = pxa2xx_timer_init(base, irqs);
    int i;
    s->freq = PXA27X_FREQ;
    s->tm4 = (PXA2xxTimer4 *) qemu_mallocz(8 * sizeof(PXA2xxTimer4));
    for (i = 0; i < 8; i ++) {
        s->tm4[i].tm.value = 0;
        s->tm4[i].tm.irq = irq4;
        s->tm4[i].tm.info = s;
        s->tm4[i].tm.num = i + 4;
        s->tm4[i].tm.level = 0;
        s->tm4[i].freq = 0;
        s->tm4[i].control = 0x0;
        s->tm4[i].tm.qtimer = qemu_new_timer(vm_clock,
                                             pxa2xx_timer_tick4, &s->tm4[i]);
    }
}
false
qemu
e1f8c729fa890c67bb4532f22c22ace6fb0e1aaf
24,681
static void serial_init_core(SerialState *s)
{
    if (!s->chr) {
        fprintf(stderr, "Can't create serial device, empty char device\n");
        exit(1);
    }

    s->modem_status_poll = qemu_new_timer(vm_clock,
                                          (QEMUTimerCB *) serial_update_msl, s);

    s->fifo_timeout_timer = qemu_new_timer(vm_clock,
                                           (QEMUTimerCB *) fifo_timeout_int, s);
    s->transmit_timer = qemu_new_timer(vm_clock,
                                       (QEMUTimerCB *) serial_xmit, s);

    qemu_register_reset(serial_reset, s);
    serial_reset(s);

    qemu_chr_add_handlers(s->chr, serial_can_receive1, serial_receive1,
                          serial_event, s);
}
false
qemu
c169998802505c244b8bcad562633f29de7d74a4
24,683
int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
{
    BlockDriver *drv;

    drv = bdrv_find_protocol(filename);
    if (drv == NULL) {
        drv = bdrv_find_format("file");
    }

    return bdrv_create(drv, filename, options);
}
false
qemu
16905d717507d3daffa714c7f0fd5403873807b2
24,684
int qemu_acl_append(qemu_acl *acl,
                    int deny,
                    const char *match)
{
    qemu_acl_entry *entry;

    entry = qemu_malloc(sizeof(*entry));
    entry->match = qemu_strdup(match);
    entry->deny = deny;

    TAILQ_INSERT_TAIL(&acl->entries, entry, next);
    acl->nentries++;

    return acl->nentries;
}
false
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
24,685
void virtio_scsi_common_realize(DeviceState *dev, Error **errp,
                                HandleOutput ctrl, HandleOutput evt,
                                HandleOutput cmd)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_init(vdev, "virtio-scsi", VIRTIO_ID_SCSI,
                sizeof(VirtIOSCSIConfig));

    if (s->conf.num_queues <= 0 || s->conf.num_queues > VIRTIO_PCI_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRId32 "), "
                         "must be a positive integer less than %d.",
                   s->conf.num_queues, VIRTIO_PCI_QUEUE_MAX);
        virtio_cleanup(vdev);
        return;
    }
    s->cmd_vqs = g_malloc0(s->conf.num_queues * sizeof(VirtQueue *));
    s->sense_size = VIRTIO_SCSI_SENSE_SIZE;
    s->cdb_size = VIRTIO_SCSI_CDB_SIZE;

    s->ctrl_vq = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, ctrl);
    s->event_vq = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, evt);
    for (i = 0; i < s->conf.num_queues; i++) {
        s->cmd_vqs[i] = virtio_add_queue(vdev, VIRTIO_SCSI_VQ_SIZE, cmd);
    }

    if (s->conf.iothread) {
        virtio_scsi_set_iothread(VIRTIO_SCSI(s), s->conf.iothread);
    }
}
false
qemu
0ba1f53191221b541b938df86a39eeccfb87f996
24,686
static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset)
{
    Elf32_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target32(offset, endian);
    phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
    if (offset == -1) {
        /* When the memory is not stored into vmcore, offset will be -1 */
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}
false
qemu
2cac260768b9d4253737417ea7501cf2950e257f
24,687
static void qemu_laio_enqueue_completed(struct qemu_laio_state *s,
                                        struct qemu_laiocb* laiocb)
{
    if (laiocb->async_context_id == get_async_context_id()) {
        qemu_laio_process_completion(s, laiocb);
    } else {
        QLIST_INSERT_HEAD(&s->completed_reqs, laiocb, node);
    }
}
false
qemu
384acbf46b70edf0d2c1648aa1a92a90bcf7057d
24,689
static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if (use_goto_tb(s, n, dest)) {
        tcg_gen_goto_tb(n);
        gen_a64_set_pc_im(dest);
        tcg_gen_exit_tb((intptr_t)tb + n);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_a64_set_pc_im(dest);
        if (s->singlestep_enabled) {
            gen_exception_internal(EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
        s->is_jmp = DISAS_JUMP;
    }
}
false
qemu
cc9c1ed14e876d724107fe72f74dcac71a003fbc
24,690
static void gen_spr_BookE206(CPUPPCState *env, uint32_t mas_mask,
                             uint32_t *tlbncfg)
{
#if !defined(CONFIG_USER_ONLY)
    const char *mas_names[8] = {
        "MAS0", "MAS1", "MAS2", "MAS3", "MAS4", "MAS5", "MAS6", "MAS7",
    };
    int mas_sprn[8] = {
        SPR_BOOKE_MAS0, SPR_BOOKE_MAS1, SPR_BOOKE_MAS2, SPR_BOOKE_MAS3,
        SPR_BOOKE_MAS4, SPR_BOOKE_MAS5, SPR_BOOKE_MAS6, SPR_BOOKE_MAS7,
    };
    int i;

    /* TLB assist registers */
    /* XXX : not implemented */
    for (i = 0; i < 8; i++) {
        if (mas_mask & (1 << i)) {
            spr_register(env, mas_sprn[i], mas_names[i],
                         SPR_NOACCESS, SPR_NOACCESS,
                         &spr_read_generic, &spr_write_generic,
                         0x00000000);
        }
    }
    if (env->nb_pids > 1) {
        /* XXX : not implemented */
        spr_register(env, SPR_BOOKE_PID1, "PID1",
                     SPR_NOACCESS, SPR_NOACCESS,
                     &spr_read_generic, &spr_write_booke_pid,
                     0x00000000);
    }
    if (env->nb_pids > 2) {
        /* XXX : not implemented */
        spr_register(env, SPR_BOOKE_PID2, "PID2",
                     SPR_NOACCESS, SPR_NOACCESS,
                     &spr_read_generic, &spr_write_booke_pid,
                     0x00000000);
    }
    /* XXX : not implemented */
    spr_register(env, SPR_MMUCFG, "MMUCFG",
                 SPR_NOACCESS, SPR_NOACCESS,
                 &spr_read_generic, SPR_NOACCESS,
                 0x00000000); /* TOFIX */
    switch (env->nb_ways) {
    case 4:
        spr_register(env, SPR_BOOKE_TLB3CFG, "TLB3CFG",
                     SPR_NOACCESS, SPR_NOACCESS,
                     &spr_read_generic, SPR_NOACCESS,
                     tlbncfg[3]);
        /* Fallthru */
    case 3:
        spr_register(env, SPR_BOOKE_TLB2CFG, "TLB2CFG",
                     SPR_NOACCESS, SPR_NOACCESS,
                     &spr_read_generic, SPR_NOACCESS,
                     tlbncfg[2]);
        /* Fallthru */
    case 2:
        spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG",
                     SPR_NOACCESS, SPR_NOACCESS,
                     &spr_read_generic, SPR_NOACCESS,
                     tlbncfg[1]);
        /* Fallthru */
    case 1:
        spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG",
                     SPR_NOACCESS, SPR_NOACCESS,
                     &spr_read_generic, SPR_NOACCESS,
                     tlbncfg[0]);
        /* Fallthru */
    case 0:
    default:
        break;
    }
#endif

    gen_spr_usprgh(env);
}
false
qemu
ba38ab8d429a326c2a9c30110df84f0cad441094
24,691
static void vnc_tight_start(VncState *vs)
{
    buffer_reset(&vs->tight);

    // make the output buffer be the zlib buffer, so we can compress it later
    vs->tight_tmp = vs->output;
    vs->output = vs->tight;
}
false
qemu
245f7b51c0ea04fb2224b1127430a096c91aee70
24,692
static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}
false
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
24,693
build_tpm_tcpa(GArray *table_data, GArray *linker, GArray *tcpalog)
{
    Acpi20Tcpa *tcpa = acpi_data_push(table_data, sizeof *tcpa);
    uint64_t log_area_start_address = acpi_data_len(tcpalog);

    tcpa->platform_class = cpu_to_le16(TPM_TCPA_ACPI_CLASS_CLIENT);
    tcpa->log_area_minimum_length = cpu_to_le32(TPM_LOG_AREA_MINIMUM_SIZE);
    tcpa->log_area_start_address = cpu_to_le64(log_area_start_address);

    bios_linker_loader_alloc(linker, ACPI_BUILD_TPMLOG_FILE, 1,
                             false /* high memory */);

    /* log area start address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   ACPI_BUILD_TPMLOG_FILE,
                                   table_data, &tcpa->log_area_start_address,
                                   sizeof(tcpa->log_area_start_address));

    build_header(linker, table_data,
                 (void *)tcpa, "TCPA", sizeof(*tcpa), 2, NULL);

    acpi_data_push(tcpalog, TPM_LOG_AREA_MINIMUM_SIZE);
}
false
qemu
37ad223c515da2fe9f1c679768cb5ccaa42e57e1
24,694
static void build_pci_bus_state_cleanup(AcpiBuildPciBusHotplugState *state)
{
    build_free_array(state->device_table);
    build_free_array(state->notify_table);
}
false
qemu
b23046abe78f48498a423b802d6d86ba0172d57f
24,696
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uintptr_t ra = GETPC();
    uint64_t destlen = get_length(env, r1 + 1);
    uint64_t dest = get_address(env, r1);
    uint64_t srclen = get_length(env, r3 + 1);
    uint64_t src = get_address(env, r3);
    uint8_t pad = a2 & 0xff;
    uint8_t v;
    uint32_t cc;

    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data_ra(env, src, ra);
        cpu_stb_data_ra(env, dest, v, ra);
    }

    for (; destlen; dest++, destlen--) {
        cpu_stb_data_ra(env, dest, pad, ra);
    }

    set_length(env, r1 + 1, destlen);
    /* can't use srclen here, we trunc'ed it */
    set_length(env, r3 + 1, env->regs[r3 + 1] - src - env->regs[r3]);
    set_address(env, r1, dest);
    set_address(env, r3, src);

    return cc;
}
false
qemu
d33271213437ed1834b0a50540d79e877e1cd894
24,697
static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    target_long *stack = (void *)infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    regs->ARM_pc = infop->entry;
    regs->ARM_sp = infop->start_stack;
    regs->ARM_r2 = tswapl(stack[2]); /* envp */
    regs->ARM_r1 = tswapl(stack[1]); /* argv */
    /* XXX: it seems that r0 is zeroed after ! */
    // regs->ARM_r0 = tswapl(stack[0]); /* argc */
}
false
qemu
0240ded8bb1580147ed2ff1748df439a3b41e38f
24,698
bool qemu_aio_wait(void)
{
    AioHandler *node;
    fd_set rdfds, wrfds;
    int max_fd = -1;
    int ret;
    bool busy;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
     */
    if (qemu_bh_poll()) {
        return true;
    }

    walking_handlers++;

    FD_ZERO(&rdfds);
    FD_ZERO(&wrfds);

    /* fill fd sets */
    busy = false;
    QLIST_FOREACH(node, &aio_handlers, node) {
        /* If there aren't pending AIO operations, don't invoke callbacks.
         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
         * wait indefinitely.
         */
        if (node->io_flush) {
            if (node->io_flush(node->opaque) == 0) {
                continue;
            }
            busy = true;
        }
        if (!node->deleted && node->io_read) {
            FD_SET(node->fd, &rdfds);
            max_fd = MAX(max_fd, node->fd + 1);
        }
        if (!node->deleted && node->io_write) {
            FD_SET(node->fd, &wrfds);
            max_fd = MAX(max_fd, node->fd + 1);
        }
    }

    walking_handlers--;

    /* No AIO operations? Get us out of here */
    if (!busy) {
        return false;
    }

    /* wait until next event */
    ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        walking_handlers++;

        /* we have to walk very carefully in case
         * qemu_aio_set_fd_handler is called while we're walking */
        node = QLIST_FIRST(&aio_handlers);
        while (node) {
            AioHandler *tmp;

            if (!node->deleted &&
                FD_ISSET(node->fd, &rdfds) &&
                node->io_read) {
                node->io_read(node->opaque);
            }
            if (!node->deleted &&
                FD_ISSET(node->fd, &wrfds) &&
                node->io_write) {
                node->io_write(node->opaque);
            }

            tmp = node;
            node = QLIST_NEXT(node, node);

            if (tmp->deleted) {
                QLIST_REMOVE(tmp, node);
                g_free(tmp);
            }
        }

        walking_handlers--;
    }

    return true;
}
false
qemu
2db2bfc0ccac5fd68dbf0ceb70fbc372c5d8a8c7
24,700
static uint64_t lsi_io_read(void *opaque, target_phys_addr_t addr,
                            unsigned size)
{
    LSIState *s = opaque;
    return lsi_reg_readb(s, addr & 0xff);
}
false
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
24,701
static av_cold int pcx_encode_close(AVCodecContext *avctx)
{
    av_frame_free(&avctx->coded_frame);
    return 0;
}
false
FFmpeg
d6604b29ef544793479d7fb4e05ef6622bb3e534
24,702
static void fill_double_array(AVLFG *lfg, double *a, int len)
{
    int i;
    double bmg[2], stddev = 10.0, mean = 0.0;

    for (i = 0; i < len; i += 2) {
        av_bmg_get(lfg, bmg);
        a[i]     = bmg[0] * stddev + mean;
        a[i + 1] = bmg[1] * stddev + mean;
    }
}
false
FFmpeg
e53c9065ca08a9153ecc73a6a8940bcc6d667e58
24,703
static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
{
    InputStream *ist = NULL;
    enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads,
                                                  in->pad_idx);
    int i;

    // TODO: support other filter types
    if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
        av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
               "currently.\n");
        exit(1);
    }

    if (in->name) {
        AVFormatContext *s;
        AVStream *st = NULL;
        char *p;
        int file_idx = strtol(in->name, &p, 0);

        if (file_idx < 0 || file_idx >= nb_input_files) {
            av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in "
                   "filtergraph description %s.\n",
                   file_idx, fg->graph_desc);
            exit(1);
        }
        s = input_files[file_idx]->ctx;

        for (i = 0; i < s->nb_streams; i++) {
            if (s->streams[i]->codecpar->codec_type != type)
                continue;
            if (check_stream_specifier(s, s->streams[i],
                                       *p == ':' ? p + 1 : p) == 1) {
                st = s->streams[i];
                break;
            }
        }
        if (!st) {
            av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph "
                   "description %s matches no streams.\n", p, fg->graph_desc);
            exit(1);
        }
        ist = input_streams[input_files[file_idx]->ist_index + st->index];
    } else {
        /* find the first unused stream of corresponding type */
        for (i = 0; i < nb_input_streams; i++) {
            ist = input_streams[i];
            if (ist->dec_ctx->codec_type == type && ist->discard)
                break;
        }
        if (i == nb_input_streams) {
            av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
                   "unlabeled input pad %d on filter %s\n", in->pad_idx,
                   in->filter_ctx->name);
            exit(1);
        }
    }
    av_assert0(ist);

    ist->discard         = 0;
    ist->decoding_needed = 1;
    ist->st->discard = AVDISCARD_NONE;

    GROW_ARRAY(fg->inputs, fg->nb_inputs);
    if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
        exit(1);
    fg->inputs[fg->nb_inputs - 1]->ist   = ist;
    fg->inputs[fg->nb_inputs - 1]->graph = fg;
    fg->inputs[fg->nb_inputs - 1]->format = -1;

    fg->inputs[fg->nb_inputs - 1]->frame_queue =
        av_fifo_alloc(8 * sizeof(AVFrame*));
    if (!fg->inputs[fg->nb_inputs - 1])
        exit_program(1);

    GROW_ARRAY(ist->filters, ist->nb_filters);
    ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
}
false
FFmpeg
602abe77b02f9702c18c2787d208fcfc9d94b70f
24,704
static int ccid_handle_data(USBDevice *dev, USBPacket *p)
{
    USBCCIDState *s = DO_UPCAST(USBCCIDState, dev, dev);
    int ret = 0;
    uint8_t *data = p->data;
    int len = p->len;

    switch (p->pid) {
    case USB_TOKEN_OUT:
        ret = ccid_handle_bulk_out(s, p);
        break;

    case USB_TOKEN_IN:
        switch (p->devep & 0xf) {
        case CCID_BULK_IN_EP:
            if (!len) {
                ret = USB_RET_NAK;
            } else {
                ret = ccid_bulk_in_copy_to_guest(s, data, len);
            }
            break;
        case CCID_INT_IN_EP:
            if (s->notify_slot_change) {
                /* page 56, RDR_to_PC_NotifySlotChange */
                data[0] = CCID_MESSAGE_TYPE_RDR_to_PC_NotifySlotChange;
                data[1] = s->bmSlotICCState;
                ret = 2;
                s->notify_slot_change = false;
                s->bmSlotICCState &= ~SLOT_0_CHANGED_MASK;
                DPRINTF(s, D_INFO,
                        "handle_data: int_in: notify_slot_change %X, "
                        "requested len %d\n",
                        s->bmSlotICCState, len);
            }
            break;
        default:
            DPRINTF(s, 1, "Bad endpoint\n");
            break;
        }
        break;
    default:
        DPRINTF(s, 1, "Bad token\n");
        ret = USB_RET_STALL;
        break;
    }

    return ret;
}
true
qemu
4f4321c11ff6e98583846bfd6f0e81954924b003
24,705
static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc) { #define HAS_OPTION_BITS(opt) do { \ if (!option_bits_enabled(dc, opt)) { \ qemu_log("Option is not enabled %s:%d\n", \ __FILE__, __LINE__); \ goto invalid_opcode; \ } \ } while (0) #define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt)) #define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__) #define RESERVED() do { \ qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \ dc->pc, b0, b1, b2, __FILE__, __LINE__); \ goto invalid_opcode; \ } while (0) #ifdef TARGET_WORDS_BIGENDIAN #define OP0 (((b0) & 0xf0) >> 4) #define OP1 (((b2) & 0xf0) >> 4) #define OP2 ((b2) & 0xf) #define RRR_R ((b1) & 0xf) #define RRR_S (((b1) & 0xf0) >> 4) #define RRR_T ((b0) & 0xf) #else #define OP0 (((b0) & 0xf)) #define OP1 (((b2) & 0xf)) #define OP2 (((b2) & 0xf0) >> 4) #define RRR_R (((b1) & 0xf0) >> 4) #define RRR_S (((b1) & 0xf)) #define RRR_T (((b0) & 0xf0) >> 4) #endif #define RRR_X ((RRR_R & 0x4) >> 2) #define RRR_Y ((RRR_T & 0x4) >> 2) #define RRR_W (RRR_R & 0x3) #define RRRN_R RRR_R #define RRRN_S RRR_S #define RRRN_T RRR_T #define RRI8_R RRR_R #define RRI8_S RRR_S #define RRI8_T RRR_T #define RRI8_IMM8 (b2) #define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8) #ifdef TARGET_WORDS_BIGENDIAN #define RI16_IMM16 (((b1) << 8) | (b2)) #else #define RI16_IMM16 (((b2) << 8) | (b1)) #endif #ifdef TARGET_WORDS_BIGENDIAN #define CALL_N (((b0) & 0xc) >> 2) #define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2)) #else #define CALL_N (((b0) & 0x30) >> 4) #define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10)) #endif #define CALL_OFFSET_SE \ (((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET) #define CALLX_N CALL_N #ifdef TARGET_WORDS_BIGENDIAN #define CALLX_M ((b0) & 0x3) #else #define CALLX_M (((b0) & 0xc0) >> 6) #endif #define CALLX_S RRR_S #define BRI12_M CALLX_M #define BRI12_S RRR_S #ifdef TARGET_WORDS_BIGENDIAN #define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2)) #else #define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4)) #endif #define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 
0xfffff000 : 0) | BRI12_IMM12) #define BRI8_M BRI12_M #define BRI8_R RRI8_R #define BRI8_S RRI8_S #define BRI8_IMM8 RRI8_IMM8 #define BRI8_IMM8_SE RRI8_IMM8_SE #define RSR_SR (b1) uint8_t b0 = cpu_ldub_code(env, dc->pc); uint8_t b1 = cpu_ldub_code(env, dc->pc + 1); uint8_t b2 = 0; static const uint32_t B4CONST[] = { 0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 }; static const uint32_t B4CONSTU[] = { 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 }; if (OP0 >= 8) { dc->next_pc = dc->pc + 2; HAS_OPTION(XTENSA_OPTION_CODE_DENSITY); } else { dc->next_pc = dc->pc + 3; b2 = cpu_ldub_code(env, dc->pc + 2); } switch (OP0) { case 0: /*QRST*/ switch (OP1) { case 0: /*RST0*/ switch (OP2) { case 0: /*ST0*/ if ((RRR_R & 0xc) == 0x8) { HAS_OPTION(XTENSA_OPTION_BOOLEAN); } switch (RRR_R) { case 0: /*SNM0*/ switch (CALLX_M) { case 0: /*ILL*/ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); break; case 1: /*reserved*/ RESERVED(); break; case 2: /*JR*/ switch (CALLX_N) { case 0: /*RET*/ case 2: /*JX*/ gen_window_check1(dc, CALLX_S); gen_jump(dc, cpu_R[CALLX_S]); break; case 1: /*RETWw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 tmp = tcg_const_i32(dc->pc); gen_advance_ccount(dc); gen_helper_retw(tmp, cpu_env, tmp); gen_jump(dc, tmp); tcg_temp_free(tmp); } break; case 3: /*reserved*/ RESERVED(); break; } break; case 3: /*CALLX*/ gen_window_check2(dc, CALLX_S, CALLX_N << 2); switch (CALLX_N) { case 0: /*CALLX0*/ { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]); tcg_gen_movi_i32(cpu_R[0], dc->next_pc); gen_jump(dc, tmp); tcg_temp_free(tmp); } break; case 1: /*CALLX4w*/ case 2: /*CALLX8w*/ case 3: /*CALLX12w*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]); gen_callw(dc, CALLX_N, tmp); tcg_temp_free(tmp); } break; } break; } break; case 1: /*MOVSPw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_window_check2(dc, RRR_T, RRR_S); { TCGv_i32 pc = tcg_const_i32(dc->pc); gen_advance_ccount(dc); gen_helper_movsp(cpu_env, pc); tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]); tcg_temp_free(pc); } break; case 2: /*SYNC*/ switch (RRR_T) { case 0: /*ISYNC*/ break; case 1: /*RSYNC*/ break; case 2: /*ESYNC*/ break; case 3: /*DSYNC*/ break; case 8: /*EXCW*/ HAS_OPTION(XTENSA_OPTION_EXCEPTION); break; case 12: /*MEMW*/ break; case 13: /*EXTW*/ break; case 15: /*NOP*/ break; default: /*reserved*/ RESERVED(); break; } break; case 3: /*RFEIx*/ switch (RRR_T) { case 0: /*RFETx*/ HAS_OPTION(XTENSA_OPTION_EXCEPTION); switch (RRR_S) { case 0: /*RFEx*/ gen_check_privilege(dc); tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); gen_helper_check_interrupts(cpu_env); gen_jump(dc, cpu_SR[EPC1]); break; case 1: /*RFUEx*/ RESERVED(); break; case 2: /*RFDEx*/ gen_check_privilege(dc); gen_jump(dc, cpu_SR[ dc->config->ndepc ? 
DEPC : EPC1]); break; case 4: /*RFWOw*/ case 5: /*RFWUw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_check_privilege(dc); { TCGv_i32 tmp = tcg_const_i32(1); tcg_gen_andi_i32( cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]); if (RRR_S == 4) { tcg_gen_andc_i32(cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], tmp); } else { tcg_gen_or_i32(cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], tmp); } gen_helper_restore_owb(cpu_env); gen_helper_check_interrupts(cpu_env); gen_jump(dc, cpu_SR[EPC1]); tcg_temp_free(tmp); } break; default: /*reserved*/ RESERVED(); break; } break; case 1: /*RFIx*/ HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT); if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) { gen_check_privilege(dc); tcg_gen_mov_i32(cpu_SR[PS], cpu_SR[EPS2 + RRR_S - 2]); gen_helper_check_interrupts(cpu_env); gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]); } else { qemu_log("RFI %d is illegal\n", RRR_S); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); } break; case 2: /*RFME*/ TBD(); break; default: /*reserved*/ RESERVED(); break; } break; case 4: /*BREAKx*/ HAS_OPTION(XTENSA_OPTION_DEBUG); if (dc->debug) { gen_debug_exception(dc, DEBUGCAUSE_BI); } break; case 5: /*SYSCALLx*/ HAS_OPTION(XTENSA_OPTION_EXCEPTION); switch (RRR_S) { case 0: /*SYSCALLx*/ gen_exception_cause(dc, SYSCALL_CAUSE); break; case 1: /*SIMCALL*/ if (semihosting_enabled) { gen_check_privilege(dc); gen_helper_simcall(cpu_env); } else { qemu_log("SIMCALL but semihosting is disabled\n"); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); } break; default: RESERVED(); break; } break; case 6: /*RSILx*/ HAS_OPTION(XTENSA_OPTION_INTERRUPT); gen_check_privilege(dc); gen_window_check1(dc, RRR_T); tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]); tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL); tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S); gen_helper_check_interrupts(cpu_env); gen_jumpi_check_loop_end(dc, 0); break; case 7: /*WAITIx*/ HAS_OPTION(XTENSA_OPTION_INTERRUPT); gen_check_privilege(dc); gen_waiti(dc, RRR_S); break; case 8: /*ANY4p*/ case 9: /*ALL4p*/ case 10: /*ANY8p*/ case 11: /*ALL8p*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); { const unsigned shift = (RRR_R & 2) ? 
8 : 4; TCGv_i32 mask = tcg_const_i32( ((1 << shift) - 1) << RRR_S); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, cpu_SR[BR], mask); if (RRR_R & 1) { /*ALL*/ tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S); } else { /*ANY*/ tcg_gen_add_i32(tmp, tmp, mask); } tcg_gen_shri_i32(tmp, tmp, RRR_S + shift); tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp, RRR_T, 1); tcg_temp_free(mask); tcg_temp_free(tmp); } break; default: /*reserved*/ RESERVED(); break; } break; case 1: /*AND*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 2: /*OR*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 3: /*XOR*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 4: /*ST1*/ switch (RRR_R) { case 0: /*SSR*/ gen_window_check1(dc, RRR_S); gen_right_shift_sar(dc, cpu_R[RRR_S]); break; case 1: /*SSL*/ gen_window_check1(dc, RRR_S); gen_left_shift_sar(dc, cpu_R[RRR_S]); break; case 2: /*SSA8L*/ gen_window_check1(dc, RRR_S); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3); gen_right_shift_sar(dc, tmp); tcg_temp_free(tmp); } break; case 3: /*SSA8B*/ gen_window_check1(dc, RRR_S); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3); gen_left_shift_sar(dc, tmp); tcg_temp_free(tmp); } break; case 4: /*SSAI*/ { TCGv_i32 tmp = tcg_const_i32( RRR_S | ((RRR_T & 1) << 4)); gen_right_shift_sar(dc, tmp); tcg_temp_free(tmp); } break; case 6: /*RER*/ TBD(); break; case 7: /*WER*/ TBD(); break; case 8: /*ROTWw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_check_privilege(dc); { TCGv_i32 tmp = tcg_const_i32( RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0)); gen_helper_rotw(cpu_env, tmp); tcg_temp_free(tmp); reset_used_window(dc); } break; case 14: /*NSAu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA); gen_window_check2(dc, RRR_S, RRR_T); gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]); break; case 15: /*NSAUu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA); gen_window_check2(dc, RRR_S, RRR_T); gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]); break; default: /*reserved*/ RESERVED(); break; } break; case 5: /*TLB*/ HAS_OPTION_BITS( XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) | XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) | XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION)); gen_check_privilege(dc); gen_window_check2(dc, RRR_S, RRR_T); { TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0); switch (RRR_R & 7) { case 3: /*RITLB0*/ /*RDTLB0*/ gen_helper_rtlb0(cpu_R[RRR_T], cpu_env, cpu_R[RRR_S], dtlb); break; case 4: /*IITLB*/ /*IDTLB*/ gen_helper_itlb(cpu_env, cpu_R[RRR_S], dtlb); /* This could change memory mapping, so exit tb */ gen_jumpi_check_loop_end(dc, -1); break; case 5: /*PITLB*/ /*PDTLB*/ tcg_gen_movi_i32(cpu_pc, dc->pc); gen_helper_ptlb(cpu_R[RRR_T], cpu_env, cpu_R[RRR_S], dtlb); break; case 6: /*WITLB*/ /*WDTLB*/ gen_helper_wtlb( cpu_env, cpu_R[RRR_T], cpu_R[RRR_S], dtlb); /* This could change memory mapping, so exit tb */ gen_jumpi_check_loop_end(dc, -1); break; case 7: /*RITLB1*/ /*RDTLB1*/ gen_helper_rtlb1(cpu_R[RRR_T], cpu_env, cpu_R[RRR_S], dtlb); break; default: tcg_temp_free(dtlb); RESERVED(); break; } tcg_temp_free(dtlb); } break; case 6: /*RT0*/ gen_window_check2(dc, RRR_R, RRR_T); switch (RRR_S) { case 0: /*NEG*/ tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]); break; case 1: /*ABS*/ { int label = gen_new_label(); tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]); tcg_gen_brcondi_i32( TCG_COND_GE, cpu_R[RRR_R], 0, 
label); tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]); gen_set_label(label); } break; default: /*reserved*/ RESERVED(); break; } break; case 7: /*reserved*/ RESERVED(); break; case 8: /*ADD*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 9: /*ADD**/ case 10: case 11: gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8); tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]); tcg_temp_free(tmp); } break; case 12: /*SUB*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 13: /*SUB**/ case 14: case 15: gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12); tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]); tcg_temp_free(tmp); } break; } break; case 1: /*RST1*/ switch (OP2) { case 0: /*SLLI*/ case 1: gen_window_check2(dc, RRR_R, RRR_S); tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S], 32 - (RRR_T | ((OP2 & 1) << 4))); break; case 2: /*SRAI*/ case 3: gen_window_check2(dc, RRR_R, RRR_T); tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S | ((OP2 & 1) << 4)); break; case 4: /*SRLI*/ gen_window_check2(dc, RRR_R, RRR_T); tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S); break; case 6: /*XSR*/ { TCGv_i32 tmp = tcg_temp_new_i32(); if (RSR_SR >= 64) { gen_check_privilege(dc); } gen_window_check1(dc, RRR_T); tcg_gen_mov_i32(tmp, cpu_R[RRR_T]); gen_rsr(dc, cpu_R[RRR_T], RSR_SR); gen_wsr(dc, RSR_SR, tmp); tcg_temp_free(tmp); if (!sregnames[RSR_SR]) { TBD(); } } break; /* * Note: 64 bit ops are used here solely because SAR values * have range 0..63 */ #define gen_shift_reg(cmd, reg) do { \ TCGv_i64 tmp = tcg_temp_new_i64(); \ tcg_gen_extu_i32_i64(tmp, reg); \ tcg_gen_##cmd##_i64(v, v, tmp); \ tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \ tcg_temp_free_i64(v); \ tcg_temp_free_i64(tmp); \ } while (0) #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR]) case 8: /*SRC*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { TCGv_i64 v = tcg_temp_new_i64(); tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]); gen_shift(shr); } break; case 9: /*SRL*/ gen_window_check2(dc, RRR_R, RRR_T); if (dc->sar_5bit) { tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]); } else { TCGv_i64 v = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]); gen_shift(shr); } break; case 10: /*SLL*/ gen_window_check2(dc, RRR_R, RRR_S); if (dc->sar_m32_5bit) { tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32); } else { TCGv_i64 v = tcg_temp_new_i64(); TCGv_i32 s = tcg_const_i32(32); tcg_gen_sub_i32(s, s, cpu_SR[SAR]); tcg_gen_andi_i32(s, s, 0x3f); tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]); gen_shift_reg(shl, s); tcg_temp_free(s); } break; case 11: /*SRA*/ gen_window_check2(dc, RRR_R, RRR_T); if (dc->sar_5bit) { tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]); } else { TCGv_i64 v = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]); gen_shift(sar); } break; #undef gen_shift #undef gen_shift_reg case 12: /*MUL16U*/ HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL); gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { TCGv_i32 v1 = tcg_temp_new_i32(); TCGv_i32 v2 = tcg_temp_new_i32(); tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]); tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]); tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2); tcg_temp_free(v2); tcg_temp_free(v1); } break; case 13: /*MUL16S*/ HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL); gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { TCGv_i32 v1 = 
tcg_temp_new_i32(); TCGv_i32 v2 = tcg_temp_new_i32(); tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]); tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]); tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2); tcg_temp_free(v2); tcg_temp_free(v1); } break; default: /*reserved*/ RESERVED(); break; } break; case 2: /*RST2*/ if (OP2 >= 8) { gen_window_check3(dc, RRR_R, RRR_S, RRR_T); } if (OP2 >= 12) { HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV); int label = gen_new_label(); tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label); gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE); gen_set_label(label); } switch (OP2) { #define BOOLEAN_LOGIC(fn, r, s, t) \ do { \ HAS_OPTION(XTENSA_OPTION_BOOLEAN); \ TCGv_i32 tmp1 = tcg_temp_new_i32(); \ TCGv_i32 tmp2 = tcg_temp_new_i32(); \ \ tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \ tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \ tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \ tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \ tcg_temp_free(tmp1); \ tcg_temp_free(tmp2); \ } while (0) case 0: /*ANDBp*/ BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T); break; case 1: /*ANDBCp*/ BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T); break; case 2: /*ORBp*/ BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T); break; case 3: /*ORBCp*/ BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T); break; case 4: /*XORBp*/ BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T); break; #undef BOOLEAN_LOGIC case 8: /*MULLi*/ HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL); tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 10: /*MULUHi*/ case 11: /*MULSHi*/ HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH); { TCGv_i64 r = tcg_temp_new_i64(); TCGv_i64 s = tcg_temp_new_i64(); TCGv_i64 t = tcg_temp_new_i64(); if (OP2 == 10) { tcg_gen_extu_i32_i64(s, cpu_R[RRR_S]); tcg_gen_extu_i32_i64(t, cpu_R[RRR_T]); } else { tcg_gen_ext_i32_i64(s, cpu_R[RRR_S]); tcg_gen_ext_i32_i64(t, cpu_R[RRR_T]); } tcg_gen_mul_i64(r, s, t); tcg_gen_shri_i64(r, r, 32); tcg_gen_trunc_i64_i32(cpu_R[RRR_R], r); tcg_temp_free_i64(r); tcg_temp_free_i64(s); tcg_temp_free_i64(t); } break; case 12: /*QUOUi*/ tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 13: /*QUOSi*/ case 15: /*REMSi*/ { int label1 = gen_new_label(); int label2 = gen_new_label(); tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000, label1); tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff, label1); tcg_gen_movi_i32(cpu_R[RRR_R], OP2 == 13 ? 
0x80000000 : 0); tcg_gen_br(label2); gen_set_label(label1); if (OP2 == 13) { tcg_gen_div_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } else { tcg_gen_rem_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } gen_set_label(label2); } break; case 14: /*REMUi*/ tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; default: /*reserved*/ RESERVED(); break; } break; case 3: /*RST3*/ switch (OP2) { case 0: /*RSR*/ if (RSR_SR >= 64) { gen_check_privilege(dc); } gen_window_check1(dc, RRR_T); gen_rsr(dc, cpu_R[RRR_T], RSR_SR); if (!sregnames[RSR_SR]) { TBD(); } break; case 1: /*WSR*/ if (RSR_SR >= 64) { gen_check_privilege(dc); } gen_window_check1(dc, RRR_T); gen_wsr(dc, RSR_SR, cpu_R[RRR_T]); if (!sregnames[RSR_SR]) { TBD(); } break; case 2: /*SEXTu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT); gen_window_check2(dc, RRR_R, RRR_S); { int shift = 24 - RRR_T; if (shift == 24) { tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]); } else if (shift == 16) { tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]); } else { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift); tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift); tcg_temp_free(tmp); } } break; case 3: /*CLAMPSu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS); gen_window_check2(dc, RRR_R, RRR_S); { TCGv_i32 tmp1 = tcg_temp_new_i32(); TCGv_i32 tmp2 = tcg_temp_new_i32(); int label = gen_new_label(); tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T); tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]); tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7)); tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp2, 0, label); tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31); tcg_gen_xori_i32(cpu_R[RRR_R], tmp1, 0xffffffff >> (25 - RRR_T)); gen_set_label(label); tcg_temp_free(tmp1); tcg_temp_free(tmp2); } break; case 4: /*MINu*/ case 5: /*MAXu*/ case 6: /*MINUu*/ case 7: /*MAXUu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX); gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { static const TCGCond cond[] = { TCG_COND_LE, TCG_COND_GE, TCG_COND_LEU, TCG_COND_GEU }; int label = gen_new_label(); if (RRR_R != RRR_T) { tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); tcg_gen_brcond_i32(cond[OP2 - 4], cpu_R[RRR_S], cpu_R[RRR_T], label); tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]); } else { tcg_gen_brcond_i32(cond[OP2 - 4], cpu_R[RRR_T], cpu_R[RRR_S], label); tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); } gen_set_label(label); } break; case 8: /*MOVEQZ*/ case 9: /*MOVNEZ*/ case 10: /*MOVLTZ*/ case 11: /*MOVGEZ*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { static const TCGCond cond[] = { TCG_COND_NE, TCG_COND_EQ, TCG_COND_GE, TCG_COND_LT }; int label = gen_new_label(); tcg_gen_brcondi_i32(cond[OP2 - 8], cpu_R[RRR_T], 0, label); tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); gen_set_label(label); } break; case 12: /*MOVFp*/ case 13: /*MOVTp*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); gen_window_check2(dc, RRR_R, RRR_S); { int label = gen_new_label(); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T); tcg_gen_brcondi_i32( OP2 & 1 ? 
TCG_COND_EQ : TCG_COND_NE, tmp, 0, label); tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); gen_set_label(label); tcg_temp_free(tmp); } break; case 14: /*RUR*/ gen_window_check1(dc, RRR_R); { int st = (RRR_S << 4) + RRR_T; if (uregnames[st]) { tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]); } else { qemu_log("RUR %d not implemented, ", st); TBD(); } } break; case 15: /*WUR*/ gen_window_check1(dc, RRR_T); if (uregnames[RSR_SR]) { gen_wur(RSR_SR, cpu_R[RRR_T]); } else { qemu_log("WUR %d not implemented, ", RSR_SR); TBD(); } break; } break; case 4: /*EXTUI*/ case 5: gen_window_check2(dc, RRR_R, RRR_T); { int shiftimm = RRR_S | ((OP1 & 1) << 4); int maskimm = (1 << (OP2 + 1)) - 1; TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm); tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm); tcg_temp_free(tmp); } break; case 6: /*CUST0*/ RESERVED(); break; case 7: /*CUST1*/ RESERVED(); break; case 8: /*LSCXp*/ switch (OP2) { case 0: /*LSXf*/ case 1: /*LSXUf*/ case 4: /*SSXf*/ case 5: /*SSXUf*/ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); gen_window_check2(dc, RRR_S, RRR_T); gen_check_cpenable(dc, 0); { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_add_i32(addr, cpu_R[RRR_S], cpu_R[RRR_T]); gen_load_store_alignment(dc, 2, addr, false); if (OP2 & 0x4) { tcg_gen_qemu_st32(cpu_FR[RRR_R], addr, dc->cring); } else { tcg_gen_qemu_ld32u(cpu_FR[RRR_R], addr, dc->cring); } if (OP2 & 0x1) { tcg_gen_mov_i32(cpu_R[RRR_S], addr); } tcg_temp_free(addr); } break; default: /*reserved*/ RESERVED(); break; } break; case 9: /*LSC4*/ gen_window_check2(dc, RRR_S, RRR_T); switch (OP2) { case 0: /*L32E*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_check_privilege(dc); { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_addi_i32(addr, cpu_R[RRR_S], (0xffffffc0 | (RRR_R << 2))); tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring); tcg_temp_free(addr); } break; case 4: /*S32E*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_check_privilege(dc); { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_addi_i32(addr, cpu_R[RRR_S], (0xffffffc0 | (RRR_R << 2))); tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring); tcg_temp_free(addr); } break; default: RESERVED(); break; } break; case 10: /*FP0*/ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); switch (OP2) { case 0: /*ADD.Sf*/ gen_check_cpenable(dc, 0); gen_helper_add_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_S], cpu_FR[RRR_T]); break; case 1: /*SUB.Sf*/ gen_check_cpenable(dc, 0); gen_helper_sub_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_S], cpu_FR[RRR_T]); break; case 2: /*MUL.Sf*/ gen_check_cpenable(dc, 0); gen_helper_mul_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_S], cpu_FR[RRR_T]); break; case 4: /*MADD.Sf*/ gen_check_cpenable(dc, 0); gen_helper_madd_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_R], cpu_FR[RRR_S], cpu_FR[RRR_T]); break; case 5: /*MSUB.Sf*/ gen_check_cpenable(dc, 0); gen_helper_msub_s(cpu_FR[RRR_R], cpu_env, cpu_FR[RRR_R], cpu_FR[RRR_S], cpu_FR[RRR_T]); break; case 8: /*ROUND.Sf*/ case 9: /*TRUNC.Sf*/ case 10: /*FLOOR.Sf*/ case 11: /*CEIL.Sf*/ case 14: /*UTRUNC.Sf*/ gen_window_check1(dc, RRR_R); gen_check_cpenable(dc, 0); { static const unsigned rounding_mode_const[] = { float_round_nearest_even, float_round_to_zero, float_round_down, float_round_up, [6] = float_round_to_zero, }; TCGv_i32 rounding_mode = tcg_const_i32( rounding_mode_const[OP2 & 7]); TCGv_i32 scale = tcg_const_i32(RRR_T); if (OP2 == 14) { gen_helper_ftoui(cpu_R[RRR_R], cpu_FR[RRR_S], rounding_mode, scale); } else { gen_helper_ftoi(cpu_R[RRR_R], cpu_FR[RRR_S], rounding_mode, scale); } tcg_temp_free(rounding_mode); tcg_temp_free(scale); 
} break; case 12: /*FLOAT.Sf*/ case 13: /*UFLOAT.Sf*/ gen_window_check1(dc, RRR_S); gen_check_cpenable(dc, 0); { TCGv_i32 scale = tcg_const_i32(-RRR_T); if (OP2 == 13) { gen_helper_uitof(cpu_FR[RRR_R], cpu_env, cpu_R[RRR_S], scale); } else { gen_helper_itof(cpu_FR[RRR_R], cpu_env, cpu_R[RRR_S], scale); } tcg_temp_free(scale); } break; case 15: /*FP1OP*/ switch (RRR_T) { case 0: /*MOV.Sf*/ gen_check_cpenable(dc, 0); tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_FR[RRR_S]); break; case 1: /*ABS.Sf*/ gen_check_cpenable(dc, 0); gen_helper_abs_s(cpu_FR[RRR_R], cpu_FR[RRR_S]); break; case 4: /*RFRf*/ gen_window_check1(dc, RRR_R); gen_check_cpenable(dc, 0); tcg_gen_mov_i32(cpu_R[RRR_R], cpu_FR[RRR_S]); break; case 5: /*WFRf*/ gen_window_check1(dc, RRR_S); gen_check_cpenable(dc, 0); tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_R[RRR_S]); break; case 6: /*NEG.Sf*/ gen_check_cpenable(dc, 0); gen_helper_neg_s(cpu_FR[RRR_R], cpu_FR[RRR_S]); break; default: /*reserved*/ RESERVED(); break; } break; default: /*reserved*/ RESERVED(); break; } break; case 11: /*FP1*/ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); #define gen_compare(rel, br, a, b) \ do { \ TCGv_i32 bit = tcg_const_i32(1 << br); \ \ gen_check_cpenable(dc, 0); \ gen_helper_##rel(cpu_env, bit, cpu_FR[a], cpu_FR[b]); \ tcg_temp_free(bit); \ } while (0) switch (OP2) { case 1: /*UN.Sf*/ gen_compare(un_s, RRR_R, RRR_S, RRR_T); break; case 2: /*OEQ.Sf*/ gen_compare(oeq_s, RRR_R, RRR_S, RRR_T); break; case 3: /*UEQ.Sf*/ gen_compare(ueq_s, RRR_R, RRR_S, RRR_T); break; case 4: /*OLT.Sf*/ gen_compare(olt_s, RRR_R, RRR_S, RRR_T); break; case 5: /*ULT.Sf*/ gen_compare(ult_s, RRR_R, RRR_S, RRR_T); break; case 6: /*OLE.Sf*/ gen_compare(ole_s, RRR_R, RRR_S, RRR_T); break; case 7: /*ULE.Sf*/ gen_compare(ule_s, RRR_R, RRR_S, RRR_T); break; #undef gen_compare case 8: /*MOVEQZ.Sf*/ case 9: /*MOVNEZ.Sf*/ case 10: /*MOVLTZ.Sf*/ case 11: /*MOVGEZ.Sf*/ gen_window_check1(dc, RRR_T); gen_check_cpenable(dc, 0); { static const TCGCond cond[] = { TCG_COND_NE, TCG_COND_EQ, TCG_COND_GE, TCG_COND_LT }; int label = gen_new_label(); tcg_gen_brcondi_i32(cond[OP2 - 8], cpu_R[RRR_T], 0, label); tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_FR[RRR_S]); gen_set_label(label); } break; case 12: /*MOVF.Sf*/ case 13: /*MOVT.Sf*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); gen_check_cpenable(dc, 0); { int label = gen_new_label(); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T); tcg_gen_brcondi_i32( OP2 & 1 ? TCG_COND_EQ : TCG_COND_NE, tmp, 0, label); tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_FR[RRR_S]); gen_set_label(label); tcg_temp_free(tmp); } break; default: /*reserved*/ RESERVED(); break; } break; default: /*reserved*/ RESERVED(); break; } break; case 1: /*L32R*/ gen_window_check1(dc, RRR_T); { TCGv_i32 tmp = tcg_const_i32( ((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ? 
0 : ((dc->pc + 3) & ~3)) + (0xfffc0000 | (RI16_IMM16 << 2))); if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) { tcg_gen_add_i32(tmp, tmp, dc->litbase); } tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring); tcg_temp_free(tmp); } break; case 2: /*LSAI*/ #define gen_load_store(type, shift) do { \ TCGv_i32 addr = tcg_temp_new_i32(); \ gen_window_check2(dc, RRI8_S, RRI8_T); \ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \ if (shift) { \ gen_load_store_alignment(dc, shift, addr, false); \ } \ tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \ tcg_temp_free(addr); \ } while (0) switch (RRI8_R) { case 0: /*L8UI*/ gen_load_store(ld8u, 0); break; case 1: /*L16UI*/ gen_load_store(ld16u, 1); break; case 2: /*L32I*/ gen_load_store(ld32u, 2); break; case 4: /*S8I*/ gen_load_store(st8, 0); break; case 5: /*S16I*/ gen_load_store(st16, 1); break; case 6: /*S32I*/ gen_load_store(st32, 2); break; case 7: /*CACHEc*/ if (RRI8_T < 8) { HAS_OPTION(XTENSA_OPTION_DCACHE); } switch (RRI8_T) { case 0: /*DPFRc*/ break; case 1: /*DPFWc*/ break; case 2: /*DPFROc*/ break; case 3: /*DPFWOc*/ break; case 4: /*DHWBc*/ break; case 5: /*DHWBIc*/ break; case 6: /*DHIc*/ break; case 7: /*DIIc*/ break; case 8: /*DCEc*/ switch (OP1) { case 0: /*DPFLl*/ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); break; case 2: /*DHUl*/ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); break; case 3: /*DIUl*/ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); break; case 4: /*DIWBc*/ HAS_OPTION(XTENSA_OPTION_DCACHE); break; case 5: /*DIWBIc*/ HAS_OPTION(XTENSA_OPTION_DCACHE); break; default: /*reserved*/ RESERVED(); break; } break; case 12: /*IPFc*/ HAS_OPTION(XTENSA_OPTION_ICACHE); break; case 13: /*ICEc*/ switch (OP1) { case 0: /*IPFLl*/ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); break; case 2: /*IHUl*/ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); break; case 3: /*IIUl*/ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); break; default: /*reserved*/ RESERVED(); break; } break; case 14: /*IHIc*/ HAS_OPTION(XTENSA_OPTION_ICACHE); break; case 15: /*IIIc*/ HAS_OPTION(XTENSA_OPTION_ICACHE); break; default: /*reserved*/ RESERVED(); break; } break; case 9: /*L16SI*/ gen_load_store(ld16s, 1); break; #undef gen_load_store case 10: /*MOVI*/ gen_window_check1(dc, RRI8_T); tcg_gen_movi_i32(cpu_R[RRI8_T], RRI8_IMM8 | (RRI8_S << 8) | ((RRI8_S & 0x8) ? 
0xfffff000 : 0)); break; #define gen_load_store_no_hw_align(type) do { \ TCGv_i32 addr = tcg_temp_local_new_i32(); \ gen_window_check2(dc, RRI8_S, RRI8_T); \ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \ gen_load_store_alignment(dc, 2, addr, true); \ tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \ tcg_temp_free(addr); \ } while (0) case 11: /*L32AIy*/ HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO); gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/ break; case 12: /*ADDI*/ gen_window_check2(dc, RRI8_S, RRI8_T); tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE); break; case 13: /*ADDMI*/ gen_window_check2(dc, RRI8_S, RRI8_T); tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8); break; case 14: /*S32C1Iy*/ HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE); gen_window_check2(dc, RRI8_S, RRI8_T); { int label = gen_new_label(); TCGv_i32 tmp = tcg_temp_local_new_i32(); TCGv_i32 addr = tcg_temp_local_new_i32(); TCGv_i32 tpc; tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]); tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); gen_load_store_alignment(dc, 2, addr, true); gen_advance_ccount(dc); tpc = tcg_const_i32(dc->pc); gen_helper_check_atomctl(cpu_env, tpc, addr); tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring); tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T], cpu_SR[SCOMPARE1], label); tcg_gen_qemu_st32(tmp, addr, dc->cring); gen_set_label(label); tcg_temp_free(tpc); tcg_temp_free(addr); tcg_temp_free(tmp); } break; case 15: /*S32RIy*/ HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO); gen_load_store_no_hw_align(st32); /*TODO release?*/ break; #undef gen_load_store_no_hw_align default: /*reserved*/ RESERVED(); break; } break; case 3: /*LSCIp*/ switch (RRI8_R) { case 0: /*LSIf*/ case 4: /*SSIf*/ case 8: /*LSIUf*/ case 12: /*SSIUf*/ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); gen_window_check1(dc, RRI8_S); gen_check_cpenable(dc, 0); { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); gen_load_store_alignment(dc, 2, addr, false); if (RRI8_R & 0x4) { tcg_gen_qemu_st32(cpu_FR[RRI8_T], addr, dc->cring); } else { tcg_gen_qemu_ld32u(cpu_FR[RRI8_T], addr, dc->cring); } if (RRI8_R & 0x8) { tcg_gen_mov_i32(cpu_R[RRI8_S], addr); } tcg_temp_free(addr); } break; default: /*reserved*/ RESERVED(); break; } break; case 4: /*MAC16d*/ HAS_OPTION(XTENSA_OPTION_MAC16); { enum { MAC16_UMUL = 0x0, MAC16_MUL = 0x4, MAC16_MULA = 0x8, MAC16_MULS = 0xc, MAC16_NONE = 0xf, } op = OP1 & 0xc; bool is_m1_sr = (OP2 & 0x3) == 2; bool is_m2_sr = (OP2 & 0xc) == 0; uint32_t ld_offset = 0; if (OP2 > 9) { RESERVED(); } switch (OP2 & 2) { case 0: /*MACI?/MACC?*/ is_m1_sr = true; ld_offset = (OP2 & 1) ? -4 : 4; if (OP2 >= 8) { /*MACI/MACC*/ if (OP1 == 0) { /*LDINC/LDDEC*/ op = MAC16_NONE; } else { RESERVED(); } } else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/ RESERVED(); } break; case 2: /*MACD?/MACA?*/ if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/ RESERVED(); } break; } if (op != MAC16_NONE) { if (!is_m1_sr) { gen_window_check1(dc, RRR_S); } if (!is_m2_sr) { gen_window_check1(dc, RRR_T); } } { TCGv_i32 vaddr = tcg_temp_new_i32(); TCGv_i32 mem32 = tcg_temp_new_i32(); if (ld_offset) { gen_window_check1(dc, RRR_S); tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset); gen_load_store_alignment(dc, 2, vaddr, false); tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring); } if (op != MAC16_NONE) { TCGv_i32 m1 = gen_mac16_m( is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S], OP1 & 1, op == MAC16_UMUL); TCGv_i32 m2 = gen_mac16_m( is_m2_sr ? 
cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T], OP1 & 2, op == MAC16_UMUL); if (op == MAC16_MUL || op == MAC16_UMUL) { tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2); if (op == MAC16_UMUL) { tcg_gen_movi_i32(cpu_SR[ACCHI], 0); } else { tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31); } } else { TCGv_i32 res = tcg_temp_new_i32(); TCGv_i64 res64 = tcg_temp_new_i64(); TCGv_i64 tmp = tcg_temp_new_i64(); tcg_gen_mul_i32(res, m1, m2); tcg_gen_ext_i32_i64(res64, res); tcg_gen_concat_i32_i64(tmp, cpu_SR[ACCLO], cpu_SR[ACCHI]); if (op == MAC16_MULA) { tcg_gen_add_i64(tmp, tmp, res64); } else { tcg_gen_sub_i64(tmp, tmp, res64); } tcg_gen_trunc_i64_i32(cpu_SR[ACCLO], tmp); tcg_gen_shri_i64(tmp, tmp, 32); tcg_gen_trunc_i64_i32(cpu_SR[ACCHI], tmp); tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]); tcg_temp_free(res); tcg_temp_free_i64(res64); tcg_temp_free_i64(tmp); } tcg_temp_free(m1); tcg_temp_free(m2); } if (ld_offset) { tcg_gen_mov_i32(cpu_R[RRR_S], vaddr); tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32); } tcg_temp_free(vaddr); tcg_temp_free(mem32); } } break; case 5: /*CALLN*/ switch (CALL_N) { case 0: /*CALL0*/ tcg_gen_movi_i32(cpu_R[0], dc->next_pc); gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0); break; case 1: /*CALL4w*/ case 2: /*CALL8w*/ case 3: /*CALL12w*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_window_check1(dc, CALL_N << 2); gen_callwi(dc, CALL_N, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0); break; } break; case 6: /*SI*/ switch (CALL_N) { case 0: /*J*/ gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0); break; case 1: /*BZ*/ gen_window_check1(dc, BRI12_S); { static const TCGCond cond[] = { TCG_COND_EQ, /*BEQZ*/ TCG_COND_NE, /*BNEZ*/ TCG_COND_LT, /*BLTZ*/ TCG_COND_GE, /*BGEZ*/ }; gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0, 4 + BRI12_IMM12_SE); } break; case 2: /*BI0*/ gen_window_check1(dc, BRI8_S); { static const TCGCond cond[] = { TCG_COND_EQ, /*BEQI*/ TCG_COND_NE, /*BNEI*/ TCG_COND_LT, /*BLTI*/ TCG_COND_GE, /*BGEI*/ }; gen_brcondi(dc, cond[BRI8_M & 3], cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE); } break; case 3: /*BI1*/ switch (BRI8_M) { case 0: /*ENTRYw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 pc = tcg_const_i32(dc->pc); TCGv_i32 s = tcg_const_i32(BRI12_S); TCGv_i32 imm = tcg_const_i32(BRI12_IMM12); gen_advance_ccount(dc); gen_helper_entry(cpu_env, pc, s, imm); tcg_temp_free(imm); tcg_temp_free(s); tcg_temp_free(pc); reset_used_window(dc); } break; case 1: /*B1*/ switch (BRI8_R) { case 0: /*BFp*/ case 1: /*BTp*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S); gen_brcondi(dc, BRI8_R == 1 ? TCG_COND_NE : TCG_COND_EQ, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; case 8: /*LOOP*/ case 9: /*LOOPNEZ*/ case 10: /*LOOPGTZ*/ HAS_OPTION(XTENSA_OPTION_LOOP); gen_window_check1(dc, RRI8_S); { uint32_t lend = dc->pc + RRI8_IMM8 + 4; TCGv_i32 tmp = tcg_const_i32(lend); tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1); tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc); gen_helper_wsr_lend(cpu_env, tmp); tcg_temp_free(tmp); if (BRI8_R > 8) { int label = gen_new_label(); tcg_gen_brcondi_i32( BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT, cpu_R[RRI8_S], 0, label); gen_jumpi(dc, lend, 1); gen_set_label(label); } gen_jumpi(dc, dc->next_pc, 0); } break; default: /*reserved*/ RESERVED(); break; } break; case 2: /*BLTUI*/ case 3: /*BGEUI*/ gen_window_check1(dc, BRI8_S); gen_brcondi(dc, BRI8_M == 2 ? 
TCG_COND_LTU : TCG_COND_GEU, cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE); break; } break; } break; case 7: /*B*/ { TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ; switch (RRI8_R & 7) { case 0: /*BNONE*/ /*BANY*/ gen_window_check2(dc, RRI8_S, RRI8_T); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]); gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; case 1: /*BEQ*/ /*BNE*/ case 2: /*BLT*/ /*BGE*/ case 3: /*BLTU*/ /*BGEU*/ gen_window_check2(dc, RRI8_S, RRI8_T); { static const TCGCond cond[] = { [1] = TCG_COND_EQ, [2] = TCG_COND_LT, [3] = TCG_COND_LTU, [9] = TCG_COND_NE, [10] = TCG_COND_GE, [11] = TCG_COND_GEU, }; gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T], 4 + RRI8_IMM8_SE); } break; case 4: /*BALL*/ /*BNALL*/ gen_window_check2(dc, RRI8_S, RRI8_T); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]); gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T], 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; case 5: /*BBC*/ /*BBS*/ gen_window_check2(dc, RRI8_S, RRI8_T); { #ifdef TARGET_WORDS_BIGENDIAN TCGv_i32 bit = tcg_const_i32(0x80000000); #else TCGv_i32 bit = tcg_const_i32(0x00000001); #endif TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f); #ifdef TARGET_WORDS_BIGENDIAN tcg_gen_shr_i32(bit, bit, tmp); #else tcg_gen_shl_i32(bit, bit, tmp); #endif tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit); gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); tcg_temp_free(bit); } break; case 6: /*BBCI*/ /*BBSI*/ case 7: gen_window_check1(dc, RRI8_S); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_R[RRI8_S], #ifdef TARGET_WORDS_BIGENDIAN 0x80000000 >> (((RRI8_R & 1) << 4) | RRI8_T)); #else 0x00000001 << (((RRI8_R & 1) << 4) | RRI8_T)); #endif gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; } } break; #define gen_narrow_load_store(type) do { \ TCGv_i32 addr = tcg_temp_new_i32(); \ gen_window_check2(dc, RRRN_S, RRRN_T); \ tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \ gen_load_store_alignment(dc, 2, addr, false); \ tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \ tcg_temp_free(addr); \ } while (0) case 8: /*L32I.Nn*/ gen_narrow_load_store(ld32u); break; case 9: /*S32I.Nn*/ gen_narrow_load_store(st32); break; #undef gen_narrow_load_store case 10: /*ADD.Nn*/ gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T); tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]); break; case 11: /*ADDI.Nn*/ gen_window_check2(dc, RRRN_R, RRRN_S); tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? RRRN_T : -1); break; case 12: /*ST2n*/ gen_window_check1(dc, RRRN_S); if (RRRN_T < 8) { /*MOVI.Nn*/ tcg_gen_movi_i32(cpu_R[RRRN_S], RRRN_R | (RRRN_T << 4) | ((RRRN_T & 6) == 6 ? 0xffffff80 : 0)); } else { /*BEQZ.Nn*/ /*BNEZ.Nn*/ TCGCond eq_ne = (RRRN_T & 4) ? 
TCG_COND_NE : TCG_COND_EQ; gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0, 4 + (RRRN_R | ((RRRN_T & 3) << 4))); } break; case 13: /*ST3n*/ switch (RRRN_R) { case 0: /*MOV.Nn*/ gen_window_check2(dc, RRRN_S, RRRN_T); tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]); break; case 15: /*S3*/ switch (RRRN_T) { case 0: /*RET.Nn*/ gen_jump(dc, cpu_R[0]); break; case 1: /*RETW.Nn*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 tmp = tcg_const_i32(dc->pc); gen_advance_ccount(dc); gen_helper_retw(tmp, cpu_env, tmp); gen_jump(dc, tmp); tcg_temp_free(tmp); } break; case 2: /*BREAK.Nn*/ HAS_OPTION(XTENSA_OPTION_DEBUG); if (dc->debug) { gen_debug_exception(dc, DEBUGCAUSE_BN); } break; case 3: /*NOP.Nn*/ break; case 6: /*ILL.Nn*/ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); break; default: /*reserved*/ RESERVED(); break; } break; default: /*reserved*/ RESERVED(); break; } break; default: /*reserved*/ RESERVED(); break; } if (dc->is_jmp == DISAS_NEXT) { gen_check_loop_end(dc, 0); } dc->pc = dc->next_pc; return; invalid_opcode: qemu_log("INVALID(pc = %08x)\n", dc->pc); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); #undef HAS_OPTION }
true
qemu
fe0bd475aa31e60674f7f53b85dc293108026202
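The qemu record above ends an Xtensa MAC16 translation, where the 40-bit accumulator is kept as ACCLO (low 32 bits) plus ACCHI sign-extended down to 8 significant bits (the ext8s step). A minimal standalone C sketch of that arithmetic, with hypothetical names:

    #include <stdint.h>

    /* Hypothetical model of the 40-bit MAC16 accumulator: the low word
     * lives in acclo, the high byte in acchi, and each update is
     * sign-extended from bit 39, mirroring the ext8s in the record. */
    static void mac16_mula(uint32_t *acclo, uint32_t *acchi,
                           int16_t m1, int16_t m2)
    {
        int64_t acc = (int64_t)(((uint64_t)*acchi << 32) | *acclo);
        acc += (int64_t)m1 * m2;                 /* 16x16 -> 32-bit product */
        *acclo = (uint32_t)acc;
        *acchi = (uint32_t)(int32_t)(int8_t)(acc >> 32); /* keep 8 bits, signed */
    }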
24,706
static void derive_spatial_merge_candidates(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int singleMCLFlag, int part_idx, int merge_idx, struct MvField mergecandlist[]) { HEVCLocalContext *lc = &s->HEVClc; RefPicList *refPicList = s->ref->refPicList; MvField *tab_mvf = s->ref->tab_mvf; const int min_pu_width = s->sps->min_pu_width; const int cand_bottom_left = lc->na.cand_bottom_left; const int cand_left = lc->na.cand_left; const int cand_up_left = lc->na.cand_up_left; const int cand_up = lc->na.cand_up; const int cand_up_right = lc->na.cand_up_right_sap; const int xA1 = x0 - 1; const int yA1 = y0 + nPbH - 1; const int xA1_pu = xA1 >> s->sps->log2_min_pu_size; const int yA1_pu = yA1 >> s->sps->log2_min_pu_size; const int xB1 = x0 + nPbW - 1; const int yB1 = y0 - 1; const int xB1_pu = xB1 >> s->sps->log2_min_pu_size; const int yB1_pu = yB1 >> s->sps->log2_min_pu_size; const int xB0 = x0 + nPbW; const int yB0 = y0 - 1; const int xB0_pu = xB0 >> s->sps->log2_min_pu_size; const int yB0_pu = yB0 >> s->sps->log2_min_pu_size; const int xA0 = x0 - 1; const int yA0 = y0 + nPbH; const int xA0_pu = xA0 >> s->sps->log2_min_pu_size; const int yA0_pu = yA0 >> s->sps->log2_min_pu_size; const int xB2 = x0 - 1; const int yB2 = y0 - 1; const int xB2_pu = xB2 >> s->sps->log2_min_pu_size; const int yB2_pu = yB2 >> s->sps->log2_min_pu_size; const int nb_refs = (s->sh.slice_type == P_SLICE) ? s->sh.nb_refs[0] : FFMIN(s->sh.nb_refs[0], s->sh.nb_refs[1]); int check_MER = 1; int check_MER_1 = 1; int zero_idx = 0; int nb_merge_cand = 0; int nb_orig_merge_cand = 0; int is_available_a0; int is_available_a1; int is_available_b0; int is_available_b1; int is_available_b2; int check_B0; int check_A0; //first left spatial merge candidate is_available_a1 = AVAILABLE(cand_left, A1); if (!singleMCLFlag && part_idx == 1 && (lc->cu.part_mode == PART_Nx2N || lc->cu.part_mode == PART_nLx2N || lc->cu.part_mode == PART_nRx2N) || isDiffMER(s, xA1, yA1, x0, y0)) { is_available_a1 = 0; } if (is_available_a1) { mergecandlist[0] = TAB_MVF_PU(A1); if (merge_idx == 0) return; nb_merge_cand++; } // above spatial merge candidate is_available_b1 = AVAILABLE(cand_up, B1); if (!singleMCLFlag && part_idx == 1 && (lc->cu.part_mode == PART_2NxN || lc->cu.part_mode == PART_2NxnU || lc->cu.part_mode == PART_2NxnD) || isDiffMER(s, xB1, yB1, x0, y0)) { is_available_b1 = 0; } if (is_available_a1 && is_available_b1) check_MER = !COMPARE_MV_REFIDX(B1, A1); if (is_available_b1 && check_MER) mergecandlist[nb_merge_cand++] = TAB_MVF_PU(B1); // above right spatial merge candidate check_MER = 1; check_B0 = PRED_BLOCK_AVAILABLE(B0); is_available_b0 = check_B0 && AVAILABLE(cand_up_right, B0); if (isDiffMER(s, xB0, yB0, x0, y0)) is_available_b0 = 0; if (is_available_b1 && is_available_b0) check_MER = !COMPARE_MV_REFIDX(B0, B1); if (is_available_b0 && check_MER) { mergecandlist[nb_merge_cand] = TAB_MVF_PU(B0); if (merge_idx == nb_merge_cand) return; nb_merge_cand++; } // left bottom spatial merge candidate check_MER = 1; check_A0 = PRED_BLOCK_AVAILABLE(A0); is_available_a0 = check_A0 && AVAILABLE(cand_bottom_left, A0); if (isDiffMER(s, xA0, yA0, x0, y0)) is_available_a0 = 0; if (is_available_a1 && is_available_a0) check_MER = !COMPARE_MV_REFIDX(A0, A1); if (is_available_a0 && check_MER) { mergecandlist[nb_merge_cand] = TAB_MVF_PU(A0); if (merge_idx == nb_merge_cand) return; nb_merge_cand++; } // above left spatial merge candidate check_MER = 1; is_available_b2 = AVAILABLE(cand_up_left, B2); if (isDiffMER(s, xB2, yB2, x0, y0)) 
is_available_b2 = 0; if (is_available_a1 && is_available_b2) check_MER = !COMPARE_MV_REFIDX(B2, A1); if (is_available_b1 && is_available_b2) check_MER_1 = !COMPARE_MV_REFIDX(B2, B1); if (is_available_b2 && check_MER && check_MER_1 && nb_merge_cand != 4) { mergecandlist[nb_merge_cand] = TAB_MVF_PU(B2); if (merge_idx == nb_merge_cand) return; nb_merge_cand++; } // temporal motion vector candidate if (s->sh.slice_temporal_mvp_enabled_flag && nb_merge_cand < s->sh.max_num_merge_cand) { Mv mv_l0_col, mv_l1_col; int available_l0 = temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH, 0, &mv_l0_col, 0); int available_l1 = (s->sh.slice_type == B_SLICE) ? temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH, 0, &mv_l1_col, 1) : 0; if (available_l0 || available_l1) { mergecandlist[nb_merge_cand].is_intra = 0; mergecandlist[nb_merge_cand].pred_flag[0] = available_l0; mergecandlist[nb_merge_cand].pred_flag[1] = available_l1; AV_ZERO16(mergecandlist[nb_merge_cand].ref_idx); mergecandlist[nb_merge_cand].mv[0] = mv_l0_col; mergecandlist[nb_merge_cand].mv[1] = mv_l1_col; if (merge_idx == nb_merge_cand) return; nb_merge_cand++; } } nb_orig_merge_cand = nb_merge_cand; // combined bi-predictive merge candidates (applies for B slices) if (s->sh.slice_type == B_SLICE && nb_orig_merge_cand > 1 && nb_orig_merge_cand < s->sh.max_num_merge_cand) { int comb_idx; for (comb_idx = 0; nb_merge_cand < s->sh.max_num_merge_cand && comb_idx < nb_orig_merge_cand * (nb_orig_merge_cand - 1); comb_idx++) { int l0_cand_idx = l0_l1_cand_idx[comb_idx][0]; int l1_cand_idx = l0_l1_cand_idx[comb_idx][1]; MvField l0_cand = mergecandlist[l0_cand_idx]; MvField l1_cand = mergecandlist[l1_cand_idx]; if (l0_cand.pred_flag[0] && l1_cand.pred_flag[1] && (refPicList[0].list[l0_cand.ref_idx[0]] != refPicList[1].list[l1_cand.ref_idx[1]] || AV_RN32A(&l0_cand.mv[0]) != AV_RN32A(&l1_cand.mv[1]))) { mergecandlist[nb_merge_cand].ref_idx[0] = l0_cand.ref_idx[0]; mergecandlist[nb_merge_cand].ref_idx[1] = l1_cand.ref_idx[1]; mergecandlist[nb_merge_cand].pred_flag[0] = 1; mergecandlist[nb_merge_cand].pred_flag[1] = 1; AV_COPY32(&mergecandlist[nb_merge_cand].mv[0], &l0_cand.mv[0]); AV_COPY32(&mergecandlist[nb_merge_cand].mv[1], &l1_cand.mv[1]); mergecandlist[nb_merge_cand].is_intra = 0; if (merge_idx == nb_merge_cand) return; nb_merge_cand++; } } } // append Zero motion vector candidates while (nb_merge_cand < s->sh.max_num_merge_cand) { mergecandlist[nb_merge_cand].pred_flag[0] = 1; mergecandlist[nb_merge_cand].pred_flag[1] = s->sh.slice_type == B_SLICE; AV_ZERO32(mergecandlist[nb_merge_cand].mv + 0); AV_ZERO32(mergecandlist[nb_merge_cand].mv + 1); mergecandlist[nb_merge_cand].is_intra = 0; mergecandlist[nb_merge_cand].ref_idx[0] = zero_idx < nb_refs ? zero_idx : 0; mergecandlist[nb_merge_cand].ref_idx[1] = zero_idx < nb_refs ? zero_idx : 0; if (merge_idx == nb_merge_cand) return; nb_merge_cand++; zero_idx++; } }
false
FFmpeg
9e9be5a20c0b36dce1cae11f5f5957886231a764
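The FFmpeg function in the record above finishes by padding the merge list with zero motion vectors. A hypothetical reduction of just that tail loop, with simplified types:

    /* Fill the remaining candidate slots with (0,0) vectors, walking
     * zero_idx through the valid reference indices, as in the record. */
    typedef struct { int mv_x, mv_y, ref_idx; } Cand;

    static void pad_with_zero_mv(Cand *list, int n, int max_cand, int nb_refs)
    {
        int zero_idx = 0;
        for (; n < max_cand; n++, zero_idx++) {
            list[n].mv_x    = 0;
            list[n].mv_y    = 0;
            list[n].ref_idx = zero_idx < nb_refs ? zero_idx : 0;
        }
    }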
24,707
void ff_avg_h264_qpel4_mc12_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_midh_qrt_and_aver_dst_4w_msa(src - (2 * stride) - 2, stride, dst, stride, 4, 0); }
false
FFmpeg
72dbc610be3272ba36603f78a39cc2d2d8fe0cc3
24,708
static void decode_postinit(H264Context *h, int setup_finished) { const SPS *sps = h->ps.sps; H264Picture *out = h->cur_pic_ptr; H264Picture *cur = h->cur_pic_ptr; int i, pics, out_of_order, out_idx; int invalid = 0, cnt = 0; h->cur_pic_ptr->f->pict_type = h->pict_type; if (h->next_output_pic) return; if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) { /* FIXME: if we have two PAFF fields in one packet, we can't start * the next thread here. If we have one field per packet, we can. * The check in decode_nal_units() is not good enough to find this * yet, so we assume the worst for now. */ // if (setup_finished) // ff_thread_finish_setup(h->avctx); return; } cur->f->interlaced_frame = 0; cur->f->repeat_pict = 0; /* Signal interlacing information externally. */ /* Prioritize picture timing SEI information over used * decoding process if it exists. */ if (sps->pic_struct_present_flag) { H264SEIPictureTiming *pt = &h->sei.picture_timing; switch (pt->pic_struct) { case SEI_PIC_STRUCT_FRAME: break; case SEI_PIC_STRUCT_TOP_FIELD: case SEI_PIC_STRUCT_BOTTOM_FIELD: cur->f->interlaced_frame = 1; break; case SEI_PIC_STRUCT_TOP_BOTTOM: case SEI_PIC_STRUCT_BOTTOM_TOP: if (FIELD_OR_MBAFF_PICTURE(h)) cur->f->interlaced_frame = 1; else // try to flag soft telecine progressive cur->f->interlaced_frame = h->prev_interlaced_frame; break; case SEI_PIC_STRUCT_TOP_BOTTOM_TOP: case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM: /* Signal the possibility of telecined film externally * (pic_struct 5,6). From these hints, let the applications * decide if they apply deinterlacing. */ cur->f->repeat_pict = 1; break; case SEI_PIC_STRUCT_FRAME_DOUBLING: cur->f->repeat_pict = 2; break; case SEI_PIC_STRUCT_FRAME_TRIPLING: cur->f->repeat_pict = 4; break; } if ((pt->ct_type & 3) && pt->pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP) cur->f->interlaced_frame = (pt->ct_type & (1 << 1)) != 0; } else { /* Derive interlacing flag from used decoding process. */ cur->f->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h); } h->prev_interlaced_frame = cur->f->interlaced_frame; if (cur->field_poc[0] != cur->field_poc[1]) { /* Derive top_field_first from field pocs. */ cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1]; } else { if (cur->f->interlaced_frame || sps->pic_struct_present_flag) { /* Use picture timing SEI information. Even if it is a * information of a past frame, better than nothing. 
*/ if (h->sei.picture_timing.pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM || h->sei.picture_timing.pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP) cur->f->top_field_first = 1; else cur->f->top_field_first = 0; } else { /* Most likely progressive */ cur->f->top_field_first = 0; } } if (h->sei.frame_packing.present && h->sei.frame_packing.arrangement_type >= 0 && h->sei.frame_packing.arrangement_type <= 6 && h->sei.frame_packing.content_interpretation_type > 0 && h->sei.frame_packing.content_interpretation_type < 3) { H264SEIFramePacking *fp = &h->sei.frame_packing; AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f); if (!stereo) return; switch (fp->arrangement_type) { case 0: stereo->type = AV_STEREO3D_CHECKERBOARD; break; case 1: stereo->type = AV_STEREO3D_COLUMNS; break; case 2: stereo->type = AV_STEREO3D_LINES; break; case 3: if (fp->quincunx_subsampling) stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX; else stereo->type = AV_STEREO3D_SIDEBYSIDE; break; case 4: stereo->type = AV_STEREO3D_TOPBOTTOM; break; case 5: stereo->type = AV_STEREO3D_FRAMESEQUENCE; break; case 6: stereo->type = AV_STEREO3D_2D; break; } if (fp->content_interpretation_type == 2) stereo->flags = AV_STEREO3D_FLAG_INVERT; } if (h->sei.display_orientation.present && (h->sei.display_orientation.anticlockwise_rotation || h->sei.display_orientation.hflip || h->sei.display_orientation.vflip)) { H264SEIDisplayOrientation *o = &h->sei.display_orientation; double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16); AVFrameSideData *rotation = av_frame_new_side_data(cur->f, AV_FRAME_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9); if (!rotation) return; av_display_rotation_set((int32_t *)rotation->data, angle); av_display_matrix_flip((int32_t *)rotation->data, o->hflip, o->vflip); } if (h->sei.afd.present) { AVFrameSideData *sd = av_frame_new_side_data(cur->f, AV_FRAME_DATA_AFD, sizeof(uint8_t)); if (!sd) return; *sd->data = h->sei.afd.active_format_description; h->sei.afd.present = 0; } if (h->sei.a53_caption.a53_caption) { H264SEIA53Caption *a53 = &h->sei.a53_caption; AVFrameSideData *sd = av_frame_new_side_data(cur->f, AV_FRAME_DATA_A53_CC, a53->a53_caption_size); if (!sd) return; memcpy(sd->data, a53->a53_caption, a53->a53_caption_size); av_freep(&a53->a53_caption); a53->a53_caption_size = 0; } // FIXME do something with unavailable reference frames /* Sort B-frames into display order */ if (sps->bitstream_restriction_flag || h->avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL) { h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames); } h->low_delay = !h->avctx->has_b_frames; pics = 0; while (h->delayed_pic[pics]) pics++; assert(pics <= MAX_DELAYED_PIC_COUNT); h->delayed_pic[pics++] = cur; if (cur->reference == 0) cur->reference = DELAYED_PIC_REF; /* Frame reordering. This code takes pictures from coding order and sorts * them by their incremental POC value into display order. It supports POC * gaps, MMCO reset codes and random resets. * A "display group" can start either with a IDR frame (f.key_frame = 1), * and/or can be closed down with a MMCO reset code. In sequences where * there is no delay, we can't detect that (since the frame was already * output to the user), so we also set h->mmco_reset to detect the MMCO * reset code. * FIXME: if we detect insufficient delays (as per h->avctx->has_b_frames), * we increase the delay between input and output. All frames affected by * the lag (e.g. 
those that should have been output before another frame * that we already returned to the user) will be dropped. This is a bug * that we will fix later. */ for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) { cnt += out->poc < h->last_pocs[i]; invalid += out->poc == INT_MIN; } if (!h->mmco_reset && !cur->f->key_frame && cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) { h->mmco_reset = 2; if (pics > 1) h->delayed_pic[pics - 2]->mmco_reset = 2; } if (h->mmco_reset || cur->f->key_frame) { for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) h->last_pocs[i] = INT_MIN; cnt = 0; invalid = MAX_DELAYED_PIC_COUNT; } out = h->delayed_pic[0]; out_idx = 0; for (i = 1; i < MAX_DELAYED_PIC_COUNT && h->delayed_pic[i] && !h->delayed_pic[i - 1]->mmco_reset && !h->delayed_pic[i]->f->key_frame; i++) if (h->delayed_pic[i]->poc < out->poc) { out = h->delayed_pic[i]; out_idx = i; } if (h->avctx->has_b_frames == 0 && (h->delayed_pic[0]->f->key_frame || h->mmco_reset)) h->next_outputed_poc = INT_MIN; out_of_order = !out->f->key_frame && !h->mmco_reset && (out->poc < h->next_outputed_poc); if (sps->bitstream_restriction_flag && h->avctx->has_b_frames >= sps->num_reorder_frames) { } else if (out_of_order && pics - 1 == h->avctx->has_b_frames && h->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) { if (invalid + cnt < MAX_DELAYED_PIC_COUNT) { h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, cnt); } h->low_delay = 0; } else if (h->low_delay && ((h->next_outputed_poc != INT_MIN && out->poc > h->next_outputed_poc + 2) || cur->f->pict_type == AV_PICTURE_TYPE_B)) { h->low_delay = 0; h->avctx->has_b_frames++; } if (pics > h->avctx->has_b_frames) { out->reference &= ~DELAYED_PIC_REF; for (i = out_idx; h->delayed_pic[i]; i++) h->delayed_pic[i] = h->delayed_pic[i + 1]; } memmove(h->last_pocs, &h->last_pocs[1], sizeof(*h->last_pocs) * (MAX_DELAYED_PIC_COUNT - 1)); h->last_pocs[MAX_DELAYED_PIC_COUNT - 1] = cur->poc; if (!out_of_order && pics > h->avctx->has_b_frames) { h->next_output_pic = out; if (out->mmco_reset) { if (out_idx > 0) { h->next_outputed_poc = out->poc; h->delayed_pic[out_idx - 1]->mmco_reset = out->mmco_reset; } else { h->next_outputed_poc = INT_MIN; } } else { if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f->key_frame) { h->next_outputed_poc = INT_MIN; } else { h->next_outputed_poc = out->poc; } } h->mmco_reset = 0; } else { av_log(h->avctx, AV_LOG_DEBUG, "no picture\n"); } if (h->next_output_pic) { if (h->next_output_pic->recovered) { // We have reached an recovery point and all frames after it in // display order are "recovered". h->frame_recovered |= FRAME_RECOVERED_SEI; } h->next_output_pic->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI); } if (setup_finished && !h->avctx->hwaccel) { ff_thread_finish_setup(h->avctx); if (h->avctx->active_thread_type & FF_THREAD_FRAME) h->setup_finished = 1; } }
false
FFmpeg
99c554efc8b09c3f1bb2fb41c3da5431085f7470
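The reordering logic in the H.264 record above repeatedly picks the delayed picture with the smallest POC as the next one to output (stopping early at key frames and MMCO resets, which this sketch omits). The core selection in isolation, with a hypothetical signature:

    /* Among n_delayed buffered pictures, return the index of the one
     * with the lowest POC, i.e. the next picture in display order. */
    static int pick_next_output(const int *poc, int n_delayed)
    {
        int out_idx = 0;
        for (int i = 1; i < n_delayed; i++)
            if (poc[i] < poc[out_idx])
                out_idx = i;
        return out_idx;
    }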
24,709
static int g2m_load_cursor(AVCodecContext *avctx, G2MContext *c, GetByteContext *gb) { int i, j, k; uint8_t *dst; uint32_t bits; uint32_t cur_size, cursor_w, cursor_h, cursor_stride; uint32_t cursor_hot_x, cursor_hot_y; int cursor_fmt; uint8_t *tmp; cur_size = bytestream2_get_be32(gb); cursor_w = bytestream2_get_byte(gb); cursor_h = bytestream2_get_byte(gb); cursor_hot_x = bytestream2_get_byte(gb); cursor_hot_y = bytestream2_get_byte(gb); cursor_fmt = bytestream2_get_byte(gb); cursor_stride = FFALIGN(cursor_w, c->cursor_fmt==1 ? 32 : 1) * 4; if (cursor_w < 1 || cursor_w > 256 || cursor_h < 1 || cursor_h > 256) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor dimensions %dx%d\n", cursor_w, cursor_h); return AVERROR_INVALIDDATA; } if (cursor_hot_x > cursor_w || cursor_hot_y > cursor_h) { av_log(avctx, AV_LOG_WARNING, "Invalid hotspot position %d,%d\n", cursor_hot_x, cursor_hot_y); cursor_hot_x = FFMIN(cursor_hot_x, cursor_w - 1); cursor_hot_y = FFMIN(cursor_hot_y, cursor_h - 1); } if (cur_size - 9 > bytestream2_get_bytes_left(gb) || c->cursor_w * c->cursor_h / 4 > cur_size) { av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d/%d\n", cur_size, bytestream2_get_bytes_left(gb)); return AVERROR_INVALIDDATA; } if (cursor_fmt != 1 && cursor_fmt != 32) { avpriv_report_missing_feature(avctx, "Cursor format %d", cursor_fmt); return AVERROR_PATCHWELCOME; } tmp = av_realloc(c->cursor, cursor_stride * cursor_h); if (!tmp) { av_log(avctx, AV_LOG_ERROR, "Cannot allocate cursor buffer\n"); return AVERROR(ENOMEM); } c->cursor = tmp; c->cursor_w = cursor_w; c->cursor_h = cursor_h; c->cursor_hot_x = cursor_hot_x; c->cursor_hot_y = cursor_hot_y; c->cursor_fmt = cursor_fmt; c->cursor_stride = cursor_stride; dst = c->cursor; switch (c->cursor_fmt) { case 1: // old monochrome for (j = 0; j < c->cursor_h; j++) { for (i = 0; i < c->cursor_w; i += 32) { bits = bytestream2_get_be32(gb); for (k = 0; k < 32; k++) { dst[0] = !!(bits & 0x80000000); dst += 4; bits <<= 1; } } } dst = c->cursor; for (j = 0; j < c->cursor_h; j++) { for (i = 0; i < c->cursor_w; i += 32) { bits = bytestream2_get_be32(gb); for (k = 0; k < 32; k++) { int mask_bit = !!(bits & 0x80000000); switch (dst[0] * 2 + mask_bit) { case 0: dst[0] = 0xFF; dst[1] = 0x00; dst[2] = 0x00; dst[3] = 0x00; break; case 1: dst[0] = 0xFF; dst[1] = 0xFF; dst[2] = 0xFF; dst[3] = 0xFF; break; default: dst[0] = 0x00; dst[1] = 0x00; dst[2] = 0x00; dst[3] = 0x00; } dst += 4; bits <<= 1; } } } break; case 32: // full colour /* skip monochrome version of the cursor and decode RGBA instead */ bytestream2_skip(gb, c->cursor_h * (FFALIGN(c->cursor_w, 32) >> 3)); for (j = 0; j < c->cursor_h; j++) { for (i = 0; i < c->cursor_w; i++) { int val = bytestream2_get_be32(gb); *dst++ = val >> 0; *dst++ = val >> 8; *dst++ = val >> 16; *dst++ = val >> 24; } } break; default: return AVERROR_PATCHWELCOME; } return 0; }
false
FFmpeg
83f7bd6dcf00875725c5f3b7e1bedac5a6b3c77d
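Both passes of the monochrome-cursor branch above expand each big-endian 32-bit mask word into 32 per-pixel bits, most significant bit first. That inner loop on its own, as a hypothetical helper:

    #include <stdint.h>

    /* MSB-first expansion of one 32-bit mask word: bit 31 becomes the
     * leftmost of the 32 pixels, matching the shift-left loop above. */
    static void expand_mask32(uint32_t bits, uint8_t out[32])
    {
        for (int k = 0; k < 32; k++) {
            out[k] = (bits >> 31) & 1;
            bits <<= 1;
        }
    }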
24,710
static int has_duration(AVFormatContext *ic) { int i; AVStream *st; for(i = 0;i < ic->nb_streams; i++) { st = ic->streams[i]; if (st->duration != AV_NOPTS_VALUE) return 1; } if (ic->duration) return 1; return 0; }
false
FFmpeg
8b97ae64841ed29db9c77db322890656cdc0d354
24,712
void virt_acpi_setup(VirtGuestInfo *guest_info) { AcpiBuildTables tables; AcpiBuildState *build_state; if (!guest_info->fw_cfg) { trace_virt_acpi_setup(); return; } if (!acpi_enabled) { trace_virt_acpi_setup(); return; } build_state = g_malloc0(sizeof *build_state); build_state->guest_info = guest_info; acpi_build_tables_init(&tables); virt_acpi_build(build_state->guest_info, &tables); /* Now expose it all to Guest */ build_state->table_mr = acpi_add_rom_blob(build_state, tables.table_data, ACPI_BUILD_TABLE_FILE, ACPI_BUILD_TABLE_MAX_SIZE); assert(build_state->table_mr != NULL); build_state->linker_mr = acpi_add_rom_blob(build_state, tables.linker, "etc/table-loader", 0); fw_cfg_add_file(guest_info->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data, acpi_data_len(tables.tcpalog)); build_state->rsdp_mr = acpi_add_rom_blob(build_state, tables.rsdp, ACPI_BUILD_RSDP_FILE, 0); qemu_register_reset(virt_acpi_build_reset, build_state); virt_acpi_build_reset(build_state); vmstate_register(NULL, 0, &vmstate_virt_acpi_build, build_state); /* Cleanup tables but don't free the memory: we track it * in build_state. */ acpi_build_tables_cleanup(&tables, false); }
false
qemu
0e9b9edae7bebfd31fdbead4ccbbce03876a7edd
24,713
void test_fenv(void) { struct __attribute__((packed)) { uint16_t fpuc; uint16_t dummy1; uint16_t fpus; uint16_t dummy2; uint16_t fptag; uint16_t dummy3; uint32_t ignored[4]; long double fpregs[8]; } float_env32; struct __attribute__((packed)) { uint16_t fpuc; uint16_t fpus; uint16_t fptag; uint16_t ignored[4]; long double fpregs[8]; } float_env16; double dtab[8]; double rtab[8]; int i; for(i=0;i<8;i++) dtab[i] = i + 1; TEST_ENV(&float_env16, "data16 fnstenv", "data16 fldenv"); TEST_ENV(&float_env16, "data16 fnsave", "data16 frstor"); TEST_ENV(&float_env32, "fnstenv", "fldenv"); TEST_ENV(&float_env32, "fnsave", "frstor"); /* test for ffree */ for(i=0;i<5;i++) asm volatile ("fldl %0" : : "m" (dtab[i])); asm volatile("ffree %st(2)"); asm volatile ("fnstenv %0\n" : : "m" (float_env32)); asm volatile ("fninit"); printf("fptag=%04x\n", float_env32.fptag); }
false
qemu
541dc0d47f10973c241e9955afc2aefc96adec51
24,714
static void migrate_params_apply(MigrateSetParameters *params) { MigrationState *s = migrate_get_current(); /* TODO use QAPI_CLONE() instead of duplicating it inline */ if (params->has_compress_level) { s->parameters.compress_level = params->compress_level; } if (params->has_compress_threads) { s->parameters.compress_threads = params->compress_threads; } if (params->has_decompress_threads) { s->parameters.decompress_threads = params->decompress_threads; } if (params->has_cpu_throttle_initial) { s->parameters.cpu_throttle_initial = params->cpu_throttle_initial; } if (params->has_cpu_throttle_increment) { s->parameters.cpu_throttle_increment = params->cpu_throttle_increment; } if (params->has_tls_creds) { g_free(s->parameters.tls_creds); s->parameters.tls_creds = g_strdup(params->tls_creds); } if (params->has_tls_hostname) { g_free(s->parameters.tls_hostname); s->parameters.tls_hostname = g_strdup(params->tls_hostname); } if (params->has_max_bandwidth) { s->parameters.max_bandwidth = params->max_bandwidth; if (s->to_dst_file) { qemu_file_set_rate_limit(s->to_dst_file, s->parameters.max_bandwidth / XFER_LIMIT_RATIO); } } if (params->has_downtime_limit) { s->parameters.downtime_limit = params->downtime_limit; } if (params->has_x_checkpoint_delay) { s->parameters.x_checkpoint_delay = params->x_checkpoint_delay; if (migration_in_colo_state()) { colo_checkpoint_notify(s); } } if (params->has_block_incremental) { s->parameters.block_incremental = params->block_incremental; } }
false
qemu
01fa55982692fb51a16049b63b571651a1053989
24,715
void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1) { env->CP0_Wired = arg1 % env->tlb->nb_tlb; }
false
qemu
ba801af429aaa68f6cc03842c8b6be81a6ede65a
24,717
static int multiwrite_req_compare(const void *a, const void *b) { const BlockRequest *req1 = a, *req2 = b; /* * Note that we can't simply subtract req2->sector from req1->sector * here as that could overflow the return value. */ if (req1->sector > req2->sector) { return 1; } else if (req1->sector < req2->sector) { return -1; } else { return 0; } }
false
qemu
61007b316cd71ee7333ff7a0a749a8949527575f
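The comment inside the comparator above warns against returning req1->sector - req2->sector directly. The reason: the int64_t difference can overflow (undefined behaviour), and truncating it to int can flip the sign, so qsort would mis-order requests. The branch-free safe form of the same three-way compare, as a hypothetical helper:

    #include <stdint.h>

    /* Safe three-way compare of two sector numbers: returns -1, 0 or 1
     * without ever forming the (possibly overflowing) difference. */
    static int cmp_sector(int64_t a, int64_t b)
    {
        return (a > b) - (a < b);
    }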
24,718
static void iothread_complete(UserCreatable *obj, Error **errp) { Error *local_error = NULL; IOThread *iothread = IOTHREAD(obj); iothread->stopping = false; iothread->thread_id = -1; iothread->ctx = aio_context_new(&local_error); if (!iothread->ctx) { error_propagate(errp, local_error); return; } qemu_mutex_init(&iothread->init_done_lock); qemu_cond_init(&iothread->init_done_cond); /* This assumes we are called from a thread with useful CPU affinity for us * to inherit. */ qemu_thread_create(&iothread->thread, "iothread", iothread_run, iothread, QEMU_THREAD_JOINABLE); /* Wait for initialization to complete */ qemu_mutex_lock(&iothread->init_done_lock); while (iothread->thread_id == -1) { qemu_cond_wait(&iothread->init_done_cond, &iothread->init_done_lock); } qemu_mutex_unlock(&iothread->init_done_lock); }
false
qemu
d21e8776f6578be155714ae95c7d6c1bb03e8e34
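The record above uses the standard condition-variable startup handshake: the creating thread blocks until the worker publishes its thread id. The same pattern in plain pthreads, a hypothetical reduction of the QEMU primitives:

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             thread_id;   /* -1 until the worker is running */
    } InitDone;

    /* Block until the worker thread has stored its id and signalled;
     * the while loop guards against spurious wakeups. */
    static void wait_for_worker(InitDone *s)
    {
        pthread_mutex_lock(&s->lock);
        while (s->thread_id == -1)
            pthread_cond_wait(&s->cond, &s->lock);
        pthread_mutex_unlock(&s->lock);
    }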
24,719
restore_fpu_state(CPUSPARCState *env, qemu_siginfo_fpu_t *fpu) { int err; #if 0 #ifdef CONFIG_SMP if (current->flags & PF_USEDFPU) regs->psr &= ~PSR_EF; #else if (current == last_task_used_math) { last_task_used_math = 0; regs->psr &= ~PSR_EF; } #endif current->used_math = 1; current->flags &= ~PF_USEDFPU; #endif #if 0 if (verify_area (VERIFY_READ, fpu, sizeof(*fpu))) return -EFAULT; #endif /* XXX: incorrect */ err = copy_from_user(&env->fpr[0], fpu->si_float_regs[0], (sizeof(abi_ulong) * 32)); err |= __get_user(env->fsr, &fpu->si_fsr); #if 0 err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); if (current->thread.fpqdepth != 0) err |= __copy_from_user(&current->thread.fpqueue[0], &fpu->si_fpqueue[0], ((sizeof(unsigned long) + (sizeof(unsigned long *)))*16)); #endif return err; }
false
qemu
945473847b4bb0869915aa47dabc4d2abbc87bdb
24,721
static int tta_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; TTAContext *s = avctx->priv_data; int i; init_get_bits(&s->gb, buf, buf_size*8); { int cur_chan = 0, framelen = s->frame_length; int32_t *p; if (*data_size < (framelen * s->channels * 2)) { av_log(avctx, AV_LOG_ERROR, "Output buffer size is too small.\n"); return -1; } // FIXME: seeking s->total_frames--; if (!s->total_frames && s->last_frame_length) framelen = s->last_frame_length; // init per channel states for (i = 0; i < s->channels; i++) { s->ch_ctx[i].predictor = 0; ttafilter_init(&s->ch_ctx[i].filter, ttafilter_configs[s->bps-1][0], ttafilter_configs[s->bps-1][1]); rice_init(&s->ch_ctx[i].rice, 10, 10); } for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++) { int32_t *predictor = &s->ch_ctx[cur_chan].predictor; TTAFilter *filter = &s->ch_ctx[cur_chan].filter; TTARice *rice = &s->ch_ctx[cur_chan].rice; uint32_t unary, depth, k; int32_t value; unary = tta_get_unary(&s->gb); if (unary == 0) { depth = 0; k = rice->k0; } else { depth = 1; k = rice->k1; unary--; } if (get_bits_left(&s->gb) < k) return -1; if (k) { if (k > MIN_CACHE_BITS) return -1; value = (unary << k) + get_bits(&s->gb, k); } else value = unary; // FIXME: copy paste from original switch (depth) { case 1: rice->sum1 += value - (rice->sum1 >> 4); if (rice->k1 > 0 && rice->sum1 < shift_16[rice->k1]) rice->k1--; else if(rice->sum1 > shift_16[rice->k1 + 1]) rice->k1++; value += shift_1[rice->k0]; default: rice->sum0 += value - (rice->sum0 >> 4); if (rice->k0 > 0 && rice->sum0 < shift_16[rice->k0]) rice->k0--; else if(rice->sum0 > shift_16[rice->k0 + 1]) rice->k0++; } // extract coded value #define UNFOLD(x) (((x)&1) ? (++(x)>>1) : (-(x)>>1)) *p = UNFOLD(value); // run hybrid filter ttafilter_process(filter, p, 0); // fixed order prediction #define PRED(x, k) (int32_t)((((uint64_t)x << k) - x) >> k) switch (s->bps) { case 1: *p += PRED(*predictor, 4); break; case 2: case 3: *p += PRED(*predictor, 5); break; case 4: *p += *predictor; break; } *predictor = *p; // flip channels if (cur_chan < (s->channels-1)) cur_chan++; else { // decorrelate in case of stereo integer if (s->channels > 1) { int32_t *r = p - 1; for (*p += *r / 2; r > p - s->channels; r--) *r = *(r + 1) - *r; } cur_chan = 0; } } if (get_bits_left(&s->gb) < 32) return -1; skip_bits(&s->gb, 32); // frame crc // convert to output buffer switch(s->bps) { case 2: { uint16_t *samples = data; for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++) { *samples++ = *p; } *data_size = (uint8_t *)samples - (uint8_t *)data; break; } default: av_log(s->avctx, AV_LOG_ERROR, "Error, only 16bit samples supported!\n"); } } return buf_size; }
false
FFmpeg
e6923f683c506cbb581eb7f31288801f1a065fb0
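The UNFOLD macro in the TTA record above maps the unsigned Rice-decoded value back to a signed residual: 0 -> 0, 1 -> 1, 2 -> -1, 3 -> 2, 4 -> -2, and so on. The same mapping as a plain function:

    #include <stdint.h>

    /* Function form of UNFOLD: odd codes become positive residuals,
     * even codes non-positive ones. */
    static int32_t unfold(uint32_t v)
    {
        return (v & 1) ? (int32_t)((v + 1) >> 1) : -(int32_t)(v >> 1);
    }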
24,722
int qdev_init(DeviceState *dev) { int rc; assert(dev->state == DEV_STATE_CREATED); rc = dev->info->init(dev, dev->info); if (rc < 0) { qdev_free(dev); return rc; } qemu_register_reset(qdev_reset, dev); if (dev->info->vmsd) { vmstate_register_with_alias_id(dev, -1, dev->info->vmsd, dev, dev->instance_id_alias, dev->alias_required_for_version); } dev->state = DEV_STATE_INITIALIZED; return 0; }
false
qemu
ec990eb622ad46df5ddcb1e94c418c271894d416
24,723
static int coroutine_fn copy_sectors(BlockDriverState *bs, uint64_t start_sect, uint64_t cluster_offset, int n_start, int n_end) { BDRVQcowState *s = bs->opaque; QEMUIOVector qiov; struct iovec iov; int n, ret; /* * If this is the last cluster and it is only partially used, we must only * copy until the end of the image, or bdrv_check_request will fail for the * bdrv_read/write calls below. */ if (start_sect + n_end > bs->total_sectors) { n_end = bs->total_sectors - start_sect; } n = n_end - n_start; if (n <= 0) { return 0; } iov.iov_len = n * BDRV_SECTOR_SIZE; iov.iov_base = qemu_blockalign(bs, iov.iov_len); qemu_iovec_init_external(&qiov, &iov, 1); BLKDBG_EVENT(bs->file, BLKDBG_COW_READ); if (!bs->drv) { return -ENOMEDIUM; } /* Call .bdrv_co_readv() directly instead of using the public block-layer * interface. This avoids double I/O throttling and request tracking, * which can lead to deadlock when block layer copy-on-read is enabled. */ ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov); if (ret < 0) { goto out; } if (s->crypt_method) { qcow2_encrypt_sectors(s, start_sect + n_start, iov.iov_base, iov.iov_base, n, 1, &s->aes_encrypt_key); } ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE); if (ret < 0) { goto out; } BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE); ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov); if (ret < 0) { goto out; } ret = 0; out: qemu_vfree(iov.iov_base); return ret; }
false
qemu
6b7d4c55586a849aa8313282d79432917eade3bf
24,724
static int xhci_ep_nuke_xfers(XHCIState *xhci, unsigned int slotid, unsigned int epid, TRBCCode report) { XHCISlot *slot; XHCIEPContext *epctx; int i, xferi, killed = 0; USBEndpoint *ep = NULL; assert(slotid >= 1 && slotid <= xhci->numslots); assert(epid >= 1 && epid <= 31); DPRINTF("xhci_ep_nuke_xfers(%d, %d)\n", slotid, epid); slot = &xhci->slots[slotid-1]; if (!slot->eps[epid-1]) { return 0; } epctx = slot->eps[epid-1]; xferi = epctx->next_xfer; for (i = 0; i < TD_QUEUE; i++) { killed += xhci_ep_nuke_one_xfer(&epctx->transfers[xferi], report); if (killed) { report = 0; /* Only report once */ } epctx->transfers[xferi].packet.ep = NULL; xferi = (xferi + 1) % TD_QUEUE; } ep = xhci_epid_to_usbep(xhci, slotid, epid); if (ep) { usb_device_ep_stopped(ep->dev, ep); } return killed; }
false
qemu
94b037f2a451b3dc855f9f2c346e5049a361bd55
24,725
static inline void gen_branch_slot(uint32_t delayed_pc, int t) { TCGv sr; int label = gen_new_label(); tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc); sr = tcg_temp_new(); tcg_gen_andi_i32(sr, cpu_sr, SR_T); tcg_gen_brcondi_i32(t ? TCG_COND_EQ:TCG_COND_NE, sr, 0, label); tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE); gen_set_label(label); }
false
qemu
42a268c241183877192c376d03bd9b6d527407c7
24,729
static void put_fid(V9fsPDU *pdu, V9fsFidState *fidp) { BUG_ON(!fidp->ref); fidp->ref--; /* * Don't free the fid if it is in reclaim list */ if (!fidp->ref && fidp->clunked) { if (fidp->fid == pdu->s->root_fid) { /* * if the clunked fid is root fid then we * have unmounted the fs on the client side. * delete the migration blocker. Ideally, this * should be hooked to transport close notification */ if (pdu->s->migration_blocker) { migrate_del_blocker(pdu->s->migration_blocker); error_free(pdu->s->migration_blocker); pdu->s->migration_blocker = NULL; } } free_fid(pdu, fidp); } }
false
qemu
a911a182a6bfd3b0257b13f862b0d4fbd9392715
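The release path in the 9p record above frees a fid only when the last reference drops and the fid has already been clunked, so an in-flight user keeps a clunked fid alive. The skeleton of that rule, with hypothetical names:

    #include <assert.h>

    typedef struct Fid { int ref; int clunked; } Fid;

    /* Destruction requires both conditions: the refcount reaching zero
     * and the object already being marked clunked. */
    static void fid_put(Fid *f, void (*free_fid)(Fid *))
    {
        assert(f->ref > 0);
        if (--f->ref == 0 && f->clunked)
            free_fid(f);
    }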
24,731
static inline void RENAME(rgb16to15)(const uint8_t *src,uint8_t *dst,unsigned src_size) { register const uint8_t* s=src; register uint8_t* d=dst; register const uint8_t *end; const uint8_t *mm_end; end = s + src_size; #ifdef HAVE_MMX __asm __volatile(PREFETCH" %0"::"m"(*s)); __asm __volatile("movq %0, %%mm7"::"m"(mask15rg)); __asm __volatile("movq %0, %%mm6"::"m"(mask15b)); mm_end = end - 15; while(s<mm_end) { __asm __volatile( PREFETCH" 32%1\n\t" "movq %1, %%mm0\n\t" "movq 8%1, %%mm2\n\t" "movq %%mm0, %%mm1\n\t" "movq %%mm2, %%mm3\n\t" "psrlq $1, %%mm0\n\t" "psrlq $1, %%mm2\n\t" "pand %%mm7, %%mm0\n\t" "pand %%mm7, %%mm2\n\t" "pand %%mm6, %%mm1\n\t" "pand %%mm6, %%mm3\n\t" "por %%mm1, %%mm0\n\t" "por %%mm3, %%mm2\n\t" MOVNTQ" %%mm0, %0\n\t" MOVNTQ" %%mm2, 8%0" :"=m"(*d) :"m"(*s) ); d+=16; s+=16; } __asm __volatile(SFENCE:::"memory"); __asm __volatile(EMMS:::"memory"); #endif mm_end = end - 3; while(s < mm_end) { register uint32_t x= *((uint32_t *)s); *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F); s+=4; d+=4; } if(s < end) { register uint16_t x= *((uint16_t *)s); *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F); s+=2; d+=2; } }
true
FFmpeg
7f526efd17973ec6d2204f7a47b6923e2be31363
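The scalar tail of the FFmpeg record above is the whole RGB565 -> RGB1555 conversion in two masks: blue (bits 0-4) stays in place, while shifting right by one moves red into bits 10-14 and keeps the top five bits of the 6-bit green field in bits 5-9. Per pixel:

    #include <stdint.h>

    /* One-pixel form of the conversion; the record's 32-bit path packs
     * two pixels at once with the doubled masks 0x7FE07FE0/0x001F001F. */
    static uint16_t rgb565_to_rgb555(uint16_t x)
    {
        return ((x >> 1) & 0x7FE0) | (x & 0x001F);
    }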
24,733
static void moxiesim_init(MachineState *machine) { MoxieCPU *cpu = NULL; ram_addr_t ram_size = machine->ram_size; const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; CPUMoxieState *env; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); MemoryRegion *rom = g_new(MemoryRegion, 1); hwaddr ram_base = 0x200000; LoaderParams loader_params; /* Init CPUs. */ if (cpu_model == NULL) { cpu_model = "MoxieLite-moxie-cpu"; } cpu = MOXIE_CPU(cpu_generic_init(TYPE_MOXIE_CPU, cpu_model)); if (!cpu) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } env = &cpu->env; qemu_register_reset(main_cpu_reset, cpu); /* Allocate RAM. */ memory_region_init_ram(ram, NULL, "moxiesim.ram", ram_size, &error_fatal); memory_region_add_subregion(address_space_mem, ram_base, ram); memory_region_init_ram(rom, NULL, "moxie.rom", 128 * 0x1000, &error_fatal); memory_region_add_subregion(get_system_memory(), 0x1000, rom); if (kernel_filename) { loader_params.ram_size = ram_size; loader_params.kernel_filename = kernel_filename; loader_params.kernel_cmdline = kernel_cmdline; loader_params.initrd_filename = initrd_filename; load_kernel(cpu, &loader_params); } /* A single 16450 sits at offset 0x3f8. */ if (serial_hds[0]) { serial_mm_init(address_space_mem, 0x3f8, 0, env->irq[4], 8000000/16, serial_hds[0], DEVICE_LITTLE_ENDIAN); } }
true
qemu
4482e05cbbb7e50e476f6a9500cf0b38913bd939
24,734
static void build_guest_fsinfo_for_virtual_device(char const *syspath, GuestFilesystemInfo *fs, Error **errp) { DIR *dir; char *dirpath; struct dirent entry, *result; dirpath = g_strdup_printf("%s/slaves", syspath); dir = opendir(dirpath); if (!dir) { error_setg_errno(errp, errno, "opendir(\"%s\")", dirpath); g_free(dirpath); return; } g_free(dirpath); for (;;) { if (readdir_r(dir, &entry, &result) != 0) { error_setg_errno(errp, errno, "readdir_r(\"%s\")", dirpath); break; } if (!result) { break; } if (entry.d_type == DT_LNK) { g_debug(" slave device '%s'", entry.d_name); dirpath = g_strdup_printf("%s/slaves/%s", syspath, entry.d_name); build_guest_fsinfo_for_device(dirpath, fs, errp); g_free(dirpath); if (*errp) { break; } } } closedir(dir); }
true
qemu
e668d1b8545f1c79cf869bd78813cb1e52216f45
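Note that the record above g_free()s dirpath before the readdir loop yet still formats it into the error message on the readdir_r failure path, and that readdir_r itself is deprecated in modern glibc. A minimal sketch of the same walk with the string kept alive until after its last use and plain readdir() (a hypothetical reduction, not the project's actual fix):

    #include <dirent.h>
    #include <stdio.h>
    #include <glib.h>

    static void list_slave_links(const char *syspath)
    {
        char *dirpath = g_strdup_printf("%s/slaves", syspath);
        DIR *dir = opendir(dirpath);
        if (dir) {
            struct dirent *entry;
            while ((entry = readdir(dir)) != NULL) {
                if (entry->d_type == DT_LNK)          /* slave symlinks */
                    printf("slave device %s/%s\n", dirpath, entry->d_name);
            }
            closedir(dir);
        } else {
            perror(dirpath);
        }
        g_free(dirpath);   /* freed only after its last use */
    }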
24,735
void write_video_frame(AVFormatContext *oc, AVStream *st) { int x, y, i, out_size; AVCodecContext *c; c = &st->codec; /* prepare a dummy image */ /* Y */ i = frame_count++; for(y=0;y<c->height;y++) { for(x=0;x<c->width;x++) { picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3; } } /* Cb and Cr */ for(y=0;y<c->height/2;y++) { for(x=0;x<c->width/2;x++) { picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2; picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5; } } /* encode the image */ out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture); /* write the compressed frame in the media file */ if (av_write_frame(oc, st->index, video_outbuf, out_size) != 0) { fprintf(stderr, "Error while writing video frame\n"); exit(1); } }
true
FFmpeg
e70fcf075b8f92c4e410b80c703fbdc1d531d42d
24,736
static void gen_sse(CPUX86State *env, DisasContext *s, int b, target_ulong pc_start, int rex_r) { int b1, op1_offset, op2_offset, is_xmm, val; int modrm, mod, rm, reg; SSEFunc_0_epp sse_fn_epp; SSEFunc_0_eppi sse_fn_eppi; SSEFunc_0_ppi sse_fn_ppi; SSEFunc_0_eppt sse_fn_eppt; TCGMemOp ot; b &= 0xff; if (s->prefix & PREFIX_DATA) b1 = 1; else if (s->prefix & PREFIX_REPZ) b1 = 2; else if (s->prefix & PREFIX_REPNZ) b1 = 3; else b1 = 0; sse_fn_epp = sse_op_table1[b][b1]; if (!sse_fn_epp) { goto illegal_op; } if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) { is_xmm = 1; } else { if (b1 == 0) { /* MMX case */ is_xmm = 0; } else { is_xmm = 1; } } /* simple MMX/SSE operation */ if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); return; } if (s->flags & HF_EM_MASK) { illegal_op: gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base); return; } if (is_xmm && !(s->flags & HF_OSFXSR_MASK)) if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA)) goto illegal_op; if (b == 0x0e) { if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) goto illegal_op; /* femms */ gen_helper_emms(cpu_env); return; } if (b == 0x77) { /* emms */ gen_helper_emms(cpu_env); return; } /* prepare MMX state (XXX: optimize by storing fptt and fptags in the static cpu state) */ if (!is_xmm) { gen_helper_enter_mmx(cpu_env); } modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7); if (is_xmm) reg |= rex_r; mod = (modrm >> 6) & 3; if (sse_fn_epp == SSE_SPECIAL) { b |= (b1 << 8); switch(b) { case 0x0e7: /* movntq */ if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm); gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); break; case 0x1e7: /* movntdq */ case 0x02b: /* movntps */ case 0x12b: /* movntps */ if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm); gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); break; case 0x3f0: /* lddqu */ if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm); gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); break; case 0x22b: /* movntss */ case 0x32b: /* movntsd */ if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm); if (b1 & 1) { gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].XMM_Q(0))); } else { tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, xmm_regs[reg].XMM_L(0))); gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0); } break; case 0x6e: /* movd mm, ea */ #ifdef TARGET_X86_64 if (s->dflag == MO_64) { gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); } else #endif { gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32); } break; case 0x16e: /* movd xmm, ea */ #ifdef TARGET_X86_64 if (s->dflag == MO_64) { gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[reg])); gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]); } else #endif { gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[reg])); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32); } break; case 0x6f: /* movq mm, ea */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); } else { rm = (modrm & 7); tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx)); 
tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); } break; case 0x010: /* movups */ case 0x110: /* movupd */ case 0x028: /* movaps */ case 0x128: /* movapd */ case 0x16f: /* movdqa xmm, ea */ case 0x26f: /* movdqu xmm, ea */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]), offsetof(CPUX86State,xmm_regs[rm])); } break; case 0x210: /* movss xmm, ea */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); tcg_gen_movi_tl(cpu_T[0], 0); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1))); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)), offsetof(CPUX86State,xmm_regs[rm].XMM_L(0))); } break; case 0x310: /* movsd xmm, ea */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].XMM_Q(0))); tcg_gen_movi_tl(cpu_T[0], 0); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); } break; case 0x012: /* movlps */ case 0x112: /* movlpd */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].XMM_Q(0))); } else { /* movhlps */ rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1))); } break; case 0x212: /* movsldup */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)), offsetof(CPUX86State,xmm_regs[rm].XMM_L(0))); gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)), offsetof(CPUX86State,xmm_regs[rm].XMM_L(2))); } gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)), offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)), offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); break; case 0x312: /* movddup */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].XMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); } gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)), offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); break; case 0x016: /* movhps */ case 0x116: /* movhpd */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].XMM_Q(1))); } else { /* movlhps */ rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)), offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); } break; case 0x216: /* movshdup */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)), offsetof(CPUX86State,xmm_regs[rm].XMM_L(1))); 
gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)), offsetof(CPUX86State,xmm_regs[rm].XMM_L(3))); } gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)), offsetof(CPUX86State,xmm_regs[reg].XMM_L(1))); gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)), offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); break; case 0x178: case 0x378: { int bit_index, field_length; if (b1 == 1 && reg != 0) goto illegal_op; field_length = cpu_ldub_code(env, s->pc++) & 0x3F; bit_index = cpu_ldub_code(env, s->pc++) & 0x3F; tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[reg])); if (b1 == 1) gen_helper_extrq_i(cpu_env, cpu_ptr0, tcg_const_i32(bit_index), tcg_const_i32(field_length)); else gen_helper_insertq_i(cpu_env, cpu_ptr0, tcg_const_i32(bit_index), tcg_const_i32(field_length)); } break; case 0x7e: /* movd ea, mm */ #ifdef TARGET_X86_64 if (s->dflag == MO_64) { tcg_gen_ld_i64(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); } else #endif { tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0))); gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); } break; case 0x17e: /* movd ea, xmm */ #ifdef TARGET_X86_64 if (s->dflag == MO_64) { tcg_gen_ld_i64(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1); } else #endif { tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1); } break; case 0x27e: /* movq xmm, ea */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].XMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); } gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1))); break; case 0x7f: /* movq ea, mm */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx)); } else { rm = (modrm & 7); gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx), offsetof(CPUX86State,fpregs[reg].mmx)); } break; case 0x011: /* movups */ case 0x111: /* movupd */ case 0x029: /* movaps */ case 0x129: /* movapd */ case 0x17f: /* movdqa ea, xmm */ case 0x27f: /* movdqu ea, xmm */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]), offsetof(CPUX86State,xmm_regs[reg])); } break; case 0x211: /* movss ea, xmm */ if (mod != 3) { gen_lea_modrm(env, s, modrm); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)), offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); } break; case 0x311: /* movsd ea, xmm */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].XMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)), offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); } break; case 0x013: /* movlps */ case 0x113: /* movlpd */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].XMM_Q(0))); } else { goto illegal_op; } break; case 0x017: /* movhps */ case 0x117: /* movhpd */ if (mod != 3) { gen_lea_modrm(env, s, modrm); gen_stq_env_A0(s, offsetof(CPUX86State, 
xmm_regs[reg].XMM_Q(1))); } else { goto illegal_op; } break; case 0x71: /* shift mm, im */ case 0x72: case 0x73: case 0x171: /* shift xmm, im */ case 0x172: case 0x173: if (b1 >= 2) { goto illegal_op; } val = cpu_ldub_code(env, s->pc++); if (is_xmm) { tcg_gen_movi_tl(cpu_T[0], val); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0))); tcg_gen_movi_tl(cpu_T[0], 0); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1))); op1_offset = offsetof(CPUX86State,xmm_t0); } else { tcg_gen_movi_tl(cpu_T[0], val); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0))); tcg_gen_movi_tl(cpu_T[0], 0); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1))); op1_offset = offsetof(CPUX86State,mmx_t0); } sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1]; if (!sse_fn_epp) { goto illegal_op; } if (is_xmm) { rm = (modrm & 7) | REX_B(s); op2_offset = offsetof(CPUX86State,xmm_regs[rm]); } else { rm = (modrm & 7); op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset); sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1); break; case 0x050: /* movmskps */ rm = (modrm & 7) | REX_B(s); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm])); gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0); tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32); break; case 0x150: /* movmskpd */ rm = (modrm & 7) | REX_B(s); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm])); gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0); tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32); break; case 0x02a: /* cvtpi2ps */ case 0x12a: /* cvtpi2pd */ gen_helper_enter_mmx(cpu_env); if (mod != 3) { gen_lea_modrm(env, s, modrm); op2_offset = offsetof(CPUX86State,mmx_t0); gen_ldq_env_A0(s, op2_offset); } else { rm = (modrm & 7); op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } op1_offset = offsetof(CPUX86State,xmm_regs[reg]); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); switch(b >> 8) { case 0x0: gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1); break; default: case 0x1: gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1); break; } break; case 0x22a: /* cvtsi2ss */ case 0x32a: /* cvtsi2sd */ ot = mo_64_32(s->dflag); gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); op1_offset = offsetof(CPUX86State,xmm_regs[reg]); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); if (ot == MO_32) { SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1]; tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32); } else { #ifdef TARGET_X86_64 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1]; sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]); #else goto illegal_op; #endif } break; case 0x02c: /* cvttps2pi */ case 0x12c: /* cvttpd2pi */ case 0x02d: /* cvtps2pi */ case 0x12d: /* cvtpd2pi */ gen_helper_enter_mmx(cpu_env); if (mod != 3) { gen_lea_modrm(env, s, modrm); op2_offset = offsetof(CPUX86State,xmm_t0); gen_ldo_env_A0(s, op2_offset); } else { rm = (modrm & 7) | REX_B(s); op2_offset = offsetof(CPUX86State,xmm_regs[rm]); } op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); switch(b) { case 0x02c: gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1); break; case 0x12c: gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1); break; case 0x02d: gen_helper_cvtps2pi(cpu_env, cpu_ptr0, 
                                        cpu_ptr1);
            break;
        case 0x12d:
            gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
            break;
        }
        break;
    case 0x22c: /* cvttss2si */
    case 0x32c: /* cvttsd2si */
    case 0x22d: /* cvtss2si */
    case 0x32d: /* cvtsd2si */
        ot = mo_64_32(s->dflag);
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            if ((b >> 8) & 1) {
                gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_Q(0)));
            } else {
                gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
                tcg_gen_st32_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,xmm_t0.XMM_L(0)));
            }
            op2_offset = offsetof(CPUX86State,xmm_t0);
        } else {
            rm = (modrm & 7) | REX_B(s);
            op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
        }
        tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
        if (ot == MO_32) {
            SSEFunc_i_ep sse_fn_i_ep =
                sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
            sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
        } else {
#ifdef TARGET_X86_64
            SSEFunc_l_ep sse_fn_l_ep =
                sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
            sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
#else
            goto illegal_op;
#endif
        }
        gen_op_mov_reg_v(ot, reg, cpu_T[0]);
        break;
    case 0xc4: /* pinsrw */
    case 0x1c4:
        s->rip_offset = 1;
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        val = cpu_ldub_code(env, s->pc++);
        if (b1) {
            val &= 7;
            tcg_gen_st16_tl(cpu_T[0], cpu_env,
                            offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
        } else {
            val &= 3;
            tcg_gen_st16_tl(cpu_T[0], cpu_env,
                            offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
        }
        break;
    case 0xc5: /* pextrw */
    case 0x1c5:
        if (mod != 3)
            goto illegal_op;
        ot = mo_64_32(s->dflag);
        val = cpu_ldub_code(env, s->pc++);
        if (b1) {
            val &= 7;
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
        } else {
            val &= 3;
            rm = (modrm & 7);
            tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
                             offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
        }
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_op_mov_reg_v(ot, reg, cpu_T[0]);
        break;
    case 0x1d6: /* movq ea, xmm */
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg].XMM_Q(0)));
        } else {
            rm = (modrm & 7) | REX_B(s);
            gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
                        offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
            gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
        }
        break;
    case 0x2d6: /* movq2dq */
        gen_helper_enter_mmx(cpu_env);
        rm = (modrm & 7);
        gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
                    offsetof(CPUX86State,fpregs[rm].mmx));
        gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
        break;
    case 0x3d6: /* movdq2q */
        gen_helper_enter_mmx(cpu_env);
        rm = (modrm & 7) | REX_B(s);
        gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
                    offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
        break;
    case 0xd7: /* pmovmskb */
    case 0x1d7:
        if (mod != 3)
            goto illegal_op;
        if (b1) {
            rm = (modrm & 7) | REX_B(s);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,xmm_regs[rm]));
            gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
        } else {
            rm = (modrm & 7);
            tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
                             offsetof(CPUX86State,fpregs[rm].mmx));
            gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
        }
        reg = ((modrm >> 3) & 7) | rex_r;
        tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
        break;

    case 0x138:
    case 0x038:
        b = modrm;
        if ((b & 0xf0) == 0xf0) {
            goto do_0f_38_fx;
        }
        modrm = cpu_ldub_code(env, s->pc++);
        rm = modrm & 7;
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (b1 >= 2) {
            goto illegal_op;
        }

        sse_fn_epp = sse_op_table6[b].op[b1];
        if (!sse_fn_epp) {
            goto illegal_op;
        }
        if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
            goto illegal_op;

        if (b1) {
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            if (mod == 3) {
                op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
            } else {
                op2_offset = offsetof(CPUX86State,xmm_t0);
                gen_lea_modrm(env, s, modrm);
                switch (b) {
                case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
                case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
                case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
                    gen_ldq_env_A0(s, op2_offset +
                                   offsetof(XMMReg, XMM_Q(0)));
                    break;
                case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
                case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
                    tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                                        s->mem_index, MO_LEUL);
                    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
                                   offsetof(XMMReg, XMM_L(0)));
                    break;
                case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
                    tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
                                       s->mem_index, MO_LEUW);
                    tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
                                    offsetof(XMMReg, XMM_W(0)));
                    break;
                case 0x2a:            /* movntqda */
                    gen_ldo_env_A0(s, op1_offset);
                    return;
                default:
                    gen_ldo_env_A0(s, op2_offset);
                }
            }
        } else {
            op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
            if (mod == 3) {
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            } else {
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_lea_modrm(env, s, modrm);
                gen_ldq_env_A0(s, op2_offset);
            }
        }
        if (sse_fn_epp == SSE_SPECIAL) {
            goto illegal_op;
        }

        tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
        tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
        sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);

        if (b == 0x17) {
            set_cc_op(s, CC_OP_EFLAGS);
        }
        break;

    case 0x238:
    case 0x338:
    do_0f_38_fx:
        /* Various integer extensions at 0f 38 f[0-f].  */
        b = modrm | (b1 << 8);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        switch (b) {
        case 0x3f0: /* crc32 Gd,Eb */
        case 0x3f1: /* crc32 Gd,Ey */
        do_crc32:
            if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
                goto illegal_op;
            }
            if ((b & 0xff) == 0xf0) {
                ot = MO_8;
            } else if (s->dflag != MO_64) {
                ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
            } else {
                ot = MO_64;
            }

            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
                             cpu_T[0], tcg_const_i32(8 << ot));

            ot = mo_64_32(s->dflag);
            gen_op_mov_reg_v(ot, reg, cpu_T[0]);
            break;

        case 0x1f0: /* crc32 or movbe */
        case 0x1f1:
            /* For these insns, the f3 prefix is supposed to have priority
               over the 66 prefix, but that's not what we implement above
               setting b1.  */
            if (s->prefix & PREFIX_REPNZ) {
                goto do_crc32;
            }
            /* FALLTHRU */
        case 0x0f0: /* movbe Gy,My */
        case 0x0f1: /* movbe My,Gy */
            if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
                goto illegal_op;
            }
            if (s->dflag != MO_64) {
                ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
            } else {
                ot = MO_64;
            }

            gen_lea_modrm(env, s, modrm);
            if ((b & 1) == 0) {
                tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
                                   s->mem_index, ot | MO_BE);
                gen_op_mov_reg_v(ot, reg, cpu_T[0]);
            } else {
                tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
                                   s->mem_index, ot | MO_BE);
            }
            break;

        case 0x0f2: /* andn Gy, By, Ey */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
                || !(s->prefix & PREFIX_VEX)
                || s->vex_l != 0) {
                goto illegal_op;
            }
            ot = mo_64_32(s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
            gen_op_mov_reg_v(ot, reg, cpu_T[0]);
            gen_op_update1_cc();
            set_cc_op(s, CC_OP_LOGICB + ot);
            break;

        case 0x0f7: /* bextr Gy, Ey, By */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
                || !(s->prefix & PREFIX_VEX)
                || s->vex_l != 0) {
                goto illegal_op;
            }
            ot = mo_64_32(s->dflag);
            {
                TCGv bound, zero;

                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
                /* Extract START, and shift the operand.
                   Shifts larger than operand size get zeros.  */
                tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
                tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);

                bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
                zero = tcg_const_tl(0);
                tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
                                   cpu_T[0], zero);
                tcg_temp_free(zero);

                /* Extract the LEN into a mask.  Lengths larger than
                   operand size get all ones.  */
                tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
                tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
                tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
                                   cpu_A0, bound);
                tcg_temp_free(bound);
                tcg_gen_movi_tl(cpu_T[1], 1);
                tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
                tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
                tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);

                gen_op_mov_reg_v(ot, reg, cpu_T[0]);
                gen_op_update1_cc();
                set_cc_op(s, CC_OP_LOGICB + ot);
            }
            break;

        case 0x0f5: /* bzhi Gy, Ey, By */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                || !(s->prefix & PREFIX_VEX)
                || s->vex_l != 0) {
                goto illegal_op;
            }
            ot = mo_64_32(s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
            {
                TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
                /* Note that since we're using BMILG (in order to get O
                   cleared) we need to store the inverse into C.  */
                tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
                                   cpu_T[1], bound);
                tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
                                   bound, bound, cpu_T[1]);
                tcg_temp_free(bound);
            }
            tcg_gen_movi_tl(cpu_A0, -1);
            tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
            tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
            gen_op_mov_reg_v(ot, reg, cpu_T[0]);
            gen_op_update1_cc();
            set_cc_op(s, CC_OP_BMILGB + ot);
            break;

        case 0x3f6: /* mulx By, Gy, rdx, Ey */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                || !(s->prefix & PREFIX_VEX)
                || s->vex_l != 0) {
                goto illegal_op;
            }
            ot = mo_64_32(s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            switch (ot) {
            default:
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
                tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
                                  cpu_tmp2_i32, cpu_tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
                                  cpu_T[0], cpu_regs[R_EDX]);
                break;
#endif
            }
            break;

        case 0x3f5: /* pdep Gy, By, Ey */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                || !(s->prefix & PREFIX_VEX)
                || s->vex_l != 0) {
                goto illegal_op;
            }
            ot = mo_64_32(s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            /* Note that by zero-extending the mask operand, we
               automatically handle zero-extending the result.  */
            if (ot == MO_64) {
                tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
            } else {
                tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
            }
            gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
            break;

        case 0x2f5: /* pext Gy, By, Ey */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                || !(s->prefix & PREFIX_VEX)
                || s->vex_l != 0) {
                goto illegal_op;
            }
            ot = mo_64_32(s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            /* Note that by zero-extending the mask operand, we
               automatically handle zero-extending the result.  */
            if (ot == MO_64) {
                tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
            } else {
                tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
            }
            gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
            break;

        case 0x1f6: /* adcx Gy, Ey */
        case 0x2f6: /* adox Gy, Ey */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
                goto illegal_op;
            } else {
                TCGv carry_in, carry_out, zero;
                int end_op;

                ot = mo_64_32(s->dflag);
                gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

                /* Re-use the carry-out from a previous round.  */
                TCGV_UNUSED(carry_in);
                carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
                switch (s->cc_op) {
                case CC_OP_ADCX:
                    if (b == 0x1f6) {
                        carry_in = cpu_cc_dst;
                        end_op = CC_OP_ADCX;
                    } else {
                        end_op = CC_OP_ADCOX;
                    }
                    break;
                case CC_OP_ADOX:
                    if (b == 0x1f6) {
                        end_op = CC_OP_ADCOX;
                    } else {
                        carry_in = cpu_cc_src2;
                        end_op = CC_OP_ADOX;
                    }
                    break;
                case CC_OP_ADCOX:
                    end_op = CC_OP_ADCOX;
                    carry_in = carry_out;
                    break;
                default:
                    end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
                    break;
                }
                /* If we can't reuse carry-out, get it out of EFLAGS.  */
                if (TCGV_IS_UNUSED(carry_in)) {
                    if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
                        gen_compute_eflags(s);
                    }
                    carry_in = cpu_tmp0;
                    tcg_gen_shri_tl(carry_in, cpu_cc_src,
                                    ctz32(b == 0x1f6 ? CC_C : CC_O));
                    tcg_gen_andi_tl(carry_in, carry_in, 1);
                }

                switch (ot) {
#ifdef TARGET_X86_64
                case MO_32:
                    /* If we know TL is 64-bit, and we want a 32-bit
                       result, just do everything in 64-bit arithmetic.  */
                    tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
                    tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
                    tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
                    tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
                    tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
                    tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
                    break;
#endif
                default:
                    /* Otherwise compute the carry-out in two steps.  */
                    zero = tcg_const_tl(0);
                    tcg_gen_add2_tl(cpu_T[0], carry_out,
                                    cpu_T[0], zero,
                                    carry_in, zero);
                    tcg_gen_add2_tl(cpu_regs[reg], carry_out,
                                    cpu_regs[reg], carry_out,
                                    cpu_T[0], zero);
                    tcg_temp_free(zero);
                    break;
                }
                set_cc_op(s, end_op);
            }
            break;

        case 0x1f7: /* shlx Gy, Ey, By */
        case 0x2f7: /* sarx Gy, Ey, By */
        case 0x3f7: /* shrx Gy, Ey, By */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                || !(s->prefix & PREFIX_VEX)
                || s->vex_l != 0) {
                goto illegal_op;
            }
            ot = mo_64_32(s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            if (ot == MO_64) {
                tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
            } else {
                tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
            }
            if (b == 0x1f7) {
                tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            } else if (b == 0x2f7) {
                if (ot != MO_64) {
                    tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                }
                tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            } else {
                if (ot != MO_64) {
                    tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
                }
                tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
            }
            gen_op_mov_reg_v(ot, reg, cpu_T[0]);
            break;

        case 0x0f3:
        case 0x1f3:
        case 0x2f3:
        case 0x3f3: /* Group 17 */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
                || !(s->prefix & PREFIX_VEX)
                || s->vex_l != 0) {
                goto illegal_op;
            }
            ot = mo_64_32(s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

            switch (reg & 7) {
            case 1: /* blsr By,Ey */
                tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
                tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
                gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]);
                gen_op_update2_cc();
                set_cc_op(s, CC_OP_BMILGB + ot);
                break;

            case 2: /* blsmsk By,Ey */
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
                tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                set_cc_op(s, CC_OP_BMILGB + ot);
                break;

            case 3: /* blsi By, Ey */
                tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
                tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
                tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
                set_cc_op(s, CC_OP_BMILGB + ot);
                break;

            default:
                goto illegal_op;
            }
            break;

        default:
            goto illegal_op;
        }
        break;

    case 0x03a:
    case 0x13a:
        b = modrm;
        modrm = cpu_ldub_code(env, s->pc++);
        rm = modrm & 7;
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        if (b1 >= 2) {
            goto illegal_op;
        }

        sse_fn_eppi = sse_op_table7[b].op[b1];
        if (!sse_fn_eppi) {
            goto illegal_op;
        }
        if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
            goto illegal_op;

        if (sse_fn_eppi == SSE_SPECIAL) {
            ot = mo_64_32(s->dflag);
            rm = (modrm & 7) | REX_B(s);
            if (mod != 3)
                gen_lea_modrm(env, s, modrm);
            reg = ((modrm >> 3) & 7) | rex_r;
            val = cpu_ldub_code(env, s->pc++);
            switch (b) {
            case 0x14: /* pextrb */
                tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                        xmm_regs[reg].XMM_B(val & 15)));
                if (mod == 3) {
                    gen_op_mov_reg_v(ot, rm, cpu_T[0]);
                } else {
                    tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
                                       s->mem_index, MO_UB);
                }
                break;
            case 0x15: /* pextrw */
                tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                        xmm_regs[reg].XMM_W(val & 7)));
                if (mod == 3) {
                    gen_op_mov_reg_v(ot, rm, cpu_T[0]);
                } else {
                    tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
                                       s->mem_index, MO_LEUW);
                }
                break;
            case 0x16:
                if (ot == MO_32) { /* pextrd */
                    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                   offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_L(val & 3)));
                    if (mod == 3) {
                        tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
                    } else {
                        tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LEUL);
                    }
                } else { /* pextrq */
#ifdef TARGET_X86_64
                    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
                                   offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_Q(val & 1)));
                    if (mod == 3) {
                        tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
                                            s->mem_index, MO_LEQ);
                    }
#else
                    goto illegal_op;
#endif
                }
                break;
            case 0x17: /* extractps */
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                        xmm_regs[reg].XMM_L(val & 3)));
                if (mod == 3) {
                    gen_op_mov_reg_v(ot, rm, cpu_T[0]);
                } else {
                    tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
                                       s->mem_index, MO_LEUL);
                }
                break;
            case 0x20: /* pinsrb */
                if (mod == 3) {
                    gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
                } else {
                    tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
                                       s->mem_index, MO_UB);
                }
                tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
                                        xmm_regs[reg].XMM_B(val & 15)));
                break;
            case 0x21: /* insertps */
                if (mod == 3) {
                    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
                                   offsetof(CPUX86State,xmm_regs[rm]
                                            .XMM_L((val >> 6) & 3)));
                } else {
                    tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                                        s->mem_index, MO_LEUL);
                }
                tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                               offsetof(CPUX86State,xmm_regs[reg]
                                        .XMM_L((val >> 4) & 3)));
                if ((val >> 0) & 1)
                    tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                   cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(0)));
                if ((val >> 1) & 1)
                    tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                   cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(1)));
                if ((val >> 2) & 1)
                    tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                   cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(2)));
                if ((val >> 3) & 1)
                    tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
                                   cpu_env, offsetof(CPUX86State,
                                                xmm_regs[reg].XMM_L(3)));
                break;
            case 0x22:
                if (ot == MO_32) { /* pinsrd */
                    if (mod == 3) {
                        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
                    } else {
                        tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
                                            s->mem_index, MO_LEUL);
                    }
                    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
                                   offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_L(val & 3)));
                } else { /* pinsrq */
#ifdef TARGET_X86_64
                    if (mod == 3) {
                        gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
                                            s->mem_index, MO_LEQ);
                    }
                    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
                                   offsetof(CPUX86State,
                                            xmm_regs[reg].XMM_Q(val & 1)));
#else
                    goto illegal_op;
#endif
                }
                break;
            }
            return;
        }

        if (b1) {
            op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
            if (mod == 3) {
                op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
            } else {
                op2_offset = offsetof(CPUX86State,xmm_t0);
                gen_lea_modrm(env, s, modrm);
                gen_ldo_env_A0(s, op2_offset);
            }
        } else {
            op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
            if (mod == 3) {
                op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
            } else {
                op2_offset = offsetof(CPUX86State,mmx_t0);
                gen_lea_modrm(env, s, modrm);
                gen_ldq_env_A0(s, op2_offset);
            }
        }
        val = cpu_ldub_code(env, s->pc++);

        if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
            set_cc_op(s, CC_OP_EFLAGS);

            if (s->dflag == MO_64) {
                /* The helper must use entire 64-bit gp registers */
                val |= 1 << 8;
            }
        }

        tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
        tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
        sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
        break;

    case 0x33a:
        /* Various integer extensions at 0f 3a f[0-f].  */
        b = modrm | (b1 << 8);
        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        switch (b) {
        case 0x3f0: /* rorx Gy,Ey, Ib */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
                || !(s->prefix & PREFIX_VEX)
                || s->vex_l != 0) {
                goto illegal_op;
            }
            ot = mo_64_32(s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
            b = cpu_ldub_code(env, s->pc++);
            if (ot == MO_64) {
                tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
            } else {
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
                tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            }
            gen_op_mov_reg_v(ot, reg, cpu_T[0]);
            break;

        default:
            goto illegal_op;
        }
        break;

    default:
        goto illegal_op;
    }
} else {
    /* generic MMX or SSE operation */
    switch(b) {
    case 0x70: /* pshufx insn */
    case 0xc6: /* pshufx insn */
    case 0xc2: /* compare insns */
        s->rip_offset = 1;
        break;
    default:
        break;
    }
    if (is_xmm) {
        op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
        if (mod != 3) {
            int sz = 4;

            gen_lea_modrm(env, s, modrm);
            op2_offset = offsetof(CPUX86State,xmm_t0);

            switch (b) {
            case 0x50 ... 0x5a:
            case 0x5c ... 0x5f:
            case 0xc2:
                /* Most sse scalar operations.  */
                if (b1 == 2) {
                    sz = 2;
                } else if (b1 == 3) {
                    sz = 3;
                }
                break;

            case 0x2e:  /* ucomis[sd] */
            case 0x2f:  /* comis[sd] */
                if (b1 == 0) {
                    sz = 2;
                } else {
                    sz = 3;
                }
                break;
            }

            switch (sz) {
            case 2:
                /* 32 bit access */
                gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
                tcg_gen_st32_tl(cpu_T[0], cpu_env,
                                offsetof(CPUX86State,xmm_t0.XMM_L(0)));
                break;
            case 3:
                /* 64 bit access */
                gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_D(0)));
                break;
            default:
                /* 128 bit access */
                gen_ldo_env_A0(s, op2_offset);
                break;
            }
        } else {
            rm = (modrm & 7) | REX_B(s);
            op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
        }
    } else {
        op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            op2_offset = offsetof(CPUX86State,mmx_t0);
            gen_ldq_env_A0(s, op2_offset);
        } else {
            rm = (modrm & 7);
            op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
        }
    }
    switch(b) {
    case 0x0f: /* 3DNow! data insns */
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
            goto illegal_op;
        val = cpu_ldub_code(env, s->pc++);
        sse_fn_epp = sse_op_table5[val];
        if (!sse_fn_epp) {
            goto illegal_op;
        }
        tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
        tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
        sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
        break;
    case 0x70: /* pshufx insn */
    case 0xc6: /* pshufx insn */
        val = cpu_ldub_code(env, s->pc++);
        tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
        tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
        /* XXX: introduce a new table? */
        sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
        sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
        break;
    case 0xc2:
        /* compare insns */
        val = cpu_ldub_code(env, s->pc++);
        if (val >= 8)
            goto illegal_op;
        sse_fn_epp = sse_op_table4[val][b1];

        tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
        tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
        sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
        break;
    case 0xf7:
        /* maskmov : we must prepare A0 */
        if (mod != 3)
            goto illegal_op;
        tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
        gen_extu(s->aflag, cpu_A0);
        gen_add_A0_ds_seg(s);

        tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
        tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
        /* XXX: introduce a new table? */
        sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
        sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
        break;
    default:
        tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
        tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
        sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
        break;
    }
    if (b == 0x2e || b == 0x2f) {
        set_cc_op(s, CC_OP_EFLAGS);
    }
}
}
true
qemu
9ecac5dad16722ce2a8c3e88d8eeba5794990031
24,737
void do_subfzeo_64 (void)
{
    T1 = T0;
    T0 = ~T0 + xer_ca;
    if (likely(!(((uint64_t)~T1 ^ UINT64_MAX) &
                 ((uint64_t)(~T1) ^ (uint64_t)T0) & (1ULL << 63)))) {
        xer_ov = 0;
    } else {
        xer_ov = 1;
        xer_so = 1;
    }
    if (likely((uint64_t)T0 >= (uint64_t)~T1)) {
        xer_ca = 0;
    } else {
        xer_ca = 1;
    }
}
true
qemu
c3e10c7b4377c1cbc0a4fbc12312c2cf41c0cda7
24,738
av_cold int MPV_encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i;
    int chroma_h_shift, chroma_v_shift;

    MPV_encode_defaults(s);

    switch (avctx->codec_id) {
    case CODEC_ID_MPEG2VIDEO:
        if(avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P){
            av_log(avctx, AV_LOG_ERROR, "only YUV420 and YUV422 are supported\n");
            return -1;
        }
        break;
    case CODEC_ID_LJPEG:
        if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUVJ422P &&
           avctx->pix_fmt != PIX_FMT_YUVJ444P && avctx->pix_fmt != PIX_FMT_RGB32 &&
           ((avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P &&
             avctx->pix_fmt != PIX_FMT_YUV444P) ||
            avctx->strict_std_compliance>FF_COMPLIANCE_UNOFFICIAL)){
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
            return -1;
        }
        break;
    case CODEC_ID_MJPEG:
        if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUVJ422P &&
           ((avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P) ||
            avctx->strict_std_compliance>FF_COMPLIANCE_UNOFFICIAL)){
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
            return -1;
        }
        break;
    default:
        if(avctx->pix_fmt != PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
            return -1;
        }
    }

    switch (avctx->pix_fmt) {
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUV422P:
        s->chroma_format = CHROMA_422;
        break;
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUV420P:
    default:
        s->chroma_format = CHROMA_420;
        break;
    }

    s->bit_rate = avctx->bit_rate;
    s->width = avctx->width;
    s->height = avctx->height;
    if(avctx->gop_size > 600 && avctx->strict_std_compliance>FF_COMPLIANCE_EXPERIMENTAL){
        av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! reducing it ...\n");
        avctx->gop_size=600;
    }
    s->gop_size = avctx->gop_size;
    s->avctx = avctx;
    s->flags= avctx->flags;
    s->flags2= avctx->flags2;
    s->max_b_frames= avctx->max_b_frames;
    s->codec_id= avctx->codec->id;
    s->luma_elim_threshold  = avctx->luma_elim_threshold;
    s->chroma_elim_threshold= avctx->chroma_elim_threshold;
    s->strict_std_compliance= avctx->strict_std_compliance;
    s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
    s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
    s->mpeg_quant= avctx->mpeg_quant;
    s->rtp_mode= !!avctx->rtp_payload_size;
    s->intra_dc_precision= avctx->intra_dc_precision;
    s->user_specified_pts = AV_NOPTS_VALUE;

    if (s->gop_size <= 1) {
        s->intra_only = 1;
        s->gop_size = 12;
    } else {
        s->intra_only = 0;
    }

    s->me_method = avctx->me_method;

    /* Fixed QSCALE */
    s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);

    s->adaptive_quant= (   s->avctx->lumi_masking
                        || s->avctx->dark_masking
                        || s->avctx->temporal_cplx_masking
                        || s->avctx->spatial_cplx_masking
                        || s->avctx->p_masking
                        || s->avctx->border_masking
                        || (s->flags&CODEC_FLAG_QP_RD))
                       && !s->fixed_qscale;

    s->obmc= !!(s->flags & CODEC_FLAG_OBMC);
    s->loop_filter= !!(s->flags & CODEC_FLAG_LOOP_FILTER);
    s->alternate_scan= !!(s->flags & CODEC_FLAG_ALT_SCAN);
    s->intra_vlc_format= !!(s->flags2 & CODEC_FLAG2_INTRA_VLC);
    s->q_scale_type= !!(s->flags2 & CODEC_FLAG2_NON_LINEAR_QUANT);

    if(avctx->rc_max_rate && !avctx->rc_buffer_size){
        av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n");
        return -1;
    }

    if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){
        av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
    }

    if(avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate){
        av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
        return -1;
    }

    if(avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate){
        av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
        return -1;
    }

    if(avctx->rc_max_rate && avctx->rc_max_rate == avctx->bit_rate &&
       avctx->rc_max_rate != avctx->rc_min_rate){
        av_log(avctx, AV_LOG_INFO, "impossible bitrate constraints, this will fail\n");
    }

    if(avctx->rc_buffer_size && avctx->bit_rate*(int64_t)avctx->time_base.num >
       avctx->rc_buffer_size * (int64_t)avctx->time_base.den){
        av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
        return -1;
    }

    if(!s->fixed_qscale && avctx->bit_rate*av_q2d(avctx->time_base) > avctx->bit_rate_tolerance){
        av_log(avctx, AV_LOG_ERROR, "bitrate tolerance too small for bitrate\n");
        return -1;
    }

    if(   s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate
       && (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO)
       && 90000LL * (avctx->rc_buffer_size-1) > s->avctx->rc_max_rate*0xFFFFLL){
        av_log(avctx, AV_LOG_INFO, "Warning vbv_delay will be set to 0xFFFF (=VBR) as the specified vbv buffer is too large for the given bitrate!\n");
    }

    if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4
       && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P
       && s->codec_id != CODEC_ID_FLV1){
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
        return -1;
    }

    if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){
        av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decision\n");
        return -1;
    }

    if(s->obmc && s->codec_id != CODEC_ID_H263 && s->codec_id != CODEC_ID_H263P){
        av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with H263(+)\n");
        return -1;
    }

    if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
        return -1;
    }

    if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
        av_log(avctx, AV_LOG_ERROR, "data partitioning not supported by codec\n");
        return -1;
    }

    if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4
       && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
        av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
        return -1;
    }

    if ((s->codec_id == CODEC_ID_MPEG4 || s->codec_id == CODEC_ID_H263 ||
         s->codec_id == CODEC_ID_H263P) &&
        (avctx->sample_aspect_ratio.num > 255 || avctx->sample_aspect_ratio.den > 255)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
        return -1;
    }

    if((s->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN))
       && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG2VIDEO){
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
        return -1;
    }

    if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
        av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supported by codec\n");
        return -1;
    }

    if((s->flags & CODEC_FLAG_CBP_RD) && !avctx->trellis){
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
        return -1;
    }

    if((s->flags & CODEC_FLAG_QP_RD) && s->avctx->mb_decision != FF_MB_DECISION_RD){
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
        return -1;
    }

    if(s->avctx->scenechange_threshold < 1000000000 && (s->flags & CODEC_FLAG_CLOSED_GOP)){
        av_log(avctx, AV_LOG_ERROR, "closed gop with scene change detection are not supported yet, set threshold to 1000000000\n");
        return -1;
    }

    if((s->flags2 & CODEC_FLAG2_INTRA_VLC) && s->codec_id != CODEC_ID_MPEG2VIDEO){
        av_log(avctx, AV_LOG_ERROR, "intra vlc table not supported by codec\n");
        return -1;
    }

    if(s->flags & CODEC_FLAG_LOW_DELAY){
        if (s->codec_id != CODEC_ID_MPEG2VIDEO){
            av_log(avctx, AV_LOG_ERROR, "low delay forcing is only available for mpeg2\n");
            return -1;
        }
        if (s->max_b_frames != 0){
            av_log(avctx, AV_LOG_ERROR, "b frames cannot be used with low delay\n");
            return -1;
        }
    }

    if(s->q_scale_type == 1){
        if(s->codec_id != CODEC_ID_MPEG2VIDEO){
            av_log(avctx, AV_LOG_ERROR, "non linear quant is only available for mpeg2\n");
            return -1;
        }
        if(avctx->qmax > 12){
            av_log(avctx, AV_LOG_ERROR, "non linear quant only supports qmax <= 12 currently\n");
            return -1;
        }
    }

    if(s->avctx->thread_count > 1
       && s->codec_id != CODEC_ID_MPEG4
       && s->codec_id != CODEC_ID_MPEG1VIDEO
       && s->codec_id != CODEC_ID_MPEG2VIDEO
       && (s->codec_id != CODEC_ID_H263P || !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT))){
        av_log(avctx, AV_LOG_ERROR, "multi threaded encoding not supported by codec\n");
        return -1;
    }

    if(s->avctx->thread_count < 1){
        av_log(avctx, AV_LOG_ERROR, "automatic thread number detection not supported by codec, patch welcome\n");
        return -1;
    }

    if(s->avctx->thread_count > 1)
        s->rtp_mode= 1;

    if(!avctx->time_base.den || !avctx->time_base.num){
        av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
        return -1;
    }

    i= (INT_MAX/2+128)>>8;
    if(avctx->me_threshold >= i){
        av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n", i - 1);
        return -1;
    }
    if(avctx->mb_threshold >= i){
        av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n", i - 1);
        return -1;
    }

    if(avctx->b_frame_strategy && (avctx->flags&CODEC_FLAG_PASS2)){
        av_log(avctx, AV_LOG_INFO, "notice: b_frame_strategy only affects the first pass\n");
        avctx->b_frame_strategy = 0;
    }

    i= av_gcd(avctx->time_base.den, avctx->time_base.num);
    if(i > 1){
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;
//        return -1;
    }

    if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO || s->codec_id==CODEC_ID_MJPEG){
        s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
        s->inter_quant_bias= 0;
    }else{
        s->intra_quant_bias=0;
        s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
    }

    if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->intra_quant_bias= avctx->intra_quant_bias;
    if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->inter_quant_bias= avctx->inter_quant_bias;

    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);

    if(avctx->codec_id == CODEC_ID_MPEG4 && s->avctx->time_base.den > (1<<16)-1){
        av_log(avctx, AV_LOG_ERROR, "timebase not supported by mpeg 4 standard\n");
        return -1;
    }
    s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;

    switch(avctx->codec->id) {
    case CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay= !!(s->flags & CODEC_FLAG_LOW_DELAY);
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay= !!(s->flags & CODEC_FLAG_LOW_DELAY);
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
        s->rtp_mode= 1;
        break;
    case CODEC_ID_LJPEG:
    case CODEC_ID_MJPEG:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1; /* force intra only for jpeg */
        if(avctx->codec->id == CODEC_ID_LJPEG && avctx->pix_fmt == PIX_FMT_BGRA){
            s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
            s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
            s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
        }else{
            s->mjpeg_vsample[0] = 2;
            s->mjpeg_vsample[1] = 2>>chroma_v_shift;
            s->mjpeg_vsample[2] = 2>>chroma_v_shift;
            s->mjpeg_hsample[0] = 2;
            s->mjpeg_hsample[1] = 2>>chroma_h_shift;
            s->mjpeg_hsample[2] = 2>>chroma_h_shift;
        }
        if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER)
            || ff_mjpeg_encode_init(s) < 0)
            return -1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_H261:
        if (!CONFIG_H261_ENCODER)
            return -1;
        if (ff_h261_get_picture_format(s->width, s->height) < 0) {
            av_log(avctx, AV_LOG_ERROR, "The specified picture size of %dx%d is not valid for the H.261 codec.\nValid sizes are 176x144, 352x288\n", s->width, s->height);
            return -1;
        }
        s->out_format = FMT_H261;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_H263:
        if (!CONFIG_H263_ENCODER)
            return -1;
        if (ff_match_2uint16(h263_format, FF_ARRAY_ELEMS(h263_format), s->width, s->height) == 8) {
            av_log(avctx, AV_LOG_INFO, "The specified picture size of %dx%d is not valid for the H.263 codec.\nValid sizes are 128x96, 176x144, 352x288, 704x576, and 1408x1152. Try H.263+.\n", s->width, s->height);
            return -1;
        }
        s->out_format = FMT_H263;
        s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->h263_plus = 1;
        /* Fx */
        s->umvplus = (avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
        s->h263_aic= (avctx->flags & CODEC_FLAG_AC_PRED) ? 1:0;
        s->modified_quant= s->h263_aic;
        s->alt_inter_vlc= (avctx->flags & CODEC_FLAG_H263P_AIV) ? 1:0;
        s->obmc= (avctx->flags & CODEC_FLAG_OBMC) ? 1:0;
        s->loop_filter= (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1:0;
        s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus;
        s->h263_slice_structured= (s->flags & CODEC_FLAG_H263P_SLICE_STRUCT) ? 1:0;
        /* /Fx */
        /* These are just to be sure */
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_FLV1:
        s->out_format = FMT_H263;
        s->h263_flv = 2; /* format = 1; 11-bit codes */
        s->unrestricted_mv = 1;
        s->rtp_mode=0; /* don't allow GOB */
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_RV10:
        s->out_format = FMT_H263;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_RV20:
        s->out_format = FMT_H263;
        avctx->delay=0;
        s->low_delay=1;
        s->modified_quant=1;
        s->h263_aic=1;
        s->h263_plus=1;
        s->loop_filter=1;
        s->unrestricted_mv= 0;
        break;
    case CODEC_ID_MPEG4:
        s->out_format = FMT_H263;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->low_delay= s->max_b_frames ? 0 : 1;
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case CODEC_ID_MSMPEG4V1:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_MSMPEG4V2:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 2;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_MSMPEG4V3:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 3;
        s->flipflop_rounding=1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_WMV1:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 4;
        s->flipflop_rounding=1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    case CODEC_ID_WMV2:
        s->out_format = FMT_H263;
        s->h263_msmpeg4 = 1;
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version= 5;
        s->flipflop_rounding=1;
        avctx->delay=0;
        s->low_delay=1;
        break;
    default:
        return -1;
    }

    avctx->has_b_frames= !s->low_delay;

    s->encoding = 1;

    s->progressive_frame=
    s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN));

    /* init */
    if (MPV_common_init(s) < 0)
        return -1;

    if(!s->dct_quantize)
        s->dct_quantize = dct_quantize_c;
    if(!s->denoise_dct)
        s->denoise_dct = denoise_dct_c;
    s->fast_dct_quantize = s->dct_quantize;
    if(avctx->trellis)
        s->dct_quantize = dct_quantize_trellis_c;

    if((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
        s->chroma_qscale_table= ff_h263_chroma_qscale_table;

    s->quant_precision=5;

    ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
    ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);

    if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
        ff_h261_encode_init(s);
    if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
        h263_encode_init(s);
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
        ff_msmpeg4_encode_init(s);
    if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
        && s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);

    /* init q matrix */
    for(i=0;i<64;i++) {
        int j= s->dsp.idct_permutation[i];
        if(CONFIG_MPEG4_ENCODER && s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
            s->intra_matrix[j] =
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        }else
        { /* mpeg1/2 */
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        }
        if(s->avctx->intra_matrix)
            s->intra_matrix[j] = s->avctx->intra_matrix[i];
        if(s->avctx->inter_matrix)
            s->inter_matrix[j] = s->avctx->inter_matrix[i];
    }

    /* precompute matrix */
    /* for mjpeg, we do include qscale in the matrix */
    if (s->out_format != FMT_MJPEG) {
        ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                       s->intra_matrix, s->intra_quant_bias, avctx->qmin, 31, 1);
        ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
                       s->inter_matrix, s->inter_quant_bias, avctx->qmin, 31, 0);
    }

    if(ff_rate_control_init(s) < 0)
        return -1;

    return 0;
}
true
FFmpeg
db61329607c858f95cd7e4c165897dcd39f82977
24,739
static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, int labelno)
{
    TCGLabel* l = &s->labels[labelno];
    tcg_target_long off;

    if (l->has_value) {
        off = l->u.value_ptr - s->code_ptr;
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        off = s->code_ptr[1];
        tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, labelno, -2);
    }

    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, off);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}
false
qemu
bec1631100323fac0900aea71043d5c4e22fc2fa
24,740
int find_utlb_entry(CPUState * env, target_ulong address, int use_asid)
{
    uint8_t urb, urc;

    /* Increment URC */
    urb = ((env->mmucr) >> 18) & 0x3f;
    urc = ((env->mmucr) >> 10) & 0x3f;
    urc++;
    if (urc == urb || urc == UTLB_SIZE - 1)
        urc = 0;
    env->mmucr = (env->mmucr & 0xffff03ff) | (urc << 10);

    /* Return entry */
    return find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid);
}
false
qemu
29e179bc3f5e804ab58b975e65c91cb9cd287846
24,741
static void combine_addr(char *buf, size_t len, const char* address,
                         uint16_t port)
{
    /* If the address-part contains a colon, it's an IPv6 IP so needs [] */
    if (strstr(address, ":")) {
        snprintf(buf, len, "[%s]:%u", address, port);
    } else {
        snprintf(buf, len, "%s:%u", address, port);
    }
}
false
qemu
537b41f5013e1951fa15e8f18855b18d76124ce4
24,745
static int inline get_mb_score(MpegEncContext * s, int mx, int my, int src_index,
                               int ref_index)
{
//    const int check_luma= s->dsp.me_sub_cmp != s->dsp.mb_cmp;
    MotionEstContext * const c= &s->me;
    const int size= 0;
    const int h= 16;
    const int penalty_factor= c->mb_penalty_factor;
    const int flags= c->mb_flags;
    const int qpel= flags & FLAG_QPEL;
    const int mask= 1+2*qpel;
    me_cmp_func cmp_sub, chroma_cmp_sub;
    int d;

    LOAD_COMMON
    //FIXME factorize

    cmp_sub= s->dsp.mb_cmp[size];
    chroma_cmp_sub= s->dsp.mb_cmp[size+1];

    assert(!c->skip);
    assert(c->avctx->me_sub_cmp != c->avctx->mb_cmp);

    d= cmp(s, mx>>(qpel+1), my>>(qpel+1), mx&mask, my&mask, size, h,
           ref_index, src_index, cmp_sub, chroma_cmp_sub, flags);
    //FIXME check cbp before adding penalty for (0,0) vector
    if(mx || my || size>0)
        d += (mv_penalty[mx - pred_x] + mv_penalty[my - pred_y])*penalty_factor;

    return d;
}
false
FFmpeg
155ec6edf82692bcf3a5f87d2bc697404f4e5aaf
24,746
av_cold void ff_dcadsp_init(DCADSPContext *s)
{
    s->lfe_fir[0] = dca_lfe_fir0_c;
    s->lfe_fir[1] = dca_lfe_fir1_c;
    s->qmf_32_subbands = dca_qmf_32_subbands;
    s->int8x8_fmul_int32 = int8x8_fmul_int32_c;
    if (ARCH_ARM) ff_dcadsp_init_arm(s);
    if (ARCH_X86) ff_dcadsp_init_x86(s);
}
false
FFmpeg
4cb6964244fd6c099383d8b7e99731e72cc844b9
24,747
vdi_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
               QEMUIOVector *qiov, int flags)
{
    BDRVVdiState *s = bs->opaque;
    QEMUIOVector local_qiov;
    uint32_t bmap_entry;
    uint32_t block_index;
    uint32_t offset_in_block;
    uint32_t n_bytes;
    uint32_t bmap_first = VDI_UNALLOCATED;
    uint32_t bmap_last = VDI_UNALLOCATED;
    uint8_t *block = NULL;
    uint64_t bytes_done = 0;
    int ret = 0;

    logout("\n");

    qemu_iovec_init(&local_qiov, qiov->niov);

    while (ret >= 0 && bytes > 0) {
        block_index = offset / s->block_size;
        offset_in_block = offset % s->block_size;
        n_bytes = MIN(bytes, s->block_size - offset_in_block);

        logout("will write %u bytes starting at offset %" PRIu64 "\n",
               n_bytes, offset);

        /* prepare next AIO request */
        bmap_entry = le32_to_cpu(s->bmap[block_index]);
        if (!VDI_IS_ALLOCATED(bmap_entry)) {
            /* Allocate new block and write to it. */
            uint64_t data_offset;
            bmap_entry = s->header.blocks_allocated;
            s->bmap[block_index] = cpu_to_le32(bmap_entry);
            s->header.blocks_allocated++;
            data_offset = s->header.offset_data +
                          (uint64_t)bmap_entry * s->block_size;
            if (block == NULL) {
                block = g_malloc(s->block_size);
                bmap_first = block_index;
            }
            bmap_last = block_index;
            /* Copy data to be written to new block and zero unused parts. */
            memset(block, 0, offset_in_block);
            qemu_iovec_to_buf(qiov, bytes_done, block + offset_in_block,
                              n_bytes);
            memset(block + offset_in_block + n_bytes, 0,
                   s->block_size - n_bytes - offset_in_block);

            /* Note that this coroutine does not yield anywhere from reading the
             * bmap entry until here, so in regards to all the coroutines trying
             * to write to this cluster, the one doing the allocation will
             * always be the first to try to acquire the lock.
             * Therefore, it is also the first that will actually be able to
             * acquire the lock and thus the padded cluster is written before
             * the other coroutines can write to the affected area. */
            qemu_co_mutex_lock(&s->write_lock);
            ret = bdrv_pwrite(bs->file, data_offset, block, s->block_size);
            qemu_co_mutex_unlock(&s->write_lock);
        } else {
            uint64_t data_offset = s->header.offset_data +
                                   (uint64_t)bmap_entry * s->block_size +
                                   offset_in_block;
            qemu_co_mutex_lock(&s->write_lock);
            /* This lock is only used to make sure the following write operation
             * is executed after the write issued by the coroutine allocating
             * this cluster, therefore we do not need to keep it locked.
             * As stated above, the allocating coroutine will always try to lock
             * the mutex before all the other concurrent accesses to that
             * cluster, therefore at this point we can be absolutely certain
             * that that write operation has returned (there may be other writes
             * in flight, but they do not concern this very operation). */
            qemu_co_mutex_unlock(&s->write_lock);

            qemu_iovec_reset(&local_qiov);
            qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);

            ret = bdrv_co_pwritev(bs->file, data_offset, n_bytes,
                                  &local_qiov, 0);
        }

        bytes -= n_bytes;
        offset += n_bytes;
        bytes_done += n_bytes;

        logout("%u bytes written\n", n_bytes);
    }

    qemu_iovec_destroy(&local_qiov);

    logout("finished data write\n");
    if (ret < 0) {
        return ret;
    }

    if (block) {
        /* One or more new blocks were allocated. */
        VdiHeader *header = (VdiHeader *) block;
        uint8_t *base;
        uint64_t offset;
        uint32_t n_sectors;

        logout("now writing modified header\n");
        assert(VDI_IS_ALLOCATED(bmap_first));
        *header = s->header;
        vdi_header_to_le(header);
        ret = bdrv_write(bs->file, 0, block, 1);
        g_free(block);
        block = NULL;

        if (ret < 0) {
            return ret;
        }

        logout("now writing modified block map entry %u...%u\n",
               bmap_first, bmap_last);
        /* Write modified sectors from block map. */
        bmap_first /= (SECTOR_SIZE / sizeof(uint32_t));
        bmap_last /= (SECTOR_SIZE / sizeof(uint32_t));
        n_sectors = bmap_last - bmap_first + 1;
        offset = s->bmap_sector + bmap_first;
        base = ((uint8_t *)&s->bmap[0]) + bmap_first * SECTOR_SIZE;
        logout("will write %u block map sectors starting from entry %u\n",
               n_sectors, bmap_first);
        ret = bdrv_write(bs->file, offset, base, n_sectors);
    }

    return ret;
}
false
qemu
1e886639791762e89b51aa0507f523c6a1448831
24,749
bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
{
    BlockDriverInfo bdi;

    if (bs->backing_hd || !(bs->open_flags & BDRV_O_UNMAP)) {
        return false;
    }

    if (bdrv_get_info(bs, &bdi) == 0) {
        return bdi.can_write_zeroes_with_unmap;
    }

    return false;
}
false
qemu
61007b316cd71ee7333ff7a0a749a8949527575f
24,750
static uint32_t read_u32(uint8_t *data, size_t offset)
{
    return ((data[offset] << 24) | (data[offset + 1] << 16) |
            (data[offset + 2] << 8) | data[offset + 3]);
}
false
qemu
5fb6c7a8b26eab1a22207d24b4784bd2b39ab54b
24,751
static void check_breakpoint(CPUState *env, DisasContext *dc)
{
    CPUBreakpoint *bp;

    if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
        TAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}
false
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
24,752
static void dump_json_image_info(ImageInfo *info)
{
    QString *str;
    QObject *obj;
    Visitor *v = qmp_output_visitor_new(&obj);

    visit_type_ImageInfo(v, NULL, &info, &error_abort);
    visit_complete(v, &obj);

    str = qobject_to_json_pretty(obj);
    assert(str != NULL);
    printf("%s\n", qstring_get_str(str));
    qobject_decref(obj);
    visit_free(v);
    QDECREF(str);
}
false
qemu
7d5e199ade76c53ec316ab6779800581bb47c50a
24,753
static CharDriverState *qemu_chr_open_msmouse(const char *id,
                                              ChardevBackend *backend,
                                              ChardevReturn *ret,
                                              Error **errp)
{
    CharDriverState *chr;

    chr = qemu_chr_alloc();
    chr->chr_write = msmouse_chr_write;
    chr->chr_close = msmouse_chr_close;
    chr->explicit_be_open = true;

    qemu_add_mouse_event_handler(msmouse_event, chr, 0, "QEMU Microsoft Mouse");

    return chr;
}
false
qemu
d0d7708ba29cbcc343364a46bff981e0ff88366f
24,755
static int iscsi_refresh_limits(BlockDriverState *bs)
{
    IscsiLun *iscsilun = bs->opaque;

    /* We don't actually refresh here, but just return data queried in
     * iscsi_open(): iscsi targets don't change their limits. */
    if (iscsilun->lbp.lbpu || iscsilun->lbp.lbpws) {
        if (iscsilun->bl.max_unmap < 0xffffffff) {
            bs->bl.max_discard = sector_lun2qemu(iscsilun->bl.max_unmap,
                                                 iscsilun);
        }
        bs->bl.discard_alignment = sector_lun2qemu(iscsilun->bl.opt_unmap_gran,
                                                   iscsilun);

        if (iscsilun->bl.max_ws_len < 0xffffffff) {
            bs->bl.max_write_zeroes = sector_lun2qemu(iscsilun->bl.max_ws_len,
                                                      iscsilun);
        }
        bs->bl.write_zeroes_alignment = sector_lun2qemu(iscsilun->bl.opt_unmap_gran,
                                                        iscsilun);
        bs->bl.opt_transfer_length = sector_lun2qemu(iscsilun->bl.opt_xfer_len,
                                                     iscsilun);
    }
    return 0;
}
false
qemu
5d259fc7da83249a4f78fe32de2bc2874a997a9f
24,760
yuv2mono_1_c_template(SwsContext *c, const uint16_t *buf0,
                      const uint16_t *ubuf0, const uint16_t *ubuf1,
                      const uint16_t *vbuf0, const uint16_t *vbuf1,
                      const uint16_t *abuf0, uint8_t *dest, int dstW,
                      int uvalpha, enum PixelFormat dstFormat,
                      int flags, int y, enum PixelFormat target)
{
    const uint8_t * const d128 = dither_8x8_220[y & 7];
    uint8_t *g = c->table_gU[128] + c->table_gV[128];
    int i;

    for (i = 0; i < dstW - 7; i += 8) {
        int acc =    g[(buf0[i    ] >> 7) + d128[0]];
        acc += acc + g[(buf0[i + 1] >> 7) + d128[1]];
        acc += acc + g[(buf0[i + 2] >> 7) + d128[2]];
        acc += acc + g[(buf0[i + 3] >> 7) + d128[3]];
        acc += acc + g[(buf0[i + 4] >> 7) + d128[4]];
        acc += acc + g[(buf0[i + 5] >> 7) + d128[5]];
        acc += acc + g[(buf0[i + 6] >> 7) + d128[6]];
        acc += acc + g[(buf0[i + 7] >> 7) + d128[7]];
        output_pixel(*dest++, acc);
    }
}
false
FFmpeg
13a099799e89a76eb921ca452e1b04a7a28a9855
24,762
static int decode_cabac_mb_dqp( H264Context *h) {
    MpegEncContext * const s = &h->s;
    int mbn_xy;
    int ctx = 0;
    int val = 0;

    if( s->mb_x > 0 )
        mbn_xy = s->mb_x + s->mb_y*s->mb_stride - 1;
    else
        mbn_xy = s->mb_width - 1 + (s->mb_y-1)*s->mb_stride;

    if( h->last_qscale_diff != 0 )
        ctx++;

    while( get_cabac( &h->cabac, &h->cabac_state[60 + ctx] ) ) {
        if( ctx < 2 )
            ctx = 2;
        else
            ctx = 3;
        val++;
        if(val > 102) //prevent infinite loop
            return INT_MIN;
    }

    if( val&0x01 )
        return (val + 1)/2;
    else
        return -(val + 1)/2;
}
false
FFmpeg
851ded8918c977d8160c6617b69604f758cabf50
24,765
static void add_user_command(char *optarg)
{
    cmdline = g_realloc(cmdline, ++ncmdline * sizeof(char *));
    cmdline[ncmdline-1] = optarg;
}
true
qemu
5839e53bbc0fec56021d758aab7610df421ed8c8
24,766
bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);

    return !k->query_guest_notifiers ||
           k->query_guest_notifiers(qbus->parent) ||
           hdev->force;
}
true
qemu
1e7398a140f7a6bd9f5a438e7ad0f1ef50990e25