label (int64, 0-1) | func1 (string, 23-97k chars) | id (int64, 0-27.3k) |
---|---|---|
0 | tcp_dooptions(struct tcpcb *tp, u_char *cp, int cnt, struct tcpiphdr *ti) { uint16_t mss; int opt, optlen; DEBUG_CALL("tcp_dooptions"); DEBUG_ARGS((dfd," tp = %lx cnt=%i \n", (long )tp, cnt)); for (; cnt > 0; cnt -= optlen, cp += optlen) { opt = cp[0]; if (opt == TCPOPT_EOL) break; if (opt == TCPOPT_NOP) optlen = 1; else { optlen = cp[1]; if (optlen <= 0) break; } switch (opt) { default: continue; case TCPOPT_MAXSEG: if (optlen != TCPOLEN_MAXSEG) continue; if (!(ti->ti_flags & TH_SYN)) continue; memcpy((char *) &mss, (char *) cp + 2, sizeof(mss)); NTOHS(mss); (void) tcp_mss(tp, mss); /* sets t_maxseg */ break; } } } | 2,798 |
0 | static void test_qemu_strtoull_hex(void) { const char *str = "0123"; char f = 'X'; const char *endptr = &f; uint64_t res = 999; int err; err = qemu_strtoull(str, &endptr, 16, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, 0x123); g_assert(endptr == str + strlen(str)); str = "0x123"; endptr = &f; res = 999; err = qemu_strtoull(str, &endptr, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, 0x123); g_assert(endptr == str + strlen(str)); } | 2,799 |
0 | static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width) { #ifdef HAVE_MMXFIXME #else int i; for(i=0; i<width; i++) { dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1; dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1; } #endif } | 2,801 |
0 | static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size) { const uint16_t *end; #if COMPILE_TEMPLATE_MMX const uint16_t *mm_end; #endif uint8_t *d = dst; const uint16_t *s = (const uint16_t *)src; end = s + src_size/2; #if COMPILE_TEMPLATE_MMX __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory"); __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory"); __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory"); mm_end = end - 3; while (s < mm_end) { __asm__ volatile( PREFETCH" 32%1 \n\t" "movq %1, %%mm0 \n\t" "movq %1, %%mm1 \n\t" "movq %1, %%mm2 \n\t" "pand %2, %%mm0 \n\t" "pand %3, %%mm1 \n\t" "pand %4, %%mm2 \n\t" "psllq $3, %%mm0 \n\t" "psrlq $2, %%mm1 \n\t" "psrlq $7, %%mm2 \n\t" PACK_RGB32 :"=m"(*d) :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r) :"memory"); d += 16; s += 4; } __asm__ volatile(SFENCE:::"memory"); __asm__ volatile(EMMS:::"memory"); #endif while (s < end) { register uint16_t bgr; bgr = *s++; #if HAVE_BIGENDIAN *d++ = 255; *d++ = (bgr&0x7C00)>>7; *d++ = (bgr&0x3E0)>>2; *d++ = (bgr&0x1F)<<3; #else *d++ = (bgr&0x1F)<<3; *d++ = (bgr&0x3E0)>>2; *d++ = (bgr&0x7C00)>>7; *d++ = 255; #endif } } | 2,802 |
0 | static int qcow_create(const char *filename, QEMUOptionParameter *options) { const char *backing_file = NULL; const char *backing_fmt = NULL; uint64_t sectors = 0; int flags = 0; size_t cluster_size = 65536; int prealloc = 0; /* Read out options */ while (options && options->name) { if (!strcmp(options->name, BLOCK_OPT_SIZE)) { sectors = options->value.n / 512; } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) { backing_file = options->value.s; } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) { backing_fmt = options->value.s; } else if (!strcmp(options->name, BLOCK_OPT_ENCRYPT)) { flags |= options->value.n ? BLOCK_FLAG_ENCRYPT : 0; } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) { if (options->value.n) { cluster_size = options->value.n; } } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) { if (!options->value.s || !strcmp(options->value.s, "off")) { prealloc = 0; } else if (!strcmp(options->value.s, "metadata")) { prealloc = 1; } else { fprintf(stderr, "Invalid preallocation mode: '%s'\n", options->value.s); return -EINVAL; } } options++; } if (backing_file && prealloc) { fprintf(stderr, "Backing file and preallocation cannot be used at " "the same time\n"); return -EINVAL; } return qcow_create2(filename, sectors, backing_file, backing_fmt, flags, cluster_size, prealloc); } | 2,803 |
0 | void do_interrupt(CPUState *env) { int ex_vec = -1; D(fprintf (stderr, "exception index=%d interrupt_req=%d\n", env->exception_index, env->interrupt_request)); switch (env->exception_index) { case EXCP_BREAK: /* These exceptions are generated by the core itself. ERP should point to the insn following the brk. */ ex_vec = env->trap_vector; env->pregs[PR_ERP] = env->pc + 2; break; case EXCP_MMU_FAULT: ex_vec = env->fault_vector; env->pregs[PR_ERP] = env->pc; break; default: /* Is the core accepting interrupts? */ if (!(env->pregs[PR_CCS] & I_FLAG)) return; /* The interrupt controller gives us the vector. */ ex_vec = env->interrupt_vector; /* Normal interrupts are taken between TB's. env->pc is valid here. */ env->pregs[PR_ERP] = env->pc; break; } if ((env->pregs[PR_CCS] & U_FLAG)) { D(fprintf(logfile, "excp isr=%x PC=%x ERP=%x pid=%x ccs=%x cc=%d %x\n", ex_vec, env->pc, env->pregs[PR_ERP], env->pregs[PR_PID], env->pregs[PR_CCS], env->cc_op, env->cc_mask)); } env->pc = ldl_code(env->pregs[PR_EBP] + ex_vec * 4); if (env->pregs[PR_CCS] & U_FLAG) { /* Swap stack pointers. */ env->pregs[PR_USP] = env->regs[R_SP]; env->regs[R_SP] = env->ksp; } /* Apply the CRIS CCS shift. Clears U if set. */ cris_shift_ccs(env); D(fprintf (logfile, "%s isr=%x vec=%x ccs=%x pid=%d erp=%x\n", __func__, env->pc, ex_vec, env->pregs[PR_CCS], env->pregs[PR_PID], env->pregs[PR_ERP])); } | 2,804 |
0 | void *qemu_blockalign0(BlockDriverState *bs, size_t size) { return memset(qemu_blockalign(bs, size), 0, size); } | 2,807 |
0 | static void t_gen_asr(TCGv d, TCGv a, TCGv b) { TCGv t0, t_31; t0 = tcg_temp_new(TCG_TYPE_TL); t_31 = tcg_temp_new(TCG_TYPE_TL); tcg_gen_sar_tl(d, a, b); tcg_gen_movi_tl(t_31, 31); tcg_gen_sub_tl(t0, t_31, b); tcg_gen_sar_tl(t0, t0, t_31); tcg_gen_or_tl(d, d, t0); tcg_temp_free(t0); tcg_temp_free(t_31); } | 2,809 |
0 | static inline uint64_t do_fri(uint64_t arg, int rounding_mode) { CPU_DoubleU farg; farg.ll = arg; if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN round */ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI); } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) { /* qNan / infinity round */ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI); } else { set_float_rounding_mode(rounding_mode, &env->fp_status); farg.ll = float64_round_to_int(farg.d, &env->fp_status); /* Restore rounding mode from FPSCR */ fpscr_set_rounding_mode(); } return farg.ll; } | 2,810 |
0 | static int pci_qdev_init(DeviceState *qdev, DeviceInfo *base) { PCIDevice *pci_dev = (PCIDevice *)qdev; PCIDeviceInfo *info = container_of(base, PCIDeviceInfo, qdev); PCIBus *bus; int devfn, rc; /* initialize cap_present for pci_is_express() and pci_config_size() */ if (info->is_express) { pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; } bus = FROM_QBUS(PCIBus, qdev_get_parent_bus(qdev)); devfn = pci_dev->devfn; pci_dev = do_pci_register_device(pci_dev, bus, base->name, devfn, info->config_read, info->config_write); assert(pci_dev); rc = info->init(pci_dev); if (rc != 0) return rc; if (qdev->hotplugged) bus->hotplug(pci_dev, 1); return 0; } | 2,812 |
0 | static inline void bit_copy(PutBitContext *pb, GetBitContext *gb) { int bits_left = get_bits_left(gb); while (bits_left >= 16) { put_bits(pb, 16, get_bits(gb, 16)); bits_left -= 16; } if (bits_left > 0) { put_bits(pb, bits_left, get_bits(gb, bits_left)); } } | 2,813 |
0 | static int coroutine_fn v9fs_mark_fids_unreclaim(V9fsPDU *pdu, V9fsPath *path) { int err; V9fsState *s = pdu->s; V9fsFidState *fidp, head_fid; head_fid.next = s->fid_list; for (fidp = s->fid_list; fidp; fidp = fidp->next) { if (fidp->path.size != path->size) { continue; } if (!memcmp(fidp->path.data, path->data, path->size)) { /* Mark the fid non reclaimable. */ fidp->flags |= FID_NON_RECLAIMABLE; /* reopen the file/dir if already closed */ err = v9fs_reopen_fid(pdu, fidp); if (err < 0) { return -1; } /* * Go back to head of fid list because * the list could have got updated when * switched to the worker thread */ if (err == 0) { fidp = &head_fid; } } } return 0; } | 2,814 |
0 | float64 helper_fabs_DT(float64 t0) { return float64_abs(t0); } | 2,816 |
0 | static long gethugepagesize(const char *path, Error **errp) { struct statfs fs; int ret; do { ret = statfs(path, &fs); } while (ret != 0 && errno == EINTR); if (ret != 0) { error_setg_errno(errp, errno, "failed to get page size of file %s", path); return 0; } if (!qtest_driver() && fs.f_type != HUGETLBFS_MAGIC) { fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path); } return fs.f_bsize; } | 2,819 |
0 | static void gen_sllq(DisasContext *ctx) { int l1 = gen_new_label(); int l2 = gen_new_label(); TCGv t0 = tcg_temp_local_new(); TCGv t1 = tcg_temp_local_new(); TCGv t2 = tcg_temp_local_new(); tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_movi_tl(t1, 0xFFFFFFFF); tcg_gen_shl_tl(t1, t1, t2); tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20); tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1); gen_load_spr(t0, SPR_MQ); tcg_gen_and_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); tcg_gen_br(l2); gen_set_label(l1); tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t2); gen_load_spr(t2, SPR_MQ); tcg_gen_andc_tl(t1, t2, t1); tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); gen_set_label(l2); tcg_temp_free(t0); tcg_temp_free(t1); tcg_temp_free(t2); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); } | 2,821 |
0 | static void raw_eject(BlockDriverState *bs, bool eject_flag) { bdrv_eject(bs->file->bs, eject_flag); } | 2,822 |
0 | int acpi_add_cpu_info(Object *o, void *opaque) { AcpiCpuInfo *cpu = opaque; uint64_t apic_id; if (object_dynamic_cast(o, TYPE_CPU)) { apic_id = object_property_get_int(o, "apic-id", NULL); assert(apic_id <= MAX_CPUMASK_BITS); set_bit(apic_id, cpu->found_cpus); } object_child_foreach(o, acpi_add_cpu_info, opaque); return 0; } | 2,823 |
0 | int ff_listen_bind(int fd, const struct sockaddr *addr, socklen_t addrlen, int timeout, URLContext *h) { int ret; int reuse = 1; struct pollfd lp = { fd, POLLIN, 0 }; if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse))) { av_log(NULL, AV_LOG_WARNING, "setsockopt(SO_REUSEADDR) failed\n"); } ret = bind(fd, addr, addrlen); if (ret) return ff_neterrno(); ret = listen(fd, 1); if (ret) return ff_neterrno(); ret = ff_poll_interrupt(&lp, 1, timeout, &h->interrupt_callback); if (ret < 0) return ret; ret = accept(fd, NULL, NULL); if (ret < 0) return ff_neterrno(); closesocket(fd); ff_socket_nonblock(ret, 1); return ret; } | 2,824 |
0 | static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint8_t source_id, uint32_t level) { return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) | ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT); } | 2,825 |
0 | static int chr_can_read(void *opaque) { SCLPConsole *scon = opaque; return SIZE_BUFFER_VT220 - scon->iov_data_len; } | 2,826 |
0 | static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req) { SCSIDevice *d = virtio_scsi_device_find(s, req->req.tmf->lun); SCSIRequest *r, *next; BusChild *kid; int target; /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */ req->resp.tmf->response = VIRTIO_SCSI_S_OK; tswap32s(&req->req.tmf->subtype); switch (req->req.tmf->subtype) { case VIRTIO_SCSI_T_TMF_ABORT_TASK: case VIRTIO_SCSI_T_TMF_QUERY_TASK: if (!d) { goto fail; } if (d->lun != virtio_scsi_get_lun(req->req.tmf->lun)) { goto incorrect_lun; } QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) { VirtIOSCSIReq *cmd_req = r->hba_private; if (cmd_req && cmd_req->req.cmd->tag == req->req.tmf->tag) { break; } } if (r) { /* * Assert that the request has not been completed yet, we * check for it in the loop above. */ assert(r->hba_private); if (req->req.tmf->subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) { /* "If the specified command is present in the task set, then * return a service response set to FUNCTION SUCCEEDED". */ req->resp.tmf->response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED; } else { scsi_req_cancel(r); } } break; case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET: if (!d) { goto fail; } if (d->lun != virtio_scsi_get_lun(req->req.tmf->lun)) { goto incorrect_lun; } s->resetting++; qdev_reset_all(&d->qdev); s->resetting--; break; case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET: case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET: case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET: if (!d) { goto fail; } if (d->lun != virtio_scsi_get_lun(req->req.tmf->lun)) { goto incorrect_lun; } QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) { if (r->hba_private) { if (req->req.tmf->subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) { /* "If there is any command present in the task set, then * return a service response set to FUNCTION SUCCEEDED". */ req->resp.tmf->response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED; break; } else { scsi_req_cancel(r); } } } break; case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET: target = req->req.tmf->lun[1]; s->resetting++; QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) { d = DO_UPCAST(SCSIDevice, qdev, kid->child); if (d->channel == 0 && d->id == target) { qdev_reset_all(&d->qdev); } } s->resetting--; break; case VIRTIO_SCSI_T_TMF_CLEAR_ACA: default: req->resp.tmf->response = VIRTIO_SCSI_S_FUNCTION_REJECTED; break; } return; incorrect_lun: req->resp.tmf->response = VIRTIO_SCSI_S_INCORRECT_LUN; return; fail: req->resp.tmf->response = VIRTIO_SCSI_S_BAD_TARGET; } | 2,827 |
0 | int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len) { target_phys_addr_t paddr, plen; int err; #ifdef DEBUG_IOMMU fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT " len=0x" DMA_ADDR_FMT "\n", dma, addr, len); #endif while (len) { err = dma->translate(dma, addr, &paddr, &plen, DMA_DIRECTION_FROM_DEVICE); if (err) { return err; } /* The translation might be valid for larger regions. */ if (plen > len) { plen = len; } do_dma_memory_set(dma->as, paddr, c, plen); len -= plen; addr += plen; } return 0; } | 2,828 |
0 | static void ppc6xx_set_irq (void *opaque, int pin, int level) { CPUState *env = opaque; int cur_level; #if defined(PPC_DEBUG_IRQ) if (loglevel & CPU_LOG_INT) { fprintf(logfile, "%s: env %p pin %d level %d\n", __func__, env, pin, level); } #endif cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { switch (pin) { case PPC6xx_INPUT_INT: /* Level sensitive - active high */ #if defined(PPC_DEBUG_IRQ) if (loglevel & CPU_LOG_INT) { fprintf(logfile, "%s: set the external IRQ state to %d\n", __func__, level); } #endif ppc_set_irq(env, PPC_INTERRUPT_EXT, level); break; case PPC6xx_INPUT_SMI: /* Level sensitive - active high */ #if defined(PPC_DEBUG_IRQ) if (loglevel & CPU_LOG_INT) { fprintf(logfile, "%s: set the SMI IRQ state to %d\n", __func__, level); } #endif ppc_set_irq(env, PPC_INTERRUPT_SMI, level); break; case PPC6xx_INPUT_MCP: /* Negative edge sensitive */ /* XXX: TODO: actual reaction may depends on HID0 status * 603/604/740/750: check HID0[EMCP] */ if (cur_level == 1 && level == 0) { #if defined(PPC_DEBUG_IRQ) if (loglevel & CPU_LOG_INT) { fprintf(logfile, "%s: raise machine check state\n", __func__); } #endif ppc_set_irq(env, PPC_INTERRUPT_MCK, 1); } break; case PPC6xx_INPUT_CKSTP_IN: /* Level sensitive - active low */ /* XXX: TODO: relay the signal to CKSTP_OUT pin */ if (level) { #if defined(PPC_DEBUG_IRQ) if (loglevel & CPU_LOG_INT) { fprintf(logfile, "%s: stop the CPU\n", __func__); } #endif env->halted = 1; } else { #if defined(PPC_DEBUG_IRQ) if (loglevel & CPU_LOG_INT) { fprintf(logfile, "%s: restart the CPU\n", __func__); } #endif env->halted = 0; } break; case PPC6xx_INPUT_HRESET: /* Level sensitive - active low */ if (level) { #if 0 // XXX: TOFIX #if defined(PPC_DEBUG_IRQ) if (loglevel & CPU_LOG_INT) { fprintf(logfile, "%s: reset the CPU\n", __func__); } #endif cpu_reset(env); #endif } break; case PPC6xx_INPUT_SRESET: #if defined(PPC_DEBUG_IRQ) if (loglevel & CPU_LOG_INT) { fprintf(logfile, "%s: set the RESET IRQ state to %d\n", __func__, level); } #endif ppc_set_irq(env, PPC_INTERRUPT_RESET, level); break; default: /* Unknown pin - do nothing */ #if defined(PPC_DEBUG_IRQ) if (loglevel & CPU_LOG_INT) { fprintf(logfile, "%s: unknown IRQ pin %d\n", __func__, pin); } #endif return; } if (level) env->irq_input_state |= 1 << pin; else env->irq_input_state &= ~(1 << pin); } } | 2,829 |
1 | static void sdp_service_record_build(struct sdp_service_record_s *record, struct sdp_def_service_s *def, int handle) { int len = 0; uint8_t *data; int *uuid; record->uuids = 0; while (def->attributes[record->attributes].data.type) { len += 3; len += sdp_attr_max_size(&def->attributes[record->attributes ++].data, &record->uuids); } record->uuids = pow2ceil(record->uuids); record->attribute_list = g_malloc0(record->attributes * sizeof(*record->attribute_list)); record->uuid = g_malloc0(record->uuids * sizeof(*record->uuid)); data = g_malloc(len); record->attributes = 0; uuid = record->uuid; while (def->attributes[record->attributes].data.type) { record->attribute_list[record->attributes].pair = data; len = 0; data[len ++] = SDP_DTYPE_UINT | SDP_DSIZE_2; data[len ++] = def->attributes[record->attributes].id >> 8; data[len ++] = def->attributes[record->attributes].id & 0xff; len += sdp_attr_write(data + len, &def->attributes[record->attributes].data, &uuid); /* Special case: assign a ServiceRecordHandle in sequence */ if (def->attributes[record->attributes].id == SDP_ATTR_RECORD_HANDLE) def->attributes[record->attributes].data.value.uint = handle; /* Note: we could also assign a ServiceDescription based on * sdp->device.device->lmp_name. */ record->attribute_list[record->attributes ++].len = len; data += len; } /* Sort the attribute list by the AttributeID */ qsort(record->attribute_list, record->attributes, sizeof(*record->attribute_list), (void *) sdp_attributeid_compare); /* Sort the searchable UUIDs list for bisection */ qsort(record->uuid, record->uuids, sizeof(*record->uuid), (void *) sdp_uuid_compare); } | 2,830 |
1 | static int skip_check(MpegEncContext *s, Picture *p, Picture *ref) { int x, y, plane; int score = 0; int64_t score64 = 0; for (plane = 0; plane < 3; plane++) { const int stride = p->f.linesize[plane]; const int bw = plane ? 1 : 2; for (y = 0; y < s->mb_height * bw; y++) { for (x = 0; x < s->mb_width * bw; x++) { int off = p->shared ? 0 : 16; uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off; uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride); int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8); switch (s->avctx->frame_skip_exp) { case 0: score = FFMAX(score, v); break; case 1: score += FFABS(v); break; case 2: score += v * v; break; case 3: score64 += FFABS(v * v * (int64_t)v); break; case 4: score64 += v * v * (int64_t)(v * v); break; } } } } if (score) score64 = score; if (score64 < s->avctx->frame_skip_threshold) return 1; if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8)) return 1; return 0; } | 2,831 |
1 | int ff_slice_thread_init(AVCodecContext *avctx) { int i; SliceThreadContext *c; int thread_count = avctx->thread_count; #if HAVE_W32THREADS w32thread_init(); #endif // We cannot do this in the encoder init as the threads are created before if (av_codec_is_encoder(avctx->codec) && avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && avctx->height > 2800) thread_count = avctx->thread_count = 1; if (!thread_count) { int nb_cpus = av_cpu_count(); if (avctx->height) nb_cpus = FFMIN(nb_cpus, (avctx->height+15)/16); // use number of cores + 1 as thread count if there is more than one if (nb_cpus > 1) thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS); else thread_count = avctx->thread_count = 1; } if (thread_count <= 1) { avctx->active_thread_type = 0; return 0; } c = av_mallocz(sizeof(SliceThreadContext)); if (!c) return -1; c->workers = av_mallocz_array(thread_count, sizeof(pthread_t)); if (!c->workers) { av_free(c); return -1; } avctx->internal->thread_ctx = c; c->current_job = 0; c->job_count = 0; c->job_size = 0; c->done = 0; pthread_cond_init(&c->current_job_cond, NULL); pthread_cond_init(&c->last_job_cond, NULL); pthread_mutex_init(&c->current_job_lock, NULL); pthread_mutex_lock(&c->current_job_lock); for (i=0; i<thread_count; i++) { if(pthread_create(&c->workers[i], NULL, worker, avctx)) { avctx->thread_count = i; pthread_mutex_unlock(&c->current_job_lock); ff_thread_free(avctx); return -1; } } thread_park_workers(c, thread_count); avctx->execute = thread_execute; avctx->execute2 = thread_execute2; return 0; } | 2,833 |
0 | int ff_mpa_decode_header(AVCodecContext *avctx, uint32_t head, int *sample_rate) { MPADecodeContext s1, *s = &s1; s1.avctx = avctx; if (ff_mpa_check_header(head) != 0) return -1; if (ff_mpegaudio_decode_header(s, head) != 0) { return -1; } switch(s->layer) { case 1: avctx->frame_size = 384; break; case 2: avctx->frame_size = 1152; break; default: case 3: if (s->lsf) avctx->frame_size = 576; else avctx->frame_size = 1152; break; } *sample_rate = s->sample_rate; avctx->channels = s->nb_channels; avctx->bit_rate = s->bit_rate; avctx->sub_id = s->layer; return s->frame_size; } | 2,834 |
0 | int ff_vaapi_decode_init(AVCodecContext *avctx) { VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data; VAStatus vas; int err; ctx->va_config = VA_INVALID_ID; ctx->va_context = VA_INVALID_ID; #if FF_API_VAAPI_CONTEXT if (avctx->hwaccel_context) { av_log(avctx, AV_LOG_WARNING, "Using deprecated struct " "vaapi_context in decode.\n"); ctx->have_old_context = 1; ctx->old_context = avctx->hwaccel_context; // Really we only want the VAAPI device context, but this // allocates a whole generic device context because we don't // have any other way to determine how big it should be. ctx->device_ref = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_VAAPI); if (!ctx->device_ref) { err = AVERROR(ENOMEM); goto fail; } ctx->device = (AVHWDeviceContext*)ctx->device_ref->data; ctx->hwctx = ctx->device->hwctx; ctx->hwctx->display = ctx->old_context->display; // The old VAAPI decode setup assumed this quirk was always // present, so set it here to avoid the behaviour changing. ctx->hwctx->driver_quirks = AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS; } else #endif if (avctx->hw_frames_ctx) { // This structure has a shorter lifetime than the enclosing // AVCodecContext, so we inherit the references from there // and do not need to make separate ones. ctx->frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data; ctx->hwfc = ctx->frames->hwctx; ctx->device = ctx->frames->device_ctx; ctx->hwctx = ctx->device->hwctx; } else if (avctx->hw_device_ctx) { ctx->device = (AVHWDeviceContext*)avctx->hw_device_ctx->data; ctx->hwctx = ctx->device->hwctx; if (ctx->device->type != AV_HWDEVICE_TYPE_VAAPI) { av_log(avctx, AV_LOG_ERROR, "Device supplied for VAAPI " "decoding must be a VAAPI device (not %d).\n", ctx->device->type); err = AVERROR(EINVAL); goto fail; } } else { av_log(avctx, AV_LOG_ERROR, "A hardware device or frames context " "is required for VAAPI decoding.\n"); err = AVERROR(EINVAL); goto fail; } #if FF_API_VAAPI_CONTEXT if (ctx->have_old_context) { ctx->va_config = ctx->old_context->config_id; ctx->va_context = ctx->old_context->context_id; av_log(avctx, AV_LOG_DEBUG, "Using user-supplied decoder " "context: %#x/%#x.\n", ctx->va_config, ctx->va_context); } else { #endif err = vaapi_decode_make_config(avctx); if (err) goto fail; if (!avctx->hw_frames_ctx) { avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx); if (!avctx->hw_frames_ctx) { err = AVERROR(ENOMEM); goto fail; } ctx->frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data; ctx->frames->format = AV_PIX_FMT_VAAPI; ctx->frames->width = avctx->coded_width; ctx->frames->height = avctx->coded_height; ctx->frames->sw_format = ctx->surface_format; ctx->frames->initial_pool_size = ctx->surface_count; err = av_hwframe_ctx_init(avctx->hw_frames_ctx); if (err < 0) { av_log(avctx, AV_LOG_ERROR, "Failed to initialise internal " "frames context: %d.\n", err); goto fail; } ctx->hwfc = ctx->frames->hwctx; } vas = vaCreateContext(ctx->hwctx->display, ctx->va_config, avctx->coded_width, avctx->coded_height, VA_PROGRESSIVE, ctx->hwfc->surface_ids, ctx->hwfc->nb_surfaces, &ctx->va_context); if (vas != VA_STATUS_SUCCESS) { av_log(avctx, AV_LOG_ERROR, "Failed to create decode " "context: %d (%s).\n", vas, vaErrorStr(vas)); err = AVERROR(EIO); goto fail; } av_log(avctx, AV_LOG_DEBUG, "Decode context initialised: " "%#x/%#x.\n", ctx->va_config, ctx->va_context); #if FF_API_VAAPI_CONTEXT } #endif return 0; fail: ff_vaapi_decode_uninit(avctx); return err; } | 2,835 |
1 | void helper_set_alarm(CPUAlphaState *env, uint64_t expire) { if (expire) { env->alarm_expire = expire; qemu_mod_timer(env->alarm_timer, expire); } else { qemu_del_timer(env->alarm_timer); } } | 2,836 |
1 | static int alloc_buffer(FrameBuffer **pool, AVCodecContext *s, FrameBuffer **pbuf) { FrameBuffer *buf = av_mallocz(sizeof(*buf)); int i, ret; const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1; int h_chroma_shift, v_chroma_shift; int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1 int w = s->width, h = s->height; if (!buf) return AVERROR(ENOMEM); if (!(s->flags & CODEC_FLAG_EMU_EDGE)) { w += 2*edge; h += 2*edge; } avcodec_align_dimensions(s, &w, &h); if ((ret = av_image_alloc(buf->base, buf->linesize, w, h, s->pix_fmt, 32)) < 0) { av_freep(&buf); return ret; } /* XXX this shouldn't be needed, but some tests break without this line * those decoders are buggy and need to be fixed. * the following tests fail: * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit */ memset(buf->base[0], 128, ret); avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift); for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) { const int h_shift = i==0 ? 0 : h_chroma_shift; const int v_shift = i==0 ? 0 : v_chroma_shift; if (s->flags & CODEC_FLAG_EMU_EDGE) buf->data[i] = buf->base[i]; else buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i]*edge >> v_shift) + (pixel_size*edge >> h_shift), 32); } buf->w = s->width; buf->h = s->height; buf->pix_fmt = s->pix_fmt; buf->pool = pool; *pbuf = buf; return 0; } | 2,837 |
1 | static int mxf_set_audio_pts(MXFContext *mxf, AVCodecContext *codec, AVPacket *pkt) { MXFTrack *track = mxf->fc->streams[pkt->stream_index]->priv_data; pkt->pts = track->sample_count; if (codec->channels <= 0 || av_get_bits_per_sample(codec->codec_id) <= 0) return AVERROR(EINVAL); track->sample_count += pkt->size / (codec->channels * av_get_bits_per_sample(codec->codec_id) / 8); return 0; } | 2,838 |
1 | uint32_t HELPER(sar_cc)(CPUM68KState *env, uint32_t val, uint32_t shift) { uint64_t temp; uint32_t result; shift &= 63; temp = (int64_t)val << 32 >> shift; result = temp >> 32; env->cc_c = (temp >> 31) & 1; env->cc_n = result; env->cc_z = result; env->cc_v = result ^ val; env->cc_x = shift ? env->cc_c : env->cc_x; return result; } | 2,839 |
1 | static void build_basic_mjpeg_vlc(MJpegDecodeContext *s) { build_vlc(&s->vlcs[0][0], avpriv_mjpeg_bits_dc_luminance, avpriv_mjpeg_val_dc, 12, 0, 0); build_vlc(&s->vlcs[0][1], avpriv_mjpeg_bits_dc_chrominance, avpriv_mjpeg_val_dc, 12, 0, 0); build_vlc(&s->vlcs[1][0], avpriv_mjpeg_bits_ac_luminance, avpriv_mjpeg_val_ac_luminance, 251, 0, 1); build_vlc(&s->vlcs[1][1], avpriv_mjpeg_bits_ac_chrominance, avpriv_mjpeg_val_ac_chrominance, 251, 0, 1); build_vlc(&s->vlcs[2][0], avpriv_mjpeg_bits_ac_luminance, avpriv_mjpeg_val_ac_luminance, 251, 0, 0); build_vlc(&s->vlcs[2][1], avpriv_mjpeg_bits_ac_chrominance, avpriv_mjpeg_val_ac_chrominance, 251, 0, 0); } | 2,840 |
1 | static void fsl_imx31_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = fsl_imx31_realize; dc->desc = "i.MX31 SOC"; } | 2,841 |
1 | static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size) { DiracContext *s = avctx->priv_data; DiracFrame *pic = NULL; AVDiracSeqHeader *dsh; int ret, i; uint8_t parse_code; unsigned tmp; if (size < DATA_UNIT_HEADER_SIZE) return AVERROR_INVALIDDATA; parse_code = buf[4]; init_get_bits(&s->gb, &buf[13], 8*(size - DATA_UNIT_HEADER_SIZE)); if (parse_code == DIRAC_PCODE_SEQ_HEADER) { if (s->seen_sequence_header) return 0; /* [DIRAC_STD] 10. Sequence header */ ret = av_dirac_parse_sequence_header(&dsh, buf + DATA_UNIT_HEADER_SIZE, size - DATA_UNIT_HEADER_SIZE, avctx); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "error parsing sequence header"); return ret; } ret = ff_set_dimensions(avctx, dsh->width, dsh->height); if (ret < 0) { av_freep(&dsh); return ret; } ff_set_sar(avctx, dsh->sample_aspect_ratio); avctx->pix_fmt = dsh->pix_fmt; avctx->color_range = dsh->color_range; avctx->color_trc = dsh->color_trc; avctx->color_primaries = dsh->color_primaries; avctx->colorspace = dsh->colorspace; avctx->profile = dsh->profile; avctx->level = dsh->level; avctx->framerate = dsh->framerate; s->bit_depth = dsh->bit_depth; s->version.major = dsh->version.major; s->version.minor = dsh->version.minor; s->seq = *dsh; av_freep(&dsh); s->pshift = s->bit_depth > 8; avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift); ret = alloc_sequence_buffers(s); if (ret < 0) return ret; s->seen_sequence_header = 1; } else if (parse_code == DIRAC_PCODE_END_SEQ) { /* [DIRAC_STD] End of Sequence */ free_sequence_buffers(s); s->seen_sequence_header = 0; } else if (parse_code == DIRAC_PCODE_AUX) { if (buf[13] == 1) { /* encoder implementation/version */ int ver[3]; /* versions older than 1.0.8 don't store quant delta for subbands with only one codeblock */ if (sscanf(buf+14, "Schroedinger %d.%d.%d", ver, ver+1, ver+2) == 3) if (ver[0] == 1 && ver[1] == 0 && ver[2] <= 7) s->old_delta_quant = 1; } } else if (parse_code & 0x8) { /* picture data unit */ if (!s->seen_sequence_header) { av_log(avctx, AV_LOG_DEBUG, "Dropping frame without sequence header\n"); return AVERROR_INVALIDDATA; } /* find an unused frame */ for (i = 0; i < MAX_FRAMES; i++) if (s->all_frames[i].avframe->data[0] == NULL) pic = &s->all_frames[i]; if (!pic) { av_log(avctx, AV_LOG_ERROR, "framelist full\n"); return AVERROR_INVALIDDATA; } av_frame_unref(pic->avframe); /* [DIRAC_STD] Defined in 9.6.1 ... */ tmp = parse_code & 0x03; /* [DIRAC_STD] num_refs() */ if (tmp > 2) { av_log(avctx, AV_LOG_ERROR, "num_refs of 3\n"); return AVERROR_INVALIDDATA; } s->num_refs = tmp; s->is_arith = (parse_code & 0x48) == 0x08; /* [DIRAC_STD] using_ac() */ s->low_delay = (parse_code & 0x88) == 0x88; /* [DIRAC_STD] is_low_delay() */ s->core_syntax = (parse_code & 0x88) == 0x08; /* [DIRAC_STD] is_core_syntax() */ s->ld_picture = (parse_code & 0xF8) == 0xC8; /* [DIRAC_STD] is_ld_picture() */ s->hq_picture = (parse_code & 0xF8) == 0xE8; /* [DIRAC_STD] is_hq_picture() */ s->dc_prediction = (parse_code & 0x28) == 0x08; /* [DIRAC_STD] using_dc_prediction() */ pic->reference = (parse_code & 0x0C) == 0x0C; /* [DIRAC_STD] is_reference() */ pic->avframe->key_frame = s->num_refs == 0; /* [DIRAC_STD] is_intra() */ pic->avframe->pict_type = s->num_refs + 1; /* Definition of AVPictureType in avutil.h */ /* VC-2 Low Delay has a different parse code than the Dirac Low Delay */ if (s->version.minor == 2 && parse_code == 0x88) s->ld_picture = 1; if (s->low_delay && !(s->ld_picture || s->hq_picture) ) { av_log(avctx, AV_LOG_ERROR, "Invalid low delay flag\n"); return AVERROR_INVALIDDATA; } if ((ret = get_buffer_with_edge(avctx, pic->avframe, (parse_code & 0x0C) == 0x0C ? AV_GET_BUFFER_FLAG_REF : 0)) < 0) return ret; s->current_picture = pic; s->plane[0].stride = pic->avframe->linesize[0]; s->plane[1].stride = pic->avframe->linesize[1]; s->plane[2].stride = pic->avframe->linesize[2]; if (alloc_buffers(s, FFMAX3(FFABS(s->plane[0].stride), FFABS(s->plane[1].stride), FFABS(s->plane[2].stride))) < 0) return AVERROR(ENOMEM); /* [DIRAC_STD] 11.1 Picture parse. picture_parse() */ ret = dirac_decode_picture_header(s); if (ret < 0) return ret; /* [DIRAC_STD] 13.0 Transform data syntax. transform_data() */ ret = dirac_decode_frame_internal(s); if (ret < 0) return ret; } return 0; } | 2,842 |
1 | static void filter0(int32_t *dst, const int32_t *src, int32_t coeff, ptrdiff_t len) { int i; for (i = 0; i < len; i++) dst[i] -= mul22(src[i], coeff); } | 2,843 |
0 | static int encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { int i, k, channel; DCAContext *c = avctx->priv_data; const int16_t *samples; int ret, real_channel = 0; if ((ret = ff_alloc_packet2(avctx, avpkt, DCA_MAX_FRAME_SIZE + DCA_HEADER_SIZE))) return ret; samples = (const int16_t *)frame->data[0]; for (i = 0; i < PCM_SAMPLES; i ++) { /* i is the decimated sample number */ for (channel = 0; channel < c->prim_channels + 1; channel++) { real_channel = c->channel_order_tab[channel]; if (real_channel >= 0) { /* Get 32 PCM samples */ for (k = 0; k < 32; k++) { /* k is the sample number in a 32-sample block */ c->pcm[k] = samples[avctx->channels * (32 * i + k) + channel] << 16; } /* Put subband samples into the proper place */ qmf_decompose(c, c->pcm, &c->subband[i][real_channel][0], real_channel); } } } if (c->lfe_channel) { for (i = 0; i < PCM_SAMPLES / 2; i++) { for (k = 0; k < LFE_INTERPOLATION; k++) /* k is the sample number in a 32-sample block */ c->pcm[k] = samples[avctx->channels * (LFE_INTERPOLATION*i+k) + c->lfe_offset] << 16; c->lfe_data[i] = lfe_downsample(c, c->pcm); } } put_frame(c, c->subband, avpkt->data); avpkt->size = c->frame_size; *got_packet_ptr = 1; return 0; } | 2,845 |
1 | static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat) { yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize, chrFilter, chrSrc, chrFilterSize, dest, uDest, dstW, chrDstW, dstFormat); } | 2,847 |
1 | static void thread_pool_completion_bh(void *opaque) { ThreadPool *pool = opaque; ThreadPoolElement *elem, *next; aio_context_acquire(pool->ctx); restart: QLIST_FOREACH_SAFE(elem, &pool->head, all, next) { if (elem->state != THREAD_DONE) { continue; } trace_thread_pool_complete(pool, elem, elem->common.opaque, elem->ret); QLIST_REMOVE(elem, all); if (elem->common.cb) { /* Read state before ret. */ smp_rmb(); /* Schedule ourselves in case elem->common.cb() calls aio_poll() to * wait for another request that completed at the same time. */ qemu_bh_schedule(pool->completion_bh); aio_context_release(pool->ctx); elem->common.cb(elem->common.opaque, elem->ret); aio_context_acquire(pool->ctx); qemu_aio_unref(elem); goto restart; } else { qemu_aio_unref(elem); } } aio_context_release(pool->ctx); } | 2,848 |
1 | static int yop_read_packet(AVFormatContext *s, AVPacket *pkt) { YopDecContext *yop = s->priv_data; AVIOContext *pb = s->pb; int ret; int actual_video_data_size = yop->frame_size - yop->audio_block_length - yop->palette_size; yop->video_packet.stream_index = 1; if (yop->video_packet.data) { *pkt = yop->video_packet; yop->video_packet.data = NULL; yop->video_packet.size = 0; pkt->data[0] = yop->odd_frame; pkt->flags |= AV_PKT_FLAG_KEY; yop->odd_frame ^= 1; return pkt->size; } ret = av_new_packet(&yop->video_packet, yop->frame_size - yop->audio_block_length); if (ret < 0) return ret; yop->video_packet.pos = avio_tell(pb); ret = avio_read(pb, yop->video_packet.data, yop->palette_size); if (ret < 0) { goto err_out; }else if (ret < yop->palette_size) { ret = AVERROR_EOF; goto err_out; } ret = av_get_packet(pb, pkt, 920); if (ret < 0) goto err_out; // Set position to the start of the frame pkt->pos = yop->video_packet.pos; avio_skip(pb, yop->audio_block_length - ret); ret = avio_read(pb, yop->video_packet.data + yop->palette_size, actual_video_data_size); if (ret < 0) goto err_out; else if (ret < actual_video_data_size) av_shrink_packet(&yop->video_packet, yop->palette_size + ret); // Arbitrarily return the audio data first return yop->audio_block_length; err_out: av_free_packet(&yop->video_packet); return ret; } | 2,849 |
1 | static int upload_texture(SDL_Texture *tex, AVFrame *frame, struct SwsContext **img_convert_ctx) { int ret = 0; switch (frame->format) { case AV_PIX_FMT_YUV420P: ret = SDL_UpdateYUVTexture(tex, NULL, frame->data[0], frame->linesize[0], frame->data[1], frame->linesize[1], frame->data[2], frame->linesize[2]); break; case AV_PIX_FMT_BGRA: ret = SDL_UpdateTexture(tex, NULL, frame->data[0], frame->linesize[0]); break; default: /* This should only happen if we are not using avfilter... */ *img_convert_ctx = sws_getCachedContext(*img_convert_ctx, frame->width, frame->height, frame->format, frame->width, frame->height, AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL); if (*img_convert_ctx != NULL) { uint8_t *pixels; int pitch; if (!SDL_LockTexture(tex, NULL, (void **)&pixels, &pitch)) { sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize, 0, frame->height, &pixels, &pitch); SDL_UnlockTexture(tex); } } else { av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n"); ret = -1; } break; } return ret; } | 2,850 |
1 | static int vorbis_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { vorbis_enc_context *venc = avctx->priv_data; int i, ret, need_more; int frame_size = 1 << (venc->log2_blocksize[1] - 1); vorbis_enc_mode *mode; vorbis_enc_mapping *mapping; PutBitContext pb; if (frame) { if ((ret = ff_af_queue_add(&venc->afq, frame)) < 0) return ret; ff_bufqueue_add(avctx, &venc->bufqueue, av_frame_clone(frame)); } else if (!venc->afq.remaining_samples) return 0; need_more = venc->bufqueue.available * avctx->frame_size < frame_size; need_more = frame && need_more; if (need_more) return 0; /* Pad the bufqueue with empty frames for encoding the last packet. */ if (!frame) { if (venc->bufqueue.available * avctx->frame_size < frame_size) { int frames_needed = (frame_size/avctx->frame_size) - venc->bufqueue.available; int i; for (i = 0; i < frames_needed; i++) { AVFrame *empty = spawn_empty_frame(avctx, venc->channels); if (!empty) return AVERROR(ENOMEM); ff_bufqueue_add(avctx, &venc->bufqueue, empty); } } } move_audio(venc, avctx->frame_size); if (!apply_window_and_mdct(venc)) return 0; if ((ret = ff_alloc_packet2(avctx, avpkt, 8192, 0)) < 0) return ret; init_put_bits(&pb, avpkt->data, avpkt->size); if (pb.size_in_bits - put_bits_count(&pb) < 1 + ilog(venc->nmodes - 1)) { av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); return AVERROR(EINVAL); } put_bits(&pb, 1, 0); // magic bit put_bits(&pb, ilog(venc->nmodes - 1), 1); // Mode for current frame mode = &venc->modes[1]; mapping = &venc->mappings[mode->mapping]; if (mode->blockflag) { put_bits(&pb, 1, 1); // Previous windowflag put_bits(&pb, 1, 1); // Next windowflag } for (i = 0; i < venc->channels; i++) { vorbis_enc_floor *fc = &venc->floors[mapping->floor[mapping->mux[i]]]; uint16_t posts[MAX_FLOOR_VALUES]; floor_fit(venc, fc, &venc->coeffs[i * frame_size], posts, frame_size); if (floor_encode(venc, fc, &pb, posts, &venc->floor[i * frame_size], frame_size)) { av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); return AVERROR(EINVAL); } } for (i = 0; i < venc->channels * frame_size; i++) venc->coeffs[i] /= venc->floor[i]; for (i = 0; i < mapping->coupling_steps; i++) { float *mag = venc->coeffs + mapping->magnitude[i] * frame_size; float *ang = venc->coeffs + mapping->angle[i] * frame_size; int j; for (j = 0; j < frame_size; j++) { float a = ang[j]; ang[j] -= mag[j]; if (mag[j] > 0) ang[j] = -ang[j]; if (ang[j] < 0) mag[j] = a; } } if (residue_encode(venc, &venc->residues[mapping->residue[mapping->mux[0]]], &pb, venc->coeffs, frame_size, venc->channels)) { av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); return AVERROR(EINVAL); } flush_put_bits(&pb); avpkt->size = put_bits_count(&pb) >> 3; ff_af_queue_remove(&venc->afq, frame_size, &avpkt->pts, &avpkt->duration); if (frame_size > avpkt->duration) { uint8_t *side = av_packet_new_side_data(avpkt, AV_PKT_DATA_SKIP_SAMPLES, 10); if (!side) return AVERROR(ENOMEM); AV_WL32(&side[4], frame_size - avpkt->duration); } *got_packet_ptr = 1; return 0; } | 2,851 |
1 | void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref) { void (*start_frame)(AVFilterLink *, AVFilterBufferRef *); AVFilterPad *dst = &link_dpad(link); FF_DPRINTF_START(NULL, start_frame); ff_dprintf_link(NULL, link, 0); dprintf(NULL, " "); ff_dprintf_ref(NULL, picref, 1); if (!(start_frame = dst->start_frame)) start_frame = avfilter_default_start_frame; /* prepare to copy the picture if it has insufficient permissions */ if ((dst->min_perms & picref->perms) != dst->min_perms || dst->rej_perms & picref->perms) { av_log(link->dst, AV_LOG_DEBUG, "frame copy needed (have perms %x, need %x, reject %x)\n", picref->perms, link_dpad(link).min_perms, link_dpad(link).rej_perms); link->cur_buf = avfilter_default_get_video_buffer(link, dst->min_perms, link->w, link->h); link->src_buf = picref; avfilter_copy_buffer_ref_props(link->cur_buf, link->src_buf); } else link->cur_buf = picref; start_frame(link, link->cur_buf); } | 2,852 |
1 | static void ivshmem_check_memdev_is_busy(Object *obj, const char *name, Object *val, Error **errp) { if (host_memory_backend_is_mapped(MEMORY_BACKEND(val))) { char *path = object_get_canonical_path_component(val); error_setg(errp, "can't use already busy memdev: %s", path); g_free(path); } else { qdev_prop_allow_set_link_before_realize(obj, name, val, errp); } } | 2,853 |
1 | static int vorbis_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; vorbis_context *vc = avctx->priv_data; AVFrame *frame = data; GetBitContext *gb = &vc->gb; float *channel_ptrs[255]; int i, len, ret; av_dlog(NULL, "packet length %d \n", buf_size); /* get output buffer */ frame->nb_samples = vc->blocksize[1] / 2; if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } if (vc->audio_channels > 8) { for (i = 0; i < vc->audio_channels; i++) channel_ptrs[i] = (float *)frame->extended_data[i]; } else { for (i = 0; i < vc->audio_channels; i++) { int ch = ff_vorbis_channel_layout_offsets[vc->audio_channels - 1][i]; channel_ptrs[ch] = (float *)frame->extended_data[i]; } } init_get_bits(gb, buf, buf_size*8); if ((len = vorbis_parse_audio_packet(vc, channel_ptrs)) <= 0) return len; if (!vc->first_frame) { vc->first_frame = 1; *got_frame_ptr = 0; return buf_size; } av_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n", get_bits_count(gb) / 8, get_bits_count(gb) % 8, len); frame->nb_samples = len; *got_frame_ptr = 1; return buf_size; } | 2,854 |
1 | static int qemu_balloon_status(MonitorCompletion cb, void *opaque) { if (!balloon_event_fn) { return 0; } balloon_event_fn(balloon_opaque, 0, cb, opaque); return 1; } | 2,855 |
1 | static void put_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels, int line_size) { int i; uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; /* read the pixels */ for(i=0;i<4;i++) { pixels[0] = cm[block[0]]; pixels[1] = cm[block[1]]; pixels[2] = cm[block[2]]; pixels[3] = cm[block[3]]; pixels += line_size; block += 8; } } | 2,856 |
0 | static void check_pred4x4(H264PredContext *h, uint8_t *buf0, uint8_t *buf1, int codec, int chroma_format, int bit_depth) { if (chroma_format == 1) { uint8_t *topright = buf0 + 2*16; int pred_mode; for (pred_mode = 0; pred_mode < 15; pred_mode++) { if (check_pred_func(h->pred4x4[pred_mode], "4x4", pred4x4_modes[codec][pred_mode])) { randomize_buffers(); call_ref(src0, topright, (ptrdiff_t)12*SIZEOF_PIXEL); call_new(src1, topright, (ptrdiff_t)12*SIZEOF_PIXEL); if (memcmp(buf0, buf1, BUF_SIZE)) fail(); bench_new(src1, topright, (ptrdiff_t)12*SIZEOF_PIXEL); } } } } | 2,858 |
0 | int ff_mov_iso639_to_lang(const char *lang, int mp4) { int i, code = 0; /* old way, only for QT? */ for (i = 0; !mp4 && i < FF_ARRAY_ELEMS(mov_mdhd_language_map); i++) { if (mov_mdhd_language_map[i] && !strcmp(lang, mov_mdhd_language_map[i])) return i; } /* XXX:can we do that in mov too? */ if (!mp4) return 0; /* handle undefined as such */ if (lang[0] == '\0') lang = "und"; /* 5bit ascii */ for (i = 0; i < 3; i++) { unsigned char c = (unsigned char)lang[i]; if (c < 0x60) return 0; if (c > 0x60 + 0x1f) return 0; code <<= 5; code |= (c - 0x60); } return code; } | 2,859 |
0 | static int dxva2_vp9_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size) { const VP9SharedContext *h = avctx->priv_data; AVDXVAContext *ctx = avctx->hwaccel_context; struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private; if (DXVA_CONTEXT_DECODER(avctx, ctx) == NULL || DXVA_CONTEXT_CFG(avctx, ctx) == NULL || DXVA_CONTEXT_COUNT(avctx, ctx) <= 0) return -1; av_assert0(ctx_pic); /* Fill up DXVA_PicParams_VP9 */ if (fill_picture_parameters(avctx, ctx, h, &ctx_pic->pp) < 0) return -1; ctx_pic->bitstream_size = 0; ctx_pic->bitstream = NULL; return 0; } | 2,860 |
1 | static void test_nop(gconstpointer data) { QTestState *s; const char *machine = data; char *args; args = g_strdup_printf("-display none -machine %s", machine); s = qtest_start(args); if (s) { qtest_quit(s); } g_free(args); } | 2,861 |
1 | static int ass_get_duration(const uint8_t *p) { int sh, sm, ss, sc, eh, em, es, ec; uint64_t start, end; if (sscanf(p, "%*[^,],%d:%d:%d%*c%d,%d:%d:%d%*c%d", &sh, &sm, &ss, &sc, &eh, &em, &es, &ec) != 8) return 0; start = 3600000*sh + 60000*sm + 1000*ss + 10*sc; end = 3600000*eh + 60000*em + 1000*es + 10*ec; return end - start; } | 2,863 |
1 | static int read_sbr_grid(AACContext *ac, SpectralBandReplication *sbr, GetBitContext *gb, SBRData *ch_data) { int i; ch_data->bs_freq_res[0] = ch_data->bs_freq_res[ch_data->bs_num_env[1]]; ch_data->bs_num_env[0] = ch_data->bs_num_env[1]; ch_data->bs_amp_res = sbr->bs_amp_res_header; switch (ch_data->bs_frame_class = get_bits(gb, 2)) { case FIXFIX: ch_data->bs_num_env[1] = 1 << get_bits(gb, 2); if (ch_data->bs_num_env[1] == 1) ch_data->bs_amp_res = 0; ch_data->bs_freq_res[1] = get_bits1(gb); for (i = 1; i < ch_data->bs_num_env[1]; i++) ch_data->bs_freq_res[i + 1] = ch_data->bs_freq_res[1]; break; case FIXVAR: ch_data->bs_var_bord[1] = get_bits(gb, 2); ch_data->bs_num_rel[1] = get_bits(gb, 2); ch_data->bs_num_env[1] = ch_data->bs_num_rel[1] + 1; for (i = 0; i < ch_data->bs_num_rel[1]; i++) ch_data->bs_rel_bord[1][i] = 2 * get_bits(gb, 2) + 2; ch_data->bs_pointer = get_bits(gb, ceil_log2[ch_data->bs_num_env[1]]); for (i = 0; i < ch_data->bs_num_env[1]; i++) ch_data->bs_freq_res[ch_data->bs_num_env[1] - i] = get_bits1(gb); break; case VARFIX: ch_data->bs_var_bord[0] = get_bits(gb, 2); ch_data->bs_num_rel[0] = get_bits(gb, 2); ch_data->bs_num_env[1] = ch_data->bs_num_rel[0] + 1; for (i = 0; i < ch_data->bs_num_rel[0]; i++) ch_data->bs_rel_bord[0][i] = 2 * get_bits(gb, 2) + 2; ch_data->bs_pointer = get_bits(gb, ceil_log2[ch_data->bs_num_env[1]]); get_bits1_vector(gb, ch_data->bs_freq_res + 1, ch_data->bs_num_env[1]); break; case VARVAR: ch_data->bs_var_bord[0] = get_bits(gb, 2); ch_data->bs_var_bord[1] = get_bits(gb, 2); ch_data->bs_num_rel[0] = get_bits(gb, 2); ch_data->bs_num_rel[1] = get_bits(gb, 2); ch_data->bs_num_env[1] = ch_data->bs_num_rel[0] + ch_data->bs_num_rel[1] + 1; for (i = 0; i < ch_data->bs_num_rel[0]; i++) ch_data->bs_rel_bord[0][i] = 2 * get_bits(gb, 2) + 2; for (i = 0; i < ch_data->bs_num_rel[1]; i++) ch_data->bs_rel_bord[1][i] = 2 * get_bits(gb, 2) + 2; ch_data->bs_pointer = get_bits(gb, ceil_log2[ch_data->bs_num_env[1]]); get_bits1_vector(gb, ch_data->bs_freq_res + 1, ch_data->bs_num_env[1]); break; } if (ch_data->bs_pointer > ch_data->bs_num_env[1] + 1) { av_log(ac->avccontext, AV_LOG_ERROR, "Invalid bitstream, bs_pointer points to a middle noise border outside the time borders table: %d\n", ch_data->bs_pointer); return -1; } if (ch_data->bs_frame_class == FIXFIX && ch_data->bs_num_env[1] > 4) { av_log(ac->avccontext, AV_LOG_ERROR, "Invalid bitstream, too many SBR envelopes in FIXFIX type SBR frame: %d\n", ch_data->bs_num_env[1]); return -1; } if (ch_data->bs_frame_class == VARVAR && ch_data->bs_num_env[1] > 5) { av_log(ac->avccontext, AV_LOG_ERROR, "Invalid bitstream, too many SBR envelopes in VARVAR type SBR frame: %d\n", ch_data->bs_num_env[1]); return -1; } ch_data->bs_num_noise = (ch_data->bs_num_env[1] > 1) + 1; return 0; } | 2,864 |
1 | static int modified_clear_reset(S390CPU *cpu) { S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); pause_all_vcpus(); cpu_synchronize_all_states(); cpu_full_reset_all(); io_subsystem_reset(); scc->load_normal(CPU(cpu)); cpu_synchronize_all_post_reset(); resume_all_vcpus(); return 0; } | 2,865 |
0 | static av_cold int png_enc_init(AVCodecContext *avctx) { PNGEncContext *s = avctx->priv_data; #if FF_API_CODED_FRAME FF_DISABLE_DEPRECATION_WARNINGS avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; avctx->coded_frame->key_frame = 1; FF_ENABLE_DEPRECATION_WARNINGS #endif ff_huffyuvencdsp_init(&s->hdsp); s->filter_type = av_clip(avctx->prediction_method, PNG_FILTER_VALUE_NONE, PNG_FILTER_VALUE_MIXED); if (avctx->pix_fmt == AV_PIX_FMT_MONOBLACK) s->filter_type = PNG_FILTER_VALUE_NONE; return 0; } | 2,866 |
1 | static void test_smbios_ep_table(test_data *data) { struct smbios_entry_point *ep_table = &data->smbios_ep_table; uint32_t addr = data->smbios_ep_addr; ACPI_READ_ARRAY(ep_table->anchor_string, addr); g_assert(!memcmp(ep_table->anchor_string, "_SM_", 4)); ACPI_READ_FIELD(ep_table->checksum, addr); ACPI_READ_FIELD(ep_table->length, addr); ACPI_READ_FIELD(ep_table->smbios_major_version, addr); ACPI_READ_FIELD(ep_table->smbios_minor_version, addr); ACPI_READ_FIELD(ep_table->max_structure_size, addr); ACPI_READ_FIELD(ep_table->entry_point_revision, addr); ACPI_READ_ARRAY(ep_table->formatted_area, addr); ACPI_READ_ARRAY(ep_table->intermediate_anchor_string, addr); g_assert(!memcmp(ep_table->intermediate_anchor_string, "_DMI_", 5)); ACPI_READ_FIELD(ep_table->intermediate_checksum, addr); ACPI_READ_FIELD(ep_table->structure_table_length, addr); g_assert_cmpuint(ep_table->structure_table_length, >, 0); ACPI_READ_FIELD(ep_table->structure_table_address, addr); ACPI_READ_FIELD(ep_table->number_of_structures, addr); g_assert_cmpuint(ep_table->number_of_structures, >, 0); ACPI_READ_FIELD(ep_table->smbios_bcd_revision, addr); g_assert(!acpi_checksum((uint8_t *)ep_table, sizeof *ep_table)); g_assert(!acpi_checksum((uint8_t *)ep_table + 0x10, sizeof *ep_table - 0x10)); } | 2,867 |
1 | static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg) { int i, j, k, mx, my, dx, dy, x, y; const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1); const int part_height = 16 >> ((unsigned)(size + 1) / 3); const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0; const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width; const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width; for (i = 0; i < 16; i += part_height) for (j = 0; j < 16; j += part_width) { const int b_xy = (4 * s->mb_x + (j >> 2)) + (4 * s->mb_y + (i >> 2)) * s->b_stride; int dxy; x = 16 * s->mb_x + j; y = 16 * s->mb_y + i; k = (j >> 2 & 1) + (i >> 1 & 2) + (j >> 1 & 4) + (i & 8); if (mode != PREDICT_MODE) { svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my); } else { mx = s->next_pic->motion_val[0][b_xy][0] << 1; my = s->next_pic->motion_val[0][b_xy][1] << 1; if (dir == 0) { mx = mx * s->frame_num_offset / s->prev_frame_num_offset + 1 >> 1; my = my * s->frame_num_offset / s->prev_frame_num_offset + 1 >> 1; } else { mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) / s->prev_frame_num_offset + 1 >> 1; my = my * (s->frame_num_offset - s->prev_frame_num_offset) / s->prev_frame_num_offset + 1 >> 1; } } /* clip motion vector prediction to frame border */ mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x); my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y); /* get (optional) motion vector differential */ if (mode == PREDICT_MODE) { dx = dy = 0; } else { dy = get_interleaved_se_golomb(&s->gb_slice); dx = get_interleaved_se_golomb(&s->gb_slice); if (dx != (int16_t)dx || dy != (int16_t)dy) { av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n"); return -1; } } /* compute motion vector */ if (mode == THIRDPEL_MODE) { int fx, fy; mx = (mx + 1 >> 1) + dx; my = (my + 1 >> 1) + dy; fx = (unsigned)(mx + 0x30000) / 3 - 0x10000; fy = (unsigned)(my + 0x30000) / 3 - 0x10000; dxy = (mx - 3 * fx) + 4 * (my - 3 * fy); svq3_mc_dir_part(s, x, y, part_width, part_height, fx, fy, dxy, 1, dir, avg); mx += mx; my += my; } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) { mx = (unsigned)(mx + 1 + 0x30000) / 3 + dx - 0x10000; my = (unsigned)(my + 1 + 0x30000) / 3 + dy - 0x10000; dxy = (mx & 1) + 2 * (my & 1); svq3_mc_dir_part(s, x, y, part_width, part_height, mx >> 1, my >> 1, dxy, 0, dir, avg); mx *= 3; my *= 3; } else { mx = (unsigned)(mx + 3 + 0x60000) / 6 + dx - 0x10000; my = (unsigned)(my + 3 + 0x60000) / 6 + dy - 0x10000; svq3_mc_dir_part(s, x, y, part_width, part_height, mx, my, 0, 0, dir, avg); mx *= 6; my *= 6; } /* update mv_cache */ if (mode != PREDICT_MODE) { int32_t mv = pack16to32(mx, my); if (part_height == 8 && i < 8) { AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv); if (part_width == 8 && j < 8) AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv); } if (part_width == 8 && j < 8) AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv); if (part_width == 4 || part_height == 4) AV_WN32A(s->mv_cache[dir][scan8[k]], mv); } /* write back motion vectors */ fill_rectangle(s->cur_pic->motion_val[dir][b_xy], part_width >> 2, part_height >> 2, s->b_stride, pack16to32(mx, my), 4); } return 0; } | 2,868 |
0 | static void pxa2xx_update_display(void *opaque) { struct pxa2xx_lcdc_s *s = (struct pxa2xx_lcdc_s *) opaque; target_phys_addr_t fbptr; int miny, maxy; int ch; if (!(s->control[0] & LCCR0_ENB)) return; pxa2xx_descriptor_load(s); pxa2xx_lcdc_resize(s); miny = s->yres; maxy = 0; s->transp = s->dma_ch[2].up || s->dma_ch[3].up; /* Note: With overlay planes the order depends on LCCR0 bit 25. */ for (ch = 0; ch < PXA_LCDDMA_CHANS; ch ++) if (s->dma_ch[ch].up) { if (!s->dma_ch[ch].source) { pxa2xx_dma_ber_set(s, ch); continue; } fbptr = s->dma_ch[ch].source; if (!(fbptr >= PXA2XX_SDRAM_BASE && fbptr <= PXA2XX_SDRAM_BASE + phys_ram_size)) { pxa2xx_dma_ber_set(s, ch); continue; } if (s->dma_ch[ch].command & LDCMD_PAL) { cpu_physical_memory_read(fbptr, s->dma_ch[ch].pbuffer, MAX(LDCMD_LENGTH(s->dma_ch[ch].command), sizeof(s->dma_ch[ch].pbuffer))); pxa2xx_palette_parse(s, ch, s->bpp); } else { /* Do we need to reparse palette */ if (LCCR4_PALFOR(s->control[4]) != s->pal_for) pxa2xx_palette_parse(s, ch, s->bpp); /* ACK frame start */ pxa2xx_dma_sof_set(s, ch); s->dma_ch[ch].redraw(s, fbptr, &miny, &maxy); s->invalidated = 0; /* ACK frame completed */ pxa2xx_dma_eof_set(s, ch); } } if (s->control[0] & LCCR0_DIS) { /* ACK last frame completed */ s->control[0] &= ~LCCR0_ENB; s->status[0] |= LCSR0_LDD; } if (miny >= 0) { if (s->orientation) dpy_update(s->ds, miny, 0, maxy, s->xres); else dpy_update(s->ds, 0, miny, s->xres, maxy); } pxa2xx_lcdc_int_update(s); qemu_irq_raise(s->vsync_cb); } | 2,869 |
0 | char *object_property_print(Object *obj, const char *name, bool human, Error **errp) { StringOutputVisitor *sov; char *string = NULL; Error *local_err = NULL; sov = string_output_visitor_new(human); object_property_get(obj, string_output_get_visitor(sov), name, &local_err); if (local_err) { error_propagate(errp, local_err); goto out; } string = string_output_get_string(sov); out: visit_free(string_output_get_visitor(sov)); return string; } | 2,870 |
0 | static int send_sub_rect_solid(VncState *vs, int x, int y, int w, int h) { vnc_framebuffer_update(vs, x, y, w, h, VNC_ENCODING_TIGHT); vnc_tight_start(vs); vnc_raw_send_framebuffer_update(vs, x, y, w, h); vnc_tight_stop(vs); return send_solid_rect(vs); } | 2,872 |
0 | static void qdict_array_split_test(void) { QDict *test_dict = qdict_new(); QDict *dict1, *dict2; QList *test_list; /* * Test the split of * * { * "1.x": 0, * "3.y": 1, * "0.a": 42, * "o.o": 7, * "0.b": 23 * } * * to * * [ * { * "a": 42, * "b": 23 * }, * { * "x": 0 * } * ] * * and * * { * "3.y": 1, * "o.o": 7 * } * * (remaining in the old QDict) * * This example is given in the comment of qdict_array_split(). */ qdict_put(test_dict, "1.x", qint_from_int(0)); qdict_put(test_dict, "3.y", qint_from_int(1)); qdict_put(test_dict, "0.a", qint_from_int(42)); qdict_put(test_dict, "o.o", qint_from_int(7)); qdict_put(test_dict, "0.b", qint_from_int(23)); qdict_array_split(test_dict, &test_list); dict1 = qobject_to_qdict(qlist_pop(test_list)); dict2 = qobject_to_qdict(qlist_pop(test_list)); g_assert(dict1); g_assert(dict2); g_assert(qlist_empty(test_list)); QDECREF(test_list); g_assert(qdict_get_int(dict1, "a") == 42); g_assert(qdict_get_int(dict1, "b") == 23); g_assert(qdict_size(dict1) == 2); QDECREF(dict1); g_assert(qdict_get_int(dict2, "x") == 0); g_assert(qdict_size(dict2) == 1); QDECREF(dict2); g_assert(qdict_get_int(test_dict, "3.y") == 1); g_assert(qdict_get_int(test_dict, "o.o") == 7); g_assert(qdict_size(test_dict) == 2); QDECREF(test_dict); } | 2,873 |
0 | static target_ulong disas_insn(CPUX86State *env, DisasContext *s, target_ulong pc_start) { int b, prefixes, aflag, dflag; int shift, ot; int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val; target_ulong next_eip, tval; int rex_w, rex_r; if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { tcg_gen_debug_insn_start(pc_start); } s->pc = pc_start; prefixes = 0; aflag = s->code32; dflag = s->code32; s->override = -1; rex_w = -1; rex_r = 0; #ifdef TARGET_X86_64 s->rex_x = 0; s->rex_b = 0; x86_64_hregs = 0; #endif s->rip_offset = 0; /* for relative ip address */ next_byte: b = cpu_ldub_code(env, s->pc); s->pc++; /* check prefixes */ #ifdef TARGET_X86_64 if (CODE64(s)) { switch (b) { case 0xf3: prefixes |= PREFIX_REPZ; goto next_byte; case 0xf2: prefixes |= PREFIX_REPNZ; goto next_byte; case 0xf0: prefixes |= PREFIX_LOCK; goto next_byte; case 0x2e: s->override = R_CS; goto next_byte; case 0x36: s->override = R_SS; goto next_byte; case 0x3e: s->override = R_DS; goto next_byte; case 0x26: s->override = R_ES; goto next_byte; case 0x64: s->override = R_FS; goto next_byte; case 0x65: s->override = R_GS; goto next_byte; case 0x66: prefixes |= PREFIX_DATA; goto next_byte; case 0x67: prefixes |= PREFIX_ADR; goto next_byte; case 0x40 ... 0x4f: /* REX prefix */ rex_w = (b >> 3) & 1; rex_r = (b & 0x4) << 1; s->rex_x = (b & 0x2) << 2; REX_B(s) = (b & 0x1) << 3; x86_64_hregs = 1; /* select uniform byte register addressing */ goto next_byte; } if (rex_w == 1) { /* 0x66 is ignored if rex.w is set */ dflag = 2; } else { if (prefixes & PREFIX_DATA) dflag ^= 1; } if (!(prefixes & PREFIX_ADR)) aflag = 2; } else #endif { switch (b) { case 0xf3: prefixes |= PREFIX_REPZ; goto next_byte; case 0xf2: prefixes |= PREFIX_REPNZ; goto next_byte; case 0xf0: prefixes |= PREFIX_LOCK; goto next_byte; case 0x2e: s->override = R_CS; goto next_byte; case 0x36: s->override = R_SS; goto next_byte; case 0x3e: s->override = R_DS; goto next_byte; case 0x26: s->override = R_ES; goto next_byte; case 0x64: s->override = R_FS; goto next_byte; case 0x65: s->override = R_GS; goto next_byte; case 0x66: prefixes |= PREFIX_DATA; goto next_byte; case 0x67: prefixes |= PREFIX_ADR; goto next_byte; } if (prefixes & PREFIX_DATA) dflag ^= 1; if (prefixes & PREFIX_ADR) aflag ^= 1; } s->prefix = prefixes; s->aflag = aflag; s->dflag = dflag; /* lock generation */ if (prefixes & PREFIX_LOCK) gen_helper_lock(); /* now check op code */ reswitch: switch(b) { case 0x0f: /**************************/ /* extended op code */ b = cpu_ldub_code(env, s->pc++) | 0x100; goto reswitch; /**************************/ /* arith & logic */ case 0x00 ... 0x05: case 0x08 ... 0x0d: case 0x10 ... 0x15: case 0x18 ... 0x1d: case 0x20 ... 0x25: case 0x28 ... 0x2d: case 0x30 ... 0x35: case 0x38 ... 
0x3d: { int op, f, val; op = (b >> 3) & 7; f = (b >> 1) & 3; if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; switch(f) { case 0: /* OP Ev, Gv */ modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); opreg = OR_TMP0; } else if (op == OP_XORL && rm == reg) { xor_zero: /* xor reg, reg optimisation */ gen_op_movl_T0_0(); s->cc_op = CC_OP_LOGICB + ot; gen_op_mov_reg_T0(ot, reg); gen_op_update1_cc(); break; } else { opreg = rm; } gen_op_mov_TN_reg(ot, 1, reg); gen_op(s, op, ot, opreg); break; case 1: /* OP Gv, Ev */ modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); gen_op_ld_T1_A0(ot + s->mem_index); } else if (op == OP_XORL && rm == reg) { goto xor_zero; } else { gen_op_mov_TN_reg(ot, 1, rm); } gen_op(s, op, ot, reg); break; case 2: /* OP A, Iv */ val = insn_get(env, s, ot); gen_op_movl_T1_im(val); gen_op(s, op, ot, OR_EAX); break; } } break; case 0x82: if (CODE64(s)) goto illegal_op; case 0x80: /* GRP1 */ case 0x81: case 0x83: { int val; if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (mod != 3) { if (b == 0x83) s->rip_offset = 1; else s->rip_offset = insn_const_size(ot); gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); opreg = OR_TMP0; } else { opreg = rm; } switch(b) { default: case 0x80: case 0x81: case 0x82: val = insn_get(env, s, ot); break; case 0x83: val = (int8_t)insn_get(env, s, OT_BYTE); break; } gen_op_movl_T1_im(val); gen_op(s, op, ot, opreg); } break; /**************************/ /* inc, dec, and other misc arith */ case 0x40 ... 0x47: /* inc Gv */ ot = dflag ? OT_LONG : OT_WORD; gen_inc(s, ot, OR_EAX + (b & 7), 1); break; case 0x48 ... 0x4f: /* dec Gv */ ot = dflag ? 
OT_LONG : OT_WORD; gen_inc(s, ot, OR_EAX + (b & 7), -1); break; case 0xf6: /* GRP3 */ case 0xf7: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (mod != 3) { if (op == 0) s->rip_offset = insn_const_size(ot); gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); gen_op_ld_T0_A0(ot + s->mem_index); } else { gen_op_mov_TN_reg(ot, 0, rm); } switch(op) { case 0: /* test */ val = insn_get(env, s, ot); gen_op_movl_T1_im(val); gen_op_testl_T0_T1_cc(); s->cc_op = CC_OP_LOGICB + ot; break; case 2: /* not */ tcg_gen_not_tl(cpu_T[0], cpu_T[0]); if (mod != 3) { gen_op_st_T0_A0(ot + s->mem_index); } else { gen_op_mov_reg_T0(ot, rm); } break; case 3: /* neg */ tcg_gen_neg_tl(cpu_T[0], cpu_T[0]); if (mod != 3) { gen_op_st_T0_A0(ot + s->mem_index); } else { gen_op_mov_reg_T0(ot, rm); } gen_op_update_neg_cc(); s->cc_op = CC_OP_SUBB + ot; break; case 4: /* mul */ switch(ot) { case OT_BYTE: gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX); tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); gen_op_mov_reg_T0(OT_WORD, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00); s->cc_op = CC_OP_MULB; break; case OT_WORD: gen_op_mov_TN_reg(OT_WORD, 1, R_EAX); tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); gen_op_mov_reg_T0(OT_WORD, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16); gen_op_mov_reg_T0(OT_WORD, R_EDX); tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]); s->cc_op = CC_OP_MULW; break; default: case OT_LONG: #ifdef TARGET_X86_64 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX); tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]); tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); gen_op_mov_reg_T0(OT_LONG, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32); gen_op_mov_reg_T0(OT_LONG, R_EDX); tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]); #else { TCGv_i64 t0, t1; t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); gen_op_mov_TN_reg(OT_LONG, 1, R_EAX); tcg_gen_extu_i32_i64(t0, cpu_T[0]); tcg_gen_extu_i32_i64(t1, cpu_T[1]); tcg_gen_mul_i64(t0, t0, t1); tcg_gen_trunc_i64_i32(cpu_T[0], t0); gen_op_mov_reg_T0(OT_LONG, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_trunc_i64_i32(cpu_T[0], t0); gen_op_mov_reg_T0(OT_LONG, R_EDX); tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]); } #endif s->cc_op = CC_OP_MULL; break; #ifdef TARGET_X86_64 case OT_QUAD: gen_helper_mulq_EAX_T0(cpu_env, cpu_T[0]); s->cc_op = CC_OP_MULQ; break; #endif } break; case 5: /* imul */ switch(ot) { case OT_BYTE: gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX); tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); gen_op_mov_reg_T0(OT_WORD, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]); tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); s->cc_op = CC_OP_MULB; break; case OT_WORD: gen_op_mov_TN_reg(OT_WORD, 1, R_EAX); tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); gen_op_mov_reg_T0(OT_WORD, R_EAX); 
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]); tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16); gen_op_mov_reg_T0(OT_WORD, R_EDX); s->cc_op = CC_OP_MULW; break; default: case OT_LONG: #ifdef TARGET_X86_64 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX); tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]); tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); gen_op_mov_reg_T0(OT_LONG, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]); tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32); gen_op_mov_reg_T0(OT_LONG, R_EDX); #else { TCGv_i64 t0, t1; t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); gen_op_mov_TN_reg(OT_LONG, 1, R_EAX); tcg_gen_ext_i32_i64(t0, cpu_T[0]); tcg_gen_ext_i32_i64(t1, cpu_T[1]); tcg_gen_mul_i64(t0, t0, t1); tcg_gen_trunc_i64_i32(cpu_T[0], t0); gen_op_mov_reg_T0(OT_LONG, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_trunc_i64_i32(cpu_T[0], t0); gen_op_mov_reg_T0(OT_LONG, R_EDX); tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); } #endif s->cc_op = CC_OP_MULL; break; #ifdef TARGET_X86_64 case OT_QUAD: gen_helper_imulq_EAX_T0(cpu_env, cpu_T[0]); s->cc_op = CC_OP_MULQ; break; #endif } break; case 6: /* div */ switch(ot) { case OT_BYTE: gen_jmp_im(pc_start - s->cs_base); gen_helper_divb_AL(cpu_env, cpu_T[0]); break; case OT_WORD: gen_jmp_im(pc_start - s->cs_base); gen_helper_divw_AX(cpu_env, cpu_T[0]); break; default: case OT_LONG: gen_jmp_im(pc_start - s->cs_base); gen_helper_divl_EAX(cpu_env, cpu_T[0]); break; #ifdef TARGET_X86_64 case OT_QUAD: gen_jmp_im(pc_start - s->cs_base); gen_helper_divq_EAX(cpu_env, cpu_T[0]); break; #endif } break; case 7: /* idiv */ switch(ot) { case OT_BYTE: gen_jmp_im(pc_start - s->cs_base); gen_helper_idivb_AL(cpu_env, cpu_T[0]); break; case OT_WORD: gen_jmp_im(pc_start - s->cs_base); gen_helper_idivw_AX(cpu_env, cpu_T[0]); break; default: case OT_LONG: gen_jmp_im(pc_start - s->cs_base); gen_helper_idivl_EAX(cpu_env, cpu_T[0]); break; #ifdef TARGET_X86_64 case OT_QUAD: gen_jmp_im(pc_start - s->cs_base); gen_helper_idivq_EAX(cpu_env, cpu_T[0]); break; #endif } break; default: goto illegal_op; } break; case 0xfe: /* GRP4 */ case 0xff: /* GRP5 */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (op >= 2 && b == 0xfe) { goto illegal_op; } if (CODE64(s)) { if (op == 2 || op == 4) { /* operand size for jumps is 64 bit */ ot = OT_QUAD; } else if (op == 3 || op == 5) { ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD; } else if (op == 6) { /* default push size is 64 bit */ ot = dflag ? 
OT_QUAD : OT_WORD; } } if (mod != 3) { gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); if (op >= 2 && op != 3 && op != 5) gen_op_ld_T0_A0(ot + s->mem_index); } else { gen_op_mov_TN_reg(ot, 0, rm); } switch(op) { case 0: /* inc Ev */ if (mod != 3) opreg = OR_TMP0; else opreg = rm; gen_inc(s, ot, opreg, 1); break; case 1: /* dec Ev */ if (mod != 3) opreg = OR_TMP0; else opreg = rm; gen_inc(s, ot, opreg, -1); break; case 2: /* call Ev */ /* XXX: optimize if memory (no 'and' is necessary) */ if (s->dflag == 0) gen_op_andl_T0_ffff(); next_eip = s->pc - s->cs_base; gen_movtl_T1_im(next_eip); gen_push_T1(s); gen_op_jmp_T0(); gen_eob(s); break; case 3: /* lcall Ev */ gen_op_ld_T1_A0(ot + s->mem_index); gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); gen_op_ldu_T0_A0(OT_WORD + s->mem_index); do_lcall: if (s->pe && !s->vm86) { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1], tcg_const_i32(dflag), tcg_const_i32(s->pc - pc_start)); } else { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1], tcg_const_i32(dflag), tcg_const_i32(s->pc - s->cs_base)); } gen_eob(s); break; case 4: /* jmp Ev */ if (s->dflag == 0) gen_op_andl_T0_ffff(); gen_op_jmp_T0(); gen_eob(s); break; case 5: /* ljmp Ev */ gen_op_ld_T1_A0(ot + s->mem_index); gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); gen_op_ldu_T0_A0(OT_WORD + s->mem_index); do_ljmp: if (s->pe && !s->vm86) { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1], tcg_const_i32(s->pc - pc_start)); } else { gen_op_movl_seg_T0_vm(R_CS); gen_op_movl_T0_T1(); gen_op_jmp_T0(); } gen_eob(s); break; case 6: /* push Ev */ gen_push_T0(s); break; default: goto illegal_op; } break; case 0x84: /* test Ev, Gv */ case 0x85: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_op_mov_TN_reg(ot, 1, reg); gen_op_testl_T0_T1_cc(); s->cc_op = CC_OP_LOGICB + ot; break; case 0xa8: /* test eAX, Iv */ case 0xa9: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; val = insn_get(env, s, ot); gen_op_mov_TN_reg(ot, 0, OR_EAX); gen_op_movl_T1_im(val); gen_op_testl_T0_T1_cc(); s->cc_op = CC_OP_LOGICB + ot; break; case 0x98: /* CWDE/CBW */ #ifdef TARGET_X86_64 if (dflag == 2) { gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); gen_op_mov_reg_T0(OT_QUAD, R_EAX); } else #endif if (dflag == 1) { gen_op_mov_TN_reg(OT_WORD, 0, R_EAX); tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); gen_op_mov_reg_T0(OT_LONG, R_EAX); } else { gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX); tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]); gen_op_mov_reg_T0(OT_WORD, R_EAX); } break; case 0x99: /* CDQ/CWD */ #ifdef TARGET_X86_64 if (dflag == 2) { gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX); tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63); gen_op_mov_reg_T0(OT_QUAD, R_EDX); } else #endif if (dflag == 1) { gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31); gen_op_mov_reg_T0(OT_LONG, R_EDX); } else { gen_op_mov_TN_reg(OT_WORD, 0, R_EAX); tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15); gen_op_mov_reg_T0(OT_WORD, R_EDX); } break; case 0x1af: /* imul Gv, Ev */ case 0x69: /* imul Gv, 
Ev, I */ case 0x6b: ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; if (b == 0x69) s->rip_offset = insn_const_size(ot); else if (b == 0x6b) s->rip_offset = 1; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); if (b == 0x69) { val = insn_get(env, s, ot); gen_op_movl_T1_im(val); } else if (b == 0x6b) { val = (int8_t)insn_get(env, s, OT_BYTE); gen_op_movl_T1_im(val); } else { gen_op_mov_TN_reg(ot, 1, reg); } #ifdef TARGET_X86_64 if (ot == OT_QUAD) { gen_helper_imulq_T0_T1(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); } else #endif if (ot == OT_LONG) { #ifdef TARGET_X86_64 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]); tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]); tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); #else { TCGv_i64 t0, t1; t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(t0, cpu_T[0]); tcg_gen_ext_i32_i64(t1, cpu_T[1]); tcg_gen_mul_i64(t0, t0, t1); tcg_gen_trunc_i64_i32(cpu_T[0], t0); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_trunc_i64_i32(cpu_T[1], t0); tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0); } #endif } else { tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]); tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); } gen_op_mov_reg_T0(ot, reg); s->cc_op = CC_OP_MULB + ot; break; case 0x1c0: case 0x1c1: /* xadd Ev, Gv */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (mod == 3) { rm = (modrm & 7) | REX_B(s); gen_op_mov_TN_reg(ot, 0, reg); gen_op_mov_TN_reg(ot, 1, rm); gen_op_addl_T0_T1(); gen_op_mov_reg_T1(ot, reg); gen_op_mov_reg_T0(ot, rm); } else { gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); gen_op_mov_TN_reg(ot, 0, reg); gen_op_ld_T1_A0(ot + s->mem_index); gen_op_addl_T0_T1(); gen_op_st_T0_A0(ot + s->mem_index); gen_op_mov_reg_T1(ot, reg); } gen_op_update2_cc(); s->cc_op = CC_OP_ADDB + ot; break; case 0x1b0: case 0x1b1: /* cmpxchg Ev, Gv */ { int label1, label2; TCGv t0, t1, t2, a0; if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; t0 = tcg_temp_local_new(); t1 = tcg_temp_local_new(); t2 = tcg_temp_local_new(); a0 = tcg_temp_local_new(); gen_op_mov_v_reg(ot, t1, reg); if (mod == 3) { rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(ot, t0, rm); } else { gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); tcg_gen_mov_tl(a0, cpu_A0); gen_op_ld_v(ot + s->mem_index, t0, a0); rm = 0; /* avoid warning */ } label1 = gen_new_label(); tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0); gen_extu(ot, t2); tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1); label2 = gen_new_label(); if (mod == 3) { gen_op_mov_reg_v(ot, R_EAX, t0); tcg_gen_br(label2); gen_set_label(label1); gen_op_mov_reg_v(ot, rm, t1); } else { /* perform no-op store cycle like physical cpu; must be before changing accumulator to ensure idempotency if the store faults and the instruction is restarted */ gen_op_st_v(ot + s->mem_index, t0, a0); gen_op_mov_reg_v(ot, R_EAX, t0); tcg_gen_br(label2); gen_set_label(label1); gen_op_st_v(ot + s->mem_index, t1, a0); } gen_set_label(label2); 
tcg_gen_mov_tl(cpu_cc_src, t0); tcg_gen_mov_tl(cpu_cc_dst, t2); s->cc_op = CC_OP_SUBB + ot; tcg_temp_free(t0); tcg_temp_free(t1); tcg_temp_free(t2); tcg_temp_free(a0); } break; case 0x1c7: /* cmpxchg8b */ modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; if ((mod == 3) || ((modrm & 0x38) != 0x8)) goto illegal_op; #ifdef TARGET_X86_64 if (dflag == 2) { if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) goto illegal_op; gen_jmp_im(pc_start - s->cs_base); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); gen_helper_cmpxchg16b(cpu_env, cpu_A0); } else #endif { if (!(s->cpuid_features & CPUID_CX8)) goto illegal_op; gen_jmp_im(pc_start - s->cs_base); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); gen_helper_cmpxchg8b(cpu_env, cpu_A0); } s->cc_op = CC_OP_EFLAGS; break; /**************************/ /* push/pop */ case 0x50 ... 0x57: /* push */ gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s)); gen_push_T0(s); break; case 0x58 ... 0x5f: /* pop */ if (CODE64(s)) { ot = dflag ? OT_QUAD : OT_WORD; } else { ot = dflag + OT_WORD; } gen_pop_T0(s); /* NOTE: order is important for pop %sp */ gen_pop_update(s); gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s)); break; case 0x60: /* pusha */ if (CODE64(s)) goto illegal_op; gen_pusha(s); break; case 0x61: /* popa */ if (CODE64(s)) goto illegal_op; gen_popa(s); break; case 0x68: /* push Iv */ case 0x6a: if (CODE64(s)) { ot = dflag ? OT_QUAD : OT_WORD; } else { ot = dflag + OT_WORD; } if (b == 0x68) val = insn_get(env, s, ot); else val = (int8_t)insn_get(env, s, OT_BYTE); gen_op_movl_T0_im(val); gen_push_T0(s); break; case 0x8f: /* pop Ev */ if (CODE64(s)) { ot = dflag ? OT_QUAD : OT_WORD; } else { ot = dflag + OT_WORD; } modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; gen_pop_T0(s); if (mod == 3) { /* NOTE: order is important for pop %sp */ gen_pop_update(s); rm = (modrm & 7) | REX_B(s); gen_op_mov_reg_T0(ot, rm); } else { /* NOTE: order is important too for MMU exceptions */ s->popl_esp_hack = 1 << ot; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); s->popl_esp_hack = 0; gen_pop_update(s); } break; case 0xc8: /* enter */ { int level; val = cpu_lduw_code(env, s->pc); s->pc += 2; level = cpu_ldub_code(env, s->pc++); gen_enter(s, val, level); } break; case 0xc9: /* leave */ /* XXX: exception not precise (ESP is updated before potential exception) */ if (CODE64(s)) { gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP); gen_op_mov_reg_T0(OT_QUAD, R_ESP); } else if (s->ss32) { gen_op_mov_TN_reg(OT_LONG, 0, R_EBP); gen_op_mov_reg_T0(OT_LONG, R_ESP); } else { gen_op_mov_TN_reg(OT_WORD, 0, R_EBP); gen_op_mov_reg_T0(OT_WORD, R_ESP); } gen_pop_T0(s); if (CODE64(s)) { ot = dflag ? OT_QUAD : OT_WORD; } else { ot = dflag + OT_WORD; } gen_op_mov_reg_T0(ot, R_EBP); gen_pop_update(s); break; case 0x06: /* push es */ case 0x0e: /* push cs */ case 0x16: /* push ss */ case 0x1e: /* push ds */ if (CODE64(s)) goto illegal_op; gen_op_movl_T0_seg(b >> 3); gen_push_T0(s); break; case 0x1a0: /* push fs */ case 0x1a8: /* push gs */ gen_op_movl_T0_seg((b >> 3) & 7); gen_push_T0(s); break; case 0x07: /* pop es */ case 0x17: /* pop ss */ case 0x1f: /* pop ds */ if (CODE64(s)) goto illegal_op; reg = b >> 3; gen_pop_T0(s); gen_movl_seg_T0(s, reg, pc_start - s->cs_base); gen_pop_update(s); if (reg == R_SS) { /* if reg == SS, inhibit interrupts/trace. 
*/ /* If several instructions disable interrupts, only the _first_ does it */ if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) gen_helper_set_inhibit_irq(cpu_env); s->tf = 0; } if (s->is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; case 0x1a1: /* pop fs */ case 0x1a9: /* pop gs */ gen_pop_T0(s); gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base); gen_pop_update(s); if (s->is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; /**************************/ /* mov */ case 0x88: case 0x89: /* mov Gv, Ev */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; /* generate a generic store */ gen_ldst_modrm(env, s, modrm, ot, reg, 1); break; case 0xc6: case 0xc7: /* mov Ev, Iv */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; if (mod != 3) { s->rip_offset = insn_const_size(ot); gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); } val = insn_get(env, s, ot); gen_op_movl_T0_im(val); if (mod != 3) gen_op_st_T0_A0(ot + s->mem_index); else gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s)); break; case 0x8a: case 0x8b: /* mov Ev, Gv */ if ((b & 1) == 0) ot = OT_BYTE; else ot = OT_WORD + dflag; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_op_mov_reg_T0(ot, reg); break; case 0x8e: /* mov seg, Gv */ modrm = cpu_ldub_code(env, s->pc++); reg = (modrm >> 3) & 7; if (reg >= 6 || reg == R_CS) goto illegal_op; gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0); gen_movl_seg_T0(s, reg, pc_start - s->cs_base); if (reg == R_SS) { /* if reg == SS, inhibit interrupts/trace */ /* If several instructions disable interrupts, only the _first_ does it */ if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) gen_helper_set_inhibit_irq(cpu_env); s->tf = 0; } if (s->is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; case 0x8c: /* mov Gv, seg */ modrm = cpu_ldub_code(env, s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (reg >= 6) goto illegal_op; gen_op_movl_T0_seg(reg); if (mod == 3) ot = OT_WORD + dflag; else ot = OT_WORD; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 0x1b6: /* movzbS Gv, Eb */ case 0x1b7: /* movzwS Gv, Eb */ case 0x1be: /* movsbS Gv, Eb */ case 0x1bf: /* movswS Gv, Eb */ { int d_ot; /* d_ot is the size of destination */ d_ot = dflag + OT_WORD; /* ot is the size of source */ ot = (b & 1) + OT_BYTE; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod == 3) { gen_op_mov_TN_reg(ot, 0, rm); switch(ot | (b & 8)) { case OT_BYTE: tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]); break; case OT_BYTE | 8: tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]); break; case OT_WORD: tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]); break; default: case OT_WORD | 8: tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); break; } gen_op_mov_reg_T0(d_ot, reg); } else { gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); if (b & 8) { gen_op_lds_T0_A0(ot + s->mem_index); } else { gen_op_ldu_T0_A0(ot + s->mem_index); } gen_op_mov_reg_T0(d_ot, reg); } } break; case 0x8d: /* lea */ ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; reg = ((modrm >> 3) & 7) | rex_r; /* we must ensure that no segment is added */ s->override = -1; val = s->addseg; s->addseg = 0; gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); s->addseg = val; gen_op_mov_reg_A0(ot - OT_WORD, reg); break; 
case 0xa0: /* mov EAX, Ov */ case 0xa1: case 0xa2: /* mov Ov, EAX */ case 0xa3: { target_ulong offset_addr; if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; #ifdef TARGET_X86_64 if (s->aflag == 2) { offset_addr = cpu_ldq_code(env, s->pc); s->pc += 8; gen_op_movq_A0_im(offset_addr); } else #endif { if (s->aflag) { offset_addr = insn_get(env, s, OT_LONG); } else { offset_addr = insn_get(env, s, OT_WORD); } gen_op_movl_A0_im(offset_addr); } gen_add_A0_ds_seg(s); if ((b & 2) == 0) { gen_op_ld_T0_A0(ot + s->mem_index); gen_op_mov_reg_T0(ot, R_EAX); } else { gen_op_mov_TN_reg(ot, 0, R_EAX); gen_op_st_T0_A0(ot + s->mem_index); } } break; case 0xd7: /* xlat */ #ifdef TARGET_X86_64 if (s->aflag == 2) { gen_op_movq_A0_reg(R_EBX); gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX); tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff); tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]); } else #endif { gen_op_movl_A0_reg(R_EBX); gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff); tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]); if (s->aflag == 0) gen_op_andl_A0_ffff(); else tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff); } gen_add_A0_ds_seg(s); gen_op_ldu_T0_A0(OT_BYTE + s->mem_index); gen_op_mov_reg_T0(OT_BYTE, R_EAX); break; case 0xb0 ... 0xb7: /* mov R, Ib */ val = insn_get(env, s, OT_BYTE); gen_op_movl_T0_im(val); gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s)); break; case 0xb8 ... 0xbf: /* mov R, Iv */ #ifdef TARGET_X86_64 if (dflag == 2) { uint64_t tmp; /* 64 bit case */ tmp = cpu_ldq_code(env, s->pc); s->pc += 8; reg = (b & 7) | REX_B(s); gen_movtl_T0_im(tmp); gen_op_mov_reg_T0(OT_QUAD, reg); } else #endif { ot = dflag ? OT_LONG : OT_WORD; val = insn_get(env, s, ot); reg = (b & 7) | REX_B(s); gen_op_movl_T0_im(val); gen_op_mov_reg_T0(ot, reg); } break; case 0x91 ... 0x97: /* xchg R, EAX */ do_xchg_reg_eax: ot = dflag + OT_WORD; reg = (b & 7) | REX_B(s); rm = R_EAX; goto do_xchg_reg; case 0x86: case 0x87: /* xchg Ev, Gv */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (mod == 3) { rm = (modrm & 7) | REX_B(s); do_xchg_reg: gen_op_mov_TN_reg(ot, 0, reg); gen_op_mov_TN_reg(ot, 1, rm); gen_op_mov_reg_T0(ot, rm); gen_op_mov_reg_T1(ot, reg); } else { gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); gen_op_mov_TN_reg(ot, 0, reg); /* for xchg, lock is implicit */ if (!(prefixes & PREFIX_LOCK)) gen_helper_lock(); gen_op_ld_T1_A0(ot + s->mem_index); gen_op_st_T0_A0(ot + s->mem_index); if (!(prefixes & PREFIX_LOCK)) gen_helper_unlock(); gen_op_mov_reg_T1(ot, reg); } break; case 0xc4: /* les Gv */ if (CODE64(s)) goto illegal_op; op = R_ES; goto do_lxx; case 0xc5: /* lds Gv */ if (CODE64(s)) goto illegal_op; op = R_DS; goto do_lxx; case 0x1b2: /* lss Gv */ op = R_SS; goto do_lxx; case 0x1b4: /* lfs Gv */ op = R_FS; goto do_lxx; case 0x1b5: /* lgs Gv */ op = R_GS; do_lxx: ot = dflag ? 
OT_LONG : OT_WORD; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); gen_op_ld_T1_A0(ot + s->mem_index); gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); /* load the segment first to handle exceptions properly */ gen_op_ldu_T0_A0(OT_WORD + s->mem_index); gen_movl_seg_T0(s, op, pc_start - s->cs_base); /* then put the data */ gen_op_mov_reg_T1(ot, reg); if (s->is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; /************************/ /* shifts */ case 0xc0: case 0xc1: /* shift Ev,Ib */ shift = 2; grp2: { if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; if (mod != 3) { if (shift == 2) { s->rip_offset = 1; } gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); opreg = OR_TMP0; } else { opreg = (modrm & 7) | REX_B(s); } /* simpler op */ if (shift == 0) { gen_shift(s, op, ot, opreg, OR_ECX); } else { if (shift == 2) { shift = cpu_ldub_code(env, s->pc++); } gen_shifti(s, op, ot, opreg, shift); } } break; case 0xd0: case 0xd1: /* shift Ev,1 */ shift = 1; goto grp2; case 0xd2: case 0xd3: /* shift Ev,cl */ shift = 0; goto grp2; case 0x1a4: /* shld imm */ op = 0; shift = 1; goto do_shiftd; case 0x1a5: /* shld cl */ op = 0; shift = 0; goto do_shiftd; case 0x1ac: /* shrd imm */ op = 1; shift = 1; goto do_shiftd; case 0x1ad: /* shrd cl */ op = 1; shift = 0; do_shiftd: ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (mod != 3) { gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); opreg = OR_TMP0; } else { opreg = rm; } gen_op_mov_TN_reg(ot, 1, reg); if (shift) { val = cpu_ldub_code(env, s->pc++); tcg_gen_movi_tl(cpu_T3, val); } else { tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]); } gen_shiftd_rm_T1_T3(s, ot, opreg, op); break; /************************/ /* floats */ case 0xd8 ... 0xdf: if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { /* if CR0.EM or CR0.TS are set, generate an FPU exception */ /* XXX: what to do if illegal op ? */ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; rm = modrm & 7; op = ((b & 7) << 3) | ((modrm >> 3) & 7); if (mod != 3) { /* memory op */ gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); switch(op) { case 0x00 ... 0x07: /* fxxxs */ case 0x10 ... 0x17: /* fixxxl */ case 0x20 ... 0x27: /* fxxxl */ case 0x30 ... 0x37: /* fixxx */ { int op1; op1 = op & 7; switch(op >> 4) { case 0: gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32); break; case 1: gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32); break; case 2: tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64); break; case 3: default: gen_op_lds_T0_A0(OT_WORD + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32); break; } gen_helper_fp_arith_ST0_FT0(op1); if (op1 == 3) { /* fcomp needs pop */ gen_helper_fpop(cpu_env); } } break; case 0x08: /* flds */ case 0x0a: /* fsts */ case 0x0b: /* fstps */ case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */ case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */ case 0x38 ... 
0x3b: /* filds, fisttps, fists, fistps */ switch(op & 7) { case 0: switch(op >> 4) { case 0: gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32); break; case 1: gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32); break; case 2: tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64); break; case 3: default: gen_op_lds_T0_A0(OT_WORD + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32); break; } break; case 1: /* XXX: the corresponding CPUID bit must be tested ! */ switch(op >> 4) { case 1: gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_LONG + s->mem_index); break; case 2: gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env); tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); break; case 3: default: gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_WORD + s->mem_index); break; } gen_helper_fpop(cpu_env); break; default: switch(op >> 4) { case 0: gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_LONG + s->mem_index); break; case 1: gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_LONG + s->mem_index); break; case 2: gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env); tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); break; case 3: default: gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_WORD + s->mem_index); break; } if ((op & 7) == 3) gen_helper_fpop(cpu_env); break; } break; case 0x0c: /* fldenv mem */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag)); break; case 0x0d: /* fldcw mem */ gen_op_ld_T0_A0(OT_WORD + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_fldcw(cpu_env, cpu_tmp2_i32); break; case 0x0e: /* fnstenv mem */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag)); break; case 0x0f: /* fnstcw mem */ gen_helper_fnstcw(cpu_tmp2_i32, cpu_env); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_WORD + s->mem_index); break; case 0x1d: /* fldt mem */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fldt_ST0(cpu_env, cpu_A0); break; case 0x1f: /* fstpt mem */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fstt_ST0(cpu_env, cpu_A0); gen_helper_fpop(cpu_env); break; case 0x2c: /* frstor mem */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag)); break; case 0x2e: /* fnsave mem */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag)); break; case 0x2f: /* fnstsw mem */ gen_helper_fnstsw(cpu_tmp2_i32, cpu_env); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_WORD + s->mem_index); break; case 0x3c: /* fbld */ if (s->cc_op != CC_OP_DYNAMIC) 
gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fbld_ST0(cpu_env, cpu_A0); break; case 0x3e: /* fbstp */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fbst_ST0(cpu_env, cpu_A0); gen_helper_fpop(cpu_env); break; case 0x3d: /* fildll */ tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64); break; case 0x3f: /* fistpll */ gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env); tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); gen_helper_fpop(cpu_env); break; default: goto illegal_op; } } else { /* register float ops */ opreg = rm; switch(op) { case 0x08: /* fld sti */ gen_helper_fpush(cpu_env); gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32((opreg + 1) & 7)); break; case 0x09: /* fxchg sti */ case 0x29: /* fxchg4 sti, undocumented op */ case 0x39: /* fxchg7 sti, undocumented op */ gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg)); break; case 0x0a: /* grp d9/2 */ switch(rm) { case 0: /* fnop */ /* check exceptions (FreeBSD FPU probe) */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fwait(cpu_env); break; default: goto illegal_op; } break; case 0x0c: /* grp d9/4 */ switch(rm) { case 0: /* fchs */ gen_helper_fchs_ST0(cpu_env); break; case 1: /* fabs */ gen_helper_fabs_ST0(cpu_env); break; case 4: /* ftst */ gen_helper_fldz_FT0(cpu_env); gen_helper_fcom_ST0_FT0(cpu_env); break; case 5: /* fxam */ gen_helper_fxam_ST0(cpu_env); break; default: goto illegal_op; } break; case 0x0d: /* grp d9/5 */ { switch(rm) { case 0: gen_helper_fpush(cpu_env); gen_helper_fld1_ST0(cpu_env); break; case 1: gen_helper_fpush(cpu_env); gen_helper_fldl2t_ST0(cpu_env); break; case 2: gen_helper_fpush(cpu_env); gen_helper_fldl2e_ST0(cpu_env); break; case 3: gen_helper_fpush(cpu_env); gen_helper_fldpi_ST0(cpu_env); break; case 4: gen_helper_fpush(cpu_env); gen_helper_fldlg2_ST0(cpu_env); break; case 5: gen_helper_fpush(cpu_env); gen_helper_fldln2_ST0(cpu_env); break; case 6: gen_helper_fpush(cpu_env); gen_helper_fldz_ST0(cpu_env); break; default: goto illegal_op; } } break; case 0x0e: /* grp d9/6 */ switch(rm) { case 0: /* f2xm1 */ gen_helper_f2xm1(cpu_env); break; case 1: /* fyl2x */ gen_helper_fyl2x(cpu_env); break; case 2: /* fptan */ gen_helper_fptan(cpu_env); break; case 3: /* fpatan */ gen_helper_fpatan(cpu_env); break; case 4: /* fxtract */ gen_helper_fxtract(cpu_env); break; case 5: /* fprem1 */ gen_helper_fprem1(cpu_env); break; case 6: /* fdecstp */ gen_helper_fdecstp(cpu_env); break; default: case 7: /* fincstp */ gen_helper_fincstp(cpu_env); break; } break; case 0x0f: /* grp d9/7 */ switch(rm) { case 0: /* fprem */ gen_helper_fprem(cpu_env); break; case 1: /* fyl2xp1 */ gen_helper_fyl2xp1(cpu_env); break; case 2: /* fsqrt */ gen_helper_fsqrt(cpu_env); break; case 3: /* fsincos */ gen_helper_fsincos(cpu_env); break; case 5: /* fscale */ gen_helper_fscale(cpu_env); break; case 4: /* frndint */ gen_helper_frndint(cpu_env); break; case 6: /* fsin */ gen_helper_fsin(cpu_env); break; default: case 7: /* fcos */ gen_helper_fcos(cpu_env); break; } break; case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */ case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */ case 0x30: case 0x31: case 0x34 ... 
0x37: /* fxxxp sti, st */ { int op1; op1 = op & 7; if (op >= 0x20) { gen_helper_fp_arith_STN_ST0(op1, opreg); if (op >= 0x30) gen_helper_fpop(cpu_env); } else { gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fp_arith_ST0_FT0(op1); } } break; case 0x02: /* fcom */ case 0x22: /* fcom2, undocumented op */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcom_ST0_FT0(cpu_env); break; case 0x03: /* fcomp */ case 0x23: /* fcomp3, undocumented op */ case 0x32: /* fcomp5, undocumented op */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); break; case 0x15: /* da/5 */ switch(rm) { case 1: /* fucompp */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1)); gen_helper_fucom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); gen_helper_fpop(cpu_env); break; default: goto illegal_op; } break; case 0x1c: switch(rm) { case 0: /* feni (287 only, just do nop here) */ break; case 1: /* fdisi (287 only, just do nop here) */ break; case 2: /* fclex */ gen_helper_fclex(cpu_env); break; case 3: /* fninit */ gen_helper_fninit(cpu_env); break; case 4: /* fsetpm (287 only, just do nop here) */ break; default: goto illegal_op; } break; case 0x1d: /* fucomi */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucomi_ST0_FT0(cpu_env); s->cc_op = CC_OP_EFLAGS; break; case 0x1e: /* fcomi */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcomi_ST0_FT0(cpu_env); s->cc_op = CC_OP_EFLAGS; break; case 0x28: /* ffree sti */ gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg)); break; case 0x2a: /* fst sti */ gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg)); break; case 0x2b: /* fstp sti */ case 0x0b: /* fstp1 sti, undocumented op */ case 0x3a: /* fstp8 sti, undocumented op */ case 0x3b: /* fstp9 sti, undocumented op */ gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg)); gen_helper_fpop(cpu_env); break; case 0x2c: /* fucom st(i) */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucom_ST0_FT0(cpu_env); break; case 0x2d: /* fucomp st(i) */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); break; case 0x33: /* de/3 */ switch(rm) { case 1: /* fcompp */ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1)); gen_helper_fcom_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); gen_helper_fpop(cpu_env); break; default: goto illegal_op; } break; case 0x38: /* ffreep sti, undocumented op */ gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fpop(cpu_env); break; case 0x3c: /* df/4 */ switch(rm) { case 0: gen_helper_fnstsw(cpu_tmp2_i32, cpu_env); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_mov_reg_T0(OT_WORD, R_EAX); break; default: goto illegal_op; } break; case 0x3d: /* fucomip */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fucomi_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); s->cc_op = CC_OP_EFLAGS; break; case 0x3e: /* fcomip */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg)); gen_helper_fcomi_ST0_FT0(cpu_env); gen_helper_fpop(cpu_env); s->cc_op = CC_OP_EFLAGS; break; case 0x10 ... 0x13: /* fcmovxx */ case 0x18 ... 
0x1b: { int op1, l1; static const uint8_t fcmov_cc[8] = { (JCC_B << 1), (JCC_Z << 1), (JCC_BE << 1), (JCC_P << 1), }; op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); l1 = gen_new_label(); gen_jcc1(s, op1, l1); gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg)); gen_set_label(l1); } break; default: goto illegal_op; } } break; /************************/ /* string ops */ case 0xa4: /* movsS */ case 0xa5: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_movs(s, ot); } break; case 0xaa: /* stosS */ case 0xab: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_stos(s, ot); } break; case 0xac: /* lodsS */ case 0xad: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_lods(s, ot); } break; case 0xae: /* scasS */ case 0xaf: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; if (prefixes & PREFIX_REPNZ) { gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); } else if (prefixes & PREFIX_REPZ) { gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); } else { gen_scas(s, ot); } break; case 0xa6: /* cmpsS */ case 0xa7: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; if (prefixes & PREFIX_REPNZ) { gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); } else if (prefixes & PREFIX_REPZ) { gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); } else { gen_cmps(s, ot); } break; case 0x6c: /* insS */ case 0x6d: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); gen_op_andl_T0_ffff(); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_ins(s, ot); if (use_icount) { gen_jmp(s, s->pc - s->cs_base); } } break; case 0x6e: /* outsS */ case 0x6f: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); gen_op_andl_T0_ffff(); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes) | 4); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_outs(s, ot); if (use_icount) { gen_jmp(s, s->pc - s->cs_base); } } break; /************************/ /* port I/O */ case 0xe4: case 0xe5: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; val = cpu_ldub_code(env, s->pc++); gen_op_movl_T0_im(val); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); if (use_icount) gen_io_start(); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32); gen_op_mov_reg_T1(ot, R_EAX); if (use_icount) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0xe6: case 0xe7: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? 
OT_LONG : OT_WORD; val = cpu_ldub_code(env, s->pc++); gen_op_movl_T0_im(val); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); gen_op_mov_TN_reg(ot, 1, R_EAX); if (use_icount) gen_io_start(); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]); gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); if (use_icount) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0xec: case 0xed: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); gen_op_andl_T0_ffff(); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); if (use_icount) gen_io_start(); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32); gen_op_mov_reg_T1(ot, R_EAX); if (use_icount) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0xee: case 0xef: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); gen_op_andl_T0_ffff(); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); gen_op_mov_TN_reg(ot, 1, R_EAX); if (use_icount) gen_io_start(); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]); gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); if (use_icount) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; /************************/ /* control */ case 0xc2: /* ret im */ val = cpu_ldsw_code(env, s->pc); s->pc += 2; gen_pop_T0(s); if (CODE64(s) && s->dflag) s->dflag = 2; gen_stack_update(s, val + (2 << s->dflag)); if (s->dflag == 0) gen_op_andl_T0_ffff(); gen_op_jmp_T0(); gen_eob(s); break; case 0xc3: /* ret */ gen_pop_T0(s); gen_pop_update(s); if (s->dflag == 0) gen_op_andl_T0_ffff(); gen_op_jmp_T0(); gen_eob(s); break; case 0xca: /* lret im */ val = cpu_ldsw_code(env, s->pc); s->pc += 2; do_lret: if (s->pe && !s->vm86) { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag), tcg_const_i32(val)); } else { gen_stack_A0(s); /* pop offset */ gen_op_ld_T0_A0(1 + s->dflag + s->mem_index); if (s->dflag == 0) gen_op_andl_T0_ffff(); /* NOTE: keeping EIP updated is not a problem in case of exception */ gen_op_jmp_T0(); /* pop selector */ gen_op_addl_A0_im(2 << s->dflag); gen_op_ld_T0_A0(1 + s->dflag + s->mem_index); gen_op_movl_seg_T0_vm(R_CS); /* add stack offset */ gen_stack_update(s, val + (4 << s->dflag)); } gen_eob(s); break; case 0xcb: /* lret */ val = 0; goto do_lret; case 0xcf: /* iret */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET); if (!s->pe) { /* real mode */ gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag)); s->cc_op = CC_OP_EFLAGS; } else if (s->vm86) { if (s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag)); s->cc_op = CC_OP_EFLAGS; } } else { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag), tcg_const_i32(s->pc - s->cs_base)); s->cc_op = CC_OP_EFLAGS; } gen_eob(s); break; case 0xe8: /* call im */ { if (dflag) tval = (int32_t)insn_get(env, s, OT_LONG); else tval = (int16_t)insn_get(env, s, OT_WORD); next_eip = s->pc - s->cs_base; tval += next_eip; if (s->dflag == 0) tval &= 0xffff; else if(!CODE64(s)) tval &= 0xffffffff; gen_movtl_T0_im(next_eip); gen_push_T0(s); gen_jmp(s, tval); } break; case 0x9a: /* lcall im */ { unsigned 
int selector, offset; if (CODE64(s)) goto illegal_op; ot = dflag ? OT_LONG : OT_WORD; offset = insn_get(env, s, ot); selector = insn_get(env, s, OT_WORD); gen_op_movl_T0_im(selector); gen_op_movl_T1_imu(offset); } goto do_lcall; case 0xe9: /* jmp im */ if (dflag) tval = (int32_t)insn_get(env, s, OT_LONG); else tval = (int16_t)insn_get(env, s, OT_WORD); tval += s->pc - s->cs_base; if (s->dflag == 0) tval &= 0xffff; else if(!CODE64(s)) tval &= 0xffffffff; gen_jmp(s, tval); break; case 0xea: /* ljmp im */ { unsigned int selector, offset; if (CODE64(s)) goto illegal_op; ot = dflag ? OT_LONG : OT_WORD; offset = insn_get(env, s, ot); selector = insn_get(env, s, OT_WORD); gen_op_movl_T0_im(selector); gen_op_movl_T1_imu(offset); } goto do_ljmp; case 0xeb: /* jmp Jb */ tval = (int8_t)insn_get(env, s, OT_BYTE); tval += s->pc - s->cs_base; if (s->dflag == 0) tval &= 0xffff; gen_jmp(s, tval); break; case 0x70 ... 0x7f: /* jcc Jb */ tval = (int8_t)insn_get(env, s, OT_BYTE); goto do_jcc; case 0x180 ... 0x18f: /* jcc Jv */ if (dflag) { tval = (int32_t)insn_get(env, s, OT_LONG); } else { tval = (int16_t)insn_get(env, s, OT_WORD); } do_jcc: next_eip = s->pc - s->cs_base; tval += next_eip; if (s->dflag == 0) tval &= 0xffff; gen_jcc(s, b, tval, next_eip); break; case 0x190 ... 0x19f: /* setcc Gv */ modrm = cpu_ldub_code(env, s->pc++); gen_setcc(s, b); gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1); break; case 0x140 ... 0x14f: /* cmov Gv, Ev */ { int l1; TCGv t0; ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; t0 = tcg_temp_local_new(); if (mod != 3) { gen_lea_modrm(env, s, modrm, ®_addr, &offset_addr); gen_op_ld_v(ot + s->mem_index, t0, cpu_A0); } else { rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(ot, t0, rm); } #ifdef TARGET_X86_64 if (ot == OT_LONG) { /* XXX: specific Intel behaviour ? 
*/ l1 = gen_new_label(); gen_jcc1(s, b ^ 1, l1); tcg_gen_mov_tl(cpu_regs[reg], t0); gen_set_label(l1); tcg_gen_ext32u_tl(cpu_regs[reg], cpu_regs[reg]); } else #endif { l1 = gen_new_label(); gen_jcc1(s, b ^ 1, l1); gen_op_mov_reg_v(ot, reg, t0); gen_set_label(l1); } tcg_temp_free(t0); } break; /************************/ /* flags */ case 0x9c: /* pushf */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_read_eflags(cpu_T[0], cpu_env); gen_push_T0(s); } break; case 0x9d: /* popf */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_pop_T0(s); if (s->cpl == 0) { if (s->dflag) { gen_helper_write_eflags(cpu_env, cpu_T[0], tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK))); } else { gen_helper_write_eflags(cpu_env, cpu_T[0], tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff)); } } else { if (s->cpl <= s->iopl) { if (s->dflag) { gen_helper_write_eflags(cpu_env, cpu_T[0], tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK))); } else { gen_helper_write_eflags(cpu_env, cpu_T[0], tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff)); } } else { if (s->dflag) { gen_helper_write_eflags(cpu_env, cpu_T[0], tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK))); } else { gen_helper_write_eflags(cpu_env, cpu_T[0], tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff)); } } } gen_pop_update(s); s->cc_op = CC_OP_EFLAGS; /* abort translation because TF/AC flag may change */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; case 0x9e: /* sahf */ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) goto illegal_op; gen_op_mov_TN_reg(OT_BYTE, 0, R_AH); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_compute_eflags(cpu_cc_src); tcg_gen_discard_tl(cpu_cc_dst); s->cc_op = CC_OP_EFLAGS; tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O); tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C); tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]); break; case 0x9f: /* lahf */ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_compute_eflags(cpu_T[0]); /* Note: gen_compute_eflags() only gives the condition codes */ tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02); gen_op_mov_reg_T0(OT_BYTE, R_AH); break; case 0xf5: /* cmc */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_compute_eflags(cpu_cc_src); tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C); s->cc_op = CC_OP_EFLAGS; break; case 0xf8: /* clc */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_compute_eflags(cpu_cc_src); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C); s->cc_op = CC_OP_EFLAGS; break; case 0xf9: /* stc */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_compute_eflags(cpu_cc_src); tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C); s->cc_op = CC_OP_EFLAGS; break; case 0xfc: /* cld */ tcg_gen_movi_i32(cpu_tmp2_i32, 1); tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); break; case 0xfd: /* std */ tcg_gen_movi_i32(cpu_tmp2_i32, -1); tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); break; /************************/ /* bit operations */ case 0x1ba: /* bt/bts/btr/btc Gv, im */ ot = dflag + OT_WORD; modrm = 
cpu_ldub_code(env, s->pc++); op = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod != 3) { s->rip_offset = 1; gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); gen_op_ld_T0_A0(ot + s->mem_index); } else { gen_op_mov_TN_reg(ot, 0, rm); } /* load shift */ val = cpu_ldub_code(env, s->pc++); gen_op_movl_T1_im(val); if (op < 4) goto illegal_op; op -= 4; goto bt_op; case 0x1a3: /* bt Gv, Ev */ op = 0; goto do_btx; case 0x1ab: /* bts */ op = 1; goto do_btx; case 0x1b3: /* btr */ op = 2; goto do_btx; case 0x1bb: /* btc */ op = 3; do_btx: ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); gen_op_mov_TN_reg(OT_LONG, 1, reg); if (mod != 3) { gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); /* specific case: we need to add a displacement */ gen_exts(ot, cpu_T[1]); tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot); tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot); tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); gen_op_ld_T0_A0(ot + s->mem_index); } else { gen_op_mov_TN_reg(ot, 0, rm); } bt_op: tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1); switch(op) { case 0: tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]); tcg_gen_movi_tl(cpu_cc_dst, 0); break; case 1: tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]); tcg_gen_movi_tl(cpu_tmp0, 1); tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]); tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0); break; case 2: tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]); tcg_gen_movi_tl(cpu_tmp0, 1); tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]); tcg_gen_not_tl(cpu_tmp0, cpu_tmp0); tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0); break; default: case 3: tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]); tcg_gen_movi_tl(cpu_tmp0, 1); tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]); tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0); break; } s->cc_op = CC_OP_SARB + ot; if (op != 0) { if (mod != 3) gen_op_st_T0_A0(ot + s->mem_index); else gen_op_mov_reg_T0(ot, rm); tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4); tcg_gen_movi_tl(cpu_cc_dst, 0); } break; case 0x1bc: /* bsf */ case 0x1bd: /* bsr */ { int label1; TCGv t0; ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_extu(ot, cpu_T[0]); t0 = tcg_temp_local_new(); tcg_gen_mov_tl(t0, cpu_T[0]); if ((b & 1) && (prefixes & PREFIX_REPZ) && (s->cpuid_ext3_features & CPUID_EXT3_ABM)) { switch(ot) { case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0, tcg_const_i32(16)); break; case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0, tcg_const_i32(32)); break; case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0, tcg_const_i32(64)); break; } gen_op_mov_reg_T0(ot, reg); } else { label1 = gen_new_label(); tcg_gen_movi_tl(cpu_cc_dst, 0); tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1); if (b & 1) { gen_helper_bsr(cpu_T[0], t0); } else { gen_helper_bsf(cpu_T[0], t0); } gen_op_mov_reg_T0(ot, reg); tcg_gen_movi_tl(cpu_cc_dst, 1); gen_set_label(label1); tcg_gen_discard_tl(cpu_cc_src); s->cc_op = CC_OP_LOGICB + ot; } tcg_temp_free(t0); } break; /************************/ /* bcd */ case 0x27: /* daa */ if (CODE64(s)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_daa(cpu_env); s->cc_op = CC_OP_EFLAGS; break; case 0x2f: /* das */ if (CODE64(s)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_das(cpu_env); s->cc_op = CC_OP_EFLAGS; break; case 0x37: /* aaa */ if (CODE64(s)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op); gen_helper_aaa(cpu_env); s->cc_op = CC_OP_EFLAGS; break; case 0x3f: /* aas */ if (CODE64(s)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_aas(cpu_env); s->cc_op = CC_OP_EFLAGS; break; case 0xd4: /* aam */ if (CODE64(s)) goto illegal_op; val = cpu_ldub_code(env, s->pc++); if (val == 0) { gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base); } else { gen_helper_aam(cpu_env, tcg_const_i32(val)); s->cc_op = CC_OP_LOGICB; } break; case 0xd5: /* aad */ if (CODE64(s)) goto illegal_op; val = cpu_ldub_code(env, s->pc++); gen_helper_aad(cpu_env, tcg_const_i32(val)); s->cc_op = CC_OP_LOGICB; break; /************************/ /* misc */ case 0x90: /* nop */ /* XXX: correct lock test for all insn */ if (prefixes & PREFIX_LOCK) { goto illegal_op; } /* If REX_B is set, then this is xchg eax, r8d, not a nop. */ if (REX_B(s)) { goto do_xchg_reg_eax; } if (prefixes & PREFIX_REPZ) { gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE); } break; case 0x9b: /* fwait */ if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == (HF_MP_MASK | HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); } else { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fwait(cpu_env); } break; case 0xcc: /* int3 */ gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base); break; case 0xcd: /* int N */ val = cpu_ldub_code(env, s->pc++); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base); } break; case 0xce: /* into */ if (CODE64(s)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start)); break; #ifdef WANT_ICEBP case 0xf1: /* icebp (undocumented, exits to external debugger) */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP); #if 1 gen_debug(s, pc_start - s->cs_base); #else /* start debug */ tb_flush(env); qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM); #endif break; #endif case 0xfa: /* cli */ if (!s->vm86) { if (s->cpl <= s->iopl) { gen_helper_cli(cpu_env); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } } else { if (s->iopl == 3) { gen_helper_cli(cpu_env); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } } break; case 0xfb: /* sti */ if (!s->vm86) { if (s->cpl <= s->iopl) { gen_sti: gen_helper_sti(cpu_env); /* interruptions are enabled only the first insn after sti */ /* If several instructions disable interrupts, only the _first_ does it */ if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) gen_helper_set_inhibit_irq(cpu_env); /* give a chance to handle pending irqs */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } } else { if (s->iopl == 3) { goto gen_sti; } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } } break; case 0x62: /* bound */ if (CODE64(s)) goto illegal_op; ot = dflag ? OT_LONG : OT_WORD; modrm = cpu_ldub_code(env, s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_op_mov_TN_reg(ot, 0, reg); gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); gen_jmp_im(pc_start - s->cs_base); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); if (ot == OT_WORD) { gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32); } else { gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32); } break; case 0x1c8 ...
0x1cf: /* bswap reg */ reg = (b & 7) | REX_B(s); #ifdef TARGET_X86_64 if (dflag == 2) { gen_op_mov_TN_reg(OT_QUAD, 0, reg); tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]); gen_op_mov_reg_T0(OT_QUAD, reg); } else #endif { gen_op_mov_TN_reg(OT_LONG, 0, reg); tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]); tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]); gen_op_mov_reg_T0(OT_LONG, reg); } break; case 0xd6: /* salc */ if (CODE64(s)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_compute_eflags_c(cpu_T[0]); tcg_gen_neg_tl(cpu_T[0], cpu_T[0]); gen_op_mov_reg_T0(OT_BYTE, R_EAX); break; case 0xe0: /* loopnz */ case 0xe1: /* loopz */ case 0xe2: /* loop */ case 0xe3: /* jecxz */ { int l1, l2, l3; tval = (int8_t)insn_get(env, s, OT_BYTE); next_eip = s->pc - s->cs_base; tval += next_eip; if (s->dflag == 0) tval &= 0xffff; l1 = gen_new_label(); l2 = gen_new_label(); l3 = gen_new_label(); b &= 3; switch(b) { case 0: /* loopnz */ case 1: /* loopz */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_add_reg_im(s->aflag, R_ECX, -1); gen_op_jz_ecx(s->aflag, l3); gen_compute_eflags(cpu_tmp0); tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z); if (b == 0) { tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1); } else { tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1); } break; case 2: /* loop */ gen_op_add_reg_im(s->aflag, R_ECX, -1); gen_op_jnz_ecx(s->aflag, l1); break; default: case 3: /* jcxz */ gen_op_jz_ecx(s->aflag, l1); break; } gen_set_label(l3); gen_jmp_im(next_eip); tcg_gen_br(l2); gen_set_label(l1); gen_jmp_im(tval); gen_set_label(l2); gen_eob(s); } break; case 0x130: /* wrmsr */ case 0x132: /* rdmsr */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); if (b & 2) { gen_helper_rdmsr(cpu_env); } else { gen_helper_wrmsr(cpu_env); } } break; case 0x131: /* rdtsc */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); if (use_icount) gen_io_start(); gen_helper_rdtsc(cpu_env); if (use_icount) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0x133: /* rdpmc */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_rdpmc(cpu_env); break; case 0x134: /* sysenter */ /* For Intel SYSENTER is valid on 64-bit */ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) goto illegal_op; if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_sysenter(cpu_env); gen_eob(s); } break; case 0x135: /* sysexit */ /* For Intel SYSEXIT is valid on 64-bit */ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) goto illegal_op; if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_sysexit(cpu_env, tcg_const_i32(dflag)); gen_eob(s); } break; #ifdef TARGET_X86_64 case 0x105: /* syscall */ /* XXX: is it usable in real mode ? 
*/ gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start)); gen_eob(s); break; case 0x107: /* sysret */ if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag)); /* condition codes are modified only in long mode */ if (s->lma) s->cc_op = CC_OP_EFLAGS; gen_eob(s); } break; #endif case 0x1a2: /* cpuid */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_cpuid(cpu_env); break; case 0xf4: /* hlt */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start)); s->is_jmp = DISAS_TB_JUMP; } break; case 0x100: modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { case 0: /* sldt */ if (!s->pe || s->vm86) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector)); ot = OT_WORD; if (mod == 3) ot += s->dflag; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 2: /* lldt */ if (!s->pe || s->vm86) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE); gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0); gen_jmp_im(pc_start - s->cs_base); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_lldt(cpu_env, cpu_tmp2_i32); } break; case 1: /* str */ if (!s->pe || s->vm86) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector)); ot = OT_WORD; if (mod == 3) ot += s->dflag; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1); break; case 3: /* ltr */ if (!s->pe || s->vm86) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE); gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0); gen_jmp_im(pc_start - s->cs_base); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_ltr(cpu_env, cpu_tmp2_i32); } break; case 4: /* verr */ case 5: /* verw */ if (!s->pe || s->vm86) goto illegal_op; gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); if (op == 4) { gen_helper_verr(cpu_env, cpu_T[0]); } else { gen_helper_verw(cpu_env, cpu_T[0]); } s->cc_op = CC_OP_EFLAGS; break; default: goto illegal_op; } break; case 0x101: modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; rm = modrm & 7; switch(op) { case 0: /* sgdt */ if (mod == 3) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ); gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit)); gen_op_st_T0_A0(OT_WORD + s->mem_index); gen_add_A0_im(s, 2); tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base)); if (!s->dflag) gen_op_andl_T0_im(0xffffff); gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index); break; case 1: if (mod == 3) { switch (rm) { case 0: /* monitor */ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); #ifdef TARGET_X86_64 if
(s->aflag == 2) { gen_op_movq_A0_reg(R_EAX); } else #endif { gen_op_movl_A0_reg(R_EAX); if (s->aflag == 0) gen_op_andl_A0_ffff(); } gen_add_A0_ds_seg(s); gen_helper_monitor(cpu_env, cpu_A0); break; case 1: /* mwait */ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) goto illegal_op; gen_update_cc_op(s); gen_jmp_im(pc_start - s->cs_base); gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start)); gen_eob(s); break; case 2: /* clac */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) || s->cpl != 0) { goto illegal_op; } gen_helper_clac(cpu_env); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; case 3: /* stac */ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) || s->cpl != 0) { goto illegal_op; } gen_helper_stac(cpu_env); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); break; default: goto illegal_op; } } else { /* sidt */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ); gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit)); gen_op_st_T0_A0(OT_WORD + s->mem_index); gen_add_A0_im(s, 2); tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base)); if (!s->dflag) gen_op_andl_T0_im(0xffffff); gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index); } break; case 2: /* lgdt */ case 3: /* lidt */ if (mod == 3) { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); switch(rm) { case 0: /* VMRUN */ if (!(s->flags & HF_SVME_MASK) || !s->pe) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } else { gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag), tcg_const_i32(s->pc - pc_start)); tcg_gen_exit_tb(0); s->is_jmp = DISAS_TB_JUMP; } break; case 1: /* VMMCALL */ if (!(s->flags & HF_SVME_MASK)) goto illegal_op; gen_helper_vmmcall(cpu_env); break; case 2: /* VMLOAD */ if (!(s->flags & HF_SVME_MASK) || !s->pe) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } else { gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag)); } break; case 3: /* VMSAVE */ if (!(s->flags & HF_SVME_MASK) || !s->pe) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } else { gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag)); } break; case 4: /* STGI */ if ((!(s->flags & HF_SVME_MASK) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || !s->pe) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } else { gen_helper_stgi(cpu_env); } break; case 5: /* CLGI */ if (!(s->flags & HF_SVME_MASK) || !s->pe) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } else { gen_helper_clgi(cpu_env); } break; case 6: /* SKINIT */ if ((!(s->flags & HF_SVME_MASK) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || !s->pe) goto illegal_op; gen_helper_skinit(cpu_env); break; case 7: /* INVLPGA */ if (!(s->flags & HF_SVME_MASK) || !s->pe) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } else { gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag)); } break; default: goto illegal_op; } } else if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, op==2 ?
SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE); gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); gen_op_ld_T1_A0(OT_WORD + s->mem_index); gen_add_A0_im(s, 2); gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index); if (!s->dflag) gen_op_andl_T0_im(0xffffff); if (op == 2) { tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base)); tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit)); } else { tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base)); tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit)); } } break; case 4: /* smsw */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0); #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4); #else tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0])); #endif gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1); break; case 6: /* lmsw */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0); gen_helper_lmsw(cpu_env, cpu_T[0]); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; case 7: if (mod != 3) { /* invlpg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); gen_helper_invlpg(cpu_env, cpu_A0); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } } else { switch (rm) { case 0: /* swapgs */ #ifdef TARGET_X86_64 if (CODE64(s)) { if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,segs[R_GS].base)); tcg_gen_ld_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,kernelgsbase)); tcg_gen_st_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,segs[R_GS].base)); tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,kernelgsbase)); } } else #endif { goto illegal_op; } break; case 1: /* rdtscp */ if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); if (use_icount) gen_io_start(); gen_helper_rdtscp(cpu_env); if (use_icount) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; default: goto illegal_op; } } break; default: goto illegal_op; } break; case 0x108: /* invd */ case 0x109: /* wbinvd */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, (b & 2) ?
SVM_EXIT_INVD : SVM_EXIT_WBINVD); /* nothing to do */ } break; case 0x63: /* arpl or movslS (x86_64) */ #ifdef TARGET_X86_64 if (CODE64(s)) { int d_ot; /* d_ot is the size of destination */ d_ot = dflag + OT_WORD; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod == 3) { gen_op_mov_TN_reg(OT_LONG, 0, rm); /* sign extend */ if (d_ot == OT_QUAD) tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); gen_op_mov_reg_T0(d_ot, reg); } else { gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); if (d_ot == OT_QUAD) { gen_op_lds_T0_A0(OT_LONG + s->mem_index); } else { gen_op_ld_T0_A0(OT_LONG + s->mem_index); } gen_op_mov_reg_T0(d_ot, reg); } } else #endif { int label1; TCGv t0, t1, t2, a0; if (!s->pe || s->vm86) goto illegal_op; t0 = tcg_temp_local_new(); t1 = tcg_temp_local_new(); t2 = tcg_temp_local_new(); ot = OT_WORD; modrm = cpu_ldub_code(env, s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; rm = modrm & 7; if (mod != 3) { gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); gen_op_ld_v(ot + s->mem_index, t0, cpu_A0); a0 = tcg_temp_local_new(); tcg_gen_mov_tl(a0, cpu_A0); } else { gen_op_mov_v_reg(ot, t0, rm); TCGV_UNUSED(a0); } gen_op_mov_v_reg(ot, t1, reg); tcg_gen_andi_tl(cpu_tmp0, t0, 3); tcg_gen_andi_tl(t1, t1, 3); tcg_gen_movi_tl(t2, 0); label1 = gen_new_label(); tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1); tcg_gen_andi_tl(t0, t0, ~3); tcg_gen_or_tl(t0, t0, t1); tcg_gen_movi_tl(t2, CC_Z); gen_set_label(label1); if (mod != 3) { gen_op_st_v(ot + s->mem_index, t0, a0); tcg_temp_free(a0); } else { gen_op_mov_reg_v(ot, rm, t0); } if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_compute_eflags(cpu_cc_src); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z); tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2); s->cc_op = CC_OP_EFLAGS; tcg_temp_free(t0); tcg_temp_free(t1); tcg_temp_free(t2); } break; case 0x102: /* lar */ case 0x103: /* lsl */ { int label1; TCGv t0; if (!s->pe || s->vm86) goto illegal_op; ot = dflag ? OT_LONG : OT_WORD; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0); t0 = tcg_temp_local_new(); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); if (b == 0x102) { gen_helper_lar(t0, cpu_env, cpu_T[0]); } else { gen_helper_lsl(t0, cpu_env, cpu_T[0]); } tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z); label1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1); gen_op_mov_reg_v(ot, reg, t0); gen_set_label(label1); s->cc_op = CC_OP_EFLAGS; tcg_temp_free(t0); } break; case 0x118: modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { case 0: /* prefetchnta */ case 1: /* prefetchnt0 */ case 2: /* prefetchnt0 */ case 3: /* prefetchnt0 */ if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); /* nothing more to do */ break; default: /* nop (multi byte) */ gen_nop_modrm(env, s, modrm); break; } break; case 0x119 ... 0x11f: /* nop (multi byte) */ modrm = cpu_ldub_code(env, s->pc++); gen_nop_modrm(env, s, modrm); break; case 0x120: /* mov reg, crN */ case 0x122: /* mov crN, reg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { modrm = cpu_ldub_code(env, s->pc++); /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). * AMD documentation (24594.pdf) and testing of * intel 386 and 486 processors all show that the mod bits * are assumed to be 1's, regardless of actual values.
*/ rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (CODE64(s)) ot = OT_QUAD; else ot = OT_LONG; if ((prefixes & PREFIX_LOCK) && (reg == 0) && (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) { reg = 8; } switch(reg) { case 0: case 2: case 3: case 4: case 8: if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); if (b & 2) { gen_op_mov_TN_reg(ot, 0, rm); gen_helper_write_crN(cpu_env, tcg_const_i32(reg), cpu_T[0]); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } else { gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg)); gen_op_mov_reg_T0(ot, rm); } break; default: goto illegal_op; } } break; case 0x121: /* mov reg, drN */ case 0x123: /* mov drN, reg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { modrm = cpu_ldub_code(env, s->pc++); /* Ignore the mod bits (assume (modrm&0xc0)==0xc0). * AMD documentation (24594.pdf) and testing of * intel 386 and 486 processors all show that the mod bits * are assumed to be 1's, regardless of actual values. */ rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (CODE64(s)) ot = OT_QUAD; else ot = OT_LONG; /* XXX: do it dynamically with CR4.DE bit */ if (reg == 4 || reg == 5 || reg >= 8) goto illegal_op; if (b & 2) { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg); gen_op_mov_TN_reg(ot, 0, rm); gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg])); gen_op_mov_reg_T0(ot, rm); } } break; case 0x106: /* clts */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); gen_helper_clts(cpu_env); /* abort block because static cpu state changed */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */ case 0x1c3: /* MOVNTI reg, mem */ if (!(s->cpuid_features & CPUID_SSE2)) goto illegal_op; ot = s->dflag == 2 ? 
OT_QUAD : OT_LONG; modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; reg = ((modrm >> 3) & 7) | rex_r; /* generate a generic store */ gen_ldst_modrm(env, s, modrm, ot, reg, 1); break; case 0x1ae: modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { case 0: /* fxsave */ if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) || (s->prefix & PREFIX_LOCK)) goto illegal_op; if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2))); break; case 1: /* fxrstor */ if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) || (s->prefix & PREFIX_LOCK)) goto illegal_op; if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fxrstor(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2))); break; case 2: /* ldmxcsr */ case 3: /* stmxcsr */ if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) || mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); if (op == 2) { gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32); } else { tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr)); gen_op_st_T0_A0(OT_LONG + s->mem_index); } break; case 5: /* lfence */ case 6: /* mfence */ if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2)) goto illegal_op; break; case 7: /* sfence / clflush */ if ((modrm & 0xc7) == 0xc0) { /* sfence */ /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */ if (!(s->cpuid_features & CPUID_SSE)) goto illegal_op; } else { /* clflush */ if (!(s->cpuid_features & CPUID_CLFLUSH)) goto illegal_op; gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); } break; default: goto illegal_op; } break; case 0x10d: /* 3DNow! prefetch(w) */ modrm = cpu_ldub_code(env, s->pc++); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr); /* ignore for now */ break; case 0x1aa: /* rsm */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM); if (!(s->flags & HF_SMM_MASK)) goto illegal_op; gen_update_cc_op(s); gen_jmp_im(s->pc - s->cs_base); gen_helper_rsm(cpu_env); gen_eob(s); break; case 0x1b8: /* SSE4.2 popcnt */ if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) != PREFIX_REPZ) goto illegal_op; if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) goto illegal_op; modrm = cpu_ldub_code(env, s->pc++); reg = ((modrm >> 3) & 7) | rex_r; if (s->prefix & PREFIX_DATA) ot = OT_WORD; else if (s->dflag != 2) ot = OT_LONG; else ot = OT_QUAD; gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot)); gen_op_mov_reg_T0(ot, reg); s->cc_op = CC_OP_EFLAGS; break; case 0x10e ... 0x10f: /* 3DNow! instructions, ignore prefixes */ s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA); case 0x110 ... 0x117: case 0x128 ... 0x12f: case 0x138 ... 0x13a: case 0x150 ... 0x179: case 0x17c ... 0x17f: case 0x1c2: case 0x1c4 ... 0x1c6: case 0x1d0 ...
0x1fe: gen_sse(env, s, b, pc_start, rex_r); break; default: goto illegal_op; } /* lock generation */ if (s->prefix & PREFIX_LOCK) gen_helper_unlock(); return s->pc; illegal_op: if (s->prefix & PREFIX_LOCK) gen_helper_unlock(); /* XXX: ensure that no lock was generated */ gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base); return s->pc; } | 2,874 |
0 | int qemu_timedate_diff(struct tm *tm) { time_t seconds; if (rtc_date_offset == -1) if (rtc_utc) seconds = mktimegm(tm); else { struct tm tmp = *tm; tmp.tm_isdst = -1; /* use timezone to figure it out */ seconds = mktime(&tmp); } else seconds = mktimegm(tm) + rtc_date_offset; return seconds - time(NULL); } | 2,875 |
0 | VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn) { uintptr_t key = (uintptr_t)bus; VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key); VTDAddressSpace *vtd_dev_as; char name[128]; if (!vtd_bus) { uintptr_t *new_key = g_malloc(sizeof(*new_key)); *new_key = (uintptr_t)bus; /* No corresponding free() */ vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \ X86_IOMMU_PCI_DEVFN_MAX); vtd_bus->bus = bus; g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus); } vtd_dev_as = vtd_bus->dev_as[devfn]; if (!vtd_dev_as) { snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn); vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace)); vtd_dev_as->bus = bus; vtd_dev_as->devfn = (uint8_t)devfn; vtd_dev_as->iommu_state = s; vtd_dev_as->context_cache_entry.context_cache_gen = 0; memory_region_init_iommu(&vtd_dev_as->iommu, OBJECT(s), &s->iommu_ops, "intel_iommu", UINT64_MAX); memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s), &vtd_mem_ir_ops, s, "intel_iommu_ir", VTD_INTERRUPT_ADDR_SIZE); memory_region_add_subregion(&vtd_dev_as->iommu, VTD_INTERRUPT_ADDR_FIRST, &vtd_dev_as->iommu_ir); address_space_init(&vtd_dev_as->as, &vtd_dev_as->iommu, name); } return vtd_dev_as; } | 2,876 |
0 | void qemu_announce_self(void) { static QEMUTimer *timer; timer = qemu_new_timer(rt_clock, qemu_announce_self_once, &timer); qemu_announce_self_once(&timer); } | 2,877 |
0 | static int mpegts_probe(AVProbeData *p) { const int size = p->buf_size; int maxscore = 0; int sumscore = 0; int i; int check_count = size / TS_FEC_PACKET_SIZE; #define CHECK_COUNT 10 #define CHECK_BLOCK 100 if (check_count < CHECK_COUNT) return 0; for (i = 0; i<check_count; i+=CHECK_BLOCK) { int left = FFMIN(check_count - i, CHECK_BLOCK); int score = analyze(p->buf + TS_PACKET_SIZE *i, TS_PACKET_SIZE *left, TS_PACKET_SIZE , NULL, 1); int dvhs_score = analyze(p->buf + TS_DVHS_PACKET_SIZE*i, TS_DVHS_PACKET_SIZE*left, TS_DVHS_PACKET_SIZE, NULL, 1); int fec_score = analyze(p->buf + TS_FEC_PACKET_SIZE *i, TS_FEC_PACKET_SIZE *left, TS_FEC_PACKET_SIZE , NULL, 1); score = FFMAX3(score, dvhs_score, fec_score); sumscore += score; maxscore = FFMAX(maxscore, score); } sumscore = sumscore * CHECK_COUNT / check_count; maxscore = maxscore * CHECK_COUNT / CHECK_BLOCK; av_dlog(0, "TS score: %d %d\n", sumscore, maxscore); if (sumscore > 6) return AVPROBE_SCORE_MAX + sumscore - CHECK_COUNT; else if (maxscore > 6) return AVPROBE_SCORE_MAX/2 + sumscore - CHECK_COUNT; else return 0; } | 2,878 |
0 | static void do_commit(int argc, const char **argv) { int i; for (i = 0; i < MAX_DISKS; i++) { if (bs_table[i]) bdrv_commit(bs_table[i]); } } | 2,879 |
0 | static int ppc_hash32_pp_check(int key, int pp, int nx) { int access; /* Compute access rights */ access = 0; if (key == 0) { switch (pp) { case 0x0: case 0x1: case 0x2: access |= PAGE_WRITE; /* No break here */ case 0x3: access |= PAGE_READ; break; } } else { switch (pp) { case 0x0: access = 0; break; case 0x1: case 0x3: access = PAGE_READ; break; case 0x2: access = PAGE_READ | PAGE_WRITE; break; } } if (nx == 0) { access |= PAGE_EXEC; } return access; } | 2,881 |
0 | static void tpm_tis_mmio_write_intern(void *opaque, hwaddr addr, uint64_t val, unsigned size, bool hw_access) { TPMState *s = opaque; TPMTISEmuState *tis = &s->s.tis; uint16_t off = addr & 0xffc; uint8_t shift = (addr & 0x3) * 8; uint8_t locty = tpm_tis_locality_from_addr(addr); uint8_t active_locty, l; int c, set_new_locty = 1; uint16_t len; uint32_t mask = (size == 1) ? 0xff : ((size == 2) ? 0xffff : ~0); DPRINTF("tpm_tis: write.%u(%08x) = %08x\n", size, (int)addr, (uint32_t)val); if (locty == 4 && !hw_access) { DPRINTF("tpm_tis: Access to locality 4 only allowed from hardware\n"); return; } if (tpm_backend_had_startup_error(s->be_driver)) { return; } val &= mask; if (shift) { val <<= shift; mask <<= shift; } mask ^= 0xffffffff; switch (off) { case TPM_TIS_REG_ACCESS: if ((val & TPM_TIS_ACCESS_SEIZE)) { val &= ~(TPM_TIS_ACCESS_REQUEST_USE | TPM_TIS_ACCESS_ACTIVE_LOCALITY); } active_locty = tis->active_locty; if ((val & TPM_TIS_ACCESS_ACTIVE_LOCALITY)) { /* give up locality if currently owned */ if (tis->active_locty == locty) { DPRINTF("tpm_tis: Releasing locality %d\n", locty); uint8_t newlocty = TPM_TIS_NO_LOCALITY; /* anybody wants the locality ? */ for (c = TPM_TIS_NUM_LOCALITIES - 1; c >= 0; c--) { if ((tis->loc[c].access & TPM_TIS_ACCESS_REQUEST_USE)) { DPRINTF("tpm_tis: Locality %d requests use.\n", c); newlocty = c; break; } } DPRINTF("tpm_tis: TPM_TIS_ACCESS_ACTIVE_LOCALITY: " "Next active locality: %d\n", newlocty); if (TPM_TIS_IS_VALID_LOCTY(newlocty)) { set_new_locty = 0; tpm_tis_prep_abort(s, locty, newlocty); } else { active_locty = TPM_TIS_NO_LOCALITY; } } else { /* not currently the owner; clear a pending request */ tis->loc[locty].access &= ~TPM_TIS_ACCESS_REQUEST_USE; } } if ((val & TPM_TIS_ACCESS_BEEN_SEIZED)) { tis->loc[locty].access &= ~TPM_TIS_ACCESS_BEEN_SEIZED; } if ((val & TPM_TIS_ACCESS_SEIZE)) { /* * allow seize if a locality is active and the requesting * locality is higher than the one that's active * OR * allow seize for requesting locality if no locality is * active */ while ((TPM_TIS_IS_VALID_LOCTY(tis->active_locty) && locty > tis->active_locty) || !TPM_TIS_IS_VALID_LOCTY(tis->active_locty)) { bool higher_seize = FALSE; /* already a pending SEIZE ? 
*/ if ((tis->loc[locty].access & TPM_TIS_ACCESS_SEIZE)) { break; } /* check for ongoing seize by a higher locality */ for (l = locty + 1; l < TPM_TIS_NUM_LOCALITIES; l++) { if ((tis->loc[l].access & TPM_TIS_ACCESS_SEIZE)) { higher_seize = TRUE; break; } } if (higher_seize) { break; } /* cancel any seize by a lower locality */ for (l = 0; l < locty - 1; l++) { tis->loc[l].access &= ~TPM_TIS_ACCESS_SEIZE; } tis->loc[locty].access |= TPM_TIS_ACCESS_SEIZE; DPRINTF("tpm_tis: TPM_TIS_ACCESS_SEIZE: " "Locality %d seized from locality %d\n", locty, tis->active_locty); DPRINTF("tpm_tis: TPM_TIS_ACCESS_SEIZE: Initiating abort.\n"); set_new_locty = 0; tpm_tis_prep_abort(s, tis->active_locty, locty); break; } } if ((val & TPM_TIS_ACCESS_REQUEST_USE)) { if (tis->active_locty != locty) { if (TPM_TIS_IS_VALID_LOCTY(tis->active_locty)) { tis->loc[locty].access |= TPM_TIS_ACCESS_REQUEST_USE; } else { /* no locality active -> make this one active now */ active_locty = locty; } } } if (set_new_locty) { tpm_tis_new_active_locality(s, active_locty); } break; case TPM_TIS_REG_INT_ENABLE: if (tis->active_locty != locty) { break; } tis->loc[locty].inte &= mask; tis->loc[locty].inte |= (val & (TPM_TIS_INT_ENABLED | TPM_TIS_INT_POLARITY_MASK | TPM_TIS_INTERRUPTS_SUPPORTED)); break; case TPM_TIS_REG_INT_VECTOR: /* hard wired -- ignore */ break; case TPM_TIS_REG_INT_STATUS: if (tis->active_locty != locty) { break; } /* clearing of interrupt flags */ if (((val & TPM_TIS_INTERRUPTS_SUPPORTED)) && (tis->loc[locty].ints & TPM_TIS_INTERRUPTS_SUPPORTED)) { tis->loc[locty].ints &= ~val; if (tis->loc[locty].ints == 0) { qemu_irq_lower(tis->irq); DPRINTF("tpm_tis: Lowering IRQ\n"); } } tis->loc[locty].ints &= ~(val & TPM_TIS_INTERRUPTS_SUPPORTED); break; case TPM_TIS_REG_STS: if (tis->active_locty != locty) { break; } val &= (TPM_TIS_STS_COMMAND_READY | TPM_TIS_STS_TPM_GO | TPM_TIS_STS_RESPONSE_RETRY); if (val == TPM_TIS_STS_COMMAND_READY) { switch (tis->loc[locty].state) { case TPM_TIS_STATE_READY: tis->loc[locty].w_offset = 0; tis->loc[locty].r_offset = 0; break; case TPM_TIS_STATE_IDLE: tis->loc[locty].sts = TPM_TIS_STS_COMMAND_READY; tis->loc[locty].state = TPM_TIS_STATE_READY; tpm_tis_raise_irq(s, locty, TPM_TIS_INT_COMMAND_READY); break; case TPM_TIS_STATE_EXECUTION: case TPM_TIS_STATE_RECEPTION: /* abort currently running command */ DPRINTF("tpm_tis: %s: Initiating abort.\n", __func__); tpm_tis_prep_abort(s, locty, locty); break; case TPM_TIS_STATE_COMPLETION: tis->loc[locty].w_offset = 0; tis->loc[locty].r_offset = 0; /* shortcut to ready state with C/R set */ tis->loc[locty].state = TPM_TIS_STATE_READY; if (!(tis->loc[locty].sts & TPM_TIS_STS_COMMAND_READY)) { tis->loc[locty].sts = TPM_TIS_STS_COMMAND_READY; tpm_tis_raise_irq(s, locty, TPM_TIS_INT_COMMAND_READY); } tis->loc[locty].sts &= ~(TPM_TIS_STS_DATA_AVAILABLE); break; } } else if (val == TPM_TIS_STS_TPM_GO) { switch (tis->loc[locty].state) { case TPM_TIS_STATE_RECEPTION: if ((tis->loc[locty].sts & TPM_TIS_STS_EXPECT) == 0) { tpm_tis_tpm_send(s, locty); } break; default: /* ignore */ break; } } else if (val == TPM_TIS_STS_RESPONSE_RETRY) { switch (tis->loc[locty].state) { case TPM_TIS_STATE_COMPLETION: tis->loc[locty].r_offset = 0; tis->loc[locty].sts = TPM_TIS_STS_VALID | TPM_TIS_STS_DATA_AVAILABLE; break; default: /* ignore */ break; } } break; case TPM_TIS_REG_DATA_FIFO: case TPM_TIS_REG_DATA_XFIFO ... 
TPM_TIS_REG_DATA_XFIFO_END: /* data fifo */ if (tis->active_locty != locty) { break; } if (tis->loc[locty].state == TPM_TIS_STATE_IDLE || tis->loc[locty].state == TPM_TIS_STATE_EXECUTION || tis->loc[locty].state == TPM_TIS_STATE_COMPLETION) { /* drop the byte */ } else { DPRINTF("tpm_tis: Data to send to TPM: %08x (size=%d)\n", val, size); if (tis->loc[locty].state == TPM_TIS_STATE_READY) { tis->loc[locty].state = TPM_TIS_STATE_RECEPTION; tis->loc[locty].sts = TPM_TIS_STS_EXPECT | TPM_TIS_STS_VALID; } val >>= shift; if (size > 4 - (addr & 0x3)) { /* prevent access beyond FIFO */ size = 4 - (addr & 0x3); } while ((tis->loc[locty].sts & TPM_TIS_STS_EXPECT) && size > 0) { if (tis->loc[locty].w_offset < tis->loc[locty].w_buffer.size) { tis->loc[locty].w_buffer. buffer[tis->loc[locty].w_offset++] = (uint8_t)val; val >>= 8; size--; } else { tis->loc[locty].sts = TPM_TIS_STS_VALID; } } /* check for complete packet */ if (tis->loc[locty].w_offset > 5 && (tis->loc[locty].sts & TPM_TIS_STS_EXPECT)) { /* we have a packet length - see if we have all of it */ #ifdef RAISE_STS_IRQ bool needIrq = !(tis->loc[locty].sts & TPM_TIS_STS_VALID); #endif len = tpm_tis_get_size_from_buffer(&tis->loc[locty].w_buffer); if (len > tis->loc[locty].w_offset) { tis->loc[locty].sts = TPM_TIS_STS_EXPECT | TPM_TIS_STS_VALID; } else { /* packet complete */ tis->loc[locty].sts = TPM_TIS_STS_VALID; } #ifdef RAISE_STS_IRQ if (needIrq) { tpm_tis_raise_irq(s, locty, TPM_TIS_INT_STS_VALID); } #endif } } break; } } | 2,882 |
0 | int smbios_entry_add(const char *t) { char buf[1024]; if (get_param_value(buf, sizeof(buf), "file", t)) { struct smbios_structure_header *header; struct smbios_table *table; int size = get_image_size(buf); if (size < sizeof(struct smbios_structure_header)) { fprintf(stderr, "Cannot read smbios file %s", buf); exit(1); } if (!smbios_entries) { smbios_entries_len = sizeof(uint16_t); smbios_entries = qemu_mallocz(smbios_entries_len); } smbios_entries = qemu_realloc(smbios_entries, smbios_entries_len + sizeof(*table) + size); table = (struct smbios_table *)(smbios_entries + smbios_entries_len); table->header.type = SMBIOS_TABLE_ENTRY; table->header.length = cpu_to_le16(sizeof(*table) + size); if (load_image(buf, table->data) != size) { fprintf(stderr, "Failed to load smbios file %s", buf); exit(1); } header = (struct smbios_structure_header *)(table->data); smbios_check_collision(header->type, SMBIOS_TABLE_ENTRY); smbios_entries_len += sizeof(*table) + size; (*(uint16_t *)smbios_entries) = cpu_to_le16(le16_to_cpu(*(uint16_t *)smbios_entries) + 1); return 0; } if (get_param_value(buf, sizeof(buf), "type", t)) { unsigned long type = strtoul(buf, NULL, 0); switch (type) { case 0: smbios_build_type_0_fields(t); return 0; case 1: smbios_build_type_1_fields(t); return 0; default: fprintf(stderr, "Don't know how to build fields for SMBIOS type " "%ld\n", type); exit(1); } } fprintf(stderr, "smbios: must specify type= or file=\n"); return -1; } | 2,883 |
0 | void do_mouse_set(Monitor *mon, const QDict *qdict) { QemuInputHandlerState *s; int index = qdict_get_int(qdict, "index"); int found = 0; QTAILQ_FOREACH(s, &handlers, node) { if (s->id == index) { found = 1; qemu_input_handler_activate(s); break; } } if (!found) { monitor_printf(mon, "Mouse at given index not found\n"); } qemu_input_check_mode_change(); } | 2,885 |
0 | static bool msix_is_masked(PCIDevice *dev, int vector) { return msix_vector_masked(dev, vector, dev->msix_function_masked); } | 2,886 |
0 | static int acpi_checksum(const uint8_t *data, int len) { int sum, i; sum = 0; for(i = 0; i < len; i++) sum += data[i]; return (-sum) & 0xff; } | 2,887 |
0 | static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2) { sc->base = get_seg_base(e1, e2); sc->limit = get_seg_limit(e1, e2); sc->flags = e2; } | 2,888 |
0 | static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block, int n, int coded, int intra, int rvlc) { int level, i, last, run; int dc_pred_dir; RLTable * rl; RL_VLC_ELEM * rl_vlc; const uint8_t * scan_table; int qmul, qadd; //Note intra & rvlc should be optimized away if this is inlined if(intra) { if(s->qscale < s->intra_dc_threshold){ /* DC coef */ if(s->partitioned_frame){ level = s->dc_val[0][ s->block_index[n] ]; if(n<4) level= FASTDIV((level + (s->y_dc_scale>>1)), s->y_dc_scale); else level= FASTDIV((level + (s->c_dc_scale>>1)), s->c_dc_scale); dc_pred_dir= (s->pred_dir_table[s->mb_x + s->mb_y*s->mb_stride]<<n)&32; }else{ level = mpeg4_decode_dc(s, n, &dc_pred_dir); if (level < 0) return -1; } block[0] = level; i = 0; }else{ i = -1; ff_mpeg4_pred_dc(s, n, 0, &dc_pred_dir, 0); } if (!coded) goto not_coded; if(rvlc){ rl = &rvlc_rl_intra; rl_vlc = rvlc_rl_intra.rl_vlc[0]; }else{ rl = &rl_intra; rl_vlc = rl_intra.rl_vlc[0]; } if (s->ac_pred) { if (dc_pred_dir == 0) scan_table = s->intra_v_scantable.permutated; /* left */ else scan_table = s->intra_h_scantable.permutated; /* top */ } else { scan_table = s->intra_scantable.permutated; } qmul=1; qadd=0; } else { i = -1; if (!coded) { s->block_last_index[n] = i; return 0; } if(rvlc) rl = &rvlc_rl_inter; else rl = &rl_inter; scan_table = s->intra_scantable.permutated; if(s->mpeg_quant){ qmul=1; qadd=0; if(rvlc){ rl_vlc = rvlc_rl_inter.rl_vlc[0]; }else{ rl_vlc = rl_inter.rl_vlc[0]; } }else{ qmul = s->qscale << 1; qadd = (s->qscale - 1) | 1; if(rvlc){ rl_vlc = rvlc_rl_inter.rl_vlc[s->qscale]; }else{ rl_vlc = rl_inter.rl_vlc[s->qscale]; } } } { OPEN_READER(re, &s->gb); for(;;) { UPDATE_CACHE(re, &s->gb); GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 0); if (level==0) { /* escape */ if(rvlc){ if(SHOW_UBITS(re, &s->gb, 1)==0){ av_log(s->avctx, AV_LOG_ERROR, "1. marker bit missing in rvlc esc\n"); return -1; }; SKIP_CACHE(re, &s->gb, 1); last= SHOW_UBITS(re, &s->gb, 1); SKIP_CACHE(re, &s->gb, 1); run= SHOW_UBITS(re, &s->gb, 6); LAST_SKIP_CACHE(re, &s->gb, 6); SKIP_COUNTER(re, &s->gb, 1+1+6); UPDATE_CACHE(re, &s->gb); if(SHOW_UBITS(re, &s->gb, 1)==0){ av_log(s->avctx, AV_LOG_ERROR, "2. marker bit missing in rvlc esc\n"); return -1; }; SKIP_CACHE(re, &s->gb, 1); level= SHOW_UBITS(re, &s->gb, 11); SKIP_CACHE(re, &s->gb, 11); if(SHOW_UBITS(re, &s->gb, 5)!=0x10){ av_log(s->avctx, AV_LOG_ERROR, "reverse esc missing\n"); return -1; }; SKIP_CACHE(re, &s->gb, 5); level= level * qmul + qadd; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_CACHE(re, &s->gb, 1); SKIP_COUNTER(re, &s->gb, 1+11+5+1); i+= run + 1; if(last) i+=192; }else{ int cache; cache= GET_CACHE(re, &s->gb); if(IS_3IV1) cache ^= 0xC0000000; if (cache&0x80000000) { if (cache&0x40000000) { /* third escape */ SKIP_CACHE(re, &s->gb, 2); last= SHOW_UBITS(re, &s->gb, 1); SKIP_CACHE(re, &s->gb, 1); run= SHOW_UBITS(re, &s->gb, 6); LAST_SKIP_CACHE(re, &s->gb, 6); SKIP_COUNTER(re, &s->gb, 2+1+6); UPDATE_CACHE(re, &s->gb); if(IS_3IV1){ level= SHOW_SBITS(re, &s->gb, 12); LAST_SKIP_BITS(re, &s->gb, 12); }else{ if(SHOW_UBITS(re, &s->gb, 1)==0){ av_log(s->avctx, AV_LOG_ERROR, "1. marker bit missing in 3. esc\n"); return -1; }; SKIP_CACHE(re, &s->gb, 1); level= SHOW_SBITS(re, &s->gb, 12); SKIP_CACHE(re, &s->gb, 12); if(SHOW_UBITS(re, &s->gb, 1)==0){ av_log(s->avctx, AV_LOG_ERROR, "2. marker bit missing in 3. 
esc\n"); return -1; }; LAST_SKIP_CACHE(re, &s->gb, 1); SKIP_COUNTER(re, &s->gb, 1+12+1); } #if 0 if(s->error_resilience >= FF_ER_COMPLIANT){ const int abs_level= ABS(level); if(abs_level<=MAX_LEVEL && run<=MAX_RUN){ const int run1= run - rl->max_run[last][abs_level] - 1; if(abs_level <= rl->max_level[last][run]){ av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, vlc encoding possible\n"); return -1; } if(s->error_resilience > FF_ER_COMPLIANT){ if(abs_level <= rl->max_level[last][run]*2){ av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 1 encoding possible\n"); return -1; } if(run1 >= 0 && abs_level <= rl->max_level[last][run1]){ av_log(s->avctx, AV_LOG_ERROR, "illegal 3. esc, esc 2 encoding possible\n"); return -1; } } } } #endif if (level>0) level= level * qmul + qadd; else level= level * qmul - qadd; if((unsigned)(level + 2048) > 4095){ if(s->error_resilience > FF_ER_COMPLIANT){ if(level > 2560 || level<-2560){ av_log(s->avctx, AV_LOG_ERROR, "|level| overflow in 3. esc, qp=%d\n", s->qscale); return -1; } } level= level<0 ? -2048 : 2047; } i+= run + 1; if(last) i+=192; } else { /* second escape */ #if MIN_CACHE_BITS < 20 LAST_SKIP_BITS(re, &s->gb, 2); UPDATE_CACHE(re, &s->gb); #else SKIP_BITS(re, &s->gb, 2); #endif GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1); i+= run + rl->max_run[run>>7][level/qmul] +1; //FIXME opt indexing level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); } } else { /* first escape */ #if MIN_CACHE_BITS < 19 LAST_SKIP_BITS(re, &s->gb, 1); UPDATE_CACHE(re, &s->gb); #else SKIP_BITS(re, &s->gb, 1); #endif GET_RL_VLC(level, run, re, &s->gb, rl_vlc, TEX_VLC_BITS, 2, 1); i+= run; level = level + rl->max_level[run>>7][(run-1)&63] * qmul;//FIXME opt indexing level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); } } } else { i+= run; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); } if (i > 62){ i-= 192; if(i&(~63)){ av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return -1; } block[scan_table[i]] = level; break; } block[scan_table[i]] = level; } CLOSE_READER(re, &s->gb); } not_coded: if (intra) { if(s->qscale >= s->intra_dc_threshold){ block[0] = ff_mpeg4_pred_dc(s, n, block[0], &dc_pred_dir, 0); i -= i>>31; //if(i == -1) i=0; } mpeg4_pred_ac(s, block, n, dc_pred_dir); if (s->ac_pred) { i = 63; /* XXX: not optimal */ } } s->block_last_index[n] = i; return 0; } | 2,889 |
0 | static int ea_probe(AVProbeData *p) { if (p->buf_size < 4) return 0; if (AV_RL32(&p->buf[0]) != SCHl_TAG) return 0; return AVPROBE_SCORE_MAX; } | 2,890 |
0 | static int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n) { int64_t delay_ns = 0; int64_t now = qemu_get_clock_ns(rt_clock); if (limit->next_slice_time < now) { limit->next_slice_time = now + SLICE_TIME; limit->dispatched = 0; } if (limit->dispatched + n > limit->slice_quota) { delay_ns = limit->next_slice_time - now; } else { limit->dispatched += n; } return delay_ns; } | 2,891 |
0 | static uint32_t iommu_mem_readw(void *opaque, target_phys_addr_t addr) { IOMMUState *s = opaque; target_phys_addr_t saddr; uint32_t ret; saddr = (addr - s->addr) >> 2; switch (saddr) { default: ret = s->regs[saddr]; break; case IOMMU_AFAR: case IOMMU_AFSR: ret = s->regs[saddr]; qemu_irq_lower(s->irq); break; } DPRINTF("read reg[%d] = %x\n", (int)saddr, ret); return ret; } | 2,893 |
0 | static void create_cpu_model_list(ObjectClass *klass, void *opaque) { struct CpuDefinitionInfoListData *cpu_list_data = opaque; CpuDefinitionInfoList **cpu_list = &cpu_list_data->list; CpuDefinitionInfoList *entry; CpuDefinitionInfo *info; char *name = g_strdup(object_class_get_name(klass)); S390CPUClass *scc = S390_CPU_CLASS(klass); /* strip off the -s390-cpu */ g_strrstr(name, "-" TYPE_S390_CPU)[0] = 0; info = g_new0(CpuDefinitionInfo, 1); info->name = name; info->has_migration_safe = true; info->migration_safe = scc->is_migration_safe; info->q_static = scc->is_static; info->q_typename = g_strdup(object_class_get_name(klass)); /* check for unavailable features */ if (cpu_list_data->model) { Object *obj; S390CPU *sc; obj = object_new(object_class_get_name(klass)); sc = S390_CPU(obj); if (sc->model) { info->has_unavailable_features = true; check_unavailable_features(cpu_list_data->model, sc->model, &info->unavailable_features); } object_unref(obj); } entry = g_new0(CpuDefinitionInfoList, 1); entry->value = info; entry->next = *cpu_list; *cpu_list = entry; } | 2,894 |
0 | static int kvm_mips_put_fpu_registers(CPUState *cs, int level) { MIPSCPU *cpu = MIPS_CPU(cs); CPUMIPSState *env = &cpu->env; int err, ret = 0; unsigned int i; /* Only put FPU state if we're emulating a CPU with an FPU */ if (env->CP0_Config1 & (1 << CP0C1_FP)) { /* FPU Control Registers */ if (level == KVM_PUT_FULL_STATE) { err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR, &env->active_fpu.fcr0); if (err < 0) { DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err); ret = err; } } err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR, &env->active_fpu.fcr31); if (err < 0) { DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err); ret = err; } /* Floating point registers */ for (i = 0; i < 32; ++i) { if (env->CP0_Status & (1 << CP0St_FR)) { err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i), &env->active_fpu.fpr[i].d); } else { err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i), &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]); } if (err < 0) { DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err); ret = err; } } } return ret; } | 2,896 |
0 | void qemu_free(void *ptr) { free(ptr); } | 2,897 |
0 | static void v9fs_mknod(void *opaque) { int mode; gid_t gid; int32_t fid; V9fsQID qid; int err = 0; int major, minor; size_t offset = 7; V9fsString name; struct stat stbuf; V9fsString fullname; V9fsFidState *fidp; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; v9fs_string_init(&fullname); pdu_unmarshal(pdu, offset, "dsdddd", &fid, &name, &mode, &major, &minor, &gid); fidp = get_fid(s, fid); if (fidp == NULL) { err = -ENOENT; goto out_nofid; } v9fs_string_sprintf(&fullname, "%s/%s", fidp->path.data, name.data); err = v9fs_co_mknod(s, &fullname, fidp->uid, gid, makedev(major, minor), mode); if (err < 0) { goto out; } err = v9fs_co_lstat(s, &fullname, &stbuf); if (err < 0) { goto out; } stat_to_qid(&stbuf, &qid); err = offset; err += pdu_marshal(pdu, offset, "Q", &qid); out: put_fid(s, fidp); out_nofid: complete_pdu(s, pdu, err); v9fs_string_free(&fullname); v9fs_string_free(&name); } | 2,898 |
0 | static void intel_hda_set_st_ctl(IntelHDAState *d, const IntelHDAReg *reg, uint32_t old) { bool output = reg->stream >= 4; IntelHDAStream *st = d->st + reg->stream; if (st->ctl & 0x01) { /* reset */ dprint(d, 1, "st #%d: reset\n", reg->stream); st->ctl = 0; } if ((st->ctl & 0x02) != (old & 0x02)) { uint32_t stnr = (st->ctl >> 20) & 0x0f; /* run bit flipped */ if (st->ctl & 0x02) { /* start */ dprint(d, 1, "st #%d: start %d (ring buf %d bytes)\n", reg->stream, stnr, st->cbl); intel_hda_parse_bdl(d, st); intel_hda_notify_codecs(d, stnr, true, output); } else { /* stop */ dprint(d, 1, "st #%d: stop %d\n", reg->stream, stnr); intel_hda_notify_codecs(d, stnr, false, output); } } intel_hda_update_irq(d); } | 2,899 |
0 | static void vnc_listen_read(void *opaque) { VncDisplay *vs = opaque; struct sockaddr_in addr; socklen_t addrlen = sizeof(addr); /* Catch-up */ vga_hw_update(); int csock = qemu_accept(vs->lsock, (struct sockaddr *)&addr, &addrlen); if (csock != -1) { vnc_connect(vs, csock); } } | 2,900 |
0 | static inline void int8x8_fmul_int32(DCADSPContext *dsp, float *dst, const int8_t *src, int scale) { dsp->int8x8_fmul_int32(dst, src, scale); } | 2,901 |
0 | static hwaddr get_offset(hwaddr phys_addr, DumpState *s) { RAMBlock *block; hwaddr offset = s->memory_offset; int64_t size_in_block, start; if (s->has_filter) { if (phys_addr < s->begin || phys_addr >= s->begin + s->length) { return -1; } } QTAILQ_FOREACH(block, &ram_list.blocks, next) { if (s->has_filter) { if (block->offset >= s->begin + s->length || block->offset + block->length <= s->begin) { /* This block is out of the range */ continue; } if (s->begin <= block->offset) { start = block->offset; } else { start = s->begin; } size_in_block = block->length - (start - block->offset); if (s->begin + s->length < block->offset + block->length) { size_in_block -= block->offset + block->length - (s->begin + s->length); } } else { start = block->offset; size_in_block = block->length; } if (phys_addr >= start && phys_addr < start + size_in_block) { return phys_addr - start + offset; } offset += size_in_block; } return -1; } | 2,902 |
0 | void net_cleanup(void) { VLANState *vlan; #if !defined(_WIN32) /* close network clients */ for(vlan = first_vlan; vlan != NULL; vlan = vlan->next) { VLANClientState *vc; for(vc = vlan->first_client; vc != NULL; vc = vc->next) { if (vc->fd_read == tap_receive) { char ifname[64]; TAPState *s = vc->opaque; if (strcmp(vc->model, "tap") == 0 && sscanf(vc->info_str, "ifname=%63s ", ifname) == 1 && s->down_script[0]) launch_script(s->down_script, ifname, s->fd); } #if defined(CONFIG_VDE) if (vc->fd_read == vde_from_qemu) { VDEState *s = vc->opaque; vde_close(s->vde); } #endif } } #endif } | 2,904 |
0 | static int64_t coroutine_fn qcow_co_get_block_status(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum) { BDRVQcowState *s = bs->opaque; int index_in_cluster, n; uint64_t cluster_offset; qemu_co_mutex_lock(&s->lock); cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0); qemu_co_mutex_unlock(&s->lock); index_in_cluster = sector_num & (s->cluster_sectors - 1); n = s->cluster_sectors - index_in_cluster; if (n > nb_sectors) n = nb_sectors; *pnum = n; if (!cluster_offset) { return 0; } if ((cluster_offset & QCOW_OFLAG_COMPRESSED) || s->cipher) { return BDRV_BLOCK_DATA; } cluster_offset |= (index_in_cluster << BDRV_SECTOR_BITS); return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | cluster_offset; } | 2,905 |
0 | static int v9fs_synth_rename(FsContext *ctx, const char *oldpath, const char *newpath) { errno = EPERM; return -1; } | 2,906 |
0 | static void gen_spr_604 (CPUPPCState *env) { /* Processor identification */ spr_register(env, SPR_PIR, "PIR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_pir, 0x00000000); /* Breakpoints */ /* XXX : not implemented */ spr_register(env, SPR_IABR, "IABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_DABR, "DABR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* Performance counters */ /* XXX : not implemented */ spr_register(env, SPR_MMCR0, "MMCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_MMCR1, "MMCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_PMC1, "PMC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_PMC2, "PMC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_PMC3, "PMC3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_PMC4, "PMC4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_SIAR, "SIAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); /* XXX : not implemented */ spr_register(env, SPR_SDA, "SDA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, SPR_NOACCESS, 0x00000000); /* External access control */ /* XXX : not implemented */ spr_register(env, SPR_EAR, "EAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); } | 2,907 |
0 | void helper_sysret(int dflag) { int cpl, selector; if (!(env->efer & MSR_EFER_SCE)) { raise_exception_err(EXCP06_ILLOP, 0); } cpl = env->hflags & HF_CPL_MASK; if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) { raise_exception_err(EXCP0D_GPF, 0); } selector = (env->star >> 48) & 0xffff; if (env->hflags & HF_LMA_MASK) { if (dflag == 2) { cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3, 0, 0xffffffff, DESC_G_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK); env->eip = ECX; } else { cpu_x86_load_seg_cache(env, R_CS, selector | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); env->eip = (uint32_t)ECX; } cpu_x86_load_seg_cache(env, R_SS, selector + 8, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_W_MASK | DESC_A_MASK); load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK); cpu_x86_set_cpl(env, 3); } else { cpu_x86_load_seg_cache(env, R_CS, selector | 3, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); env->eip = (uint32_t)ECX; cpu_x86_load_seg_cache(env, R_SS, selector + 8, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (3 << DESC_DPL_SHIFT) | DESC_W_MASK | DESC_A_MASK); env->eflags |= IF_MASK; cpu_x86_set_cpl(env, 3); } #ifdef CONFIG_KQEMU if (kqemu_is_ok(env)) { if (env->hflags & HF_LMA_MASK) CC_OP = CC_OP_EFLAGS; env->exception_index = -1; cpu_loop_exit(); } #endif } | 2,908 |
0 | static int parallels_probe(const uint8_t *buf, int buf_size, const char *filename) { const ParallelsHeader *ph = (const void *)buf; if (buf_size < sizeof(ParallelsHeader)) return 0; if ((!memcmp(ph->magic, HEADER_MAGIC, 16) || !memcmp(ph->magic, HEADER_MAGIC2, 16)) && (le32_to_cpu(ph->version) == HEADER_VERSION)) return 100; return 0; } | 2,909 |
0 | static int decode_coeffs(WMAProDecodeCtx *s, int c) { /* Integers 0..15 as single-precision floats. The table saves a costly int to float conversion, and storing the values as integers allows fast sign-flipping. */ static const int fval_tab[16] = { 0x00000000, 0x3f800000, 0x40000000, 0x40400000, 0x40800000, 0x40a00000, 0x40c00000, 0x40e00000, 0x41000000, 0x41100000, 0x41200000, 0x41300000, 0x41400000, 0x41500000, 0x41600000, 0x41700000, }; int vlctable; VLC* vlc; WMAProChannelCtx* ci = &s->channel[c]; int rl_mode = 0; int cur_coeff = 0; int num_zeros = 0; const uint16_t* run; const float* level; av_dlog(s->avctx, "decode coefficients for channel %i\n", c); vlctable = get_bits1(&s->gb); vlc = &coef_vlc[vlctable]; if (vlctable) { run = coef1_run; level = coef1_level; } else { run = coef0_run; level = coef0_level; } /** decode vector coefficients (consumes up to 167 bits per iteration for 4 vector coded large values) */ while ((s->transmit_num_vec_coeffs || !rl_mode) && (cur_coeff + 3 < ci->num_vec_coeffs)) { int vals[4]; int i; unsigned int idx; idx = get_vlc2(&s->gb, vec4_vlc.table, VLCBITS, VEC4MAXDEPTH); if (idx == HUFF_VEC4_SIZE - 1) { for (i = 0; i < 4; i += 2) { idx = get_vlc2(&s->gb, vec2_vlc.table, VLCBITS, VEC2MAXDEPTH); if (idx == HUFF_VEC2_SIZE - 1) { int v0, v1; v0 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH); if (v0 == HUFF_VEC1_SIZE - 1) v0 += ff_wma_get_large_val(&s->gb); v1 = get_vlc2(&s->gb, vec1_vlc.table, VLCBITS, VEC1MAXDEPTH); if (v1 == HUFF_VEC1_SIZE - 1) v1 += ff_wma_get_large_val(&s->gb); ((float*)vals)[i ] = v0; ((float*)vals)[i+1] = v1; } else { vals[i] = fval_tab[symbol_to_vec2[idx] >> 4 ]; vals[i+1] = fval_tab[symbol_to_vec2[idx] & 0xF]; } } } else { vals[0] = fval_tab[ symbol_to_vec4[idx] >> 12 ]; vals[1] = fval_tab[(symbol_to_vec4[idx] >> 8) & 0xF]; vals[2] = fval_tab[(symbol_to_vec4[idx] >> 4) & 0xF]; vals[3] = fval_tab[ symbol_to_vec4[idx] & 0xF]; } /** decode sign */ for (i = 0; i < 4; i++) { if (vals[i]) { int sign = get_bits1(&s->gb) - 1; *(uint32_t*)&ci->coeffs[cur_coeff] = vals[i] ^ sign<<31; num_zeros = 0; } else { ci->coeffs[cur_coeff] = 0; /** switch to run level mode when subframe_len / 128 zeros were found in a row */ rl_mode |= (++num_zeros > s->subframe_len >> 8); } ++cur_coeff; } } /** decode run level coded coefficients */ if (cur_coeff < s->subframe_len) { memset(&ci->coeffs[cur_coeff], 0, sizeof(*ci->coeffs) * (s->subframe_len - cur_coeff)); if (ff_wma_run_level_decode(s->avctx, &s->gb, vlc, level, run, 1, ci->coeffs, cur_coeff, s->subframe_len, s->subframe_len, s->esc_len, 0)) return AVERROR_INVALIDDATA; } return 0; } | 2,910 |
0 | static void qemu_chr_parse_ringbuf(QemuOpts *opts, ChardevBackend *backend, Error **errp) { int val; ChardevRingbuf *ringbuf; ringbuf = backend->u.ringbuf = g_new0(ChardevRingbuf, 1); qemu_chr_parse_common(opts, qapi_ChardevRingbuf_base(ringbuf)); val = qemu_opt_get_size(opts, "size", 0); if (val != 0) { ringbuf->has_size = true; ringbuf->size = val; } } | 2,912 |
0 | static coroutine_fn int sd_co_readv(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov) { SheepdogAIOCB acb; BDRVSheepdogState *s = bs->opaque; sd_aio_setup(&acb, s, qiov, sector_num, nb_sectors, AIOCB_READ_UDATA); retry: if (check_overlapping_aiocb(s, &acb)) { qemu_co_queue_wait(&s->overlapping_queue); goto retry; } sd_co_rw_vector(&acb); QLIST_REMOVE(&acb, aiocb_siblings); qemu_co_queue_restart_all(&s->overlapping_queue); return acb.ret; } | 2,915 |
0 | static void multiwrite_help(void) { printf( "\n" " writes a range of bytes from the given offset source from multiple buffers,\n" " in a batch of requests that may be merged by qemu\n" "\n" " Example:\n" " 'multiwrite 512 1k 1k ; 4k 1k' \n" " writes 2 kB at 512 bytes and 1 kB at 4 kB into the open file\n" "\n" " Writes into a segment of the currently open file, using a buffer\n" " filled with a set pattern (0xcdcdcdcd). The pattern byte is increased\n" " by one for each request contained in the multiwrite command.\n" " -P, -- use different pattern to fill file\n" " -C, -- report statistics in a machine parsable format\n" " -q, -- quiet mode, do not show I/O statistics\n" "\n"); } | 2,916 |
0 | MemoryRegionSection memory_region_find(MemoryRegion *address_space, hwaddr addr, uint64_t size) { AddressSpace *as = memory_region_to_address_space(address_space); AddrRange range = addrrange_make(int128_make64(addr), int128_make64(size)); FlatRange *fr = address_space_lookup(as, range); MemoryRegionSection ret = { .mr = NULL, .size = 0 }; if (!fr) { return ret; } while (fr > as->current_map->ranges && addrrange_intersects(fr[-1].addr, range)) { --fr; } ret.mr = fr->mr; range = addrrange_intersection(range, fr->addr); ret.offset_within_region = fr->offset_in_region; ret.offset_within_region += int128_get64(int128_sub(range.start, fr->addr.start)); ret.size = int128_get64(range.size); ret.offset_within_address_space = int128_get64(range.start); ret.readonly = fr->readonly; return ret; } | 2,917 |
0 | static int check_pow_970MP (CPUPPCState *env) { if (env->spr[SPR_HID0] & 0x01C00000) return 1; return 0; } | 2,918 |
0 | void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val) { trace_esp_mem_writeb(saddr, s->wregs[saddr], val); switch (saddr) { case ESP_TCHI: s->tchi_written = true; /* fall through */ case ESP_TCLO: case ESP_TCMID: s->rregs[ESP_RSTAT] &= ~STAT_TC; break; case ESP_FIFO: if (s->do_cmd) { if (s->cmdlen < TI_BUFSZ) { s->cmdbuf[s->cmdlen++] = val & 0xff; } else { trace_esp_error_fifo_overrun(); } } else if (s->ti_size == TI_BUFSZ - 1) { trace_esp_error_fifo_overrun(); } else { s->ti_size++; s->ti_buf[s->ti_wptr++] = val & 0xff; } break; case ESP_CMD: s->rregs[saddr] = val; if (val & CMD_DMA) { s->dma = 1; /* Reload DMA counter. */ s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO]; s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID]; s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI]; } else { s->dma = 0; } switch(val & CMD_CMD) { case CMD_NOP: trace_esp_mem_writeb_cmd_nop(val); break; case CMD_FLUSH: trace_esp_mem_writeb_cmd_flush(val); //s->ti_size = 0; s->rregs[ESP_RINTR] = INTR_FC; s->rregs[ESP_RSEQ] = 0; s->rregs[ESP_RFLAGS] = 0; break; case CMD_RESET: trace_esp_mem_writeb_cmd_reset(val); esp_soft_reset(s); break; case CMD_BUSRESET: trace_esp_mem_writeb_cmd_bus_reset(val); s->rregs[ESP_RINTR] = INTR_RST; if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) { esp_raise_irq(s); } break; case CMD_TI: handle_ti(s); break; case CMD_ICCS: trace_esp_mem_writeb_cmd_iccs(val); write_response(s); s->rregs[ESP_RINTR] = INTR_FC; s->rregs[ESP_RSTAT] |= STAT_MI; break; case CMD_MSGACC: trace_esp_mem_writeb_cmd_msgacc(val); s->rregs[ESP_RINTR] = INTR_DC; s->rregs[ESP_RSEQ] = 0; s->rregs[ESP_RFLAGS] = 0; esp_raise_irq(s); break; case CMD_PAD: trace_esp_mem_writeb_cmd_pad(val); s->rregs[ESP_RSTAT] = STAT_TC; s->rregs[ESP_RINTR] = INTR_FC; s->rregs[ESP_RSEQ] = 0; break; case CMD_SATN: trace_esp_mem_writeb_cmd_satn(val); break; case CMD_RSTATN: trace_esp_mem_writeb_cmd_rstatn(val); break; case CMD_SEL: trace_esp_mem_writeb_cmd_sel(val); handle_s_without_atn(s); break; case CMD_SELATN: trace_esp_mem_writeb_cmd_selatn(val); handle_satn(s); break; case CMD_SELATNS: trace_esp_mem_writeb_cmd_selatns(val); handle_satn_stop(s); break; case CMD_ENSEL: trace_esp_mem_writeb_cmd_ensel(val); s->rregs[ESP_RINTR] = 0; break; case CMD_DISSEL: trace_esp_mem_writeb_cmd_dissel(val); s->rregs[ESP_RINTR] = 0; esp_raise_irq(s); break; default: trace_esp_error_unhandled_command(val); break; } break; case ESP_WBUSID ... ESP_WSYNO: break; case ESP_CFG1: case ESP_CFG2: case ESP_CFG3: case ESP_RES3: case ESP_RES4: s->rregs[saddr] = val; break; case ESP_WCCF ... ESP_WTEST: break; default: trace_esp_error_invalid_write(val, saddr); return; } s->wregs[saddr] = val; } | 2,919 |
0 | static void decode_delta_e(uint8_t *dst, const uint8_t *buf, const uint8_t *buf_end, int w, int flag, int bpp, int dst_size) { int planepitch = FFALIGN(w, 16) >> 3; int pitch = planepitch * bpp; int planepitch_byte = (w + 7) / 8; unsigned entries, ofssrc; GetByteContext gb, ptrs; PutByteContext pb; int k; if (buf_end - buf <= 4 * bpp) return; bytestream2_init_writer(&pb, dst, dst_size); bytestream2_init(&ptrs, buf, bpp * 4); for (k = 0; k < bpp; k++) { ofssrc = bytestream2_get_be32(&ptrs); if (!ofssrc) continue; if (ofssrc >= buf_end - buf) continue; bytestream2_init(&gb, buf + ofssrc, buf_end - (buf + ofssrc)); entries = bytestream2_get_be16(&gb); while (entries) { int16_t opcode = bytestream2_get_be16(&gb); unsigned offset = bytestream2_get_be32(&gb); bytestream2_seek_p(&pb, (offset / planepitch_byte) * pitch + (offset % planepitch_byte) + k * planepitch, SEEK_SET); if (opcode >= 0) { uint16_t x = bytestream2_get_be16(&gb); while (opcode && bytestream2_get_bytes_left_p(&pb) > 0) { bytestream2_put_be16(&pb, x); bytestream2_skip_p(&pb, pitch - 2); opcode--; } } else { opcode = -opcode; while (opcode && bytestream2_get_bytes_left(&gb) > 0) { bytestream2_put_be16(&pb, bytestream2_get_be16(&gb)); bytestream2_skip_p(&pb, pitch - 2); opcode--; } } entries--; } } } | 2,921 |