Dataset columns (name, type, value range):

  project     stringclasses   2 values
  commit_id   stringlengths   40 to 40
  target      int64           0 to 1
  func        stringlengths   26 to 142k
  idx         int64           0 to 27.3k
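The column summary above follows the layout of a dataset-viewer export: each row below carries a project name (one of two values), a 40-character commit SHA, a 0/1 target label, the flattened source of one C function, and a running index. As a minimal sketch of that row shape (Python is used here purely for illustration; the dataset's actual loading code is not part of this dump), a single record taken verbatim from the rows below can be modelled and sanity-checked like this:

```python
# One row, copied from the dump below (idx 16,959), represented as a plain dict.
row = {
    "project": "qemu",
    "commit_id": "e69a17f65e9f12f33c48b04a789e49d40a8993f5",
    "target": 0,
    "func": "static inline int IRQ_testbit(IRQQueue *q, int n_IRQ) "
            "{ return test_bit(q->queue, n_IRQ); }",
    "idx": 16959,
}

# Sanity checks that mirror the column summary: two project values,
# full-length git SHA-1 commit ids, and a binary target label.
assert row["project"] in {"qemu", "FFmpeg"}
assert len(row["commit_id"]) == 40
assert row["target"] in (0, 1)
assert len(row["func"]) >= 26   # func lengths range from 26 characters upward
print(row["idx"], row["project"], row["target"], len(row["func"]))
```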
qemu
eb700029c7836798046191d62d595363d92c84d4
0
void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable, bool csum_enable, uint32_t gso_size) { struct tcp_hdr l4hdr; assert(pkt); /* csum has to be enabled if tso is. */ assert(csum_enable || !tso_enable); pkt->virt_hdr.gso_type = net_tx_pkt_get_gso_type(pkt, tso_enable); switch (pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_NONE: pkt->virt_hdr.hdr_len = 0; pkt->virt_hdr.gso_size = 0; break; case VIRTIO_NET_HDR_GSO_UDP: pkt->virt_hdr.gso_size = IP_FRAG_ALIGN_SIZE(gso_size); pkt->virt_hdr.hdr_len = pkt->hdr_len + sizeof(struct udp_header); break; case VIRTIO_NET_HDR_GSO_TCPV4: case VIRTIO_NET_HDR_GSO_TCPV6: iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags, 0, &l4hdr, sizeof(l4hdr)); pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t); pkt->virt_hdr.gso_size = IP_FRAG_ALIGN_SIZE(gso_size); break; default: g_assert_not_reached(); } if (csum_enable) { switch (pkt->l4proto) { case IP_PROTO_TCP: pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; pkt->virt_hdr.csum_start = pkt->hdr_len; pkt->virt_hdr.csum_offset = offsetof(struct tcp_hdr, th_sum); break; case IP_PROTO_UDP: pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; pkt->virt_hdr.csum_start = pkt->hdr_len; pkt->virt_hdr.csum_offset = offsetof(struct udp_hdr, uh_sum); break; default: break; } } }
16,957
qemu
df6126a7f21a1a032e41b15899ca29777399d5a2
0
static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx, uint32_t op1, uint32_t op2, int ret, int v1, int v2, int check_ret) { const char *opn = "mipsdsp add compare pick"; TCGv_i32 t0; TCGv t1; TCGv v1_t; TCGv v2_t; if ((ret == 0) && (check_ret == 1)) { /* Treat as NOP. */ MIPS_DEBUG("NOP"); return; } t0 = tcg_temp_new_i32(); t1 = tcg_temp_new(); v1_t = tcg_temp_new(); v2_t = tcg_temp_new(); gen_load_gpr(v1_t, v1); gen_load_gpr(v2_t, v2); switch (op1) { case OPC_APPEND_DSP: switch (op2) { case OPC_APPEND: tcg_gen_movi_i32(t0, v2); gen_helper_append(cpu_gpr[ret], cpu_gpr[ret], v1_t, t0); break; case OPC_PREPEND: tcg_gen_movi_i32(t0, v2); gen_helper_prepend(cpu_gpr[ret], v1_t, cpu_gpr[ret], t0); break; case OPC_BALIGN: tcg_gen_movi_i32(t0, v2); gen_helper_balign(cpu_gpr[ret], v1_t, cpu_gpr[ret], t0); break; default: /* Invid */ MIPS_INVAL("MASK APPEND"); generate_exception(ctx, EXCP_RI); break; } break; case OPC_CMPU_EQ_QB_DSP: switch (op2) { case OPC_CMPU_EQ_QB: check_dsp(ctx); gen_helper_cmpu_eq_qb(v1_t, v2_t, cpu_env); break; case OPC_CMPU_LT_QB: check_dsp(ctx); gen_helper_cmpu_lt_qb(v1_t, v2_t, cpu_env); break; case OPC_CMPU_LE_QB: check_dsp(ctx); gen_helper_cmpu_le_qb(v1_t, v2_t, cpu_env); break; case OPC_CMPGU_EQ_QB: check_dsp(ctx); gen_helper_cmpgu_eq_qb(cpu_gpr[ret], v1_t, v2_t); break; case OPC_CMPGU_LT_QB: check_dsp(ctx); gen_helper_cmpgu_lt_qb(cpu_gpr[ret], v1_t, v2_t); break; case OPC_CMPGU_LE_QB: check_dsp(ctx); gen_helper_cmpgu_le_qb(cpu_gpr[ret], v1_t, v2_t); break; case OPC_CMPGDU_EQ_QB: check_dspr2(ctx); gen_helper_cmpgu_eq_qb(t1, v1_t, v2_t); tcg_gen_mov_tl(cpu_gpr[ret], t1); tcg_gen_andi_tl(cpu_dspctrl, cpu_dspctrl, 0xF0FFFFFF); tcg_gen_shli_tl(t1, t1, 24); tcg_gen_or_tl(cpu_dspctrl, cpu_dspctrl, t1); break; case OPC_CMPGDU_LT_QB: check_dspr2(ctx); gen_helper_cmpgu_lt_qb(t1, v1_t, v2_t); tcg_gen_mov_tl(cpu_gpr[ret], t1); tcg_gen_andi_tl(cpu_dspctrl, cpu_dspctrl, 0xF0FFFFFF); tcg_gen_shli_tl(t1, t1, 24); tcg_gen_or_tl(cpu_dspctrl, cpu_dspctrl, t1); break; case OPC_CMPGDU_LE_QB: check_dspr2(ctx); gen_helper_cmpgu_le_qb(t1, v1_t, v2_t); tcg_gen_mov_tl(cpu_gpr[ret], t1); tcg_gen_andi_tl(cpu_dspctrl, cpu_dspctrl, 0xF0FFFFFF); tcg_gen_shli_tl(t1, t1, 24); tcg_gen_or_tl(cpu_dspctrl, cpu_dspctrl, t1); break; case OPC_CMP_EQ_PH: check_dsp(ctx); gen_helper_cmp_eq_ph(v1_t, v2_t, cpu_env); break; case OPC_CMP_LT_PH: check_dsp(ctx); gen_helper_cmp_lt_ph(v1_t, v2_t, cpu_env); break; case OPC_CMP_LE_PH: check_dsp(ctx); gen_helper_cmp_le_ph(v1_t, v2_t, cpu_env); break; case OPC_PICK_QB: check_dsp(ctx); gen_helper_pick_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_PICK_PH: check_dsp(ctx); gen_helper_pick_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_PACKRL_PH: check_dsp(ctx); gen_helper_packrl_ph(cpu_gpr[ret], v1_t, v2_t); break; } break; #ifdef TARGET_MIPS64 case OPC_CMPU_EQ_OB_DSP: switch (op2) { case OPC_CMP_EQ_PW: check_dsp(ctx); gen_helper_cmp_eq_pw(v1_t, v2_t, cpu_env); break; case OPC_CMP_LT_PW: check_dsp(ctx); gen_helper_cmp_lt_pw(v1_t, v2_t, cpu_env); break; case OPC_CMP_LE_PW: check_dsp(ctx); gen_helper_cmp_le_pw(v1_t, v2_t, cpu_env); break; case OPC_CMP_EQ_QH: check_dsp(ctx); gen_helper_cmp_eq_qh(v1_t, v2_t, cpu_env); break; case OPC_CMP_LT_QH: check_dsp(ctx); gen_helper_cmp_lt_qh(v1_t, v2_t, cpu_env); break; case OPC_CMP_LE_QH: check_dsp(ctx); gen_helper_cmp_le_qh(v1_t, v2_t, cpu_env); break; case OPC_CMPGDU_EQ_OB: check_dspr2(ctx); gen_helper_cmpgdu_eq_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_CMPGDU_LT_OB: check_dspr2(ctx); 
gen_helper_cmpgdu_lt_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_CMPGDU_LE_OB: check_dspr2(ctx); gen_helper_cmpgdu_le_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_CMPGU_EQ_OB: check_dsp(ctx); gen_helper_cmpgu_eq_ob(cpu_gpr[ret], v1_t, v2_t); break; case OPC_CMPGU_LT_OB: check_dsp(ctx); gen_helper_cmpgu_lt_ob(cpu_gpr[ret], v1_t, v2_t); break; case OPC_CMPGU_LE_OB: check_dsp(ctx); gen_helper_cmpgu_le_ob(cpu_gpr[ret], v1_t, v2_t); break; case OPC_CMPU_EQ_OB: check_dsp(ctx); gen_helper_cmpu_eq_ob(v1_t, v2_t, cpu_env); break; case OPC_CMPU_LT_OB: check_dsp(ctx); gen_helper_cmpu_lt_ob(v1_t, v2_t, cpu_env); break; case OPC_CMPU_LE_OB: check_dsp(ctx); gen_helper_cmpu_le_ob(v1_t, v2_t, cpu_env); break; case OPC_PACKRL_PW: check_dsp(ctx); gen_helper_packrl_pw(cpu_gpr[ret], v1_t, v2_t); break; case OPC_PICK_OB: check_dsp(ctx); gen_helper_pick_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_PICK_PW: check_dsp(ctx); gen_helper_pick_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; case OPC_PICK_QH: check_dsp(ctx); gen_helper_pick_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env); break; } break; case OPC_DAPPEND_DSP: switch (op2) { case OPC_DAPPEND: tcg_gen_movi_i32(t0, v2); gen_helper_dappend(cpu_gpr[ret], v1_t, cpu_gpr[ret], t0); break; case OPC_PREPENDD: tcg_gen_movi_i32(t0, v2); gen_helper_prependd(cpu_gpr[ret], v1_t, cpu_gpr[ret], t0); break; case OPC_PREPENDW: tcg_gen_movi_i32(t0, v2); gen_helper_prependw(cpu_gpr[ret], v1_t, cpu_gpr[ret], t0); break; case OPC_DBALIGN: tcg_gen_movi_i32(t0, v2); gen_helper_dbalign(cpu_gpr[ret], v1_t, cpu_gpr[ret], t0); break; default: /* Invalid */ MIPS_INVAL("MASK DAPPEND"); generate_exception(ctx, EXCP_RI); break; } break; #endif } tcg_temp_free_i32(t0); tcg_temp_free(t1); tcg_temp_free(v1_t); tcg_temp_free(v2_t); (void)opn; /* avoid a compiler warning */ MIPS_DEBUG("%s", opn); }
16,958
qemu
e69a17f65e9f12f33c48b04a789e49d40a8993f5
0
static inline int IRQ_testbit(IRQQueue *q, int n_IRQ) { return test_bit(q->queue, n_IRQ); }
16,959
FFmpeg
4db81f081743aeed366e8af7a748667818a27e0f
0
static int pic_arrays_init(HEVCContext *s) { int log2_min_cb_size = s->sps->log2_min_cb_size; int width = s->sps->width; int height = s->sps->height; int pic_size = width * height; int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) * ((height >> log2_min_cb_size) + 1); int ctb_count = s->sps->ctb_width * s->sps->ctb_height; int min_pu_width = width >> s->sps->log2_min_pu_size; int pic_height_in_min_pu = height >> s->sps->log2_min_pu_size; int pic_size_in_min_pu = min_pu_width * pic_height_in_min_pu; int pic_width_in_min_tu = width >> s->sps->log2_min_tb_size; int pic_height_in_min_tu = height >> s->sps->log2_min_tb_size; s->bs_width = width >> 3; s->bs_height = height >> 3; s->sao = av_mallocz_array(ctb_count, sizeof(*s->sao)); s->deblock = av_mallocz_array(ctb_count, sizeof(*s->deblock)); s->split_cu_flag = av_malloc(pic_size); if (!s->sao || !s->deblock || !s->split_cu_flag) goto fail; s->skip_flag = av_malloc(pic_size_in_ctb); s->tab_ct_depth = av_malloc(s->sps->min_cb_height * s->sps->min_cb_width); if (!s->skip_flag || !s->tab_ct_depth) goto fail; s->tab_ipm = av_malloc(pic_size_in_min_pu); s->cbf_luma = av_malloc(pic_width_in_min_tu * pic_height_in_min_tu); s->is_pcm = av_malloc(pic_size_in_min_pu); if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm) goto fail; s->filter_slice_edges = av_malloc(ctb_count); s->tab_slice_address = av_malloc(pic_size_in_ctb * sizeof(*s->tab_slice_address)); s->qp_y_tab = av_malloc(pic_size_in_ctb * sizeof(*s->qp_y_tab)); if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address) goto fail; s->horizontal_bs = av_mallocz(2 * s->bs_width * (s->bs_height + 1)); s->vertical_bs = av_mallocz(2 * s->bs_width * (s->bs_height + 1)); if (!s->horizontal_bs || !s->vertical_bs) goto fail; s->tab_mvf_pool = av_buffer_pool_init(pic_size_in_min_pu * sizeof(MvField), av_buffer_alloc); s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab), av_buffer_allocz); if (!s->tab_mvf_pool || !s->rpl_tab_pool) goto fail; return 0; fail: pic_arrays_free(s); return AVERROR(ENOMEM); }
16,960
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
0
static void ioreq_finish(struct ioreq *ioreq) { struct XenBlkDev *blkdev = ioreq->blkdev; LIST_REMOVE(ioreq, list); LIST_INSERT_HEAD(&blkdev->finished, ioreq, list); blkdev->requests_inflight--; blkdev->requests_finished++; }
16,961
qemu
f17fd4fdf0df3d2f3444399d04c38d22b9a3e1b7
0
static void test_qemu_strtosz_invalid(void) { const char *str; char *endptr = NULL; int64_t res; str = ""; res = qemu_strtosz(str, &endptr); g_assert_cmpint(res, ==, -EINVAL); g_assert(endptr == str); str = " \t "; res = qemu_strtosz(str, &endptr); g_assert_cmpint(res, ==, -EINVAL); g_assert(endptr == str); str = "crap"; res = qemu_strtosz(str, &endptr); g_assert_cmpint(res, ==, -EINVAL); g_assert(endptr == str); }
16,962
qemu
51b19ebe4320f3dcd93cea71235c1219318ddfd2
0
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) { VirtIOGPU *g = VIRTIO_GPU(vdev); struct virtio_gpu_ctrl_command *cmd; if (!virtio_queue_ready(vq)) { return; } #ifdef CONFIG_VIRGL if (!g->renderer_inited && g->use_virgl_renderer) { virtio_gpu_virgl_init(g); g->renderer_inited = true; } #endif cmd = g_new(struct virtio_gpu_ctrl_command, 1); while (virtqueue_pop(vq, &cmd->elem)) { cmd->vq = vq; cmd->error = 0; cmd->finished = false; cmd->waiting = false; QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next); cmd = g_new(struct virtio_gpu_ctrl_command, 1); } g_free(cmd); virtio_gpu_process_cmdq(g); #ifdef CONFIG_VIRGL if (g->use_virgl_renderer) { virtio_gpu_virgl_fence_poll(g); } #endif }
16,963
qemu
85f94f868fcd868f0f605e9d3c1ad6351c557190
0
static void handle_mousemotion(DisplayState *ds, SDL_Event *ev) { int max_x, max_y; if (is_graphic_console() && (kbd_mouse_is_absolute() || absolute_enabled)) { max_x = real_screen->w - 1; max_y = real_screen->h - 1; if (gui_grab && (ev->motion.x == 0 || ev->motion.y == 0 || ev->motion.x == max_x || ev->motion.y == max_y)) { sdl_grab_end(); } if (!gui_grab && SDL_GetAppState() & SDL_APPINPUTFOCUS && (ev->motion.x > 0 && ev->motion.x < max_x && ev->motion.y > 0 && ev->motion.y < max_y)) { sdl_grab_start(); } } if (gui_grab || kbd_mouse_is_absolute() || absolute_enabled) { sdl_send_mouse_event(ev->motion.xrel, ev->motion.yrel, 0, ev->motion.x, ev->motion.y, ev->motion.state); } }
16,964
qemu
81f3053b77f7d3a4d9100c425cd8cec99ee7a3d4
0
void helper_mwait(CPUX86State *env, int next_eip_addend) { CPUState *cs; X86CPU *cpu; if ((uint32_t)env->regs[R_ECX] != 0) { raise_exception(env, EXCP0D_GPF); } cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0); env->eip += next_eip_addend; cpu = x86_env_get_cpu(env); cs = CPU(cpu); /* XXX: not complete but not completely erroneous */ if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) { /* more than one CPU: do not sleep because another CPU may wake this one */ } else { do_hlt(cpu); } }
16,965
qemu
afff2b15e89ac81c113f2ebfd729aaa02b40edb6
1
QemuConsole *graphic_console_init(DeviceState *dev, uint32_t head, const GraphicHwOps *hw_ops, void *opaque) { Error *local_err = NULL; int width = 640; int height = 480; QemuConsole *s; DisplayState *ds; ds = get_alloc_displaystate(); trace_console_gfx_new(); s = new_console(ds, GRAPHIC_CONSOLE); s->hw_ops = hw_ops; s->hw = opaque; if (dev) { object_property_set_link(OBJECT(s), OBJECT(dev), "device", &local_err); object_property_set_int(OBJECT(s), head, "head", &local_err); } s->surface = qemu_create_displaysurface(width, height); return s; }
16,968
qemu
e30d1d8c7195848abb28a8c734a82b845b8b456a
1
static int ram_save_setup(QEMUFile *f, void *opaque) { RAMBlock *block; int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS; migration_bitmap = bitmap_new(ram_pages); bitmap_set(migration_bitmap, 0, ram_pages); migration_dirty_pages = ram_pages; mig_throttle_on = false; dirty_rate_high_cnt = 0; if (migrate_use_xbzrle()) { qemu_mutex_lock_iothread(); XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() / TARGET_PAGE_SIZE, TARGET_PAGE_SIZE); if (!XBZRLE.cache) { qemu_mutex_unlock_iothread(); DPRINTF("Error creating cache\n"); return -1; } qemu_mutex_init(&XBZRLE.lock); qemu_mutex_unlock_iothread(); /* We prefer not to abort if there is no memory */ XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE); if (!XBZRLE.encoded_buf) { DPRINTF("Error allocating encoded_buf\n"); return -1; } XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE); if (!XBZRLE.current_buf) { DPRINTF("Error allocating current_buf\n"); g_free(XBZRLE.encoded_buf); XBZRLE.encoded_buf = NULL; return -1; } acct_clear(); } qemu_mutex_lock_iothread(); qemu_mutex_lock_ramlist(); bytes_transferred = 0; reset_ram_globals(); memory_global_dirty_log_start(); migration_bitmap_sync(); qemu_mutex_unlock_iothread(); qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE); QTAILQ_FOREACH(block, &ram_list.blocks, next) { qemu_put_byte(f, strlen(block->idstr)); qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr)); qemu_put_be64(f, block->length); } qemu_mutex_unlock_ramlist(); ram_control_before_iterate(f, RAM_CONTROL_SETUP); ram_control_after_iterate(f, RAM_CONTROL_SETUP); qemu_put_be64(f, RAM_SAVE_FLAG_EOS); return 0; }
16,970
FFmpeg
a68a6a4fb19caecc91d5f7fe3ef4f83f6d3c4586
1
static int xan_decode_chroma(AVCodecContext *avctx, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; XanContext *s = avctx->priv_data; uint8_t *U, *V; unsigned chroma_off; int val, uval, vval; int i, j; const uint8_t *src, *src_end; const uint8_t *table; int mode, offset, dec_size; chroma_off = AV_RL32(buf + 4); if (!chroma_off) return 0; if (chroma_off + 10 >= avpkt->size) { av_log(avctx, AV_LOG_ERROR, "Invalid chroma block position\n"); return -1; } src = avpkt->data + 4 + chroma_off; table = src + 2; mode = bytestream_get_le16(&src); offset = bytestream_get_le16(&src) * 2; if (src - avpkt->data >= avpkt->size - offset) { av_log(avctx, AV_LOG_ERROR, "Invalid chroma block offset\n"); return -1; } memset(s->scratch_buffer, 0, s->buffer_size); dec_size = xan_unpack(s->scratch_buffer, s->buffer_size, src + offset, avpkt->size - offset - (src - avpkt->data)); if (dec_size < 0) { av_log(avctx, AV_LOG_ERROR, "Chroma unpacking failed\n"); return -1; } U = s->pic.data[1]; V = s->pic.data[2]; src = s->scratch_buffer; src_end = src + dec_size; if (mode) { for (j = 0; j < avctx->height >> 1; j++) { for (i = 0; i < avctx->width >> 1; i++) { val = *src++; if (val) { val = AV_RL16(table + (val << 1)); uval = (val >> 3) & 0xF8; vval = (val >> 8) & 0xF8; U[i] = uval | (uval >> 5); V[i] = vval | (vval >> 5); } if (src == src_end) return 0; } U += s->pic.linesize[1]; V += s->pic.linesize[2]; } } else { uint8_t *U2 = U + s->pic.linesize[1]; uint8_t *V2 = V + s->pic.linesize[2]; for (j = 0; j < avctx->height >> 2; j++) { for (i = 0; i < avctx->width >> 1; i += 2) { val = *src++; if (val) { val = AV_RL16(table + (val << 1)); uval = (val >> 3) & 0xF8; vval = (val >> 8) & 0xF8; U[i] = U[i+1] = U2[i] = U2[i+1] = uval | (uval >> 5); V[i] = V[i+1] = V2[i] = V2[i+1] = vval | (vval >> 5); } } U += s->pic.linesize[1] * 2; V += s->pic.linesize[2] * 2; U2 += s->pic.linesize[1] * 2; V2 += s->pic.linesize[2] * 2; } } return 0; }
16,971
FFmpeg
bd5c860fdbc33d19d2ff0f6d1f06de07c17560dd
1
static int av_thread_message_queue_send_locked(AVThreadMessageQueue *mq, void *msg, unsigned flags) { while (!mq->err_send && av_fifo_space(mq->fifo) < mq->elsize) { if ((flags & AV_THREAD_MESSAGE_NONBLOCK)) return AVERROR(EAGAIN); pthread_cond_wait(&mq->cond, &mq->lock); } if (mq->err_send) return mq->err_send; av_fifo_generic_write(mq->fifo, msg, mq->elsize, NULL); pthread_cond_signal(&mq->cond); return 0; }
16,972
FFmpeg
50a3c4c5d2634b5d4076a5b7c099729cbd59ac45
1
static int rsd_probe(AVProbeData *p) { if (!memcmp(p->buf, "RSD", 3) && p->buf[3] - '0' >= 2 && p->buf[3] - '0' <= 6) return AVPROBE_SCORE_EXTENSION; return 0; }
16,973
qemu
60fe637bf0e4d7989e21e50f52526444765c63b4
1
static uint64_t ntohll(uint64_t v) { union { uint32_t lv[2]; uint64_t llv; } u; u.llv = v; return ((uint64_t)ntohl(u.lv[0]) << 32) | (uint64_t) ntohl(u.lv[1]); }
16,974
FFmpeg
ac4b32df71bd932838043a4838b86d11e169707f
1
static av_always_inline int simple_limit(uint8_t *p, ptrdiff_t stride, int flim) { LOAD_PIXELS return 2 * FFABS(p0 - q0) + (FFABS(p1 - q1) >> 1) <= flim; }
16,975
FFmpeg
8155233413540c63e53a620ff5734fb4b0635611
1
void sample_dump(int fnum, INT32 *tab, int n) { static FILE *files[16], *f; char buf[512]; f = files[fnum]; if (!f) { sprintf(buf, "/tmp/out%d.pcm", fnum); f = fopen(buf, "w"); if (!f) return; files[fnum] = f; } if (fnum == 0) { int i; static int pos = 0; printf("pos=%d\n", pos); for(i=0;i<n;i++) { printf(" %f", (double)tab[i] / 32768.0); if ((i % 18) == 17) printf("\n"); } pos += n; } fwrite(tab, 1, n * sizeof(INT32), f); }
16,976
qemu
14a10fc39923b3af07c8c46d22cb20843bee3a72
1
static void mb_cpu_realizefn(DeviceState *dev, Error **errp) { MicroBlazeCPU *cpu = MICROBLAZE_CPU(dev); MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_GET_CLASS(dev); cpu_reset(CPU(cpu)); mcc->parent_realize(dev, errp); }
16,978
FFmpeg
70143a3954e1c4412efb2bf1a3a818adea2d3abf
0
static int dxva2_create_decoder(AVCodecContext *s) { InputStream *ist = s->opaque; int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR; DXVA2Context *ctx = ist->hwaccel_ctx; struct dxva_context *dxva_ctx = s->hwaccel_context; GUID *guid_list = NULL; unsigned guid_count = 0, i, j; GUID device_guid = GUID_NULL; const D3DFORMAT surface_format = (s->sw_pix_fmt == AV_PIX_FMT_YUV420P10) ? MKTAG('P','0','1','0') : MKTAG('N','V','1','2'); D3DFORMAT target_format = 0; DXVA2_VideoDesc desc = { 0 }; DXVA2_ConfigPictureDecode config; HRESULT hr; int surface_alignment, num_surfaces; int ret; AVDXVA2FramesContext *frames_hwctx; AVHWFramesContext *frames_ctx; hr = IDirectXVideoDecoderService_GetDecoderDeviceGuids(ctx->decoder_service, &guid_count, &guid_list); if (FAILED(hr)) { av_log(NULL, loglevel, "Failed to retrieve decoder device GUIDs\n"); goto fail; } for (i = 0; dxva2_modes[i].guid; i++) { D3DFORMAT *target_list = NULL; unsigned target_count = 0; const dxva2_mode *mode = &dxva2_modes[i]; if (mode->codec != s->codec_id) continue; for (j = 0; j < guid_count; j++) { if (IsEqualGUID(mode->guid, &guid_list[j])) break; } if (j == guid_count) continue; hr = IDirectXVideoDecoderService_GetDecoderRenderTargets(ctx->decoder_service, mode->guid, &target_count, &target_list); if (FAILED(hr)) { continue; } for (j = 0; j < target_count; j++) { const D3DFORMAT format = target_list[j]; if (format == surface_format) { target_format = format; break; } } CoTaskMemFree(target_list); if (target_format) { device_guid = *mode->guid; break; } } CoTaskMemFree(guid_list); if (IsEqualGUID(&device_guid, &GUID_NULL)) { av_log(NULL, loglevel, "No decoder device for codec found\n"); goto fail; } desc.SampleWidth = s->coded_width; desc.SampleHeight = s->coded_height; desc.Format = target_format; ret = dxva2_get_decoder_configuration(s, &device_guid, &desc, &config); if (ret < 0) { goto fail; } /* decoding MPEG-2 requires additional alignment on some Intel GPUs, but it causes issues for H.264 on certain AMD GPUs..... */ if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) surface_alignment = 32; /* the HEVC DXVA2 spec asks for 128 pixel aligned surfaces to ensure all coding features have enough room to work with */ else if (s->codec_id == AV_CODEC_ID_HEVC) surface_alignment = 128; else surface_alignment = 16; /* 4 base work surfaces */ num_surfaces = 4; /* add surfaces based on number of possible refs */ if (s->codec_id == AV_CODEC_ID_H264 || s->codec_id == AV_CODEC_ID_HEVC) num_surfaces += 16; else if (s->codec_id == AV_CODEC_ID_VP9) num_surfaces += 8; else num_surfaces += 2; /* add extra surfaces for frame threading */ if (s->active_thread_type & FF_THREAD_FRAME) num_surfaces += s->thread_count; ctx->hw_frames_ctx = av_hwframe_ctx_alloc(ctx->hw_device_ctx); if (!ctx->hw_frames_ctx) goto fail; frames_ctx = (AVHWFramesContext*)ctx->hw_frames_ctx->data; frames_hwctx = frames_ctx->hwctx; frames_ctx->format = AV_PIX_FMT_DXVA2_VLD; frames_ctx->sw_format = (target_format == MKTAG('P','0','1','0') ? 
AV_PIX_FMT_P010 : AV_PIX_FMT_NV12); frames_ctx->width = FFALIGN(s->coded_width, surface_alignment); frames_ctx->height = FFALIGN(s->coded_height, surface_alignment); frames_ctx->initial_pool_size = num_surfaces; frames_hwctx->surface_type = DXVA2_VideoDecoderRenderTarget; ret = av_hwframe_ctx_init(ctx->hw_frames_ctx); if (ret < 0) { av_log(NULL, loglevel, "Failed to initialize the HW frames context\n"); goto fail; } hr = IDirectXVideoDecoderService_CreateVideoDecoder(ctx->decoder_service, &device_guid, &desc, &config, frames_hwctx->surfaces, frames_hwctx->nb_surfaces, &frames_hwctx->decoder_to_release); if (FAILED(hr)) { av_log(NULL, loglevel, "Failed to create DXVA2 video decoder\n"); goto fail; } ctx->decoder_guid = device_guid; ctx->decoder_config = config; dxva_ctx->cfg = &ctx->decoder_config; dxva_ctx->decoder = frames_hwctx->decoder_to_release; dxva_ctx->surface = frames_hwctx->surfaces; dxva_ctx->surface_count = frames_hwctx->nb_surfaces; if (IsEqualGUID(&ctx->decoder_guid, &DXVADDI_Intel_ModeH264_E)) dxva_ctx->workaround |= FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO; return 0; fail: av_buffer_unref(&ctx->hw_frames_ctx); return AVERROR(EINVAL); }
16,979
FFmpeg
8d2e0e2c7058a3eaf7f45d740be6e93972bbfd68
0
const uint8_t *ff_h263_find_resync_marker(MpegEncContext *s, const uint8_t *av_restrict p, const uint8_t *av_restrict end) { av_assert2(p < end); end-=2; p++; if(s->resync_marker){ for(;p<end; p+=2){ if(!*p){ if (!p[-1] && p[1]) return p - 1; else if(!p[ 1] && p[2]) return p; } } } return end+2; }
16,980
FFmpeg
eabbc64728c2fdb74f565aededec2ab023d20699
0
static int start_ebml_master_crc32(AVIOContext *pb, AVIOContext **dyn_cp, ebml_master *master, unsigned int elementid, uint64_t expectedsize) { int ret; if ((ret = avio_open_dyn_buf(dyn_cp)) < 0) return ret; if (pb->seekable) *master = start_ebml_master(pb, elementid, expectedsize); else *master = start_ebml_master(*dyn_cp, elementid, expectedsize); return 0; }
16,982
FFmpeg
64250d94b74d3fd47cc8b1611f48daf6a6ed804a
0
static int decode_pic_hdr(IVI45DecContext *ctx, AVCodecContext *avctx) { int pic_size_indx, i, p; IVIPicConfig pic_conf; if (get_bits(&ctx->gb, 18) != 0x3FFF8) { av_log(avctx, AV_LOG_ERROR, "Invalid picture start code!\n"); return AVERROR_INVALIDDATA; } ctx->prev_frame_type = ctx->frame_type; ctx->frame_type = get_bits(&ctx->gb, 3); if (ctx->frame_type == 7) { av_log(avctx, AV_LOG_ERROR, "Invalid frame type: %d\n", ctx->frame_type); return AVERROR_INVALIDDATA; } if (ctx->frame_type == IVI4_FRAMETYPE_BIDIR) ctx->has_b_frames = 1; ctx->transp_status = get_bits1(&ctx->gb); if (ctx->transp_status) { ctx->has_transp = 1; } /* unknown bit: Mac decoder ignores this bit, XANIM returns error */ if (get_bits1(&ctx->gb)) { av_log(avctx, AV_LOG_ERROR, "Sync bit is set!\n"); return AVERROR_INVALIDDATA; } ctx->data_size = get_bits1(&ctx->gb) ? get_bits(&ctx->gb, 24) : 0; /* null frames don't contain anything else so we just return */ if (ctx->frame_type >= IVI4_FRAMETYPE_NULL_FIRST) { ff_dlog(avctx, "Null frame encountered!\n"); return 0; } /* Check key lock status. If enabled - ignore lock word. */ /* Usually we have to prompt the user for the password, but */ /* we don't do that because Indeo 4 videos can be decoded anyway */ if (get_bits1(&ctx->gb)) { skip_bits_long(&ctx->gb, 32); ff_dlog(avctx, "Password-protected clip!\n"); } pic_size_indx = get_bits(&ctx->gb, 3); if (pic_size_indx == IVI4_PIC_SIZE_ESC) { pic_conf.pic_height = get_bits(&ctx->gb, 16); pic_conf.pic_width = get_bits(&ctx->gb, 16); } else { pic_conf.pic_height = ivi4_common_pic_sizes[pic_size_indx * 2 + 1]; pic_conf.pic_width = ivi4_common_pic_sizes[pic_size_indx * 2 ]; } /* Decode tile dimensions. */ if (get_bits1(&ctx->gb)) { pic_conf.tile_height = scale_tile_size(pic_conf.pic_height, get_bits(&ctx->gb, 4)); pic_conf.tile_width = scale_tile_size(pic_conf.pic_width, get_bits(&ctx->gb, 4)); ctx->uses_tiling = 1; } else { pic_conf.tile_height = pic_conf.pic_height; pic_conf.tile_width = pic_conf.pic_width; } /* Decode chroma subsampling. We support only 4:4 aka YVU9. */ if (get_bits(&ctx->gb, 2)) { av_log(avctx, AV_LOG_ERROR, "Only YVU9 picture format is supported!\n"); return AVERROR_INVALIDDATA; } pic_conf.chroma_height = (pic_conf.pic_height + 3) >> 2; pic_conf.chroma_width = (pic_conf.pic_width + 3) >> 2; /* decode subdivision of the planes */ pic_conf.luma_bands = decode_plane_subdivision(&ctx->gb); if (pic_conf.luma_bands) pic_conf.chroma_bands = decode_plane_subdivision(&ctx->gb); ctx->is_scalable = pic_conf.luma_bands != 1 || pic_conf.chroma_bands != 1; if (ctx->is_scalable && (pic_conf.luma_bands != 4 || pic_conf.chroma_bands != 1)) { av_log(avctx, AV_LOG_ERROR, "Scalability: unsupported subdivision! Luma bands: %d, chroma bands: %d\n", pic_conf.luma_bands, pic_conf.chroma_bands); return AVERROR_INVALIDDATA; } /* check if picture layout was changed and reallocate buffers */ if (ivi_pic_config_cmp(&pic_conf, &ctx->pic_conf)) { if (ff_ivi_init_planes(ctx->planes, &pic_conf, 1)) { av_log(avctx, AV_LOG_ERROR, "Couldn't reallocate color planes!\n"); ctx->pic_conf.luma_bands = 0; return AVERROR(ENOMEM); } ctx->pic_conf = pic_conf; /* set default macroblock/block dimensions */ for (p = 0; p <= 2; p++) { for (i = 0; i < (!p ? pic_conf.luma_bands : pic_conf.chroma_bands); i++) { ctx->planes[p].bands[i].mb_size = !p ? (!ctx->is_scalable ? 16 : 8) : 4; ctx->planes[p].bands[i].blk_size = !p ? 
8 : 4; } } if (ff_ivi_init_tiles(ctx->planes, ctx->pic_conf.tile_width, ctx->pic_conf.tile_height)) { av_log(avctx, AV_LOG_ERROR, "Couldn't reallocate internal structures!\n"); return AVERROR(ENOMEM); } } ctx->frame_num = get_bits1(&ctx->gb) ? get_bits(&ctx->gb, 20) : 0; /* skip decTimeEst field if present */ if (get_bits1(&ctx->gb)) skip_bits(&ctx->gb, 8); /* decode macroblock and block huffman codebooks */ if (ff_ivi_dec_huff_desc(&ctx->gb, get_bits1(&ctx->gb), IVI_MB_HUFF, &ctx->mb_vlc, avctx) || ff_ivi_dec_huff_desc(&ctx->gb, get_bits1(&ctx->gb), IVI_BLK_HUFF, &ctx->blk_vlc, avctx)) return AVERROR_INVALIDDATA; ctx->rvmap_sel = get_bits1(&ctx->gb) ? get_bits(&ctx->gb, 3) : 8; ctx->in_imf = get_bits1(&ctx->gb); ctx->in_q = get_bits1(&ctx->gb); ctx->pic_glob_quant = get_bits(&ctx->gb, 5); /* TODO: ignore this parameter if unused */ ctx->unknown1 = get_bits1(&ctx->gb) ? get_bits(&ctx->gb, 3) : 0; ctx->checksum = get_bits1(&ctx->gb) ? get_bits(&ctx->gb, 16) : 0; /* skip picture header extension if any */ while (get_bits1(&ctx->gb)) { ff_dlog(avctx, "Pic hdr extension encountered!\n"); skip_bits(&ctx->gb, 8); } if (get_bits1(&ctx->gb)) { av_log(avctx, AV_LOG_ERROR, "Bad blocks bits encountered!\n"); } align_get_bits(&ctx->gb); return 0; }
16,983
FFmpeg
e6c90ce94f1b07f50cea2babf7471af455cca0ff
0
static void decode_finish_row(H264Context *h, H264SliceContext *sl) { int top = 16 * (h->mb_y >> FIELD_PICTURE(h)); int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h); int height = 16 << FRAME_MBAFF(h); int deblock_border = (16 + 4) << FRAME_MBAFF(h); if (h->deblocking_filter) { if ((top + height) >= pic_height) height += deblock_border; top -= deblock_border; } if (top >= pic_height || (top + height) < 0) return; height = FFMIN(height, pic_height - top); if (top < 0) { height = top + height; top = 0; } ff_h264_draw_horiz_band(h, sl, top, height); if (h->droppable) return; ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1, h->picture_structure == PICT_BOTTOM_FIELD); }
16,985
FFmpeg
e87190f5d20d380608f792ceb14d0def1d80e24b
0
static inline void show_tags(WriterContext *wctx, AVDictionary *tags, int section_id) { AVDictionaryEntry *tag = NULL; if (!tags) return; writer_print_section_header(wctx, section_id); while ((tag = av_dict_get(tags, "", tag, AV_DICT_IGNORE_SUFFIX))) writer_print_string(wctx, tag->key, tag->value, 0); writer_print_section_footer(wctx); }
16,987
qemu
7466bc49107fbd84336ba680f860d5eadd6def13
0
void qemu_spice_display_update(SimpleSpiceDisplay *ssd, int x, int y, int w, int h) { QXLRect update_area; dprint(2, "%s: x %d y %d w %d h %d\n", __FUNCTION__, x, y, w, h); update_area.left = x, update_area.right = x + w; update_area.top = y; update_area.bottom = y + h; pthread_mutex_lock(&ssd->lock); if (qemu_spice_rect_is_empty(&ssd->dirty)) { ssd->notify++; } qemu_spice_rect_union(&ssd->dirty, &update_area); pthread_mutex_unlock(&ssd->lock); }
16,989
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
0
static void blk_alloc(struct XenDevice *xendev) { struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev); LIST_INIT(&blkdev->inflight); LIST_INIT(&blkdev->finished); LIST_INIT(&blkdev->freelist); blkdev->bh = qemu_bh_new(blk_bh, blkdev); if (xen_mode != XEN_EMULATE) batch_maps = 1; }
16,990
qemu
0ac7cc2af500b948510f2481c22e84a57b0a2447
0
static Suite *qstring_suite(void) { Suite *s; TCase *qstring_public_tcase; s = suite_create("QString test-suite"); qstring_public_tcase = tcase_create("Public Interface"); suite_add_tcase(s, qstring_public_tcase); tcase_add_test(qstring_public_tcase, qstring_from_str_test); tcase_add_test(qstring_public_tcase, qstring_destroy_test); tcase_add_test(qstring_public_tcase, qstring_get_str_test); tcase_add_test(qstring_public_tcase, qstring_append_chr_test); tcase_add_test(qstring_public_tcase, qstring_from_substr_test); tcase_add_test(qstring_public_tcase, qobject_to_qstring_test); return s; }
16,991
qemu
9b990ee5a3cc6aa38f81266fb0c6ef37a36c45b9
0
static inline TranslationBlock *tb_find(CPUState *cpu, TranslationBlock *last_tb, int tb_exit) { TranslationBlock *tb; target_ulong cs_base, pc; uint32_t flags; bool acquired_tb_lock = false; uint32_t cf_mask = curr_cflags(); tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask); if (tb == NULL) { /* mmap_lock is needed by tb_gen_code, and mmap_lock must be * taken outside tb_lock. As system emulation is currently * single threaded the locks are NOPs. */ mmap_lock(); tb_lock(); acquired_tb_lock = true; /* There's a chance that our desired tb has been translated while * taking the locks so we check again inside the lock. */ tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask); if (likely(tb == NULL)) { /* if no translated code available, then translate it now */ tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask); } mmap_unlock(); /* We add the TB in the virtual pc hash table for the fast lookup */ atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); } #ifndef CONFIG_USER_ONLY /* We don't take care of direct jumps when address mapping changes in * system emulation. So it's not safe to make a direct jump to a TB * spanning two pages because the mapping for the second page can change. */ if (tb->page_addr[1] != -1) { last_tb = NULL; } #endif /* See if we can patch the calling TB. */ if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { if (!acquired_tb_lock) { tb_lock(); acquired_tb_lock = true; } if (!(tb->cflags & CF_INVALID)) { tb_add_jump(last_tb, tb_exit, tb); } } if (acquired_tb_lock) { tb_unlock(); } return tb; }
16,992
qemu
dfd100f242370886bb6732f70f1f7cbd8eb9fedc
0
static VncServerInfo2List *qmp_query_server_entry(QIOChannelSocket *ioc, bool websocket, int auth, int subauth, VncServerInfo2List *prev) { VncServerInfo2List *list; VncServerInfo2 *info; Error *err = NULL; SocketAddress *addr; addr = qio_channel_socket_get_local_address(ioc, &err); if (!addr) { error_free(err); return prev; } info = g_new0(VncServerInfo2, 1); vnc_init_basic_info(addr, qapi_VncServerInfo2_base(info), &err); qapi_free_SocketAddress(addr); if (err) { qapi_free_VncServerInfo2(info); error_free(err); return prev; } info->websocket = websocket; qmp_query_auth(auth, subauth, &info->auth, &info->vencrypt, &info->has_vencrypt); list = g_new0(VncServerInfo2List, 1); list->value = info; list->next = prev; return list; }
16,993
qemu
973945804d95878375b487c0c5c9b2556c5e4543
0
static void tcx_invalidate_cursor_position(TCXState *s) { int ymin, ymax, start, end; /* invalidate only near the cursor */ ymin = s->cursy; if (ymin >= s->height) { return; } ymax = MIN(s->height, ymin + 32); start = ymin * 1024; end = ymax * 1024; memory_region_set_dirty(&s->vram_mem, start, end-start); }
16,994
qemu
4ff927cc62ea79092e21827f17d19a3d85973e84
0
static int pxa2xx_timer_init(SysBusDevice *dev) { int i; int iomemtype; PXA2xxTimerInfo *s; qemu_irq irq4; s = FROM_SYSBUS(PXA2xxTimerInfo, dev); s->irq_enabled = 0; s->oldclock = 0; s->clock = 0; s->lastload = qemu_get_clock(vm_clock); s->reset3 = 0; for (i = 0; i < 4; i ++) { s->timer[i].value = 0; sysbus_init_irq(dev, &s->timer[i].irq); s->timer[i].info = s; s->timer[i].num = i; s->timer[i].level = 0; s->timer[i].qtimer = qemu_new_timer(vm_clock, pxa2xx_timer_tick, &s->timer[i]); } if (s->flags & (1 << PXA2XX_TIMER_HAVE_TM4)) { sysbus_init_irq(dev, &irq4); for (i = 0; i < 8; i ++) { s->tm4[i].tm.value = 0; s->tm4[i].tm.info = s; s->tm4[i].tm.num = i + 4; s->tm4[i].tm.level = 0; s->tm4[i].freq = 0; s->tm4[i].control = 0x0; s->tm4[i].tm.qtimer = qemu_new_timer(vm_clock, pxa2xx_timer_tick4, &s->tm4[i]); s->tm4[i].tm.irq = irq4; } } iomemtype = cpu_register_io_memory(pxa2xx_timer_readfn, pxa2xx_timer_writefn, s, DEVICE_NATIVE_ENDIAN); sysbus_init_mmio(dev, 0x00001000, iomemtype); return 0; }
16,995
qemu
9f2130f58d5dd4e1fcb435cca08bf77e7c32e6c6
0
static void xenfb_mouse_event(void *opaque, int dx, int dy, int dz, int button_state) { struct XenInput *xenfb = opaque; DisplaySurface *surface = qemu_console_surface(xenfb->c.con); int dw = surface_width(surface); int dh = surface_height(surface); int i; trace_xenfb_mouse_event(opaque, dx, dy, dz, button_state, xenfb->abs_pointer_wanted); if (xenfb->abs_pointer_wanted) xenfb_send_position(xenfb, dx * (dw - 1) / 0x7fff, dy * (dh - 1) / 0x7fff, dz); else xenfb_send_motion(xenfb, dx, dy, dz); for (i = 0 ; i < 8 ; i++) { int lastDown = xenfb->button_state & (1 << i); int down = button_state & (1 << i); if (down == lastDown) continue; if (xenfb_send_key(xenfb, down, BTN_LEFT+i) < 0) return; } xenfb->button_state = button_state; }
16,996
qemu
d5e6f437c5508614803d11e59ee16a758dde09ef
0
BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, const char *child_name, const BdrvChildRole *child_role, void *opaque) { BdrvChild *child = g_new(BdrvChild, 1); *child = (BdrvChild) { .bs = NULL, .name = g_strdup(child_name), .role = child_role, .opaque = opaque, }; bdrv_replace_child(child, child_bs); return child; }
16,997
FFmpeg
c679a1c358c30ec38ae3b1ac3ee2c62efc2f32e2
0
static av_cold int movie_common_init(AVFilterContext *ctx) { MovieContext *movie = ctx->priv; AVInputFormat *iformat = NULL; int64_t timestamp; int nb_streams, ret, i; char default_streams[16], *stream_specs, *spec, *cursor; char name[16]; AVStream *st; if (!*movie->file_name) { av_log(ctx, AV_LOG_ERROR, "No filename provided!\n"); return AVERROR(EINVAL); } movie->seek_point = movie->seek_point_d * 1000000 + 0.5; stream_specs = movie->stream_specs; if (!stream_specs) { snprintf(default_streams, sizeof(default_streams), "d%c%d", !strcmp(ctx->filter->name, "amovie") ? 'a' : 'v', movie->stream_index); stream_specs = default_streams; } for (cursor = stream_specs, nb_streams = 1; *cursor; cursor++) if (*cursor == '+') nb_streams++; if (movie->loop_count != 1 && nb_streams != 1) { av_log(ctx, AV_LOG_ERROR, "Loop with several streams is currently unsupported\n"); return AVERROR_PATCHWELCOME; } av_register_all(); // Try to find the movie format (container) iformat = movie->format_name ? av_find_input_format(movie->format_name) : NULL; movie->format_ctx = NULL; if ((ret = avformat_open_input(&movie->format_ctx, movie->file_name, iformat, NULL)) < 0) { av_log(ctx, AV_LOG_ERROR, "Failed to avformat_open_input '%s'\n", movie->file_name); return ret; } if ((ret = avformat_find_stream_info(movie->format_ctx, NULL)) < 0) av_log(ctx, AV_LOG_WARNING, "Failed to find stream info\n"); // if seeking requested, we execute it if (movie->seek_point > 0) { timestamp = movie->seek_point; // add the stream start time, should it exist if (movie->format_ctx->start_time != AV_NOPTS_VALUE) { if (timestamp > INT64_MAX - movie->format_ctx->start_time) { av_log(ctx, AV_LOG_ERROR, "%s: seek value overflow with start_time:%"PRId64" seek_point:%"PRId64"\n", movie->file_name, movie->format_ctx->start_time, movie->seek_point); return AVERROR(EINVAL); } timestamp += movie->format_ctx->start_time; } if ((ret = av_seek_frame(movie->format_ctx, -1, timestamp, AVSEEK_FLAG_BACKWARD)) < 0) { av_log(ctx, AV_LOG_ERROR, "%s: could not seek to position %"PRId64"\n", movie->file_name, timestamp); return ret; } } for (i = 0; i < movie->format_ctx->nb_streams; i++) movie->format_ctx->streams[i]->discard = AVDISCARD_ALL; movie->st = av_calloc(nb_streams, sizeof(*movie->st)); if (!movie->st) return AVERROR(ENOMEM); for (i = 0; i < nb_streams; i++) { spec = av_strtok(stream_specs, "+", &cursor); if (!spec) return AVERROR_BUG; stream_specs = NULL; /* for next strtok */ st = find_stream(ctx, movie->format_ctx, spec); if (!st) return AVERROR(EINVAL); st->discard = AVDISCARD_DEFAULT; movie->st[i].st = st; movie->max_stream_index = FFMAX(movie->max_stream_index, st->index); } if (av_strtok(NULL, "+", &cursor)) return AVERROR_BUG; movie->out_index = av_calloc(movie->max_stream_index + 1, sizeof(*movie->out_index)); if (!movie->out_index) return AVERROR(ENOMEM); for (i = 0; i <= movie->max_stream_index; i++) movie->out_index[i] = -1; for (i = 0; i < nb_streams; i++) movie->out_index[movie->st[i].st->index] = i; for (i = 0; i < nb_streams; i++) { AVFilterPad pad = { 0 }; snprintf(name, sizeof(name), "out%d", i); pad.type = movie->st[i].st->codec->codec_type; pad.name = av_strdup(name); pad.config_props = movie_config_output_props; pad.request_frame = movie_request_frame; ff_insert_outpad(ctx, i, &pad); ret = open_stream(ctx, &movie->st[i]); if (ret < 0) return ret; if ( movie->st[i].st->codec->codec->type == AVMEDIA_TYPE_AUDIO && !movie->st[i].st->codec->channel_layout) { ret = guess_channel_layout(&movie->st[i], i, ctx); if (ret < 0) return ret; } } 
av_log(ctx, AV_LOG_VERBOSE, "seek_point:%"PRIi64" format_name:%s file_name:%s stream_index:%d\n", movie->seek_point, movie->format_name, movie->file_name, movie->stream_index); return 0; }
16,998
qemu
494a8ebe713055d3946183f4b395f85a18b43e9e
0
static ssize_t proxy_llistxattr(FsContext *ctx, V9fsPath *fs_path, void *value, size_t size) { int retval; retval = v9fs_request(ctx->private, T_LLISTXATTR, value, "ds", size, fs_path); if (retval < 0) { errno = -retval; } return retval; }
17,000
qemu
9ef91a677110ec200d7b2904fc4bcae5a77329ad
0
int qemu_paio_read(struct qemu_paiocb *aiocb) { return qemu_paio_submit(aiocb, QEMU_PAIO_READ); }
17,001
qemu
91b0a8f33419573c1d741e49559bfb666fd8b1f0
0
static void do_io_interrupt(CPUS390XState *env) { LowCore *lowcore; IOIntQueue *q; uint8_t isc; int disable = 1; int found = 0; if (!(env->psw.mask & PSW_MASK_IO)) { cpu_abort(env, "I/O int w/o I/O mask\n"); } for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) { if (env->io_index[isc] < 0) { continue; } if (env->io_index[isc] > MAX_IO_QUEUE) { cpu_abort(env, "I/O queue overrun for isc %d: %d\n", isc, env->io_index[isc]); } q = &env->io_queue[env->io_index[isc]][isc]; if (!(env->cregs[6] & q->word)) { disable = 0; continue; } if (!found) { uint64_t mask, addr; found = 1; lowcore = cpu_map_lowcore(env); lowcore->subchannel_id = cpu_to_be16(q->id); lowcore->subchannel_nr = cpu_to_be16(q->nr); lowcore->io_int_parm = cpu_to_be32(q->parm); lowcore->io_int_word = cpu_to_be32(q->word); lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env)); lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr); mask = be64_to_cpu(lowcore->io_new_psw.mask); addr = be64_to_cpu(lowcore->io_new_psw.addr); cpu_unmap_lowcore(lowcore); env->io_index[isc]--; DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__, env->psw.mask, env->psw.addr); load_psw(env, mask, addr); } if (env->io_index[isc] >= 0) { disable = 0; } continue; } if (disable) { env->pending_int &= ~INTERRUPT_IO; } }
17,002
qemu
6eab3de16d36c48a983366b09d0a0029a5260bc3
0
static int piix3_initfn(PCIDevice *dev) { PIIX3State *d = DO_UPCAST(PIIX3State, dev, dev); uint8_t *pci_conf; isa_bus_new(&d->dev.qdev); pci_conf = d->dev.config; pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL); pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_INTEL_82371SB_0); // 82371SB PIIX3 PCI-to-ISA bridge (Step A1) pci_config_set_class(pci_conf, PCI_CLASS_BRIDGE_ISA); pci_conf[PCI_HEADER_TYPE] = PCI_HEADER_TYPE_NORMAL | PCI_HEADER_TYPE_MULTI_FUNCTION; // header_type = PCI_multifunction, generic qemu_register_reset(piix3_reset, d); return 0; }
17,003
qemu
c2b38b277a7882a592f4f2ec955084b2b756daaa
0
static bool event_notifier_poll(void *opaque) { EventNotifier *e = opaque; AioContext *ctx = container_of(e, AioContext, notifier); return atomic_read(&ctx->notified); }
17,005
qemu
27898a5daa4c6d28adb32b401a011d7198494482
0
coroutine_fn iscsi_co_write_zeroes(BlockDriverState *bs, int64_t sector_num, int nb_sectors, BdrvRequestFlags flags) { IscsiLun *iscsilun = bs->opaque; struct IscsiTask iTask; uint64_t lba; uint32_t nb_blocks; if (!is_request_lun_aligned(sector_num, nb_sectors, iscsilun)) { return -EINVAL; } if (!(flags & BDRV_REQ_MAY_UNMAP) && !iscsilun->has_write_same) { /* WRITE SAME without UNMAP is not supported by the target */ return -ENOTSUP; } if ((flags & BDRV_REQ_MAY_UNMAP) && !iscsilun->lbp.lbpws) { /* WRITE SAME with UNMAP is not supported by the target */ return -ENOTSUP; } lba = sector_qemu2lun(sector_num, iscsilun); nb_blocks = sector_qemu2lun(nb_sectors, iscsilun); if (iscsilun->zeroblock == NULL) { iscsilun->zeroblock = g_malloc0(iscsilun->block_size); } iscsi_co_init_iscsitask(iscsilun, &iTask); retry: if (iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba, iscsilun->zeroblock, iscsilun->block_size, nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP), 0, 0, iscsi_co_generic_cb, &iTask) == NULL) { return -ENOMEM; } while (!iTask.complete) { iscsi_set_events(iscsilun); qemu_coroutine_yield(); } if (iTask.status == SCSI_STATUS_CHECK_CONDITION && iTask.task->sense.key == SCSI_SENSE_ILLEGAL_REQUEST && iTask.task->sense.ascq == SCSI_SENSE_ASCQ_INVALID_OPERATION_CODE) { /* WRITE SAME is not supported by the target */ iscsilun->has_write_same = false; scsi_free_scsi_task(iTask.task); return -ENOTSUP; } if (iTask.task != NULL) { scsi_free_scsi_task(iTask.task); iTask.task = NULL; } if (iTask.do_retry) { iTask.complete = 0; goto retry; } if (iTask.status != SCSI_STATUS_GOOD) { return -EIO; } return 0; }
17,007
qemu
7197fb4058bcb68986bae2bb2c04d6370f3e7218
0
static size_t fd_getpagesize(int fd) { #ifdef CONFIG_LINUX struct statfs fs; int ret; if (fd != -1) { do { ret = fstatfs(fd, &fs); } while (ret != 0 && errno == EINTR); if (ret == 0 && fs.f_type == HUGETLBFS_MAGIC) { return fs.f_bsize; } } #endif return getpagesize(); }
17,008
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static void m5206_mbar_writel(void *opaque, target_phys_addr_t offset, uint32_t value) { m5206_mbar_state *s = (m5206_mbar_state *)opaque; int width; offset &= 0x3ff; if (offset >= 0x200) { hw_error("Bad MBAR write offset 0x%x", (int)offset); } width = m5206_mbar_width[offset >> 2]; if (width < 4) { m5206_mbar_writew(opaque, offset, value >> 16); m5206_mbar_writew(opaque, offset + 2, value & 0xffff); return; } m5206_mbar_write(s, offset, value, 4); }
17,010
qemu
a980f7f2c2f4d7e9a1eba4f804cd66dbd458b6d4
0
static QVirtIO9P *qvirtio_9p_pci_init(void) { QVirtIO9P *v9p; QVirtioPCIDevice *dev; v9p = g_new0(QVirtIO9P, 1); v9p->alloc = pc_alloc_init(); v9p->bus = qpci_init_pc(NULL); dev = qvirtio_pci_device_find(v9p->bus, VIRTIO_ID_9P); g_assert_nonnull(dev); g_assert_cmphex(dev->vdev.device_type, ==, VIRTIO_ID_9P); v9p->dev = (QVirtioDevice *) dev; qvirtio_pci_device_enable(dev); qvirtio_reset(v9p->dev); qvirtio_set_acknowledge(v9p->dev); qvirtio_set_driver(v9p->dev); v9p->vq = qvirtqueue_setup(v9p->dev, v9p->alloc, 0); return v9p; }
17,011
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
0
void drive_uninit(BlockDriverState *bdrv) { DriveInfo *dinfo; TAILQ_FOREACH(dinfo, &drives, next) { if (dinfo->bdrv != bdrv) continue; qemu_opts_del(dinfo->opts); TAILQ_REMOVE(&drives, dinfo, next); qemu_free(dinfo); break; } }
17,013
qemu
bd269ebc82fbaa5fe7ce5bc7c1770ac8acecd884
0
static void qio_dns_resolver_lookup_data_free(gpointer opaque) { struct QIODNSResolverLookupData *data = opaque; size_t i; qapi_free_SocketAddressLegacy(data->addr); for (i = 0; i < data->naddrs; i++) { qapi_free_SocketAddressLegacy(data->addrs[i]); } g_free(data->addrs); g_free(data); }
17,014
qemu
5fb6c7a8b26eab1a22207d24b4784bd2b39ab54b
0
static void vnc_write(VncState *vs, const void *data, size_t len) { buffer_reserve(&vs->output, len); if (buffer_empty(&vs->output)) { qemu_set_fd_handler2(vs->csock, NULL, vnc_client_read, vnc_client_write, vs); } buffer_append(&vs->output, data, len); }
17,015
qemu
973945804d95878375b487c0c5c9b2556c5e4543
0
static void tcx_stip_writel(void *opaque, hwaddr addr, uint64_t val, unsigned size) { TCXState *s = opaque; int i; uint32_t col; if (!(addr & 4)) { s->tmpblit = val; } else { addr = (addr >> 3) & 0xfffff; col = cpu_to_be32(s->tmpblit); if (s->depth == 24) { for (i = 0; i < 32; i++) { if (val & 0x80000000) { s->vram[addr + i] = s->tmpblit; s->vram24[addr + i] = col; } val <<= 1; } } else { for (i = 0; i < 32; i++) { if (val & 0x80000000) { s->vram[addr + i] = s->tmpblit; } val <<= 1; } } memory_region_set_dirty(&s->vram_mem, addr, 32); } }
17,016
qemu
53687348813196551874409fecb49c94d20b1ae6
0
int main(int argc, char *argv[]) { const char *sparc_machines[] = { "SPARCbook", "Voyager", "SS-20", NULL }; const char *sparc64_machines[] = { "sun4u", "sun4v", NULL }; const char *mac_machines[] = { "mac99", "g3beige", NULL }; const char *arch = qtest_get_arch(); g_test_init(&argc, &argv, NULL); if (!strcmp(arch, "ppc") || !strcmp(arch, "ppc64")) { add_tests(mac_machines); } else if (!strcmp(arch, "sparc")) { add_tests(sparc_machines); } else if (!strcmp(arch, "sparc64")) { add_tests(sparc64_machines); } else { g_assert_not_reached(); } return g_test_run(); }
17,017
qemu
4a1418e07bdcfaa3177739e04707ecaec75d89e1
0
ram_addr_t qemu_ram_alloc(ram_addr_t size) { RAMBlock *new_block; #ifdef CONFIG_KQEMU if (kqemu_phys_ram_base) { return kqemu_ram_alloc(size); } #endif size = TARGET_PAGE_ALIGN(size); new_block = qemu_malloc(sizeof(*new_block)); new_block->host = qemu_vmalloc(size); new_block->offset = last_ram_offset; new_block->length = size; new_block->next = ram_blocks; ram_blocks = new_block; phys_ram_dirty = qemu_realloc(phys_ram_dirty, (last_ram_offset + size) >> TARGET_PAGE_BITS); memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS), 0xff, size >> TARGET_PAGE_BITS); last_ram_offset += size; if (kvm_enabled()) kvm_setup_guest_memory(new_block->host, size); return new_block->offset; }
17,018
qemu
203d65a4706be345c209f3408d3a011a3e48f0c9
0
static void imx_gpt_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { IMXGPTState *s = IMX_GPT(opaque); uint32_t oldreg; uint32_t reg = offset >> 2; DPRINTF("(%s, value = 0x%08x)\n", imx_gpt_reg_name(reg), (uint32_t)value); switch (reg) { case 0: oldreg = s->cr; s->cr = value & ~0x7c14; if (s->cr & GPT_CR_SWR) { /* force reset */ /* handle the reset */ imx_gpt_reset(DEVICE(s)); } else { /* set our freq, as the source might have changed */ imx_gpt_set_freq(s); if ((oldreg ^ s->cr) & GPT_CR_EN) { if (s->cr & GPT_CR_EN) { if (s->cr & GPT_CR_ENMOD) { s->next_timeout = TIMER_MAX; ptimer_set_count(s->timer, TIMER_MAX); imx_gpt_compute_next_timeout(s, false); } ptimer_run(s->timer, 1); } else { /* stop timer */ ptimer_stop(s->timer); } } } break; case 1: /* Prescaler */ s->pr = value & 0xfff; imx_gpt_set_freq(s); break; case 2: /* SR */ s->sr &= ~(value & 0x3f); imx_gpt_update_int(s); break; case 3: /* IR -- interrupt register */ s->ir = value & 0x3f; imx_gpt_update_int(s); imx_gpt_compute_next_timeout(s, false); break; case 4: /* OCR1 -- output compare register */ s->ocr1 = value; /* In non-freerun mode, reset count when this register is written */ if (!(s->cr & GPT_CR_FRR)) { s->next_timeout = TIMER_MAX; ptimer_set_limit(s->timer, TIMER_MAX, 1); } /* compute the new timeout */ imx_gpt_compute_next_timeout(s, false); break; case 5: /* OCR2 -- output compare register */ s->ocr2 = value; /* compute the new timeout */ imx_gpt_compute_next_timeout(s, false); break; case 6: /* OCR3 -- output compare register */ s->ocr3 = value; /* compute the new timeout */ imx_gpt_compute_next_timeout(s, false); break; default: IPRINTF("Bad offset %x\n", reg); break; } }
17,019
qemu
9ffe337c08388d5c587eae1d77db1b0d1a47c7b1
0
void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s) { if (!s) { return; } virtio_blk_data_plane_stop(s); g_free(s->batch_notify_vqs); qemu_bh_delete(s->bh); object_unref(OBJECT(s->iothread)); g_free(s); }
17,022
qemu
c2b38b277a7882a592f4f2ec955084b2b756daaa
0
int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout) { #ifdef CONFIG_PPOLL if (timeout < 0) { return ppoll((struct pollfd *)fds, nfds, NULL, NULL); } else { struct timespec ts; int64_t tvsec = timeout / 1000000000LL; /* Avoid possibly overflowing and specifying a negative number of * seconds, which would turn a very long timeout into a busy-wait. */ if (tvsec > (int64_t)INT32_MAX) { tvsec = INT32_MAX; } ts.tv_sec = tvsec; ts.tv_nsec = timeout % 1000000000LL; return ppoll((struct pollfd *)fds, nfds, &ts, NULL); } #else return g_poll(fds, nfds, qemu_timeout_ns_to_ms(timeout)); #endif }
17,023
qemu
1687a089f103f9b7a1b4a1555068054cb46ee9e9
0
vreader_get_reader_by_name(const char *name) { VReader *reader = NULL; VReaderListEntry *current_entry = NULL; vreader_list_lock(); for (current_entry = vreader_list_get_first(vreader_list); current_entry; current_entry = vreader_list_get_next(current_entry)) { VReader *creader = vreader_list_get_reader(current_entry); if (strcmp(creader->name, name) == 0) { reader = creader; break; } vreader_free(creader); } vreader_list_unlock(); return reader; }
17,025
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
0
static BlockDriverState *get_bs_snapshots(void) { BlockDriverState *bs; DriveInfo *dinfo; if (bs_snapshots) return bs_snapshots; TAILQ_FOREACH(dinfo, &drives, next) { bs = dinfo->bdrv; if (bdrv_can_snapshot(bs)) goto ok; } return NULL; ok: bs_snapshots = bs; return bs; }
17,026
qemu
dc38852aaa4ac187d8b44201f75fc2835241912d
0
static void print_report(const char *op, struct timeval *t, int64_t offset, int64_t count, int64_t total, int cnt, int Cflag) { char s1[64], s2[64], ts[64]; timestr(t, ts, sizeof(ts), Cflag ? VERBOSE_FIXED_TIME : 0); if (!Cflag) { cvtstr((double)total, s1, sizeof(s1)); cvtstr(tdiv((double)total, *t), s2, sizeof(s2)); printf("%s %"PRId64"/%"PRId64" bytes at offset %" PRId64 "\n", op, total, count, offset); printf("%s, %d ops; %s (%s/sec and %.4f ops/sec)\n", s1, cnt, ts, s2, tdiv((double)cnt, *t)); } else {/* bytes,ops,time,bytes/sec,ops/sec */ printf("%"PRId64",%d,%s,%.3f,%.3f\n", total, cnt, ts, tdiv((double)total, *t), tdiv((double)cnt, *t)); } }
17,027
qemu
ef1e1e0782e99c9dcf2b35e5310cdd8ca9211374
0
static int tcp_set_msgfds(CharDriverState *chr, int *fds, int num) { TCPCharDriver *s = chr->opaque; /* clear old pending fd array */ if (s->write_msgfds) { g_free(s->write_msgfds); } if (num) { s->write_msgfds = g_malloc(num * sizeof(int)); memcpy(s->write_msgfds, fds, num * sizeof(int)); } s->write_msgfds_num = num; return 0; }
17,029
FFmpeg
b8664c929437d6d079e16979c496a2db40cf2324
0
static av_always_inline int normal_limit(uint8_t *p, ptrdiff_t stride, int E, int I) { LOAD_PIXELS return simple_limit(p, stride, E) && FFABS(p3-p2) <= I && FFABS(p2-p1) <= I && FFABS(p1-p0) <= I && FFABS(q3-q2) <= I && FFABS(q2-q1) <= I && FFABS(q1-q0) <= I; }
17,031
qemu
0e9b9edae7bebfd31fdbead4ccbbce03876a7edd
0
static void nvdimm_build_nfit(GSList *device_list, GArray *table_offsets, GArray *table_data, GArray *linker) { GArray *structures = nvdimm_build_device_structure(device_list); unsigned int header; acpi_add_table(table_offsets, table_data); /* NFIT header. */ header = table_data->len; acpi_data_push(table_data, sizeof(NvdimmNfitHeader)); /* NVDIMM device structures. */ g_array_append_vals(table_data, structures->data, structures->len); build_header(linker, table_data, (void *)(table_data->data + header), "NFIT", sizeof(NvdimmNfitHeader) + structures->len, 1, NULL, NULL); g_array_free(structures, true); }
17,032
qemu
9c605cb13547a5faa5cb1092e3e44ac8b0d0b841
0
long disas_insn(DisasContext *s, uint8_t *pc_start) { int b, prefixes, aflag, dflag; int shift, ot; int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val; unsigned int next_eip; s->pc = pc_start; prefixes = 0; aflag = s->code32; dflag = s->code32; // cur_pc = s->pc; /* for insn generation */ next_byte: b = ldub(s->pc); s->pc++; /* check prefixes */ switch (b) { case 0xf3: prefixes |= PREFIX_REPZ; goto next_byte; case 0xf2: prefixes |= PREFIX_REPNZ; goto next_byte; case 0xf0: prefixes |= PREFIX_LOCK; goto next_byte; case 0x2e: prefixes |= PREFIX_CS; goto next_byte; case 0x36: prefixes |= PREFIX_SS; goto next_byte; case 0x3e: prefixes |= PREFIX_DS; goto next_byte; case 0x26: prefixes |= PREFIX_ES; goto next_byte; case 0x64: prefixes |= PREFIX_FS; goto next_byte; case 0x65: prefixes |= PREFIX_GS; goto next_byte; case 0x66: prefixes |= PREFIX_DATA; goto next_byte; case 0x67: prefixes |= PREFIX_ADR; goto next_byte; case 0x9b: prefixes |= PREFIX_FWAIT; goto next_byte; } if (prefixes & PREFIX_DATA) dflag ^= 1; if (prefixes & PREFIX_ADR) aflag ^= 1; s->prefix = prefixes; s->aflag = aflag; s->dflag = dflag; /* lock generation */ if (prefixes & PREFIX_LOCK) gen_op_lock(); /* now check op code */ reswitch: switch(b) { case 0x0f: /**************************/ /* extended op code */ b = ldub(s->pc++) | 0x100; goto reswitch; /**************************/ /* arith & logic */ case 0x00 ... 0x05: case 0x08 ... 0x0d: case 0x10 ... 0x15: case 0x18 ... 0x1d: case 0x20 ... 0x25: case 0x28 ... 0x2d: case 0x30 ... 0x35: case 0x38 ... 0x3d: { int op, f, val; op = (b >> 3) & 7; f = (b >> 1) & 3; if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; switch(f) { case 0: /* OP Ev, Gv */ modrm = ldub(s->pc++); reg = ((modrm >> 3) & 7) + OR_EAX; mod = (modrm >> 6) & 3; rm = modrm & 7; if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_ld_T0_A0[ot](); opreg = OR_TMP0; } else { opreg = OR_EAX + rm; } gen_op(s, op, ot, opreg, reg); if (mod != 3 && op != 7) { gen_op_st_T0_A0[ot](); } break; case 1: /* OP Gv, Ev */ modrm = ldub(s->pc++); mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) + OR_EAX; rm = modrm & 7; if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_ld_T1_A0[ot](); opreg = OR_TMP1; } else { opreg = OR_EAX + rm; } gen_op(s, op, ot, reg, opreg); break; case 2: /* OP A, Iv */ val = insn_get(s, ot); gen_opi(s, op, ot, OR_EAX, val); break; } } break; case 0x80: /* GRP1 */ case 0x81: case 0x83: { int val; if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); mod = (modrm >> 6) & 3; rm = modrm & 7; op = (modrm >> 3) & 7; if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_ld_T0_A0[ot](); opreg = OR_TMP0; } else { opreg = rm + OR_EAX; } switch(b) { default: case 0x80: case 0x81: val = insn_get(s, ot); break; case 0x83: val = (int8_t)insn_get(s, OT_BYTE); break; } gen_opi(s, op, ot, opreg, val); if (op != 7 && mod != 3) { gen_op_st_T0_A0[ot](); } } break; /**************************/ /* inc, dec, and other misc arith */ case 0x40 ... 0x47: /* inc Gv */ ot = dflag ? OT_LONG : OT_WORD; gen_inc(s, ot, OR_EAX + (b & 7), 1); break; case 0x48 ... 0x4f: /* dec Gv */ ot = dflag ? OT_LONG : OT_WORD; gen_inc(s, ot, OR_EAX + (b & 7), -1); break; case 0xf6: /* GRP3 */ case 0xf7: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? 
OT_LONG : OT_WORD; modrm = ldub(s->pc++); mod = (modrm >> 6) & 3; rm = modrm & 7; op = (modrm >> 3) & 7; if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_ld_T0_A0[ot](); } else { gen_op_mov_TN_reg[ot][0][rm](); } switch(op) { case 0: /* test */ val = insn_get(s, ot); gen_op_movl_T1_im(val); gen_op_testl_T0_T1_cc(); s->cc_op = CC_OP_LOGICB + ot; break; case 2: /* not */ gen_op_notl_T0(); if (mod != 3) { gen_op_st_T0_A0[ot](); } else { gen_op_mov_reg_T0[ot][rm](); } break; case 3: /* neg */ gen_op_negl_T0_cc(); if (mod != 3) { gen_op_st_T0_A0[ot](); } else { gen_op_mov_reg_T0[ot][rm](); } s->cc_op = CC_OP_SUBB + ot; break; case 4: /* mul */ switch(ot) { case OT_BYTE: gen_op_mulb_AL_T0(); break; case OT_WORD: gen_op_mulw_AX_T0(); break; default: case OT_LONG: gen_op_mull_EAX_T0(); break; } s->cc_op = CC_OP_MUL; break; case 5: /* imul */ switch(ot) { case OT_BYTE: gen_op_imulb_AL_T0(); break; case OT_WORD: gen_op_imulw_AX_T0(); break; default: case OT_LONG: gen_op_imull_EAX_T0(); break; } s->cc_op = CC_OP_MUL; break; case 6: /* div */ switch(ot) { case OT_BYTE: gen_op_divb_AL_T0(); break; case OT_WORD: gen_op_divw_AX_T0(); break; default: case OT_LONG: gen_op_divl_EAX_T0(); break; } break; case 7: /* idiv */ switch(ot) { case OT_BYTE: gen_op_idivb_AL_T0(); break; case OT_WORD: gen_op_idivw_AX_T0(); break; default: case OT_LONG: gen_op_idivl_EAX_T0(); break; } break; default: goto illegal_op; } break; case 0xfe: /* GRP4 */ case 0xff: /* GRP5 */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); mod = (modrm >> 6) & 3; rm = modrm & 7; op = (modrm >> 3) & 7; if (op >= 2 && b == 0xfe) { goto illegal_op; } if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); if (op != 3 && op != 5) gen_op_ld_T0_A0[ot](); } else { gen_op_mov_TN_reg[ot][0][rm](); } switch(op) { case 0: /* inc Ev */ gen_inc(s, ot, OR_TMP0, 1); if (mod != 3) gen_op_st_T0_A0[ot](); else gen_op_mov_reg_T0[ot][rm](); break; case 1: /* dec Ev */ gen_inc(s, ot, OR_TMP0, -1); if (mod != 3) gen_op_st_T0_A0[ot](); else gen_op_mov_reg_T0[ot][rm](); break; case 2: /* call Ev */ /* XXX: optimize if memory (no and is necessary) */ if (s->dflag == 0) gen_op_andl_T0_ffff(); gen_op_jmp_T0(); next_eip = s->pc - s->cs_base; gen_op_movl_T0_im(next_eip); gen_push_T0(s); s->is_jmp = 1; break; case 3: /* lcall Ev */ /* push return segment + offset */ gen_op_movl_T0_seg(R_CS); gen_push_T0(s); next_eip = s->pc - s->cs_base; gen_op_movl_T0_im(next_eip); gen_push_T0(s); gen_op_ld_T1_A0[ot](); gen_op_addl_A0_im(1 << (ot - OT_WORD + 1)); gen_op_lduw_T0_A0(); gen_movl_seg_T0(s, R_CS); gen_op_movl_T0_T1(); gen_op_jmp_T0(); s->is_jmp = 1; break; case 4: /* jmp Ev */ if (s->dflag == 0) gen_op_andl_T0_ffff(); gen_op_jmp_T0(); s->is_jmp = 1; break; case 5: /* ljmp Ev */ gen_op_ld_T1_A0[ot](); gen_op_addl_A0_im(1 << (ot - OT_WORD + 1)); gen_op_lduw_T0_A0(); gen_movl_seg_T0(s, R_CS); gen_op_movl_T0_T1(); gen_op_jmp_T0(); s->is_jmp = 1; break; case 6: /* push Ev */ gen_push_T0(s); break; default: goto illegal_op; } break; case 0x84: /* test Ev, Gv */ case 0x85: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); mod = (modrm >> 6) & 3; rm = modrm & 7; reg = (modrm >> 3) & 7; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); gen_op_mov_TN_reg[ot][1][reg + OR_EAX](); gen_op_testl_T0_T1_cc(); s->cc_op = CC_OP_LOGICB + ot; break; case 0xa8: /* test eAX, Iv */ case 0xa9: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? 
OT_LONG : OT_WORD; val = insn_get(s, ot); gen_op_mov_TN_reg[ot][0][OR_EAX](); gen_op_movl_T1_im(val); gen_op_testl_T0_T1_cc(); s->cc_op = CC_OP_LOGICB + ot; break; case 0x98: /* CWDE/CBW */ if (dflag) gen_op_movswl_EAX_AX(); else gen_op_movsbw_AX_AL(); break; case 0x99: /* CDQ/CWD */ if (dflag) gen_op_movslq_EDX_EAX(); else gen_op_movswl_DX_AX(); break; case 0x1af: /* imul Gv, Ev */ case 0x69: /* imul Gv, Ev, I */ case 0x6b: ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); reg = ((modrm >> 3) & 7) + OR_EAX; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); if (b == 0x69) { val = insn_get(s, ot); gen_op_movl_T1_im(val); } else if (b == 0x6b) { val = insn_get(s, OT_BYTE); gen_op_movl_T1_im(val); } else { gen_op_mov_TN_reg[ot][1][reg](); } if (ot == OT_LONG) { gen_op_imull_T0_T1(); } else { gen_op_imulw_T0_T1(); } gen_op_mov_reg_T0[ot][reg](); s->cc_op = CC_OP_MUL; break; case 0x1c0: case 0x1c1: /* xadd Ev, Gv */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (mod == 3) { rm = modrm & 7; gen_op_mov_TN_reg[ot][0][reg](); gen_op_mov_TN_reg[ot][1][rm](); gen_op_addl_T0_T1_cc(); gen_op_mov_reg_T0[ot][rm](); gen_op_mov_reg_T1[ot][reg](); } else { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_mov_TN_reg[ot][0][reg](); gen_op_ld_T1_A0[ot](); gen_op_addl_T0_T1_cc(); gen_op_st_T0_A0[ot](); gen_op_mov_reg_T1[ot][reg](); } s->cc_op = CC_OP_ADDB + ot; break; case 0x1b0: case 0x1b1: /* cmpxchg Ev, Gv */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; gen_op_mov_TN_reg[ot][1][reg](); if (mod == 3) { rm = modrm & 7; gen_op_mov_TN_reg[ot][0][rm](); gen_op_cmpxchg_T0_T1_EAX_cc[ot](); gen_op_mov_reg_T0[ot][rm](); } else { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_ld_T0_A0[ot](); gen_op_cmpxchg_T0_T1_EAX_cc[ot](); gen_op_st_T0_A0[ot](); } s->cc_op = CC_OP_SUBB + ot; break; /**************************/ /* push/pop */ case 0x50 ... 0x57: /* push */ gen_op_mov_TN_reg[OT_LONG][0][b & 7](); gen_push_T0(s); break; case 0x58 ... 0x5f: /* pop */ ot = dflag ? OT_LONG : OT_WORD; gen_pop_T0(s); gen_op_mov_reg_T0[ot][b & 7](); gen_pop_update(s); break; case 0x60: /* pusha */ gen_pusha(s); break; case 0x61: /* popa */ gen_popa(s); break; case 0x68: /* push Iv */ case 0x6a: ot = dflag ? OT_LONG : OT_WORD; if (b == 0x68) val = insn_get(s, ot); else val = (int8_t)insn_get(s, OT_BYTE); gen_op_movl_T0_im(val); gen_push_T0(s); break; case 0x8f: /* pop Ev */ ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); gen_pop_T0(s); gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1); gen_pop_update(s); break; case 0xc8: /* enter */ { int level; val = lduw(s->pc); s->pc += 2; level = ldub(s->pc++); gen_enter(s, val, level); } break; case 0xc9: /* leave */ /* XXX: exception not precise (ESP is update before potential exception) */ if (s->ss32) { gen_op_mov_TN_reg[OT_LONG][0][R_EBP](); gen_op_mov_reg_T0[OT_LONG][R_ESP](); } else { gen_op_mov_TN_reg[OT_WORD][0][R_EBP](); gen_op_mov_reg_T0[OT_WORD][R_ESP](); } gen_pop_T0(s); ot = dflag ? 
OT_LONG : OT_WORD; gen_op_mov_reg_T0[ot][R_EBP](); gen_pop_update(s); break; case 0x06: /* push es */ case 0x0e: /* push cs */ case 0x16: /* push ss */ case 0x1e: /* push ds */ gen_op_movl_T0_seg(b >> 3); gen_push_T0(s); break; case 0x1a0: /* push fs */ case 0x1a8: /* push gs */ gen_op_movl_T0_seg(((b >> 3) & 7) + R_FS); gen_push_T0(s); break; case 0x07: /* pop es */ case 0x17: /* pop ss */ case 0x1f: /* pop ds */ gen_pop_T0(s); gen_movl_seg_T0(s, b >> 3); gen_pop_update(s); break; case 0x1a1: /* pop fs */ case 0x1a9: /* pop gs */ gen_pop_T0(s); gen_movl_seg_T0(s, ((b >> 3) & 7) + R_FS); gen_pop_update(s); break; /**************************/ /* mov */ case 0x88: case 0x89: /* mov Gv, Ev */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); reg = (modrm >> 3) & 7; /* generate a generic store */ gen_ldst_modrm(s, modrm, ot, OR_EAX + reg, 1); break; case 0xc6: case 0xc7: /* mov Ev, Iv */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); mod = (modrm >> 6) & 3; if (mod != 3) gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); val = insn_get(s, ot); gen_op_movl_T0_im(val); if (mod != 3) gen_op_st_T0_A0[ot](); else gen_op_mov_reg_T0[ot][modrm & 7](); break; case 0x8a: case 0x8b: /* mov Ev, Gv */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); reg = (modrm >> 3) & 7; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); gen_op_mov_reg_T0[ot][reg](); break; case 0x8e: /* mov seg, Gv */ ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); reg = (modrm >> 3) & 7; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); if (reg >= 6 || reg == R_CS) goto illegal_op; gen_movl_seg_T0(s, reg); break; case 0x8c: /* mov Gv, seg */ ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); reg = (modrm >> 3) & 7; if (reg >= 6) goto illegal_op; gen_op_movl_T0_seg(reg); gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1); break; case 0x1b6: /* movzbS Gv, Eb */ case 0x1b7: /* movzwS Gv, Eb */ case 0x1be: /* movsbS Gv, Eb */ case 0x1bf: /* movswS Gv, Eb */ { int d_ot; /* d_ot is the size of destination */ d_ot = dflag + OT_WORD; /* ot is the size of source */ ot = (b & 1) + OT_BYTE; modrm = ldub(s->pc++); reg = ((modrm >> 3) & 7) + OR_EAX; mod = (modrm >> 6) & 3; rm = modrm & 7; if (mod == 3) { gen_op_mov_TN_reg[ot][0][rm](); switch(ot | (b & 8)) { case OT_BYTE: gen_op_movzbl_T0_T0(); break; case OT_BYTE | 8: gen_op_movsbl_T0_T0(); break; case OT_WORD: gen_op_movzwl_T0_T0(); break; default: case OT_WORD | 8: gen_op_movswl_T0_T0(); break; } gen_op_mov_reg_T0[d_ot][reg](); } else { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); if (b & 8) { gen_op_lds_T0_A0[ot](); } else { gen_op_ldu_T0_A0[ot](); } gen_op_mov_reg_T0[d_ot][reg](); } } break; case 0x8d: /* lea */ ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); reg = (modrm >> 3) & 7; /* we must ensure that no segment is added */ s->prefix &= ~(PREFIX_CS | PREFIX_SS | PREFIX_DS | PREFIX_ES | PREFIX_FS | PREFIX_GS); val = s->addseg; s->addseg = 0; gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); s->addseg = val; gen_op_mov_reg_A0[ot - OT_WORD][reg](); break; case 0xa0: /* mov EAX, Ov */ case 0xa1: case 0xa2: /* mov Ov, EAX */ case 0xa3: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? 
OT_LONG : OT_WORD; if (s->aflag) offset_addr = insn_get(s, OT_LONG); else offset_addr = insn_get(s, OT_WORD); gen_op_movl_A0_im(offset_addr); /* handle override */ /* XXX: factorize that */ { int override, must_add_seg; override = R_DS; must_add_seg = s->addseg; if (s->prefix & (PREFIX_CS | PREFIX_SS | PREFIX_DS | PREFIX_ES | PREFIX_FS | PREFIX_GS)) { if (s->prefix & PREFIX_ES) override = R_ES; else if (s->prefix & PREFIX_CS) override = R_CS; else if (s->prefix & PREFIX_SS) override = R_SS; else if (s->prefix & PREFIX_DS) override = R_DS; else if (s->prefix & PREFIX_FS) override = R_FS; else override = R_GS; must_add_seg = 1; } if (must_add_seg) { gen_op_addl_A0_seg(offsetof(CPUX86State,seg_cache[override].base)); } } if ((b & 2) == 0) { gen_op_ld_T0_A0[ot](); gen_op_mov_reg_T0[ot][R_EAX](); } else { gen_op_mov_TN_reg[ot][0][R_EAX](); gen_op_st_T0_A0[ot](); } break; case 0xd7: /* xlat */ /* handle override */ gen_op_movl_A0_reg[R_EBX](); gen_op_addl_A0_AL(); if (s->aflag == 0) gen_op_andl_A0_ffff(); /* XXX: factorize that */ { int override, must_add_seg; override = R_DS; must_add_seg = s->addseg; if (s->prefix & (PREFIX_CS | PREFIX_SS | PREFIX_DS | PREFIX_ES | PREFIX_FS | PREFIX_GS)) { if (s->prefix & PREFIX_ES) override = R_ES; else if (s->prefix & PREFIX_CS) override = R_CS; else if (s->prefix & PREFIX_SS) override = R_SS; else if (s->prefix & PREFIX_DS) override = R_DS; else if (s->prefix & PREFIX_FS) override = R_FS; else override = R_GS; must_add_seg = 1; } if (must_add_seg) { gen_op_addl_A0_seg(offsetof(CPUX86State,seg_cache[override].base)); } } gen_op_ldub_T0_A0(); gen_op_mov_reg_T0[OT_BYTE][R_EAX](); break; case 0xb0 ... 0xb7: /* mov R, Ib */ val = insn_get(s, OT_BYTE); gen_op_movl_T0_im(val); gen_op_mov_reg_T0[OT_BYTE][b & 7](); break; case 0xb8 ... 0xbf: /* mov R, Iv */ ot = dflag ? OT_LONG : OT_WORD; val = insn_get(s, ot); reg = OR_EAX + (b & 7); gen_op_movl_T0_im(val); gen_op_mov_reg_T0[ot][reg](); break; case 0x91 ... 0x97: /* xchg R, EAX */ ot = dflag ? OT_LONG : OT_WORD; reg = b & 7; rm = R_EAX; goto do_xchg_reg; case 0x86: case 0x87: /* xchg Ev, Gv */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (mod == 3) { rm = modrm & 7; do_xchg_reg: gen_op_mov_TN_reg[ot][0][reg](); gen_op_mov_TN_reg[ot][1][rm](); gen_op_mov_reg_T0[ot][rm](); gen_op_mov_reg_T1[ot][reg](); } else { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_mov_TN_reg[ot][0][reg](); /* for xchg, lock is implicit */ if (!(prefixes & PREFIX_LOCK)) gen_op_lock(); gen_op_ld_T1_A0[ot](); gen_op_st_T0_A0[ot](); if (!(prefixes & PREFIX_LOCK)) gen_op_unlock(); gen_op_mov_reg_T1[ot][reg](); } break; case 0xc4: /* les Gv */ op = R_ES; goto do_lxx; case 0xc5: /* lds Gv */ op = R_DS; goto do_lxx; case 0x1b2: /* lss Gv */ op = R_SS; goto do_lxx; case 0x1b4: /* lfs Gv */ op = R_FS; goto do_lxx; case 0x1b5: /* lgs Gv */ op = R_GS; do_lxx: ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_op_ld_T1_A0[ot](); gen_op_addl_A0_im(1 << (ot - OT_WORD + 1)); /* load the segment first to handle exceptions properly */ gen_op_lduw_T0_A0(); gen_movl_seg_T0(s, op); /* then put the data */ gen_op_mov_reg_T1[ot][reg](); break; /************************/ /* shifts */ case 0xc0: case 0xc1: /* shift Ev,Ib */ shift = 2; grp2: { if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? 
OT_LONG : OT_WORD; modrm = ldub(s->pc++); mod = (modrm >> 6) & 3; rm = modrm & 7; op = (modrm >> 3) & 7; if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_ld_T0_A0[ot](); opreg = OR_TMP0; } else { opreg = rm + OR_EAX; } /* simpler op */ if (shift == 0) { gen_shift(s, op, ot, opreg, OR_ECX); } else { if (shift == 2) { shift = ldub(s->pc++); } gen_shifti(s, op, ot, opreg, shift); } if (mod != 3) { gen_op_st_T0_A0[ot](); } } break; case 0xd0: case 0xd1: /* shift Ev,1 */ shift = 1; goto grp2; case 0xd2: case 0xd3: /* shift Ev,cl */ shift = 0; goto grp2; case 0x1a4: /* shld imm */ op = 0; shift = 1; goto do_shiftd; case 0x1a5: /* shld cl */ op = 0; shift = 0; goto do_shiftd; case 0x1ac: /* shrd imm */ op = 1; shift = 1; goto do_shiftd; case 0x1ad: /* shrd cl */ op = 1; shift = 0; do_shiftd: ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); mod = (modrm >> 6) & 3; rm = modrm & 7; reg = (modrm >> 3) & 7; if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_ld_T0_A0[ot](); } else { gen_op_mov_TN_reg[ot][0][rm](); } gen_op_mov_TN_reg[ot][1][reg](); if (shift) { val = ldub(s->pc++); val &= 0x1f; if (val) { gen_op_shiftd_T0_T1_im_cc[ot - OT_WORD][op](val); if (op == 0 && ot != OT_WORD) s->cc_op = CC_OP_SHLB + ot; else s->cc_op = CC_OP_SARB + ot; } } else { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_shiftd_T0_T1_ECX_cc[ot - OT_WORD][op](); s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ } if (mod != 3) { gen_op_st_T0_A0[ot](); } else { gen_op_mov_reg_T0[ot][rm](); } break; /************************/ /* floats */ case 0xd8 ... 0xdf: modrm = ldub(s->pc++); mod = (modrm >> 6) & 3; rm = modrm & 7; op = ((b & 7) << 3) | ((modrm >> 3) & 7); if (mod != 3) { /* memory op */ gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); switch(op) { case 0x00 ... 0x07: /* fxxxs */ case 0x10 ... 0x17: /* fixxxl */ case 0x20 ... 0x27: /* fxxxl */ case 0x30 ... 
0x37: /* fixxx */ { int op1; op1 = op & 7; switch(op >> 4) { case 0: gen_op_flds_FT0_A0(); break; case 1: gen_op_fildl_FT0_A0(); break; case 2: gen_op_fldl_FT0_A0(); break; case 3: default: gen_op_fild_FT0_A0(); break; } gen_op_fp_arith_ST0_FT0[op1](); if (op1 == 3) { /* fcomp needs pop */ gen_op_fpop(); } } break; case 0x08: /* flds */ case 0x0a: /* fsts */ case 0x0b: /* fstps */ case 0x18: /* fildl */ case 0x1a: /* fistl */ case 0x1b: /* fistpl */ case 0x28: /* fldl */ case 0x2a: /* fstl */ case 0x2b: /* fstpl */ case 0x38: /* filds */ case 0x3a: /* fists */ case 0x3b: /* fistps */ switch(op & 7) { case 0: gen_op_fpush(); switch(op >> 4) { case 0: gen_op_flds_ST0_A0(); break; case 1: gen_op_fildl_ST0_A0(); break; case 2: gen_op_fldl_ST0_A0(); break; case 3: default: gen_op_fild_ST0_A0(); break; } break; default: switch(op >> 4) { case 0: gen_op_fsts_ST0_A0(); break; case 1: gen_op_fistl_ST0_A0(); break; case 2: gen_op_fstl_ST0_A0(); break; case 3: default: gen_op_fist_ST0_A0(); break; } if ((op & 7) == 3) gen_op_fpop(); break; } break; case 0x0d: /* fldcw mem */ gen_op_fldcw_A0(); break; case 0x0f: /* fnstcw mem */ gen_op_fnstcw_A0(); break; case 0x1d: /* fldt mem */ gen_op_fpush(); gen_op_fldt_ST0_A0(); break; case 0x1f: /* fstpt mem */ gen_op_fstt_ST0_A0(); gen_op_fpop(); break; case 0x2f: /* fnstsw mem */ gen_op_fnstsw_A0(); break; case 0x3c: /* fbld */ gen_op_fpush(); gen_op_fbld_ST0_A0(); break; case 0x3e: /* fbstp */ gen_op_fbst_ST0_A0(); gen_op_fpop(); break; case 0x3d: /* fildll */ gen_op_fpush(); gen_op_fildll_ST0_A0(); break; case 0x3f: /* fistpll */ gen_op_fistll_ST0_A0(); gen_op_fpop(); break; default: goto illegal_op; } } else { /* register float ops */ opreg = rm; switch(op) { case 0x08: /* fld sti */ gen_op_fpush(); gen_op_fmov_ST0_STN((opreg + 1) & 7); break; case 0x09: /* fxchg sti */ gen_op_fxchg_ST0_STN(opreg); break; case 0x0a: /* grp d9/2 */ switch(rm) { case 0: /* fnop */ break; default: goto illegal_op; } break; case 0x0c: /* grp d9/4 */ switch(rm) { case 0: /* fchs */ gen_op_fchs_ST0(); break; case 1: /* fabs */ gen_op_fabs_ST0(); break; case 4: /* ftst */ gen_op_fldz_FT0(); gen_op_fcom_ST0_FT0(); break; case 5: /* fxam */ gen_op_fxam_ST0(); break; default: goto illegal_op; } break; case 0x0d: /* grp d9/5 */ { switch(rm) { case 0: gen_op_fpush(); gen_op_fld1_ST0(); break; case 1: gen_op_fpush(); gen_op_fldl2t_ST0(); break; case 2: gen_op_fpush(); gen_op_fldl2e_ST0(); break; case 3: gen_op_fpush(); gen_op_fldpi_ST0(); break; case 4: gen_op_fpush(); gen_op_fldlg2_ST0(); break; case 5: gen_op_fpush(); gen_op_fldln2_ST0(); break; case 6: gen_op_fpush(); gen_op_fldz_ST0(); break; default: goto illegal_op; } } break; case 0x0e: /* grp d9/6 */ switch(rm) { case 0: /* f2xm1 */ gen_op_f2xm1(); break; case 1: /* fyl2x */ gen_op_fyl2x(); break; case 2: /* fptan */ gen_op_fptan(); break; case 3: /* fpatan */ gen_op_fpatan(); break; case 4: /* fxtract */ gen_op_fxtract(); break; case 5: /* fprem1 */ gen_op_fprem1(); break; case 6: /* fdecstp */ gen_op_fdecstp(); break; default: case 7: /* fincstp */ gen_op_fincstp(); break; } break; case 0x0f: /* grp d9/7 */ switch(rm) { case 0: /* fprem */ gen_op_fprem(); break; case 1: /* fyl2xp1 */ gen_op_fyl2xp1(); break; case 2: /* fsqrt */ gen_op_fsqrt(); break; case 3: /* fsincos */ gen_op_fsincos(); break; case 5: /* fscale */ gen_op_fscale(); break; case 4: /* frndint */ gen_op_frndint(); break; case 6: /* fsin */ gen_op_fsin(); break; default: case 7: /* fcos */ gen_op_fcos(); break; } break; case 0x00: case 0x01: case 0x04 ... 
0x07: /* fxxx st, sti */ case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */ case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */ { int op1; op1 = op & 7; if (op >= 0x20) { gen_op_fp_arith_STN_ST0[op1](opreg); if (op >= 0x30) gen_op_fpop(); } else { gen_op_fmov_FT0_STN(opreg); gen_op_fp_arith_ST0_FT0[op1](); } } break; case 0x02: /* fcom */ gen_op_fmov_FT0_STN(opreg); gen_op_fcom_ST0_FT0(); break; case 0x03: /* fcomp */ gen_op_fmov_FT0_STN(opreg); gen_op_fcom_ST0_FT0(); gen_op_fpop(); break; case 0x15: /* da/5 */ switch(rm) { case 1: /* fucompp */ gen_op_fmov_FT0_STN(1); gen_op_fucom_ST0_FT0(); gen_op_fpop(); gen_op_fpop(); break; default: goto illegal_op; } break; case 0x1c: switch(rm) { case 2: /* fclex */ gen_op_fclex(); break; case 3: /* fninit */ gen_op_fninit(); break; default: goto illegal_op; } break; case 0x2a: /* fst sti */ gen_op_fmov_STN_ST0(opreg); break; case 0x2b: /* fstp sti */ gen_op_fmov_STN_ST0(opreg); gen_op_fpop(); break; case 0x2c: /* fucom st(i) */ gen_op_fmov_FT0_STN(opreg); gen_op_fucom_ST0_FT0(); break; case 0x2d: /* fucomp st(i) */ gen_op_fmov_FT0_STN(opreg); gen_op_fucom_ST0_FT0(); gen_op_fpop(); break; case 0x33: /* de/3 */ switch(rm) { case 1: /* fcompp */ gen_op_fmov_FT0_STN(1); gen_op_fcom_ST0_FT0(); gen_op_fpop(); gen_op_fpop(); break; default: goto illegal_op; } break; case 0x3c: /* df/4 */ switch(rm) { case 0: gen_op_fnstsw_EAX(); break; default: goto illegal_op; } break; default: goto illegal_op; } } break; /************************/ /* string ops */ case 0xa4: /* movsS */ case 0xa5: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; if (prefixes & PREFIX_REPZ) { gen_op_movs[3 + ot](); } else { gen_op_movs[ot](); } break; case 0xaa: /* stosS */ case 0xab: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; if (prefixes & PREFIX_REPZ) { gen_op_stos[3 + ot](); } else { gen_op_stos[ot](); } break; case 0xac: /* lodsS */ case 0xad: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; if (prefixes & PREFIX_REPZ) { gen_op_lods[3 + ot](); } else { gen_op_lods[ot](); } break; case 0xae: /* scasS */ case 0xaf: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; if (prefixes & PREFIX_REPNZ) { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_scas[6 + ot](); s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ } else if (prefixes & PREFIX_REPZ) { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_scas[3 + ot](); s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ } else { gen_op_scas[ot](); s->cc_op = CC_OP_SUBB + ot; } break; case 0xa6: /* cmpsS */ case 0xa7: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; if (prefixes & PREFIX_REPNZ) { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_cmps[6 + ot](); s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ } else if (prefixes & PREFIX_REPZ) { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_cmps[3 + ot](); s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */ } else { gen_op_cmps[ot](); s->cc_op = CC_OP_SUBB + ot; } break; /************************/ /* port I/O */ case 0x6c: /* insS */ case 0x6d: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; if (prefixes & PREFIX_REPZ) { gen_op_ins[3 + ot](); } else { gen_op_ins[ot](); } break; case 0x6e: /* outsS */ case 0x6f: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? 
OT_LONG : OT_WORD; if (prefixes & PREFIX_REPZ) { gen_op_outs[3 + ot](); } else { gen_op_outs[ot](); } break; case 0xe4: case 0xe5: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; val = ldub(s->pc++); gen_op_movl_T0_im(val); gen_op_in[ot](); gen_op_mov_reg_T1[ot][R_EAX](); break; case 0xe6: case 0xe7: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; val = ldub(s->pc++); gen_op_movl_T0_im(val); gen_op_mov_TN_reg[ot][1][R_EAX](); gen_op_out[ot](); break; case 0xec: case 0xed: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; gen_op_mov_TN_reg[OT_WORD][0][R_EDX](); gen_op_in[ot](); gen_op_mov_reg_T1[ot][R_EAX](); break; case 0xee: case 0xef: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; gen_op_mov_TN_reg[OT_WORD][0][R_EDX](); gen_op_mov_TN_reg[ot][1][R_EAX](); gen_op_out[ot](); break; /************************/ /* control */ case 0xc2: /* ret im */ val = ldsw(s->pc); s->pc += 2; gen_pop_T0(s); if (s->ss32) gen_op_addl_ESP_im(val + (2 << s->dflag)); else gen_op_addw_ESP_im(val + (2 << s->dflag)); if (s->dflag == 0) gen_op_andl_T0_ffff(); gen_op_jmp_T0(); s->is_jmp = 1; break; case 0xc3: /* ret */ gen_pop_T0(s); gen_pop_update(s); if (s->dflag == 0) gen_op_andl_T0_ffff(); gen_op_jmp_T0(); s->is_jmp = 1; break; case 0xca: /* lret im */ val = ldsw(s->pc); s->pc += 2; /* pop offset */ gen_pop_T0(s); if (s->dflag == 0) gen_op_andl_T0_ffff(); gen_op_jmp_T0(); gen_pop_update(s); /* pop selector */ gen_pop_T0(s); gen_movl_seg_T0(s, R_CS); gen_pop_update(s); /* add stack offset */ if (s->ss32) gen_op_addl_ESP_im(val + (2 << s->dflag)); else gen_op_addw_ESP_im(val + (2 << s->dflag)); s->is_jmp = 1; break; case 0xcb: /* lret */ /* pop offset */ gen_pop_T0(s); if (s->dflag == 0) gen_op_andl_T0_ffff(); gen_op_jmp_T0(); gen_pop_update(s); /* pop selector */ gen_pop_T0(s); gen_movl_seg_T0(s, R_CS); gen_pop_update(s); s->is_jmp = 1; break; case 0xe8: /* call im */ { unsigned int next_eip; ot = dflag ? OT_LONG : OT_WORD; val = insn_get(s, ot); next_eip = s->pc - s->cs_base; val += next_eip; if (s->dflag == 0) val &= 0xffff; gen_op_movl_T0_im(next_eip); gen_push_T0(s); gen_op_jmp_im(val); s->is_jmp = 1; } break; case 0x9a: /* lcall im */ { unsigned int selector, offset; ot = dflag ? OT_LONG : OT_WORD; offset = insn_get(s, ot); selector = insn_get(s, OT_WORD); /* push return segment + offset */ gen_op_movl_T0_seg(R_CS); gen_push_T0(s); next_eip = s->pc - s->cs_base; gen_op_movl_T0_im(next_eip); gen_push_T0(s); /* change cs and pc */ gen_op_movl_T0_im(selector); gen_movl_seg_T0(s, R_CS); gen_op_jmp_im((unsigned long)offset); s->is_jmp = 1; } break; case 0xe9: /* jmp */ ot = dflag ? OT_LONG : OT_WORD; val = insn_get(s, ot); val += s->pc - s->cs_base; if (s->dflag == 0) val = val & 0xffff; gen_op_jmp_im(val); s->is_jmp = 1; break; case 0xea: /* ljmp im */ { unsigned int selector, offset; ot = dflag ? OT_LONG : OT_WORD; offset = insn_get(s, ot); selector = insn_get(s, OT_WORD); /* change cs and pc */ gen_op_movl_T0_im(selector); gen_movl_seg_T0(s, R_CS); gen_op_jmp_im((unsigned long)offset); s->is_jmp = 1; } break; case 0xeb: /* jmp Jb */ val = (int8_t)insn_get(s, OT_BYTE); val += s->pc - s->cs_base; if (s->dflag == 0) val = val & 0xffff; gen_op_jmp_im(val); s->is_jmp = 1; break; case 0x70 ... 0x7f: /* jcc Jb */ val = (int8_t)insn_get(s, OT_BYTE); goto do_jcc; case 0x180 ... 
0x18f: /* jcc Jv */ if (dflag) { val = insn_get(s, OT_LONG); } else { val = (int16_t)insn_get(s, OT_WORD); } do_jcc: next_eip = s->pc - s->cs_base; val += next_eip; if (s->dflag == 0) val &= 0xffff; gen_jcc(s, b, val, next_eip); s->is_jmp = 1; break; case 0x190 ... 0x19f: /* setcc Gv */ modrm = ldub(s->pc++); gen_setcc(s, b); gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1); break; case 0x140 ... 0x14f: /* cmov Gv, Ev */ ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; gen_setcc(s, b); if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_ld_T1_A0[ot](); } else { rm = modrm & 7; gen_op_mov_TN_reg[ot][1][rm](); } gen_op_cmov_reg_T1_T0[ot - OT_WORD][reg](); break; /************************/ /* flags */ case 0x9c: /* pushf */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_movl_T0_eflags(); gen_push_T0(s); break; case 0x9d: /* popf */ gen_pop_T0(s); gen_op_movl_eflags_T0(); gen_pop_update(s); s->cc_op = CC_OP_EFLAGS; break; case 0x9e: /* sahf */ gen_op_mov_TN_reg[OT_BYTE][0][R_AH](); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_movb_eflags_T0(); s->cc_op = CC_OP_EFLAGS; break; case 0x9f: /* lahf */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_movl_T0_eflags(); gen_op_mov_reg_T0[OT_BYTE][R_AH](); break; case 0xf5: /* cmc */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_cmc(); s->cc_op = CC_OP_EFLAGS; break; case 0xf8: /* clc */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_clc(); s->cc_op = CC_OP_EFLAGS; break; case 0xf9: /* stc */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_stc(); s->cc_op = CC_OP_EFLAGS; break; case 0xfc: /* cld */ gen_op_cld(); break; case 0xfd: /* std */ gen_op_std(); break; /************************/ /* bit operations */ case 0x1ba: /* bt/bts/btr/btc Gv, im */ ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); op = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; rm = modrm & 7; if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_ld_T0_A0[ot](); } else { gen_op_mov_TN_reg[ot][0][rm](); } /* load shift */ val = ldub(s->pc++); gen_op_movl_T1_im(val); if (op < 4) goto illegal_op; op -= 4; gen_op_btx_T0_T1_cc[ot - OT_WORD][op](); s->cc_op = CC_OP_SARB + ot; if (op != 0) { if (mod != 3) gen_op_st_T0_A0[ot](); else gen_op_mov_reg_T0[ot][rm](); } break; case 0x1a3: /* bt Gv, Ev */ op = 0; goto do_btx; case 0x1ab: /* bts */ op = 1; goto do_btx; case 0x1b3: /* btr */ op = 2; goto do_btx; case 0x1bb: /* btc */ op = 3; do_btx: ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; rm = modrm & 7; gen_op_mov_TN_reg[OT_LONG][1][reg](); if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); /* specific case: we need to add a displacement */ if (ot == OT_WORD) gen_op_add_bitw_A0_T1(); else gen_op_add_bitl_A0_T1(); gen_op_ld_T0_A0[ot](); } else { gen_op_mov_TN_reg[ot][0][rm](); } gen_op_btx_T0_T1_cc[ot - OT_WORD][op](); s->cc_op = CC_OP_SARB + ot; if (op != 0) { if (mod != 3) gen_op_st_T0_A0[ot](); else gen_op_mov_reg_T0[ot][rm](); } break; case 0x1bc: /* bsf */ case 0x1bd: /* bsr */ ot = dflag ? OT_LONG : OT_WORD; modrm = ldub(s->pc++); reg = (modrm >> 3) & 7; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); gen_op_bsx_T0_cc[ot - OT_WORD][b & 1](); /* NOTE: we always write back the result. 
Intel doc says it is undefined if T0 == 0 */ gen_op_mov_reg_T0[ot][reg](); s->cc_op = CC_OP_LOGICB + ot; break; /************************/ /* bcd */ case 0x27: /* daa */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_daa(); s->cc_op = CC_OP_EFLAGS; break; case 0x2f: /* das */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_das(); s->cc_op = CC_OP_EFLAGS; break; case 0x37: /* aaa */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_aaa(); s->cc_op = CC_OP_EFLAGS; break; case 0x3f: /* aas */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_aas(); s->cc_op = CC_OP_EFLAGS; break; case 0xd4: /* aam */ val = ldub(s->pc++); gen_op_aam(val); s->cc_op = CC_OP_LOGICB; break; case 0xd5: /* aad */ val = ldub(s->pc++); gen_op_aad(val); s->cc_op = CC_OP_LOGICB; break; /************************/ /* misc */ case 0x90: /* nop */ break; case 0xcc: /* int3 */ gen_op_int3((long)pc_start); s->is_jmp = 1; break; case 0xcd: /* int N */ val = ldub(s->pc++); /* XXX: currently we ignore the interrupt number */ gen_op_int_im((long)pc_start); s->is_jmp = 1; break; case 0xce: /* into */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_into((long)pc_start, (long)s->pc); s->is_jmp = 1; break; case 0x1c8 ... 0x1cf: /* bswap reg */ reg = b & 7; gen_op_mov_TN_reg[OT_LONG][0][reg](); gen_op_bswapl_T0(); gen_op_mov_reg_T0[OT_LONG][reg](); break; case 0xd6: /* salc */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_salc(); break; case 0xe0: /* loopnz */ case 0xe1: /* loopz */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); /* FALL THRU */ case 0xe2: /* loop */ case 0xe3: /* jecxz */ val = (int8_t)insn_get(s, OT_BYTE); next_eip = s->pc - s->cs_base; val += next_eip; if (s->dflag == 0) val &= 0xffff; gen_op_loop[s->aflag][b & 3](val, next_eip); s->is_jmp = 1; break; case 0x131: /* rdtsc */ gen_op_rdtsc(); break; #if 0 case 0x1a2: /* cpuid */ gen_insn0(OP_ASM); break; #endif default: goto illegal_op; } /* lock generation */ if (s->prefix & PREFIX_LOCK) gen_op_unlock(); return (long)s->pc; illegal_op: /* XXX: ensure that no lock was generated */ return -1; }
17,034
qemu
273a2142176098fe2c27f263d86ad66b133b43cb
0
static void pci_mmio_map(PCIDevice * pci_dev, int region_num,
                         uint32_t addr, uint32_t size, int type)
{
    PCIEEPRO100State *d = DO_UPCAST(PCIEEPRO100State, dev, pci_dev);

    logout("region %d, addr=0x%08x, size=0x%08x, type=%d\n",
           region_num, addr, size, type);

    if (region_num == 0) {
        /* Map control / status registers. */
        cpu_register_physical_memory(addr, size, d->eepro100.mmio_index);
        d->eepro100.region[region_num] = addr;
    }
}
17,035
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}
17,037
qemu
e9cb190ad4cea8e6fd24afb973c5007b9a439bc9
0
static void ivshmem_check_memdev_is_busy(const Object *obj, const char *name,
                                         Object *val, Error **errp)
{
    if (host_memory_backend_is_mapped(MEMORY_BACKEND(val))) {
        char *path = object_get_canonical_path_component(val);

        error_setg(errp, "can't use already busy memdev: %s", path);
        g_free(path);
    } else {
        qdev_prop_allow_set_link_before_realize(obj, name, val, errp);
    }
}
17,038
qemu
4618e658e6dadd1ba53585157984eac71cb706c6
0
static int connect_to_ssh(BDRVSSHState *s, QDict *options, int ssh_flags, int creat_mode) { int r, ret; Error *err = NULL; const char *host, *user, *path, *host_key_check; int port; host = qdict_get_str(options, "host"); if (qdict_haskey(options, "port")) { port = qdict_get_int(options, "port"); } else { port = 22; } path = qdict_get_str(options, "path"); if (qdict_haskey(options, "user")) { user = qdict_get_str(options, "user"); } else { user = g_get_user_name(); if (!user) { ret = -errno; goto err; } } if (qdict_haskey(options, "host_key_check")) { host_key_check = qdict_get_str(options, "host_key_check"); } else { host_key_check = "yes"; } /* Construct the host:port name for inet_connect. */ g_free(s->hostport); s->hostport = g_strdup_printf("%s:%d", host, port); /* Open the socket and connect. */ s->sock = inet_connect(s->hostport, &err); if (err != NULL) { ret = -errno; qerror_report_err(err); error_free(err); goto err; } /* Create SSH session. */ s->session = libssh2_session_init(); if (!s->session) { ret = -EINVAL; session_error_report(s, "failed to initialize libssh2 session"); goto err; } #if TRACE_LIBSSH2 != 0 libssh2_trace(s->session, TRACE_LIBSSH2); #endif r = libssh2_session_handshake(s->session, s->sock); if (r != 0) { ret = -EINVAL; session_error_report(s, "failed to establish SSH session"); goto err; } /* Check the remote host's key against known_hosts. */ ret = check_host_key(s, host, port, host_key_check, &err); if (ret < 0) { qerror_report_err(err); error_free(err); goto err; } /* Authenticate. */ ret = authenticate(s, user); if (ret < 0) { goto err; } /* Start SFTP. */ s->sftp = libssh2_sftp_init(s->session); if (!s->sftp) { session_error_report(s, "failed to initialize sftp handle"); ret = -EINVAL; goto err; } /* Open the remote file. */ DPRINTF("opening file %s flags=0x%x creat_mode=0%o", path, ssh_flags, creat_mode); s->sftp_handle = libssh2_sftp_open(s->sftp, path, ssh_flags, creat_mode); if (!s->sftp_handle) { session_error_report(s, "failed to open remote file '%s'", path); ret = -EINVAL; goto err; } r = libssh2_sftp_fstat(s->sftp_handle, &s->attrs); if (r < 0) { sftp_error_report(s, "failed to read file attributes"); return -EINVAL; } /* Delete the options we've used; any not deleted will cause the * block layer to give an error about unused options. */ qdict_del(options, "host"); qdict_del(options, "port"); qdict_del(options, "user"); qdict_del(options, "path"); qdict_del(options, "host_key_check"); return 0; err: if (s->sftp_handle) { libssh2_sftp_close(s->sftp_handle); } s->sftp_handle = NULL; if (s->sftp) { libssh2_sftp_shutdown(s->sftp); } s->sftp = NULL; if (s->session) { libssh2_session_disconnect(s->session, "from qemu ssh client: " "error opening connection"); libssh2_session_free(s->session); } s->session = NULL; return ret; }
17,039
qemu
b131c74a0e485b084ddaffc8214c8a19af492be7
0
int kvm_irqchip_add_irq_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return -ENOSYS;
}
17,040
qemu
7ec1e5ea4bd0700fa48da86bffa2fcc6146c410a
0
static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
                       int insn, int size)
{
    TCGv_i32 r_asi, r_size;

    r_asi = gen_get_asi(dc, insn);
    r_size = tcg_const_i32(size);

#ifdef TARGET_SPARC64
    gen_helper_st_asi(cpu_env, addr, src, r_asi, r_size);
#else
    {
        TCGv_i64 t64 = tcg_temp_new_i64();
        tcg_gen_extu_tl_i64(t64, src);
        gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_size);
        tcg_temp_free_i64(t64);
    }
#endif

    tcg_temp_free_i32(r_size);
    tcg_temp_free_i32(r_asi);
}
17,041
FFmpeg
1ec83d9a9e472f485897ac92bad9631d551a8c5b
0
static double tget_double(const uint8_t **p, int le)
{
    av_alias64 i = { .u64 = le ? AV_RL64(*p) : AV_RB64(*p)};
    *p += 8;
    return i.f64;
}
17,042
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
void empty_slot_init(target_phys_addr_t addr, uint64_t slot_size)
{
    if (slot_size > 0) {
        /* Only empty slots larger than 0 byte need handling. */
        DeviceState *dev;
        SysBusDevice *s;
        EmptySlot *e;

        dev = qdev_create(NULL, "empty_slot");
        s = sysbus_from_qdev(dev);
        e = FROM_SYSBUS(EmptySlot, s);
        e->size = slot_size;
        qdev_init_nofail(dev);
        sysbus_mmio_map(s, 0, addr);
    }
}
17,044
qemu
4678124bb9bfb49e93b83f95c4d2feeb443ea38b
0
build_fadt(GArray *table_data, BIOSLinker *linker, unsigned dsdt)
{
    AcpiFadtDescriptorRev5_1 *fadt = acpi_data_push(table_data, sizeof(*fadt));

    /* Hardware Reduced = 1 and use PSCI 0.2+ and with HVC */
    fadt->flags = cpu_to_le32(1 << ACPI_FADT_F_HW_REDUCED_ACPI);
    fadt->arm_boot_flags = cpu_to_le16((1 << ACPI_FADT_ARM_USE_PSCI_G_0_2) |
                                       (1 << ACPI_FADT_ARM_PSCI_USE_HVC));

    /* ACPI v5.1 (fadt->revision.fadt->minor_revision) */
    fadt->minor_revision = 0x1;

    fadt->dsdt = cpu_to_le32(dsdt);
    /* DSDT address to be filled by Guest linker */
    bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE,
                                   ACPI_BUILD_TABLE_FILE,
                                   &fadt->dsdt, sizeof fadt->dsdt);

    build_header(linker, table_data, (void *)fadt, "FACP",
                 sizeof(*fadt), 5, NULL, NULL);
}
17,045
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
void apic_enable_vapic(DeviceState *d, target_phys_addr_t paddr)
{
    APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
    APICCommonClass *info = APIC_COMMON_GET_CLASS(s);

    s->vapic_paddr = paddr;
    info->vapic_base_update(s);
}
17,046
qemu
32bafa8fdd098d52fbf1102d5a5e48d29398c0aa
0
void qmp_blockdev_snapshot_internal_sync(const char *device,
                                         const char *name,
                                         Error **errp)
{
    BlockdevSnapshotInternal snapshot = {
        .device = (char *) device,
        .name = (char *) name
    };
    TransactionAction action = {
        .type = TRANSACTION_ACTION_KIND_BLOCKDEV_SNAPSHOT_INTERNAL_SYNC,
        .u.blockdev_snapshot_internal_sync = &snapshot,
    };

    blockdev_do_action(&action, errp);
}
17,047
qemu
284197e41f0fe98d58ce5e8acd4966c91f28c4bd
0
void nvdimm_acpi_hotplug(AcpiNVDIMMState *state)
{
    nvdimm_build_fit_buffer(&state->fit_buf);
}
17,049
qemu
6a042827b638dc73da6a72c72596f5be80bd4581
0
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, int mmu_idx) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); hwaddr raddr, pte_addr; uint64_t lpid = 0, pid = 0, offset, size, patbe, prtbe0, pte; int page_size, prot, fault_cause = 0; assert((rwx == 0) || (rwx == 1) || (rwx == 2)); assert(!msr_hv); /* For now there is no Radix PowerNV Support */ assert(cpu->vhyp); assert(ppc64_use_proc_tbl(cpu)); /* Real Mode Access */ if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) { /* In real mode top 4 effective addr bits (mostly) ignored */ raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL; tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, TARGET_PAGE_SIZE); return 0; } /* Virtual Mode Access - get the fully qualified address */ if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) { ppc_radix64_raise_segi(cpu, rwx, eaddr); return 1; } /* Get Process Table */ patbe = vhc->get_patbe(cpu->vhyp); /* Index Process Table by PID to Find Corresponding Process Table Entry */ offset = pid * sizeof(struct prtb_entry); size = 1ULL << ((patbe & PATBE1_R_PRTS) + 12); if (offset >= size) { /* offset exceeds size of the process table */ ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE); return 1; } prtbe0 = ldq_phys(cs->as, (patbe & PATBE1_R_PRTB) + offset); /* Walk Radix Tree from Process Table Entry to Convert EA to RA */ page_size = PRTBE_R_GET_RTS(prtbe0); pte = ppc_radix64_walk_tree(cpu, rwx, eaddr & R_EADDR_MASK, prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS, &raddr, &page_size, &fault_cause, &prot, &pte_addr); if (!pte) { ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause); return 1; } /* Update Reference and Change Bits */ ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot); tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, prot, mmu_idx, 1UL << page_size); return 0; }
17,050
FFmpeg
919f3554387e043bdfe10c6369356d1104882183
1
static inline void dxt1_decode_pixels(const uint8_t *s, uint32_t *d, unsigned int qstride, unsigned int flag, uint64_t alpha) { unsigned int x, y, c0, c1, a = (!flag * 255u) << 24; unsigned int rb0, rb1, rb2, rb3, g0, g1, g2, g3; uint32_t colors[4], pixels; c0 = AV_RL16(s); c1 = AV_RL16(s+2); rb0 = (c0<<3 | c0<<8) & 0xf800f8; rb1 = (c1<<3 | c1<<8) & 0xf800f8; rb0 += (rb0>>5) & 0x070007; rb1 += (rb1>>5) & 0x070007; g0 = (c0 <<5) & 0x00fc00; g1 = (c1 <<5) & 0x00fc00; g0 += (g0 >>6) & 0x000300; g1 += (g1 >>6) & 0x000300; colors[0] = rb0 + g0 + a; colors[1] = rb1 + g1 + a; if (c0 > c1 || flag) { rb2 = (((2*rb0+rb1) * 21) >> 6) & 0xff00ff; rb3 = (((2*rb1+rb0) * 21) >> 6) & 0xff00ff; g2 = (((2*g0 +g1 ) * 21) >> 6) & 0x00ff00; g3 = (((2*g1 +g0 ) * 21) >> 6) & 0x00ff00; colors[3] = rb3 + g3 + a; } else { rb2 = ((rb0+rb1) >> 1) & 0xff00ff; g2 = ((g0 +g1 ) >> 1) & 0x00ff00; colors[3] = 0; } colors[2] = rb2 + g2 + a; pixels = AV_RL32(s+4); for (y=0; y<4; y++) { for (x=0; x<4; x++) { a = (alpha & 0x0f) << 28; a += a >> 4; d[x] = a + colors[pixels&3]; pixels >>= 2; alpha >>= 4; } d += qstride; } }
17,051
qemu
2ba1eeb62c29d23238b95dc7e9ade3444b49f0a1
1
int cpu_restore_state(TranslationBlock *tb, CPUState *env, unsigned long searched_pc, void *puc) { TCGContext *s = &tcg_ctx; int j; unsigned long tc_ptr; #ifdef CONFIG_PROFILER int64_t ti; #endif #ifdef CONFIG_PROFILER ti = profile_getclock(); #endif tcg_func_start(s); if (gen_intermediate_code_pc(env, tb) < 0) return -1; /* find opc index corresponding to search_pc */ tc_ptr = (unsigned long)tb->tc_ptr; if (searched_pc < tc_ptr) return -1; s->tb_next_offset = tb->tb_next_offset; #ifdef USE_DIRECT_JUMP s->tb_jmp_offset = tb->tb_jmp_offset; s->tb_next = NULL; #else s->tb_jmp_offset = NULL; s->tb_next = tb->tb_next; #endif j = dyngen_code_search_pc(s, (uint8_t *)tc_ptr, (void *)searched_pc); if (j < 0) return -1; /* now find start of instruction before */ while (gen_opc_instr_start[j] == 0) j--; #if defined(TARGET_I386) { int cc_op; #ifdef DEBUG_DISAS if (loglevel & CPU_LOG_TB_OP) { int i; fprintf(logfile, "RESTORE:\n"); for(i=0;i<=j; i++) { if (gen_opc_instr_start[i]) { fprintf(logfile, "0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]); } } fprintf(logfile, "spc=0x%08lx j=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n", searched_pc, j, gen_opc_pc[j] - tb->cs_base, (uint32_t)tb->cs_base); } #endif env->eip = gen_opc_pc[j] - tb->cs_base; cc_op = gen_opc_cc_op[j]; if (cc_op != CC_OP_DYNAMIC) env->cc_op = cc_op; } #elif defined(TARGET_ARM) env->regs[15] = gen_opc_pc[j]; #elif defined(TARGET_SPARC) { target_ulong npc; env->pc = gen_opc_pc[j]; npc = gen_opc_npc[j]; if (npc == 1) { /* dynamic NPC: already stored */ } else if (npc == 2) { target_ulong t2 = (target_ulong)(unsigned long)puc; /* jump PC: use T2 and the jump targets of the translation */ if (t2) env->npc = gen_opc_jump_pc[0]; else env->npc = gen_opc_jump_pc[1]; } else { env->npc = npc; } } #elif defined(TARGET_PPC) { int type, c; /* for PPC, we need to look at the micro operation to get the access type */ env->nip = gen_opc_pc[j]; c = gen_opc_buf[j]; switch(c) { #if defined(CONFIG_USER_ONLY) #define CASE3(op)\ case INDEX_op_ ## op ## _raw #else #define CASE3(op)\ case INDEX_op_ ## op ## _user:\ case INDEX_op_ ## op ## _kernel:\ case INDEX_op_ ## op ## _hypv #endif CASE3(stfd): CASE3(stfs): CASE3(lfd): CASE3(lfs): type = ACCESS_FLOAT; break; CASE3(lwarx): type = ACCESS_RES; break; CASE3(stwcx): type = ACCESS_RES; break; CASE3(eciwx): CASE3(ecowx): type = ACCESS_EXT; break; default: type = ACCESS_INT; break; } env->access_type = type; } #elif defined(TARGET_M68K) env->pc = gen_opc_pc[j]; #elif defined(TARGET_MIPS) env->PC[env->current_tc] = gen_opc_pc[j]; env->hflags &= ~MIPS_HFLAG_BMASK; env->hflags |= gen_opc_hflags[j]; #elif defined(TARGET_ALPHA) env->pc = gen_opc_pc[j]; #elif defined(TARGET_SH4) env->pc = gen_opc_pc[j]; env->flags = gen_opc_hflags[j]; #endif #ifdef CONFIG_PROFILER dyngen_restore_time += profile_getclock() - ti; dyngen_restore_count++; #endif return 0; }
17,052
FFmpeg
544286b3d39365b30298ae07e66a755200b0895c
1
void h263_encode_picture_header(MpegEncContext * s, int picture_number) { int format; align_put_bits(&s->pb); put_bits(&s->pb, 22, 0x20); put_bits(&s->pb, 8, ((s->picture_number * 30 * FRAME_RATE_BASE) / s->frame_rate) & 0xff); put_bits(&s->pb, 1, 1); /* marker */ put_bits(&s->pb, 1, 0); /* h263 id */ put_bits(&s->pb, 1, 0); /* split screen off */ put_bits(&s->pb, 1, 0); /* camera off */ put_bits(&s->pb, 1, 0); /* freeze picture release off */ format = h263_get_picture_format(s->width, s->height); if (!s->h263_plus) { /* H.263v1 */ put_bits(&s->pb, 3, format); put_bits(&s->pb, 1, (s->pict_type == P_TYPE)); /* By now UMV IS DISABLED ON H.263v1, since the restrictions of H.263v1 UMV implies to check the predicted MV after calculation of the current MB to see if we're on the limits */ put_bits(&s->pb, 1, 0); /* unrestricted motion vector: off */ put_bits(&s->pb, 1, 0); /* SAC: off */ put_bits(&s->pb, 1, 0); /* advanced prediction mode: off */ put_bits(&s->pb, 1, 0); /* not PB frame */ put_bits(&s->pb, 5, s->qscale); put_bits(&s->pb, 1, 0); /* Continuous Presence Multipoint mode: off */ } else { /* H.263v2 */ /* H.263 Plus PTYPE */ put_bits(&s->pb, 3, 7); put_bits(&s->pb,3,1); /* Update Full Extended PTYPE */ if (format == 7) put_bits(&s->pb,3,6); /* Custom Source Format */ else put_bits(&s->pb, 3, format); put_bits(&s->pb,1,0); /* Custom PCF: off */ umvplus = (s->pict_type == P_TYPE) && s->unrestricted_mv; put_bits(&s->pb, 1, umvplus); /* Unrestricted Motion Vector */ put_bits(&s->pb,1,0); /* SAC: off */ put_bits(&s->pb,1,0); /* Advanced Prediction Mode: off */ put_bits(&s->pb,1,0); /* Advanced Intra Coding: off */ put_bits(&s->pb,1,0); /* Deblocking Filter: off */ put_bits(&s->pb,1,0); /* Slice Structured: off */ put_bits(&s->pb,1,0); /* Reference Picture Selection: off */ put_bits(&s->pb,1,0); /* Independent Segment Decoding: off */ put_bits(&s->pb,1,0); /* Alternative Inter VLC: off */ put_bits(&s->pb,1,0); /* Modified Quantization: off */ put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */ put_bits(&s->pb,3,0); /* Reserved */ put_bits(&s->pb, 3, s->pict_type == P_TYPE); put_bits(&s->pb,1,0); /* Reference Picture Resampling: off */ put_bits(&s->pb,1,0); /* Reduced-Resolution Update: off */ put_bits(&s->pb,1,0); /* Rounding Type */ put_bits(&s->pb,2,0); /* Reserved */ put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */ /* This should be here if PLUSPTYPE */ put_bits(&s->pb, 1, 0); /* Continuous Presence Multipoint mode: off */ if (format == 7) { /* Custom Picture Format (CPFMT) */ put_bits(&s->pb,4,2); /* Aspect ratio: CIF 12:11 (4:3) picture */ put_bits(&s->pb,9,(s->width >> 2) - 1); put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */ put_bits(&s->pb,9,(s->height >> 2)); } /* Unlimited Unrestricted Motion Vectors Indicator (UUI) */ if (umvplus) put_bits(&s->pb,1,1); /* Limited according tables of Annex D */ put_bits(&s->pb, 5, s->qscale); } put_bits(&s->pb, 1, 0); /* no PEI */ }
17,053
qemu
0dacea92d26c31d453c58de2e99c178fee554166
1
_net_rx_pkt_calc_l4_csum(struct NetRxPkt *pkt) { uint32_t cntr; uint16_t csum; uint16_t csl; uint32_t cso; trace_net_rx_pkt_l4_csum_calc_entry(); if (pkt->isip4) { if (pkt->isudp) { csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen); trace_net_rx_pkt_l4_csum_calc_ip4_udp(); } else { csl = be16_to_cpu(pkt->ip4hdr_info.ip4_hdr.ip_len) - IP_HDR_GET_LEN(&pkt->ip4hdr_info.ip4_hdr); trace_net_rx_pkt_l4_csum_calc_ip4_tcp(); } cntr = eth_calc_ip4_pseudo_hdr_csum(&pkt->ip4hdr_info.ip4_hdr, csl, &cso); trace_net_rx_pkt_l4_csum_calc_ph_csum(cntr, csl); } else { if (pkt->isudp) { csl = be16_to_cpu(pkt->l4hdr_info.hdr.udp.uh_ulen); trace_net_rx_pkt_l4_csum_calc_ip6_udp(); } else { struct ip6_header *ip6hdr = &pkt->ip6hdr_info.ip6_hdr; size_t full_ip6hdr_len = pkt->l4hdr_off - pkt->l3hdr_off; size_t ip6opts_len = full_ip6hdr_len - sizeof(struct ip6_header); csl = be16_to_cpu(ip6hdr->ip6_ctlun.ip6_un1.ip6_un1_plen) - ip6opts_len; trace_net_rx_pkt_l4_csum_calc_ip6_tcp(); } cntr = eth_calc_ip6_pseudo_hdr_csum(&pkt->ip6hdr_info.ip6_hdr, csl, pkt->ip6hdr_info.l4proto, &cso); trace_net_rx_pkt_l4_csum_calc_ph_csum(cntr, csl); } cntr += net_checksum_add_iov(pkt->vec, pkt->vec_len, pkt->l4hdr_off, csl, cso); csum = net_checksum_finish(cntr); trace_net_rx_pkt_l4_csum_calc_csum(pkt->l4hdr_off, csl, cntr, csum); return csum; }
17,054
qemu
3996e85c1822e05c50250f8d2d1e57b6bea1229d
1
int xen_hvm_init(ram_addr_t *below_4g_mem_size, ram_addr_t *above_4g_mem_size, MemoryRegion **ram_memory) { int i, rc; unsigned long ioreq_pfn; unsigned long bufioreq_evtchn; XenIOState *state; state = g_malloc0(sizeof (XenIOState)); state->xce_handle = xen_xc_evtchn_open(NULL, 0); if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) { perror("xen: event channel open"); return -1; } state->xenstore = xs_daemon_open(); if (state->xenstore == NULL) { perror("xen: xenstore open"); return -1; } state->exit.notify = xen_exit_notifier; qemu_add_exit_notifier(&state->exit); state->suspend.notify = xen_suspend_notifier; qemu_register_suspend_notifier(&state->suspend); state->wakeup.notify = xen_wakeup_notifier; qemu_register_wakeup_notifier(&state->wakeup); xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn); DPRINTF("shared page at pfn %lx\n", ioreq_pfn); state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE, PROT_READ|PROT_WRITE, ioreq_pfn); if (state->shared_page == NULL) { hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT, errno, xen_xc); } rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn); if (!rc) { DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn); state->shared_vmport_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE, PROT_READ|PROT_WRITE, ioreq_pfn); if (state->shared_vmport_page == NULL) { hw_error("map shared vmport IO page returned error %d handle=" XC_INTERFACE_FMT, errno, xen_xc); } } else if (rc != -ENOSYS) { hw_error("get vmport regs pfn returned error %d, rc=%d", errno, rc); } xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn); DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn); state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE, PROT_READ|PROT_WRITE, ioreq_pfn); if (state->buffered_io_page == NULL) { hw_error("map buffered IO page returned error %d", errno); } /* Note: cpus is empty at this point in init */ state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *)); state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t)); /* FIXME: how about if we overflow the page here? */ for (i = 0; i < max_cpus; i++) { rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid, xen_vcpu_eport(state->shared_page, i)); if (rc == -1) { fprintf(stderr, "bind interdomain ioctl error %d\n", errno); return -1; } state->ioreq_local_port[i] = rc; } rc = xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN, &bufioreq_evtchn); if (rc < 0) { fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n"); return -1; } rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid, (uint32_t)bufioreq_evtchn); if (rc == -1) { fprintf(stderr, "bind interdomain ioctl error %d\n", errno); return -1; } state->bufioreq_local_port = rc; /* Init RAM management */ xen_map_cache_init(xen_phys_offset_to_gaddr, state); xen_ram_init(below_4g_mem_size, above_4g_mem_size, ram_size, ram_memory); qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state); state->memory_listener = xen_memory_listener; QLIST_INIT(&state->physmap); memory_listener_register(&state->memory_listener, &address_space_memory); state->log_for_dirtybit = NULL; /* Initialize backend core & drivers */ if (xen_be_init() != 0) { fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__); return -1; } xen_be_register("console", &xen_console_ops); xen_be_register("vkbd", &xen_kbdmouse_ops); xen_be_register("qdisk", &xen_blkdev_ops); xen_read_physmap(state); return 0; }
17,055
FFmpeg
8136f234445862c94d1c081606b2d1e3d44fccf3
1
static void yop_paint_block(YopDecContext *s, int tag)
{
    s->dstptr[0]                        = s->srcptr[0];
    s->dstptr[1]                        = s->srcptr[paint_lut[tag][0]];
    s->dstptr[s->frame.linesize[0]]     = s->srcptr[paint_lut[tag][1]];
    s->dstptr[s->frame.linesize[0] + 1] = s->srcptr[paint_lut[tag][2]];

    // The number of src bytes consumed is in the last part of the lut entry.
    s->srcptr += paint_lut[tag][3];
}
17,057
FFmpeg
fdbd924b84e85ac5c80f01ee059ed5c81d3cc205
1
av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
{
    RV34DecContext *r = avctx->priv_data;
    MpegEncContext *s = &r->s;
    int ret;

    ff_MPV_decode_defaults(s);
    s->avctx      = avctx;
    s->out_format = FMT_H263;
    s->codec_id   = avctx->codec_id;
    s->width      = avctx->width;
    s->height     = avctx->height;
    r->s.avctx    = avctx;
    avctx->flags |= CODEC_FLAG_EMU_EDGE;
    r->s.flags   |= CODEC_FLAG_EMU_EDGE;
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    avctx->has_b_frames = 1;
    s->low_delay = 0;

    if ((ret = ff_MPV_common_init(s)) < 0)
        return ret;

    ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);

#if CONFIG_RV30_DECODER
    if (avctx->codec_id == AV_CODEC_ID_RV30)
        ff_rv30dsp_init(&r->rdsp);
#endif
#if CONFIG_RV40_DECODER
    if (avctx->codec_id == AV_CODEC_ID_RV40)
        ff_rv40dsp_init(&r->rdsp);
#endif

    if ((ret = rv34_decoder_alloc(r)) < 0)
        return ret;

    if(!intra_vlcs[0].cbppattern[0].bits)
        rv34_init_tables();

    avctx->internal->allocate_progress = 1;

    return 0;
}
17,058
FFmpeg
90e8317b3b33dcb54ae01e419d85cbbfbd874963
1
static int flic_decode_frame_24BPP(AVCodecContext *avctx, void *data, int *got_frame, const uint8_t *buf, int buf_size) { FlicDecodeContext *s = avctx->priv_data; GetByteContext g2; int pixel_ptr; unsigned char palette_idx1; unsigned int frame_size; int num_chunks; unsigned int chunk_size; int chunk_type; int i, j, ret; int lines; int compressed_lines; signed short line_packets; int y_ptr; int byte_run; int pixel_skip; int pixel_countdown; unsigned char *pixels; int pixel; unsigned int pixel_limit; bytestream2_init(&g2, buf, buf_size); if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) return ret; pixels = s->frame->data[0]; pixel_limit = s->avctx->height * s->frame->linesize[0]; frame_size = bytestream2_get_le32(&g2); bytestream2_skip(&g2, 2); /* skip the magic number */ num_chunks = bytestream2_get_le16(&g2); bytestream2_skip(&g2, 8); /* skip padding */ if (frame_size > buf_size) frame_size = buf_size; if (frame_size < 16) frame_size -= 16; /* iterate through the chunks */ while ((frame_size > 0) && (num_chunks > 0) && bytestream2_get_bytes_left(&g2) >= 4) { int stream_ptr_after_chunk; chunk_size = bytestream2_get_le32(&g2); if (chunk_size > frame_size) { av_log(avctx, AV_LOG_WARNING, "Invalid chunk_size = %u > frame_size = %u\n", chunk_size, frame_size); chunk_size = frame_size; } stream_ptr_after_chunk = bytestream2_tell(&g2) - 4 + chunk_size; chunk_type = bytestream2_get_le16(&g2); switch (chunk_type) { case FLI_256_COLOR: case FLI_COLOR: /* For some reason, it seems that non-palettized flics do * include one of these chunks in their first frame. * Why I do not know, it seems rather extraneous. */ ff_dlog(avctx, "Unexpected Palette chunk %d in non-palettized FLC\n", chunk_type); bytestream2_skip(&g2, chunk_size - 6); break; case FLI_DELTA: case FLI_DTA_LC: y_ptr = 0; compressed_lines = bytestream2_get_le16(&g2); while (compressed_lines > 0) { if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk) break; line_packets = bytestream2_get_le16(&g2); if (line_packets < 0) { line_packets = -line_packets; y_ptr += line_packets * s->frame->linesize[0]; } else { compressed_lines--; pixel_ptr = y_ptr; CHECK_PIXEL_PTR(0); pixel_countdown = s->avctx->width; for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk) break; pixel_skip = bytestream2_get_byte(&g2); pixel_ptr += (pixel_skip*3); /* Pixel is 3 bytes wide */ pixel_countdown -= pixel_skip; byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run < 0) { byte_run = -byte_run; pixel = bytestream2_get_le24(&g2); CHECK_PIXEL_PTR(3 * byte_run); for (j = 0; j < byte_run; j++, pixel_countdown -= 1) { AV_WL24(&pixels[pixel_ptr], pixel); pixel_ptr += 3; } } else { if (bytestream2_tell(&g2) + 2*byte_run > stream_ptr_after_chunk) break; CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++, pixel_countdown--) { pixel = bytestream2_get_le24(&g2); AV_WL24(&pixels[pixel_ptr], pixel); pixel_ptr += 3; } } } y_ptr += s->frame->linesize[0]; } } break; case FLI_LC: av_log(avctx, AV_LOG_ERROR, "Unexpected FLI_LC chunk in non-palettized FLC\n"); bytestream2_skip(&g2, chunk_size - 6); break; case FLI_BLACK: /* set the whole frame to 0x00 which is black for 24 bit mode. 
*/ memset(pixels, 0x00, s->frame->linesize[0] * s->avctx->height); break; case FLI_BRUN: y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ bytestream2_skip(&g2, 1); pixel_countdown = (s->avctx->width * 3); while (pixel_countdown > 0) { if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk) break; byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run > 0) { palette_idx1 = bytestream2_get_byte(&g2); CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) (linea%d)\n", pixel_countdown, lines); } } else { /* copy bytes if byte_run < 0 */ byte_run = -byte_run; if (bytestream2_tell(&g2) + byte_run > stream_ptr_after_chunk) break; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { palette_idx1 = bytestream2_get_byte(&g2); pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n", pixel_countdown, lines); } } } y_ptr += s->frame->linesize[0]; } break; case FLI_DTA_BRUN: y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ bytestream2_skip(&g2, 1); pixel_countdown = s->avctx->width; /* Width is in pixels, not bytes */ while (pixel_countdown > 0) { if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk) break; byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run > 0) { pixel = bytestream2_get_le24(&g2); CHECK_PIXEL_PTR(3 * byte_run); for (j = 0; j < byte_run; j++) { AV_WL24(pixels + pixel_ptr, pixel); pixel_ptr += 3; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n", pixel_countdown); } } else { /* copy pixels if byte_run < 0 */ byte_run = -byte_run; if (bytestream2_tell(&g2) + 3 * byte_run > stream_ptr_after_chunk) break; CHECK_PIXEL_PTR(3 * byte_run); for (j = 0; j < byte_run; j++) { pixel = bytestream2_get_le24(&g2); AV_WL24(pixels + pixel_ptr, pixel); pixel_ptr += 3; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n", pixel_countdown); } } } y_ptr += s->frame->linesize[0]; } break; case FLI_COPY: case FLI_DTA_COPY: /* copy the chunk (uncompressed frame) */ if (chunk_size - 6 > (unsigned int)(FFALIGN(s->avctx->width, 2) * s->avctx->height)*3) { av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \ "bigger than image, skipping chunk\n", chunk_size - 6); bytestream2_skip(&g2, chunk_size - 6); } else { for (y_ptr = 0; y_ptr < s->frame->linesize[0] * s->avctx->height; y_ptr += s->frame->linesize[0]) { pixel_countdown = s->avctx->width; pixel_ptr = 0; while (pixel_countdown > 0) { pixel = bytestream2_get_le24(&g2); AV_WL24(&pixels[y_ptr + pixel_ptr], pixel); pixel_ptr += 3; pixel_countdown--; } if (s->avctx->width & 1) bytestream2_skip(&g2, 3); } } break; case FLI_MINI: /* some sort of a thumbnail? disregard this chunk... 
*/ bytestream2_skip(&g2, chunk_size - 6); break; default: av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type); break; } if (stream_ptr_after_chunk - bytestream2_tell(&g2) >= 0) { bytestream2_skip(&g2, stream_ptr_after_chunk - bytestream2_tell(&g2)); } else { av_log(avctx, AV_LOG_ERROR, "Chunk overread\n"); break; } frame_size -= chunk_size; num_chunks--; } /* by the end of the chunk, the stream ptr should equal the frame * size (minus 1, possibly); if it doesn't, issue a warning */ if ((bytestream2_get_bytes_left(&g2) != 0) && (bytestream2_get_bytes_left(&g2) != 1)) av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \ "and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2)); if ((ret = av_frame_ref(data, s->frame)) < 0) return ret; *got_frame = 1; return buf_size; }
17,059
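The FLIC decoder in the entry above leans on a signed byte-run RLE scheme: a positive run count means one value is repeated, a negative count means that many literal bytes follow. The following stand-alone sketch shows only that pattern; the function name, buffer arguments and error handling are illustrative and not part of FFmpeg.

#include <stdint.h>
#include <stddef.h>

/* Decode FLI-style byte runs from 'src' into 'dst' (capacity dst_size).
 * Returns bytes written, or -1 on malformed input.
 * Illustrative only; the real decoder also tracks chunk boundaries. */
static ptrdiff_t decode_byte_runs(const uint8_t *src, size_t src_size,
                                  uint8_t *dst, size_t dst_size)
{
    size_t in = 0, out = 0;

    while (in < src_size && out < dst_size) {
        int8_t run = (int8_t)src[in++];          /* signed run length */
        if (run > 0) {                           /* replicate one byte */
            if (in >= src_size || out + (size_t)run > dst_size)
                return -1;
            uint8_t value = src[in++];
            for (int j = 0; j < run; j++)
                dst[out++] = value;
        } else if (run < 0) {                    /* copy literal bytes */
            size_t n = (size_t)(-run);
            if (in + n > src_size || out + n > dst_size)
                return -1;
            for (size_t j = 0; j < n; j++)
                dst[out++] = src[in++];
        }                                        /* run == 0: nothing to do */
    }
    return (ptrdiff_t)out;
}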
FFmpeg
02dd3666c2944a3db44ba13916ba40dbdd41f9b1
1
static int h264_mp4toannexb_filter(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe) { H264BSFContext *ctx = bsfc->priv_data; uint8_t unit_type; int32_t nal_size; uint32_t cumul_size = 0; const uint8_t *buf_end = buf + buf_size; /* nothing to filter */ if (!avctx->extradata || avctx->extradata_size < 6) { *poutbuf = (uint8_t*) buf; *poutbuf_size = buf_size; return 0; } /* retrieve sps and pps NAL units from extradata */ if (!ctx->extradata_parsed) { uint16_t unit_size; uint64_t total_size = 0; uint8_t *out = NULL, unit_nb, sps_done = 0; const uint8_t *extradata = avctx->extradata+4; static const uint8_t nalu_header[4] = {0, 0, 0, 1}; /* retrieve length coded size */ ctx->length_size = (*extradata++ & 0x3) + 1; if (ctx->length_size == 3) return AVERROR(EINVAL); /* retrieve sps and pps unit(s) */ unit_nb = *extradata++ & 0x1f; /* number of sps unit(s) */ if (!unit_nb) { unit_nb = *extradata++; /* number of pps unit(s) */ sps_done++; } while (unit_nb--) { void *tmp; unit_size = AV_RB16(extradata); total_size += unit_size+4; if (total_size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE || extradata+2+unit_size > avctx->extradata+avctx->extradata_size) { av_free(out); return AVERROR(EINVAL); } tmp = av_realloc(out, total_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!tmp) { av_free(out); return AVERROR(ENOMEM); } out = tmp; memcpy(out+total_size-unit_size-4, nalu_header, 4); memcpy(out+total_size-unit_size, extradata+2, unit_size); extradata += 2+unit_size; if (!unit_nb && !sps_done++) unit_nb = *extradata++; /* number of pps unit(s) */ } memset(out + total_size, 0, FF_INPUT_BUFFER_PADDING_SIZE); av_free(avctx->extradata); avctx->extradata = out; avctx->extradata_size = total_size; ctx->first_idr = 1; ctx->extradata_parsed = 1; } *poutbuf_size = 0; *poutbuf = NULL; do { if (buf + ctx->length_size > buf_end) goto fail; if (ctx->length_size == 1) { nal_size = buf[0]; } else if (ctx->length_size == 2) { nal_size = AV_RB16(buf); } else nal_size = AV_RB32(buf); buf += ctx->length_size; unit_type = *buf & 0x1f; if (buf + nal_size > buf_end || nal_size < 0) goto fail; /* prepend only to the first type 5 NAL unit of an IDR picture */ if (ctx->first_idr && unit_type == 5) { if (alloc_and_copy(poutbuf, poutbuf_size, avctx->extradata, avctx->extradata_size, buf, nal_size) < 0) goto fail; ctx->first_idr = 0; } else { if (alloc_and_copy(poutbuf, poutbuf_size, NULL, 0, buf, nal_size) < 0) goto fail; if (!ctx->first_idr && unit_type == 1) ctx->first_idr = 1; } buf += nal_size; cumul_size += nal_size + ctx->length_size; } while (cumul_size < buf_size); return 1; fail: av_freep(poutbuf); *poutbuf_size = 0; return AVERROR(EINVAL); }
17,060
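The bitstream filter in the entry above converts length-prefixed ("mp4"-style) H.264 NAL units into Annex B format. A schematic sketch of just that core substitution is shown below; it assumes a fixed 4-byte length field and omits the SPS/PPS prepending and IDR handling, and the helper name is invented for the example.

#include <stdint.h>
#include <string.h>

/* Replace each 4-byte big-endian NAL length prefix with a 00 00 00 01
 * start code. 'out' must be at least in_size bytes (prefix and start code
 * have equal length). Returns bytes written, or -1 on truncated input.
 * Sketch only, not FFmpeg's actual filter. */
static int mp4_nals_to_annexb(const uint8_t *in, int in_size, uint8_t *out)
{
    static const uint8_t start_code[4] = { 0, 0, 0, 1 };
    int ip = 0, op = 0;

    while (ip + 4 <= in_size) {
        uint32_t nal_size = ((uint32_t)in[ip] << 24) | ((uint32_t)in[ip + 1] << 16) |
                            ((uint32_t)in[ip + 2] << 8) | (uint32_t)in[ip + 3];
        ip += 4;
        if (nal_size > (uint32_t)(in_size - ip))
            return -1;                       /* truncated NAL unit */
        memcpy(out + op, start_code, 4);     /* emit 00 00 00 01 */
        memcpy(out + op + 4, in + ip, nal_size);
        ip += nal_size;
        op += (int)nal_size + 4;
    }
    return op;
}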
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static void dchip_write(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned size) { /* Skip this. It's all related to DRAM timing and setup. */ }
17,061
qemu
19494f811a43c6bc226aa272d86300d9229224fe
0
qemu_irq get_cps_irq(MIPSCPSState *s, int pin_number) { MIPSCPU *cpu = MIPS_CPU(first_cpu); CPUMIPSState *env = &cpu->env; assert(pin_number < s->num_irq); /* TODO: return GIC pins once implemented */ return env->irq[pin_number]; }
17,062
qemu
455aa1e0818653c41fd794435b982426ce21ba2f
0
static int tcp_chr_write(CharDriverState *chr, const uint8_t *buf, int len) { TCPCharDriver *s = chr->opaque; if (s->connected) { return send_all(s->fd, buf, len); } else { /* (Re-)connect for unconnected writing */ tcp_chr_connect(chr); return 0; } }
17,063
qemu
9f56640c8536a8dfb78fc05a39c1bf9921483b12
0
static int img_create(int argc, char **argv) { int c, ret, flags; const char *fmt = "raw"; const char *base_fmt = NULL; const char *filename; const char *base_filename = NULL; BlockDriver *drv; QEMUOptionParameter *param = NULL; char *options = NULL; flags = 0; for(;;) { c = getopt(argc, argv, "F:b:f:he6o:"); if (c == -1) break; switch(c) { case 'h': help(); break; case 'F': base_fmt = optarg; break; case 'b': base_filename = optarg; break; case 'f': fmt = optarg; break; case 'e': flags |= BLOCK_FLAG_ENCRYPT; break; case '6': flags |= BLOCK_FLAG_COMPAT6; break; case 'o': options = optarg; break; } } /* Find driver and parse its options */ drv = bdrv_find_format(fmt); if (!drv) error("Unknown file format '%s'", fmt); if (options && !strcmp(options, "?")) { print_option_help(drv->create_options); return 0; } if (options) { param = parse_option_parameters(options, drv->create_options, param); if (param == NULL) { error("Invalid options for file format '%s'.", fmt); } } else { param = parse_option_parameters("", drv->create_options, param); } /* Get the filename */ if (optind >= argc) help(); filename = argv[optind++]; /* Add size to parameters */ if (optind < argc) { set_option_parameter(param, BLOCK_OPT_SIZE, argv[optind++]); } /* Add old-style options to parameters */ add_old_style_options(fmt, param, flags, base_filename, base_fmt); // The size for the image must always be specified, with one exception: // If we are using a backing file, we can obtain the size from there if (get_option_parameter(param, BLOCK_OPT_SIZE)->value.n == 0) { QEMUOptionParameter *backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE); QEMUOptionParameter *backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT); if (backing_file && backing_file->value.s) { BlockDriverState *bs; uint64_t size; const char *fmt = NULL; char buf[32]; if (backing_fmt && backing_fmt->value.s) { if (bdrv_find_format(backing_fmt->value.s)) { fmt = backing_fmt->value.s; } else { error("Unknown backing file format '%s'", backing_fmt->value.s); } } bs = bdrv_new_open(backing_file->value.s, fmt); bdrv_get_geometry(bs, &size); size *= 512; bdrv_delete(bs); snprintf(buf, sizeof(buf), "%" PRId64, size); set_option_parameter(param, BLOCK_OPT_SIZE, buf); } else { error("Image creation needs a size parameter"); } } printf("Formatting '%s', fmt=%s ", filename, fmt); print_option_parameters(param); puts(""); ret = bdrv_create(drv, filename, param); free_option_parameters(param); if (ret < 0) { if (ret == -ENOTSUP) { error("Formatting or formatting option not supported for file format '%s'", fmt); } else if (ret == -EFBIG) { error("The image size is too large for file format '%s'", fmt); } else { error("Error while formatting"); } } return 0; }
17,064
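img_create() in the entry above is built around a standard getopt() loop. A minimal skeleton of that idiom follows; the option letters, variables and messages are placeholders, not qemu-img's real interface.

#include <stdio.h>
#include <unistd.h>

/* Bare-bones option parsing in the style of img_create(). Illustrative only. */
int main(int argc, char **argv)
{
    const char *fmt  = "raw";    /* -f: image format, default "raw" */
    const char *size = NULL;     /* -s: image size string */
    int c;

    for (;;) {
        c = getopt(argc, argv, "f:s:h");
        if (c == -1)
            break;
        switch (c) {
        case 'h':
            printf("usage: %s [-f fmt] [-s size] filename\n", argv[0]);
            return 0;
        case 'f':
            fmt = optarg;
            break;
        case 's':
            size = optarg;
            break;
        default:                 /* unknown option: getopt already warned */
            return 1;
        }
    }
    if (optind >= argc) {
        fprintf(stderr, "missing filename\n");
        return 1;
    }
    printf("create %s, fmt=%s, size=%s\n",
           argv[optind], fmt, size ? size : "(default)");
    return 0;
}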
FFmpeg
cb5469462d427ea38625e255306f07b37d75280f
0
static int fill_filter_caches(H264Context *h, int mb_type){ MpegEncContext * const s = &h->s; const int mb_xy= h->mb_xy; int top_xy, left_xy[2]; int top_type, left_type[2]; top_xy = mb_xy - (s->mb_stride << MB_FIELD); //FIXME deblocking could skip the intra and nnz parts. /* Wow, what a mess, why didn't they simplify the interlacing & intra * stuff, I can't imagine that these complex rules are worth it. */ left_xy[1] = left_xy[0] = mb_xy-1; if(FRAME_MBAFF){ const int left_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]); const int curr_mb_field_flag = IS_INTERLACED(mb_type); if(s->mb_y&1){ if (left_mb_field_flag != curr_mb_field_flag) { left_xy[0] -= s->mb_stride; } }else{ if(curr_mb_field_flag){ top_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy ]>>7)&1)-1); } if (left_mb_field_flag != curr_mb_field_flag) { left_xy[1] += s->mb_stride; } } } h->top_mb_xy = top_xy; h->left_mb_xy[0] = left_xy[0]; h->left_mb_xy[1] = left_xy[1]; { //for sufficiently low qp, filtering wouldn't do anything //this is a conservative estimate: could also check beta_offset and more accurate chroma_qp int qp_thresh = h->qp_thresh; //FIXME strictly we should store qp_thresh for each mb of a slice int qp = s->current_picture.qscale_table[mb_xy]; if(qp <= qp_thresh && (left_xy[0]<0 || ((qp + s->current_picture.qscale_table[left_xy[0]] + 1)>>1) <= qp_thresh) && (top_xy < 0 || ((qp + s->current_picture.qscale_table[top_xy ] + 1)>>1) <= qp_thresh)){ if(!FRAME_MBAFF) return 1; if( (left_xy[0]< 0 || ((qp + s->current_picture.qscale_table[left_xy[1] ] + 1)>>1) <= qp_thresh) && (top_xy < s->mb_stride || ((qp + s->current_picture.qscale_table[top_xy -s->mb_stride] + 1)>>1) <= qp_thresh)) return 1; } } top_type = s->current_picture.mb_type[top_xy] ; left_type[0] = s->current_picture.mb_type[left_xy[0]]; left_type[1] = s->current_picture.mb_type[left_xy[1]]; if(h->deblocking_filter == 2){ if(h->slice_table[top_xy ] != h->slice_num) top_type= 0; if(h->slice_table[left_xy[0] ] != h->slice_num) left_type[0]= left_type[1]= 0; }else{ if(h->slice_table[top_xy ] == 0xFFFF) top_type= 0; if(h->slice_table[left_xy[0] ] == 0xFFFF) left_type[0]= left_type[1] =0; } h->top_type = top_type ; h->left_type[0]= left_type[0]; h->left_type[1]= left_type[1]; if(IS_INTRA(mb_type)) return 0; AV_COPY32(&h->non_zero_count_cache[4+8* 1], &h->non_zero_count[mb_xy][ 0]); AV_COPY32(&h->non_zero_count_cache[4+8* 2], &h->non_zero_count[mb_xy][ 4]); AV_COPY32(&h->non_zero_count_cache[4+8* 3], &h->non_zero_count[mb_xy][ 8]); AV_COPY32(&h->non_zero_count_cache[4+8* 4], &h->non_zero_count[mb_xy][12]); h->cbp= h->cbp_table[mb_xy]; { int list; for(list=0; list<h->list_count; list++){ int8_t *ref; int y, b_stride; int16_t (*mv_dst)[2]; int16_t (*mv_src)[2]; if(!USES_LIST(mb_type, list)){ fill_rectangle( h->mv_cache[list][scan8[0]], 4, 4, 8, pack16to32(0,0), 4); AV_WN32A(&h->ref_cache[list][scan8[ 0]], ((LIST_NOT_USED)&0xFF)*0x01010101u); AV_WN32A(&h->ref_cache[list][scan8[ 2]], ((LIST_NOT_USED)&0xFF)*0x01010101u); AV_WN32A(&h->ref_cache[list][scan8[ 8]], ((LIST_NOT_USED)&0xFF)*0x01010101u); AV_WN32A(&h->ref_cache[list][scan8[10]], ((LIST_NOT_USED)&0xFF)*0x01010101u); continue; } ref = &s->current_picture.ref_index[list][4*mb_xy]; { int (*ref2frm)[64] = h->ref2frm[ h->slice_num&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 
20 : 2); AV_WN32A(&h->ref_cache[list][scan8[ 0]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101); AV_WN32A(&h->ref_cache[list][scan8[ 2]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101); ref += 2; AV_WN32A(&h->ref_cache[list][scan8[ 8]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101); AV_WN32A(&h->ref_cache[list][scan8[10]], (pack16to32(ref2frm[list][ref[0]],ref2frm[list][ref[1]])&0x00FF00FF)*0x0101); } b_stride = h->b_stride; mv_dst = &h->mv_cache[list][scan8[0]]; mv_src = &s->current_picture.motion_val[list][4*s->mb_x + 4*s->mb_y*b_stride]; for(y=0; y<4; y++){ AV_COPY128(mv_dst + 8*y, mv_src + y*b_stride); } } } /* 0 . T T. T T T T 1 L . .L . . . . 2 L . .L . . . . 3 . T TL . . . . 4 L . .L . . . . 5 L . .. . . . . */ //FIXME constraint_intra_pred & partitioning & nnz (let us hope this is just a typo in the spec) if(top_type){ AV_COPY32(&h->non_zero_count_cache[4+8*0], &h->non_zero_count[top_xy][3*4]); } if(left_type[0]){ h->non_zero_count_cache[3+8*1]= h->non_zero_count[left_xy[0]][3+0*4]; h->non_zero_count_cache[3+8*2]= h->non_zero_count[left_xy[0]][3+1*4]; h->non_zero_count_cache[3+8*3]= h->non_zero_count[left_xy[0]][3+2*4]; h->non_zero_count_cache[3+8*4]= h->non_zero_count[left_xy[0]][3+3*4]; } // CAVLC 8x8dct requires NNZ values for residual decoding that differ from what the loop filter needs if(!CABAC && h->pps.transform_8x8_mode){ if(IS_8x8DCT(top_type)){ h->non_zero_count_cache[4+8*0]= h->non_zero_count_cache[5+8*0]= (h->cbp_table[top_xy] & 0x4000) >> 12; h->non_zero_count_cache[6+8*0]= h->non_zero_count_cache[7+8*0]= (h->cbp_table[top_xy] & 0x8000) >> 12; } if(IS_8x8DCT(left_type[0])){ h->non_zero_count_cache[3+8*1]= h->non_zero_count_cache[3+8*2]= (h->cbp_table[left_xy[0]]&0x2000) >> 12; //FIXME check MBAFF } if(IS_8x8DCT(left_type[1])){ h->non_zero_count_cache[3+8*3]= h->non_zero_count_cache[3+8*4]= (h->cbp_table[left_xy[1]]&0x8000) >> 12; //FIXME check MBAFF } if(IS_8x8DCT(mb_type)){ h->non_zero_count_cache[scan8[0 ]]= h->non_zero_count_cache[scan8[1 ]]= h->non_zero_count_cache[scan8[2 ]]= h->non_zero_count_cache[scan8[3 ]]= (h->cbp & 0x1000) >> 12; h->non_zero_count_cache[scan8[0+ 4]]= h->non_zero_count_cache[scan8[1+ 4]]= h->non_zero_count_cache[scan8[2+ 4]]= h->non_zero_count_cache[scan8[3+ 4]]= (h->cbp & 0x2000) >> 12; h->non_zero_count_cache[scan8[0+ 8]]= h->non_zero_count_cache[scan8[1+ 8]]= h->non_zero_count_cache[scan8[2+ 8]]= h->non_zero_count_cache[scan8[3+ 8]]= (h->cbp & 0x4000) >> 12; h->non_zero_count_cache[scan8[0+12]]= h->non_zero_count_cache[scan8[1+12]]= h->non_zero_count_cache[scan8[2+12]]= h->non_zero_count_cache[scan8[3+12]]= (h->cbp & 0x8000) >> 12; } } if(IS_INTER(mb_type) || IS_DIRECT(mb_type)){ int list; for(list=0; list<h->list_count; list++){ if(USES_LIST(top_type, list)){ const int b_xy= h->mb2b_xy[top_xy] + 3*h->b_stride; const int b8_xy= 4*top_xy + 2; int (*ref2frm)[64] = h->ref2frm[ h->slice_table[top_xy]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 
20 : 2); AV_COPY128(h->mv_cache[list][scan8[0] + 0 - 1*8], s->current_picture.motion_val[list][b_xy + 0]); h->ref_cache[list][scan8[0] + 0 - 1*8]= h->ref_cache[list][scan8[0] + 1 - 1*8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 0]]; h->ref_cache[list][scan8[0] + 2 - 1*8]= h->ref_cache[list][scan8[0] + 3 - 1*8]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 1]]; }else{ AV_ZERO128(h->mv_cache[list][scan8[0] + 0 - 1*8]); AV_WN32A(&h->ref_cache[list][scan8[0] + 0 - 1*8], ((LIST_NOT_USED)&0xFF)*0x01010101u); } if(!IS_INTERLACED(mb_type^left_type[0])){ if(USES_LIST(left_type[0], list)){ const int b_xy= h->mb2b_xy[left_xy[0]] + 3; const int b8_xy= 4*left_xy[0] + 1; int (*ref2frm)[64] = h->ref2frm[ h->slice_table[left_xy[0]]&(MAX_SLICES-1) ][0] + (MB_MBAFF ? 20 : 2); AV_COPY32(h->mv_cache[list][scan8[0] - 1 + 0 ], s->current_picture.motion_val[list][b_xy + h->b_stride*0]); AV_COPY32(h->mv_cache[list][scan8[0] - 1 + 8 ], s->current_picture.motion_val[list][b_xy + h->b_stride*1]); AV_COPY32(h->mv_cache[list][scan8[0] - 1 +16 ], s->current_picture.motion_val[list][b_xy + h->b_stride*2]); AV_COPY32(h->mv_cache[list][scan8[0] - 1 +24 ], s->current_picture.motion_val[list][b_xy + h->b_stride*3]); h->ref_cache[list][scan8[0] - 1 + 0 ]= h->ref_cache[list][scan8[0] - 1 + 8 ]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 2*0]]; h->ref_cache[list][scan8[0] - 1 +16 ]= h->ref_cache[list][scan8[0] - 1 +24 ]= ref2frm[list][s->current_picture.ref_index[list][b8_xy + 2*1]]; }else{ AV_ZERO32(h->mv_cache [list][scan8[0] - 1 + 0 ]); AV_ZERO32(h->mv_cache [list][scan8[0] - 1 + 8 ]); AV_ZERO32(h->mv_cache [list][scan8[0] - 1 +16 ]); AV_ZERO32(h->mv_cache [list][scan8[0] - 1 +24 ]); h->ref_cache[list][scan8[0] - 1 + 0 ]= h->ref_cache[list][scan8[0] - 1 + 8 ]= h->ref_cache[list][scan8[0] - 1 + 16 ]= h->ref_cache[list][scan8[0] - 1 + 24 ]= LIST_NOT_USED; } } } } return 0; }
17,065
qemu
ddf21908961073199f3d186204da4810f2ea150b
0
static void test_visitor_in_list(TestInputVisitorData *data, const void *unused) { UserDefOneList *item, *head = NULL; Error *err = NULL; Visitor *v; int i; v = visitor_input_test_init(data, "[ { 'string': 'string0', 'integer': 42 }, { 'string': 'string1', 'integer': 43 }, { 'string': 'string2', 'integer': 44 } ]"); visit_type_UserDefOneList(v, &head, NULL, &err); g_assert(!err); g_assert(head != NULL); for (i = 0, item = head; item; item = item->next, i++) { char string[12]; snprintf(string, sizeof(string), "string%d", i); g_assert_cmpstr(item->value->string, ==, string); g_assert_cmpint(item->value->base->integer, ==, 42 + i); } qapi_free_UserDefOneList(head); }
17,066
qemu
eabb7b91b36b202b4dac2df2d59d698e3aff197a
0
static inline void tcg_out_ext8u(TCGContext *s, int dest, int src) { /* movzbl */ assert(src < 4 || TCG_TARGET_REG_BITS == 64); tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src); }
17,067
qemu
4b7a6bf402bd064605c287eecadc493ccf2d4897
0
static int cpu_post_load(void *opaque, int version_id) { ARMCPU *cpu = opaque; int i, v; /* Update the values list from the incoming migration data. * Anything in the incoming data which we don't know about is * a migration failure; anything we know about but the incoming * data doesn't specify retains its current (reset) value. * The indexes list remains untouched -- we only inspect the * incoming migration index list so we can match the values array * entries with the right slots in our own values array. */ for (i = 0, v = 0; i < cpu->cpreg_array_len && v < cpu->cpreg_vmstate_array_len; i++) { if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) { /* register in our list but not incoming : skip it */ continue; } if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) { /* register in their list but not ours: fail migration */ return -1; } /* matching register, copy the value over */ cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v]; v++; } if (kvm_enabled()) { if (!write_list_to_kvmstate(cpu)) { return -1; } /* Note that it's OK for the TCG side not to know about * every register in the list; KVM is authoritative if * we're using it. */ write_list_to_cpustate(cpu); } else { if (!write_list_to_cpustate(cpu)) { return -1; } } hw_breakpoint_update_all(cpu); hw_watchpoint_update_all(cpu); return 0; }
17,068
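cpu_post_load() in the entry above walks two sorted register-index arrays in lockstep: entries present only locally keep their reset value, entries present only in the incoming data fail the migration, and matching entries copy the incoming value. A stripped-down sketch of that two-pointer matching pattern, with invented array names, is given below.

#include <stdint.h>

/* Merge values for registers that appear in both sorted index arrays.
 * Returns 0 on success, -1 if the incoming list contains an index we
 * do not know about. Hypothetical helper; mirrors only the matching
 * logic, not the real migration code. */
static int merge_sorted_reg_lists(const uint64_t *local_idx, uint64_t *local_val,
                                  int local_len,
                                  const uint64_t *in_idx, const uint64_t *in_val,
                                  int in_len)
{
    int i, v;

    for (i = 0, v = 0; i < local_len && v < in_len; i++) {
        if (in_idx[v] > local_idx[i]) {
            continue;                 /* known locally, not sent: keep value */
        }
        if (in_idx[v] < local_idx[i]) {
            return -1;                /* sent to us, but unknown locally */
        }
        local_val[i] = in_val[v];     /* matching register: take new value */
        v++;
    }
    return v == in_len ? 0 : -1;      /* leftover incoming entries are unknown */
}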
qemu
e58481234ef9c132554cc529d9981ebd78fb6903
0
static void vhost_virtqueue_stop(struct vhost_dev *dev, struct VirtIODevice *vdev, struct vhost_virtqueue *vq, unsigned idx) { int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx); struct vhost_vring_state state = { .index = vhost_vq_index, }; int r; r = dev->vhost_ops->vhost_get_vring_base(dev, &state); if (r < 0) { fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r); fflush(stderr); } virtio_queue_set_last_avail_idx(vdev, idx, state.num); virtio_queue_invalidate_signalled_used(vdev, idx); /* In the cross-endian case, we need to reset the vring endianness to * native as legacy devices expect so by default. */ if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) && vhost_needs_vring_endian(vdev)) { r = vhost_virtqueue_set_vring_endian_legacy(dev, !virtio_is_big_endian(vdev), vhost_vq_index); if (r < 0) { error_report("failed to reset vring endianness"); } } assert (r >= 0); cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx), 0, virtio_queue_get_ring_size(vdev, idx)); cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx), 1, virtio_queue_get_used_size(vdev, idx)); cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx), 0, virtio_queue_get_avail_size(vdev, idx)); cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx), 0, virtio_queue_get_desc_size(vdev, idx)); }
17,069
qemu
273a2142176098fe2c27f263d86ad66b133b43cb
0
static void pci_map(PCIDevice * pci_dev, int region_num, uint32_t addr, uint32_t size, int type) { PCIEEPRO100State *d = DO_UPCAST(PCIEEPRO100State, dev, pci_dev); EEPRO100State *s = &d->eepro100; logout("region %d, addr=0x%08x, size=0x%08x, type=%d\n", region_num, addr, size, type); assert(region_num == 1); register_ioport_write(addr, size, 1, ioport_write1, s); register_ioport_read(addr, size, 1, ioport_read1, s); register_ioport_write(addr, size, 2, ioport_write2, s); register_ioport_read(addr, size, 2, ioport_read2, s); register_ioport_write(addr, size, 4, ioport_write4, s); register_ioport_read(addr, size, 4, ioport_read4, s); s->region[region_num] = addr; }
17,070
qemu
3098dba01c7daab60762b6f6624ea88c0d6cb65a
0
static void host_signal_handler(int host_signum, siginfo_t *info, void *puc) { int sig; target_siginfo_t tinfo; /* the CPU emulator uses some host signals to detect exceptions, we forward to it some signals */ if ((host_signum == SIGSEGV || host_signum == SIGBUS) && info->si_code > 0) { if (cpu_signal_handler(host_signum, info, puc)) return; } /* get target signal number */ sig = host_to_target_signal(host_signum); if (sig < 1 || sig > TARGET_NSIG) return; #if defined(DEBUG_SIGNAL) fprintf(stderr, "qemu: got signal %d\n", sig); #endif host_to_target_siginfo_noswap(&tinfo, info); if (queue_signal(thread_env, sig, &tinfo) == 1) { /* interrupt the virtual CPU as soon as possible */ cpu_interrupt(thread_env, CPU_INTERRUPT_EXIT); } }
17,071
qemu
a5cf8262e4eb9c4646434e2c6211ef8608db3233
0
static char *scsibus_get_fw_dev_path(DeviceState *dev) { SCSIDevice *d = SCSI_DEVICE(dev); char path[100]; snprintf(path, sizeof(path), "channel@%x/%s@%x,%x", d->channel, qdev_fw_name(dev), d->id, d->lun); return strdup(path); }
17,073
qemu
c39ce112b60ffafbaf700853e32bea74cbb2c148
0
static void lsi_do_command(LSIState *s) { SCSIDevice *dev; uint8_t buf[16]; uint32_t id; int n; DPRINTF("Send command len=%d\n", s->dbc); if (s->dbc > 16) s->dbc = 16; cpu_physical_memory_read(s->dnad, buf, s->dbc); s->sfbr = buf[0]; s->command_complete = 0; id = (s->select_tag >> 8) & 0xf; dev = s->bus.devs[id]; if (!dev) { lsi_bad_selection(s, id); return; } assert(s->current == NULL); s->current = qemu_mallocz(sizeof(lsi_request)); s->current->tag = s->select_tag; s->current->req = scsi_req_new(dev, s->current->tag, s->current_lun, s->current); n = scsi_req_enqueue(s->current->req, buf); if (n) { if (n > 0) { lsi_set_phase(s, PHASE_DI); } else if (n < 0) { lsi_set_phase(s, PHASE_DO); } scsi_req_continue(s->current->req); } if (!s->command_complete) { if (n) { /* Command did not complete immediately so disconnect. */ lsi_add_msg_byte(s, 2); /* SAVE DATA POINTER */ lsi_add_msg_byte(s, 4); /* DISCONNECT */ /* wait data */ lsi_set_phase(s, PHASE_MI); s->msg_action = 1; lsi_queue_command(s); } else { /* wait command complete */ lsi_set_phase(s, PHASE_DI); } } }
17,074
qemu
7e01376daea75e888c370aab521a7d4aeaf2ffd1
0
void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb) { ChscReq *req; ChscResp *res; uint64_t addr; int reg; uint16_t len; uint16_t command; CPUS390XState *env = &cpu->env; uint8_t buf[TARGET_PAGE_SIZE]; trace_ioinst("chsc"); reg = (ipb >> 20) & 0x00f; addr = env->regs[reg]; /* Page boundary? */ if (addr & 0xfff) { program_interrupt(env, PGM_SPECIFICATION, 2); return; } /* * Reading sizeof(ChscReq) bytes is currently enough for all of our * present CHSC sub-handlers ... if we ever need more, we should take * care of req->len here first. */ if (s390_cpu_virt_mem_read(cpu, addr, reg, buf, sizeof(ChscReq))) { return; } req = (ChscReq *)buf; len = be16_to_cpu(req->len); /* Length field valid? */ if ((len < 16) || (len > 4088) || (len & 7)) { program_interrupt(env, PGM_OPERAND, 2); return; } memset((char *)req + len, 0, TARGET_PAGE_SIZE - len); res = (void *)((char *)req + len); command = be16_to_cpu(req->command); trace_ioinst_chsc_cmd(command, len); switch (command) { case CHSC_SCSC: ioinst_handle_chsc_scsc(req, res); break; case CHSC_SCPD: ioinst_handle_chsc_scpd(req, res); break; case CHSC_SDA: ioinst_handle_chsc_sda(req, res); break; case CHSC_SEI: ioinst_handle_chsc_sei(req, res); break; default: ioinst_handle_chsc_unimplemented(res); break; } if (!s390_cpu_virt_mem_write(cpu, addr + len, reg, res, be16_to_cpu(res->len))) { setcc(cpu, 0); /* Command execution complete */ } }
17,075
FFmpeg
ebbcdc9ac0ea190748a1605bda86ce84466c8b4e
0
static int ff_estimate_motion_b(MpegEncContext * s, int mb_x, int mb_y, int16_t (*mv_table)[2], Picture *picture, int f_code) { int mx, my, range, dmin; int xmin, ymin, xmax, ymax; int rel_xmin, rel_ymin, rel_xmax, rel_ymax; int pred_x=0, pred_y=0; int P[10][2]; const int shift= 1+s->quarter_sample; const int mot_stride = s->mb_width + 2; const int mot_xy = (mb_y + 1)*mot_stride + mb_x + 1; uint8_t * const ref_picture= picture->data[0]; uint16_t * const mv_penalty= s->me.mv_penalty[f_code] + MAX_MV; int mv_scale; s->me.penalty_factor = get_penalty_factor(s, s->avctx->me_cmp); s->me.sub_penalty_factor= get_penalty_factor(s, s->avctx->me_sub_cmp); s->me.mb_penalty_factor = get_penalty_factor(s, s->avctx->mb_cmp); get_limits(s, &range, &xmin, &ymin, &xmax, &ymax, f_code); rel_xmin= xmin - mb_x*16; rel_xmax= xmax - mb_x*16; rel_ymin= ymin - mb_y*16; rel_ymax= ymax - mb_y*16; switch(s->me_method) { case ME_ZERO: default: no_motion_search(s, &mx, &my); dmin = 0; mx-= mb_x*16; my-= mb_y*16; break; case ME_FULL: dmin = full_motion_search(s, &mx, &my, range, xmin, ymin, xmax, ymax, ref_picture); mx-= mb_x*16; my-= mb_y*16; break; case ME_LOG: dmin = log_motion_search(s, &mx, &my, range / 2, xmin, ymin, xmax, ymax, ref_picture); mx-= mb_x*16; my-= mb_y*16; break; case ME_PHODS: dmin = phods_motion_search(s, &mx, &my, range / 2, xmin, ymin, xmax, ymax, ref_picture); mx-= mb_x*16; my-= mb_y*16; break; case ME_X1: case ME_EPZS: { P_LEFT[0] = mv_table[mot_xy - 1][0]; P_LEFT[1] = mv_table[mot_xy - 1][1]; if(P_LEFT[0] > (rel_xmax<<shift)) P_LEFT[0] = (rel_xmax<<shift); /* special case for first line */ if (mb_y) { P_TOP[0] = mv_table[mot_xy - mot_stride ][0]; P_TOP[1] = mv_table[mot_xy - mot_stride ][1]; P_TOPRIGHT[0] = mv_table[mot_xy - mot_stride + 1 ][0]; P_TOPRIGHT[1] = mv_table[mot_xy - mot_stride + 1 ][1]; if(P_TOP[1] > (rel_ymax<<shift)) P_TOP[1]= (rel_ymax<<shift); if(P_TOPRIGHT[0] < (rel_xmin<<shift)) P_TOPRIGHT[0]= (rel_xmin<<shift); if(P_TOPRIGHT[1] > (rel_ymax<<shift)) P_TOPRIGHT[1]= (rel_ymax<<shift); P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]); P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]); } pred_x= P_LEFT[0]; pred_y= P_LEFT[1]; } if(mv_table == s->b_forw_mv_table){ mv_scale= (s->pb_time<<16) / (s->pp_time<<shift); }else{ mv_scale= ((s->pb_time - s->pp_time)<<16) / (s->pp_time<<shift); } dmin = s->me.motion_search[0](s, 0, &mx, &my, P, pred_x, pred_y, rel_xmin, rel_ymin, rel_xmax, rel_ymax, picture, s->p_mv_table, mv_scale, mv_penalty); break; } dmin= s->me.sub_motion_search(s, &mx, &my, dmin, rel_xmin, rel_ymin, rel_xmax, rel_ymax, pred_x, pred_y, picture, 0, 0, mv_penalty); if(s->avctx->me_sub_cmp != s->avctx->mb_cmp && !s->me.skip) dmin= s->me.get_mb_score(s, mx, my, pred_x, pred_y, picture, mv_penalty); //printf("%d %d %d %d//", s->mb_x, s->mb_y, mx, my); // s->mb_type[mb_y*s->mb_width + mb_x]= mb_type; mv_table[mot_xy][0]= mx; mv_table[mot_xy][1]= my; return dmin; }
17,076
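The EPZS predictor setup in the entry above seeds the search with the median of the left, top and top-right motion vectors (mid_pred). A small stand-alone median-of-three, written naively for clarity rather than copied from FFmpeg, is shown below.

/* Return the median of three integers, as used for the (left, top,
 * top-right) motion-vector predictor. Illustrative sketch only. */
static int median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }   /* ensure a <= b           */
    if (b > c) { b = c; }                     /* b = min(max(a, b), c)   */
    return a > b ? a : b;                     /* median of the three     */
}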
qemu
3f9286b7214fbc7135d4fc223f07b0b30b91e2f1
0
int inet_connect_opts(QemuOpts *opts, Error **errp, NonBlockingConnectHandler *callback, void *opaque) { struct addrinfo *res, *e; int sock = -1; bool in_progress; ConnectState *connect_state = NULL; res = inet_parse_connect_opts(opts, errp); if (!res) { return -1; } if (callback != NULL) { connect_state = g_malloc0(sizeof(*connect_state)); connect_state->addr_list = res; connect_state->callback = callback; connect_state->opaque = opaque; } for (e = res; e != NULL; e = e->ai_next) { if (error_is_set(errp)) { error_free(*errp); *errp = NULL; } if (connect_state != NULL) { connect_state->current_addr = e; } sock = inet_connect_addr(e, &in_progress, connect_state, errp); if (in_progress) { return sock; } else if (sock >= 0) { /* non blocking socket immediate success, call callback */ if (callback != NULL) { callback(sock, opaque); } break; } } g_free(connect_state); freeaddrinfo(res); return sock; }
17,077
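inet_connect_opts() in the entry above iterates over a getaddrinfo() result list and tries each address until one connects. A minimal blocking version of that walk, using only the standard POSIX socket API and none of QEMU's types, could look like the sketch below; the QEMU code additionally supports non-blocking connects and error propagation via Error **.

#include <netdb.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Try every address resolved for host:port; return the first connected
 * socket, or -1 if none works. Blocking variant for illustration only. */
static int connect_any(const char *host, const char *port)
{
    struct addrinfo hints, *res, *e;
    int sock = -1;

    memset(&hints, 0, sizeof(hints));
    hints.ai_family   = AF_UNSPEC;       /* IPv4 or IPv6 */
    hints.ai_socktype = SOCK_STREAM;

    if (getaddrinfo(host, port, &hints, &res) != 0)
        return -1;

    for (e = res; e != NULL; e = e->ai_next) {
        sock = socket(e->ai_family, e->ai_socktype, e->ai_protocol);
        if (sock < 0)
            continue;
        if (connect(sock, e->ai_addr, e->ai_addrlen) == 0)
            break;                       /* success: keep this socket */
        close(sock);
        sock = -1;
    }

    freeaddrinfo(res);
    return sock;
}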
qemu
ad674e53b5cce265fadafbde2c6a4f190345cd00
0
static void channel_run(DBDMA_channel *ch) { dbdma_cmd *current = &ch->current; uint16_t cmd, key; uint16_t req_count; uint32_t phy_addr; DBDMA_DPRINTF("channel_run\n"); dump_dbdma_cmd(current); /* clear WAKE flag at command fetch */ ch->regs[DBDMA_STATUS] &= cpu_to_be32(~WAKE); cmd = le16_to_cpu(current->command) & COMMAND_MASK; switch (cmd) { case DBDMA_NOP: nop(ch); return; case DBDMA_STOP: stop(ch); return; } key = le16_to_cpu(current->command) & 0x0700; req_count = le16_to_cpu(current->req_count); phy_addr = le32_to_cpu(current->phy_addr); if (key == KEY_STREAM4) { printf("command %x, invalid key 4\n", cmd); kill_channel(ch); return; } switch (cmd) { case OUTPUT_MORE: start_output(ch, key, phy_addr, req_count, 0); return; case OUTPUT_LAST: start_output(ch, key, phy_addr, req_count, 1); return; case INPUT_MORE: start_input(ch, key, phy_addr, req_count, 0); return; case INPUT_LAST: start_input(ch, key, phy_addr, req_count, 1); return; } if (key < KEY_REGS) { printf("command %x, invalid key %x\n", cmd, key); key = KEY_SYSTEM; } /* for LOAD_WORD and STORE_WORD, req_count is on 3 bits * and BRANCH is invalid */ req_count = req_count & 0x0007; if (req_count & 0x4) { req_count = 4; phy_addr &= ~3; } else if (req_count & 0x2) { req_count = 2; phy_addr &= ~1; } else req_count = 1; switch (cmd) { case LOAD_WORD: load_word(ch, key, phy_addr, req_count); return; case STORE_WORD: store_word(ch, key, phy_addr, req_count); return; } }
17,078
qemu
8b7968f7c4ac8c07cad6a1a0891d38cf239a2839
0
static void error(const char *fmt, ...) { va_list ap; va_start(ap, fmt); fprintf(stderr, "qemu-img: "); vfprintf(stderr, fmt, ap); fprintf(stderr, "\n"); va_end(ap); }
17,079
qemu
3ba235a02284c39b34a68a2a588508ffb52a7b55
0
void aio_set_fd_handler(AioContext *ctx, int fd, IOHandler *io_read, IOHandler *io_write, void *opaque) { /* fd is a SOCKET in our case */ AioHandler *node; QLIST_FOREACH(node, &ctx->aio_handlers, node) { if (node->pfd.fd == fd && !node->deleted) { break; } } /* Are we deleting the fd handler? */ if (!io_read && !io_write) { if (node) { /* If the lock is held, just mark the node as deleted */ if (ctx->walking_handlers) { node->deleted = 1; node->pfd.revents = 0; } else { /* Otherwise, delete it for real. We can't just mark it as * deleted because deleted nodes are only cleaned up after * releasing the walking_handlers lock. */ QLIST_REMOVE(node, node); g_free(node); } } } else { HANDLE event; if (node == NULL) { /* Alloc and insert if it's not already there */ node = g_malloc0(sizeof(AioHandler)); node->pfd.fd = fd; QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node); } node->pfd.events = 0; if (node->io_read) { node->pfd.events |= G_IO_IN; } if (node->io_write) { node->pfd.events |= G_IO_OUT; } node->e = &ctx->notifier; /* Update handler with latest information */ node->opaque = opaque; node->io_read = io_read; node->io_write = io_write; event = event_notifier_get_handle(&ctx->notifier); WSAEventSelect(node->pfd.fd, event, FD_READ | FD_ACCEPT | FD_CLOSE | FD_CONNECT | FD_WRITE | FD_OOB); } aio_notify(ctx); }
17,080