label (int64, 0-1) | func1 (string, lengths 23-97k) | id (int64, 0-27.3k) |
---|---|---|
1 | int avpriv_dv_produce_packet(DVDemuxContext *c, AVPacket *pkt, uint8_t *buf, int buf_size) { int size, i; uint8_t *ppcm[4] = { 0 }; if (buf_size < DV_PROFILE_BYTES || !(c->sys = avpriv_dv_frame_profile(c->sys, buf, buf_size)) || buf_size < c->sys->frame_size) { return -1; /* Broken frame, or not enough data */ } /* Queueing audio packet */ /* FIXME: in case of no audio/bad audio we have to do something */ size = dv_extract_audio_info(c, buf); for (i = 0; i < c->ach; i++) { c->audio_pkt[i].size = size; c->audio_pkt[i].pts = c->abytes * 30000 * 8 / c->ast[i]->codec->bit_rate; ppcm[i] = c->audio_buf[i]; } if (c->ach) dv_extract_audio(buf, ppcm, c->sys); /* We work with 720p frames split in half, thus even frames have * channels 0,1 and odd 2,3. */ if (c->sys->height == 720) { if (buf[1] & 0x0C) { c->audio_pkt[2].size = c->audio_pkt[3].size = 0; } else { c->audio_pkt[0].size = c->audio_pkt[1].size = 0; c->abytes += size; } } else { c->abytes += size; } /* Now it's time to return video packet */ size = dv_extract_video_info(c, buf); av_init_packet(pkt); pkt->data = buf; pkt->size = size; pkt->flags |= AV_PKT_FLAG_KEY; pkt->stream_index = c->vst->index; pkt->pts = c->frames; c->frames++; return size; } | 3,548 |
1 | static void handle_event(int event) { static bool logged; if (event & ~PVPANIC_PANICKED && !logged) { qemu_log_mask(LOG_GUEST_ERROR, "pvpanic: unknown event %#x.\n", event); logged = true; } if (event & PVPANIC_PANICKED) { panicked_mon_event("pause"); vm_stop(RUN_STATE_GUEST_PANICKED); return; } } | 3,549 |
1 | static void *iothread_run(void *opaque) { IOThread *iothread = opaque; rcu_register_thread(); my_iothread = iothread; qemu_mutex_lock(&iothread->init_done_lock); iothread->thread_id = qemu_get_thread_id(); qemu_cond_signal(&iothread->init_done_cond); qemu_mutex_unlock(&iothread->init_done_lock); while (!atomic_read(&iothread->stopping)) { aio_poll(iothread->ctx, true); if (atomic_read(&iothread->worker_context)) { GMainLoop *loop; g_main_context_push_thread_default(iothread->worker_context); iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE); loop = iothread->main_loop; g_main_loop_run(iothread->main_loop); iothread->main_loop = NULL; g_main_loop_unref(loop); g_main_context_pop_thread_default(iothread->worker_context); } } rcu_unregister_thread(); return NULL; } | 3,550 |
1 | static void omap_rtc_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { struct omap_rtc_s *s = (struct omap_rtc_s *) opaque; int offset = addr & OMAP_MPUI_REG_MASK; struct tm new_tm; time_t ti[2]; if (size != 1) { return omap_badwidth_write8(opaque, addr, value); } switch (offset) { case 0x00: /* SECONDS_REG */ #ifdef ALMDEBUG printf("RTC SEC_REG <-- %02x\n", value); #endif s->ti -= s->current_tm.tm_sec; s->ti += from_bcd(value); return; case 0x04: /* MINUTES_REG */ #ifdef ALMDEBUG printf("RTC MIN_REG <-- %02x\n", value); #endif s->ti -= s->current_tm.tm_min * 60; s->ti += from_bcd(value) * 60; return; case 0x08: /* HOURS_REG */ #ifdef ALMDEBUG printf("RTC HRS_REG <-- %02x\n", value); #endif s->ti -= s->current_tm.tm_hour * 3600; if (s->pm_am) { s->ti += (from_bcd(value & 0x3f) & 12) * 3600; s->ti += ((value >> 7) & 1) * 43200; } else s->ti += from_bcd(value & 0x3f) * 3600; return; case 0x0c: /* DAYS_REG */ #ifdef ALMDEBUG printf("RTC DAY_REG <-- %02x\n", value); #endif s->ti -= s->current_tm.tm_mday * 86400; s->ti += from_bcd(value) * 86400; return; case 0x10: /* MONTHS_REG */ #ifdef ALMDEBUG printf("RTC MTH_REG <-- %02x\n", value); #endif memcpy(&new_tm, &s->current_tm, sizeof(new_tm)); new_tm.tm_mon = from_bcd(value); ti[0] = mktimegm(&s->current_tm); ti[1] = mktimegm(&new_tm); if (ti[0] != -1 && ti[1] != -1) { s->ti -= ti[0]; s->ti += ti[1]; } else { /* A less accurate version */ s->ti -= s->current_tm.tm_mon * 2592000; s->ti += from_bcd(value) * 2592000; } return; case 0x14: /* YEARS_REG */ #ifdef ALMDEBUG printf("RTC YRS_REG <-- %02x\n", value); #endif memcpy(&new_tm, &s->current_tm, sizeof(new_tm)); new_tm.tm_year += from_bcd(value) - (new_tm.tm_year % 100); ti[0] = mktimegm(&s->current_tm); ti[1] = mktimegm(&new_tm); if (ti[0] != -1 && ti[1] != -1) { s->ti -= ti[0]; s->ti += ti[1]; } else { /* A less accurate version */ s->ti -= (s->current_tm.tm_year % 100) * 31536000; s->ti += from_bcd(value) * 31536000; } return; case 0x18: /* WEEK_REG */ return; /* Ignored */ case 0x20: /* ALARM_SECONDS_REG */ #ifdef ALMDEBUG printf("ALM SEC_REG <-- %02x\n", value); #endif s->alarm_tm.tm_sec = from_bcd(value); omap_rtc_alarm_update(s); return; case 0x24: /* ALARM_MINUTES_REG */ #ifdef ALMDEBUG printf("ALM MIN_REG <-- %02x\n", value); #endif s->alarm_tm.tm_min = from_bcd(value); omap_rtc_alarm_update(s); return; case 0x28: /* ALARM_HOURS_REG */ #ifdef ALMDEBUG printf("ALM HRS_REG <-- %02x\n", value); #endif if (s->pm_am) s->alarm_tm.tm_hour = ((from_bcd(value & 0x3f)) % 12) + ((value >> 7) & 1) * 12; else s->alarm_tm.tm_hour = from_bcd(value); omap_rtc_alarm_update(s); return; case 0x2c: /* ALARM_DAYS_REG */ #ifdef ALMDEBUG printf("ALM DAY_REG <-- %02x\n", value); #endif s->alarm_tm.tm_mday = from_bcd(value); omap_rtc_alarm_update(s); return; case 0x30: /* ALARM_MONTHS_REG */ #ifdef ALMDEBUG printf("ALM MON_REG <-- %02x\n", value); #endif s->alarm_tm.tm_mon = from_bcd(value); omap_rtc_alarm_update(s); return; case 0x34: /* ALARM_YEARS_REG */ #ifdef ALMDEBUG printf("ALM YRS_REG <-- %02x\n", value); #endif s->alarm_tm.tm_year = from_bcd(value); omap_rtc_alarm_update(s); return; case 0x40: /* RTC_CTRL_REG */ #ifdef ALMDEBUG printf("RTC CONTROL <-- %02x\n", value); #endif s->pm_am = (value >> 3) & 1; s->auto_comp = (value >> 2) & 1; s->round = (value >> 1) & 1; s->running = value & 1; s->status &= 0xfd; s->status |= s->running << 1; return; case 0x44: /* RTC_STATUS_REG */ #ifdef ALMDEBUG printf("RTC STATUSL <-- %02x\n", value); #endif s->status &= ~((value & 0xc0) ^ 0x80); 
omap_rtc_interrupts_update(s); return; case 0x48: /* RTC_INTERRUPTS_REG */ #ifdef ALMDEBUG printf("RTC INTRS <-- %02x\n", value); #endif s->interrupts = value; return; case 0x4c: /* RTC_COMP_LSB_REG */ #ifdef ALMDEBUG printf("RTC COMPLSB <-- %02x\n", value); #endif s->comp_reg &= 0xff00; s->comp_reg |= 0x00ff & value; return; case 0x50: /* RTC_COMP_MSB_REG */ #ifdef ALMDEBUG printf("RTC COMPMSB <-- %02x\n", value); #endif s->comp_reg &= 0x00ff; s->comp_reg |= 0xff00 & (value << 8); return; default: OMAP_BAD_REG(addr); return; } } | 3,552 |
1 | static void neon_store_reg(int reg, int pass, TCGv var) { tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass)); dead_tmp(var); } | 3,553 |
1 | static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) { int b, prefixes, aflag, dflag; int shift, ot; int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val; target_ulong next_eip, tval; int rex_w, rex_r; if (unlikely(loglevel & CPU_LOG_TB_OP)) tcg_gen_debug_insn_start(pc_start); s->pc = pc_start; prefixes = 0; aflag = s->code32; dflag = s->code32; s->override = -1; rex_w = -1; rex_r = 0; #ifdef TARGET_X86_64 s->rex_x = 0; s->rex_b = 0; x86_64_hregs = 0; #endif s->rip_offset = 0; /* for relative ip address */ next_byte: b = ldub_code(s->pc); s->pc++; /* check prefixes */ #ifdef TARGET_X86_64 if (CODE64(s)) { switch (b) { case 0xf3: prefixes |= PREFIX_REPZ; goto next_byte; case 0xf2: prefixes |= PREFIX_REPNZ; goto next_byte; case 0xf0: prefixes |= PREFIX_LOCK; goto next_byte; case 0x2e: s->override = R_CS; goto next_byte; case 0x36: s->override = R_SS; goto next_byte; case 0x3e: s->override = R_DS; goto next_byte; case 0x26: s->override = R_ES; goto next_byte; case 0x64: s->override = R_FS; goto next_byte; case 0x65: s->override = R_GS; goto next_byte; case 0x66: prefixes |= PREFIX_DATA; goto next_byte; case 0x67: prefixes |= PREFIX_ADR; goto next_byte; case 0x40 ... 0x4f: /* REX prefix */ rex_w = (b >> 3) & 1; rex_r = (b & 0x4) << 1; s->rex_x = (b & 0x2) << 2; REX_B(s) = (b & 0x1) << 3; x86_64_hregs = 1; /* select uniform byte register addressing */ goto next_byte; } if (rex_w == 1) { /* 0x66 is ignored if rex.w is set */ dflag = 2; } else { if (prefixes & PREFIX_DATA) dflag ^= 1; } if (!(prefixes & PREFIX_ADR)) aflag = 2; } else #endif { switch (b) { case 0xf3: prefixes |= PREFIX_REPZ; goto next_byte; case 0xf2: prefixes |= PREFIX_REPNZ; goto next_byte; case 0xf0: prefixes |= PREFIX_LOCK; goto next_byte; case 0x2e: s->override = R_CS; goto next_byte; case 0x36: s->override = R_SS; goto next_byte; case 0x3e: s->override = R_DS; goto next_byte; case 0x26: s->override = R_ES; goto next_byte; case 0x64: s->override = R_FS; goto next_byte; case 0x65: s->override = R_GS; goto next_byte; case 0x66: prefixes |= PREFIX_DATA; goto next_byte; case 0x67: prefixes |= PREFIX_ADR; goto next_byte; } if (prefixes & PREFIX_DATA) dflag ^= 1; if (prefixes & PREFIX_ADR) aflag ^= 1; } s->prefix = prefixes; s->aflag = aflag; s->dflag = dflag; /* lock generation */ if (prefixes & PREFIX_LOCK) gen_helper_lock(); /* now check op code */ reswitch: switch(b) { case 0x0f: /**************************/ /* extended op code */ b = ldub_code(s->pc++) | 0x100; goto reswitch; /**************************/ /* arith & logic */ case 0x00 ... 0x05: case 0x08 ... 0x0d: case 0x10 ... 0x15: case 0x18 ... 0x1d: case 0x20 ... 0x25: case 0x28 ... 0x2d: case 0x30 ... 0x35: case 0x38 ... 
0x3d: { int op, f, val; op = (b >> 3) & 7; f = (b >> 1) & 3; if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; switch(f) { case 0: /* OP Ev, Gv */ modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(s, modrm, ®_addr, &offset_addr); opreg = OR_TMP0; } else if (op == OP_XORL && rm == reg) { xor_zero: /* xor reg, reg optimisation */ gen_op_movl_T0_0(); s->cc_op = CC_OP_LOGICB + ot; gen_op_mov_reg_T0(ot, reg); gen_op_update1_cc(); break; } else { opreg = rm; } gen_op_mov_TN_reg(ot, 1, reg); gen_op(s, op, ot, opreg); break; case 1: /* OP Gv, Ev */ modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; reg = ((modrm >> 3) & 7) | rex_r; rm = (modrm & 7) | REX_B(s); if (mod != 3) { gen_lea_modrm(s, modrm, ®_addr, &offset_addr); gen_op_ld_T1_A0(ot + s->mem_index); } else if (op == OP_XORL && rm == reg) { goto xor_zero; } else { gen_op_mov_TN_reg(ot, 1, rm); } gen_op(s, op, ot, reg); break; case 2: /* OP A, Iv */ val = insn_get(s, ot); gen_op_movl_T1_im(val); gen_op(s, op, ot, OR_EAX); break; } } break; case 0x82: if (CODE64(s)) goto illegal_op; case 0x80: /* GRP1 */ case 0x81: case 0x83: { int val; if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (mod != 3) { if (b == 0x83) s->rip_offset = 1; else s->rip_offset = insn_const_size(ot); gen_lea_modrm(s, modrm, ®_addr, &offset_addr); opreg = OR_TMP0; } else { opreg = rm; } switch(b) { default: case 0x80: case 0x81: case 0x82: val = insn_get(s, ot); break; case 0x83: val = (int8_t)insn_get(s, OT_BYTE); break; } gen_op_movl_T1_im(val); gen_op(s, op, ot, opreg); } break; /**************************/ /* inc, dec, and other misc arith */ case 0x40 ... 0x47: /* inc Gv */ ot = dflag ? OT_LONG : OT_WORD; gen_inc(s, ot, OR_EAX + (b & 7), 1); break; case 0x48 ... 0x4f: /* dec Gv */ ot = dflag ? 
OT_LONG : OT_WORD; gen_inc(s, ot, OR_EAX + (b & 7), -1); break; case 0xf6: /* GRP3 */ case 0xf7: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (mod != 3) { if (op == 0) s->rip_offset = insn_const_size(ot); gen_lea_modrm(s, modrm, ®_addr, &offset_addr); gen_op_ld_T0_A0(ot + s->mem_index); } else { gen_op_mov_TN_reg(ot, 0, rm); } switch(op) { case 0: /* test */ val = insn_get(s, ot); gen_op_movl_T1_im(val); gen_op_testl_T0_T1_cc(); s->cc_op = CC_OP_LOGICB + ot; break; case 2: /* not */ tcg_gen_not_tl(cpu_T[0], cpu_T[0]); if (mod != 3) { gen_op_st_T0_A0(ot + s->mem_index); } else { gen_op_mov_reg_T0(ot, rm); } break; case 3: /* neg */ tcg_gen_neg_tl(cpu_T[0], cpu_T[0]); if (mod != 3) { gen_op_st_T0_A0(ot + s->mem_index); } else { gen_op_mov_reg_T0(ot, rm); } gen_op_update_neg_cc(); s->cc_op = CC_OP_SUBB + ot; break; case 4: /* mul */ switch(ot) { case OT_BYTE: gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX); tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); gen_op_mov_reg_T0(OT_WORD, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00); s->cc_op = CC_OP_MULB; break; case OT_WORD: gen_op_mov_TN_reg(OT_WORD, 1, R_EAX); tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); gen_op_mov_reg_T0(OT_WORD, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16); gen_op_mov_reg_T0(OT_WORD, R_EDX); tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]); s->cc_op = CC_OP_MULW; break; default: case OT_LONG: #ifdef TARGET_X86_64 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX); tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]); tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); gen_op_mov_reg_T0(OT_LONG, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32); gen_op_mov_reg_T0(OT_LONG, R_EDX); tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]); #else { TCGv_i64 t0, t1; t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); gen_op_mov_TN_reg(OT_LONG, 1, R_EAX); tcg_gen_extu_i32_i64(t0, cpu_T[0]); tcg_gen_extu_i32_i64(t1, cpu_T[1]); tcg_gen_mul_i64(t0, t0, t1); tcg_gen_trunc_i64_i32(cpu_T[0], t0); gen_op_mov_reg_T0(OT_LONG, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_trunc_i64_i32(cpu_T[0], t0); gen_op_mov_reg_T0(OT_LONG, R_EDX); tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]); } #endif s->cc_op = CC_OP_MULL; break; #ifdef TARGET_X86_64 case OT_QUAD: gen_helper_mulq_EAX_T0(cpu_T[0]); s->cc_op = CC_OP_MULQ; break; #endif } break; case 5: /* imul */ switch(ot) { case OT_BYTE: gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX); tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); gen_op_mov_reg_T0(OT_WORD, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]); tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); s->cc_op = CC_OP_MULB; break; case OT_WORD: gen_op_mov_TN_reg(OT_WORD, 1, R_EAX); tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); gen_op_mov_reg_T0(OT_WORD, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); 
tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]); tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16); gen_op_mov_reg_T0(OT_WORD, R_EDX); s->cc_op = CC_OP_MULW; break; default: case OT_LONG: #ifdef TARGET_X86_64 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX); tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]); tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); gen_op_mov_reg_T0(OT_LONG, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]); tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32); gen_op_mov_reg_T0(OT_LONG, R_EDX); #else { TCGv_i64 t0, t1; t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); gen_op_mov_TN_reg(OT_LONG, 1, R_EAX); tcg_gen_ext_i32_i64(t0, cpu_T[0]); tcg_gen_ext_i32_i64(t1, cpu_T[1]); tcg_gen_mul_i64(t0, t0, t1); tcg_gen_trunc_i64_i32(cpu_T[0], t0); gen_op_mov_reg_T0(OT_LONG, R_EAX); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_trunc_i64_i32(cpu_T[0], t0); gen_op_mov_reg_T0(OT_LONG, R_EDX); tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); } #endif s->cc_op = CC_OP_MULL; break; #ifdef TARGET_X86_64 case OT_QUAD: gen_helper_imulq_EAX_T0(cpu_T[0]); s->cc_op = CC_OP_MULQ; break; #endif } break; case 6: /* div */ switch(ot) { case OT_BYTE: gen_jmp_im(pc_start - s->cs_base); gen_helper_divb_AL(cpu_T[0]); break; case OT_WORD: gen_jmp_im(pc_start - s->cs_base); gen_helper_divw_AX(cpu_T[0]); break; default: case OT_LONG: gen_jmp_im(pc_start - s->cs_base); gen_helper_divl_EAX(cpu_T[0]); break; #ifdef TARGET_X86_64 case OT_QUAD: gen_jmp_im(pc_start - s->cs_base); gen_helper_divq_EAX(cpu_T[0]); break; #endif } break; case 7: /* idiv */ switch(ot) { case OT_BYTE: gen_jmp_im(pc_start - s->cs_base); gen_helper_idivb_AL(cpu_T[0]); break; case OT_WORD: gen_jmp_im(pc_start - s->cs_base); gen_helper_idivw_AX(cpu_T[0]); break; default: case OT_LONG: gen_jmp_im(pc_start - s->cs_base); gen_helper_idivl_EAX(cpu_T[0]); break; #ifdef TARGET_X86_64 case OT_QUAD: gen_jmp_im(pc_start - s->cs_base); gen_helper_idivq_EAX(cpu_T[0]); break; #endif } break; default: goto illegal_op; } break; case 0xfe: /* GRP4 */ case 0xff: /* GRP5 */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); op = (modrm >> 3) & 7; if (op >= 2 && b == 0xfe) { goto illegal_op; } if (CODE64(s)) { if (op == 2 || op == 4) { /* operand size for jumps is 64 bit */ ot = OT_QUAD; } else if (op == 3 || op == 5) { /* for call calls, the operand is 16 or 32 bit, even in long mode */ ot = dflag ? OT_LONG : OT_WORD; } else if (op == 6) { /* default push size is 64 bit */ ot = dflag ? 
OT_QUAD : OT_WORD; } } if (mod != 3) { gen_lea_modrm(s, modrm, ®_addr, &offset_addr); if (op >= 2 && op != 3 && op != 5) gen_op_ld_T0_A0(ot + s->mem_index); } else { gen_op_mov_TN_reg(ot, 0, rm); } switch(op) { case 0: /* inc Ev */ if (mod != 3) opreg = OR_TMP0; else opreg = rm; gen_inc(s, ot, opreg, 1); break; case 1: /* dec Ev */ if (mod != 3) opreg = OR_TMP0; else opreg = rm; gen_inc(s, ot, opreg, -1); break; case 2: /* call Ev */ /* XXX: optimize if memory (no 'and' is necessary) */ if (s->dflag == 0) gen_op_andl_T0_ffff(); next_eip = s->pc - s->cs_base; gen_movtl_T1_im(next_eip); gen_push_T1(s); gen_op_jmp_T0(); gen_eob(s); break; case 3: /* lcall Ev */ gen_op_ld_T1_A0(ot + s->mem_index); gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); gen_op_ldu_T0_A0(OT_WORD + s->mem_index); do_lcall: if (s->pe && !s->vm86) { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1], tcg_const_i32(dflag), tcg_const_i32(s->pc - pc_start)); } else { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1], tcg_const_i32(dflag), tcg_const_i32(s->pc - s->cs_base)); } gen_eob(s); break; case 4: /* jmp Ev */ if (s->dflag == 0) gen_op_andl_T0_ffff(); gen_op_jmp_T0(); gen_eob(s); break; case 5: /* ljmp Ev */ gen_op_ld_T1_A0(ot + s->mem_index); gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); gen_op_ldu_T0_A0(OT_WORD + s->mem_index); do_ljmp: if (s->pe && !s->vm86) { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1], tcg_const_i32(s->pc - pc_start)); } else { gen_op_movl_seg_T0_vm(R_CS); gen_op_movl_T0_T1(); gen_op_jmp_T0(); } gen_eob(s); break; case 6: /* push Ev */ gen_push_T0(s); break; default: goto illegal_op; } break; case 0x84: /* test Ev, Gv */ case 0x85: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); gen_op_mov_TN_reg(ot, 1, reg); gen_op_testl_T0_T1_cc(); s->cc_op = CC_OP_LOGICB + ot; break; case 0xa8: /* test eAX, Iv */ case 0xa9: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; val = insn_get(s, ot); gen_op_mov_TN_reg(ot, 0, OR_EAX); gen_op_movl_T1_im(val); gen_op_testl_T0_T1_cc(); s->cc_op = CC_OP_LOGICB + ot; break; case 0x98: /* CWDE/CBW */ #ifdef TARGET_X86_64 if (dflag == 2) { gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); gen_op_mov_reg_T0(OT_QUAD, R_EAX); } else #endif if (dflag == 1) { gen_op_mov_TN_reg(OT_WORD, 0, R_EAX); tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); gen_op_mov_reg_T0(OT_LONG, R_EAX); } else { gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX); tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]); gen_op_mov_reg_T0(OT_WORD, R_EAX); } break; case 0x99: /* CDQ/CWD */ #ifdef TARGET_X86_64 if (dflag == 2) { gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX); tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63); gen_op_mov_reg_T0(OT_QUAD, R_EDX); } else #endif if (dflag == 1) { gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31); gen_op_mov_reg_T0(OT_LONG, R_EDX); } else { gen_op_mov_TN_reg(OT_WORD, 0, R_EAX); tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15); gen_op_mov_reg_T0(OT_WORD, R_EDX); } break; case 0x1af: /* imul Gv, Ev */ case 0x69: /* imul 
Gv, Ev, I */ case 0x6b: ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; if (b == 0x69) s->rip_offset = insn_const_size(ot); else if (b == 0x6b) s->rip_offset = 1; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); if (b == 0x69) { val = insn_get(s, ot); gen_op_movl_T1_im(val); } else if (b == 0x6b) { val = (int8_t)insn_get(s, OT_BYTE); gen_op_movl_T1_im(val); } else { gen_op_mov_TN_reg(ot, 1, reg); } #ifdef TARGET_X86_64 if (ot == OT_QUAD) { gen_helper_imulq_T0_T1(cpu_T[0], cpu_T[0], cpu_T[1]); } else #endif if (ot == OT_LONG) { #ifdef TARGET_X86_64 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]); tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]); tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); #else { TCGv_i64 t0, t1; t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(t0, cpu_T[0]); tcg_gen_ext_i32_i64(t1, cpu_T[1]); tcg_gen_mul_i64(t0, t0, t1); tcg_gen_trunc_i64_i32(cpu_T[0], t0); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_trunc_i64_i32(cpu_T[1], t0); tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0); } #endif } else { tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]); /* XXX: use 32 bit mul which could be faster */ tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]); tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]); tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]); tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0); } gen_op_mov_reg_T0(ot, reg); s->cc_op = CC_OP_MULB + ot; break; case 0x1c0: case 0x1c1: /* xadd Ev, Gv */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (mod == 3) { rm = (modrm & 7) | REX_B(s); gen_op_mov_TN_reg(ot, 0, reg); gen_op_mov_TN_reg(ot, 1, rm); gen_op_addl_T0_T1(); gen_op_mov_reg_T1(ot, reg); gen_op_mov_reg_T0(ot, rm); } else { gen_lea_modrm(s, modrm, ®_addr, &offset_addr); gen_op_mov_TN_reg(ot, 0, reg); gen_op_ld_T1_A0(ot + s->mem_index); gen_op_addl_T0_T1(); gen_op_st_T0_A0(ot + s->mem_index); gen_op_mov_reg_T1(ot, reg); } gen_op_update2_cc(); s->cc_op = CC_OP_ADDB + ot; break; case 0x1b0: case 0x1b1: /* cmpxchg Ev, Gv */ { int label1, label2; TCGv t0, t1, t2, a0; if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; t0 = tcg_temp_local_new(); t1 = tcg_temp_local_new(); t2 = tcg_temp_local_new(); a0 = tcg_temp_local_new(); gen_op_mov_v_reg(ot, t1, reg); if (mod == 3) { rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(ot, t0, rm); } else { gen_lea_modrm(s, modrm, ®_addr, &offset_addr); tcg_gen_mov_tl(a0, cpu_A0); gen_op_ld_v(ot + s->mem_index, t0, a0); rm = 0; /* avoid warning */ } label1 = gen_new_label(); tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUState, regs[R_EAX])); tcg_gen_sub_tl(t2, t2, t0); gen_extu(ot, t2); tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1); if (mod == 3) { label2 = gen_new_label(); gen_op_mov_reg_v(ot, R_EAX, t0); tcg_gen_br(label2); gen_set_label(label1); gen_op_mov_reg_v(ot, rm, t1); gen_set_label(label2); } else { tcg_gen_mov_tl(t1, t0); gen_op_mov_reg_v(ot, R_EAX, t0); gen_set_label(label1); /* always store */ gen_op_st_v(ot + s->mem_index, t1, a0); } tcg_gen_mov_tl(cpu_cc_src, t0); tcg_gen_mov_tl(cpu_cc_dst, t2); s->cc_op = CC_OP_SUBB + ot; tcg_temp_free(t0); tcg_temp_free(t1); tcg_temp_free(t2); tcg_temp_free(a0); } break; case 0x1c7: /* 
cmpxchg8b */ modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; if ((mod == 3) || ((modrm & 0x38) != 0x8)) goto illegal_op; #ifdef TARGET_X86_64 if (dflag == 2) { if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) goto illegal_op; gen_jmp_im(pc_start - s->cs_base); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_lea_modrm(s, modrm, ®_addr, &offset_addr); gen_helper_cmpxchg16b(cpu_A0); } else #endif { if (!(s->cpuid_features & CPUID_CX8)) goto illegal_op; gen_jmp_im(pc_start - s->cs_base); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_lea_modrm(s, modrm, ®_addr, &offset_addr); gen_helper_cmpxchg8b(cpu_A0); } s->cc_op = CC_OP_EFLAGS; break; /**************************/ /* push/pop */ case 0x50 ... 0x57: /* push */ gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s)); gen_push_T0(s); break; case 0x58 ... 0x5f: /* pop */ if (CODE64(s)) { ot = dflag ? OT_QUAD : OT_WORD; } else { ot = dflag + OT_WORD; } gen_pop_T0(s); /* NOTE: order is important for pop %sp */ gen_pop_update(s); gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s)); break; case 0x60: /* pusha */ if (CODE64(s)) goto illegal_op; gen_pusha(s); break; case 0x61: /* popa */ if (CODE64(s)) goto illegal_op; gen_popa(s); break; case 0x68: /* push Iv */ case 0x6a: if (CODE64(s)) { ot = dflag ? OT_QUAD : OT_WORD; } else { ot = dflag + OT_WORD; } if (b == 0x68) val = insn_get(s, ot); else val = (int8_t)insn_get(s, OT_BYTE); gen_op_movl_T0_im(val); gen_push_T0(s); break; case 0x8f: /* pop Ev */ if (CODE64(s)) { ot = dflag ? OT_QUAD : OT_WORD; } else { ot = dflag + OT_WORD; } modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; gen_pop_T0(s); if (mod == 3) { /* NOTE: order is important for pop %sp */ gen_pop_update(s); rm = (modrm & 7) | REX_B(s); gen_op_mov_reg_T0(ot, rm); } else { /* NOTE: order is important too for MMU exceptions */ s->popl_esp_hack = 1 << ot; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1); s->popl_esp_hack = 0; gen_pop_update(s); } break; case 0xc8: /* enter */ { int level; val = lduw_code(s->pc); s->pc += 2; level = ldub_code(s->pc++); gen_enter(s, val, level); } break; case 0xc9: /* leave */ /* XXX: exception not precise (ESP is updated before potential exception) */ if (CODE64(s)) { gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP); gen_op_mov_reg_T0(OT_QUAD, R_ESP); } else if (s->ss32) { gen_op_mov_TN_reg(OT_LONG, 0, R_EBP); gen_op_mov_reg_T0(OT_LONG, R_ESP); } else { gen_op_mov_TN_reg(OT_WORD, 0, R_EBP); gen_op_mov_reg_T0(OT_WORD, R_ESP); } gen_pop_T0(s); if (CODE64(s)) { ot = dflag ? OT_QUAD : OT_WORD; } else { ot = dflag + OT_WORD; } gen_op_mov_reg_T0(ot, R_EBP); gen_pop_update(s); break; case 0x06: /* push es */ case 0x0e: /* push cs */ case 0x16: /* push ss */ case 0x1e: /* push ds */ if (CODE64(s)) goto illegal_op; gen_op_movl_T0_seg(b >> 3); gen_push_T0(s); break; case 0x1a0: /* push fs */ case 0x1a8: /* push gs */ gen_op_movl_T0_seg((b >> 3) & 7); gen_push_T0(s); break; case 0x07: /* pop es */ case 0x17: /* pop ss */ case 0x1f: /* pop ds */ if (CODE64(s)) goto illegal_op; reg = b >> 3; gen_pop_T0(s); gen_movl_seg_T0(s, reg, pc_start - s->cs_base); gen_pop_update(s); if (reg == R_SS) { /* if reg == SS, inhibit interrupts/trace. 
*/ /* If several instructions disable interrupts, only the _first_ does it */ if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) gen_helper_set_inhibit_irq(); s->tf = 0; } if (s->is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; case 0x1a1: /* pop fs */ case 0x1a9: /* pop gs */ gen_pop_T0(s); gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base); gen_pop_update(s); if (s->is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; /**************************/ /* mov */ case 0x88: case 0x89: /* mov Gv, Ev */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; /* generate a generic store */ gen_ldst_modrm(s, modrm, ot, reg, 1); break; case 0xc6: case 0xc7: /* mov Ev, Iv */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; if (mod != 3) { s->rip_offset = insn_const_size(ot); gen_lea_modrm(s, modrm, ®_addr, &offset_addr); } val = insn_get(s, ot); gen_op_movl_T0_im(val); if (mod != 3) gen_op_st_T0_A0(ot + s->mem_index); else gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s)); break; case 0x8a: case 0x8b: /* mov Ev, Gv */ if ((b & 1) == 0) ot = OT_BYTE; else ot = OT_WORD + dflag; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); gen_op_mov_reg_T0(ot, reg); break; case 0x8e: /* mov seg, Gv */ modrm = ldub_code(s->pc++); reg = (modrm >> 3) & 7; if (reg >= 6 || reg == R_CS) goto illegal_op; gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); gen_movl_seg_T0(s, reg, pc_start - s->cs_base); if (reg == R_SS) { /* if reg == SS, inhibit interrupts/trace */ /* If several instructions disable interrupts, only the _first_ does it */ if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) gen_helper_set_inhibit_irq(); s->tf = 0; } if (s->is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; case 0x8c: /* mov Gv, seg */ modrm = ldub_code(s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (reg >= 6) goto illegal_op; gen_op_movl_T0_seg(reg); if (mod == 3) ot = OT_WORD + dflag; else ot = OT_WORD; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1); break; case 0x1b6: /* movzbS Gv, Eb */ case 0x1b7: /* movzwS Gv, Eb */ case 0x1be: /* movsbS Gv, Eb */ case 0x1bf: /* movswS Gv, Eb */ { int d_ot; /* d_ot is the size of destination */ d_ot = dflag + OT_WORD; /* ot is the size of source */ ot = (b & 1) + OT_BYTE; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod == 3) { gen_op_mov_TN_reg(ot, 0, rm); switch(ot | (b & 8)) { case OT_BYTE: tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]); break; case OT_BYTE | 8: tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]); break; case OT_WORD: tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]); break; default: case OT_WORD | 8: tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]); break; } gen_op_mov_reg_T0(d_ot, reg); } else { gen_lea_modrm(s, modrm, ®_addr, &offset_addr); if (b & 8) { gen_op_lds_T0_A0(ot + s->mem_index); } else { gen_op_ldu_T0_A0(ot + s->mem_index); } gen_op_mov_reg_T0(d_ot, reg); } } break; case 0x8d: /* lea */ ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; reg = ((modrm >> 3) & 7) | rex_r; /* we must ensure that no segment is added */ s->override = -1; val = s->addseg; s->addseg = 0; gen_lea_modrm(s, modrm, ®_addr, &offset_addr); s->addseg = val; gen_op_mov_reg_A0(ot - OT_WORD, reg); break; case 0xa0: /* mov EAX, Ov */ case 0xa1: case 0xa2: /* mov Ov, EAX */ case 0xa3: { target_ulong offset_addr; if ((b & 1) 
== 0) ot = OT_BYTE; else ot = dflag + OT_WORD; #ifdef TARGET_X86_64 if (s->aflag == 2) { offset_addr = ldq_code(s->pc); s->pc += 8; gen_op_movq_A0_im(offset_addr); } else #endif { if (s->aflag) { offset_addr = insn_get(s, OT_LONG); } else { offset_addr = insn_get(s, OT_WORD); } gen_op_movl_A0_im(offset_addr); } gen_add_A0_ds_seg(s); if ((b & 2) == 0) { gen_op_ld_T0_A0(ot + s->mem_index); gen_op_mov_reg_T0(ot, R_EAX); } else { gen_op_mov_TN_reg(ot, 0, R_EAX); gen_op_st_T0_A0(ot + s->mem_index); } } break; case 0xd7: /* xlat */ #ifdef TARGET_X86_64 if (s->aflag == 2) { gen_op_movq_A0_reg(R_EBX); gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX); tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff); tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]); } else #endif { gen_op_movl_A0_reg(R_EBX); gen_op_mov_TN_reg(OT_LONG, 0, R_EAX); tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff); tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]); if (s->aflag == 0) gen_op_andl_A0_ffff(); else tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff); } gen_add_A0_ds_seg(s); gen_op_ldu_T0_A0(OT_BYTE + s->mem_index); gen_op_mov_reg_T0(OT_BYTE, R_EAX); break; case 0xb0 ... 0xb7: /* mov R, Ib */ val = insn_get(s, OT_BYTE); gen_op_movl_T0_im(val); gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s)); break; case 0xb8 ... 0xbf: /* mov R, Iv */ #ifdef TARGET_X86_64 if (dflag == 2) { uint64_t tmp; /* 64 bit case */ tmp = ldq_code(s->pc); s->pc += 8; reg = (b & 7) | REX_B(s); gen_movtl_T0_im(tmp); gen_op_mov_reg_T0(OT_QUAD, reg); } else #endif { ot = dflag ? OT_LONG : OT_WORD; val = insn_get(s, ot); reg = (b & 7) | REX_B(s); gen_op_movl_T0_im(val); gen_op_mov_reg_T0(ot, reg); } break; case 0x91 ... 0x97: /* xchg R, EAX */ ot = dflag + OT_WORD; reg = (b & 7) | REX_B(s); rm = R_EAX; goto do_xchg_reg; case 0x86: case 0x87: /* xchg Ev, Gv */ if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (mod == 3) { rm = (modrm & 7) | REX_B(s); do_xchg_reg: gen_op_mov_TN_reg(ot, 0, reg); gen_op_mov_TN_reg(ot, 1, rm); gen_op_mov_reg_T0(ot, rm); gen_op_mov_reg_T1(ot, reg); } else { gen_lea_modrm(s, modrm, ®_addr, &offset_addr); gen_op_mov_TN_reg(ot, 0, reg); /* for xchg, lock is implicit */ if (!(prefixes & PREFIX_LOCK)) gen_helper_lock(); gen_op_ld_T1_A0(ot + s->mem_index); gen_op_st_T0_A0(ot + s->mem_index); if (!(prefixes & PREFIX_LOCK)) gen_helper_unlock(); gen_op_mov_reg_T1(ot, reg); } break; case 0xc4: /* les Gv */ if (CODE64(s)) goto illegal_op; op = R_ES; goto do_lxx; case 0xc5: /* lds Gv */ if (CODE64(s)) goto illegal_op; op = R_DS; goto do_lxx; case 0x1b2: /* lss Gv */ op = R_SS; goto do_lxx; case 0x1b4: /* lfs Gv */ op = R_FS; goto do_lxx; case 0x1b5: /* lgs Gv */ op = R_GS; do_lxx: ot = dflag ? 
OT_LONG : OT_WORD; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_lea_modrm(s, modrm, ®_addr, &offset_addr); gen_op_ld_T1_A0(ot + s->mem_index); gen_add_A0_im(s, 1 << (ot - OT_WORD + 1)); /* load the segment first to handle exceptions properly */ gen_op_ldu_T0_A0(OT_WORD + s->mem_index); gen_movl_seg_T0(s, op, pc_start - s->cs_base); /* then put the data */ gen_op_mov_reg_T1(ot, reg); if (s->is_jmp) { gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; /************************/ /* shifts */ case 0xc0: case 0xc1: /* shift Ev,Ib */ shift = 2; grp2: { if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; if (mod != 3) { if (shift == 2) { s->rip_offset = 1; } gen_lea_modrm(s, modrm, ®_addr, &offset_addr); opreg = OR_TMP0; } else { opreg = (modrm & 7) | REX_B(s); } /* simpler op */ if (shift == 0) { gen_shift(s, op, ot, opreg, OR_ECX); } else { if (shift == 2) { shift = ldub_code(s->pc++); } gen_shifti(s, op, ot, opreg, shift); } } break; case 0xd0: case 0xd1: /* shift Ev,1 */ shift = 1; goto grp2; case 0xd2: case 0xd3: /* shift Ev,cl */ shift = 0; goto grp2; case 0x1a4: /* shld imm */ op = 0; shift = 1; goto do_shiftd; case 0x1a5: /* shld cl */ op = 0; shift = 0; goto do_shiftd; case 0x1ac: /* shrd imm */ op = 1; shift = 1; goto do_shiftd; case 0x1ad: /* shrd cl */ op = 1; shift = 0; do_shiftd: ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (mod != 3) { gen_lea_modrm(s, modrm, ®_addr, &offset_addr); opreg = OR_TMP0; } else { opreg = rm; } gen_op_mov_TN_reg(ot, 1, reg); if (shift) { val = ldub_code(s->pc++); tcg_gen_movi_tl(cpu_T3, val); } else { tcg_gen_ld_tl(cpu_T3, cpu_env, offsetof(CPUState, regs[R_ECX])); } gen_shiftd_rm_T1_T3(s, ot, opreg, op); break; /************************/ /* floats */ case 0xd8 ... 0xdf: if (s->flags & (HF_EM_MASK | HF_TS_MASK)) { /* if CR0.EM or CR0.TS are set, generate an FPU exception */ /* XXX: what to do if illegal op ? */ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; rm = modrm & 7; op = ((b & 7) << 3) | ((modrm >> 3) & 7); if (mod != 3) { /* memory op */ gen_lea_modrm(s, modrm, ®_addr, &offset_addr); switch(op) { case 0x00 ... 0x07: /* fxxxs */ case 0x10 ... 0x17: /* fixxxl */ case 0x20 ... 0x27: /* fxxxl */ case 0x30 ... 0x37: /* fixxx */ { int op1; op1 = op & 7; switch(op >> 4) { case 0: gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_flds_FT0(cpu_tmp2_i32); break; case 1: gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_fildl_FT0(cpu_tmp2_i32); break; case 2: tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); gen_helper_fldl_FT0(cpu_tmp1_i64); break; case 3: default: gen_op_lds_T0_A0(OT_WORD + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_fildl_FT0(cpu_tmp2_i32); break; } gen_helper_fp_arith_ST0_FT0(op1); if (op1 == 3) { /* fcomp needs pop */ gen_helper_fpop(); } } break; case 0x08: /* flds */ case 0x0a: /* fsts */ case 0x0b: /* fstps */ case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */ case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */ case 0x38 ... 
0x3b: /* filds, fisttps, fists, fistps */ switch(op & 7) { case 0: switch(op >> 4) { case 0: gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_flds_ST0(cpu_tmp2_i32); break; case 1: gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_fildl_ST0(cpu_tmp2_i32); break; case 2: tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); gen_helper_fldl_ST0(cpu_tmp1_i64); break; case 3: default: gen_op_lds_T0_A0(OT_WORD + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_fildl_ST0(cpu_tmp2_i32); break; } break; case 1: /* XXX: the corresponding CPUID bit must be tested ! */ switch(op >> 4) { case 1: gen_helper_fisttl_ST0(cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_LONG + s->mem_index); break; case 2: gen_helper_fisttll_ST0(cpu_tmp1_i64); tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); break; case 3: default: gen_helper_fistt_ST0(cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_WORD + s->mem_index); break; } gen_helper_fpop(); break; default: switch(op >> 4) { case 0: gen_helper_fsts_ST0(cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_LONG + s->mem_index); break; case 1: gen_helper_fistl_ST0(cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_LONG + s->mem_index); break; case 2: gen_helper_fstl_ST0(cpu_tmp1_i64); tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); break; case 3: default: gen_helper_fist_ST0(cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_WORD + s->mem_index); break; } if ((op & 7) == 3) gen_helper_fpop(); break; } break; case 0x0c: /* fldenv mem */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fldenv( cpu_A0, tcg_const_i32(s->dflag)); break; case 0x0d: /* fldcw mem */ gen_op_ld_T0_A0(OT_WORD + s->mem_index); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_fldcw(cpu_tmp2_i32); break; case 0x0e: /* fnstenv mem */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fstenv(cpu_A0, tcg_const_i32(s->dflag)); break; case 0x0f: /* fnstcw mem */ gen_helper_fnstcw(cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_WORD + s->mem_index); break; case 0x1d: /* fldt mem */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fldt_ST0(cpu_A0); break; case 0x1f: /* fstpt mem */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fstt_ST0(cpu_A0); gen_helper_fpop(); break; case 0x2c: /* frstor mem */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_frstor(cpu_A0, tcg_const_i32(s->dflag)); break; case 0x2e: /* fnsave mem */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fsave(cpu_A0, tcg_const_i32(s->dflag)); break; case 0x2f: /* fnstsw mem */ gen_helper_fnstsw(cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_st_T0_A0(OT_WORD + s->mem_index); break; case 0x3c: /* fbld */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fbld_ST0(cpu_A0); break; case 0x3e: /* fbstp */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); 
gen_helper_fbst_ST0(cpu_A0); gen_helper_fpop(); break; case 0x3d: /* fildll */ tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); gen_helper_fildll_ST0(cpu_tmp1_i64); break; case 0x3f: /* fistpll */ gen_helper_fistll_ST0(cpu_tmp1_i64); tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); gen_helper_fpop(); break; default: goto illegal_op; } } else { /* register float ops */ opreg = rm; switch(op) { case 0x08: /* fld sti */ gen_helper_fpush(); gen_helper_fmov_ST0_STN(tcg_const_i32((opreg + 1) & 7)); break; case 0x09: /* fxchg sti */ case 0x29: /* fxchg4 sti, undocumented op */ case 0x39: /* fxchg7 sti, undocumented op */ gen_helper_fxchg_ST0_STN(tcg_const_i32(opreg)); break; case 0x0a: /* grp d9/2 */ switch(rm) { case 0: /* fnop */ /* check exceptions (FreeBSD FPU probe) */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fwait(); break; default: goto illegal_op; } break; case 0x0c: /* grp d9/4 */ switch(rm) { case 0: /* fchs */ gen_helper_fchs_ST0(); break; case 1: /* fabs */ gen_helper_fabs_ST0(); break; case 4: /* ftst */ gen_helper_fldz_FT0(); gen_helper_fcom_ST0_FT0(); break; case 5: /* fxam */ gen_helper_fxam_ST0(); break; default: goto illegal_op; } break; case 0x0d: /* grp d9/5 */ { switch(rm) { case 0: gen_helper_fpush(); gen_helper_fld1_ST0(); break; case 1: gen_helper_fpush(); gen_helper_fldl2t_ST0(); break; case 2: gen_helper_fpush(); gen_helper_fldl2e_ST0(); break; case 3: gen_helper_fpush(); gen_helper_fldpi_ST0(); break; case 4: gen_helper_fpush(); gen_helper_fldlg2_ST0(); break; case 5: gen_helper_fpush(); gen_helper_fldln2_ST0(); break; case 6: gen_helper_fpush(); gen_helper_fldz_ST0(); break; default: goto illegal_op; } } break; case 0x0e: /* grp d9/6 */ switch(rm) { case 0: /* f2xm1 */ gen_helper_f2xm1(); break; case 1: /* fyl2x */ gen_helper_fyl2x(); break; case 2: /* fptan */ gen_helper_fptan(); break; case 3: /* fpatan */ gen_helper_fpatan(); break; case 4: /* fxtract */ gen_helper_fxtract(); break; case 5: /* fprem1 */ gen_helper_fprem1(); break; case 6: /* fdecstp */ gen_helper_fdecstp(); break; default: case 7: /* fincstp */ gen_helper_fincstp(); break; } break; case 0x0f: /* grp d9/7 */ switch(rm) { case 0: /* fprem */ gen_helper_fprem(); break; case 1: /* fyl2xp1 */ gen_helper_fyl2xp1(); break; case 2: /* fsqrt */ gen_helper_fsqrt(); break; case 3: /* fsincos */ gen_helper_fsincos(); break; case 5: /* fscale */ gen_helper_fscale(); break; case 4: /* frndint */ gen_helper_frndint(); break; case 6: /* fsin */ gen_helper_fsin(); break; default: case 7: /* fcos */ gen_helper_fcos(); break; } break; case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */ case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */ case 0x30: case 0x31: case 0x34 ... 
0x37: /* fxxxp sti, st */ { int op1; op1 = op & 7; if (op >= 0x20) { gen_helper_fp_arith_STN_ST0(op1, opreg); if (op >= 0x30) gen_helper_fpop(); } else { gen_helper_fmov_FT0_STN(tcg_const_i32(opreg)); gen_helper_fp_arith_ST0_FT0(op1); } } break; case 0x02: /* fcom */ case 0x22: /* fcom2, undocumented op */ gen_helper_fmov_FT0_STN(tcg_const_i32(opreg)); gen_helper_fcom_ST0_FT0(); break; case 0x03: /* fcomp */ case 0x23: /* fcomp3, undocumented op */ case 0x32: /* fcomp5, undocumented op */ gen_helper_fmov_FT0_STN(tcg_const_i32(opreg)); gen_helper_fcom_ST0_FT0(); gen_helper_fpop(); break; case 0x15: /* da/5 */ switch(rm) { case 1: /* fucompp */ gen_helper_fmov_FT0_STN(tcg_const_i32(1)); gen_helper_fucom_ST0_FT0(); gen_helper_fpop(); gen_helper_fpop(); break; default: goto illegal_op; } break; case 0x1c: switch(rm) { case 0: /* feni (287 only, just do nop here) */ break; case 1: /* fdisi (287 only, just do nop here) */ break; case 2: /* fclex */ gen_helper_fclex(); break; case 3: /* fninit */ gen_helper_fninit(); break; case 4: /* fsetpm (287 only, just do nop here) */ break; default: goto illegal_op; } break; case 0x1d: /* fucomi */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_fmov_FT0_STN(tcg_const_i32(opreg)); gen_helper_fucomi_ST0_FT0(); s->cc_op = CC_OP_EFLAGS; break; case 0x1e: /* fcomi */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_fmov_FT0_STN(tcg_const_i32(opreg)); gen_helper_fcomi_ST0_FT0(); s->cc_op = CC_OP_EFLAGS; break; case 0x28: /* ffree sti */ gen_helper_ffree_STN(tcg_const_i32(opreg)); break; case 0x2a: /* fst sti */ gen_helper_fmov_STN_ST0(tcg_const_i32(opreg)); break; case 0x2b: /* fstp sti */ case 0x0b: /* fstp1 sti, undocumented op */ case 0x3a: /* fstp8 sti, undocumented op */ case 0x3b: /* fstp9 sti, undocumented op */ gen_helper_fmov_STN_ST0(tcg_const_i32(opreg)); gen_helper_fpop(); break; case 0x2c: /* fucom st(i) */ gen_helper_fmov_FT0_STN(tcg_const_i32(opreg)); gen_helper_fucom_ST0_FT0(); break; case 0x2d: /* fucomp st(i) */ gen_helper_fmov_FT0_STN(tcg_const_i32(opreg)); gen_helper_fucom_ST0_FT0(); gen_helper_fpop(); break; case 0x33: /* de/3 */ switch(rm) { case 1: /* fcompp */ gen_helper_fmov_FT0_STN(tcg_const_i32(1)); gen_helper_fcom_ST0_FT0(); gen_helper_fpop(); gen_helper_fpop(); break; default: goto illegal_op; } break; case 0x38: /* ffreep sti, undocumented op */ gen_helper_ffree_STN(tcg_const_i32(opreg)); gen_helper_fpop(); break; case 0x3c: /* df/4 */ switch(rm) { case 0: gen_helper_fnstsw(cpu_tmp2_i32); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_mov_reg_T0(OT_WORD, R_EAX); break; default: goto illegal_op; } break; case 0x3d: /* fucomip */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_fmov_FT0_STN(tcg_const_i32(opreg)); gen_helper_fucomi_ST0_FT0(); gen_helper_fpop(); s->cc_op = CC_OP_EFLAGS; break; case 0x3e: /* fcomip */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_fmov_FT0_STN(tcg_const_i32(opreg)); gen_helper_fcomi_ST0_FT0(); gen_helper_fpop(); s->cc_op = CC_OP_EFLAGS; break; case 0x10 ... 0x13: /* fcmovxx */ case 0x18 ... 
0x1b: { int op1, l1; static const uint8_t fcmov_cc[8] = { (JCC_B << 1), (JCC_Z << 1), (JCC_BE << 1), (JCC_P << 1), }; op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1); l1 = gen_new_label(); gen_jcc1(s, s->cc_op, op1, l1); gen_helper_fmov_ST0_STN(tcg_const_i32(opreg)); gen_set_label(l1); } break; default: goto illegal_op; } } break; /************************/ /* string ops */ case 0xa4: /* movsS */ case 0xa5: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_movs(s, ot); } break; case 0xaa: /* stosS */ case 0xab: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_stos(s, ot); } break; case 0xac: /* lodsS */ case 0xad: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_lods(s, ot); } break; case 0xae: /* scasS */ case 0xaf: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; if (prefixes & PREFIX_REPNZ) { gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); } else if (prefixes & PREFIX_REPZ) { gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); } else { gen_scas(s, ot); s->cc_op = CC_OP_SUBB + ot; } break; case 0xa6: /* cmpsS */ case 0xa7: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag + OT_WORD; if (prefixes & PREFIX_REPNZ) { gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1); } else if (prefixes & PREFIX_REPZ) { gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0); } else { gen_cmps(s, ot); s->cc_op = CC_OP_SUBB + ot; } break; case 0x6c: /* insS */ case 0x6d: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); gen_op_andl_T0_ffff(); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_ins(s, ot); if (use_icount) { gen_jmp(s, s->pc - s->cs_base); } } break; case 0x6e: /* outsS */ case 0x6f: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); gen_op_andl_T0_ffff(); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes) | 4); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base); } else { gen_outs(s, ot); if (use_icount) { gen_jmp(s, s->pc - s->cs_base); } } break; /************************/ /* port I/O */ case 0xe4: case 0xe5: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; val = ldub_code(s->pc++); gen_op_movl_T0_im(val); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); if (use_icount) gen_io_start(); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32); gen_op_mov_reg_T1(ot, R_EAX); if (use_icount) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0xe6: case 0xe7: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? 
OT_LONG : OT_WORD; val = ldub_code(s->pc++); gen_op_movl_T0_im(val); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); gen_op_mov_TN_reg(ot, 1, R_EAX); if (use_icount) gen_io_start(); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]); gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); if (use_icount) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0xec: case 0xed: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); gen_op_andl_T0_ffff(); gen_check_io(s, ot, pc_start - s->cs_base, SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes)); if (use_icount) gen_io_start(); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32); gen_op_mov_reg_T1(ot, R_EAX); if (use_icount) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0xee: case 0xef: if ((b & 1) == 0) ot = OT_BYTE; else ot = dflag ? OT_LONG : OT_WORD; gen_op_mov_TN_reg(OT_WORD, 0, R_EDX); gen_op_andl_T0_ffff(); gen_check_io(s, ot, pc_start - s->cs_base, svm_is_rep(prefixes)); gen_op_mov_TN_reg(ot, 1, R_EAX); if (use_icount) gen_io_start(); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff); tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]); gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32); if (use_icount) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; /************************/ /* control */ case 0xc2: /* ret im */ val = ldsw_code(s->pc); s->pc += 2; gen_pop_T0(s); if (CODE64(s) && s->dflag) s->dflag = 2; gen_stack_update(s, val + (2 << s->dflag)); if (s->dflag == 0) gen_op_andl_T0_ffff(); gen_op_jmp_T0(); gen_eob(s); break; case 0xc3: /* ret */ gen_pop_T0(s); gen_pop_update(s); if (s->dflag == 0) gen_op_andl_T0_ffff(); gen_op_jmp_T0(); gen_eob(s); break; case 0xca: /* lret im */ val = ldsw_code(s->pc); s->pc += 2; do_lret: if (s->pe && !s->vm86) { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_lret_protected(tcg_const_i32(s->dflag), tcg_const_i32(val)); } else { gen_stack_A0(s); /* pop offset */ gen_op_ld_T0_A0(1 + s->dflag + s->mem_index); if (s->dflag == 0) gen_op_andl_T0_ffff(); /* NOTE: keeping EIP updated is not a problem in case of exception */ gen_op_jmp_T0(); /* pop selector */ gen_op_addl_A0_im(2 << s->dflag); gen_op_ld_T0_A0(1 + s->dflag + s->mem_index); gen_op_movl_seg_T0_vm(R_CS); /* add stack offset */ gen_stack_update(s, val + (4 << s->dflag)); } gen_eob(s); break; case 0xcb: /* lret */ val = 0; goto do_lret; case 0xcf: /* iret */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET); if (!s->pe) { /* real mode */ gen_helper_iret_real(tcg_const_i32(s->dflag)); s->cc_op = CC_OP_EFLAGS; } else if (s->vm86) { if (s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_helper_iret_real(tcg_const_i32(s->dflag)); s->cc_op = CC_OP_EFLAGS; } } else { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_iret_protected(tcg_const_i32(s->dflag), tcg_const_i32(s->pc - s->cs_base)); s->cc_op = CC_OP_EFLAGS; } gen_eob(s); break; case 0xe8: /* call im */ { if (dflag) tval = (int32_t)insn_get(s, OT_LONG); else tval = (int16_t)insn_get(s, OT_WORD); next_eip = s->pc - s->cs_base; tval += next_eip; if (s->dflag == 0) tval &= 0xffff; gen_movtl_T0_im(next_eip); gen_push_T0(s); gen_jmp(s, tval); } break; case 0x9a: /* lcall im */ { unsigned int 
selector, offset; if (CODE64(s)) goto illegal_op; ot = dflag ? OT_LONG : OT_WORD; offset = insn_get(s, ot); selector = insn_get(s, OT_WORD); gen_op_movl_T0_im(selector); gen_op_movl_T1_imu(offset); } goto do_lcall; case 0xe9: /* jmp im */ if (dflag) tval = (int32_t)insn_get(s, OT_LONG); else tval = (int16_t)insn_get(s, OT_WORD); tval += s->pc - s->cs_base; if (s->dflag == 0) tval &= 0xffff; gen_jmp(s, tval); break; case 0xea: /* ljmp im */ { unsigned int selector, offset; if (CODE64(s)) goto illegal_op; ot = dflag ? OT_LONG : OT_WORD; offset = insn_get(s, ot); selector = insn_get(s, OT_WORD); gen_op_movl_T0_im(selector); gen_op_movl_T1_imu(offset); } goto do_ljmp; case 0xeb: /* jmp Jb */ tval = (int8_t)insn_get(s, OT_BYTE); tval += s->pc - s->cs_base; if (s->dflag == 0) tval &= 0xffff; gen_jmp(s, tval); break; case 0x70 ... 0x7f: /* jcc Jb */ tval = (int8_t)insn_get(s, OT_BYTE); goto do_jcc; case 0x180 ... 0x18f: /* jcc Jv */ if (dflag) { tval = (int32_t)insn_get(s, OT_LONG); } else { tval = (int16_t)insn_get(s, OT_WORD); } do_jcc: next_eip = s->pc - s->cs_base; tval += next_eip; if (s->dflag == 0) tval &= 0xffff; gen_jcc(s, b, tval, next_eip); break; case 0x190 ... 0x19f: /* setcc Gv */ modrm = ldub_code(s->pc++); gen_setcc(s, b); gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1); break; case 0x140 ... 0x14f: /* cmov Gv, Ev */ { int l1; TCGv t0; ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; t0 = tcg_temp_local_new(); if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_ld_v(ot + s->mem_index, t0, cpu_A0); } else { rm = (modrm & 7) | REX_B(s); gen_op_mov_v_reg(ot, t0, rm); } #ifdef TARGET_X86_64 if (ot == OT_LONG) { /* XXX: specific Intel behaviour ? */ l1 = gen_new_label(); gen_jcc1(s, s->cc_op, b ^ 1, l1); tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET); gen_set_label(l1); tcg_gen_movi_tl(cpu_tmp0, 0); tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET); } else #endif { l1 = gen_new_label(); gen_jcc1(s, s->cc_op, b ^ 1, l1); gen_op_mov_reg_v(ot, reg, t0); gen_set_label(l1); } tcg_temp_free(t0); } break; /************************/ /* flags */ case 0x9c: /* pushf */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_read_eflags(cpu_T[0]); gen_push_T0(s); } break; case 0x9d: /* popf */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_pop_T0(s); if (s->cpl == 0) { if (s->dflag) { gen_helper_write_eflags(cpu_T[0], tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK))); } else { gen_helper_write_eflags(cpu_T[0], tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff)); } } else { if (s->cpl <= s->iopl) { if (s->dflag) { gen_helper_write_eflags(cpu_T[0], tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK))); } else { gen_helper_write_eflags(cpu_T[0], tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff)); } } else { if (s->dflag) { gen_helper_write_eflags(cpu_T[0], tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK))); } else { gen_helper_write_eflags(cpu_T[0], tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff)); } } } gen_pop_update(s); s->cc_op = CC_OP_EFLAGS; /* abort translation because TF flag may 
change */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; case 0x9e: /* sahf */ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) goto illegal_op; gen_op_mov_TN_reg(OT_BYTE, 0, R_AH); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_compute_eflags(cpu_cc_src); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O); tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C); tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]); s->cc_op = CC_OP_EFLAGS; break; case 0x9f: /* lahf */ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_compute_eflags(cpu_T[0]); /* Note: gen_compute_eflags() only gives the condition codes */ tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02); gen_op_mov_reg_T0(OT_BYTE, R_AH); break; case 0xf5: /* cmc */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_compute_eflags(cpu_cc_src); tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C); s->cc_op = CC_OP_EFLAGS; break; case 0xf8: /* clc */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_compute_eflags(cpu_cc_src); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C); s->cc_op = CC_OP_EFLAGS; break; case 0xf9: /* stc */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_compute_eflags(cpu_cc_src); tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C); s->cc_op = CC_OP_EFLAGS; break; case 0xfc: /* cld */ tcg_gen_movi_i32(cpu_tmp2_i32, 1); tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df)); break; case 0xfd: /* std */ tcg_gen_movi_i32(cpu_tmp2_i32, -1); tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df)); break; /************************/ /* bit operations */ case 0x1ba: /* bt/bts/btr/btc Gv, im */ ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); op = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod != 3) { s->rip_offset = 1; gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_ld_T0_A0(ot + s->mem_index); } else { gen_op_mov_TN_reg(ot, 0, rm); } /* load shift */ val = ldub_code(s->pc++); gen_op_movl_T1_im(val); if (op < 4) goto illegal_op; op -= 4; goto bt_op; case 0x1a3: /* bt Gv, Ev */ op = 0; goto do_btx; case 0x1ab: /* bts */ op = 1; goto do_btx; case 0x1b3: /* btr */ op = 2; goto do_btx; case 0x1bb: /* btc */ op = 3; do_btx: ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); gen_op_mov_TN_reg(OT_LONG, 1, reg); if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); /* specific case: we need to add a displacement */ gen_exts(ot, cpu_T[1]); tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot); tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot); tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); gen_op_ld_T0_A0(ot + s->mem_index); } else { gen_op_mov_TN_reg(ot, 0, rm); } bt_op: tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1); switch(op) { case 0: tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]); tcg_gen_movi_tl(cpu_cc_dst, 0); break; case 1: tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]); tcg_gen_movi_tl(cpu_tmp0, 1); tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]); tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0); break; case 2: tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]); tcg_gen_movi_tl(cpu_tmp0, 1); tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]); tcg_gen_not_tl(cpu_tmp0, cpu_tmp0); tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0); break; default: case 3: tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]); tcg_gen_movi_tl(cpu_tmp0, 1); tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]); 
tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0); break; } s->cc_op = CC_OP_SARB + ot; if (op != 0) { if (mod != 3) gen_op_st_T0_A0(ot + s->mem_index); else gen_op_mov_reg_T0(ot, rm); tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4); tcg_gen_movi_tl(cpu_cc_dst, 0); } break; case 0x1bc: /* bsf */ case 0x1bd: /* bsr */ { int label1; TCGv t0; ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); gen_extu(ot, cpu_T[0]); label1 = gen_new_label(); tcg_gen_movi_tl(cpu_cc_dst, 0); t0 = tcg_temp_local_new(); tcg_gen_mov_tl(t0, cpu_T[0]); tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1); if (b & 1) { gen_helper_bsr(cpu_T[0], t0); } else { gen_helper_bsf(cpu_T[0], t0); } gen_op_mov_reg_T0(ot, reg); tcg_gen_movi_tl(cpu_cc_dst, 1); gen_set_label(label1); tcg_gen_discard_tl(cpu_cc_src); s->cc_op = CC_OP_LOGICB + ot; tcg_temp_free(t0); } break; /************************/ /* bcd */ case 0x27: /* daa */ if (CODE64(s)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_daa(); s->cc_op = CC_OP_EFLAGS; break; case 0x2f: /* das */ if (CODE64(s)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_das(); s->cc_op = CC_OP_EFLAGS; break; case 0x37: /* aaa */ if (CODE64(s)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_aaa(); s->cc_op = CC_OP_EFLAGS; break; case 0x3f: /* aas */ if (CODE64(s)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_helper_aas(); s->cc_op = CC_OP_EFLAGS; break; case 0xd4: /* aam */ if (CODE64(s)) goto illegal_op; val = ldub_code(s->pc++); if (val == 0) { gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base); } else { gen_helper_aam(tcg_const_i32(val)); s->cc_op = CC_OP_LOGICB; } break; case 0xd5: /* aad */ if (CODE64(s)) goto illegal_op; val = ldub_code(s->pc++); gen_helper_aad(tcg_const_i32(val)); s->cc_op = CC_OP_LOGICB; break; /************************/ /* misc */ case 0x90: /* nop */ /* XXX: xchg + rex handling */ /* XXX: correct lock test for all insn */ if (prefixes & PREFIX_LOCK) goto illegal_op; if (prefixes & PREFIX_REPZ) { gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE); } break; case 0x9b: /* fwait */ if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) == (HF_MP_MASK | HF_TS_MASK)) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); } else { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fwait(); } break; case 0xcc: /* int3 */ gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base); break; case 0xcd: /* int N */ val = ldub_code(s->pc++); if (s->vm86 && s->iopl != 3) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base); } break; case 0xce: /* into */ if (CODE64(s)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_into(tcg_const_i32(s->pc - pc_start)); break; #ifdef WANT_ICEBP case 0xf1: /* icebp (undocumented, exits to external debugger) */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP); #if 1 gen_debug(s, pc_start - s->cs_base); #else /* start debug */ tb_flush(cpu_single_env); cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM); #endif break; #endif case 0xfa: /* cli */ if (!s->vm86) { if (s->cpl <= s->iopl) { gen_helper_cli(); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } } else { if (s->iopl == 3) { gen_helper_cli(); } else { gen_exception(s, 
EXCP0D_GPF, pc_start - s->cs_base); } } break; case 0xfb: /* sti */ if (!s->vm86) { if (s->cpl <= s->iopl) { gen_sti: gen_helper_sti(); /* interruptions are enabled only the first insn after sti */ /* If several instructions disable interrupts, only the _first_ does it */ if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK)) gen_helper_set_inhibit_irq(); /* give a chance to handle pending irqs */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } } else { if (s->iopl == 3) { goto gen_sti; } else { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } } break; case 0x62: /* bound */ if (CODE64(s)) goto illegal_op; ot = dflag ? OT_LONG : OT_WORD; modrm = ldub_code(s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_op_mov_TN_reg(ot, 0, reg); gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_jmp_im(pc_start - s->cs_base); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); if (ot == OT_WORD) gen_helper_boundw(cpu_A0, cpu_tmp2_i32); else gen_helper_boundl(cpu_A0, cpu_tmp2_i32); break; case 0x1c8 ... 0x1cf: /* bswap reg */ reg = (b & 7) | REX_B(s); #ifdef TARGET_X86_64 if (dflag == 2) { gen_op_mov_TN_reg(OT_QUAD, 0, reg); tcg_gen_bswap_i64(cpu_T[0], cpu_T[0]); gen_op_mov_reg_T0(OT_QUAD, reg); } else { TCGv_i32 tmp0; gen_op_mov_TN_reg(OT_LONG, 0, reg); tmp0 = tcg_temp_new_i32(); tcg_gen_trunc_i64_i32(tmp0, cpu_T[0]); tcg_gen_bswap_i32(tmp0, tmp0); tcg_gen_extu_i32_i64(cpu_T[0], tmp0); gen_op_mov_reg_T0(OT_LONG, reg); } #else { gen_op_mov_TN_reg(OT_LONG, 0, reg); tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); gen_op_mov_reg_T0(OT_LONG, reg); } #endif break; case 0xd6: /* salc */ if (CODE64(s)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_compute_eflags_c(cpu_T[0]); tcg_gen_neg_tl(cpu_T[0], cpu_T[0]); gen_op_mov_reg_T0(OT_BYTE, R_EAX); break; case 0xe0: /* loopnz */ case 0xe1: /* loopz */ case 0xe2: /* loop */ case 0xe3: /* jecxz */ { int l1, l2, l3; tval = (int8_t)insn_get(s, OT_BYTE); next_eip = s->pc - s->cs_base; tval += next_eip; if (s->dflag == 0) tval &= 0xffff; l1 = gen_new_label(); l2 = gen_new_label(); l3 = gen_new_label(); b &= 3; switch(b) { case 0: /* loopnz */ case 1: /* loopz */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_op_add_reg_im(s->aflag, R_ECX, -1); gen_op_jz_ecx(s->aflag, l3); gen_compute_eflags(cpu_tmp0); tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z); if (b == 0) { tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1); } else { tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1); } break; case 2: /* loop */ gen_op_add_reg_im(s->aflag, R_ECX, -1); gen_op_jnz_ecx(s->aflag, l1); break; default: case 3: /* jcxz */ gen_op_jz_ecx(s->aflag, l1); break; } gen_set_label(l3); gen_jmp_im(next_eip); tcg_gen_br(l2); gen_set_label(l1); gen_jmp_im(tval); gen_set_label(l2); gen_eob(s); } break; case 0x130: /* wrmsr */ case 0x132: /* rdmsr */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); if (b & 2) { gen_helper_rdmsr(); } else { gen_helper_wrmsr(); } } break; case 0x131: /* rdtsc */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); if (use_icount) gen_io_start(); gen_helper_rdtsc(); if (use_icount) { gen_io_end(); gen_jmp(s, s->pc - s->cs_base); } break; case 0x133: /* rdpmc */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_rdpmc(); break; case 
0x134: /* sysenter */ /* For Intel SYSENTER is valid on 64-bit */ if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) goto illegal_op; if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { if (s->cc_op != CC_OP_DYNAMIC) { gen_op_set_cc_op(s->cc_op); s->cc_op = CC_OP_DYNAMIC; } gen_jmp_im(pc_start - s->cs_base); gen_helper_sysenter(); gen_eob(s); } break; case 0x135: /* sysexit */ /* For Intel SYSEXIT is valid on 64-bit */ if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) goto illegal_op; if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { if (s->cc_op != CC_OP_DYNAMIC) { gen_op_set_cc_op(s->cc_op); s->cc_op = CC_OP_DYNAMIC; } gen_jmp_im(pc_start - s->cs_base); gen_helper_sysexit(tcg_const_i32(dflag)); gen_eob(s); } break; #ifdef TARGET_X86_64 case 0x105: /* syscall */ /* XXX: is it usable in real mode ? */ if (s->cc_op != CC_OP_DYNAMIC) { gen_op_set_cc_op(s->cc_op); s->cc_op = CC_OP_DYNAMIC; } gen_jmp_im(pc_start - s->cs_base); gen_helper_syscall(tcg_const_i32(s->pc - pc_start)); gen_eob(s); break; case 0x107: /* sysret */ if (!s->pe) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { if (s->cc_op != CC_OP_DYNAMIC) { gen_op_set_cc_op(s->cc_op); s->cc_op = CC_OP_DYNAMIC; } gen_jmp_im(pc_start - s->cs_base); gen_helper_sysret(tcg_const_i32(s->dflag)); /* condition codes are modified only in long mode */ if (s->lma) s->cc_op = CC_OP_EFLAGS; gen_eob(s); } break; #endif case 0x1a2: /* cpuid */ if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_cpuid(); break; case 0xf4: /* hlt */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_hlt(tcg_const_i32(s->pc - pc_start)); s->is_jmp = 3; } break; case 0x100: modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { case 0: /* sldt */ if (!s->pe || s->vm86) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector)); ot = OT_WORD; if (mod == 3) ot += s->dflag; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1); break; case 2: /* lldt */ if (!s->pe || s->vm86) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE); gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); gen_jmp_im(pc_start - s->cs_base); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_lldt(cpu_tmp2_i32); } break; case 1: /* str */ if (!s->pe || s->vm86) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector)); ot = OT_WORD; if (mod == 3) ot += s->dflag; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1); break; case 3: /* ltr */ if (!s->pe || s->vm86) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE); gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); gen_jmp_im(pc_start - s->cs_base); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_helper_ltr(cpu_tmp2_i32); } break; case 4: /* verr */ case 5: /* verw */ if (!s->pe || s->vm86) goto illegal_op; gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); if (op == 4) gen_helper_verr(cpu_T[0]); else gen_helper_verw(cpu_T[0]); 
s->cc_op = CC_OP_EFLAGS; break; default: goto illegal_op; } break; case 0x101: modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; rm = modrm & 7; switch(op) { case 0: /* sgdt */ if (mod == 3) goto illegal_op; gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ); gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit)); gen_op_st_T0_A0(OT_WORD + s->mem_index); gen_add_A0_im(s, 2); tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base)); if (!s->dflag) gen_op_andl_T0_im(0xffffff); gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index); break; case 1: if (mod == 3) { switch (rm) { case 0: /* monitor */ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); #ifdef TARGET_X86_64 if (s->aflag == 2) { gen_op_movq_A0_reg(R_EAX); } else #endif { gen_op_movl_A0_reg(R_EAX); if (s->aflag == 0) gen_op_andl_A0_ffff(); } gen_add_A0_ds_seg(s); gen_helper_monitor(cpu_A0); break; case 1: /* mwait */ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) { gen_op_set_cc_op(s->cc_op); s->cc_op = CC_OP_DYNAMIC; } gen_jmp_im(pc_start - s->cs_base); gen_helper_mwait(tcg_const_i32(s->pc - pc_start)); gen_eob(s); break; default: goto illegal_op; } } else { /* sidt */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ); gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit)); gen_op_st_T0_A0(OT_WORD + s->mem_index); gen_add_A0_im(s, 2); tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base)); if (!s->dflag) gen_op_andl_T0_im(0xffffff); gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index); } break; case 2: /* lgdt */ case 3: /* lidt */ if (mod == 3) { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); switch(rm) { case 0: /* VMRUN */ if (!(s->flags & HF_SVME_MASK) || !s->pe) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } else { gen_helper_vmrun(tcg_const_i32(s->aflag), tcg_const_i32(s->pc - pc_start)); tcg_gen_exit_tb(0); s->is_jmp = 3; } break; case 1: /* VMMCALL */ if (!(s->flags & HF_SVME_MASK)) goto illegal_op; gen_helper_vmmcall(); break; case 2: /* VMLOAD */ if (!(s->flags & HF_SVME_MASK) || !s->pe) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } else { gen_helper_vmload(tcg_const_i32(s->aflag)); } break; case 3: /* VMSAVE */ if (!(s->flags & HF_SVME_MASK) || !s->pe) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } else { gen_helper_vmsave(tcg_const_i32(s->aflag)); } break; case 4: /* STGI */ if ((!(s->flags & HF_SVME_MASK) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || !s->pe) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } else { gen_helper_stgi(); } break; case 5: /* CLGI */ if (!(s->flags & HF_SVME_MASK) || !s->pe) goto illegal_op; if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); break; } else { gen_helper_clgi(); } break; case 6: /* SKINIT */ if ((!(s->flags & HF_SVME_MASK) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) || !s->pe) goto illegal_op; gen_helper_skinit(); break; case 7: /* INVLPGA */ if (!(s->flags & HF_SVME_MASK) || !s->pe) goto illegal_op; if (s->cpl != 0) { gen_exception(s, 
EXCP0D_GPF, pc_start - s->cs_base); break; } else { gen_helper_invlpga(tcg_const_i32(s->aflag)); } break; default: goto illegal_op; } } else if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE); gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_ld_T1_A0(OT_WORD + s->mem_index); gen_add_A0_im(s, 2); gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index); if (!s->dflag) gen_op_andl_T0_im(0xffffff); if (op == 2) { tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base)); tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit)); } else { tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base)); tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit)); } } break; case 4: /* smsw */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0])); gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1); break; case 6: /* lmsw */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); gen_helper_lmsw(cpu_T[0]); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; case 7: /* invlpg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { if (mod == 3) { #ifdef TARGET_X86_64 if (CODE64(s) && rm == 0) { /* swapgs */ tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,segs[R_GS].base)); tcg_gen_ld_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,kernelgsbase)); tcg_gen_st_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,segs[R_GS].base)); tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,kernelgsbase)); } else #endif { goto illegal_op; } } else { if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_helper_invlpg(cpu_A0); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } } break; default: goto illegal_op; } break; case 0x108: /* invd */ case 0x109: /* wbinvd */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, (b & 2) ? 
SVM_EXIT_INVD : SVM_EXIT_WBINVD); /* nothing to do */ } break; case 0x63: /* arpl or movslS (x86_64) */ #ifdef TARGET_X86_64 if (CODE64(s)) { int d_ot; /* d_ot is the size of destination */ d_ot = dflag + OT_WORD; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; rm = (modrm & 7) | REX_B(s); if (mod == 3) { gen_op_mov_TN_reg(OT_LONG, 0, rm); /* sign extend */ if (d_ot == OT_QUAD) tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]); gen_op_mov_reg_T0(d_ot, reg); } else { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); if (d_ot == OT_QUAD) { gen_op_lds_T0_A0(OT_LONG + s->mem_index); } else { gen_op_ld_T0_A0(OT_LONG + s->mem_index); } gen_op_mov_reg_T0(d_ot, reg); } } else #endif { int label1; TCGv t0, t1, t2; if (!s->pe || s->vm86) goto illegal_op; t0 = tcg_temp_local_new(); t1 = tcg_temp_local_new(); t2 = tcg_temp_local_new(); ot = OT_WORD; modrm = ldub_code(s->pc++); reg = (modrm >> 3) & 7; mod = (modrm >> 6) & 3; rm = modrm & 7; if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_ld_v(ot + s->mem_index, t0, cpu_A0); } else { gen_op_mov_v_reg(ot, t0, rm); } gen_op_mov_v_reg(ot, t1, reg); tcg_gen_andi_tl(cpu_tmp0, t0, 3); tcg_gen_andi_tl(t1, t1, 3); tcg_gen_movi_tl(t2, 0); label1 = gen_new_label(); tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1); tcg_gen_andi_tl(t0, t0, ~3); tcg_gen_or_tl(t0, t0, t1); tcg_gen_movi_tl(t2, CC_Z); gen_set_label(label1); if (mod != 3) { gen_op_st_v(ot + s->mem_index, t0, cpu_A0); } else { gen_op_mov_reg_v(ot, rm, t0); } if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_compute_eflags(cpu_cc_src); tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z); tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2); s->cc_op = CC_OP_EFLAGS; tcg_temp_free(t0); tcg_temp_free(t1); tcg_temp_free(t2); } break; case 0x102: /* lar */ case 0x103: /* lsl */ { int label1; TCGv t0; if (!s->pe || s->vm86) goto illegal_op; ot = dflag ? OT_LONG : OT_WORD; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); t0 = tcg_temp_local_new(); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); if (b == 0x102) gen_helper_lar(t0, cpu_T[0]); else gen_helper_lsl(t0, cpu_T[0]); tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z); label1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1); gen_op_mov_reg_v(ot, reg, t0); gen_set_label(label1); s->cc_op = CC_OP_EFLAGS; tcg_temp_free(t0); } break; case 0x118: modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { case 0: /* prefetchnta */ case 1: /* prefetchnt0 */ case 2: /* prefetchnt0 */ case 3: /* prefetchnt0 */ if (mod == 3) goto illegal_op; gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); /* nothing more to do */ break; default: /* nop (multi byte) */ gen_nop_modrm(s, modrm); break; } break; case 0x119 ... 
0x11f: /* nop (multi byte) */ modrm = ldub_code(s->pc++); gen_nop_modrm(s, modrm); break; case 0x120: /* mov reg, crN */ case 0x122: /* mov crN, reg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { modrm = ldub_code(s->pc++); if ((modrm & 0xc0) != 0xc0) goto illegal_op; rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (CODE64(s)) ot = OT_QUAD; else ot = OT_LONG; switch(reg) { case 0: case 2: case 3: case 4: case 8: if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); if (b & 2) { gen_op_mov_TN_reg(ot, 0, rm); gen_helper_write_crN(tcg_const_i32(reg), cpu_T[0]); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } else { gen_helper_read_crN(cpu_T[0], tcg_const_i32(reg)); gen_op_mov_reg_T0(ot, rm); } break; default: goto illegal_op; } } break; case 0x121: /* mov reg, drN */ case 0x123: /* mov drN, reg */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { modrm = ldub_code(s->pc++); if ((modrm & 0xc0) != 0xc0) goto illegal_op; rm = (modrm & 7) | REX_B(s); reg = ((modrm >> 3) & 7) | rex_r; if (CODE64(s)) ot = OT_QUAD; else ot = OT_LONG; /* XXX: do it dynamically with CR4.DE bit */ if (reg == 4 || reg == 5 || reg >= 8) goto illegal_op; if (b & 2) { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg); gen_op_mov_TN_reg(ot, 0, rm); gen_helper_movl_drN_T0(tcg_const_i32(reg), cpu_T[0]); gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg); tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg])); gen_op_mov_reg_T0(ot, rm); } } break; case 0x106: /* clts */ if (s->cpl != 0) { gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); } else { gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0); gen_helper_clts(); /* abort block because static cpu state changed */ gen_jmp_im(s->pc - s->cs_base); gen_eob(s); } break; /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */ case 0x1c3: /* MOVNTI reg, mem */ if (!(s->cpuid_features & CPUID_SSE2)) goto illegal_op; ot = s->dflag == 2 ? 
OT_QUAD : OT_LONG; modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; reg = ((modrm >> 3) & 7) | rex_r; /* generate a generic store */ gen_ldst_modrm(s, modrm, ot, reg, 1); break; case 0x1ae: modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; op = (modrm >> 3) & 7; switch(op) { case 0: /* fxsave */ if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) || (s->flags & HF_EM_MASK)) goto illegal_op; if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fxsave(cpu_A0, tcg_const_i32((s->dflag == 2))); break; case 1: /* fxrstor */ if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) || (s->flags & HF_EM_MASK)) goto illegal_op; if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); if (s->cc_op != CC_OP_DYNAMIC) gen_op_set_cc_op(s->cc_op); gen_jmp_im(pc_start - s->cs_base); gen_helper_fxrstor(cpu_A0, tcg_const_i32((s->dflag == 2))); break; case 2: /* ldmxcsr */ case 3: /* stmxcsr */ if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); break; } if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) || mod == 3) goto illegal_op; gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); if (op == 2) { gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr)); } else { tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr)); gen_op_st_T0_A0(OT_LONG + s->mem_index); } break; case 5: /* lfence */ case 6: /* mfence */ if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE)) goto illegal_op; break; case 7: /* sfence / clflush */ if ((modrm & 0xc7) == 0xc0) { /* sfence */ /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */ if (!(s->cpuid_features & CPUID_SSE)) goto illegal_op; } else { /* clflush */ if (!(s->cpuid_features & CPUID_CLFLUSH)) goto illegal_op; gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); } break; default: goto illegal_op; } break; case 0x10d: /* 3DNow! prefetch(w) */ modrm = ldub_code(s->pc++); mod = (modrm >> 6) & 3; if (mod == 3) goto illegal_op; gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); /* ignore for now */ break; case 0x1aa: /* rsm */ gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM); if (!(s->flags & HF_SMM_MASK)) goto illegal_op; if (s->cc_op != CC_OP_DYNAMIC) { gen_op_set_cc_op(s->cc_op); s->cc_op = CC_OP_DYNAMIC; } gen_jmp_im(s->pc - s->cs_base); gen_helper_rsm(); gen_eob(s); break; case 0x1b8: /* SSE4.2 popcnt */ if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) != PREFIX_REPZ) goto illegal_op; if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) goto illegal_op; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7); if (s->prefix & PREFIX_DATA) ot = OT_WORD; else if (s->dflag != 2) ot = OT_LONG; else ot = OT_QUAD; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); gen_helper_popcnt(cpu_T[0], cpu_T[0], tcg_const_i32(ot)); gen_op_mov_reg_T0(ot, reg); s->cc_op = CC_OP_EFLAGS; break; case 0x10e ... 0x10f: /* 3DNow! instructions, ignore prefixes */ s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA); case 0x110 ... 0x117: case 0x128 ... 0x12f: case 0x138 ... 0x13a: case 0x150 ... 0x177: case 0x17c ... 0x17f: case 0x1c2: case 0x1c4 ... 0x1c6: case 0x1d0 ... 
0x1fe: gen_sse(s, b, pc_start, rex_r); break; default: goto illegal_op; } /* lock generation */ if (s->prefix & PREFIX_LOCK) gen_helper_unlock(); return s->pc; illegal_op: if (s->prefix & PREFIX_LOCK) gen_helper_unlock(); /* XXX: ensure that no lock was generated */ gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base); return s->pc; } | 3,554 |
1 | static void align_position(AVIOContext *pb, int64_t offset, uint64_t size) { if (avio_tell(pb) != offset + size) avio_seek(pb, offset + size, SEEK_SET); } | 3,556 |
1 | static int usb_serial_handle_data(USBDevice *dev, USBPacket *p) { USBSerialState *s = (USBSerialState *)dev; int ret = 0; uint8_t devep = p->devep; uint8_t *data = p->data; int len = p->len; int first_len; switch (p->pid) { case USB_TOKEN_OUT: if (devep != 2) goto fail; qemu_chr_write(s->cs, data, len); break; case USB_TOKEN_IN: if (devep != 1) goto fail; first_len = RECV_BUF - s->recv_ptr; if (len <= 2) { ret = USB_RET_NAK; break; } *data++ = usb_get_modem_lines(s) | 1; /* We do not have the uart details */ /* handle serial break */ if (s->event_trigger && s->event_trigger & FTDI_BI) { s->event_trigger &= ~FTDI_BI; *data = FTDI_BI; ret = 2; break; } else { *data++ = 0; } len -= 2; if (len > s->recv_used) len = s->recv_used; if (!len) { ret = USB_RET_NAK; break; } if (first_len > len) first_len = len; memcpy(data, s->recv_buf + s->recv_ptr, first_len); if (len > first_len) memcpy(data + first_len, s->recv_buf, len - first_len); s->recv_used -= len; s->recv_ptr = (s->recv_ptr + len) % RECV_BUF; ret = len + 2; break; default: DPRINTF("Bad token\n"); fail: ret = USB_RET_STALL; break; } return ret; } | 3,557 |
1 | void isa_register_portio_list(ISADevice *dev, uint16_t start, const MemoryRegionPortio *pio_start, void *opaque, const char *name) { PortioList piolist; /* START is how we should treat DEV, regardless of the actual contents of the portio array. This is how the old code actually handled e.g. the FDC device. */ isa_init_ioport(dev, start); /* FIXME: the device should store created PortioList in its state. Note that DEV can be NULL here and that single device can register several portio lists. Current implementation is leaking memory allocated in portio_list_init. The leak is not critical because it happens only at initialization time. */ portio_list_init(&piolist, OBJECT(dev), pio_start, opaque, name); portio_list_add(&piolist, isabus->address_space_io, start); } | 3,558 |
1 | static inline void write_mem(IVState *s, uint64_t off, const void *buf, size_t len) { QTestState *qtest = global_qtest; global_qtest = s->qtest; qpci_memwrite(s->dev, s->mem_base + off, buf, len); global_qtest = qtest; } | 3,559 |
1 | static void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time) { QEMUTimer **pt, *t; qemu_del_timer(ts); /* add the timer in the sorted list */ /* NOTE: this code must be signal safe because qemu_timer_expired() can be called from a signal. */ pt = &active_timers[ts->clock->type]; for(;;) { t = *pt; if (!t) break; if (t->expire_time > expire_time) break; pt = &t->next; } ts->expire_time = expire_time; ts->next = *pt; *pt = ts; /* Rearm if necessary */ if (pt == &active_timers[ts->clock->type]) { if (!alarm_timer->pending) { qemu_rearm_alarm_timer(alarm_timer); } /* Interrupt execution to force deadline recalculation. */ if (use_icount) qemu_notify_event(); } } | 3,562 |
1 | static void co_read_response(void *opaque) { BDRVSheepdogState *s = opaque; if (!s->co_recv) { s->co_recv = qemu_coroutine_create(aio_read_response); } qemu_coroutine_enter(s->co_recv, opaque); } | 3,563 |
1 | static void omap2_inth_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) opaque; int offset = addr; int bank_no, line_no; struct omap_intr_handler_bank_s *bank = NULL; if ((offset & 0xf80) == 0x80) { bank_no = (offset & 0x60) >> 5; if (bank_no < s->nbanks) { offset &= ~0x60; bank = &s->bank[bank_no]; } } switch (offset) { case 0x10: /* INTC_SYSCONFIG */ s->autoidle &= 4; s->autoidle |= (value & 1) << 2; if (value & 2) /* SOFTRESET */ omap_inth_reset(&s->busdev.qdev); case 0x48: /* INTC_CONTROL */ s->mask = (value & 4) ? 0 : ~0; /* GLOBALMASK */ if (value & 2) { /* NEWFIQAGR */ qemu_set_irq(s->parent_intr[1], 0); s->new_agr[1] = ~0; omap_inth_update(s, 1); } if (value & 1) { /* NEWIRQAGR */ qemu_set_irq(s->parent_intr[0], 0); s->new_agr[0] = ~0; omap_inth_update(s, 0); } case 0x4c: /* INTC_PROTECTION */ /* TODO: Make a bitmap (or sizeof(char)map) of access privileges * for every register, see Chapter 3 and 4 for privileged mode. */ if (value & 1) fprintf(stderr, "%s: protection mode enable attempt\n", __FUNCTION__); case 0x50: /* INTC_IDLE */ s->autoidle &= ~3; s->autoidle |= value & 3; /* Per-bank registers */ case 0x84: /* INTC_MIR */ bank->mask = value; omap_inth_update(s, 0); omap_inth_update(s, 1); case 0x88: /* INTC_MIR_CLEAR */ bank->mask &= ~value; omap_inth_update(s, 0); omap_inth_update(s, 1); case 0x8c: /* INTC_MIR_SET */ bank->mask |= value; case 0x90: /* INTC_ISR_SET */ bank->irqs |= bank->swi |= value; omap_inth_update(s, 0); omap_inth_update(s, 1); case 0x94: /* INTC_ISR_CLEAR */ bank->swi &= ~value; bank->irqs = bank->swi & bank->inputs; /* Per-line registers */ case 0x100 ... 0x300: /* INTC_ILR */ bank_no = (offset - 0x100) >> 7; if (bank_no > s->nbanks) break; bank = &s->bank[bank_no]; line_no = (offset & 0x7f) >> 2; bank->priority[line_no] = (value >> 2) & 0x3f; bank->fiq &= ~(1 << line_no); bank->fiq |= (value & 1) << line_no; case 0x00: /* INTC_REVISION */ case 0x14: /* INTC_SYSSTATUS */ case 0x40: /* INTC_SIR_IRQ */ case 0x44: /* INTC_SIR_FIQ */ case 0x80: /* INTC_ITR */ case 0x98: /* INTC_PENDING_IRQ */ case 0x9c: /* INTC_PENDING_FIQ */ OMAP_RO_REG(addr); } } | 3,564 |
1 | static int decode_hextile(VmncContext *c, uint8_t* dst, uint8_t* src, int w, int h, int stride) { int i, j, k; int bg = 0, fg = 0, rects, color, flags, xy, wh; const int bpp = c->bpp2; uint8_t *dst2; int bw = 16, bh = 16; uint8_t *ssrc=src; for(j = 0; j < h; j += 16) { dst2 = dst; bw = 16; if(j + 16 > h) bh = h - j; for(i = 0; i < w; i += 16, dst2 += 16 * bpp) { if(i + 16 > w) bw = w - i; flags = *src++; if(flags & HT_RAW) { paint_raw(dst2, bw, bh, src, bpp, c->bigendian, stride); src += bw * bh * bpp; } else { if(flags & HT_BKG) { bg = vmnc_get_pixel(src, bpp, c->bigendian); src += bpp; } if(flags & HT_FG) { fg = vmnc_get_pixel(src, bpp, c->bigendian); src += bpp; } rects = 0; if(flags & HT_SUB) rects = *src++; color = (flags & HT_CLR); paint_rect(dst2, 0, 0, bw, bh, bg, bpp, stride); for(k = 0; k < rects; k++) { if(color) { fg = vmnc_get_pixel(src, bpp, c->bigendian); src += bpp; } xy = *src++; wh = *src++; paint_rect(dst2, xy >> 4, xy & 0xF, (wh>>4)+1, (wh & 0xF)+1, fg, bpp, stride); } } } dst += stride * 16; } return src - ssrc; } | 3,565 |
0 | static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int bsi, int qp ) { int i; int index_a = qp + h->slice_alpha_c0_offset; int alpha = (alpha_table+52)[index_a]; int beta = (beta_table+52)[qp + h->slice_beta_offset]; for( i = 0; i < 8; i++, pix += stride) { const int bS_index = (i >> 1) * bsi; if( bS[bS_index] == 0 ) { continue; } if( bS[bS_index] < 4 ) { const int tc0 = (tc0_table+52)[index_a][bS[bS_index]]; const int p0 = pix[-1]; const int p1 = pix[-2]; const int p2 = pix[-3]; const int q0 = pix[0]; const int q1 = pix[1]; const int q2 = pix[2]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { int tc = tc0; int i_delta; if( FFABS( p2 - p0 ) < beta ) { if(tc0) pix[-2] = p1 + av_clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 ); tc++; } if( FFABS( q2 - q0 ) < beta ) { if(tc0) pix[1] = q1 + av_clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 ); tc++; } i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc ); pix[-1] = av_clip_uint8( p0 + i_delta ); /* p0' */ pix[0] = av_clip_uint8( q0 - i_delta ); /* q0' */ tprintf(h->s.avctx, "filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d, tc:%d\n# bS:%d -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, tc, bS[bS_index], pix[-3], p1, p0, q0, q1, pix[2], p1, pix[-1], pix[0], q1); } }else{ const int p0 = pix[-1]; const int p1 = pix[-2]; const int p2 = pix[-3]; const int q0 = pix[0]; const int q1 = pix[1]; const int q2 = pix[2]; if( FFABS( p0 - q0 ) < alpha && FFABS( p1 - p0 ) < beta && FFABS( q1 - q0 ) < beta ) { if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){ if( FFABS( p2 - p0 ) < beta) { const int p3 = pix[-4]; /* p0', p1', p2' */ pix[-1] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3; pix[-2] = ( p2 + p1 + p0 + q0 + 2 ) >> 2; pix[-3] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3; } else { /* p0' */ pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2; } if( FFABS( q2 - q0 ) < beta) { const int q3 = pix[3]; /* q0', q1', q2' */ pix[0] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3; pix[1] = ( p0 + q0 + q1 + q2 + 2 ) >> 2; pix[2] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3; } else { /* q0' */ pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } }else{ /* p0', q0' */ pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2; pix[ 0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; } tprintf(h->s.avctx, "filter_mb_mbaff_edgev i:%d, qp:%d, indexA:%d, alpha:%d, beta:%d\n# bS:4 -> [%02x, %02x, %02x, %02x, %02x, %02x] =>[%02x, %02x, %02x, %02x, %02x, %02x]\n", i, qp[qp_index], index_a, alpha, beta, p2, p1, p0, q0, q1, q2, pix[-3], pix[-2], pix[-1], pix[0], pix[1], pix[2]); } } } } | 3,567 |
0 | static void arm_idct_add(UINT8 *dest, int line_size, DCTELEM *block) { j_rev_dct_ARM (block); add_pixels_clamped(block, dest, line_size); } | 3,570 |
0 | static inline void decode_block_intra(MadContext * t, DCTELEM * block) { MpegEncContext *s = &t->s; int level, i, j, run; RLTable *rl = &ff_rl_mpeg1; const uint8_t *scantable = s->intra_scantable.permutated; int16_t *quant_matrix = s->intra_matrix; block[0] = (128 + get_sbits(&s->gb, 8)) * quant_matrix[0]; /* The RL decoder is derived from mpeg1_decode_block_intra; Escaped level and run values a decoded differently */ i = 0; { OPEN_READER(re, &s->gb); /* now quantify & encode AC coefficients */ for (;;) { UPDATE_CACHE(re, &s->gb); GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); if (level == 127) { break; } else if (level != 0) { i += run; j = scantable[i]; level = (level*quant_matrix[j]) >> 4; level = (level-1)|1; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); } else { /* escape */ UPDATE_CACHE(re, &s->gb); level = SHOW_SBITS(re, &s->gb, 10); SKIP_BITS(re, &s->gb, 10); UPDATE_CACHE(re, &s->gb); run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); i += run; j = scantable[i]; if (level < 0) { level = -level; level = (level*quant_matrix[j]) >> 4; level = (level-1)|1; level = -level; } else { level = (level*quant_matrix[j]) >> 4; level = (level-1)|1; } } if (i > 63) { av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return; } block[j] = level; } CLOSE_READER(re, &s->gb); } } | 3,571 |
1 | static void test_ivshmem_memdev(void) { IVState state; /* just for the sake of checking memory-backend property */ setup_vm_cmd(&state, "-object memory-backend-ram,size=1M,id=mb1" " -device ivshmem,x-memdev=mb1", false); qtest_quit(state.qtest); } | 3,572 |
1 | static av_cold void init_static(void) { if (!huff_vlc[0].bits) { INIT_VLC_STATIC(&huff_vlc[0], VLC_BITS, 18, &ff_mlp_huffman_tables[0][0][1], 2, 1, &ff_mlp_huffman_tables[0][0][0], 2, 1, 512); INIT_VLC_STATIC(&huff_vlc[1], VLC_BITS, 16, &ff_mlp_huffman_tables[1][0][1], 2, 1, &ff_mlp_huffman_tables[1][0][0], 2, 1, 512); INIT_VLC_STATIC(&huff_vlc[2], VLC_BITS, 15, &ff_mlp_huffman_tables[2][0][1], 2, 1, &ff_mlp_huffman_tables[2][0][0], 2, 1, 512); ff_mlp_init_crc(); | 3,573 |
1 | void gen_pc_load(CPUState *env, TranslationBlock *tb, unsigned long searched_pc, int pc_pos, void *puc) { env->regs[15] = gen_opc_pc[pc_pos]; } | 3,574 |
1 | void virtio_scsi_dataplane_notify(VirtIODevice *vdev, VirtIOSCSIReq *req) { if (virtio_should_notify(vdev, req->vq)) { event_notifier_set(virtio_queue_get_guest_notifier(req->vq)); } } | 3,575 |
1 | SwsFunc ff_yuv2rgb_init_mmx(SwsContext *c) { int cpu_flags = av_get_cpu_flags(); if (c->srcFormat != PIX_FMT_YUV420P && c->srcFormat != PIX_FMT_YUVA420P) return NULL; if (HAVE_MMX2 && cpu_flags & AV_CPU_FLAG_MMX2) { switch (c->dstFormat) { case PIX_FMT_RGB24: return yuv420_rgb24_MMX2; case PIX_FMT_BGR24: return yuv420_bgr24_MMX2; } } if (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX) { switch (c->dstFormat) { case PIX_FMT_RGB32: if (CONFIG_SWSCALE_ALPHA && c->srcFormat == PIX_FMT_YUVA420P) { #if HAVE_7REGS return yuva420_rgb32_MMX; #endif break; } else return yuv420_rgb32_MMX; case PIX_FMT_BGR32: if (CONFIG_SWSCALE_ALPHA && c->srcFormat == PIX_FMT_YUVA420P) { #if HAVE_7REGS return yuva420_bgr32_MMX; #endif break; } else return yuv420_bgr32_MMX; case PIX_FMT_RGB24: return yuv420_rgb24_MMX; case PIX_FMT_BGR24: return yuv420_bgr24_MMX; case PIX_FMT_RGB565: return yuv420_rgb16_MMX; case PIX_FMT_RGB555: return yuv420_rgb15_MMX; } } return NULL; } | 3,576 |
1 | int ide_init_drive(IDEState *s, BlockDriverState *bs, const char *version, const char *serial) { int cylinders, heads, secs; uint64_t nb_sectors; s->bs = bs; bdrv_get_geometry(bs, &nb_sectors); bdrv_guess_geometry(bs, &cylinders, &heads, &secs); if (cylinders < 1 || cylinders > 16383) { error_report("cyls must be between 1 and 16383"); if (heads < 1 || heads > 16) { error_report("heads must be between 1 and 16"); if (secs < 1 || secs > 63) { error_report("secs must be between 1 and 63"); s->cylinders = cylinders; s->heads = heads; s->sectors = secs; s->nb_sectors = nb_sectors; /* The SMART values should be preserved across power cycles but they aren't. */ s->smart_enabled = 1; s->smart_autosave = 1; s->smart_errors = 0; s->smart_selftest_count = 0; if (bdrv_get_type_hint(bs) == BDRV_TYPE_CDROM) { s->drive_kind = IDE_CD; bdrv_set_change_cb(bs, cdrom_change_cb, s); } else { if (bdrv_is_read_only(bs)) { error_report("Can't use a read-only drive"); if (serial) { strncpy(s->drive_serial_str, serial, sizeof(s->drive_serial_str)); } else { snprintf(s->drive_serial_str, sizeof(s->drive_serial_str), "QM%05d", s->drive_serial); if (version) { pstrcpy(s->version, sizeof(s->version), version); } else { pstrcpy(s->version, sizeof(s->version), QEMU_VERSION); ide_reset(s); bdrv_set_removable(bs, s->drive_kind == IDE_CD); return 0; | 3,577 |
1 | SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockDriverState *bdrv, int unit, bool removable, int bootindex, const char *serial, Error **errp) { const char *driver; DeviceState *dev; Error *err = NULL; driver = bdrv_is_sg(bdrv) ? "scsi-generic" : "scsi-disk"; dev = qdev_create(&bus->qbus, driver); qdev_prop_set_uint32(dev, "scsi-id", unit); if (bootindex >= 0) { qdev_prop_set_int32(dev, "bootindex", bootindex); } if (object_property_find(OBJECT(dev), "removable", NULL)) { qdev_prop_set_bit(dev, "removable", removable); } if (serial) { qdev_prop_set_string(dev, "serial", serial); } if (qdev_prop_set_drive(dev, "drive", bdrv) < 0) { error_setg(errp, "Setting drive property failed"); qdev_free(dev); return NULL; } object_property_set_bool(OBJECT(dev), true, "realized", &err); if (err != NULL) { error_propagate(errp, err); qdev_free(dev); return NULL; } return SCSI_DEVICE(dev); } | 3,578 |
1 | static int unix_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size) { QEMUFileSocket *s = opaque; ssize_t len; for (;;) { len = read(s->fd, buf, size); if (len != -1) { break; } if (errno == EAGAIN) { yield_until_fd_readable(s->fd); } else if (errno != EINTR) { break; } } if (len == -1) { len = -errno; } return len; } | 3,579 |
1 | DriveInfo *add_init_drive(const char *optstr) { DriveInfo *dinfo; QemuOpts *opts; MachineClass *mc; opts = drive_def(optstr); if (!opts) return NULL; mc = MACHINE_GET_CLASS(current_machine); dinfo = drive_new(opts, mc->block_default_type); if (!dinfo) { qemu_opts_del(opts); return NULL; } return dinfo; } | 3,580 |
1 | static void decode_p_block(FourXContext *f, uint16_t *dst, uint16_t *src, int log2w, int log2h, int stride){ const int index= size2index[log2h][log2w]; const int h= 1<<log2h; int code= get_vlc2(&f->gb, block_type_vlc[1-(f->version>1)][index].table, BLOCK_TYPE_VLC_BITS, 1); uint16_t *start= (uint16_t*)f->last_picture.data[0]; uint16_t *end= start + stride*(f->avctx->height-h+1) - (1<<log2w); assert(code>=0 && code<=6); if(code == 0){ src += f->mv[ *f->bytestream++ ]; if(start > src || src > end){ av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n"); return; } mcdc(dst, src, log2w, h, stride, 1, 0); }else if(code == 1){ log2h--; decode_p_block(f, dst , src , log2w, log2h, stride); decode_p_block(f, dst + (stride<<log2h), src + (stride<<log2h), log2w, log2h, stride); }else if(code == 2){ log2w--; decode_p_block(f, dst , src , log2w, log2h, stride); decode_p_block(f, dst + (1<<log2w), src + (1<<log2w), log2w, log2h, stride); }else if(code == 3 && f->version<2){ mcdc(dst, src, log2w, h, stride, 1, 0); }else if(code == 4){ src += f->mv[ *f->bytestream++ ]; if(start > src || src > end){ av_log(f->avctx, AV_LOG_ERROR, "mv out of pic\n"); return; } mcdc(dst, src, log2w, h, stride, 1, av_le2ne16(*f->wordstream++)); }else if(code == 5){ mcdc(dst, src, log2w, h, stride, 0, av_le2ne16(*f->wordstream++)); }else if(code == 6){ if(log2w){ dst[0] = av_le2ne16(*f->wordstream++); dst[1] = av_le2ne16(*f->wordstream++); }else{ dst[0 ] = av_le2ne16(*f->wordstream++); dst[stride] = av_le2ne16(*f->wordstream++); } } } | 3,581 |
0 | static int mov_write_int8_metadata(AVFormatContext *s, AVIOContext *pb, const char *name, const char *tag, int len) { AVDictionaryEntry *t = NULL; uint8_t num; if (!(t = av_dict_get(s->metadata, tag, NULL, 0))) return 0; num = t ? atoi(t->value) : 0; avio_wb32(pb, len+8); ffio_wfourcc(pb, name); if (len==4) avio_wb32(pb, num); else avio_w8 (pb, num); return len+8; } | 3,583 |
0 | int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt, const uint8_t *buf, int len) { unsigned int ssrc, h; int payload_type, seq, ret; AVStream *st; uint32_t timestamp; int rv= 0; if (!buf) { /* return the next packets, if any */ if(s->st && s->parse_packet) { timestamp= 0; ///< Should not be used if buf is NULL, but should be set to the timestamp of the packet returned.... rv= s->parse_packet(s, pkt, &timestamp, NULL, 0); finalize_packet(s, pkt, timestamp); return rv; } else { // TODO: Move to a dynamic packet handler (like above) if (s->read_buf_index >= s->read_buf_size) return -1; ret = mpegts_parse_packet(s->ts, pkt, s->buf + s->read_buf_index, s->read_buf_size - s->read_buf_index); if (ret < 0) return -1; s->read_buf_index += ret; if (s->read_buf_index < s->read_buf_size) return 1; else return 0; } } if (len < 12) return -1; if ((buf[0] & 0xc0) != (RTP_VERSION << 6)) return -1; if (buf[1] >= 200 && buf[1] <= 204) { rtcp_parse_packet(s, buf, len); return -1; } payload_type = buf[1] & 0x7f; seq = (buf[2] << 8) | buf[3]; timestamp = decode_be32(buf + 4); ssrc = decode_be32(buf + 8); /* store the ssrc in the RTPDemuxContext */ s->ssrc = ssrc; /* NOTE: we can handle only one payload type */ if (s->payload_type != payload_type) return -1; st = s->st; #if defined(DEBUG) || 1 if (seq != ((s->seq + 1) & 0xffff)) { av_log(st?st->codec:NULL, AV_LOG_ERROR, "RTP: PT=%02x: bad cseq %04x expected=%04x\n", payload_type, seq, ((s->seq + 1) & 0xffff)); } #endif s->seq = seq; len -= 12; buf += 12; if (!st) { /* specific MPEG2TS demux support */ ret = mpegts_parse_packet(s->ts, pkt, buf, len); if (ret < 0) return -1; if (ret < len) { s->read_buf_size = len - ret; memcpy(s->buf, buf + ret, s->read_buf_size); s->read_buf_index = 0; return 1; } } else { // at this point, the RTP header has been stripped; This is ASSUMING that there is only 1 CSRC, which in't wise. switch(st->codec->codec_id) { case CODEC_ID_MP2: /* better than nothing: skip mpeg audio RTP header */ if (len <= 4) return -1; h = decode_be32(buf); len -= 4; buf += 4; av_new_packet(pkt, len); memcpy(pkt->data, buf, len); break; case CODEC_ID_MPEG1VIDEO: /* better than nothing: skip mpeg video RTP header */ if (len <= 4) return -1; h = decode_be32(buf); buf += 4; len -= 4; if (h & (1 << 26)) { /* mpeg2 */ if (len <= 4) return -1; buf += 4; len -= 4; } av_new_packet(pkt, len); memcpy(pkt->data, buf, len); break; // moved from below, verbatim. this is because this section handles packets, and the lower switch handles // timestamps. // TODO: Put this into a dynamic packet handler... case CODEC_ID_MPEG4AAC: if (rtp_parse_mp4_au(s, buf)) return -1; { rtp_payload_data_t *infos = s->rtp_payload_data; if (infos == NULL) return -1; buf += infos->au_headers_length_bytes + 2; len -= infos->au_headers_length_bytes + 2; /* XXX: Fixme we only handle the case where rtp_parse_mp4_au define one au_header */ av_new_packet(pkt, infos->au_headers[0].size); memcpy(pkt->data, buf, infos->au_headers[0].size); buf += infos->au_headers[0].size; len -= infos->au_headers[0].size; } s->read_buf_size = len; s->buf_ptr = buf; rv= 0; break; default: if(s->parse_packet) { rv= s->parse_packet(s, pkt, &timestamp, buf, len); } else { av_new_packet(pkt, len); memcpy(pkt->data, buf, len); } break; } // now perform timestamp things.... finalize_packet(s, pkt, timestamp); } return rv; } | 3,584 
1 | static int decode_mb_info(IVI4DecContext *ctx, IVIBandDesc *band, IVITile *tile, AVCodecContext *avctx) { int x, y, mv_x, mv_y, mv_delta, offs, mb_offset, blks_per_mb, mv_scale, mb_type_bits, s; IVIMbInfo *mb, *ref_mb; int row_offset = band->mb_size * band->pitch; mb = tile->mbs; ref_mb = tile->ref_mbs; offs = tile->ypos * band->pitch + tile->xpos; blks_per_mb = band->mb_size != band->blk_size ? 4 : 1; mb_type_bits = ctx->frame_type == FRAMETYPE_BIDIR ? 2 : 1; /* scale factor for motion vectors */ mv_scale = (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3); mv_x = mv_y = 0; if (((tile->width + band->mb_size-1)/band->mb_size) * ((tile->height + band->mb_size-1)/band->mb_size) != tile->num_MBs) { av_log(avctx, AV_LOG_ERROR, "num_MBs mismatch %d %d %d %d\n", tile->width, tile->height, band->mb_size, tile->num_MBs); return -1; } for (y = tile->ypos; y < tile->ypos + tile->height; y += band->mb_size) { mb_offset = offs; for (x = tile->xpos; x < tile->xpos + tile->width; x += band->mb_size) { mb->xpos = x; mb->ypos = y; mb->buf_offs = mb_offset; if (get_bits1(&ctx->gb)) { if (ctx->frame_type == FRAMETYPE_INTRA) { av_log(avctx, AV_LOG_ERROR, "Empty macroblock in an INTRA picture!\n"); return AVERROR_INVALIDDATA; } mb->type = 1; /* empty macroblocks are always INTER */ mb->cbp = 0; /* all blocks are empty */ mb->q_delta = 0; if (!band->plane && !band->band_num && ctx->in_q) { mb->q_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mb->q_delta = IVI_TOSIGNED(mb->q_delta); } mb->mv_x = mb->mv_y = 0; /* no motion vector coded */ if (band->inherit_mv) { /* motion vector inheritance */ if (mv_scale) { mb->mv_x = ivi_scale_mv(ref_mb->mv_x, mv_scale); mb->mv_y = ivi_scale_mv(ref_mb->mv_y, mv_scale); } else { mb->mv_x = ref_mb->mv_x; mb->mv_y = ref_mb->mv_y; } } } else { if (band->inherit_mv && ref_mb) { mb->type = ref_mb->type; /* copy mb_type from corresponding reference mb */ } else if (ctx->frame_type == FRAMETYPE_INTRA) { mb->type = 0; /* mb_type is always INTRA for intra-frames */ } else { mb->type = get_bits(&ctx->gb, mb_type_bits); } mb->cbp = get_bits(&ctx->gb, blks_per_mb); mb->q_delta = 0; if (band->inherit_qdelta) { if (ref_mb) mb->q_delta = ref_mb->q_delta; } else if (mb->cbp || (!band->plane && !band->band_num && ctx->in_q)) { mb->q_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mb->q_delta = IVI_TOSIGNED(mb->q_delta); } if (!mb->type) { mb->mv_x = mb->mv_y = 0; /* there is no motion vector in intra-macroblocks */ } else { if (band->inherit_mv) { /* motion vector inheritance */ if (mv_scale) { mb->mv_x = ivi_scale_mv(ref_mb->mv_x, mv_scale); mb->mv_y = ivi_scale_mv(ref_mb->mv_y, mv_scale); } else { mb->mv_x = ref_mb->mv_x; mb->mv_y = ref_mb->mv_y; } } else { /* decode motion vector deltas */ mv_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mv_y += IVI_TOSIGNED(mv_delta); mv_delta = get_vlc2(&ctx->gb, ctx->mb_vlc.tab->table, IVI_VLC_BITS, 1); mv_x += IVI_TOSIGNED(mv_delta); mb->mv_x = mv_x; mb->mv_y = mv_y; } } } s= band->is_halfpel; if (mb->type) if ( x + (mb->mv_x >>s) + (y+ (mb->mv_y >>s))*band->pitch < 0 || x + ((mb->mv_x+s)>>s) + band->mb_size - 1 + (y+band->mb_size - 1 +((mb->mv_y+s)>>s))*band->pitch > band->bufsize -1) { av_log(avctx, AV_LOG_ERROR, "motion vector %d %d outside reference\n", x*s + mb->mv_x, y*s + mb->mv_y); return AVERROR_INVALIDDATA; } mb++; if (ref_mb) ref_mb++; mb_offset += band->mb_size; } offs += row_offset; } align_get_bits(&ctx->gb); return 0; } | 3,586 |
1 | static int balloon_parse(const char *arg) { QemuOpts *opts; if (strcmp(arg, "none") == 0) { return 0; } if (!strncmp(arg, "virtio", 6)) { if (arg[6] == ',') { /* have params -> parse them */ opts = qemu_opts_parse(qemu_find_opts("device"), arg+7, 0); if (!opts) return -1; } else { /* create empty opts */ opts = qemu_opts_create(qemu_find_opts("device"), NULL, 0); } qemu_opt_set(opts, "driver", "virtio-balloon"); return 0; } return -1; } | 3,587 |
1 | static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize, int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize, uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW) { #ifdef HAVE_MMX if(uDest != NULL) { asm volatile( YSCALEYUV2YV12X(0, CHR_MMX_FILTER_OFFSET) :: "r" (&c->redDither), "r" (uDest), "p" ((long)chrDstW) : "%"REG_a, "%"REG_d, "%"REG_S ); asm volatile( YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET) :: "r" (&c->redDither), "r" (vDest), "p" ((long)chrDstW) : "%"REG_a, "%"REG_d, "%"REG_S ); } asm volatile( YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET) :: "r" (&c->redDither), "r" (dest), "p" ((long)dstW) : "%"REG_a, "%"REG_d, "%"REG_S ); #else #ifdef HAVE_ALTIVEC yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize, chrFilter, chrSrc, chrFilterSize, dest, uDest, vDest, dstW, chrDstW); #else //HAVE_ALTIVEC yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize, chrFilter, chrSrc, chrFilterSize, dest, uDest, vDest, dstW, chrDstW); #endif //!HAVE_ALTIVEC #endif } | 3,588 |
0 | int attribute_align_arg avcodec_decode_audio2(AVCodecContext *avctx, int16_t *samples, int *frame_size_ptr, uint8_t *buf, int buf_size) { int ret; if((avctx->codec->capabilities & CODEC_CAP_DELAY) || buf_size){ //FIXME remove the check below _after_ ensuring that all audio check that the available space is enough if(*frame_size_ptr < AVCODEC_MAX_AUDIO_FRAME_SIZE){ av_log(avctx, AV_LOG_ERROR, "buffer smaller than AVCODEC_MAX_AUDIO_FRAME_SIZE\n"); return -1; } if(*frame_size_ptr < FF_MIN_BUFFER_SIZE || *frame_size_ptr < avctx->channels * avctx->frame_size * sizeof(int16_t) || *frame_size_ptr < buf_size){ av_log(avctx, AV_LOG_ERROR, "buffer %d too small\n", *frame_size_ptr); return -1; } ret = avctx->codec->decode(avctx, samples, frame_size_ptr, buf, buf_size); avctx->frame_number++; }else{ ret= 0; *frame_size_ptr=0; } return ret; } | 3,589 |
1 | static void nbd_co_receive_reply(NBDClientSession *s, NBDRequest *request, NBDReply *reply, QEMUIOVector *qiov) { int ret; /* Wait until we're woken up by nbd_read_reply_entry. */ qemu_coroutine_yield(); *reply = s->reply; if (reply->handle != request->handle || !s->ioc) { reply->error = EIO; } else { if (qiov && reply->error == 0) { ret = nbd_rwv(s->ioc, qiov->iov, qiov->niov, request->len, true, NULL); if (ret != request->len) { reply->error = EIO; } } /* Tell the read handler to read another header. */ s->reply.handle = 0; } } | 3,590 |
1 | static const char *local_mapped_attr_path(FsContext *ctx, const char *path, char *buffer) { char *dir_name; char *tmp_path = strdup(path); char *base_name = basename(tmp_path); /* NULL terminate the directory */ dir_name = tmp_path; *(base_name - 1) = '\0'; snprintf(buffer, PATH_MAX, "%s/%s/%s/%s", ctx->fs_root, dir_name, VIRTFS_META_DIR, base_name); free(tmp_path); return buffer; } | 3,591 |
1 | void usb_host_info(Monitor *mon, const QDict *qdict) { libusb_device **devs; struct libusb_device_descriptor ddesc; char port[16]; int i, n; if (usb_host_init() != 0) { return; } n = libusb_get_device_list(ctx, &devs); for (i = 0; i < n; i++) { if (libusb_get_device_descriptor(devs[i], &ddesc) != 0) { continue; } if (ddesc.bDeviceClass == LIBUSB_CLASS_HUB) { continue; } usb_host_get_port(devs[i], port, sizeof(port)); monitor_printf(mon, " Bus %d, Addr %d, Port %s, Speed %s Mb/s\n", libusb_get_bus_number(devs[i]), libusb_get_device_address(devs[i]), port, speed_name[libusb_get_device_speed(devs[i])]); monitor_printf(mon, " Class %02x:", ddesc.bDeviceClass); monitor_printf(mon, " USB device %04x:%04x", ddesc.idVendor, ddesc.idProduct); if (ddesc.iProduct) { libusb_device_handle *handle; if (libusb_open(devs[i], &handle) == 0) { unsigned char name[64] = ""; libusb_get_string_descriptor_ascii(handle, ddesc.iProduct, name, sizeof(name)); libusb_close(handle); monitor_printf(mon, ", %s", name); } } monitor_printf(mon, "\n"); } libusb_free_device_list(devs, 1); } | 3,592 |
1 | static void gen_mulldo(DisasContext *ctx) { TCGv_i64 t0 = tcg_temp_new_i64(); TCGv_i64 t1 = tcg_temp_new_i64(); tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0); tcg_gen_sari_i64(t0, t0, 63); tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1); tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); if (unlikely(Rc(ctx->opcode) != 0)) { gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); } } | 3,593
1 | static inline target_phys_addr_t get_pgaddr (target_phys_addr_t sdr1, int sdr_sh, target_phys_addr_t hash, target_phys_addr_t mask) { return (sdr1 & ((target_ulong)(-1ULL) << sdr_sh)) | (hash & mask); } | 3,594 |
1 | static unsigned int mszh_decomp(const unsigned char * srcptr, int srclen, unsigned char * destptr, unsigned int destsize) { unsigned char *destptr_bak = destptr; unsigned char *destptr_end = destptr + destsize; const unsigned char *srcptr_end = srcptr + srclen; unsigned mask = *srcptr++; unsigned maskbit = 0x80; while (srcptr < srcptr_end && destptr < destptr_end) { if (!(mask & maskbit)) { memcpy(destptr, srcptr, 4); destptr += 4; srcptr += 4; } else { unsigned ofs = bytestream_get_le16(&srcptr); unsigned cnt = (ofs >> 11) + 1; ofs &= 0x7ff; ofs = FFMIN(ofs, destptr - destptr_bak); cnt *= 4; cnt = FFMIN(cnt, destptr_end - destptr); av_memcpy_backptr(destptr, ofs, cnt); destptr += cnt; } maskbit >>= 1; if (!maskbit) { mask = *srcptr++; while (!mask) { if (destptr_end - destptr < 32 || srcptr_end - srcptr < 32) break; memcpy(destptr, srcptr, 32); destptr += 32; srcptr += 32; mask = *srcptr++; } maskbit = 0x80; } } return destptr - destptr_bak; } | 3,595 |
1 | static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr) { #ifdef DEBUG_UNASSIGNED printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); #endif #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) do_unassigned_access(addr, 0, 0, 0, 1); #endif return 0; } | 3,597 |
1 | static int mxg_update_cache(AVFormatContext *s, unsigned int cache_size) { MXGContext *mxg = s->priv_data; unsigned int current_pos = mxg->buffer_ptr - mxg->buffer; unsigned int soi_pos; int ret; /* reallocate internal buffer */ if (current_pos > current_pos + cache_size) return AVERROR(ENOMEM); soi_pos = mxg->soi_ptr - mxg->buffer; mxg->buffer = av_fast_realloc(mxg->buffer, &mxg->buffer_size, current_pos + cache_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!mxg->buffer) return AVERROR(ENOMEM); mxg->buffer_ptr = mxg->buffer + current_pos; if (mxg->soi_ptr) mxg->soi_ptr = mxg->buffer + soi_pos; /* get data */ ret = avio_read(s->pb, mxg->buffer_ptr + mxg->cache_size, cache_size - mxg->cache_size); if (ret < 0) return ret; mxg->cache_size += ret; return ret; } | 3,599 |
1 | vubr_set_mem_table_exec(VubrDev *dev, VhostUserMsg *vmsg) { int i; VhostUserMemory *memory = &vmsg->payload.memory; dev->nregions = memory->nregions; DPRINT("Nregions: %d\n", memory->nregions); for (i = 0; i < dev->nregions; i++) { void *mmap_addr; VhostUserMemoryRegion *msg_region = &memory->regions[i]; VubrDevRegion *dev_region = &dev->regions[i]; DPRINT("Region %d\n", i); DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n", msg_region->guest_phys_addr); DPRINT(" memory_size: 0x%016"PRIx64"\n", msg_region->memory_size); DPRINT(" userspace_addr 0x%016"PRIx64"\n", msg_region->userspace_addr); DPRINT(" mmap_offset 0x%016"PRIx64"\n", msg_region->mmap_offset); dev_region->gpa = msg_region->guest_phys_addr; dev_region->size = msg_region->memory_size; dev_region->qva = msg_region->userspace_addr; dev_region->mmap_offset = msg_region->mmap_offset; /* We don't use offset argument of mmap() since the * mapped address has to be page aligned, and we use huge * pages. */ mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset, PROT_READ | PROT_WRITE, MAP_SHARED, vmsg->fds[i], 0); if (mmap_addr == MAP_FAILED) { vubr_die("mmap"); } dev_region->mmap_addr = (uint64_t) mmap_addr; DPRINT(" mmap_addr: 0x%016"PRIx64"\n", dev_region->mmap_addr); } return 0; } | 3,601 |
0 | static av_cold int dilate_init(AVFilterContext *ctx, const char *args) { OCVContext *s = ctx->priv; DilateContext *dilate = s->priv; char default_kernel_str[] = "3x3+0x0/rect"; char *kernel_str; const char *buf = args; int ret; dilate->nb_iterations = 1; if (args) kernel_str = av_get_token(&buf, "|"); if ((ret = parse_iplconvkernel(&dilate->kernel, *kernel_str ? kernel_str : default_kernel_str, ctx)) < 0) return ret; av_free(kernel_str); sscanf(buf, "|%d", &dilate->nb_iterations); av_log(ctx, AV_LOG_VERBOSE, "iterations_nb:%d\n", dilate->nb_iterations); if (dilate->nb_iterations <= 0) { av_log(ctx, AV_LOG_ERROR, "Invalid non-positive value '%d' for nb_iterations\n", dilate->nb_iterations); return AVERROR(EINVAL); } return 0; } | 3,603 |
0 | static int transcode(OutputFile *output_files, int nb_output_files, InputFile *input_files, int nb_input_files) { int ret = 0, i; AVFormatContext *is, *os; AVCodecContext *codec, *icodec; OutputStream *ost; InputStream *ist; char error[1024]; int key; int want_sdp = 1; uint8_t *no_packet; int no_packet_count=0; int64_t timer_start; if (!(no_packet = av_mallocz(nb_input_files))) exit_program(1); if (rate_emu) for (i = 0; i < nb_input_streams; i++) input_streams[i].start = av_gettime(); /* output stream init */ for(i=0;i<nb_output_files;i++) { os = output_files[i].ctx; if (!os->nb_streams && !(os->oformat->flags & AVFMT_NOSTREAMS)) { av_dump_format(os, i, os->filename, 1); fprintf(stderr, "Output file #%d does not contain any stream\n", i); ret = AVERROR(EINVAL); goto fail; } } /* for each output stream, we compute the right encoding parameters */ for (i = 0; i < nb_output_streams; i++) { ost = &output_streams[i]; os = output_files[ost->file_index].ctx; ist = &input_streams[ost->source_index]; codec = ost->st->codec; icodec = ist->st->codec; ost->st->disposition = ist->st->disposition; codec->bits_per_raw_sample= icodec->bits_per_raw_sample; codec->chroma_sample_location = icodec->chroma_sample_location; if (ost->st->stream_copy) { uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE; if (extra_size > INT_MAX) { ret = AVERROR(EINVAL); goto fail; } /* if stream_copy is selected, no need to decode or encode */ codec->codec_id = icodec->codec_id; codec->codec_type = icodec->codec_type; if(!codec->codec_tag){ if( !os->oformat->codec_tag || av_codec_get_id (os->oformat->codec_tag, icodec->codec_tag) == codec->codec_id || av_codec_get_tag(os->oformat->codec_tag, icodec->codec_id) <= 0) codec->codec_tag = icodec->codec_tag; } codec->bit_rate = icodec->bit_rate; codec->rc_max_rate = icodec->rc_max_rate; codec->rc_buffer_size = icodec->rc_buffer_size; codec->extradata= av_mallocz(extra_size); if (!codec->extradata) { ret = AVERROR(ENOMEM); goto fail; } memcpy(codec->extradata, icodec->extradata, icodec->extradata_size); codec->extradata_size= icodec->extradata_size; codec->time_base = ist->st->time_base; if(!strcmp(os->oformat->name, "avi")) { if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > 2*av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){ codec->time_base = icodec->time_base; codec->time_base.num *= icodec->ticks_per_frame; codec->time_base.den *= 2; } } else if(!(os->oformat->flags & AVFMT_VARIABLE_FPS)) { if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){ codec->time_base = icodec->time_base; codec->time_base.num *= icodec->ticks_per_frame; } } av_reduce(&codec->time_base.num, &codec->time_base.den, codec->time_base.num, codec->time_base.den, INT_MAX); switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: if(audio_volume != 256) { fprintf(stderr,"-acodec copy and -vol are incompatible (frames are not decoded)\n"); exit_program(1); } codec->channel_layout = icodec->channel_layout; codec->sample_rate = icodec->sample_rate; codec->channels = icodec->channels; codec->frame_size = icodec->frame_size; codec->audio_service_type = icodec->audio_service_type; codec->block_align= icodec->block_align; if(codec->block_align == 1 && codec->codec_id == CODEC_ID_MP3) codec->block_align= 0; if(codec->codec_id == CODEC_ID_AC3) codec->block_align= 0; break; case AVMEDIA_TYPE_VIDEO: codec->pix_fmt = icodec->pix_fmt; codec->width = icodec->width; 
codec->height = icodec->height; codec->has_b_frames = icodec->has_b_frames; if (!codec->sample_aspect_ratio.num) { codec->sample_aspect_ratio = ost->st->sample_aspect_ratio = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio : ist->st->codec->sample_aspect_ratio.num ? ist->st->codec->sample_aspect_ratio : (AVRational){0, 1}; } break; case AVMEDIA_TYPE_SUBTITLE: codec->width = icodec->width; codec->height = icodec->height; break; case AVMEDIA_TYPE_DATA: break; default: abort(); } } else { if (!ost->enc) ost->enc = avcodec_find_encoder(ost->st->codec->codec_id); switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: ost->fifo= av_fifo_alloc(1024); if (!ost->fifo) { ret = AVERROR(ENOMEM); goto fail; } ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE,AV_SAMPLE_FMT_NONE); if (!codec->sample_rate) { codec->sample_rate = icodec->sample_rate; } choose_sample_rate(ost->st, ost->enc); codec->time_base = (AVRational){1, codec->sample_rate}; if (codec->sample_fmt == AV_SAMPLE_FMT_NONE) codec->sample_fmt = icodec->sample_fmt; choose_sample_fmt(ost->st, ost->enc); if (!codec->channels) { codec->channels = icodec->channels; codec->channel_layout = icodec->channel_layout; } if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels) codec->channel_layout = 0; ost->audio_resample = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1; icodec->request_channels = codec->channels; ist->decoding_needed = 1; ost->encoding_needed = 1; ost->resample_sample_fmt = icodec->sample_fmt; ost->resample_sample_rate = icodec->sample_rate; ost->resample_channels = icodec->channels; break; case AVMEDIA_TYPE_VIDEO: if (codec->pix_fmt == PIX_FMT_NONE) codec->pix_fmt = icodec->pix_fmt; choose_pixel_fmt(ost->st, ost->enc); if (ost->st->codec->pix_fmt == PIX_FMT_NONE) { fprintf(stderr, "Video pixel format is unknown, stream cannot be encoded\n"); exit_program(1); } if (!codec->width || !codec->height) { codec->width = icodec->width; codec->height = icodec->height; } ost->video_resample = codec->width != icodec->width || codec->height != icodec->height || codec->pix_fmt != icodec->pix_fmt; if (ost->video_resample) { codec->bits_per_raw_sample= frame_bits_per_raw_sample; } ost->resample_height = icodec->height; ost->resample_width = icodec->width; ost->resample_pix_fmt= icodec->pix_fmt; ost->encoding_needed = 1; ist->decoding_needed = 1; if (!ost->frame_rate.num) ost->frame_rate = ist->st->r_frame_rate.num ? 
ist->st->r_frame_rate : (AVRational){25,1}; if (ost->enc && ost->enc->supported_framerates && !force_fps) { int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates); ost->frame_rate = ost->enc->supported_framerates[idx]; } codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num}; if( av_q2d(codec->time_base) < 0.001 && video_sync_method && (video_sync_method==1 || (video_sync_method<0 && !(os->oformat->flags & AVFMT_VARIABLE_FPS)))){ av_log(os, AV_LOG_WARNING, "Frame rate very high for a muxer not effciciently supporting it.\n" "Please consider specifiying a lower framerate, a different muxer or -vsync 2\n"); } #if CONFIG_AVFILTER if (configure_video_filters(ist, ost)) { fprintf(stderr, "Error opening filters!\n"); exit(1); } #endif break; case AVMEDIA_TYPE_SUBTITLE: ost->encoding_needed = 1; ist->decoding_needed = 1; break; default: abort(); break; } /* two pass mode */ if (ost->encoding_needed && codec->codec_id != CODEC_ID_H264 && (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) { char logfilename[1024]; FILE *f; snprintf(logfilename, sizeof(logfilename), "%s-%d.log", pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX, i); if (codec->flags & CODEC_FLAG_PASS1) { f = fopen(logfilename, "wb"); if (!f) { fprintf(stderr, "Cannot write log file '%s' for pass-1 encoding: %s\n", logfilename, strerror(errno)); exit_program(1); } ost->logfile = f; } else { char *logbuffer; size_t logbuffer_size; if (read_file(logfilename, &logbuffer, &logbuffer_size) < 0) { fprintf(stderr, "Error reading log file '%s' for pass-2 encoding\n", logfilename); exit_program(1); } codec->stats_in = logbuffer; } } } if(codec->codec_type == AVMEDIA_TYPE_VIDEO){ /* maximum video buffer size is 6-bytes per pixel, plus DPX header size */ int size= codec->width * codec->height; bit_buffer_size= FFMAX(bit_buffer_size, 6*size + 1664); } } if (!bit_buffer) bit_buffer = av_malloc(bit_buffer_size); if (!bit_buffer) { fprintf(stderr, "Cannot allocate %d bytes output buffer\n", bit_buffer_size); ret = AVERROR(ENOMEM); goto fail; } /* open each encoder */ for (i = 0; i < nb_output_streams; i++) { ost = &output_streams[i]; if (ost->encoding_needed) { AVCodec *codec = ost->enc; AVCodecContext *dec = input_streams[ost->source_index].st->codec; if (!codec) { snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d.%d", avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index); ret = AVERROR(EINVAL); goto dump_format; } if (dec->subtitle_header) { ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size); if (!ost->st->codec->subtitle_header) { ret = AVERROR(ENOMEM); goto dump_format; } memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size); ost->st->codec->subtitle_header_size = dec->subtitle_header_size; } if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) { snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height", ost->file_index, ost->index); ret = AVERROR(EINVAL); goto dump_format; } assert_codec_experimental(ost->st->codec, 1); assert_avoptions(ost->opts); if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000) av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low." 
"It takes bits/s as argument, not kbits/s\n"); extra_size += ost->st->codec->extradata_size; } } /* init input streams */ for (i = 0; i < nb_input_streams; i++) if ((ret = init_input_stream(i, output_streams, nb_output_streams, error, sizeof(error)) < 0)) goto dump_format; /* open files and write file headers */ for (i = 0; i < nb_output_files; i++) { os = output_files[i].ctx; if (avformat_write_header(os, &output_files[i].opts) < 0) { snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i); ret = AVERROR(EINVAL); goto dump_format; } // assert_avoptions(output_files[i].opts); if (strcmp(os->oformat->name, "rtp")) { want_sdp = 0; } } dump_format: /* dump the file output parameters - cannot be done before in case of stream copy */ for(i=0;i<nb_output_files;i++) { av_dump_format(output_files[i].ctx, i, output_files[i].ctx->filename, 1); } /* dump the stream mapping */ if (verbose >= 0) { fprintf(stderr, "Stream mapping:\n"); for (i = 0; i < nb_output_streams;i ++) { ost = &output_streams[i]; fprintf(stderr, " Stream #%d.%d -> #%d.%d", input_streams[ost->source_index].file_index, input_streams[ost->source_index].st->index, ost->file_index, ost->index); if (ost->sync_ist != &input_streams[ost->source_index]) fprintf(stderr, " [sync #%d.%d]", ost->sync_ist->file_index, ost->sync_ist->st->index); if(ost->encoding_needed) fprintf(stderr, ": %s -> %s", input_streams[ost->source_index].dec ? input_streams[ost->source_index].dec->name : "?", ost->enc ? ost->enc->name : "?"); else fprintf(stderr, ": copy"); fprintf(stderr, "\n"); } } if (ret) { fprintf(stderr, "%s\n", error); goto fail; } if (want_sdp) { print_sdp(output_files, nb_output_files); } if (!using_stdin) { if(verbose >= 0) fprintf(stderr, "Press [q] to stop, [?] for help\n"); avio_set_interrupt_cb(decode_interrupt_cb); } term_init(); timer_start = av_gettime(); for(; received_sigterm == 0;) { int file_index, ist_index; AVPacket pkt; int64_t ipts_min; double opts_min; redo: ipts_min= INT64_MAX; opts_min= 1e100; /* if 'q' pressed, exits */ if (!using_stdin) { if (q_pressed) break; /* read_key() returns 0 on EOF */ key = read_key(); if (key == 'q') break; if (key == '+') verbose++; if (key == '-') verbose--; if (key == 's') qp_hist ^= 1; if (key == 'h'){ if (do_hex_dump){ do_hex_dump = do_pkt_dump = 0; } else if(do_pkt_dump){ do_hex_dump = 1; } else do_pkt_dump = 1; av_log_set_level(AV_LOG_DEBUG); } if (key == 'c' || key == 'C'){ char ret[4096], target[64], cmd[256], arg[256]={0}; double ts; fprintf(stderr, "\nEnter command: <target> <time> <command>[ <argument>]\n"); if(scanf("%4095[^\n\r]%*c", ret) == 1 && sscanf(ret, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &ts, cmd, arg) >= 3){ for(i=0;i<nb_output_streams;i++) { int r; ost = &output_streams[i]; if(ost->graph){ if(ts<0){ r= avfilter_graph_send_command(ost->graph, target, cmd, arg, ret, sizeof(ret), key == 'c' ? 
AVFILTER_CMD_FLAG_ONE : 0); fprintf(stderr, "Command reply for %d: %d, %s\n", i, r, ret); }else{ r= avfilter_graph_queue_command(ost->graph, target, cmd, arg, 0, ts); } } } }else{ fprintf(stderr, "Parse error\n"); } } if (key == 'd' || key == 'D'){ int debug=0; if(key == 'D') { debug = input_streams[0].st->codec->debug<<1; if(!debug) debug = 1; while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash debug += debug; }else scanf("%d", &debug); for(i=0;i<nb_input_streams;i++) { input_streams[i].st->codec->debug = debug; } for(i=0;i<nb_output_streams;i++) { ost = &output_streams[i]; ost->st->codec->debug = debug; } if(debug) av_log_set_level(AV_LOG_DEBUG); fprintf(stderr,"debug=%d\n", debug); } if (key == '?'){ fprintf(stderr, "key function\n" "? show this help\n" "+ increase verbosity\n" "- decrease verbosity\n" "c Send command to filtergraph\n" "D cycle through available debug modes\n" "h dump packets/hex press to cycle through the 3 states\n" "q quit\n" "s Show QP histogram\n" ); } } /* select the stream that we must read now by looking at the smallest output pts */ file_index = -1; for (i = 0; i < nb_output_streams; i++) { OutputFile *of; int64_t ipts; double opts; ost = &output_streams[i]; of = &output_files[ost->file_index]; os = output_files[ost->file_index].ctx; ist = &input_streams[ost->source_index]; if (ost->is_past_recording_time || no_packet[ist->file_index] || (os->pb && avio_tell(os->pb) >= of->limit_filesize)) continue; opts = ost->st->pts.val * av_q2d(ost->st->time_base); ipts = ist->pts; if (!input_files[ist->file_index].eof_reached){ if(ipts < ipts_min) { ipts_min = ipts; if(input_sync ) file_index = ist->file_index; } if(opts < opts_min) { opts_min = opts; if(!input_sync) file_index = ist->file_index; } } if(ost->frame_number >= max_frames[ost->st->codec->codec_type]){ file_index= -1; break; } } /* if none, if is finished */ if (file_index < 0) { if(no_packet_count){ no_packet_count=0; memset(no_packet, 0, nb_input_files); usleep(10000); continue; } break; } /* read a frame from it and output it in the fifo */ is = input_files[file_index].ctx; ret= av_read_frame(is, &pkt); if(ret == AVERROR(EAGAIN)){ no_packet[file_index]=1; no_packet_count++; continue; } if (ret < 0) { input_files[file_index].eof_reached = 1; if (opt_shortest) break; else continue; } no_packet_count=0; memset(no_packet, 0, nb_input_files); if (do_pkt_dump) { av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump, is->streams[pkt.stream_index]); } /* the following test is needed in case new streams appear dynamically in stream : we ignore them */ if (pkt.stream_index >= input_files[file_index].nb_streams) goto discard_packet; ist_index = input_files[file_index].ist_index + pkt.stream_index; ist = &input_streams[ist_index]; if (ist->discard) goto discard_packet; if (pkt.dts != AV_NOPTS_VALUE) pkt.dts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (pkt.pts != AV_NOPTS_VALUE) pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (ist->ts_scale) { if(pkt.pts != AV_NOPTS_VALUE) pkt.pts *= ist->ts_scale; if(pkt.dts != AV_NOPTS_VALUE) pkt.dts *= ist->ts_scale; } // fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files[ist->file_index].ts_offset, ist->st->codec->codec_type); if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE && (is->iformat->flags & AVFMT_TS_DISCONT)) { int64_t pkt_dts= av_rescale_q(pkt.dts, 
ist->st->time_base, AV_TIME_BASE_Q); int64_t delta= pkt_dts - ist->next_pts; if((delta < -1LL*dts_delta_threshold*AV_TIME_BASE || (delta > 1LL*dts_delta_threshold*AV_TIME_BASE && ist->st->codec->codec_type != AVMEDIA_TYPE_SUBTITLE) || pkt_dts+1<ist->pts)&& !copy_ts){ input_files[ist->file_index].ts_offset -= delta; if (verbose > 2) fprintf(stderr, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", delta, input_files[ist->file_index].ts_offset); pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); if(pkt.pts != AV_NOPTS_VALUE) pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); } } //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size); if (output_packet(ist, ist_index, output_streams, nb_output_streams, &pkt) < 0) { if (verbose >= 0) fprintf(stderr, "Error while decoding stream #%d.%d\n", ist->file_index, ist->st->index); if (exit_on_error) exit_program(1); av_free_packet(&pkt); goto redo; } discard_packet: av_free_packet(&pkt); /* dump report by using the output first video and audio streams */ print_report(output_files, output_streams, nb_output_streams, 0, timer_start); } /* at the end of stream, we must flush the decoder buffers */ for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { output_packet(ist, i, output_streams, nb_output_streams, NULL); } } flush_encoders(output_streams, nb_output_streams); term_exit(); /* write the trailer if needed and close file */ for(i=0;i<nb_output_files;i++) { os = output_files[i].ctx; av_write_trailer(os); } /* dump report by using the first video and audio streams */ print_report(output_files, output_streams, nb_output_streams, 1, timer_start); /* close each encoder */ for (i = 0; i < nb_output_streams; i++) { ost = &output_streams[i]; if (ost->encoding_needed) { av_freep(&ost->st->codec->stats_in); avcodec_close(ost->st->codec); } #if CONFIG_AVFILTER avfilter_graph_free(&ost->graph); #endif } /* close each decoder */ for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { avcodec_close(ist->st->codec); } } /* finished ! */ ret = 0; fail: av_freep(&bit_buffer); av_freep(&no_packet); if (output_streams) { for (i = 0; i < nb_output_streams; i++) { ost = &output_streams[i]; if (ost) { if (ost->st->stream_copy) av_freep(&ost->st->codec->extradata); if (ost->logfile) { fclose(ost->logfile); ost->logfile = NULL; } av_fifo_free(ost->fifo); /* works even if fifo is not initialized but set to zero */ av_freep(&ost->st->codec->subtitle_header); av_free(ost->resample_frame.data[0]); av_free(ost->forced_kf_pts); if (ost->video_resample) sws_freeContext(ost->img_resample_ctx); if (ost->resample) audio_resample_close(ost->resample); if (ost->reformat_ctx) av_audio_convert_free(ost->reformat_ctx); av_dict_free(&ost->opts); } } } return ret; } | 3,604 |
0 | static int dvvideo_decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { DVVideoContext *s = avctx->priv_data; /* special case for last picture */ if(buf_size==0) return 0; s->sys = dv_frame_profile(buf); if (!s->sys || buf_size < s->sys->frame_size) return -1; /* NOTE: we only accept several full frames */ if(s->picture.data[0]) avctx->release_buffer(avctx, &s->picture); s->picture.reference = 0; avctx->pix_fmt = s->sys->pix_fmt; avctx->width = s->sys->width; avctx->height = s->sys->height; if(avctx->get_buffer(avctx, &s->picture) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } s->picture.interlaced_frame = 1; s->picture.top_field_first = 0; s->buf = buf; avctx->execute(avctx, dv_decode_mt, (void**)&dv_anchor[0], NULL, s->sys->difseg_size * 27); emms_c(); /* return image */ *data_size = sizeof(AVFrame); *(AVFrame*)data= s->picture; return s->sys->frame_size; } | 3,605 |
0 | rdt_parse_packet (PayloadContext *rdt, AVStream *st, AVPacket *pkt, uint32_t *timestamp, const uint8_t *buf, int len, int flags) { int seq = 1, res; ByteIOContext pb; if (rdt->audio_pkt_cnt == 0) { int pos; init_put_byte(&pb, buf, len, 0, NULL, NULL, NULL, NULL); flags = (flags & PKT_FLAG_KEY) ? 2 : 0; res = ff_rm_parse_packet (rdt->rmctx, &pb, st, rdt->rmst[0], len, pkt, &seq, &flags, timestamp); pos = url_ftell(&pb); if (res < 0) return res; rdt->audio_pkt_cnt[st->id] = res; if (rdt->audio_pkt_cnt[st->id] > 0 && st->codec->codec_id == CODEC_ID_AAC) { memcpy (rdt->buffer, buf + pos, len - pos); rdt->rmctx->pb = av_alloc_put_byte (rdt->buffer, len - pos, 0, NULL, NULL, NULL, NULL); } } else { ff_rm_retrieve_cache (rdt->rmctx, rdt->rmctx->pb, st, rdt->rmst[0], pkt); if (rdt->audio_pkt_cnt[st->id] == 0 && st->codec->codec_id == CODEC_ID_AAC) av_freep(&rdt->rmctx->pb); } pkt->stream_index = st->index; pkt->pts = *timestamp; return rdt->audio_pkt_cnt[st->id] > 0; } | 3,606 |
0 | static void FUNC(hevc_v_loop_filter_luma)(uint8_t *pix, ptrdiff_t stride, int *beta, int *tc, uint8_t *no_p, uint8_t *no_q) { FUNC(hevc_loop_filter_luma)(pix, sizeof(pixel), stride, beta, tc, no_p, no_q); } | 3,607 |
0 | static int http_server(void) { int server_fd, ret, rtsp_server_fd, delay, delay1; struct pollfd poll_table[HTTP_MAX_CONNECTIONS + 2], *poll_entry; HTTPContext *c, *c_next; server_fd = socket_open_listen(&my_http_addr); if (server_fd < 0) return -1; rtsp_server_fd = socket_open_listen(&my_rtsp_addr); if (rtsp_server_fd < 0) return -1; http_log("ffserver started.\n"); start_children(first_feed); first_http_ctx = NULL; nb_connections = 0; start_multicast(); for(;;) { poll_entry = poll_table; poll_entry->fd = server_fd; poll_entry->events = POLLIN; poll_entry++; poll_entry->fd = rtsp_server_fd; poll_entry->events = POLLIN; poll_entry++; /* wait for events on each HTTP handle */ c = first_http_ctx; delay = 1000; while (c != NULL) { int fd; fd = c->fd; switch(c->state) { case HTTPSTATE_SEND_HEADER: case RTSPSTATE_SEND_REPLY: case RTSPSTATE_SEND_PACKET: c->poll_entry = poll_entry; poll_entry->fd = fd; poll_entry->events = POLLOUT; poll_entry++; break; case HTTPSTATE_SEND_DATA_HEADER: case HTTPSTATE_SEND_DATA: case HTTPSTATE_SEND_DATA_TRAILER: if (!c->is_packetized) { /* for TCP, we output as much as we can (may need to put a limit) */ c->poll_entry = poll_entry; poll_entry->fd = fd; poll_entry->events = POLLOUT; poll_entry++; } else { /* when ffserver is doing the timing, we work by looking at which packet need to be sent every 10 ms */ delay1 = 10; /* one tick wait XXX: 10 ms assumed */ if (delay1 < delay) delay = delay1; } break; case HTTPSTATE_WAIT_REQUEST: case HTTPSTATE_RECEIVE_DATA: case HTTPSTATE_WAIT_FEED: case RTSPSTATE_WAIT_REQUEST: /* need to catch errors */ c->poll_entry = poll_entry; poll_entry->fd = fd; poll_entry->events = POLLIN;/* Maybe this will work */ poll_entry++; break; default: c->poll_entry = NULL; break; } c = c->next; } /* wait for an event on one connection. We poll at least every second to handle timeouts */ do { ret = poll(poll_table, poll_entry - poll_table, delay); if (ret < 0 && errno != EAGAIN && errno != EINTR) return -1; } while (ret <= 0); cur_time = av_gettime() / 1000; if (need_to_start_children) { need_to_start_children = 0; start_children(first_feed); } /* now handle the events */ for(c = first_http_ctx; c != NULL; c = c_next) { c_next = c->next; if (handle_connection(c) < 0) { /* close and free the connection */ log_connection(c); close_connection(c); } } poll_entry = poll_table; /* new HTTP connection request ? */ if (poll_entry->revents & POLLIN) { new_connection(server_fd, 0); } poll_entry++; /* new RTSP connection request ? */ if (poll_entry->revents & POLLIN) { new_connection(rtsp_server_fd, 1); } } } | 3,608 |
1 | static int h264_field_start(H264Context *h, const H264SliceContext *sl, const H2645NAL *nal, int first_slice) { int i; const SPS *sps; int last_pic_structure, last_pic_droppable, ret; ret = h264_init_ps(h, sl, first_slice); if (ret < 0) return ret; sps = h->ps.sps; last_pic_droppable = h->droppable; last_pic_structure = h->picture_structure; h->droppable = (nal->ref_idc == 0); h->picture_structure = sl->picture_structure; h->poc.frame_num = sl->frame_num; h->poc.poc_lsb = sl->poc_lsb; h->poc.delta_poc_bottom = sl->delta_poc_bottom; h->poc.delta_poc[0] = sl->delta_poc[0]; h->poc.delta_poc[1] = sl->delta_poc[1]; /* Shorten frame num gaps so we don't have to allocate reference * frames just to throw them away */ if (h->poc.frame_num != h->poc.prev_frame_num) { int unwrap_prev_frame_num = h->poc.prev_frame_num; int max_frame_num = 1 << sps->log2_max_frame_num; if (unwrap_prev_frame_num > h->poc.frame_num) unwrap_prev_frame_num -= max_frame_num; if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) { unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1; if (unwrap_prev_frame_num < 0) unwrap_prev_frame_num += max_frame_num; h->poc.prev_frame_num = unwrap_prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * Here, we're using that to see if we should mark previously * decode frames as "finished". * We have to do that before the "dummy" in-between frame allocation, * since that can modify h->cur_pic_ptr. */ if (h->first_field) { av_assert0(h->cur_pic_ptr); av_assert0(h->cur_pic_ptr->f->buf[0]); assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF); /* Mark old field/frame as completed */ if (h->cur_pic_ptr->tf.owner == h->avctx) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_BOTTOM_FIELD); } /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ if (last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { if (h->cur_pic_ptr->frame_num != h->poc.frame_num) { /* This and previous field were reference, but had * different frame_nums. Consider this field first in * pair. Throw away previous field except for reference * purposes. */ if (last_pic_structure != PICT_FRAME) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_pic_structure == PICT_TOP_FIELD); } } else { /* Second field in complementary pair */ if (!((last_pic_structure == PICT_TOP_FIELD && h->picture_structure == PICT_BOTTOM_FIELD) || (last_pic_structure == PICT_BOTTOM_FIELD && h->picture_structure == PICT_TOP_FIELD))) { av_log(h->avctx, AV_LOG_ERROR, "Invalid field mode combination %d/%d\n", last_pic_structure, h->picture_structure); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_INVALIDDATA; } else if (last_pic_droppable != h->droppable) { avpriv_request_sample(h->avctx, "Found reference and non-reference fields in the same frame, which"); h->picture_structure = last_pic_structure; h->droppable = last_pic_droppable; return AVERROR_PATCHWELCOME; } } } } while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field && h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) { H264Picture *prev = h->short_ref_count ? 
h->short_ref[0] : NULL; av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->poc.frame_num, h->poc.prev_frame_num); if (!sps->gaps_in_frame_num_allowed_flag) for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++) h->last_pocs[i] = INT_MIN; ret = h264_frame_start(h); if (ret < 0) { h->first_field = 0; return ret; } h->poc.prev_frame_num++; h->poc.prev_frame_num %= 1 << sps->log2_max_frame_num; h->cur_pic_ptr->frame_num = h->poc.prev_frame_num; h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag; ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1); h->explicit_ref_marking = 0; ret = ff_h264_execute_ref_pic_marking(h); if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return ret; /* Error concealment: If a ref is missing, copy the previous ref * in its place. * FIXME: Avoiding a memcpy would be nice, but ref handling makes * many assumptions about there being no actual duplicates. * FIXME: This does not copy padding for out-of-frame motion * vectors. Given we are concealing a lost frame, this probably * is not noticeable by comparison, but it should be fixed. */ if (h->short_ref_count) { if (prev && h->short_ref[0]->f->width == prev->f->width && h->short_ref[0]->f->height == prev->f->height && h->short_ref[0]->f->format == prev->f->format) { ff_thread_await_progress(&prev->tf, INT_MAX, 0); if (prev->field_picture) ff_thread_await_progress(&prev->tf, INT_MAX, 1); av_image_copy(h->short_ref[0]->f->data, h->short_ref[0]->f->linesize, (const uint8_t **)prev->f->data, prev->f->linesize, prev->f->format, prev->f->width, prev->f->height); h->short_ref[0]->poc = prev->poc + 2; } h->short_ref[0]->frame_num = h->poc.prev_frame_num; } } /* See if we have a decoded first field looking for a pair... * We're using that to see whether to continue decoding in that * frame, or to allocate a new one. */ if (h->first_field) { av_assert0(h->cur_pic_ptr); av_assert0(h->cur_pic_ptr->f->buf[0]); assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) { /* Previous field is unmatched. Don't display it, but let it * remain for reference if marked as such. */ h->missing_fields ++; h->cur_pic_ptr = NULL; h->first_field = FIELD_PICTURE(h); } else { h->missing_fields = 0; if (h->cur_pic_ptr->frame_num != h->poc.frame_num) { ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, h->picture_structure==PICT_BOTTOM_FIELD); /* This and the previous field had different frame_nums. * Consider this field first in pair. Throw away previous * one except for reference purposes. */ h->first_field = 1; h->cur_pic_ptr = NULL; } else { /* Second field in complementary pair */ h->first_field = 0; } } } else { /* Frame or first field in a potentially complementary pair */ h->first_field = FIELD_PICTURE(h); } if (!FIELD_PICTURE(h) || h->first_field) { if (h264_frame_start(h) < 0) { h->first_field = 0; return AVERROR_INVALIDDATA; } } else { release_unused_pictures(h, 0); } /* Some macroblocks can be accessed before they're available in case * of lost slices, MBAFF or threading. 
*/ if (FIELD_PICTURE(h)) { for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++) memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table)); } else { memset(h->slice_table, -1, (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table)); } ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc, h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc); memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco)); h->nb_mmco = sl->nb_mmco; h->explicit_ref_marking = sl->explicit_ref_marking; h->picture_idr = nal->type == H264_NAL_IDR_SLICE; if (h->sei.recovery_point.recovery_frame_cnt >= 0) { const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt; if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I) h->valid_recovery_point = 1; if ( h->recovery_frame < 0 || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) { h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num); if (!h->valid_recovery_point) h->recovery_frame = h->poc.frame_num; } } h->cur_pic_ptr->f->key_frame |= (nal->type == H264_NAL_IDR_SLICE); if (nal->type == H264_NAL_IDR_SLICE || (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) { h->recovery_frame = -1; h->cur_pic_ptr->recovered = 1; } // If we have an IDR, all frames after it in decoded order are // "recovered". if (nal->type == H264_NAL_IDR_SLICE) h->frame_recovered |= FRAME_RECOVERED_IDR; #if 1 h->cur_pic_ptr->recovered |= h->frame_recovered; #else h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR); #endif /* Set the frame properties/side data. Only done for the second field in * field coded frames, since some SEI information is present for each field * and is merged by the SEI parsing code. */ if (!FIELD_PICTURE(h) || !h->first_field || h->missing_fields > 1) { ret = h264_export_frame_props(h); if (ret < 0) return ret; ret = h264_select_output_frame(h); if (ret < 0) return ret; } return 0; } | 3,609 |
1 | void fork_start(void) { mmap_fork_start(); qemu_mutex_lock(&tb_ctx.tb_lock); cpu_list_lock(); } | 3,612 |
1 | static int config_output(AVFilterLink *outlink) { AVFilterContext *ctx = outlink->src; LIBVMAFContext *s = ctx->priv; AVFilterLink *mainlink = ctx->inputs[0]; int ret; outlink->w = mainlink->w; outlink->h = mainlink->h; outlink->time_base = mainlink->time_base; outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio; outlink->frame_rate = mainlink->frame_rate; if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0) return ret; return 0; } | 3,613 |
1 | void h263_encode_mb(MpegEncContext * s, DCTELEM block[6][64], int motion_x, int motion_y) { int cbpc, cbpy, i, cbp, pred_x, pred_y; // printf("**mb x=%d y=%d\n", s->mb_x, s->mb_y); if (!s->mb_intra) { /* compute cbp */ cbp = 0; for (i = 0; i < 6; i++) { if (s->block_last_index[i] >= 0) cbp |= 1 << (5 - i); } if ((cbp | motion_x | motion_y) == 0) { /* skip macroblock */ put_bits(&s->pb, 1, 1); return; } put_bits(&s->pb, 1, 0); /* mb coded */ cbpc = cbp & 3; put_bits(&s->pb, inter_MCBPC_bits[cbpc], inter_MCBPC_code[cbpc]); cbpy = cbp >> 2; cbpy ^= 0xf; put_bits(&s->pb, cbpy_tab[cbpy][1], cbpy_tab[cbpy][0]); /* motion vectors: 16x16 mode only now */ h263_pred_motion(s, 0, &pred_x, &pred_y); if (!umvplus) { h263_encode_motion(s, motion_x - pred_x); h263_encode_motion(s, motion_y - pred_y); } else { h263p_encode_umotion(s, motion_x - pred_x); h263p_encode_umotion(s, motion_y - pred_y); if (((motion_x - pred_x) == 1) && ((motion_y - pred_y) == 1)) /* To prevent Start Code emulation */ put_bits(&s->pb,1,1); } } else { /* compute cbp */ cbp = 0; for (i = 0; i < 6; i++) { if (s->block_last_index[i] >= 1) cbp |= 1 << (5 - i); } cbpc = cbp & 3; if (s->pict_type == I_TYPE) { put_bits(&s->pb, intra_MCBPC_bits[cbpc], intra_MCBPC_code[cbpc]); } else { put_bits(&s->pb, 1, 0); /* mb coded */ put_bits(&s->pb, inter_MCBPC_bits[cbpc + 4], inter_MCBPC_code[cbpc + 4]); } if (s->h263_pred) { /* XXX: currently, we do not try to use ac prediction */ put_bits(&s->pb, 1, 0); /* no ac prediction */ } cbpy = cbp >> 2; put_bits(&s->pb, cbpy_tab[cbpy][1], cbpy_tab[cbpy][0]); } /* encode each block */ if (s->h263_pred) { for (i = 0; i < 6; i++) { mpeg4_encode_block(s, block[i], i); } } else { for (i = 0; i < 6; i++) { h263_encode_block(s, block[i], i); } } } | 3,614 |
0 | static int decode_band(IVI5DecContext *ctx, int plane_num, IVIBandDesc *band, AVCodecContext *avctx) { int result, i, t, idx1, idx2; IVITile *tile; band->buf = band->bufs[ctx->dst_buf]; band->ref_buf = band->bufs[ctx->ref_buf]; band->data_ptr = ctx->frame_data + (get_bits_count(&ctx->gb) >> 3); result = decode_band_hdr(ctx, band, avctx); if (result) { av_log(avctx, AV_LOG_ERROR, "Error while decoding band header: %d\n", result); return -1; } if (band->is_empty) { av_log(avctx, AV_LOG_ERROR, "Empty band encountered!\n"); return -1; } band->rv_map = &ctx->rvmap_tabs[band->rvmap_sel]; /* apply corrections to the selected rvmap table if present */ for (i = 0; i < band->num_corr; i++) { idx1 = band->corr[i*2]; idx2 = band->corr[i*2+1]; FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]); FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]); } for (t = 0; t < band->num_tiles; t++) { tile = &band->tiles[t]; tile->is_empty = get_bits1(&ctx->gb); if (tile->is_empty) { ff_ivi_process_empty_tile(avctx, band, tile, (ctx->planes[0].bands[0].mb_size >> 3) - (band->mb_size >> 3)); align_get_bits(&ctx->gb); } else { tile->data_size = ff_ivi_dec_tile_data_size(&ctx->gb); result = decode_mb_info(ctx, band, tile, avctx); if (result < 0) break; if (band->blk_size == 8) { band->intra_base = &ivi5_base_quant_8x8_intra[band->quant_mat][0]; band->inter_base = &ivi5_base_quant_8x8_inter[band->quant_mat][0]; band->intra_scale = &ivi5_scale_quant_8x8_intra[band->quant_mat][0]; band->inter_scale = &ivi5_scale_quant_8x8_inter[band->quant_mat][0]; } else { band->intra_base = ivi5_base_quant_4x4_intra; band->inter_base = ivi5_base_quant_4x4_inter; band->intra_scale = ivi5_scale_quant_4x4_intra; band->inter_scale = ivi5_scale_quant_4x4_inter; } result = ff_ivi_decode_blocks(&ctx->gb, band, tile); if (result < 0) { av_log(avctx, AV_LOG_ERROR, "Corrupted blocks data encountered!\n"); break; } } } /* restore the selected rvmap table by applying its corrections in reverse order */ for (i = band->num_corr-1; i >= 0; i--) { idx1 = band->corr[i*2]; idx2 = band->corr[i*2+1]; FFSWAP(uint8_t, band->rv_map->runtab[idx1], band->rv_map->runtab[idx2]); FFSWAP(int16_t, band->rv_map->valtab[idx1], band->rv_map->valtab[idx2]); } #if IVI_DEBUG if (band->checksum_present) { uint16_t chksum = ivi_calc_band_checksum(band); if (chksum != band->checksum) { av_log(avctx, AV_LOG_ERROR, "Band checksum mismatch! Plane %d, band %d, received: %x, calculated: %x\n", band->plane, band->band_num, band->checksum, chksum); } } #endif return result; } | 3,615 |
1 | static int select_reference_stream(AVFormatContext *s) { SegmentContext *seg = s->priv_data; int ret, i; seg->reference_stream_index = -1; if (!strcmp(seg->reference_stream_specifier, "auto")) { /* select first index of type with highest priority */ int type_index_map[AVMEDIA_TYPE_NB]; static const enum AVMediaType type_priority_list[] = { AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_SUBTITLE, AVMEDIA_TYPE_DATA, AVMEDIA_TYPE_ATTACHMENT }; enum AVMediaType type; for (i = 0; i < AVMEDIA_TYPE_NB; i++) type_index_map[i] = -1; /* select first index for each type */ for (i = 0; i < s->nb_streams; i++) { type = s->streams[i]->codec->codec_type; if ((unsigned)type < AVMEDIA_TYPE_NB && type_index_map[type] == -1 /* ignore attached pictures/cover art streams */ && !(s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC)) type_index_map[type] = i; } for (i = 0; i < FF_ARRAY_ELEMS(type_priority_list); i++) { type = type_priority_list[i]; if ((seg->reference_stream_index = type_index_map[type]) >= 0) break; } } else { for (i = 0; i < s->nb_streams; i++) { ret = avformat_match_stream_specifier(s, s->streams[i], seg->reference_stream_specifier); if (ret < 0) break; if (ret > 0) { seg->reference_stream_index = i; break; } } } if (seg->reference_stream_index < 0) { av_log(s, AV_LOG_ERROR, "Could not select stream matching identifier '%s'\n", seg->reference_stream_specifier); return AVERROR(EINVAL); } return 0; } | 3,617 |
1 | static void property_get_tm(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { TMProperty *prop = opaque; Error *err = NULL; struct tm value; prop->get(obj, &value, &err); if (err) { goto out; } visit_start_struct(v, name, NULL, 0, &err); if (err) { goto out; } visit_type_int32(v, "tm_year", &value.tm_year, &err); if (err) { goto out_end; } visit_type_int32(v, "tm_mon", &value.tm_mon, &err); if (err) { goto out_end; } visit_type_int32(v, "tm_mday", &value.tm_mday, &err); if (err) { goto out_end; } visit_type_int32(v, "tm_hour", &value.tm_hour, &err); if (err) { goto out_end; } visit_type_int32(v, "tm_min", &value.tm_min, &err); if (err) { goto out_end; } visit_type_int32(v, "tm_sec", &value.tm_sec, &err); if (err) { goto out_end; } out_end: error_propagate(errp, err); err = NULL; visit_end_struct(v, errp); out: error_propagate(errp, err); } | 3,618 |
1 | e1000e_write_lgcy_rx_descr(E1000ECore *core, uint8_t *desc, struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, uint16_t length) { uint32_t status_flags, rss, mrq; uint16_t ip_id; struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc; memset(d, 0, sizeof(*d)); assert(!rss_info->enabled); d->length = cpu_to_le16(length); e1000e_build_rx_metadata(core, pkt, pkt != NULL, rss_info, &rss, &mrq, &status_flags, &ip_id, &d->special); d->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24); d->status = (uint8_t) le32_to_cpu(status_flags); } | 3,622 |
1 | void qemu_put_byte(QEMUFile *f, int v) { if (f->last_error) { return; } f->buf[f->buf_index] = v; f->bytes_xfer++; if (f->ops->writev_buffer) { add_to_iovec(f, f->buf + f->buf_index, 1); } f->buf_index++; if (f->buf_index == IO_BUF_SIZE) { qemu_fflush(f); } } | 3,623 |
1 | long do_rt_sigreturn(CPUX86State *env) { abi_ulong frame_addr; struct rt_sigframe *frame; sigset_t set; frame_addr = env->regs[R_ESP] - 4; trace_user_do_rt_sigreturn(env, frame_addr); if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) goto badframe; target_to_host_sigset(&set, &frame->uc.tuc_sigmask); set_sigmask(&set); if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { goto badframe; } if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) { goto badframe; } unlock_user_struct(frame, frame_addr, 0); return -TARGET_QEMU_ESIGRETURN; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); return 0; } | 3,624 |
1 | unsigned ff_els_decode_unsigned(ElsDecCtx *ctx, ElsUnsignedRung *ur) { int i, n, r, bit; ElsRungNode *rung_node; if (ctx->err) return 0; /* decode unary prefix */ for (n = 0; n < ELS_EXPGOLOMB_LEN + 1; n++) if (ff_els_decode_bit(ctx, &ur->prefix_rung[n])) break; /* handle the error/overflow case */ if (ctx->err || n >= ELS_EXPGOLOMB_LEN) { ctx->err = AVERROR(EOVERFLOW); return 0; } /* handle the zero case */ if (!n) return 0; /* initialize probability tree */ if (!ur->rem_rung_list) { ur->rem_rung_list = av_realloc(NULL, RUNG_SPACE); if (!ur->rem_rung_list) { ctx->err = AVERROR(ENOMEM); return 0; } memset(ur->rem_rung_list, 0, RUNG_SPACE); ur->rung_list_size = RUNG_SPACE; ur->avail_index = ELS_EXPGOLOMB_LEN; } /* decode the remainder */ for (i = 0, r = 0, bit = 0; i < n; i++) { if (!i) rung_node = &ur->rem_rung_list[n]; else { if (!rung_node->next_index) { if (ur->rung_list_size <= (ur->avail_index + 2) * sizeof(ElsRungNode)) { // remember rung_node position ptrdiff_t pos = rung_node - ur->rem_rung_list; ur->rem_rung_list = av_realloc(ur->rem_rung_list, ur->rung_list_size + RUNG_SPACE); if (!ur->rem_rung_list) { av_free(ur->rem_rung_list); ctx->err = AVERROR(ENOMEM); return 0; } memset((uint8_t *) ur->rem_rung_list + ur->rung_list_size, 0, RUNG_SPACE); ur->rung_list_size += RUNG_SPACE; // restore rung_node position in the new list rung_node = &ur->rem_rung_list[pos]; } rung_node->next_index = ur->avail_index; ur->avail_index += 2; } rung_node = &ur->rem_rung_list[rung_node->next_index + bit]; } bit = ff_els_decode_bit(ctx, &rung_node->rung); if (ctx->err) return bit; r = (r << 1) + bit; } return (1 << n) - 1 + r; /* make value from exp golomb code */ } | 3,625 |
1 | static int sse_mb(MpegEncContext *s){ int w= 16; int h= 16; if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16; if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16; if(w==16 && h==16) if(s->avctx->mb_cmp == FF_CMP_NSSE){ return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16) +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8) +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8); }else{ return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16) +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8) +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8); } else return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize) +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize) +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize); } | 3,626 |
1 | static uint64_t alloc_cluster_offset(BlockDriverState *bs, uint64_t offset, int n_start, int n_end, int *num, QCowL2Meta *m) { BDRVQcowState *s = bs->opaque; int l2_index, ret; uint64_t l2_offset, *l2_table, cluster_offset; int nb_clusters, i = 0; ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index); if (ret == 0) return 0; nb_clusters = size_to_clusters(s, n_end << 9); nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); cluster_offset = be64_to_cpu(l2_table[l2_index]); /* We keep all QCOW_OFLAG_COPIED clusters */ if (cluster_offset & QCOW_OFLAG_COPIED) { nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size, &l2_table[l2_index], 0); cluster_offset &= ~QCOW_OFLAG_COPIED; m->nb_clusters = 0; goto out; } /* for the moment, multiple compressed clusters are not managed */ if (cluster_offset & QCOW_OFLAG_COMPRESSED) nb_clusters = 1; /* how many available clusters ? */ while (i < nb_clusters) { i += count_contiguous_clusters(nb_clusters - i, s->cluster_size, &l2_table[l2_index + i], 0); if(be64_to_cpu(l2_table[l2_index + i])) break; i += count_contiguous_free_clusters(nb_clusters - i, &l2_table[l2_index + i]); cluster_offset = be64_to_cpu(l2_table[l2_index + i]); if ((cluster_offset & QCOW_OFLAG_COPIED) || (cluster_offset & QCOW_OFLAG_COMPRESSED)) break; } nb_clusters = i; /* allocate a new cluster */ cluster_offset = alloc_clusters(bs, nb_clusters * s->cluster_size); /* save info needed for meta data update */ m->offset = offset; m->n_start = n_start; m->nb_clusters = nb_clusters; out: m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end); *num = m->nb_available - n_start; return cluster_offset; } | 3,627 |
1 | static int dv_extract_audio(uint8_t* frame, uint8_t* ppcm[4], const DVprofile *sys) { int size, chan, i, j, d, of, smpls, freq, quant, half_ch; uint16_t lc, rc; const uint8_t* as_pack; uint8_t *pcm, ipcm; as_pack = dv_extract_pack(frame, dv_audio_source); if (!as_pack) /* No audio ? */ return 0; smpls = as_pack[1] & 0x3f; /* samples in this frame - min. samples */ freq = (as_pack[4] >> 3) & 0x07; /* 0 - 48kHz, 1 - 44,1kHz, 2 - 32kHz */ quant = as_pack[4] & 0x07; /* 0 - 16bit linear, 1 - 12bit nonlinear */ if (quant > 1) return -1; /* unsupported quantization */ size = (sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */ half_ch = sys->difseg_size / 2; /* We work with 720p frames split in half, thus even frames have * channels 0,1 and odd 2,3. */ ipcm = (sys->height == 720 && !(frame[1] & 0x0C)) ? 2 : 0; pcm = ppcm[ipcm++]; /* for each DIF channel */ for (chan = 0; chan < sys->n_difchan; chan++) { /* for each DIF segment */ for (i = 0; i < sys->difseg_size; i++) { frame += 6 * 80; /* skip DIF segment header */ if (quant == 1 && i == half_ch) { /* next stereo channel (12bit mode only) */ pcm = ppcm[ipcm++]; if (!pcm) break; } /* for each AV sequence */ for (j = 0; j < 9; j++) { for (d = 8; d < 80; d += 2) { if (quant == 0) { /* 16bit quantization */ of = sys->audio_shuffle[i][j] + (d - 8) / 2 * sys->audio_stride; if (of*2 >= size) continue; pcm[of*2] = frame[d+1]; // FIXME: maybe we have to admit pcm[of*2+1] = frame[d]; // that DV is a big-endian PCM if (pcm[of*2+1] == 0x80 && pcm[of*2] == 0x00) pcm[of*2+1] = 0; } else { /* 12bit quantization */ lc = ((uint16_t)frame[d] << 4) | ((uint16_t)frame[d+2] >> 4); rc = ((uint16_t)frame[d+1] << 4) | ((uint16_t)frame[d+2] & 0x0f); lc = (lc == 0x800 ? 0 : dv_audio_12to16(lc)); rc = (rc == 0x800 ? 0 : dv_audio_12to16(rc)); of = sys->audio_shuffle[i%half_ch][j] + (d - 8) / 3 * sys->audio_stride; if (of*2 >= size) continue; pcm[of*2] = lc & 0xff; // FIXME: maybe we have to admit pcm[of*2+1] = lc >> 8; // that DV is a big-endian PCM of = sys->audio_shuffle[i%half_ch+half_ch][j] + (d - 8) / 3 * sys->audio_stride; pcm[of*2] = rc & 0xff; // FIXME: maybe we have to admit pcm[of*2+1] = rc >> 8; // that DV is a big-endian PCM ++d; } } frame += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */ } } /* next stereo channel (50Mbps and 100Mbps only) */ pcm = ppcm[ipcm++]; if (!pcm) break; } return size; } | 3,628 |
1 | static int transcode_init(void) { int ret = 0, i, j, k; AVFormatContext *oc; AVCodecContext *codec, *icodec; OutputStream *ost; InputStream *ist; char error[1024]; int want_sdp = 1; /* init framerate emulation */ for (i = 0; i < nb_input_files; i++) { InputFile *ifile = input_files[i]; if (ifile->rate_emu) for (j = 0; j < ifile->nb_streams; j++) input_streams[j + ifile->ist_index]->start = av_gettime(); } /* output stream init */ for (i = 0; i < nb_output_files; i++) { oc = output_files[i]->ctx; if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) { av_dump_format(oc, i, oc->filename, 1); av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i); return AVERROR(EINVAL); } } /* init complex filtergraphs */ for (i = 0; i < nb_filtergraphs; i++) if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0) return ret; /* for each output stream, we compute the right encoding parameters */ for (i = 0; i < nb_output_streams; i++) { ost = output_streams[i]; oc = output_files[ost->file_index]->ctx; ist = get_input_stream(ost); if (ost->attachment_filename) continue; codec = ost->st->codec; if (ist) { icodec = ist->st->codec; ost->st->disposition = ist->st->disposition; codec->bits_per_raw_sample = icodec->bits_per_raw_sample; codec->chroma_sample_location = icodec->chroma_sample_location; } if (ost->stream_copy) { uint64_t extra_size; av_assert0(ist && !ost->filter); extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE; if (extra_size > INT_MAX) { return AVERROR(EINVAL); } /* if stream_copy is selected, no need to decode or encode */ codec->codec_id = icodec->codec_id; codec->codec_type = icodec->codec_type; if (!codec->codec_tag) { if (!oc->oformat->codec_tag || av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id || av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0) codec->codec_tag = icodec->codec_tag; } codec->bit_rate = icodec->bit_rate; codec->rc_max_rate = icodec->rc_max_rate; codec->rc_buffer_size = icodec->rc_buffer_size; codec->field_order = icodec->field_order; codec->extradata = av_mallocz(extra_size); if (!codec->extradata) { return AVERROR(ENOMEM); } memcpy(codec->extradata, icodec->extradata, icodec->extradata_size); codec->extradata_size = icodec->extradata_size; if (!copy_tb) { codec->time_base = icodec->time_base; codec->time_base.num *= icodec->ticks_per_frame; av_reduce(&codec->time_base.num, &codec->time_base.den, codec->time_base.num, codec->time_base.den, INT_MAX); } else codec->time_base = ist->st->time_base; switch (codec->codec_type) { case AVMEDIA_TYPE_AUDIO: if (audio_volume != 256) { av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n"); exit_program(1); } codec->channel_layout = icodec->channel_layout; codec->sample_rate = icodec->sample_rate; codec->channels = icodec->channels; codec->frame_size = icodec->frame_size; codec->audio_service_type = icodec->audio_service_type; codec->block_align = icodec->block_align; break; case AVMEDIA_TYPE_VIDEO: codec->pix_fmt = icodec->pix_fmt; codec->width = icodec->width; codec->height = icodec->height; codec->has_b_frames = icodec->has_b_frames; if (!codec->sample_aspect_ratio.num) { codec->sample_aspect_ratio = ost->st->sample_aspect_ratio = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio : ist->st->codec->sample_aspect_ratio.num ? 
ist->st->codec->sample_aspect_ratio : (AVRational){0, 1}; } break; case AVMEDIA_TYPE_SUBTITLE: codec->width = icodec->width; codec->height = icodec->height; break; case AVMEDIA_TYPE_DATA: case AVMEDIA_TYPE_ATTACHMENT: break; default: abort(); } } else { if (!ost->enc) { /* should only happen when a default codec is not present. */ snprintf(error, sizeof(error), "Automatic encoder selection " "failed for output stream #%d:%d. Default encoder for " "format %s is probably disabled. Please choose an " "encoder manually.\n", ost->file_index, ost->index, oc->oformat->name); ret = AVERROR(EINVAL); goto dump_format; } if (ist) ist->decoding_needed = 1; ost->encoding_needed = 1; switch (codec->codec_type) { case AVMEDIA_TYPE_AUDIO: ost->fifo = av_fifo_alloc(1024); if (!ost->fifo) { return AVERROR(ENOMEM); } if (!codec->sample_rate) codec->sample_rate = icodec->sample_rate; choose_sample_rate(ost->st, ost->enc); codec->time_base = (AVRational){ 1, codec->sample_rate }; if (codec->sample_fmt == AV_SAMPLE_FMT_NONE) codec->sample_fmt = icodec->sample_fmt; choose_sample_fmt(ost->st, ost->enc); if (!codec->channels) codec->channels = icodec->channels; if (!codec->channel_layout) codec->channel_layout = icodec->channel_layout; if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels) codec->channel_layout = 0; icodec->request_channels = codec-> channels; ost->resample_sample_fmt = icodec->sample_fmt; ost->resample_sample_rate = icodec->sample_rate; ost->resample_channels = icodec->channels; ost->resample_channel_layout = icodec->channel_layout; break; case AVMEDIA_TYPE_VIDEO: if (!ost->filter) { FilterGraph *fg; fg = init_simple_filtergraph(ist, ost); if (configure_video_filters(fg)) { av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n"); exit(1); } } /* * We want CFR output if and only if one of those is true: * 1) user specified output framerate with -r * 2) user specified -vsync cfr * 3) output format is CFR and the user didn't force vsync to * something else than CFR * * in such a case, set ost->frame_rate */ if (!ost->frame_rate.num && ist && (video_sync_method == VSYNC_CFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) { ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1}; if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) { int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates); ost->frame_rate = ost->enc->supported_framerates[idx]; } } if (ost->frame_rate.num) { codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num}; video_sync_method = VSYNC_CFR; } else if (ist) codec->time_base = ist->st->time_base; else codec->time_base = ost->filter->filter->inputs[0]->time_base; codec->width = ost->filter->filter->inputs[0]->w; codec->height = ost->filter->filter->inputs[0]->h; codec->sample_aspect_ratio = ost->st->sample_aspect_ratio = ost->frame_aspect_ratio ? 
// overridden by the -aspect cli option av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) : ost->filter->filter->inputs[0]->sample_aspect_ratio; codec->pix_fmt = ost->filter->filter->inputs[0]->format; if (codec->width != icodec->width || codec->height != icodec->height || codec->pix_fmt != icodec->pix_fmt) { codec->bits_per_raw_sample = 0; } break; case AVMEDIA_TYPE_SUBTITLE: codec->time_base = (AVRational){1, 1000}; break; default: abort(); break; } /* two pass mode */ if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) { char logfilename[1024]; FILE *f; snprintf(logfilename, sizeof(logfilename), "%s-%d.log", pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX, i); if (!strcmp(ost->enc->name, "libx264")) { av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE); } else { if (codec->flags & CODEC_FLAG_PASS1) { f = fopen(logfilename, "wb"); if (!f) { av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n", logfilename, strerror(errno)); exit_program(1); } ost->logfile = f; } else { char *logbuffer; size_t logbuffer_size; if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) { av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n", logfilename); exit_program(1); } codec->stats_in = logbuffer; } } } } } /* open each encoder */ for (i = 0; i < nb_output_streams; i++) { ost = output_streams[i]; if (ost->encoding_needed) { AVCodec *codec = ost->enc; AVCodecContext *dec = NULL; if ((ist = get_input_stream(ost))) dec = ist->st->codec; if (dec && dec->subtitle_header) { ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size); if (!ost->st->codec->subtitle_header) { ret = AVERROR(ENOMEM); goto dump_format; } memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size); ost->st->codec->subtitle_header_size = dec->subtitle_header_size; } if (!av_dict_get(ost->opts, "threads", NULL, 0)) av_dict_set(&ost->opts, "threads", "auto", 0); if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) { snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height", ost->file_index, ost->index); ret = AVERROR(EINVAL); goto dump_format; } assert_codec_experimental(ost->st->codec, 1); assert_avoptions(ost->opts); if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000) av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low." 
"It takes bits/s as argument, not kbits/s\n"); extra_size += ost->st->codec->extradata_size; if (ost->st->codec->me_threshold) input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV; } } /* init input streams */ for (i = 0; i < nb_input_streams; i++) if ((ret = init_input_stream(i, error, sizeof(error))) < 0) goto dump_format; /* discard unused programs */ for (i = 0; i < nb_input_files; i++) { InputFile *ifile = input_files[i]; for (j = 0; j < ifile->ctx->nb_programs; j++) { AVProgram *p = ifile->ctx->programs[j]; int discard = AVDISCARD_ALL; for (k = 0; k < p->nb_stream_indexes; k++) if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) { discard = AVDISCARD_DEFAULT; break; } p->discard = discard; } } /* open files and write file headers */ for (i = 0; i < nb_output_files; i++) { oc = output_files[i]->ctx; oc->interrupt_callback = int_cb; if (avformat_write_header(oc, &output_files[i]->opts) < 0) { snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i); ret = AVERROR(EINVAL); goto dump_format; } assert_avoptions(output_files[i]->opts); if (strcmp(oc->oformat->name, "rtp")) { want_sdp = 0; } } dump_format: /* dump the file output parameters - cannot be done before in case of stream copy */ for (i = 0; i < nb_output_files; i++) { av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1); } /* dump the stream mapping */ av_log(NULL, AV_LOG_INFO, "Stream mapping:\n"); for (i = 0; i < nb_input_streams; i++) { ist = input_streams[i]; for (j = 0; j < ist->nb_filters; j++) { AVFilterLink *link = ist->filters[j]->filter->outputs[0]; if (ist->filters[j]->graph->graph_desc) { av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s", ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?", link->dst->filter->name); if (link->dst->input_count > 1) av_log(NULL, AV_LOG_INFO, ":%s", link->dstpad->name); if (nb_filtergraphs > 1) av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index); av_log(NULL, AV_LOG_INFO, "\n"); } } } for (i = 0; i < nb_output_streams; i++) { ost = output_streams[i]; if (ost->attachment_filename) { /* an attached file */ av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n", ost->attachment_filename, ost->file_index, ost->index); continue; } if (ost->filter && ost->filter->graph->graph_desc) { /* output from a complex graph */ AVFilterLink *link = ost->filter->filter->inputs[0]; av_log(NULL, AV_LOG_INFO, " %s", link->src->filter->name); if (link->src->output_count > 1) av_log(NULL, AV_LOG_INFO, ":%s", link->srcpad->name); if (nb_filtergraphs > 1) av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index); av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index, ost->index, ost->enc ? ost->enc->name : "?"); continue; } av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d", input_streams[ost->source_index]->file_index, input_streams[ost->source_index]->st->index, ost->file_index, ost->index); if (ost->sync_ist != input_streams[ost->source_index]) av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]", ost->sync_ist->file_index, ost->sync_ist->st->index); if (ost->stream_copy) av_log(NULL, AV_LOG_INFO, " (copy)"); else av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ? input_streams[ost->source_index]->dec->name : "?", ost->enc ? ost->enc->name : "?"); av_log(NULL, AV_LOG_INFO, "\n"); } if (ret) { av_log(NULL, AV_LOG_ERROR, "%s\n", error); return ret; } if (want_sdp) { print_sdp(); } return 0; } | 3,629 |
1 | static int get_aiff_header(AVFormatContext *s, int size, unsigned version) { AVIOContext *pb = s->pb; AVCodecParameters *par = s->streams[0]->codecpar; AIFFInputContext *aiff = s->priv_data; int exp; uint64_t val; int sample_rate; unsigned int num_frames; if (size & 1) size++; par->codec_type = AVMEDIA_TYPE_AUDIO; par->channels = avio_rb16(pb); num_frames = avio_rb32(pb); par->bits_per_coded_sample = avio_rb16(pb); exp = avio_rb16(pb) - 16383 - 63; val = avio_rb64(pb); if (exp <-63 || exp >63) { av_log(s, AV_LOG_ERROR, "exp %d is out of range\n", exp); return AVERROR_INVALIDDATA; } if (exp >= 0) sample_rate = val << exp; else sample_rate = (val + (1ULL<<(-exp-1))) >> -exp; par->sample_rate = sample_rate; size -= 18; /* get codec id for AIFF-C */ if (size < 4) { version = AIFF; } else if (version == AIFF_C_VERSION1) { par->codec_tag = avio_rl32(pb); par->codec_id = ff_codec_get_id(ff_codec_aiff_tags, par->codec_tag); if (par->codec_id == AV_CODEC_ID_NONE) { char tag[32]; av_get_codec_tag_string(tag, sizeof(tag), par->codec_tag); avpriv_request_sample(s, "unknown or unsupported codec tag: %s", tag); } size -= 4; } if (version != AIFF_C_VERSION1 || par->codec_id == AV_CODEC_ID_PCM_S16BE) { par->codec_id = aiff_codec_get_id(par->bits_per_coded_sample); par->bits_per_coded_sample = av_get_bits_per_sample(par->codec_id); aiff->block_duration = 1; } else { switch (par->codec_id) { case AV_CODEC_ID_PCM_F32BE: case AV_CODEC_ID_PCM_F64BE: case AV_CODEC_ID_PCM_S16LE: case AV_CODEC_ID_PCM_ALAW: case AV_CODEC_ID_PCM_MULAW: aiff->block_duration = 1; break; case AV_CODEC_ID_ADPCM_IMA_QT: par->block_align = 34 * par->channels; break; case AV_CODEC_ID_MACE3: par->block_align = 2 * par->channels; break; case AV_CODEC_ID_ADPCM_G726LE: par->bits_per_coded_sample = 5; case AV_CODEC_ID_ADPCM_IMA_WS: case AV_CODEC_ID_ADPCM_G722: case AV_CODEC_ID_MACE6: case AV_CODEC_ID_SDX2_DPCM: par->block_align = 1 * par->channels; break; case AV_CODEC_ID_GSM: par->block_align = 33; break; default: aiff->block_duration = 1; break; } if (par->block_align > 0) aiff->block_duration = av_get_audio_frame_duration2(par, par->block_align); } /* Block align needs to be computed in all cases, as the definition * is specific to applications -> here we use the WAVE format definition */ if (!par->block_align) par->block_align = (av_get_bits_per_sample(par->codec_id) * par->channels) >> 3; if (aiff->block_duration) { par->bit_rate = par->sample_rate * (par->block_align << 3) / aiff->block_duration; } /* Chunk is over */ if (size) avio_skip(pb, size); return num_frames; } | 3,630 |
1 | static void xen_init_pv(QEMUMachineInitArgs *args) { const char *cpu_model = args->cpu_model; const char *kernel_filename = args->kernel_filename; const char *kernel_cmdline = args->kernel_cmdline; const char *initrd_filename = args->initrd_filename; X86CPU *cpu; CPUState *cs; DriveInfo *dinfo; int i; /* Initialize a dummy CPU */ if (cpu_model == NULL) { #ifdef TARGET_X86_64 cpu_model = "qemu64"; #else cpu_model = "qemu32"; #endif } cpu = cpu_x86_init(cpu_model); cs = CPU(cpu); cs->halted = 1; /* Initialize backend core & drivers */ if (xen_be_init() != 0) { fprintf(stderr, "%s: xen backend core setup failed\n", __FUNCTION__); exit(1); } switch (xen_mode) { case XEN_ATTACH: /* nothing to do, xend handles everything */ break; case XEN_CREATE: if (xen_domain_build_pv(kernel_filename, initrd_filename, kernel_cmdline) < 0) { fprintf(stderr, "xen pv domain creation failed\n"); exit(1); } break; case XEN_EMULATE: fprintf(stderr, "xen emulation not implemented (yet)\n"); exit(1); break; } xen_be_register("console", &xen_console_ops); xen_be_register("vkbd", &xen_kbdmouse_ops); xen_be_register("vfb", &xen_framebuffer_ops); xen_be_register("qdisk", &xen_blkdev_ops); xen_be_register("qnic", &xen_netdev_ops); /* configure framebuffer */ if (xenfb_enabled) { xen_config_dev_vfb(0, "vnc"); xen_config_dev_vkbd(0); } /* configure disks */ for (i = 0; i < 16; i++) { dinfo = drive_get(IF_XEN, 0, i); if (!dinfo) continue; xen_config_dev_blk(dinfo); } /* configure nics */ for (i = 0; i < nb_nics; i++) { if (!nd_table[i].model || 0 != strcmp(nd_table[i].model, "xen")) continue; xen_config_dev_nic(nd_table + i); } /* config cleanup hook */ atexit(xen_config_cleanup); /* setup framebuffer */ xen_init_display(xen_domid); } | 3,631 |
1 | void signal_init(void) { struct sigaction act; int i; /* set all host signal handlers. ALL signals are blocked during the handlers to serialize them. */ sigfillset(&act.sa_mask); act.sa_flags = SA_SIGINFO; act.sa_sigaction = host_signal_handler; for(i = 1; i < NSIG; i++) { sigaction(i, &act, NULL); } memset(sigact_table, 0, sizeof(sigact_table)); first_free = &sigqueue_table[0]; for(i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) sigqueue_table[i].next = &sigqueue_table[i + 1]; sigqueue_table[MAX_SIGQUEUE_SIZE - 1].next = NULL; } | 3,632 |
1 | static inline void gen_bx(DisasContext *s, TCGv_i32 var) { s->is_jmp = DISAS_UPDATE; tcg_gen_andi_i32(cpu_R[15], var, ~1); tcg_gen_andi_i32(var, var, 1); store_cpu_field(var, thumb); } | 3,633 |
1 | static int get_nb_samples(AVCodecContext *avctx, GetByteContext *gb, int buf_size, int *coded_samples, int *approx_nb_samples) { ADPCMDecodeContext *s = avctx->priv_data; int nb_samples = 0; int ch = avctx->channels; int has_coded_samples = 0; int header_size; *coded_samples = 0; *approx_nb_samples = 0; if(ch <= 0) return 0; switch (avctx->codec->id) { /* constant, only check buf_size */ case AV_CODEC_ID_ADPCM_EA_XAS: if (buf_size < 76 * ch) return 0; nb_samples = 128; break; case AV_CODEC_ID_ADPCM_IMA_QT: if (buf_size < 34 * ch) return 0; nb_samples = 64; break; /* simple 4-bit adpcm */ case AV_CODEC_ID_ADPCM_CT: case AV_CODEC_ID_ADPCM_IMA_APC: case AV_CODEC_ID_ADPCM_IMA_EA_SEAD: case AV_CODEC_ID_ADPCM_IMA_OKI: case AV_CODEC_ID_ADPCM_IMA_WS: case AV_CODEC_ID_ADPCM_YAMAHA: nb_samples = buf_size * 2 / ch; break; } if (nb_samples) return nb_samples; /* simple 4-bit adpcm, with header */ header_size = 0; switch (avctx->codec->id) { case AV_CODEC_ID_ADPCM_4XM: case AV_CODEC_ID_ADPCM_IMA_ISS: header_size = 4 * ch; break; case AV_CODEC_ID_ADPCM_IMA_AMV: header_size = 8; break; case AV_CODEC_ID_ADPCM_IMA_SMJPEG: header_size = 4 * ch; break; } if (header_size > 0) return (buf_size - header_size) * 2 / ch; /* more complex formats */ switch (avctx->codec->id) { case AV_CODEC_ID_ADPCM_EA: has_coded_samples = 1; *coded_samples = bytestream2_get_le32(gb); *coded_samples -= *coded_samples % 28; nb_samples = (buf_size - 12) / 30 * 28; break; case AV_CODEC_ID_ADPCM_IMA_EA_EACS: has_coded_samples = 1; *coded_samples = bytestream2_get_le32(gb); nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch; break; case AV_CODEC_ID_ADPCM_EA_MAXIS_XA: nb_samples = (buf_size - ch) / ch * 2; break; case AV_CODEC_ID_ADPCM_EA_R1: case AV_CODEC_ID_ADPCM_EA_R2: case AV_CODEC_ID_ADPCM_EA_R3: /* maximum number of samples */ /* has internal offsets and a per-frame switch to signal raw 16-bit */ has_coded_samples = 1; switch (avctx->codec->id) { case AV_CODEC_ID_ADPCM_EA_R1: header_size = 4 + 9 * ch; *coded_samples = bytestream2_get_le32(gb); break; case AV_CODEC_ID_ADPCM_EA_R2: header_size = 4 + 5 * ch; *coded_samples = bytestream2_get_le32(gb); break; case AV_CODEC_ID_ADPCM_EA_R3: header_size = 4 + 5 * ch; *coded_samples = bytestream2_get_be32(gb); break; } *coded_samples -= *coded_samples % 28; nb_samples = (buf_size - header_size) * 2 / ch; nb_samples -= nb_samples % 28; *approx_nb_samples = 1; break; case AV_CODEC_ID_ADPCM_IMA_DK3: if (avctx->block_align > 0) buf_size = FFMIN(buf_size, avctx->block_align); nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch; break; case AV_CODEC_ID_ADPCM_IMA_DK4: if (avctx->block_align > 0) buf_size = FFMIN(buf_size, avctx->block_align); nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch; break; case AV_CODEC_ID_ADPCM_IMA_RAD: if (avctx->block_align > 0) buf_size = FFMIN(buf_size, avctx->block_align); nb_samples = (buf_size - 4 * ch) * 2 / ch; break; case AV_CODEC_ID_ADPCM_IMA_WAV: { int bsize = ff_adpcm_ima_block_sizes[avctx->bits_per_coded_sample - 2]; int bsamples = ff_adpcm_ima_block_samples[avctx->bits_per_coded_sample - 2]; if (avctx->block_align > 0) buf_size = FFMIN(buf_size, avctx->block_align); nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples; break; } case AV_CODEC_ID_ADPCM_MS: if (avctx->block_align > 0) buf_size = FFMIN(buf_size, avctx->block_align); nb_samples = 2 + (buf_size - 7 * ch) * 2 / ch; break; case AV_CODEC_ID_ADPCM_SBPRO_2: case AV_CODEC_ID_ADPCM_SBPRO_3: case AV_CODEC_ID_ADPCM_SBPRO_4: { int samples_per_byte; switch (avctx->codec->id) { case 
AV_CODEC_ID_ADPCM_SBPRO_2: samples_per_byte = 4; break; case AV_CODEC_ID_ADPCM_SBPRO_3: samples_per_byte = 3; break; case AV_CODEC_ID_ADPCM_SBPRO_4: samples_per_byte = 2; break; } if (!s->status[0].step_index) { nb_samples++; buf_size -= ch; } nb_samples += buf_size * samples_per_byte / ch; break; } case AV_CODEC_ID_ADPCM_SWF: { int buf_bits = buf_size * 8 - 2; int nbits = (bytestream2_get_byte(gb) >> 6) + 2; int block_hdr_size = 22 * ch; int block_size = block_hdr_size + nbits * ch * 4095; int nblocks = buf_bits / block_size; int bits_left = buf_bits - nblocks * block_size; nb_samples = nblocks * 4096; if (bits_left >= block_hdr_size) nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch); break; } case AV_CODEC_ID_ADPCM_THP: if (avctx->extradata) { nb_samples = buf_size / (8 * ch) * 14; break; } has_coded_samples = 1; bytestream2_skip(gb, 4); // channel size *coded_samples = bytestream2_get_be32(gb); *coded_samples -= *coded_samples % 14; nb_samples = (buf_size - (8 + 36 * ch)) / (8 * ch) * 14; break; case AV_CODEC_ID_ADPCM_AFC: nb_samples = buf_size / (9 * ch) * 16; break; case AV_CODEC_ID_ADPCM_XA: nb_samples = (buf_size / 128) * 224 / ch; break; case AV_CODEC_ID_ADPCM_DTK: nb_samples = buf_size / (16 * ch) * 28; break; } /* validate coded sample count */ if (has_coded_samples && (*coded_samples <= 0 || *coded_samples > nb_samples)) return AVERROR_INVALIDDATA; return nb_samples; } | 3,635 |
1 | static void serial_receive1(void *opaque, const uint8_t *buf, int size) { SerialState *s = opaque; serial_receive_byte(s, buf[0]); } | 3,636 |
0 | static TCGv_i32 gen_get_asi(DisasContext *dc, int insn) { int asi; if (IS_IMM) { #ifdef TARGET_SPARC64 asi = dc->asi; #else gen_exception(dc, TT_ILL_INSN); asi = 0; #endif } else { asi = GET_FIELD(insn, 19, 26); } return tcg_const_i32(asi); } | 3,639 |
0 | static void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUSH4State *regs) { struct target_rt_sigframe *frame; abi_ulong frame_addr; int i; int err = 0; int signal; frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame)); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) goto give_sigsegv; signal = current_exec_domain_sig(sig); err |= copy_siginfo_to_user(&frame->info, info); /* Create the ucontext. */ __put_user(0, &frame->uc.tuc_flags); __put_user(0, (unsigned long *)&frame->uc.tuc_link); __put_user((unsigned long)target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); __put_user(sas_ss_flags(regs->gregs[15]), &frame->uc.tuc_stack.ss_flags); __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); setup_sigcontext(&frame->uc.tuc_mcontext, regs, set->sig[0]); for(i = 0; i < TARGET_NSIG_WORDS; i++) { __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); } /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa_flags & TARGET_SA_RESTORER) { regs->pr = (unsigned long) ka->sa_restorer; } else { /* Generate return code (system call to sigreturn) */ __put_user(MOVW(2), &frame->retcode[0]); __put_user(TRAP_NOARG, &frame->retcode[1]); __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]); regs->pr = (unsigned long) frame->retcode; } if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->gregs[15] = frame_addr; regs->gregs[4] = signal; /* Arg for signal handler */ regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info); regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc); regs->pc = (unsigned long) ka->_sa_handler; unlock_user_struct(frame, frame_addr, 1); return; give_sigsegv: unlock_user_struct(frame, frame_addr, 1); force_sig(TARGET_SIGSEGV); } | 3,640 |
0 | static void display_mouse_set(DisplayChangeListener *dcl, int x, int y, int on) { SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); qemu_mutex_lock(&ssd->lock); ssd->ptr_x = x; ssd->ptr_y = y; if (ssd->ptr_move) { g_free(ssd->ptr_move); } ssd->ptr_move = qemu_spice_create_cursor_update(ssd, NULL, on); qemu_mutex_unlock(&ssd->lock); } | 3,641 |
0 | static void cpu_request_exit(void *opaque, int irq, int level) { CPUState *cpu = current_cpu; if (cpu && level) { cpu_exit(cpu); } } | 3,642 |
0 | int qemu_get_thread_id(void) { #if defined (__linux__) return syscall(SYS_gettid); #else return getpid(); #endif } | 3,643 |
0 | static void listflags(char *buf, int bufsize, uint32_t fbits, const char **featureset, uint32_t flags) { const char **p = &featureset[31]; char *q, *b, bit; int nc; b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL; *buf = '\0'; for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit) if (fbits & 1 << bit && (*p || !flags)) { if (*p) nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p); else nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit); if (bufsize <= nc) { if (b) sprintf(b, "..."); return; } q += nc; bufsize -= nc; } } | 3,644 |
0 | void watchdog_add_model(WatchdogTimerModel *model) { LIST_INSERT_HEAD(&watchdog_list, model, entry); } | 3,645 |
0 | static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index) { int x, y, i; const int ring_size = s->avctx->context_model ? 3 : 2; int16_t *sample[3]; s->run_index = 0; memset(s->sample_buffer, 0, ring_size * (w + 6) * sizeof(*s->sample_buffer)); for (y = 0; y < h; y++) { for (i = 0; i < ring_size; i++) sample[i] = s->sample_buffer + (w + 6) * ((h + i - y) % ring_size) + 3; sample[0][-1]= sample[1][0 ]; sample[1][ w]= sample[1][w-1]; // { START_TIMER if (s->bits_per_raw_sample <= 8) { for (x = 0; x < w; x++) sample[0][x] = src[x + stride * y]; encode_line(s, w, sample, plane_index, 8); } else { if (s->packed_at_lsb) { for (x = 0; x < w; x++) { sample[0][x] = ((uint16_t*)(src + stride*y))[x]; } } else { for (x = 0; x < w; x++) { sample[0][x] = ((uint16_t*)(src + stride*y))[x] >> (16 - s->bits_per_raw_sample); } } encode_line(s, w, sample, plane_index, s->bits_per_raw_sample); } // STOP_TIMER("encode line") } } } | 3,646 |
0 | static int decode_plane(Indeo3DecodeContext *ctx, AVCodecContext *avctx, Plane *plane, const uint8_t *data, int32_t data_size, int32_t strip_width) { Cell curr_cell; int num_vectors; /* each plane data starts with mc_vector_count field, */ /* an optional array of motion vectors followed by the vq data */ num_vectors = bytestream_get_le32(&data); ctx->mc_vectors = num_vectors ? data : 0; /* init the bitreader */ init_get_bits(&ctx->gb, &data[num_vectors * 2], data_size << 3); ctx->skip_bits = 0; ctx->need_resync = 0; ctx->last_byte = data + data_size - 1; /* initialize the 1st cell and set its dimensions to whole plane */ curr_cell.xpos = curr_cell.ypos = 0; curr_cell.width = plane->width >> 2; curr_cell.height = plane->height >> 2; curr_cell.tree = 0; // we are in the MC tree now curr_cell.mv_ptr = 0; // no motion vector = INTRA cell return parse_bintree(ctx, avctx, plane, INTRA_NULL, &curr_cell, CELL_STACK_MAX, strip_width); } | 3,647 |
0 | int qemu_savevm_state_iterate(QEMUFile *f) { SaveStateEntry *se; int ret = 1; TAILQ_FOREACH(se, &savevm_handlers, entry) { if (se->save_live_state == NULL) continue; /* Section type */ qemu_put_byte(f, QEMU_VM_SECTION_PART); qemu_put_be32(f, se->section_id); ret &= !!se->save_live_state(f, QEMU_VM_SECTION_PART, se->opaque); } if (ret) return 1; if (qemu_file_has_error(f)) return -EIO; return 0; } | 3,648 |
0 | static void qmp_cleanup(void *datap) { QmpSerializeData *d = datap; visit_free(qmp_output_get_visitor(d->qov)); visit_free(d->qiv); g_free(d); } | 3,649 |
0 | void parse_numa_opts(MachineClass *mc) { int i; if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, NULL, NULL)) { exit(1); } assert(max_numa_nodeid <= MAX_NODES); /* No support for sparse NUMA node IDs yet: */ for (i = max_numa_nodeid - 1; i >= 0; i--) { /* Report large node IDs first, to make mistakes easier to spot */ if (!numa_info[i].present) { error_report("numa: Node ID missing: %d", i); exit(1); } } /* This must be always true if all nodes are present: */ assert(nb_numa_nodes == max_numa_nodeid); if (nb_numa_nodes > 0) { uint64_t numa_total; if (nb_numa_nodes > MAX_NODES) { nb_numa_nodes = MAX_NODES; } /* If no memory size is given for any node, assume the default case * and distribute the available memory equally across all nodes */ for (i = 0; i < nb_numa_nodes; i++) { if (numa_info[i].node_mem != 0) { break; } } if (i == nb_numa_nodes) { uint64_t usedmem = 0; /* On Linux, each node's border has to be 8MB aligned, * the final node gets the rest. */ for (i = 0; i < nb_numa_nodes - 1; i++) { numa_info[i].node_mem = (ram_size / nb_numa_nodes) & ~((1 << 23UL) - 1); usedmem += numa_info[i].node_mem; } numa_info[i].node_mem = ram_size - usedmem; } numa_total = 0; for (i = 0; i < nb_numa_nodes; i++) { numa_total += numa_info[i].node_mem; } if (numa_total != ram_size) { error_report("total memory for NUMA nodes (0x%" PRIx64 ")" " should equal RAM size (0x" RAM_ADDR_FMT ")", numa_total, ram_size); exit(1); } for (i = 0; i < nb_numa_nodes; i++) { QLIST_INIT(&numa_info[i].addr); } numa_set_mem_ranges(); for (i = 0; i < nb_numa_nodes; i++) { if (!bitmap_empty(numa_info[i].node_cpu, MAX_CPUMASK_BITS)) { break; } } /* Historically VCPUs were assigned in round-robin order to NUMA * nodes. However it causes issues with guest not handling it nice * in case where cores/threads from a multicore CPU appear on * different nodes. So allow boards to override default distribution * rule grouping VCPUs by socket so that VCPUs from the same socket * would be on the same node. */ if (i == nb_numa_nodes) { for (i = 0; i < max_cpus; i++) { unsigned node_id = i % nb_numa_nodes; if (mc->cpu_index_to_socket_id) { node_id = mc->cpu_index_to_socket_id(i) % nb_numa_nodes; } set_bit(i, numa_info[node_id].node_cpu); } } validate_numa_cpus(); } else { numa_set_mem_node_id(0, ram_size, 0); } } | 3,650 |
0 | static inline uint8_t *bt_hci_event_start(struct bt_hci_s *hci, int evt, int len) { uint8_t *packet, mask; int mask_byte; if (len > 255) { fprintf(stderr, "%s: HCI event params too long (%ib)\n", __FUNCTION__, len); exit(-1); } mask_byte = (evt - 1) >> 3; mask = 1 << ((evt - 1) & 3); if (mask & bt_event_reserved_mask[mask_byte] & ~hci->event_mask[mask_byte]) return NULL; packet = hci->evt_packet(hci->opaque); packet[0] = evt; packet[1] = len; return &packet[2]; } | 3,651 |
0 | uint64_t helper_efdctsidz (uint64_t val) { CPU_DoubleU u; u.ll = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float64_is_nan(u.d))) return 0; return float64_to_int64_round_to_zero(u.d, &env->vec_status); } | 3,652 |
0 | static int kvm_has_msr_star(CPUState *env) { kvm_supported_msrs(env); return has_msr_star; } | 3,653 |
0 | int64_t qemu_fseek(QEMUFile *f, int64_t pos, int whence) { if (whence == SEEK_SET) { /* nothing to do */ } else if (whence == SEEK_CUR) { pos += qemu_ftell(f); } else { /* SEEK_END not supported */ return -1; } if (f->is_writable) { qemu_fflush(f); f->buf_offset = pos; } else { f->buf_offset = pos; f->buf_index = 0; f->buf_size = 0; } return pos; } | 3,654 |
0 | static void qbus_list_dev(BusState *bus, char *dest, int len) { DeviceState *dev; const char *sep = " "; int pos = 0; pos += snprintf(dest+pos, len-pos, "devices at \"%s\":", bus->name); LIST_FOREACH(dev, &bus->children, sibling) { pos += snprintf(dest+pos, len-pos, "%s\"%s\"", sep, dev->info->name); if (dev->id) pos += snprintf(dest+pos, len-pos, "/\"%s\"", dev->id); sep = ", "; } } | 3,655 |
0 | void vnc_zlib_zfree(void *x, void *addr) { qemu_free(addr); } | 3,656 |
0 | static target_ulong h_put_term_char(CPUState *env, sPAPREnvironment *spapr, target_ulong opcode, target_ulong *args) { target_ulong reg = args[0]; target_ulong len = args[1]; target_ulong char0_7 = args[2]; target_ulong char8_15 = args[3]; VIOsPAPRDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg); uint8_t buf[16]; if (!sdev) { return H_PARAMETER; } if (len > 16) { return H_PARAMETER; } *((uint64_t *)buf) = cpu_to_be64(char0_7); *((uint64_t *)buf + 1) = cpu_to_be64(char8_15); vty_putchars(sdev, buf, len); return H_SUCCESS; } | 3,657 |
0 | static void thread_pool_co_cb(void *opaque, int ret) { ThreadPoolCo *co = opaque; co->ret = ret; qemu_coroutine_enter(co->co); } | 3,659 |
0 | static int qemu_balloon(ram_addr_t target) { if (!balloon_event_fn) { return 0; } trace_balloon_event(balloon_opaque, target); balloon_event_fn(balloon_opaque, target); return 1; } | 3,660 |
0 | static bool cmd_write_dma(IDEState *s, uint8_t cmd) { bool lba48 = (cmd == WIN_WRITEDMA_EXT); if (!s->bs) { ide_abort_command(s); return true; } ide_cmd_lba48_transform(s, lba48); ide_sector_start_dma(s, IDE_DMA_WRITE); s->media_changed = 1; return false; } | 3,661 |
0 | static uint64_t pxa2xx_pm_read(void *opaque, hwaddr addr, unsigned size) { PXA2xxState *s = (PXA2xxState *) opaque; switch (addr) { case PMCR ... PCMD31: if (addr & 3) goto fail; return s->pm_regs[addr >> 2]; default: fail: printf("%s: Bad register " REG_FMT "\n", __FUNCTION__, addr); break; } return 0; } | 3,663 |
0 | static void ics_base_realize(DeviceState *dev, Error **errp) { ICSStateClass *icsc = ICS_BASE_GET_CLASS(dev); ICSState *ics = ICS_BASE(dev); Object *obj; Error *err = NULL; obj = object_property_get_link(OBJECT(dev), ICS_PROP_XICS, &err); if (!obj) { error_setg(errp, "%s: required link '" ICS_PROP_XICS "' not found: %s", __func__, error_get_pretty(err)); return; } ics->xics = XICS_FABRIC(obj); if (icsc->realize) { icsc->realize(dev, errp); } } | 3,664 |
0 | bool postcopy_ram_supported_by_host(void) { error_report("%s: No OS support", __func__); return false; } | 3,665 |
1 | int64_t qemu_file_get_rate_limit(QEMUFile *f) { return f->xfer_limit; } | 3,666 |
1 | static inline void put_codeword(PutBitContext *pb, vorbis_enc_codebook *cb, int entry) { assert(entry >= 0); assert(entry < cb->nentries); assert(cb->lens[entry]); put_bits(pb, cb->lens[entry], cb->codewords[entry]); } | 3,667 |
1 | static inline int qemu_rdma_buffer_mergable(RDMAContext *rdma, uint64_t offset, uint64_t len) { RDMALocalBlock *block; uint8_t *host_addr; uint8_t *chunk_end; if (rdma->current_index < 0) { return 0; } if (rdma->current_chunk < 0) { return 0; } block = &(rdma->local_ram_blocks.block[rdma->current_index]); host_addr = block->local_host_addr + (offset - block->offset); chunk_end = ram_chunk_end(block, rdma->current_chunk); if (rdma->current_length == 0) { return 0; } /* * Only merge into chunk sequentially. */ if (offset != (rdma->current_addr + rdma->current_length)) { return 0; } if (offset < block->offset) { return 0; } if ((offset + len) > (block->offset + block->length)) { return 0; } if ((host_addr + len) > chunk_end) { return 0; } return 1; } | 3,668 |
1 | static int vmdk_open_desc_file(BlockDriverState *bs, int flags, int64_t desc_offset) { int ret; char buf[2048]; char ct[128]; BDRVVmdkState *s = bs->opaque; ret = bdrv_pread(bs->file, desc_offset, buf, sizeof(buf)); if (ret < 0) { return ret; } buf[2047] = '\0'; if (vmdk_parse_description(buf, "createType", ct, sizeof(ct))) { return -EINVAL; } if (strcmp(ct, "monolithicFlat") && strcmp(ct, "twoGbMaxExtentSparse") && strcmp(ct, "twoGbMaxExtentFlat")) { fprintf(stderr, "VMDK: Not supported image type \"%s\""".\n", ct); return -ENOTSUP; } s->desc_offset = 0; ret = vmdk_parse_extents(buf, bs, bs->file->filename); if (ret) { return ret; } /* try to open parent images, if exist */ if (vmdk_parent_open(bs)) { g_free(s->extents); return -EINVAL; } s->parent_cid = vmdk_read_cid(bs, 1); return 0; } | 3,669 |
1 | static void vc1_mc_4mv_chroma(VC1Context *v, int dir) { MpegEncContext *s = &v->s; H264ChromaContext *h264chroma = &v->h264chroma; uint8_t *srcU, *srcV; int uvmx, uvmy, uvsrc_x, uvsrc_y; int k, tx = 0, ty = 0; int mvx[4], mvy[4], intra[4], mv_f[4]; int valid_count; int chroma_ref_type = v->cur_field_type; int v_edge_pos = s->v_edge_pos >> v->field_mode; uint8_t (*lutuv)[256]; int use_ic; if (!v->field_mode && !v->s.last_picture.f.data[0]) return; if (s->flags & CODEC_FLAG_GRAY) return; for (k = 0; k < 4; k++) { mvx[k] = s->mv[dir][k][0]; mvy[k] = s->mv[dir][k][1]; intra[k] = v->mb_type[0][s->block_index[k]]; if (v->field_mode) mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off]; } /* calculate chroma MV vector from four luma MVs */ if (!v->field_mode || (v->field_mode && !v->numref)) { valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty); chroma_ref_type = v->reffield; if (!valid_count) { s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0; s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0; v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0; return; //no need to do MC for intra blocks } } else { int dominant = 0; if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2) dominant = 1; valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty); if (dominant) chroma_ref_type = !v->cur_field_type; } if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0]) return; s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx; s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty; uvmx = (tx + ((tx & 3) == 3)) >> 1; uvmy = (ty + ((ty & 3) == 3)) >> 1; v->luma_mv[s->mb_x][0] = uvmx; v->luma_mv[s->mb_x][1] = uvmy; if (v->fastuvmc) { uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1)); uvmy = uvmy + ((uvmy < 0) ? 
(uvmy & 1) : -(uvmy & 1)); } // Field conversion bias if (v->cur_field_type != chroma_ref_type) uvmy += 2 - 4 * chroma_ref_type; uvsrc_x = s->mb_x * 8 + (uvmx >> 2); uvsrc_y = s->mb_y * 8 + (uvmy >> 2); if (v->profile != PROFILE_ADVANCED) { uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8); uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8); } else { uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1); uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1); } if (!dir) { if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) { srcU = s->current_picture.f.data[1]; srcV = s->current_picture.f.data[2]; lutuv = v->curr_lutuv; use_ic = v->curr_use_ic; } else { srcU = s->last_picture.f.data[1]; srcV = s->last_picture.f.data[2]; lutuv = v->last_lutuv; use_ic = v->last_use_ic; } } else { srcU = s->next_picture.f.data[1]; srcV = s->next_picture.f.data[2]; lutuv = v->next_lutuv; use_ic = v->next_use_ic; } if (!srcU) { av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n"); return; } srcU += uvsrc_y * s->uvlinesize + uvsrc_x; srcV += uvsrc_y * s->uvlinesize + uvsrc_x; if (v->field_mode) { if (chroma_ref_type) { srcU += s->current_picture_ptr->f.linesize[1]; srcV += s->current_picture_ptr->f.linesize[2]; } } if (v->rangeredfrm || use_ic || s->h_edge_pos < 18 || v_edge_pos < 18 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) { s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize, s->uvlinesize, 8 + 1, 8 + 1, uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1); s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, s->uvlinesize, 8 + 1, 8 + 1, uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1); srcU = s->edge_emu_buffer; srcV = s->edge_emu_buffer + 16; /* if we deal with range reduction we need to scale source blocks */ if (v->rangeredfrm) { int i, j; uint8_t *src, *src2; src = srcU; src2 = srcV; for (j = 0; j < 9; j++) { for (i = 0; i < 9; i++) { src[i] = ((src[i] - 128) >> 1) + 128; src2[i] = ((src2[i] - 128) >> 1) + 128; } src += s->uvlinesize; src2 += s->uvlinesize; } } /* if we deal with intensity compensation we need to scale source blocks */ if (use_ic) { int i, j; uint8_t *src, *src2; src = srcU; src2 = srcV; for (j = 0; j < 9; j++) { int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1); for (i = 0; i < 9; i++) { src[i] = lutuv[f][src[i]]; src2[i] = lutuv[f][src2[i]]; } src += s->uvlinesize; src2 += s->uvlinesize; } } } /* Chroma MC always uses qpel bilinear */ uvmx = (uvmx & 3) << 1; uvmy = (uvmy & 3) << 1; if (!v->rnd) { h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy); h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy); } else { v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy); v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy); } } | 3,670 |
1 | void palette8torgb24(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette) { unsigned i; /* writes 1 byte o much and might cause alignment issues on some architectures? for(i=0; i<num_pixels; i++) ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ]; */ for(i=0; i<num_pixels; i++) { //FIXME slow? dst[0]= palette[ src[i]*4+2 ]; dst[1]= palette[ src[i]*4+1 ]; dst[2]= palette[ src[i]*4+0 ]; dst+= 3; } } | 3,671 |