| id (int32, 0–27.3k) | func (string, 26–142k chars) | target (bool, 2 classes) | project (string, 2 values: FFmpeg, qemu) | commit_id (string, 40 chars) |
---|---|---|---|---|
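Each record below is one flattened row: the id, the C source of `func`, then `| target |`, the project name, and the fixing `commit_id`, separated by `|` lines. As a rough C sketch of one row's shape (the struct and field names are illustrative assumptions, not part of the dataset):

```c
#include <stdbool.h>
/* Illustrative in-memory shape of one dataset row; names are assumptions. */
struct defect_row {
    int         id;            /* int32, 0 .. ~27.3k */
    const char *func;          /* one C function, 26 .. ~142k chars */
    bool        target;        /* true = labeled vulnerable */
    const char *project;       /* "FFmpeg" or "qemu" */
    char        commit_id[41]; /* 40-char hex hash + NUL */
};
```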
13,708 |
static int rtp_write_packet(AVFormatContext *s1, AVPacket *pkt)
{
RTPMuxContext *s = s1->priv_data;
AVStream *st = s1->streams[0];
int rtcp_bytes;
    int size = pkt->size;
av_dlog(s1, "%d: write len=%d\n", pkt->stream_index, size);
rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
RTCP_TX_RATIO_DEN;
if ((s->first_packet || ((rtcp_bytes >= RTCP_SR_SIZE) &&
(ff_ntp_time() - s->last_rtcp_ntp_time > 5000000))) &&
!(s->flags & FF_RTP_FLAG_SKIP_RTCP)) {
rtcp_send_sr(s1, ff_ntp_time(), 0);
s->last_octet_count = s->octet_count;
s->first_packet = 0;
}
s->cur_timestamp = s->base_timestamp + pkt->pts;
switch(st->codec->codec_id) {
case AV_CODEC_ID_PCM_MULAW:
case AV_CODEC_ID_PCM_ALAW:
case AV_CODEC_ID_PCM_U8:
case AV_CODEC_ID_PCM_S8:
return rtp_send_samples(s1, pkt->data, size, 8 * st->codec->channels);
case AV_CODEC_ID_PCM_U16BE:
case AV_CODEC_ID_PCM_U16LE:
case AV_CODEC_ID_PCM_S16BE:
case AV_CODEC_ID_PCM_S16LE:
return rtp_send_samples(s1, pkt->data, size, 16 * st->codec->channels);
case AV_CODEC_ID_ADPCM_G722:
/* The actual sample size is half a byte per sample, but since the
* stream clock rate is 8000 Hz while the sample rate is 16000 Hz,
* the correct parameter for send_samples_bits is 8 bits per stream
* clock. */
return rtp_send_samples(s1, pkt->data, size, 8 * st->codec->channels);
case AV_CODEC_ID_ADPCM_G726:
return rtp_send_samples(s1, pkt->data, size,
st->codec->bits_per_coded_sample * st->codec->channels);
case AV_CODEC_ID_MP2:
case AV_CODEC_ID_MP3:
rtp_send_mpegaudio(s1, pkt->data, size);
break;
case AV_CODEC_ID_MPEG1VIDEO:
case AV_CODEC_ID_MPEG2VIDEO:
ff_rtp_send_mpegvideo(s1, pkt->data, size);
break;
case AV_CODEC_ID_AAC:
if (s->flags & FF_RTP_FLAG_MP4A_LATM)
ff_rtp_send_latm(s1, pkt->data, size);
else
ff_rtp_send_aac(s1, pkt->data, size);
break;
case AV_CODEC_ID_AMR_NB:
case AV_CODEC_ID_AMR_WB:
ff_rtp_send_amr(s1, pkt->data, size);
break;
case AV_CODEC_ID_MPEG2TS:
rtp_send_mpegts_raw(s1, pkt->data, size);
break;
case AV_CODEC_ID_H264:
ff_rtp_send_h264(s1, pkt->data, size);
break;
case AV_CODEC_ID_H261:
ff_rtp_send_h261(s1, pkt->data, size);
break;
case AV_CODEC_ID_H263:
if (s->flags & FF_RTP_FLAG_RFC2190) {
int mb_info_size = 0;
const uint8_t *mb_info =
av_packet_get_side_data(pkt, AV_PKT_DATA_H263_MB_INFO,
&mb_info_size);
ff_rtp_send_h263_rfc2190(s1, pkt->data, size, mb_info, mb_info_size);
break;
}
/* Fallthrough */
case AV_CODEC_ID_H263P:
ff_rtp_send_h263(s1, pkt->data, size);
break;
case AV_CODEC_ID_HEVC:
ff_rtp_send_hevc(s1, pkt->data, size);
break;
case AV_CODEC_ID_VORBIS:
case AV_CODEC_ID_THEORA:
ff_rtp_send_xiph(s1, pkt->data, size);
break;
case AV_CODEC_ID_VP8:
ff_rtp_send_vp8(s1, pkt->data, size);
break;
case AV_CODEC_ID_ILBC:
rtp_send_ilbc(s1, pkt->data, size);
break;
case AV_CODEC_ID_MJPEG:
ff_rtp_send_jpeg(s1, pkt->data, size);
break;
case AV_CODEC_ID_OPUS:
if (size > s->max_payload_size) {
av_log(s1, AV_LOG_ERROR,
"Packet size %d too large for max RTP payload size %d\n",
size, s->max_payload_size);
return AVERROR(EINVAL);
}
/* Intentional fallthrough */
default:
/* better than nothing : send the codec raw data */
rtp_send_raw(s1, pkt->data, size);
break;
}
return 0;
}
| true |
FFmpeg
|
c82bf15dca00f67a701d126e47ea9075fc9459cb
|
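A note on the record above: rtp_write_packet gates RTCP sender reports on two conditions, enough payload octets accumulated relative to the RTCP bandwidth ratio, and the previous report being at least 5 s old (ff_ntp_time() appears to be microsecond-scaled here). A hedged sketch of that gate; the constants mirror the record but should be treated as illustrative:

```c
#include <stdint.h>
#define RTCP_TX_RATIO_NUM 5     /* assumed: 0.5% of payload bytes */
#define RTCP_TX_RATIO_DEN 1000
#define RTCP_SR_SIZE      28    /* assumed minimal sender-report size */
static int should_send_sr(uint64_t octets_since_sr,
                          uint64_t ntp_now_us, uint64_t ntp_last_sr_us,
                          int first_packet)
{
    uint64_t rtcp_bytes = octets_since_sr * RTCP_TX_RATIO_NUM /
                          RTCP_TX_RATIO_DEN;
    return first_packet ||
           (rtcp_bytes >= RTCP_SR_SIZE &&
            ntp_now_us - ntp_last_sr_us > 5000000); /* at least 5 s apart */
}
```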
13,709 |
static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h)
{
int sx, sy;
int dx, dy;
int width, height;
int depth;
int notify = 0;
depth = s->get_bpp((VGAState *)s) / 8;
s->get_resolution((VGAState *)s, &width, &height);
/* extra x, y */
sx = (src % (width * depth)) / depth;
sy = (src / (width * depth));
    dx = (dst % (width * depth)) / depth;
dy = (dst / (width * depth));
/* normalize width */
w /= depth;
/* if we're doing a backward copy, we have to adjust
our x/y to be the upper left corner (instead of the lower
right corner) */
if (s->cirrus_blt_dstpitch < 0) {
sx -= (s->cirrus_blt_width / depth) - 1;
dx -= (s->cirrus_blt_width / depth) - 1;
sy -= s->cirrus_blt_height - 1;
dy -= s->cirrus_blt_height - 1;
}
/* are we in the visible portion of memory? */
if (sx >= 0 && sy >= 0 && dx >= 0 && dy >= 0 &&
(sx + w) <= width && (sy + h) <= height &&
(dx + w) <= width && (dy + h) <= height) {
notify = 1;
}
    /* make sure to only copy if it's a plain copy ROP */
if (*s->cirrus_rop != cirrus_bitblt_rop_fwd_src &&
*s->cirrus_rop != cirrus_bitblt_rop_bkwd_src)
notify = 0;
/* we have to flush all pending changes so that the copy
is generated at the appropriate moment in time */
if (notify)
vga_hw_update();
(*s->cirrus_rop) (s, s->vram_ptr +
(s->cirrus_blt_dstaddr & s->cirrus_addr_mask),
s->vram_ptr +
(s->cirrus_blt_srcaddr & s->cirrus_addr_mask),
s->cirrus_blt_dstpitch, s->cirrus_blt_srcpitch,
s->cirrus_blt_width, s->cirrus_blt_height);
if (notify)
qemu_console_copy(s->ds,
sx, sy, dx, dy,
s->cirrus_blt_width / depth,
s->cirrus_blt_height);
/* we don't have to notify the display that this portion has
changed since qemu_console_copy implies this */
if (!notify)
cirrus_invalidate_region(s, s->cirrus_blt_dstaddr,
s->cirrus_blt_dstpitch, s->cirrus_blt_width,
s->cirrus_blt_height);
}
| true |
qemu
|
d85d0d3883f5a567fa2969a0396e42e0a662b3fa
|
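The coordinate math at the top of cirrus_do_copy above recovers (x, y) from a linear VRAM byte offset by dividing out the scanline pitch (width * depth). A self-contained version of the same arithmetic:

```c
/* Linear byte offset into a width*depth-pitched framebuffer maps to
 * x = (off % pitch) / depth and y = off / pitch. */
static void offset_to_xy(int off, int width, int depth, int *x, int *y)
{
    int pitch = width * depth;   /* bytes per scanline */
    *x = (off % pitch) / depth;  /* pixel column */
    *y = off / pitch;            /* pixel row */
}
/* e.g. width=640, depth=2 (16 bpp): offset 1284 -> x=2, y=1 */
```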
13,710 |
void vnc_write(VncState *vs, const void *data, size_t len)
{
buffer_reserve(&vs->output, len);
if (buffer_empty(&vs->output)) {
qemu_set_fd_handler2(vs->csock, NULL, vnc_client_read, vnc_client_write, vs);
}
buffer_append(&vs->output, data, len);
}
| true |
qemu
|
198a0039c5fca224a77e9761e2350dd9cc102ad0
|
13,711 |
static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
const char* strmmu, CPUState *env1)
{
unsigned int i;
target_ulong mask;
for (i = 0; i < 64; i++) {
if (TTE_IS_VALID(tlb[i].tte)) {
mask = 0xffffffffffffe000ULL;
mask <<= 3 * ((tlb[i].tte >> 61) & 3);
if ((demap_addr & mask) == (tlb[i].tag & mask)) {
replace_tlb_entry(&tlb[i], 0, 0, env1);
#ifdef DEBUG_MMU
DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
dump_mmu(env1);
#endif
}
//return;
}
}
}
| true |
qemu
|
299b520cd4092be3c53f8380b81315c33927d9d3
|
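The mask computation in demap_tlb above reflects how SPARC v9 TTEs encode the page size: a 2-bit field selects 8K, 64K, 512K or 4M pages, each 8x the previous, hence the shift-by-3 per step. Standalone sketch:

```c
#include <stdint.h>
/* Derive the address mask for a TTE's page size (bits 61-62). */
static uint64_t tte_page_mask(uint64_t tte)
{
    uint64_t mask = 0xffffffffffffe000ULL;  /* 8K base page mask */
    mask <<= 3 * ((tte >> 61) & 3);         /* widen for larger pages */
    return mask;
}
```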
13,715 |
static void vda_decoder_callback(void *vda_hw_ctx,
CFDictionaryRef user_info,
OSStatus status,
uint32_t infoFlags,
CVImageBufferRef image_buffer)
{
struct vda_context *vda_ctx = vda_hw_ctx;
if (!image_buffer)
return;
if (vda_ctx->cv_pix_fmt_type != CVPixelBufferGetPixelFormatType(image_buffer))
return;
vda_ctx->cv_buffer = CVPixelBufferRetain(image_buffer);
}
| true |
FFmpeg
|
31a0ca9e75e4c91437c8681b9655a67f09b693dd
|
13,716 |
static int wc3_read_close(AVFormatContext *s)
{
Wc3DemuxContext *wc3 = s->priv_data;
av_free(wc3->palettes);
return 0;
}
| true |
FFmpeg
|
24ae353dfbe61019a86093a9c5cd15476aabef49
|
13,717 |
static int64_t cpu_get_clock_locked(void)
{
int64_t ti;
if (!timers_state.cpu_ticks_enabled) {
ti = timers_state.cpu_clock_offset;
} else {
ti = get_clock();
ti += timers_state.cpu_clock_offset;
}
return ti;
}
| true |
qemu
|
5f3e31012e334f3410e04abae7f88565df17c91a
|
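cpu_get_clock_locked above is an instance of the pausable-clock idiom: while stopped, the offset holds the frozen reading; while running, the offset is added to the live source. An illustrative reduction (not the qemu API):

```c
#include <stdint.h>
struct pclock { int enabled; int64_t offset; };
static int64_t pclock_read(const struct pclock *c, int64_t now)
{
    return c->enabled ? now + c->offset : c->offset;
}
/* pause at t:  offset = t + offset;  enabled = 0;
 * resume at t: offset = offset - t;  enabled = 1; */
```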
13,718 |
int main(void){
int i,k;
AVTreeNode *root= NULL, *node=NULL;
for(i=0; i<10000; i++){
int j= (random()%86294);
if(check(root) > 999){
av_log(NULL, AV_LOG_ERROR, "FATAL error %d\n", i);
print(root, 0);
return -1;
}
av_log(NULL, AV_LOG_ERROR, "inserting %4d\n", j);
if(!node)
node= av_mallocz(av_tree_node_size);
av_tree_insert(&root, (void*)(j+1), cmp, &node);
j= (random()%86294);
k= av_tree_find(root, (void*)(j+1), cmp, NULL);
if(k){
AVTreeNode *node2=NULL;
av_log(NULL, AV_LOG_ERROR, "removing %4d\n", j);
av_tree_insert(&root, (void*)(j+1), cmp, &node2);
k= av_tree_find(root, (void*)(j+1), cmp, NULL);
if(k)
                av_log(NULL, AV_LOG_ERROR, "removal failure %d\n", i);
}
}
return 0;
}
| true |
FFmpeg
|
eed36075645ecc3d3ef202c94badb66818114c2c
|
13,719 |
static void qpeg_decode_inter(uint8_t *src, uint8_t *dst, int size,
int stride, int width, int height,
int delta, uint8_t *ctable, uint8_t *refdata)
{
int i, j;
int code;
int filled = 0;
uint8_t *blkdata;
/* copy prev frame */
for(i = 0; i < height; i++)
memcpy(refdata + (i * width), dst + (i * stride), width);
blkdata = src - 0x86;
height--;
dst = dst + height * stride;
while(size > 0) {
code = *src++;
size--;
if(delta) {
/* motion compensation */
while((code & 0xF0) == 0xF0) {
if(delta == 1) {
int me_idx;
int me_w, me_h, me_x, me_y;
uint8_t *me_plane;
int corr, val;
/* get block size by index */
me_idx = code & 0xF;
me_w = qpeg_table_w[me_idx];
me_h = qpeg_table_h[me_idx];
/* extract motion vector */
corr = *src++;
size--;
val = corr >> 4;
if(val > 7)
val -= 16;
me_x = val;
val = corr & 0xF;
if(val > 7)
val -= 16;
me_y = val;
/* do motion compensation */
me_plane = refdata + (filled + me_x) + (height - me_y) * width;
for(j = 0; j < me_h; j++) {
for(i = 0; i < me_w; i++)
dst[filled + i - (j * stride)] = me_plane[i - (j * width)];
}
}
code = *src++;
size--;
}
}
if(code == 0xE0) /* end-of-picture code */
break;
if(code > 0xE0) { /* run code: 0xE1..0xFF */
int p;
code &= 0x1F;
p = *src++;
size--;
for(i = 0; i <= code; i++) {
dst[filled++] = p;
if(filled >= width) {
filled = 0;
dst -= stride;
height--;
}
}
} else if(code >= 0xC0) { /* copy code: 0xC0..0xDF */
code &= 0x1F;
for(i = 0; i <= code; i++) {
dst[filled++] = *src++;
if(filled >= width) {
filled = 0;
dst -= stride;
height--;
}
}
size -= code + 1;
} else if(code >= 0x80) { /* skip code: 0x80..0xBF */
int skip;
code &= 0x3F;
/* codes 0x80 and 0x81 are actually escape codes,
skip value minus constant is in the next byte */
if(!code)
skip = (*src++) + 64;
else if(code == 1)
skip = (*src++) + 320;
else
skip = code;
filled += skip;
while( filled >= width) {
filled -= width;
dst -= stride;
height--;
}
} else {
/* zero code treated as one-pixel skip */
if(code)
dst[filled++] = ctable[code & 0x7F];
else
filled++;
if(filled >= width) {
filled = 0;
dst -= stride;
height--;
}
}
}
}
| true |
FFmpeg
|
f63166f8dff65942c633adf32da9847ee1da3a47
|
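The motion-vector extraction in qpeg_decode_inter above unpacks each nibble as a 4-bit two's-complement value in [-8, 7]. The same step as a standalone helper:

```c
/* Sign-extend a 4-bit two's-complement nibble. */
static int signed_nibble(int nibble)
{
    int val = nibble & 0xF;
    if (val > 7)
        val -= 16;
    return val;
}
/* e.g. 0xF -> -1, 0x8 -> -8, 0x7 -> 7 */
```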
13,720 |
static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model)
{
unsigned int i;
const sparc_def_t *def = NULL;
char *s = strdup(cpu_model);
char *featurestr, *name = strtok(s, ",");
uint32_t plus_features = 0;
uint32_t minus_features = 0;
uint64_t iu_version;
uint32_t fpu_version, mmu_version, nwindows;
for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) {
if (strcasecmp(name, sparc_defs[i].name) == 0) {
def = &sparc_defs[i];
}
}
if (!def) {
goto error;
}
memcpy(cpu_def, def, sizeof(*def));
featurestr = strtok(NULL, ",");
while (featurestr) {
char *val;
if (featurestr[0] == '+') {
add_flagname_to_bitmaps(featurestr + 1, &plus_features);
} else if (featurestr[0] == '-') {
add_flagname_to_bitmaps(featurestr + 1, &minus_features);
} else if ((val = strchr(featurestr, '='))) {
*val = 0; val++;
if (!strcmp(featurestr, "iu_version")) {
char *err;
iu_version = strtoll(val, &err, 0);
if (!*val || *err) {
fprintf(stderr, "bad numerical value %s\n", val);
goto error;
}
cpu_def->iu_version = iu_version;
#ifdef DEBUG_FEATURES
fprintf(stderr, "iu_version %" PRIx64 "\n", iu_version);
#endif
} else if (!strcmp(featurestr, "fpu_version")) {
char *err;
fpu_version = strtol(val, &err, 0);
if (!*val || *err) {
fprintf(stderr, "bad numerical value %s\n", val);
goto error;
}
cpu_def->fpu_version = fpu_version;
#ifdef DEBUG_FEATURES
fprintf(stderr, "fpu_version %x\n", fpu_version);
#endif
} else if (!strcmp(featurestr, "mmu_version")) {
char *err;
mmu_version = strtol(val, &err, 0);
if (!*val || *err) {
fprintf(stderr, "bad numerical value %s\n", val);
goto error;
}
cpu_def->mmu_version = mmu_version;
#ifdef DEBUG_FEATURES
fprintf(stderr, "mmu_version %x\n", mmu_version);
#endif
} else if (!strcmp(featurestr, "nwindows")) {
char *err;
nwindows = strtol(val, &err, 0);
if (!*val || *err || nwindows > MAX_NWINDOWS ||
nwindows < MIN_NWINDOWS) {
fprintf(stderr, "bad numerical value %s\n", val);
goto error;
}
cpu_def->nwindows = nwindows;
#ifdef DEBUG_FEATURES
fprintf(stderr, "nwindows %d\n", nwindows);
#endif
} else {
fprintf(stderr, "unrecognized feature %s\n", featurestr);
goto error;
}
} else {
fprintf(stderr, "feature string `%s' not in format "
"(+feature|-feature|feature=xyz)\n", featurestr);
goto error;
}
featurestr = strtok(NULL, ",");
}
cpu_def->features |= plus_features;
cpu_def->features &= ~minus_features;
#ifdef DEBUG_FEATURES
print_features(stderr, fprintf, cpu_def->features, NULL);
#endif
free(s);
return 0;
error:
free(s);
return -1;
}
| true |
qemu
|
bfad67399bcca8c1afbbc93593d365044d92f7c6
|
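cpu_sparc_find_by_name above uses the common strtok-based "+feature,-feature,key=val" grammar, rejecting numeric values that are empty or carry trailing junk via the strtol endptr check. A reduced sketch; the feature handling is illustrative:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Parse a comma-separated spec of +feat, -feat and key=val entries. */
static int parse_features(char *spec)
{
    char *tok = strtok(spec, ",");
    while (tok) {
        char *val;
        if (tok[0] == '+' || tok[0] == '-') {
            printf("%s feature %s\n", tok[0] == '+' ? "add" : "del", tok + 1);
        } else if ((val = strchr(tok, '='))) {
            char *err;
            long n;
            *val++ = 0;
            n = strtol(val, &err, 0);
            if (!*val || *err)      /* empty value or trailing junk */
                return -1;
            printf("%s = %ld\n", tok, n);
        } else {
            return -1;              /* not +f, -f or k=v */
        }
        tok = strtok(NULL, ",");
    }
    return 0;
}
```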
13,721 |
void spapr_drc_reset(sPAPRDRConnector *drc)
{
trace_spapr_drc_reset(spapr_drc_index(drc));
g_free(drc->ccs);
drc->ccs = NULL;
/* immediately upon reset we can safely assume DRCs whose devices
* are pending removal can be safely removed.
*/
if (drc->awaiting_release) {
spapr_drc_release(drc);
}
drc->awaiting_allocation = false;
if (drc->dev) {
/* A device present at reset is coldplugged */
drc->isolation_state = SPAPR_DR_ISOLATION_STATE_UNISOLATED;
if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_USABLE;
}
drc->dr_indicator = SPAPR_DR_INDICATOR_ACTIVE;
} else {
/* Otherwise device is absent, but might be hotplugged */
drc->isolation_state = SPAPR_DR_ISOLATION_STATE_ISOLATED;
if (spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_UNUSABLE;
}
drc->dr_indicator = SPAPR_DR_INDICATOR_INACTIVE;
}
}
| true |
qemu
|
82a93a1d307064f35c363f79b04b0a0149ac53d9
|
13,722 |
static void clipper_init(MachineState *machine)
{
ram_addr_t ram_size = machine->ram_size;
const char *cpu_model = machine->cpu_model;
const char *kernel_filename = machine->kernel_filename;
const char *kernel_cmdline = machine->kernel_cmdline;
const char *initrd_filename = machine->initrd_filename;
AlphaCPU *cpus[4];
PCIBus *pci_bus;
ISABus *isa_bus;
qemu_irq rtc_irq;
long size, i;
const char *palcode_filename;
uint64_t palcode_entry, palcode_low, palcode_high;
uint64_t kernel_entry, kernel_low, kernel_high;
/* Create up to 4 cpus. */
memset(cpus, 0, sizeof(cpus));
for (i = 0; i < smp_cpus; ++i) {
cpus[i] = cpu_alpha_init(cpu_model ? cpu_model : "ev67");
}
cpus[0]->env.trap_arg0 = ram_size;
cpus[0]->env.trap_arg1 = 0;
cpus[0]->env.trap_arg2 = smp_cpus;
/* Init the chipset. */
pci_bus = typhoon_init(ram_size, &isa_bus, &rtc_irq, cpus,
clipper_pci_map_irq);
/* Since we have an SRM-compatible PALcode, use the SRM epoch. */
rtc_init(isa_bus, 1900, rtc_irq);
pit_init(isa_bus, 0x40, 0, NULL);
isa_create_simple(isa_bus, "i8042");
/* VGA setup. Don't bother loading the bios. */
pci_vga_init(pci_bus);
/* Serial code setup. */
serial_hds_isa_init(isa_bus, MAX_SERIAL_PORTS);
/* Network setup. e1000 is good enough, failing Tulip support. */
for (i = 0; i < nb_nics; i++) {
pci_nic_init_nofail(&nd_table[i], pci_bus, "e1000", NULL);
}
/* IDE disk setup. */
{
DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS];
ide_drive_get(hd, ARRAY_SIZE(hd));
pci_cmd646_ide_init(pci_bus, hd, 0);
}
/* Load PALcode. Given that this is not "real" cpu palcode,
but one explicitly written for the emulation, we might as
       well load it directly from an ELF image. */
palcode_filename = (bios_name ? bios_name : "palcode-clipper");
palcode_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, palcode_filename);
if (palcode_filename == NULL) {
hw_error("no palcode provided\n");
exit(1);
}
size = load_elf(palcode_filename, cpu_alpha_superpage_to_phys,
NULL, &palcode_entry, &palcode_low, &palcode_high,
0, EM_ALPHA, 0);
if (size < 0) {
hw_error("could not load palcode '%s'\n", palcode_filename);
exit(1);
}
/* Start all cpus at the PALcode RESET entry point. */
for (i = 0; i < smp_cpus; ++i) {
cpus[i]->env.pal_mode = 1;
cpus[i]->env.pc = palcode_entry;
cpus[i]->env.palbr = palcode_entry;
}
/* Load a kernel. */
if (kernel_filename) {
uint64_t param_offset;
size = load_elf(kernel_filename, cpu_alpha_superpage_to_phys,
NULL, &kernel_entry, &kernel_low, &kernel_high,
0, EM_ALPHA, 0);
if (size < 0) {
hw_error("could not load kernel '%s'\n", kernel_filename);
exit(1);
}
cpus[0]->env.trap_arg1 = kernel_entry;
param_offset = kernel_low - 0x6000;
if (kernel_cmdline) {
pstrcpy_targphys("cmdline", param_offset, 0x100, kernel_cmdline);
}
if (initrd_filename) {
long initrd_base, initrd_size;
initrd_size = get_image_size(initrd_filename);
if (initrd_size < 0) {
hw_error("could not load initial ram disk '%s'\n",
initrd_filename);
exit(1);
}
/* Put the initrd image as high in memory as possible. */
initrd_base = (ram_size - initrd_size) & TARGET_PAGE_MASK;
load_image_targphys(initrd_filename, initrd_base,
ram_size - initrd_base);
address_space_stq(&address_space_memory, param_offset + 0x100,
initrd_base + 0xfffffc0000000000ULL,
MEMTXATTRS_UNSPECIFIED,
NULL);
address_space_stq(&address_space_memory, param_offset + 0x108,
initrd_size, MEMTXATTRS_UNSPECIFIED, NULL);
}
}
}
| true |
qemu
|
c18f855697ab6b64a895f37cf47fd7061ce9e798
|
13,723 |
ogm_dshow_header(AVFormatContext *s, int idx)
{
struct ogg *ogg = s->priv_data;
struct ogg_stream *os = ogg->streams + idx;
AVStream *st = s->streams[idx];
uint8_t *p = os->buf + os->pstart;
uint32_t t;
if(!(*p & 1))
return 0;
if(*p != 1)
return 1;
t = AV_RL32(p + 96);
if(t == 0x05589f80){
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, AV_RL32(p + 68));
avpriv_set_pts_info(st, 64, AV_RL64(p + 164), 10000000);
st->codec->width = AV_RL32(p + 176);
st->codec->height = AV_RL32(p + 180);
} else if(t == 0x05589f81){
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = ff_codec_get_id(ff_codec_wav_tags, AV_RL16(p + 124));
st->codec->channels = AV_RL16(p + 126);
st->codec->sample_rate = AV_RL32(p + 128);
st->codec->bit_rate = AV_RL32(p + 132) * 8;
}
return 1;
}
| true |
FFmpeg
|
6359872877269fa0c1874587676e952d30f9b79f
|
13,724 |
void qemu_bh_schedule(QEMUBH *bh)
{
AioContext *ctx;
if (bh->scheduled)
return;
ctx = bh->ctx;
bh->idle = 0;
/* Make sure that:
* 1. idle & any writes needed by the callback are done before the
* locations are read in the aio_bh_poll.
* 2. ctx is loaded before scheduled is set and the callback has a chance
* to execute.
*/
smp_mb();
bh->scheduled = 1;
aio_notify(ctx);
}
| true |
qemu
|
e8d3b1a25f284cdf9705b7cf0412281cc9ee3a36
|
13,727 |
static void emulate_spapr_hypercall(CPUPPCState *env)
{
env->gpr[3] = spapr_hypercall(env, env->gpr[3], &env->gpr[4]);
}
| true |
qemu
|
efcb9383b974114e5f682e531346006f8f2466c0
|
13,729 |
static struct pathelem *add_dir_maybe(struct pathelem *path)
{
DIR *dir;
if ((dir = opendir(path->pathname)) != NULL) {
struct dirent *dirent;
while ((dirent = readdir(dir)) != NULL) {
if (!streq(dirent->d_name,".") && !streq(dirent->d_name,"..")){
path = add_entry(path, dirent->d_name);
}
}
closedir(dir);
}
return path;
}
| true |
qemu
|
2296f194dfde4c0a54f249d3fdb8c8ca21dc611b
|
13,730 |
static void test_qemu_strtoul_whitespace(void)
{
const char *str = " \t ";
char f = 'X';
const char *endptr = &f;
unsigned long res = 999;
int err;
err = qemu_strtoul(str, &endptr, 0, &res);
g_assert_cmpint(err, ==, 0);
g_assert_cmpint(res, ==, 0);
g_assert(endptr == str);
}
| true |
qemu
|
47d4be12c3997343e436c6cca89aefbbbeb70863
|
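The test above pins down qemu_strtoul's no-conversion case, which matches standard strtoul(): on whitespace-only input nothing is consumed, 0 is returned, and endptr points back at the start of the string:

```c
#include <assert.h>
#include <stdlib.h>
int main(void)
{
    const char *str = " \t ";
    char *end;
    unsigned long res = strtoul(str, &end, 0);
    assert(res == 0 && end == str);  /* no digits => no conversion */
    return 0;
}
```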
13,731 |
static int vmd_read_packet(AVFormatContext *s,
AVPacket *pkt)
{
VmdDemuxContext *vmd = (VmdDemuxContext *)s->priv_data;
ByteIOContext *pb = &s->pb;
int ret = 0;
vmd_frame_t *frame;
if (vmd->current_frame >= vmd->frame_count)
return -EIO;
frame = &vmd->frame_table[vmd->current_frame];
/* position the stream (will probably be there already) */
url_fseek(pb, frame->frame_offset, SEEK_SET);
if (av_new_packet(pkt, frame->frame_size + BYTES_PER_FRAME_RECORD))
return AVERROR_NOMEM;
memcpy(pkt->data, frame->frame_record, BYTES_PER_FRAME_RECORD);
ret = get_buffer(pb, pkt->data + BYTES_PER_FRAME_RECORD,
frame->frame_size);
if (ret != frame->frame_size)
ret = -EIO;
pkt->stream_index = frame->stream_index;
pkt->pts = frame->pts;
vmd->current_frame++;
return ret;
}
| true |
FFmpeg
|
23fe14bb20888038b91e62b16d50fe0b75043a10
|
13,732 |
int qemu_add_balloon_handler(QEMUBalloonEvent *event_func,
QEMUBalloonStatus *stat_func, void *opaque)
{
if (balloon_event_fn || balloon_stat_fn || balloon_opaque) {
/* We're already registered one balloon handler. How many can
* a guest really have?
*/
error_report("Another balloon device already registered");
return -1;
}
balloon_event_fn = event_func;
balloon_stat_fn = stat_func;
balloon_opaque = opaque;
return 0;
}
| true |
qemu
|
46abb8124006887d071921c5e657eeec3c50a9e2
|
13,733 |
static int parse_object_segment(AVCodecContext *avctx,
                                const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;
    PGSSubObject *object;
    uint8_t sequence_desc;
    unsigned int rle_bitmap_len, width, height;
    int id;
    if (buf_size <= 4)
        return AVERROR_INVALIDDATA;
    buf_size -= 4;
    id = bytestream_get_be16(&buf);
    object = find_object(id, &ctx->objects);
    if (!object) {
        if (ctx->objects.count >= MAX_EPOCH_OBJECTS) {
            av_log(avctx, AV_LOG_ERROR, "Too many objects in epoch\n");
            return AVERROR_INVALIDDATA;
        }
        object = &ctx->objects.object[ctx->objects.count++];
        object->id = id;
    }
    /* skip object version number */
    buf += 1;
    /* Read the Sequence Description to determine if start of RLE data or appended to previous RLE */
    sequence_desc = bytestream_get_byte(&buf);
    if (!(sequence_desc & 0x80)) {
        /* Additional RLE data */
        if (buf_size > object->rle_remaining_len)
            return AVERROR_INVALIDDATA;
        memcpy(object->rle + object->rle_data_len, buf, buf_size);
        object->rle_data_len += buf_size;
        object->rle_remaining_len -= buf_size;
        return 0;
    }
    if (buf_size <= 7)
        return AVERROR_INVALIDDATA;
    buf_size -= 7;
    /* Decode rle bitmap length, stored size includes width/height data */
    rle_bitmap_len = bytestream_get_be24(&buf) - 2*2;
    /* Get bitmap dimensions from data */
    width  = bytestream_get_be16(&buf);
    height = bytestream_get_be16(&buf);
    /* Make sure the bitmap is not too large */
    if (avctx->width < width || avctx->height < height) {
        av_log(avctx, AV_LOG_ERROR, "Bitmap dimensions larger than video.\n");
        return AVERROR_INVALIDDATA;
    }
    object->w = width;
    object->h = height;
    av_fast_malloc(&object->rle, &object->rle_buffer_size, rle_bitmap_len);
    if (!object->rle)
        return AVERROR(ENOMEM);
    memcpy(object->rle, buf, buf_size);
    object->rle_data_len = buf_size;
    object->rle_remaining_len = rle_bitmap_len - buf_size;
    return 0;
}
FFmpeg
|
d98e6c5d5d80c1dfe0c30f2e73d41a3aea0b920d
|
13,735 |
static void pci_error_message(Monitor *mon)
{
monitor_printf(mon, "PCI devices not supported\n");
}
| true |
qemu
|
04e00c92ef75629a241ebc50537f75de0867928d
|
13,737 |
static void usbredir_interface_info(void *priv,
struct usb_redir_interface_info_header *interface_info)
{
USBRedirDevice *dev = priv;
dev->interface_info = *interface_info;
/*
* If we receive interface info after the device has already been
* connected (ie on a set_config), re-check the filter.
*/
if (qemu_timer_pending(dev->attach_timer) || dev->dev.attached) {
if (usbredir_check_filter(dev)) {
ERROR("Device no longer matches filter after interface info "
"change, disconnecting!\n");
}
}
}
| true |
qemu
|
b2d1fe67d09d2b6c7da647fbcea6ca0148c206d3
|
13,741 |
static void update_irq(struct HPETTimer *timer)
{
qemu_irq irq;
int route;
if (timer->tn <= 1 && hpet_in_legacy_mode()) {
/* if LegacyReplacementRoute bit is set, HPET specification requires
* timer0 be routed to IRQ0 in NON-APIC or IRQ2 in the I/O APIC,
* timer1 be routed to IRQ8 in NON-APIC or IRQ8 in the I/O APIC.
*/
        if (timer->tn == 0) {
            irq = timer->state->irqs[0];
        } else {
            irq = timer->state->irqs[8];
        }
    } else {
        route = timer_int_route(timer);
        irq = timer->state->irqs[route];
}
if (timer_enabled(timer) && hpet_enabled()) {
qemu_irq_pulse(irq);
}
}
| true |
qemu
|
27bb0b2d6f80f058bdb6fcc8fcdfa69b0c8a6d71
|
13,742 |
void virtio_reset(void *opaque)
{
VirtIODevice *vdev = opaque;
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
int i;
virtio_set_status(vdev, 0);
if (current_cpu) {
/* Guest initiated reset */
vdev->device_endian = virtio_current_cpu_endian();
} else {
/* System reset */
vdev->device_endian = virtio_default_endian();
}
if (k->reset) {
k->reset(vdev);
}
vdev->guest_features = 0;
vdev->queue_sel = 0;
vdev->status = 0;
vdev->isr = 0;
vdev->config_vector = VIRTIO_NO_VECTOR;
virtio_notify_vector(vdev, vdev->config_vector);
for(i = 0; i < VIRTIO_QUEUE_MAX; i++) {
vdev->vq[i].vring.desc = 0;
vdev->vq[i].vring.avail = 0;
vdev->vq[i].vring.used = 0;
vdev->vq[i].last_avail_idx = 0;
vdev->vq[i].shadow_avail_idx = 0;
vdev->vq[i].used_idx = 0;
virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
vdev->vq[i].signalled_used = 0;
vdev->vq[i].signalled_used_valid = false;
vdev->vq[i].notification = true;
vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
vdev->vq[i].inuse = 0;
}
}
| true |
qemu
|
f5ed36635d8fa73feb66fe12b3b9c2ed90a1adbe
|
13,744 |
int ffurl_get_short_seek(URLContext *h)
{
if (!h->prot->url_get_short_seek)
return AVERROR(ENOSYS);
return h->prot->url_get_short_seek(h);
}
| false |
FFmpeg
|
be4dfbf7b71e44a53ca8da882a081e35ea134c83
|
13,745 |
static uint64_t vmxnet3_get_command_status(VMXNET3State *s)
{
uint64_t ret;
switch (s->last_command) {
case VMXNET3_CMD_ACTIVATE_DEV:
ret = (s->device_active) ? 0 : -1;
VMW_CFPRN("Device active: %" PRIx64, ret);
break;
case VMXNET3_CMD_RESET_DEV:
case VMXNET3_CMD_QUIESCE_DEV:
case VMXNET3_CMD_GET_QUEUE_STATUS:
ret = 0;
break;
case VMXNET3_CMD_GET_LINK:
ret = s->link_status_and_speed;
VMW_CFPRN("Link and speed: %" PRIx64, ret);
break;
case VMXNET3_CMD_GET_PERM_MAC_LO:
ret = vmxnet3_get_mac_low(&s->perm_mac);
break;
case VMXNET3_CMD_GET_PERM_MAC_HI:
ret = vmxnet3_get_mac_high(&s->perm_mac);
break;
case VMXNET3_CMD_GET_CONF_INTR:
ret = vmxnet3_get_interrupt_config(s);
break;
case VMXNET3_CMD_GET_ADAPTIVE_RING_INFO:
ret = VMXNET3_DISABLE_ADAPTIVE_RING;
break;
default:
VMW_WRPRN("Received request for unknown command: %x", s->last_command);
ret = -1;
break;
}
return ret;
}
| true |
qemu
|
fde58177aa112da377bbe1af71e0ec3ee7750196
|
13,746 |
static int mxpeg_decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
MXpegDecodeContext *s = avctx->priv_data;
MJpegDecodeContext *jpg = &s->jpg;
const uint8_t *buf_end, *buf_ptr;
const uint8_t *unescaped_buf_ptr;
int unescaped_buf_size;
int start_code;
int ret;
buf_ptr = buf;
buf_end = buf + buf_size;
jpg->got_picture = 0;
s->got_mxm_bitmask = 0;
while (buf_ptr < buf_end) {
start_code = ff_mjpeg_find_marker(jpg, &buf_ptr, buf_end,
&unescaped_buf_ptr, &unescaped_buf_size);
if (start_code < 0)
goto the_end;
{
init_get_bits(&jpg->gb, unescaped_buf_ptr, unescaped_buf_size*8);
if (start_code >= APP0 && start_code <= APP15) {
mxpeg_decode_app(s, unescaped_buf_ptr, unescaped_buf_size);
}
switch (start_code) {
case SOI:
if (jpg->got_picture) //emulating EOI
goto the_end;
break;
case EOI:
goto the_end;
case DQT:
ret = ff_mjpeg_decode_dqt(jpg);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR,
"quantization table decode error\n");
return ret;
}
break;
case DHT:
ret = ff_mjpeg_decode_dht(jpg);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR,
"huffman table decode error\n");
return ret;
}
break;
case COM:
ret = mxpeg_decode_com(s, unescaped_buf_ptr,
unescaped_buf_size);
if (ret < 0)
return ret;
break;
case SOF0:
s->got_sof_data = 0;
ret = ff_mjpeg_decode_sof(jpg);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR,
"SOF data decode error\n");
return ret;
}
if (jpg->interlaced) {
av_log(avctx, AV_LOG_ERROR,
"Interlaced mode not supported in MxPEG\n");
return AVERROR(EINVAL);
}
s->got_sof_data = 1;
break;
case SOS:
if (!s->got_sof_data) {
av_log(avctx, AV_LOG_WARNING,
"Can not process SOS without SOF data, skipping\n");
break;
}
if (!jpg->got_picture) {
if (jpg->first_picture) {
av_log(avctx, AV_LOG_WARNING,
"First picture has no SOF, skipping\n");
break;
}
if (!s->got_mxm_bitmask){
av_log(avctx, AV_LOG_WARNING,
"Non-key frame has no MXM, skipping\n");
break;
}
/* use stored SOF data to allocate current picture */
av_frame_unref(jpg->picture_ptr);
if ((ret = ff_get_buffer(avctx, jpg->picture_ptr,
AV_GET_BUFFER_FLAG_REF)) < 0)
return ret;
jpg->picture_ptr->pict_type = AV_PICTURE_TYPE_P;
jpg->picture_ptr->key_frame = 0;
jpg->got_picture = 1;
} else {
jpg->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
jpg->picture_ptr->key_frame = 1;
}
if (s->got_mxm_bitmask) {
AVFrame *reference_ptr = s->picture[s->picture_index ^ 1];
if (mxpeg_check_dimensions(s, jpg, reference_ptr) < 0)
break;
/* allocate dummy reference picture if needed */
if (!reference_ptr->data[0] &&
(ret = ff_get_buffer(avctx, reference_ptr,
AV_GET_BUFFER_FLAG_REF)) < 0)
return ret;
ret = ff_mjpeg_decode_sos(jpg, s->mxm_bitmask, reference_ptr);
if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
return ret;
} else {
ret = ff_mjpeg_decode_sos(jpg, NULL, NULL);
if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
return ret;
}
break;
}
buf_ptr += (get_bits_count(&jpg->gb)+7) >> 3;
}
}
the_end:
if (jpg->got_picture) {
int ret = av_frame_ref(data, jpg->picture_ptr);
if (ret < 0)
return ret;
*got_frame = 1;
s->picture_index ^= 1;
jpg->picture_ptr = s->picture[s->picture_index];
if (!s->has_complete_frame) {
if (!s->got_mxm_bitmask)
s->has_complete_frame = 1;
else
*got_frame = 0;
}
}
return buf_ptr - buf;
}
| true |
FFmpeg
|
2884688bd51a808ccda3c0e13367619cd79e0579
|
13,747 |
int main(int argc, char* argv[])
{
FILE *f[2];
uint8_t *buf[2], *plane[2][3];
int *temp;
uint64_t ssd[3] = {0,0,0};
double ssim[3] = {0,0,0};
int frame_size, w, h;
int frames, seek;
int i;
if( argc<4 || 2 != sscanf(argv[3], "%dx%d", &w, &h) )
{
printf("tiny_ssim <file1.yuv> <file2.yuv> <width>x<height> [<seek>]\n");
return -1;
}
f[0] = fopen(argv[1], "rb");
f[1] = fopen(argv[2], "rb");
sscanf(argv[3], "%dx%d", &w, &h);
frame_size = w*h*3/2;
for( i=0; i<2; i++ )
{
buf[i] = malloc(frame_size);
plane[i][0] = buf[i];
plane[i][1] = plane[i][0] + w*h;
plane[i][2] = plane[i][1] + w*h/4;
}
temp = malloc((2*w+12)*sizeof(*temp));
seek = argc<5 ? 0 : atoi(argv[4]);
fseek(f[seek<0], seek < 0 ? -seek : seek, SEEK_SET);
for( frames=0;; frames++ )
{
uint64_t ssd_one[3];
double ssim_one[3];
if( fread(buf[0], frame_size, 1, f[0]) != 1) break;
if( fread(buf[1], frame_size, 1, f[1]) != 1) break;
for( i=0; i<3; i++ )
{
ssd_one[i] = ssd_plane ( plane[0][i], plane[1][i], w*h>>2*!!i );
ssim_one[i] = ssim_plane( plane[0][i], w>>!!i,
plane[1][i], w>>!!i,
w>>!!i, h>>!!i, temp, NULL );
ssd[i] += ssd_one[i];
ssim[i] += ssim_one[i];
}
printf("Frame %d | ", frames);
print_results(ssd_one, ssim_one, 1, w, h);
printf(" \r");
fflush(stdout);
}
if( !frames ) return 0;
printf("Total %d frames | ", frames);
print_results(ssd, ssim, frames, w, h);
printf("\n");
return 0;
}
| true |
FFmpeg
|
a69e16a97e40f3841766347bd0c0ba2c672c51ca
|
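The buffer setup in tiny_ssim's main above assumes planar YUV 4:2:0, where one frame is w*h*3/2 bytes: a full-resolution luma plane followed by two quarter-size chroma planes. The same layout as a helper:

```c
#include <stdint.h>
/* Split a contiguous YUV 4:2:0 frame into its three plane pointers. */
static void yuv420_planes(uint8_t *frame, int w, int h, uint8_t *plane[3])
{
    plane[0] = frame;                /* Y:  w * h bytes        */
    plane[1] = plane[0] + w * h;     /* U: (w/2)*(h/2) bytes   */
    plane[2] = plane[1] + w * h / 4; /* V: (w/2)*(h/2) bytes   */
}
```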
13,749 |
static void load_linux(void *fw_cfg,
                       const char *kernel_filename,
                       const char *initrd_filename,
                       const char *kernel_cmdline,
                       target_phys_addr_t max_ram_size)
{
    uint16_t protocol;
    int setup_size, kernel_size, initrd_size = 0, cmdline_size;
    uint32_t initrd_max;
    uint8_t header[8192], *setup, *kernel, *initrd_data;
    target_phys_addr_t real_addr, prot_addr, cmdline_addr, initrd_addr = 0;
    FILE *f;
    char *vmode;
    /* Align to 16 bytes as a paranoia measure */
    cmdline_size = (strlen(kernel_cmdline)+16) & ~15;
    /* load the kernel header */
    f = fopen(kernel_filename, "rb");
    if (!f || !(kernel_size = get_file_size(f)) ||
        fread(header, 1, MIN(ARRAY_SIZE(header), kernel_size), f) !=
        MIN(ARRAY_SIZE(header), kernel_size)) {
        fprintf(stderr, "qemu: could not load kernel '%s': %s\n",
                kernel_filename, strerror(errno));
        exit(1);
    }
    /* kernel protocol version */
#if 0
    fprintf(stderr, "header magic: %#x\n", ldl_p(header+0x202));
#endif
    if (ldl_p(header+0x202) == 0x53726448)
        protocol = lduw_p(header+0x206);
    else {
        /* This looks like a multiboot kernel. If it is, let's stop
           treating it like a Linux kernel. */
        if (load_multiboot(fw_cfg, f, kernel_filename, initrd_filename,
                           kernel_cmdline, kernel_size, header))
            return;
        protocol = 0;
    }
    if (protocol < 0x200 || !(header[0x211] & 0x01)) {
        /* Low kernel */
        real_addr    = 0x90000;
        cmdline_addr = 0x9a000 - cmdline_size;
        prot_addr    = 0x10000;
    } else if (protocol < 0x202) {
        /* High but ancient kernel */
        real_addr    = 0x90000;
        cmdline_addr = 0x9a000 - cmdline_size;
        prot_addr    = 0x100000;
    } else {
        /* High and recent kernel */
        real_addr    = 0x10000;
        cmdline_addr = 0x20000;
        prot_addr    = 0x100000;
    }
#if 0
    fprintf(stderr,
            "qemu: real_addr     = 0x" TARGET_FMT_plx "\n"
            "qemu: cmdline_addr  = 0x" TARGET_FMT_plx "\n"
            "qemu: prot_addr     = 0x" TARGET_FMT_plx "\n",
            real_addr,
            cmdline_addr,
            prot_addr);
#endif
    /* highest address for loading the initrd */
    if (protocol >= 0x203)
        initrd_max = ldl_p(header+0x22c);
    else
        initrd_max = 0x37ffffff;
    if (initrd_max >= max_ram_size-ACPI_DATA_SIZE)
        initrd_max = max_ram_size-ACPI_DATA_SIZE-1;
    fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(kernel_cmdline)+1);
    fw_cfg_add_bytes(fw_cfg, FW_CFG_CMDLINE_DATA,
                     (uint8_t*)strdup(kernel_cmdline),
                     strlen(kernel_cmdline)+1);
    if (protocol >= 0x202) {
        stl_p(header+0x228, cmdline_addr);
    } else {
        stw_p(header+0x20, 0xA33F);
        stw_p(header+0x22, cmdline_addr-real_addr);
    }
    /* handle vga= parameter */
    vmode = strstr(kernel_cmdline, "vga=");
    if (vmode) {
        unsigned int video_mode;
        /* skip "vga=" */
        vmode += 4;
        if (!strncmp(vmode, "normal", 6)) {
            video_mode = 0xffff;
        } else if (!strncmp(vmode, "ext", 3)) {
            video_mode = 0xfffe;
        } else if (!strncmp(vmode, "ask", 3)) {
            video_mode = 0xfffd;
        } else {
            video_mode = strtol(vmode, NULL, 0);
        }
        stw_p(header+0x1fa, video_mode);
    }
    /* loader type */
    /* High nybble = B reserved for Qemu; low nybble is revision number.
       If this code is substantially changed, you may want to consider
       incrementing the revision. */
    if (protocol >= 0x200)
        header[0x210] = 0xB0;
    /* heap */
    if (protocol >= 0x201) {
        header[0x211] |= 0x80; /* CAN_USE_HEAP */
        stw_p(header+0x224, cmdline_addr-real_addr-0x200);
    }
    /* load initrd */
    if (initrd_filename) {
        if (protocol < 0x200) {
            fprintf(stderr, "qemu: linux kernel too old to load a ram disk\n");
            exit(1);
        }
        initrd_size = get_image_size(initrd_filename);
        initrd_addr = (initrd_max-initrd_size) & ~4095;
        initrd_data = qemu_malloc(initrd_size);
        load_image(initrd_filename, initrd_data);
        fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr);
        fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size);
        fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data, initrd_size);
        stl_p(header+0x218, initrd_addr);
        stl_p(header+0x21c, initrd_size);
    }
    /* load kernel and setup */
    setup_size = header[0x1f1];
    if (setup_size == 0)
        setup_size = 4;
    setup_size = (setup_size+1)*512;
    kernel_size -= setup_size;
    setup  = qemu_malloc(setup_size);
    kernel = qemu_malloc(kernel_size);
    fseek(f, 0, SEEK_SET);
    if (fread(setup, 1, setup_size, f) != setup_size) {
        fprintf(stderr, "fread() failed\n");
        exit(1);
    }
    if (fread(kernel, 1, kernel_size, f) != kernel_size) {
        fprintf(stderr, "fread() failed\n");
        exit(1);
    }
    fclose(f);
    memcpy(setup, header, MIN(sizeof(header), setup_size));
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size);
    fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size);
    fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr);
    fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size);
    fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, setup, setup_size);
    option_rom[nb_option_roms] = "linuxboot.bin";
    nb_option_roms++;
}
qemu
|
d6fa4b77fb8f27ac84cf23fb1e15016673d98a47
|
13,750 |
target_ulong spapr_hypercall(CPUPPCState *env, target_ulong opcode,
target_ulong *args)
{
if (msr_pr) {
hcall_dprintf("Hypercall made with MSR[PR]=1\n");
return H_PRIVILEGE;
}
if ((opcode <= MAX_HCALL_OPCODE)
&& ((opcode & 0x3) == 0)) {
spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];
if (fn) {
return fn(env, spapr, opcode, args);
}
} else if ((opcode >= KVMPPC_HCALL_BASE) &&
(opcode <= KVMPPC_HCALL_MAX)) {
spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
if (fn) {
return fn(env, spapr, opcode, args);
}
}
hcall_dprintf("Unimplemented hcall 0x" TARGET_FMT_lx "\n", opcode);
return H_FUNCTION;
}
| true |
qemu
|
efcb9383b974114e5f682e531346006f8f2466c0
|
13,751 |
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq,
int err)
{
#if HAVE_THREADS
pthread_mutex_lock(&mq->lock);
mq->err_send = err;
pthread_cond_broadcast(&mq->cond);
pthread_mutex_unlock(&mq->lock);
#endif /* HAVE_THREADS */
}
| true |
FFmpeg
|
bd5c860fdbc33d19d2ff0f6d1f06de07c17560dd
|
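av_thread_message_queue_set_err_send above is the standard publish-then-broadcast pattern: mutate shared state under the mutex, then wake every waiter so each rechecks its predicate. Generic form (names are illustrative):

```c
#include <pthread.h>
struct mq {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int             err;
};
static void mq_set_error(struct mq *q, int err)
{
    pthread_mutex_lock(&q->lock);
    q->err = err;                     /* publish the state change first */
    pthread_cond_broadcast(&q->cond); /* then wake all blocked waiters  */
    pthread_mutex_unlock(&q->lock);
}
```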
13,752 |
static int piix4_pm_initfn(PCIDevice *dev)
{
PIIX4PMState *s = DO_UPCAST(PIIX4PMState, dev, dev);
uint8_t *pci_conf;
pci_conf = s->dev.config;
pci_conf[0x06] = 0x80;
pci_conf[0x07] = 0x02;
pci_conf[0x09] = 0x00;
pci_conf[0x3d] = 0x01; // interrupt pin 1
/* APM */
apm_init(dev, &s->apm, apm_ctrl_changed, s);
if (s->kvm_enabled) {
/* Mark SMM as already inited to prevent SMM from running. KVM does not
* support SMM mode. */
pci_conf[0x5B] = 0x02;
}
/* XXX: which specification is used ? The i82731AB has different
mappings */
pci_conf[0x90] = s->smb_io_base | 1;
pci_conf[0x91] = s->smb_io_base >> 8;
pci_conf[0xd2] = 0x09;
pm_smbus_init(&s->dev.qdev, &s->smb);
memory_region_set_enabled(&s->smb.io, pci_conf[0xd2] & 1);
memory_region_add_subregion(pci_address_space_io(dev),
s->smb_io_base, &s->smb.io);
memory_region_init(&s->io, "piix4-pm", 64);
memory_region_set_enabled(&s->io, false);
memory_region_add_subregion(pci_address_space_io(dev),
0, &s->io);
acpi_pm_tmr_init(&s->ar, pm_tmr_timer, &s->io);
acpi_pm1_evt_init(&s->ar, pm_tmr_timer, &s->io);
acpi_pm1_cnt_init(&s->ar, &s->io);
acpi_gpe_init(&s->ar, GPE_LEN);
s->powerdown_notifier.notify = piix4_pm_powerdown_req;
qemu_register_powerdown_notifier(&s->powerdown_notifier);
s->machine_ready.notify = piix4_pm_machine_ready;
qemu_add_machine_init_done_notifier(&s->machine_ready);
qemu_register_reset(piix4_reset, s);
piix4_acpi_system_hot_add_init(pci_address_space_io(dev), dev->bus, s);
return 0;
}
| false |
qemu
|
560e63965232e37d1916a447125cf91c18a96930
|
13,755 |
static AVIndexEntry *mov_find_next_sample(AVFormatContext *s, AVStream **st)
{
AVIndexEntry *sample = NULL;
int64_t best_dts = INT64_MAX;
int i;
for (i = 0; i < s->nb_streams; i++) {
AVStream *avst = s->streams[i];
MOVStreamContext *msc = avst->priv_data;
if (msc->pb && msc->current_sample < avst->nb_index_entries) {
AVIndexEntry *current_sample = &avst->index_entries[msc->current_sample];
int64_t dts;
if (msc->ctts_data)
dts = av_rescale(current_sample->timestamp - msc->dts_shift - msc->ctts_data[msc->ctts_index].duration,
AV_TIME_BASE, msc->time_scale);
else
dts = av_rescale(current_sample->timestamp, AV_TIME_BASE, msc->time_scale);
av_dlog(s, "stream %d, sample %d, dts %"PRId64"\n", i, msc->current_sample, dts);
if (!sample || (!s->pb->seekable && current_sample->pos < sample->pos) ||
(s->pb->seekable &&
((msc->pb != s->pb && dts < best_dts) || (msc->pb == s->pb &&
((FFABS(best_dts - dts) <= AV_TIME_BASE && current_sample->pos < sample->pos) ||
(FFABS(best_dts - dts) > AV_TIME_BASE && dts < best_dts)))))) {
sample = current_sample;
best_dts = dts;
*st = avst;
}
}
}
return sample;
}
| false |
FFmpeg
|
bbbc8c618884a838c00faaaa91898017dd431117
|
13,756 |
static int kvm_put_msrs(X86CPU *cpu, int level)
{
CPUX86State *env = &cpu->env;
int i;
int ret;
kvm_msr_buf_reset(cpu);
kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
if (has_msr_star) {
kvm_msr_entry_add(cpu, MSR_STAR, env->star);
}
if (has_msr_hsave_pa) {
kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
}
if (has_msr_tsc_aux) {
kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
}
if (has_msr_tsc_adjust) {
kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
}
if (has_msr_misc_enable) {
kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
env->msr_ia32_misc_enable);
}
if (has_msr_smbase) {
kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
}
if (has_msr_bndcfgs) {
kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
}
if (has_msr_xss) {
kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
}
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
}
#endif
/*
* The following MSRs have side effects on the guest or are too heavy
* for normal writeback. Limit them to reset or full state updates.
*/
if (level >= KVM_PUT_RESET_STATE) {
kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
if (has_msr_async_pf_en) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
}
if (has_msr_pv_eoi_en) {
kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
}
if (has_msr_kvm_steal_time) {
kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
}
if (has_msr_architectural_pmu) {
/* Stop the counter. */
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
/* Set the counter values. */
for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
env->msr_fixed_counters[i]);
}
for (i = 0; i < num_architectural_pmu_counters; i++) {
kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
env->msr_gp_counters[i]);
kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
env->msr_gp_evtsel[i]);
}
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
env->msr_global_status);
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
env->msr_global_ovf_ctrl);
/* Now start the PMU. */
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
env->msr_fixed_ctr_ctrl);
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
env->msr_global_ctrl);
}
if (has_msr_hv_hypercall) {
kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
env->msr_hv_guest_os_id);
kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
env->msr_hv_hypercall);
}
if (has_msr_hv_vapic) {
kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
env->msr_hv_vapic);
}
if (has_msr_hv_tsc) {
kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
}
if (has_msr_hv_crash) {
int j;
for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++)
kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
env->msr_hv_crash_params[j]);
kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL,
HV_X64_MSR_CRASH_CTL_NOTIFY);
}
if (has_msr_hv_runtime) {
kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
}
if (cpu->hyperv_synic) {
int j;
kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
env->msr_hv_synic_control);
kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION,
env->msr_hv_synic_version);
kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
env->msr_hv_synic_evt_page);
kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
env->msr_hv_synic_msg_page);
for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
env->msr_hv_synic_sint[j]);
}
}
if (has_msr_hv_stimer) {
int j;
for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
env->msr_hv_stimer_config[j]);
}
for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
env->msr_hv_stimer_count[j]);
}
}
if (has_msr_mtrr) {
kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
env->mtrr_var[i].base);
kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i),
env->mtrr_var[i].mask);
}
}
/* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
* kvm_put_msr_feature_control. */
}
if (env->mcg_cap) {
int i;
kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
if (has_msr_mcg_ext_ctl) {
kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
}
for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
}
}
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
if (ret < 0) {
return ret;
}
assert(ret == cpu->kvm_msr_buf->nmsrs);
return 0;
}
| false |
qemu
|
112dad69d723a68205f255dd46d78871b5c5a8ca
|
13,757 |
static void mv88w8618_pic_write(void *opaque, target_phys_addr_t offset,
uint64_t value, unsigned size)
{
mv88w8618_pic_state *s = opaque;
switch (offset) {
case MP_PIC_ENABLE_SET:
s->enabled |= value;
break;
case MP_PIC_ENABLE_CLR:
s->enabled &= ~value;
s->level &= ~value;
break;
}
mv88w8618_pic_update(s);
}
| false |
qemu
|
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
|
13,759 |
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
ram_addr_t addr;
int flags;
if (version_id != 3)
return -EINVAL;
do {
addr = qemu_get_be64(f);
flags = addr & ~TARGET_PAGE_MASK;
addr &= TARGET_PAGE_MASK;
if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
if (addr != last_ram_offset)
return -EINVAL;
}
if (flags & RAM_SAVE_FLAG_COMPRESS) {
uint8_t ch = qemu_get_byte(f);
memset(qemu_get_ram_ptr(addr), ch, TARGET_PAGE_SIZE);
#ifndef _WIN32
if (ch == 0 &&
(!kvm_enabled() || kvm_has_sync_mmu())) {
madvise(qemu_get_ram_ptr(addr), TARGET_PAGE_SIZE, MADV_DONTNEED);
}
#endif
} else if (flags & RAM_SAVE_FLAG_PAGE) {
qemu_get_buffer(f, qemu_get_ram_ptr(addr), TARGET_PAGE_SIZE);
}
if (qemu_file_has_error(f)) {
return -EIO;
}
} while (!(flags & RAM_SAVE_FLAG_EOS));
return 0;
}
| false |
qemu
|
ad96090a01d848df67d70c5259ed8aa321fa8716
|
13,760 |
int gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
int search_pc)
{
DisasContext ctx, *ctxp = &ctx;
opc_handler_t **table, *handler;
uint32_t pc_start;
uint16_t *gen_opc_end;
int j, lj = -1;
pc_start = tb->pc;
gen_opc_ptr = gen_opc_buf;
gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
gen_opparam_ptr = gen_opparam_buf;
ctx.nip = pc_start;
ctx.tb = tb;
ctx.exception = EXCP_NONE;
#if defined(CONFIG_USER_ONLY)
ctx.mem_idx = 0;
#else
ctx.supervisor = 1 - msr_pr;
ctx.mem_idx = (1 - msr_pr);
#endif
#if defined (DO_SINGLE_STEP)
/* Single step trace mode */
msr_se = 1;
#endif
env->access_type = ACCESS_CODE;
/* Set env in case of segfault during code fetch */
while (ctx.exception == EXCP_NONE && gen_opc_ptr < gen_opc_end) {
if (search_pc) {
if (loglevel > 0)
fprintf(logfile, "Search PC...\n");
j = gen_opc_ptr - gen_opc_buf;
if (lj < j) {
lj++;
while (lj < j)
gen_opc_instr_start[lj++] = 0;
gen_opc_pc[lj] = ctx.nip;
gen_opc_instr_start[lj] = 1;
}
}
#if defined PPC_DEBUG_DISAS
if (loglevel & CPU_LOG_TB_IN_ASM) {
fprintf(logfile, "----------------\n");
fprintf(logfile, "nip=%08x super=%d ir=%d\n",
ctx.nip, 1 - msr_pr, msr_ir);
}
#endif
ctx.opcode = ldl_code((void *)ctx.nip);
#if defined PPC_DEBUG_DISAS
if (loglevel & CPU_LOG_TB_IN_ASM) {
fprintf(logfile, "translate opcode %08x (%02x %02x %02x)\n",
ctx.opcode, opc1(ctx.opcode), opc2(ctx.opcode),
opc3(ctx.opcode));
}
#endif
ctx.nip += 4;
table = ppc_opcodes;
handler = table[opc1(ctx.opcode)];
if (is_indirect_opcode(handler)) {
table = ind_table(handler);
handler = table[opc2(ctx.opcode)];
if (is_indirect_opcode(handler)) {
table = ind_table(handler);
handler = table[opc3(ctx.opcode)];
}
}
/* Is opcode *REALLY* valid ? */
if (handler->handler == &gen_invalid) {
if (loglevel > 0) {
fprintf(logfile, "invalid/unsupported opcode: "
"%02x - %02x - %02x (%08x) 0x%08x %d\n",
opc1(ctx.opcode), opc2(ctx.opcode),
opc3(ctx.opcode), ctx.opcode, ctx.nip - 4, msr_ir);
} else {
printf("invalid/unsupported opcode: "
"%02x - %02x - %02x (%08x) 0x%08x %d\n",
opc1(ctx.opcode), opc2(ctx.opcode),
opc3(ctx.opcode), ctx.opcode, ctx.nip - 4, msr_ir);
}
} else {
if ((ctx.opcode & handler->inval) != 0) {
if (loglevel > 0) {
fprintf(logfile, "invalid bits: %08x for opcode: "
"%02x -%02x - %02x (0x%08x) (0x%08x)\n",
ctx.opcode & handler->inval, opc1(ctx.opcode),
opc2(ctx.opcode), opc3(ctx.opcode),
ctx.opcode, ctx.nip - 4);
} else {
printf("invalid bits: %08x for opcode: "
"%02x -%02x - %02x (0x%08x) (0x%08x)\n",
ctx.opcode & handler->inval, opc1(ctx.opcode),
opc2(ctx.opcode), opc3(ctx.opcode),
ctx.opcode, ctx.nip - 4);
}
RET_INVAL(ctxp);
break;
}
}
(*(handler->handler))(&ctx);
/* Check trace mode exceptions */
if ((msr_be && ctx.exception == EXCP_BRANCH) ||
/* Check in single step trace mode
* we need to stop except if:
* - rfi, trap or syscall
* - first instruction of an exception handler
*/
(msr_se && (ctx.nip < 0x100 ||
ctx.nip > 0xF00 ||
(ctx.nip & 0xFC) != 0x04) &&
ctx.exception != EXCP_SYSCALL && ctx.exception != EXCP_RFI &&
ctx.exception != EXCP_TRAP)) {
RET_EXCP(ctxp, EXCP_TRACE, 0);
}
/* if we reach a page boundary, stop generation */
if ((ctx.nip & (TARGET_PAGE_SIZE - 1)) == 0) {
RET_EXCP(ctxp, EXCP_BRANCH, 0);
}
}
if (ctx.exception == EXCP_NONE) {
gen_op_b((unsigned long)ctx.tb, ctx.nip);
} else if (ctx.exception != EXCP_BRANCH) {
gen_op_set_T0(0);
}
#if 1
/* TO BE FIXED: T0 hasn't got a proper value, which makes tb_add_jump
* do bad business and then qemu crashes !
*/
gen_op_set_T0(0);
#endif
/* Generate the return instruction */
gen_op_exit_tb();
*gen_opc_ptr = INDEX_op_end;
if (search_pc) {
j = gen_opc_ptr - gen_opc_buf;
lj++;
while (lj <= j)
gen_opc_instr_start[lj++] = 0;
tb->size = 0;
#if 0
if (loglevel > 0) {
page_dump(logfile);
}
#endif
} else {
tb->size = ctx.nip - pc_start;
}
#ifdef DEBUG_DISAS
if (loglevel & CPU_LOG_TB_CPU) {
fprintf(logfile, "---------------- excp: %04x\n", ctx.exception);
cpu_ppc_dump_state(env, logfile, 0);
}
if (loglevel & CPU_LOG_TB_IN_ASM) {
fprintf(logfile, "IN: %s\n", lookup_symbol((void *)pc_start));
disas(logfile, (void *)pc_start, ctx.nip - pc_start, 0, 0);
fprintf(logfile, "\n");
}
if (loglevel & CPU_LOG_TB_OP) {
fprintf(logfile, "OP:\n");
dump_ops(gen_opc_buf, gen_opparam_buf);
fprintf(logfile, "\n");
}
#endif
env->access_type = ACCESS_INT;
return 0;
}
| false |
qemu
|
b769d8fef6c06ddb39ef0337882a4f8872b9c2bc
|
13,761 |
static void bdrv_cow_init(void)
{
bdrv_register(&bdrv_cow);
}
| false |
qemu
|
550830f9351291c585c963204ad9127998b1c1ce
|
13,762 |
void vnc_zlib_clear(VncState *vs)
{
if (vs->zlib_stream.opaque) {
deflateEnd(&vs->zlib_stream);
}
buffer_free(&vs->zlib);
}
| false |
qemu
|
245f7b51c0ea04fb2224b1127430a096c91aee70
|
13,763 |
void bt_device_done(struct bt_device_s *dev)
{
struct bt_device_s **p = &dev->net->slave;
while (*p && *p != dev)
p = &(*p)->next;
if (*p != dev) {
fprintf(stderr, "%s: bad bt device \"%s\"\n", __FUNCTION__,
dev->lmp_name ?: "(null)");
exit(-1);
}
*p = dev->next;
}
| false |
qemu
|
a89f364ae8740dfc31b321eed9ee454e996dc3c1
|
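bt_device_done above walks the list by the address of each next pointer, so unlinking needs no special case for the head. The idiom in isolation:

```c
/* Pointer-to-pointer unlink: *p always names the link to rewrite. */
struct node { struct node *next; };
static int list_remove(struct node **head, struct node *dev)
{
    struct node **p = head;
    while (*p && *p != dev)
        p = &(*p)->next;
    if (*p != dev)
        return -1;   /* not found */
    *p = dev->next;  /* splice out, head or interior alike */
    return 0;
}
```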
13,764 |
static void s390_init(MachineState *machine)
{
ram_addr_t my_ram_size = machine->ram_size;
MemoryRegion *sysmem = get_system_memory();
MemoryRegion *ram = g_new(MemoryRegion, 1);
int increment_size = 20;
void *virtio_region;
hwaddr virtio_region_len;
hwaddr virtio_region_start;
if (machine->ram_slots) {
error_report("Memory hotplug not supported by the selected machine.");
exit(EXIT_FAILURE);
}
/*
* The storage increment size is a multiple of 1M and is a power of 2.
* The number of storage increments must be MAX_STORAGE_INCREMENTS or
* fewer.
*/
while ((my_ram_size >> increment_size) > MAX_STORAGE_INCREMENTS) {
increment_size++;
}
my_ram_size = my_ram_size >> increment_size << increment_size;
/* let's propagate the changed ram size into the global variable. */
ram_size = my_ram_size;
/* get a BUS */
s390_bus = s390_virtio_bus_init(&my_ram_size);
s390_sclp_init();
s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline,
machine->initrd_filename, ZIPL_FILENAME, false);
s390_flic_init();
/* register hypercalls */
s390_virtio_register_hcalls();
/* allocate RAM */
memory_region_init_ram(ram, NULL, "s390.ram", my_ram_size, &error_abort);
vmstate_register_ram_global(ram);
memory_region_add_subregion(sysmem, 0, ram);
/* clear virtio region */
virtio_region_len = my_ram_size - ram_size;
virtio_region_start = ram_size;
virtio_region = cpu_physical_memory_map(virtio_region_start,
&virtio_region_len, true);
memset(virtio_region, 0, virtio_region_len);
cpu_physical_memory_unmap(virtio_region, virtio_region_len, 1,
virtio_region_len);
/* Initialize storage key device */
s390_skeys_init();
/* init CPUs */
s390_init_cpus(machine->cpu_model);
/* Create VirtIO network adapters */
s390_create_virtio_net((BusState *)s390_bus, "virtio-net-s390");
/* Register savevm handler for guest TOD clock */
register_savevm(NULL, "todclock", 0, 1, gtod_save, gtod_load, NULL);
}
| false |
qemu
|
1cf065fb87e8787e3e9cebcdb4713b81e4e61422
|
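The size normalization in s390_init above grows the increment exponent until the increment count fits, then truncates RAM to a whole number of increments. A sketch of that arithmetic, with the limit treated as an assumption:

```c
#include <stdint.h>
#define MAX_INCREMENTS 1020  /* assumed cap on storage increments */
static uint64_t normalize_ram_size(uint64_t ram_size)
{
    int shift = 20;                    /* start at 1 MiB increments */
    while ((ram_size >> shift) > MAX_INCREMENTS)
        shift++;                       /* double the increment size */
    return ram_size >> shift << shift; /* drop any partial increment */
}
```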
13,765 |
static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
{
switch (buf[0]) {
case READ_6:
case READ_10:
case READ_12:
case READ_16:
case VERIFY_10:
case VERIFY_12:
case VERIFY_16:
case WRITE_6:
case WRITE_10:
case WRITE_12:
case WRITE_16:
case WRITE_VERIFY_10:
case WRITE_VERIFY_12:
case WRITE_VERIFY_16:
/* If we are not using O_DIRECT, we might read stale data from the
* host cache if writes were made using other commands than these
* ones (such as WRITE SAME or EXTENDED COPY, etc.). So, without
* O_DIRECT everything must go through SG_IO.
*/
if (!(bdrv_get_flags(s->qdev.conf.bs) & BDRV_O_NOCACHE)) {
break;
}
/* MMC writing cannot be done via pread/pwrite, because it sometimes
* involves writing beyond the maximum LBA or to negative LBA (lead-in).
* And once you do these writes, reading from the block device is
* unreliable, too. It is even possible that reads deliver random data
* from the host page cache (this is probably a Linux bug).
*
* We might use scsi_disk_dma_reqops as long as no writing commands are
* seen, but performance usually isn't paramount on optical media. So,
* just make scsi-block operate the same as scsi-generic for them.
*/
if (s->qdev.type != TYPE_ROM) {
return false;
}
break;
default:
break;
}
return true;
}
| false |
qemu
|
4be746345f13e99e468c60acbd3a355e8183e3ce
|
13,766 |
int ff_lpc_calc_coefs(LPCContext *s,
const int32_t *samples, int blocksize, int min_order,
int max_order, int precision,
int32_t coefs[][MAX_LPC_ORDER], int *shift,
enum FFLPCType lpc_type, int lpc_passes,
int omethod, int max_shift, int zero_shift)
{
double autoc[MAX_LPC_ORDER+1];
double ref[MAX_LPC_ORDER];
double lpc[MAX_LPC_ORDER][MAX_LPC_ORDER];
int i, j, pass;
int opt_order;
av_assert2(max_order >= MIN_LPC_ORDER && max_order <= MAX_LPC_ORDER &&
lpc_type > FF_LPC_TYPE_FIXED);
/* reinit LPC context if parameters have changed */
if (blocksize != s->blocksize || max_order != s->max_order ||
lpc_type != s->lpc_type) {
ff_lpc_end(s);
ff_lpc_init(s, blocksize, max_order, lpc_type);
}
if (lpc_type == FF_LPC_TYPE_LEVINSON) {
s->lpc_apply_welch_window(samples, blocksize, s->windowed_samples);
s->lpc_compute_autocorr(s->windowed_samples, blocksize, max_order, autoc);
compute_lpc_coefs(autoc, max_order, &lpc[0][0], MAX_LPC_ORDER, 0, 1);
for(i=0; i<max_order; i++)
ref[i] = fabs(lpc[i][i]);
} else if (lpc_type == FF_LPC_TYPE_CHOLESKY) {
LLSModel m[2];
double var[MAX_LPC_ORDER+1], av_uninit(weight);
if(lpc_passes <= 0)
lpc_passes = 2;
for(pass=0; pass<lpc_passes; pass++){
av_init_lls(&m[pass&1], max_order);
weight=0;
for(i=max_order; i<blocksize; i++){
for(j=0; j<=max_order; j++)
var[j]= samples[i-j];
if(pass){
double eval, inv, rinv;
eval= av_evaluate_lls(&m[(pass-1)&1], var+1, max_order-1);
eval= (512>>pass) + fabs(eval - var[0]);
inv = 1/eval;
rinv = sqrt(inv);
for(j=0; j<=max_order; j++)
var[j] *= rinv;
weight += inv;
}else
weight++;
av_update_lls(&m[pass&1], var, 1.0);
}
av_solve_lls(&m[pass&1], 0.001, 0);
}
for(i=0; i<max_order; i++){
for(j=0; j<max_order; j++)
lpc[i][j]=-m[(pass-1)&1].coeff[i][j];
ref[i]= sqrt(m[(pass-1)&1].variance[i] / weight) * (blocksize - max_order) / 4000;
}
for(i=max_order-1; i>0; i--)
ref[i] = ref[i-1] - ref[i];
}
opt_order = max_order;
if(omethod == ORDER_METHOD_EST) {
opt_order = estimate_best_order(ref, min_order, max_order);
i = opt_order-1;
quantize_lpc_coefs(lpc[i], i+1, precision, coefs[i], &shift[i], max_shift, zero_shift);
} else {
for(i=min_order-1; i<max_order; i++) {
quantize_lpc_coefs(lpc[i], i+1, precision, coefs[i], &shift[i], max_shift, zero_shift);
}
}
return opt_order;
}
| false |
FFmpeg
|
c4a36b6f70f37e668874d134f955eb96e23853c9
|
13,767 |
static uint64_t pci_config_get_memory_base(PCIDevice *d, uint32_t base)
{
return ((uint64_t)pci_get_word(d->config + base) & PCI_MEMORY_RANGE_MASK)
<< 16;
}
| false |
qemu
|
d46636b88339ecc2cb8d10113f45ada164817773
|
13,769 |
void pci_default_write_config(PCIDevice *d,
uint32_t address, uint32_t val, int len)
{
int can_write, i;
uint32_t end, addr;
if (len == 4 && ((address >= 0x10 && address < 0x10 + 4 * 6) ||
(address >= 0x30 && address < 0x34))) {
PCIIORegion *r;
int reg;
if ( address >= 0x30 ) {
reg = PCI_ROM_SLOT;
}else{
reg = (address - 0x10) >> 2;
}
r = &d->io_regions[reg];
if (r->size == 0)
goto default_config;
/* compute the stored value */
if (reg == PCI_ROM_SLOT) {
/* keep ROM enable bit */
val &= (~(r->size - 1)) | 1;
} else {
val &= ~(r->size - 1);
val |= r->type;
}
*(uint32_t *)(d->config + address) = cpu_to_le32(val);
pci_update_mappings(d);
return;
}
default_config:
/* not efficient, but simple */
addr = address;
for(i = 0; i < len; i++) {
/* default read/write accesses */
switch(d->config[0x0e]) {
case 0x00:
case 0x80:
switch(addr) {
case 0x00:
case 0x01:
case 0x02:
case 0x03:
case 0x06:
case 0x07:
case 0x08:
case 0x09:
case 0x0a:
case 0x0b:
case 0x0e:
case 0x10 ... 0x27: /* base */
case 0x2c ... 0x2f: /* read-only subsystem ID & vendor ID */
case 0x30 ... 0x33: /* rom */
case 0x3d:
can_write = 0;
break;
default:
can_write = 1;
break;
}
break;
default:
case 0x01:
switch(addr) {
case 0x00:
case 0x01:
case 0x02:
case 0x03:
case 0x06:
case 0x07:
case 0x08:
case 0x09:
case 0x0a:
case 0x0b:
case 0x0e:
case 0x2c ... 0x2f: /* read-only subsystem ID & vendor ID */
case 0x38 ... 0x3b: /* rom */
case 0x3d:
can_write = 0;
break;
default:
can_write = 1;
break;
}
break;
}
if (can_write) {
/* Mask out writes to reserved bits in registers */
switch (addr) {
case 0x05:
val &= ~PCI_COMMAND_RESERVED_MASK_HI;
break;
case 0x06:
val &= ~PCI_STATUS_RESERVED_MASK_LO;
break;
case 0x07:
val &= ~PCI_STATUS_RESERVED_MASK_HI;
break;
}
d->config[addr] = val;
}
if (++addr > 0xff)
break;
val >>= 8;
}
end = address + len;
if (end > PCI_COMMAND && address < (PCI_COMMAND + 2)) {
/* if the command register is modified, we must modify the mappings */
pci_update_mappings(d);
}
}
| false |
qemu
|
b7ee1603c16c1feb0d439d2ddf6cf824119d0aab
|
13,770 |
static int ehci_state_writeback(EHCIQueue *q, int async)
{
int again = 0;
/* Write back the QTD from the QH area */
ehci_trace_qtd(q, NLPTR_GET(q->qtdaddr), (EHCIqtd*) &q->qh.next_qtd);
put_dwords(NLPTR_GET(q->qtdaddr),(uint32_t *) &q->qh.next_qtd,
sizeof(EHCIqtd) >> 2);
/*
* EHCI specs say go horizontal here.
*
* We can also advance the queue here for performance reasons. We
* need to take care to only take that shortcut in case we've
* processed the qtd just written back without errors, i.e. halt
* bit is clear.
*/
if (q->qh.token & QTD_TOKEN_HALT) {
ehci_set_state(q->ehci, async, EST_HORIZONTALQH);
again = 1;
} else {
ehci_set_state(q->ehci, async, EST_ADVANCEQUEUE);
again = 1;
}
return again;
}
| false |
qemu
|
68d553587c0aa271c3eb2902921b503740d775b6
|
13,771 |
static uint64_t omap_dpll_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
struct dpll_ctl_s *s = (struct dpll_ctl_s *) opaque;
if (size != 2) {
return omap_badwidth_read16(opaque, addr);
}
if (addr == 0x00) /* CTL_REG */
return s->mode;
OMAP_BAD_REG(addr);
return 0;
}
| false |
qemu
|
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
|
13,774 |
static QEMUCursor *qxl_cursor(PCIQXLDevice *qxl, QXLCursor *cursor)
{
QEMUCursor *c;
uint8_t *image, *mask;
size_t size;
c = cursor_alloc(cursor->header.width, cursor->header.height);
c->hot_x = cursor->header.hot_spot_x;
c->hot_y = cursor->header.hot_spot_y;
switch (cursor->header.type) {
case SPICE_CURSOR_TYPE_ALPHA:
size = sizeof(uint32_t) * cursor->header.width * cursor->header.height;
memcpy(c->data, cursor->chunk.data, size);
if (qxl->debug > 2) {
cursor_print_ascii_art(c, "qxl/alpha");
}
break;
case SPICE_CURSOR_TYPE_MONO:
mask = cursor->chunk.data;
image = mask + cursor_get_mono_bpl(c) * c->width;
cursor_set_mono(c, 0xffffff, 0x000000, image, 1, mask);
if (qxl->debug > 2) {
cursor_print_ascii_art(c, "qxl/mono");
}
break;
default:
fprintf(stderr, "%s: not implemented: type %d\n",
__FUNCTION__, cursor->header.type);
goto fail;
}
return c;
fail:
cursor_put(c);
return NULL;
}
| true |
qemu
|
79c5a10cdda1aed00d7ee4ef87de2ef8c854f4a5
|
13,775 |
static inline int silk_is_lpc_stable(const int16_t lpc[16], int order)
{
int k, j, DC_resp = 0;
int32_t lpc32[2][16]; // Q24
int totalinvgain = 1 << 30; // 1.0 in Q30
int32_t *row = lpc32[0], *prevrow;
/* initialize the first row for the Levinson recursion */
for (k = 0; k < order; k++) {
DC_resp += lpc[k];
row[k] = lpc[k] * 4096;
}
if (DC_resp >= 4096)
return 0;
/* check if prediction gain pushes any coefficients too far */
for (k = order - 1; 1; k--) {
int rc; // Q31; reflection coefficient
int gaindiv; // Q30; inverse of the gain (the divisor)
int gain; // gain for this reflection coefficient
int fbits; // fractional bits used for the gain
int error; // Q29; estimate of the error of our partial estimate of 1/gaindiv
if (FFABS(row[k]) > 16773022)
return 0;
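        /* row[] is Q24; multiplying by 128 gives the (negated) reflection
         * coefficient in Q31, and gaindiv = 1 - rc^2 lands in Q30. */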
rc = -(row[k] * 128);
gaindiv = (1 << 30) - MULH(rc, rc);
totalinvgain = MULH(totalinvgain, gaindiv) << 2;
if (k == 0)
return (totalinvgain >= 107374);
/* approximate 1.0/gaindiv */
fbits = opus_ilog(gaindiv);
gain = ((1 << 29) - 1) / (gaindiv >> (fbits + 1 - 16)); // Q<fbits-16>
error = (1 << 29) - MULL(gaindiv << (15 + 16 - fbits), gain, 16);
gain = ((gain << 16) + (error * gain >> 13));
/* switch to the next row of the LPC coefficients */
prevrow = row;
row = lpc32[k & 1];
for (j = 0; j < k; j++) {
int x = prevrow[j] - ROUND_MULL(prevrow[k - j - 1], rc, 31);
row[j] = ROUND_MULL(x, gain, fbits);
}
}
}
| true |
FFmpeg
|
511e6f17f493719058229630c7db4d8d7c05aeac
|
13,776 |
static inline int mov_get_stsc_samples(MOVStreamContext *sc, int index)
{
int chunk_count;
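    /* An stsc entry covers the chunks from its own first chunk up to the
     * next entry's first chunk (or to the end of the chunk list for the
     * last entry), with `count` samples per chunk. */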
if (mov_stsc_index_valid(index, sc->stsc_count))
chunk_count = sc->stsc_data[index + 1].first - sc->stsc_data[index].first;
else
chunk_count = sc->chunk_count - (sc->stsc_data[index].first - 1);
return sc->stsc_data[index].count * chunk_count;
}
| true |
FFmpeg
|
53ea595eec984e3109310e8bb7ff4b5786d91057
|
13,777 |
static void test_machine(const void *data)
{
const testdef_t *test = data;
char *args;
char tmpname[] = "/tmp/qtest-boot-serial-XXXXXX";
int fd;
fd = mkstemp(tmpname);
g_assert(fd != -1);
args = g_strdup_printf("-M %s,accel=tcg -chardev file,id=serial0,path=%s"
" -serial chardev:serial0 %s", test->machine,
tmpname, test->extra);
qtest_start(args);
unlink(tmpname);
check_guest_output(test, fd);
qtest_quit(global_qtest);
g_free(args);
close(fd);
}
| true |
qemu
|
7150d34a1d60851d73d6ab6783b12b1d25e68f86
|
13,778 |
static int write_number(void *obj, const AVOption *o, void *dst, double num, int den, int64_t intnum)
{
if (o->max*den < num*intnum || o->min*den > num*intnum) {
av_log(obj, AV_LOG_ERROR, "Value %f for parameter '%s' out of range\n",
num*intnum/den, o->name);
return AVERROR(ERANGE);
}
switch (o->type) {
case AV_OPT_TYPE_FLAGS:
case AV_OPT_TYPE_INT: *(int *)dst= llrint(num/den)*intnum; break;
case AV_OPT_TYPE_INT64: *(int64_t *)dst= llrint(num/den)*intnum; break;
case AV_OPT_TYPE_FLOAT: *(float *)dst= num*intnum/den; break;
case AV_OPT_TYPE_DOUBLE:*(double *)dst= num*intnum/den; break;
case AV_OPT_TYPE_RATIONAL:
if ((int)num == num) *(AVRational*)dst= (AVRational){num*intnum, den};
else *(AVRational*)dst= av_d2q(num*intnum/den, 1<<24);
break;
default:
return AVERROR(EINVAL);
}
return 0;
}
| false |
FFmpeg
|
0b357a8095e72b092cc5c2aacc2f806db75ecae3
|
13,779 |
static void rtas_stop_self(PowerPCCPU *cpu, sPAPRMachineState *spapr,
uint32_t token, uint32_t nargs,
target_ulong args,
uint32_t nret, target_ulong rets)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
cs->halted = 1;
qemu_cpu_kick(cs);
/*
* While stopping a CPU, the guest calls H_CPPR which
* effectively disables interrupts on XICS level.
* However decrementer interrupts in TCG can still
* wake the CPU up so here we disable interrupts in MSR
* as well.
* As rtas_start_cpu() resets the whole MSR anyway, there is
* no need to bother with specific bits, we just clear it.
*/
env->msr = 0;
}
| true |
qemu
|
9a94ee5bb15793ef69692998ef57794a33074134
|
13,780 |
static void count_colors(AVCodecContext *avctx, unsigned hits[33],
const AVSubtitleRect *r)
{
DVDSubtitleContext *dvdc = avctx->priv_data;
unsigned count[256] = { 0 };
uint32_t *palette = (uint32_t *)r->pict.data[1];
uint32_t color;
int x, y, i, j, match, d, best_d, av_uninit(best_j);
uint8_t *p = r->pict.data[0];
for (y = 0; y < r->h; y++) {
for (x = 0; x < r->w; x++)
count[*(p++)]++;
p += r->pict.linesize[0] - r->w;
}
for (i = 0; i < 256; i++) {
if (!count[i]) /* avoid useless search */
continue;
color = palette[i];
/* 0: transparent, 1-16: semi-transparent, 17-33: opaque */
match = color < 0x33000000 ? 0 : color < 0xCC000000 ? 1 : 17;
if (match) {
best_d = INT_MAX;
for (j = 0; j < 16; j++) {
d = color_distance(color & 0xFFFFFF, dvdc->global_palette[j]);
if (d < best_d) {
best_d = d;
best_j = j;
}
}
match += best_j;
}
hits[match] += count[i];
}
}
| true |
FFmpeg
|
5ed5e90f2ae299cbec66996860d794771a85fee8
|
13,781 |
Coroutine *qemu_coroutine_new(void)
{
const size_t stack_size = 1 << 20;
CoroutineUContext *co;
CoroutineThreadState *coTS;
struct sigaction sa;
struct sigaction osa;
stack_t ss;
stack_t oss;
sigset_t sigs;
sigset_t osigs;
jmp_buf old_env;
/* The way to manipulate the stack is with the sigaltstack function: we
 * prepare a stack, have it deliver a signal to ourselves, and then
 * put sigsetjmp/siglongjmp where needed.
* This has been done keeping coroutine-ucontext as a model and with the
* pth ideas (GNU Portable Threads). See coroutine-ucontext for the basics
* of the coroutines and see pth_mctx.c (from the pth project) for the
* sigaltstack way of manipulating stacks.
*/
co = g_malloc0(sizeof(*co));
co->stack = g_malloc(stack_size);
co->base.entry_arg = &old_env; /* stash away our jmp_buf */
coTS = coroutine_get_thread_state();
coTS->tr_handler = co;
/*
* Preserve the SIGUSR2 signal state, block SIGUSR2,
* and establish our signal handler. The signal will
* later transfer control onto the signal stack.
*/
sigemptyset(&sigs);
sigaddset(&sigs, SIGUSR2);
pthread_sigmask(SIG_BLOCK, &sigs, &osigs);
sa.sa_handler = coroutine_trampoline;
sigfillset(&sa.sa_mask);
sa.sa_flags = SA_ONSTACK;
if (sigaction(SIGUSR2, &sa, &osa) != 0) {
abort();
}
/*
* Set the new stack.
*/
ss.ss_sp = co->stack;
ss.ss_size = stack_size;
ss.ss_flags = 0;
if (sigaltstack(&ss, &oss) < 0) {
abort();
}
/*
* Now transfer control onto the signal stack and set it up.
* It will return immediately via "return" after the sigsetjmp()
* was performed. Be careful here with race conditions. The
* signal can be delivered the first time sigsuspend() is
* called.
*/
coTS->tr_called = 0;
pthread_kill(pthread_self(), SIGUSR2);
sigfillset(&sigs);
sigdelset(&sigs, SIGUSR2);
while (!coTS->tr_called) {
sigsuspend(&sigs);
}
/*
* Inform the system that we are back off the signal stack by
* removing the alternative signal stack. Be careful here: It
* first has to be disabled, before it can be removed.
*/
sigaltstack(NULL, &ss);
ss.ss_flags = SS_DISABLE;
if (sigaltstack(&ss, NULL) < 0) {
abort();
}
sigaltstack(NULL, &ss);
if (!(oss.ss_flags & SS_DISABLE)) {
sigaltstack(&oss, NULL);
}
/*
* Restore the old SIGUSR2 signal handler and mask
*/
sigaction(SIGUSR2, &osa, NULL);
pthread_sigmask(SIG_SETMASK, &osigs, NULL);
/*
* Now enter the trampoline again, but this time not as a signal
* handler. Instead we jump into it directly. The functionally
* redundant ping-pong pointer arithmetic is necessary to avoid
* type-conversion warnings related to the `volatile' qualifier and
* the fact that `jmp_buf' usually is an array type.
*/
if (!sigsetjmp(old_env, 0)) {
siglongjmp(coTS->tr_reenter, 1);
}
/*
* Ok, we returned again, so now we're finished
*/
return &co->base;
}
| true |
qemu
|
7f151e6f718f2edaf8661c4dedf6fcdb30b10e1b
|
13,782 |
static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
hwaddr addr,
uint64_t *value,
unsigned size,
unsigned shift,
uint64_t mask,
MemTxAttrs attrs)
{
uint64_t tmp = 0;
MemTxResult r;
r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
if (mr->subpage) {
trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
} else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
}
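    /* Merge the masked device result into the caller's accumulated value
     * at the requested byte lane. */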
*value |= (tmp & mask) << shift;
return r;
}
| true |
qemu
|
f2d089425d43735b5369f70f3a36b712440578e5
|
13,783 |
void kvm_arch_remove_all_hw_breakpoints(void)
{
}
| true |
qemu
|
88365d17d586bcf0d9f4432447db345f72278a2a
|
13,785 |
static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
int i;
assert(src1==src2);
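    /* Each chroma sample averages two horizontally adjacent RGB24 pixels,
       hence the extra +1 in the right shift below. */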
for(i=0; i<width; i++)
{
int r= src1[6*i + 0] + src1[6*i + 3];
int g= src1[6*i + 1] + src1[6*i + 4];
int b= src1[6*i + 2] + src1[6*i + 5];
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
}
}
| true |
FFmpeg
|
2da0d70d5eebe42f9fcd27ee554419ebe2a5da06
|
13,786 |
void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
void (*filter_samples)(AVFilterLink *, AVFilterBufferRef *);
AVFilterPad *dst = link->dstpad;
FF_DPRINTF_START(NULL, filter_samples); ff_dlog_link(NULL, link, 1);
if (!(filter_samples = dst->filter_samples))
filter_samples = ff_default_filter_samples;
/* prepare to copy the samples if the buffer has insufficient permissions */
if ((dst->min_perms & samplesref->perms) != dst->min_perms ||
dst->rej_perms & samplesref->perms) {
int i, planar = av_sample_fmt_is_planar(samplesref->format);
int planes = !planar ? 1:
av_get_channel_layout_nb_channels(samplesref->audio->channel_layout);
av_log(link->dst, AV_LOG_DEBUG,
"Copying audio data in avfilter (have perms %x, need %x, reject %x)\n",
samplesref->perms, link->dstpad->min_perms, link->dstpad->rej_perms);
link->cur_buf = ff_default_get_audio_buffer(link, dst->min_perms,
samplesref->audio->nb_samples);
link->cur_buf->pts = samplesref->pts;
link->cur_buf->audio->sample_rate = samplesref->audio->sample_rate;
/* Copy actual data into new samples buffer */
for (i = 0; i < planes; i++)
memcpy(link->cur_buf->extended_data[i], samplesref->extended_data[i], samplesref->linesize[0]);
avfilter_unref_buffer(samplesref);
} else
link->cur_buf = samplesref;
filter_samples(link, link->cur_buf);
}
| true |
FFmpeg
|
9cdf74f904f76b2a1da474a2290c7e9ed34dd431
|
13,787 |
int nbd_send_request(int csock, struct nbd_request *request)
{
uint8_t buf[4 + 4 + 8 + 8 + 4];
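    /* On-wire request header: 32-bit magic, 32-bit type, 64-bit handle,
     * 64-bit offset, 32-bit length -- all big-endian. */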
cpu_to_be32w((uint32_t*)buf, NBD_REQUEST_MAGIC);
cpu_to_be32w((uint32_t*)(buf + 4), request->type);
cpu_to_be64w((uint64_t*)(buf + 8), request->handle);
cpu_to_be64w((uint64_t*)(buf + 16), request->from);
cpu_to_be32w((uint32_t*)(buf + 24), request->len);
TRACE("Sending request to client: "
"{ .from = %" PRIu64", .len = %u, .handle = %" PRIu64", .type=%i}",
request->from, request->len, request->handle, request->type);
if (write_sync(csock, buf, sizeof(buf)) != sizeof(buf)) {
LOG("writing to socket failed");
errno = EINVAL;
return -1;
}
return 0;
}
| true |
qemu
|
94e7340b5db8bce7866e44e700ffa8fd26585c7e
|
13,788 |
static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
{
MovieContext *movie = ctx->priv;
AVPacket *pkt = &movie->pkt;
enum AVMediaType frame_type;
MovieStream *st;
int ret, got_frame = 0, pkt_out_id;
AVFilterLink *outlink;
if (!pkt->size) {
if (movie->eof) {
if (movie->st[out_id].done) {
if (movie->loop_count != 1) {
ret = rewind_file(ctx);
if (ret < 0)
return ret;
movie->loop_count -= movie->loop_count > 1;
av_log(ctx, AV_LOG_VERBOSE, "Stream finished, looping.\n");
return 0; /* retry */
}
return AVERROR_EOF;
}
pkt->stream_index = movie->st[out_id].st->index;
/* packet is already ready for flushing */
} else {
ret = av_read_frame(movie->format_ctx, &movie->pkt0);
if (ret < 0) {
av_init_packet(&movie->pkt0); /* ready for flushing */
*pkt = movie->pkt0;
if (ret == AVERROR_EOF) {
movie->eof = 1;
return 0; /* start flushing */
}
return ret;
}
*pkt = movie->pkt0;
}
}
pkt_out_id = pkt->stream_index > movie->max_stream_index ? -1 :
movie->out_index[pkt->stream_index];
if (pkt_out_id < 0) {
av_free_packet(&movie->pkt0);
pkt->size = 0; /* ready for next run */
pkt->data = NULL;
return 0;
}
st = &movie->st[pkt_out_id];
outlink = ctx->outputs[pkt_out_id];
movie->frame = av_frame_alloc();
if (!movie->frame)
return AVERROR(ENOMEM);
frame_type = st->st->codec->codec_type;
switch (frame_type) {
case AVMEDIA_TYPE_VIDEO:
ret = avcodec_decode_video2(st->st->codec, movie->frame, &got_frame, pkt);
break;
case AVMEDIA_TYPE_AUDIO:
ret = avcodec_decode_audio4(st->st->codec, movie->frame, &got_frame, pkt);
break;
default:
ret = AVERROR(ENOSYS);
break;
}
if (ret < 0) {
av_log(ctx, AV_LOG_WARNING, "Decode error: %s\n", av_err2str(ret));
av_frame_free(&movie->frame);
av_free_packet(&movie->pkt0);
movie->pkt.size = 0;
movie->pkt.data = NULL;
return 0;
}
if (!ret || st->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
ret = pkt->size;
pkt->data += ret;
pkt->size -= ret;
if (pkt->size <= 0) {
av_free_packet(&movie->pkt0);
pkt->size = 0; /* ready for next run */
pkt->data = NULL;
}
if (!got_frame) {
if (!ret)
st->done = 1;
av_frame_free(&movie->frame);
return 0;
}
movie->frame->pts = av_frame_get_best_effort_timestamp(movie->frame);
av_dlog(ctx, "movie_push_frame(): file:'%s' %s\n", movie->file_name,
describe_frame_to_str((char[1024]){0}, 1024, movie->frame, frame_type, outlink));
if (st->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
if (movie->frame->format != outlink->format) {
av_log(ctx, AV_LOG_ERROR, "Format changed %s -> %s, discarding frame\n",
av_get_pix_fmt_name(outlink->format),
av_get_pix_fmt_name(movie->frame->format)
);
av_frame_free(&movie->frame);
return 0;
}
}
ret = ff_filter_frame(outlink, movie->frame);
movie->frame = NULL;
if (ret < 0)
return ret;
return pkt_out_id == out_id;
}
| true |
FFmpeg
|
97392553656a7f4fabde9ded4d2b7f538d98ee17
|
13,789 |
void blk_remove_bs(BlockBackend *blk)
{
BlockDriverState *bs;
ThrottleTimers *tt;
notifier_list_notify(&blk->remove_bs_notifiers, blk);
if (blk->public.throttle_group_member.throttle_state) {
tt = &blk->public.throttle_group_member.throttle_timers;
bs = blk_bs(blk);
bdrv_drained_begin(bs);
throttle_timers_detach_aio_context(tt);
bdrv_drained_end(bs);
}
blk_update_root_state(blk);
bdrv_root_unref_child(blk->root);
blk->root = NULL;
}
| true |
qemu
|
c89bcf3af01e7a8834cca5344e098bf879e99999
|
13,790 |
static void vmsvga_init(DeviceState *dev, struct vmsvga_state_s *s,
MemoryRegion *address_space, MemoryRegion *io)
{
s->scratch_size = SVGA_SCRATCH_SIZE;
s->scratch = g_malloc(s->scratch_size * 4);
s->vga.con = graphic_console_init(dev, 0, &vmsvga_ops, s);
s->fifo_size = SVGA_FIFO_SIZE;
memory_region_init_ram(&s->fifo_ram, NULL, "vmsvga.fifo", s->fifo_size,
&error_abort);
vmstate_register_ram_global(&s->fifo_ram);
s->fifo_ptr = memory_region_get_ram_ptr(&s->fifo_ram);
vga_common_init(&s->vga, OBJECT(dev), true);
vga_init(&s->vga, OBJECT(dev), address_space, io, true);
vmstate_register(NULL, 0, &vmstate_vga_common, &s->vga);
s->new_depth = 32;
}
| true |
qemu
|
f8ed85ac992c48814d916d5df4d44f9a971c5de4
|
13,791 |
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
X86CPUClass *xcc = X86_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
DeviceClass *dc = DEVICE_CLASS(oc);
xcc->parent_realize = dc->realize;
dc->realize = x86_cpu_realizefn;
dc->props = x86_cpu_properties;
xcc->parent_reset = cc->reset;
cc->reset = x86_cpu_reset;
cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
cc->class_by_name = x86_cpu_class_by_name;
cc->parse_features = x86_cpu_parse_featurestr;
cc->has_work = x86_cpu_has_work;
cc->do_interrupt = x86_cpu_do_interrupt;
cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
cc->dump_state = x86_cpu_dump_state;
cc->set_pc = x86_cpu_set_pc;
cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
cc->gdb_read_register = x86_cpu_gdb_read_register;
cc->gdb_write_register = x86_cpu_gdb_write_register;
cc->get_arch_id = x86_cpu_get_arch_id;
cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
cc->get_memory_mapping = x86_cpu_get_memory_mapping;
cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
cc->write_elf64_note = x86_cpu_write_elf64_note;
cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
cc->write_elf32_note = x86_cpu_write_elf32_note;
cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
cc->vmsd = &vmstate_x86_cpu;
#endif
cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
cc->debug_excp_handler = breakpoint_handler;
#endif
cc->cpu_exec_enter = x86_cpu_exec_enter;
cc->cpu_exec_exit = x86_cpu_exec_exit;
}
| true |
qemu
|
4c315c27661502a0813b129e41c0bf640c34a8d6
|
13,792 |
static int ds1225y_set_to_mode(ds1225y_t *NVRAM, nvram_open_mode mode, const char *filemode)
{
if (NVRAM->open_mode != mode)
{
if (NVRAM->file)
qemu_fclose(NVRAM->file);
NVRAM->file = qemu_fopen(NVRAM->filename, filemode);
NVRAM->open_mode = mode;
}
return (NVRAM->file != NULL);
}
| true |
qemu
|
30aa5c0d303c334c646e9db1ebadda0c0db8b13f
|
13,793 |
static int usb_wacom_handle_data(USBDevice *dev, USBPacket *p)
{
USBWacomState *s = (USBWacomState *) dev;
int ret = 0;
switch (p->pid) {
case USB_TOKEN_IN:
if (p->devep == 1) {
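            /* Interrupt IN endpoint: NAK the poll unless there is a fresh
               report (or idle reporting is active). */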
if (!(s->changed || s->idle))
return USB_RET_NAK;
s->changed = 0;
if (s->mode == WACOM_MODE_HID)
ret = usb_mouse_poll(s, p->data, p->len);
else if (s->mode == WACOM_MODE_WACOM)
ret = usb_wacom_poll(s, p->data, p->len);
break;
}
/* Fall through. */
case USB_TOKEN_OUT:
default:
ret = USB_RET_STALL;
break;
}
return ret;
}
| true |
qemu
|
4f4321c11ff6e98583846bfd6f0e81954924b003
|
13,794 |
static float quantize_band_cost(struct AACEncContext *s, const float *in,
const float *scaled, int size, int scale_idx,
int cb, const float lambda, const float uplim,
int *bits)
{
const float IQ = ff_aac_pow2sf_tab[200 + scale_idx - SCALE_ONE_POS + SCALE_DIV_512];
const float Q = ff_aac_pow2sf_tab[200 - scale_idx + SCALE_ONE_POS - SCALE_DIV_512];
const float CLIPPED_ESCAPE = 165140.0f*IQ;
int i, j, k;
float cost = 0;
const int dim = cb < FIRST_PAIR_BT ? 4 : 2;
int resbits = 0;
#ifndef USE_REALLY_FULL_SEARCH
const float Q34 = sqrtf(Q * sqrtf(Q));
const int range = aac_cb_range[cb];
const int maxval = aac_cb_maxval[cb];
int offs[4];
#endif /* USE_REALLY_FULL_SEARCH */
if (!cb) {
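        /* Codebook 0 zeroes the band: the cost is purely the energy of the
           coefficients that get dropped. */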
for (i = 0; i < size; i++)
cost += in[i]*in[i];
if (bits)
*bits = 0;
return cost * lambda;
}
#ifndef USE_REALLY_FULL_SEARCH
offs[0] = 1;
for (i = 1; i < dim; i++)
offs[i] = offs[i-1]*range;
quantize_bands(s->qcoefs, in, scaled, size, Q34, !IS_CODEBOOK_UNSIGNED(cb), maxval);
#endif /* USE_REALLY_FULL_SEARCH */
for (i = 0; i < size; i += dim) {
float mincost;
int minidx = 0;
int minbits = 0;
const float *vec;
#ifndef USE_REALLY_FULL_SEARCH
int (*quants)[2] = &s->qcoefs[i];
mincost = 0.0f;
for (j = 0; j < dim; j++)
mincost += in[i+j]*in[i+j];
minidx = IS_CODEBOOK_UNSIGNED(cb) ? 0 : 40;
minbits = ff_aac_spectral_bits[cb-1][minidx];
mincost = mincost * lambda + minbits;
for (j = 0; j < (1<<dim); j++) {
float rd = 0.0f;
int curbits;
int curidx = IS_CODEBOOK_UNSIGNED(cb) ? 0 : 40;
int same = 0;
for (k = 0; k < dim; k++) {
if ((j & (1 << k)) && quants[k][0] == quants[k][1]) {
same = 1;
break;
}
}
if (same)
continue;
for (k = 0; k < dim; k++)
curidx += quants[k][!!(j & (1 << k))] * offs[dim - 1 - k];
curbits = ff_aac_spectral_bits[cb-1][curidx];
vec = &ff_aac_codebook_vectors[cb-1][curidx*dim];
#else
mincost = INFINITY;
vec = ff_aac_codebook_vectors[cb-1];
for (j = 0; j < ff_aac_spectral_sizes[cb-1]; j++, vec += dim) {
float rd = 0.0f;
int curbits = ff_aac_spectral_bits[cb-1][j];
#endif /* USE_REALLY_FULL_SEARCH */
if (IS_CODEBOOK_UNSIGNED(cb)) {
for (k = 0; k < dim; k++) {
float t = fabsf(in[i+k]);
float di;
if (vec[k] == 64.0f) { //FIXME: slow
//do not code with escape sequence small values
if (t < 39.0f*IQ) {
rd = INFINITY;
break;
}
if (t >= CLIPPED_ESCAPE) {
di = t - CLIPPED_ESCAPE;
curbits += 21;
} else {
int c = av_clip(quant(t, Q), 0, 8191);
di = t - c*cbrtf(c)*IQ;
curbits += av_log2(c)*2 - 4 + 1;
}
} else {
di = t - vec[k]*IQ;
}
if (vec[k] != 0.0f)
curbits++;
rd += di*di;
}
} else {
for (k = 0; k < dim; k++) {
float di = in[i+k] - vec[k]*IQ;
rd += di*di;
}
}
rd = rd * lambda + curbits;
if (rd < mincost) {
mincost = rd;
minidx = j;
minbits = curbits;
}
}
cost += mincost;
resbits += minbits;
if (cost >= uplim)
return uplim;
}
if (bits)
*bits = resbits;
return cost;
}
| true |
FFmpeg
|
508f092a783f7d305d1e9938c953e375139e2cba
|
13,795 |
static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = sysbus_esp_realize;
dc->reset = sysbus_esp_hard_reset;
dc->vmsd = &vmstate_sysbus_esp_scsi;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
| true |
qemu
|
e4f4fb1eca795e36f363b4647724221e774523c1
|
13,796 |
static int vfio_initfn(PCIDevice *pdev)
{
VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
VFIOGroup *group;
char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
ssize_t len;
struct stat st;
int groupid;
int ret;
/* Check that the host device exists */
snprintf(path, sizeof(path),
"/sys/bus/pci/devices/%04x:%02x:%02x.%01x/",
vdev->host.domain, vdev->host.bus, vdev->host.slot,
vdev->host.function);
if (stat(path, &st) < 0) {
error_report("vfio: error: no such host device: %s", path);
return -errno;
}
strncat(path, "iommu_group", sizeof(path) - strlen(path) - 1);
len = readlink(path, iommu_group_path, PATH_MAX);
if (len <= 0) {
error_report("vfio: error no iommu_group for device");
return -errno;
}
iommu_group_path[len] = 0;
group_name = basename(iommu_group_path);
if (sscanf(group_name, "%d", &groupid) != 1) {
error_report("vfio: error reading %s: %m", path);
return -errno;
}
DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);
group = vfio_get_group(groupid);
if (!group) {
error_report("vfio: failed to get group %d", groupid);
return -ENOENT;
}
snprintf(path, sizeof(path), "%04x:%02x:%02x.%01x",
vdev->host.domain, vdev->host.bus, vdev->host.slot,
vdev->host.function);
QLIST_FOREACH(pvdev, &group->device_list, next) {
if (pvdev->host.domain == vdev->host.domain &&
pvdev->host.bus == vdev->host.bus &&
pvdev->host.slot == vdev->host.slot &&
pvdev->host.function == vdev->host.function) {
error_report("vfio: error: device %s is already attached", path);
vfio_put_group(group);
return -EBUSY;
}
}
ret = vfio_get_device(group, path, vdev);
if (ret) {
error_report("vfio: failed to get device %s", path);
vfio_put_group(group);
return ret;
}
/* Get a copy of config space */
ret = pread(vdev->fd, vdev->pdev.config,
MIN(pci_config_size(&vdev->pdev), vdev->config_size),
vdev->config_offset);
if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
ret = ret < 0 ? -errno : -EFAULT;
error_report("vfio: Failed to read device config space");
goto out_put;
}
/* vfio emulates a lot for us, but some bits need extra love */
vdev->emulated_config_bits = g_malloc0(vdev->config_size);
/* QEMU can choose to expose the ROM or not */
memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
/* QEMU can change multi-function devices to single function, or reverse */
vdev->emulated_config_bits[PCI_HEADER_TYPE] =
PCI_HEADER_TYPE_MULTI_FUNCTION;
/* Restore or clear multifunction, this is always controlled by QEMU */
if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
} else {
vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
}
/*
* Clear host resource mapping info. If we choose not to register a
* BAR, such as might be the case with the option ROM, we can get
* confusing, unwritable, residual addresses from the host here.
*/
memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
vfio_pci_size_rom(vdev);
ret = vfio_early_setup_msix(vdev);
if (ret) {
goto out_put;
}
vfio_map_bars(vdev);
ret = vfio_add_capabilities(vdev);
if (ret) {
goto out_teardown;
}
/* QEMU emulates all of MSI & MSIX */
if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
MSIX_CAP_LENGTH);
}
if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
vdev->msi_cap_size);
}
if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
vfio_intx_mmap_enable, vdev);
pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_update_irq);
ret = vfio_enable_intx(vdev);
if (ret) {
goto out_teardown;
}
}
add_boot_device_path(vdev->bootindex, &pdev->qdev, NULL);
vfio_register_err_notifier(vdev);
return 0;
out_teardown:
pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
vfio_teardown_msi(vdev);
vfio_unmap_bars(vdev);
out_put:
g_free(vdev->emulated_config_bits);
vfio_put_device(vdev);
vfio_put_group(group);
return ret;
}
| true |
qemu
|
13665a2d2f675341e73618fcd7f9d36b6c68b509
|
13,797 |
static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
target_ulong pc_start)
{
int b, prefixes, aflag, dflag;
int shift, ot;
int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
target_ulong next_eip, tval;
int rex_w, rex_r;
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(pc_start);
    }
    s->pc = pc_start;
prefixes = 0;
s->override = -1;
rex_w = -1;
rex_r = 0;
#ifdef TARGET_X86_64
s->rex_x = 0;
s->rex_b = 0;
x86_64_hregs = 0;
#endif
s->rip_offset = 0; /* for relative ip address */
s->vex_l = 0;
s->vex_v = 0;
next_byte:
b = cpu_ldub_code(env, s->pc);
s->pc++;
/* Collect prefixes. */
switch (b) {
case 0xf3:
prefixes |= PREFIX_REPZ;
goto next_byte;
case 0xf2:
prefixes |= PREFIX_REPNZ;
goto next_byte;
case 0xf0:
prefixes |= PREFIX_LOCK;
goto next_byte;
case 0x2e:
s->override = R_CS;
goto next_byte;
case 0x36:
s->override = R_SS;
goto next_byte;
case 0x3e:
s->override = R_DS;
goto next_byte;
case 0x26:
s->override = R_ES;
goto next_byte;
case 0x64:
s->override = R_FS;
goto next_byte;
case 0x65:
s->override = R_GS;
goto next_byte;
case 0x66:
prefixes |= PREFIX_DATA;
goto next_byte;
case 0x67:
prefixes |= PREFIX_ADR;
goto next_byte;
#ifdef TARGET_X86_64
case 0x40 ... 0x4f:
if (CODE64(s)) {
/* REX prefix */
rex_w = (b >> 3) & 1;
rex_r = (b & 0x4) << 1;
s->rex_x = (b & 0x2) << 2;
REX_B(s) = (b & 0x1) << 3;
x86_64_hregs = 1; /* select uniform byte register addressing */
            goto next_byte;
        }
        break;
#endif
case 0xc5: /* 2-byte VEX */
case 0xc4: /* 3-byte VEX */
/* VEX prefixes cannot be used except in 32-bit mode.
Otherwise the instruction is LES or LDS. */
if (s->code32 && !s->vm86) {
static const int pp_prefix[4] = {
0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
};
int vex3, vex2 = cpu_ldub_code(env, s->pc);
if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
/* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
otherwise the instruction is LES or LDS. */
                break;
            }
            s->pc++;
/* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
            if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
                            | PREFIX_LOCK | PREFIX_DATA)) {
                goto illegal_op;
            }
#ifdef TARGET_X86_64
            if (x86_64_hregs) {
                goto illegal_op;
            }
#endif
rex_r = (~vex2 >> 4) & 8;
if (b == 0xc5) {
vex3 = vex2;
b = cpu_ldub_code(env, s->pc++);
} else {
#ifdef TARGET_X86_64
s->rex_x = (~vex2 >> 3) & 8;
s->rex_b = (~vex2 >> 2) & 8;
#endif
vex3 = cpu_ldub_code(env, s->pc++);
rex_w = (vex3 >> 7) & 1;
switch (vex2 & 0x1f) {
case 0x01: /* Implied 0f leading opcode bytes. */
b = cpu_ldub_code(env, s->pc++) | 0x100;
break;
case 0x02: /* Implied 0f 38 leading opcode bytes. */
b = 0x138;
break;
case 0x03: /* Implied 0f 3a leading opcode bytes. */
b = 0x13a;
break;
                default: /* Reserved for future use. */
                    goto illegal_op;
                }
            }
            s->vex_v = (~vex3 >> 3) & 0xf;
            s->vex_l = (vex3 >> 2) & 1;
            prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
        }
        break;
    }
    /* Post-process prefixes. */
if (CODE64(s)) {
/* In 64-bit mode, the default data size is 32-bit. Select 64-bit
data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
over 0x66 if both are present. */
dflag = (rex_w > 0 ? 2 : prefixes & PREFIX_DATA ? 0 : 1);
/* In 64-bit mode, 0x67 selects 32-bit addressing. */
aflag = (prefixes & PREFIX_ADR ? 1 : 2);
} else {
/* In 16/32-bit mode, 0x66 selects the opposite data size. */
dflag = s->code32;
if (prefixes & PREFIX_DATA) {
            dflag ^= 1;
        }
        /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
        aflag = s->code32;
        if (prefixes & PREFIX_ADR) {
            aflag ^= 1;
        }
    }
    s->prefix = prefixes;
s->aflag = aflag;
s->dflag = dflag;
/* lock generation */
if (prefixes & PREFIX_LOCK)
gen_helper_lock();
/* now check op code */
reswitch:
switch(b) {
case 0x0f:
/**************************/
/* extended op code */
b = cpu_ldub_code(env, s->pc++) | 0x100;
goto reswitch;
/**************************/
/* arith & logic */
case 0x00 ... 0x05:
case 0x08 ... 0x0d:
case 0x10 ... 0x15:
case 0x18 ... 0x1d:
case 0x20 ... 0x25:
case 0x28 ... 0x2d:
case 0x30 ... 0x35:
case 0x38 ... 0x3d:
{
int op, f, val;
op = (b >> 3) & 7;
f = (b >> 1) & 3;
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
switch(f) {
case 0: /* OP Ev, Gv */
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
opreg = OR_TMP0;
} else if (op == OP_XORL && rm == reg) {
xor_zero:
/* xor reg, reg optimisation */
set_cc_op(s, CC_OP_CLR);
gen_op_movl_T0_0();
gen_op_mov_reg_T0(ot, reg);
break;
} else {
                    opreg = rm;
                }
                gen_op_mov_TN_reg(ot, 1, reg);
gen_op(s, op, ot, opreg);
break;
case 1: /* OP Gv, Ev */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T1_A0(ot + s->mem_index);
} else if (op == OP_XORL && rm == reg) {
goto xor_zero;
} else {
                    gen_op_mov_TN_reg(ot, 1, rm);
                }
                gen_op(s, op, ot, reg);
break;
case 2: /* OP A, Iv */
val = insn_get(env, s, ot);
gen_op_movl_T1_im(val);
gen_op(s, op, ot, OR_EAX);
                break;
            }
        }
        break;
    case 0x82:
        if (CODE64(s))
            goto illegal_op;
    case 0x80: /* GRP1 */
case 0x81:
case 0x83:
{
int val;
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
if (mod != 3) {
if (b == 0x83)
s->rip_offset = 1;
else
s->rip_offset = insn_const_size(ot);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
opreg = OR_TMP0;
} else {
                opreg = rm;
            }
            switch(b) {
default:
case 0x80:
case 0x81:
case 0x82:
val = insn_get(env, s, ot);
break;
case 0x83:
val = (int8_t)insn_get(env, s, OT_BYTE);
                break;
            }
            gen_op_movl_T1_im(val);
            gen_op(s, op, ot, opreg);
        }
        break;
/**************************/
/* inc, dec, and other misc arith */
case 0x40 ... 0x47: /* inc Gv */
ot = dflag ? OT_LONG : OT_WORD;
gen_inc(s, ot, OR_EAX + (b & 7), 1);
break;
case 0x48 ... 0x4f: /* dec Gv */
ot = dflag ? OT_LONG : OT_WORD;
gen_inc(s, ot, OR_EAX + (b & 7), -1);
break;
case 0xf6: /* GRP3 */
case 0xf7:
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
if (mod != 3) {
if (op == 0)
s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T0_A0(ot + s->mem_index);
} else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }
        switch(op) {
case 0: /* test */
val = insn_get(env, s, ot);
gen_op_movl_T1_im(val);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 2: /* not */
tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
if (mod != 3) {
gen_op_st_T0_A0(ot + s->mem_index);
} else {
                gen_op_mov_reg_T0(ot, rm);
            }
            break;
case 3: /* neg */
tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
if (mod != 3) {
gen_op_st_T0_A0(ot + s->mem_index);
} else {
                gen_op_mov_reg_T0(ot, rm);
            }
            gen_op_update_neg_cc();
set_cc_op(s, CC_OP_SUBB + ot);
break;
case 4: /* mul */
switch(ot) {
case OT_BYTE:
gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
gen_op_mov_reg_T0(OT_WORD, R_EAX);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
set_cc_op(s, CC_OP_MULB);
break;
case OT_WORD:
gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
gen_op_mov_reg_T0(OT_WORD, R_EAX);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
gen_op_mov_reg_T0(OT_WORD, R_EDX);
tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
set_cc_op(s, CC_OP_MULW);
break;
default:
case OT_LONG:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
set_cc_op(s, CC_OP_MULL);
break;
#ifdef TARGET_X86_64
case OT_QUAD:
tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
cpu_T[0], cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
set_cc_op(s, CC_OP_MULQ);
break;
#endif
            }
            break;
case 5: /* imul */
switch(ot) {
case OT_BYTE:
gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
gen_op_mov_reg_T0(OT_WORD, R_EAX);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
set_cc_op(s, CC_OP_MULB);
break;
case OT_WORD:
gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
gen_op_mov_reg_T0(OT_WORD, R_EAX);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
gen_op_mov_reg_T0(OT_WORD, R_EDX);
set_cc_op(s, CC_OP_MULW);
break;
default:
case OT_LONG:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
set_cc_op(s, CC_OP_MULL);
break;
#ifdef TARGET_X86_64
case OT_QUAD:
tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
cpu_T[0], cpu_regs[R_EAX]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
set_cc_op(s, CC_OP_MULQ);
break;
#endif
            }
            break;
case 6: /* div */
switch(ot) {
case OT_BYTE:
gen_jmp_im(pc_start - s->cs_base);
gen_helper_divb_AL(cpu_env, cpu_T[0]);
break;
case OT_WORD:
gen_jmp_im(pc_start - s->cs_base);
gen_helper_divw_AX(cpu_env, cpu_T[0]);
break;
default:
case OT_LONG:
gen_jmp_im(pc_start - s->cs_base);
gen_helper_divl_EAX(cpu_env, cpu_T[0]);
break;
#ifdef TARGET_X86_64
case OT_QUAD:
gen_jmp_im(pc_start - s->cs_base);
gen_helper_divq_EAX(cpu_env, cpu_T[0]);
break;
#endif
            }
            break;
case 7: /* idiv */
switch(ot) {
case OT_BYTE:
gen_jmp_im(pc_start - s->cs_base);
gen_helper_idivb_AL(cpu_env, cpu_T[0]);
break;
case OT_WORD:
gen_jmp_im(pc_start - s->cs_base);
gen_helper_idivw_AX(cpu_env, cpu_T[0]);
break;
default:
case OT_LONG:
gen_jmp_im(pc_start - s->cs_base);
gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
break;
#ifdef TARGET_X86_64
case OT_QUAD:
gen_jmp_im(pc_start - s->cs_base);
gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
break;
#endif
            }
            break;
        default:
            goto illegal_op;
        }
        break;
case 0xfe: /* GRP4 */
case 0xff: /* GRP5 */
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            goto illegal_op;
        }
        if (CODE64(s)) {
if (op == 2 || op == 4) {
/* operand size for jumps is 64 bit */
ot = OT_QUAD;
} else if (op == 3 || op == 5) {
ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
} else if (op == 6) {
/* default push size is 64 bit */
                ot = dflag ? OT_QUAD : OT_WORD;
            }
        }
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (op >= 2 && op != 3 && op != 5)
gen_op_ld_T0_A0(ot + s->mem_index);
} else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }
        switch(op) {
case 0: /* inc Ev */
if (mod != 3)
opreg = OR_TMP0;
else
opreg = rm;
gen_inc(s, ot, opreg, 1);
break;
case 1: /* dec Ev */
if (mod != 3)
opreg = OR_TMP0;
else
opreg = rm;
gen_inc(s, ot, opreg, -1);
break;
case 2: /* call Ev */
/* XXX: optimize if memory (no 'and' is necessary) */
if (s->dflag == 0)
gen_op_andl_T0_ffff();
next_eip = s->pc - s->cs_base;
gen_movtl_T1_im(next_eip);
gen_push_T1(s);
gen_op_jmp_T0();
gen_eob(s);
break;
case 3: /* lcall Ev */
gen_op_ld_T1_A0(ot + s->mem_index);
gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
do_lcall:
if (s->pe && !s->vm86) {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
tcg_const_i32(dflag),
tcg_const_i32(s->pc - pc_start));
} else {
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
tcg_const_i32(dflag),
                                  tcg_const_i32(s->pc - s->cs_base));
            }
            gen_eob(s);
break;
case 4: /* jmp Ev */
if (s->dflag == 0)
gen_op_andl_T0_ffff();
gen_op_jmp_T0();
gen_eob(s);
break;
case 5: /* ljmp Ev */
gen_op_ld_T1_A0(ot + s->mem_index);
gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
do_ljmp:
if (s->pe && !s->vm86) {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
tcg_const_i32(s->pc - pc_start));
} else {
gen_op_movl_seg_T0_vm(R_CS);
gen_op_movl_T0_T1();
                gen_op_jmp_T0();
            }
            gen_eob(s);
break;
case 6: /* push Ev */
gen_push_T0(s);
break;
        default:
            goto illegal_op;
        }
        break;
case 0x84: /* test Ev, Gv */
case 0x85:
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_TN_reg(ot, 1, reg);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 0xa8: /* test eAX, Iv */
case 0xa9:
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
val = insn_get(env, s, ot);
gen_op_mov_TN_reg(ot, 0, OR_EAX);
gen_op_movl_T1_im(val);
gen_op_testl_T0_T1_cc();
set_cc_op(s, CC_OP_LOGICB + ot);
break;
case 0x98: /* CWDE/CBW */
#ifdef TARGET_X86_64
if (dflag == 2) {
gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
gen_op_mov_reg_T0(OT_QUAD, R_EAX);
} else
#endif
if (dflag == 1) {
gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
gen_op_mov_reg_T0(OT_LONG, R_EAX);
} else {
gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_WORD, R_EAX);
        }
        break;
case 0x99: /* CDQ/CWD */
#ifdef TARGET_X86_64
if (dflag == 2) {
gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
gen_op_mov_reg_T0(OT_QUAD, R_EDX);
} else
#endif
if (dflag == 1) {
gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
gen_op_mov_reg_T0(OT_LONG, R_EDX);
} else {
gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
            gen_op_mov_reg_T0(OT_WORD, R_EDX);
        }
        break;
case 0x1af: /* imul Gv, Ev */
case 0x69: /* imul Gv, Ev, I */
case 0x6b:
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
if (b == 0x69)
s->rip_offset = insn_const_size(ot);
else if (b == 0x6b)
s->rip_offset = 1;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
if (b == 0x69) {
val = insn_get(env, s, ot);
gen_op_movl_T1_im(val);
} else if (b == 0x6b) {
val = (int8_t)insn_get(env, s, OT_BYTE);
gen_op_movl_T1_im(val);
} else {
            gen_op_mov_TN_reg(ot, 1, reg);
        }
        switch (ot) {
#ifdef TARGET_X86_64
case OT_QUAD:
tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
break;
#endif
case OT_LONG:
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
break;
default:
tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
/* XXX: use 32 bit mul which could be faster */
tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
gen_op_mov_reg_T0(ot, reg);
            break;
        }
        set_cc_op(s, CC_OP_MULB + ot);
break;
case 0x1c0:
case 0x1c1: /* xadd Ev, Gv */
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
gen_op_mov_TN_reg(ot, 0, reg);
gen_op_mov_TN_reg(ot, 1, rm);
gen_op_addl_T0_T1();
gen_op_mov_reg_T1(ot, reg);
gen_op_mov_reg_T0(ot, rm);
} else {
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_mov_TN_reg(ot, 0, reg);
gen_op_ld_T1_A0(ot + s->mem_index);
gen_op_addl_T0_T1();
gen_op_st_T0_A0(ot + s->mem_index);
            gen_op_mov_reg_T1(ot, reg);
        }
        gen_op_update2_cc();
set_cc_op(s, CC_OP_ADDB + ot);
break;
case 0x1b0:
case 0x1b1: /* cmpxchg Ev, Gv */
{
int label1, label2;
TCGv t0, t1, t2, a0;
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
t0 = tcg_temp_local_new();
t1 = tcg_temp_local_new();
t2 = tcg_temp_local_new();
a0 = tcg_temp_local_new();
gen_op_mov_v_reg(ot, t1, reg);
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
gen_op_mov_v_reg(ot, t0, rm);
} else {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
tcg_gen_mov_tl(a0, cpu_A0);
gen_op_ld_v(ot + s->mem_index, t0, a0);
                rm = 0; /* avoid warning */
            }
            label1 = gen_new_label();
tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
gen_extu(ot, t0);
gen_extu(ot, t2);
tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
label2 = gen_new_label();
if (mod == 3) {
gen_op_mov_reg_v(ot, R_EAX, t0);
tcg_gen_br(label2);
gen_set_label(label1);
gen_op_mov_reg_v(ot, rm, t1);
} else {
/* perform no-op store cycle like physical cpu; must be
before changing accumulator to ensure idempotency if
the store faults and the instruction is restarted */
gen_op_st_v(ot + s->mem_index, t0, a0);
gen_op_mov_reg_v(ot, R_EAX, t0);
tcg_gen_br(label2);
gen_set_label(label1);
                gen_op_st_v(ot + s->mem_index, t1, a0);
            }
            gen_set_label(label2);
tcg_gen_mov_tl(cpu_cc_src, t0);
tcg_gen_mov_tl(cpu_cc_srcT, t2);
tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
set_cc_op(s, CC_OP_SUBB + ot);
tcg_temp_free(t0);
tcg_temp_free(t1);
tcg_temp_free(t2);
            tcg_temp_free(a0);
        }
        break;
case 0x1c7: /* cmpxchg8b */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
        if ((mod == 3) || ((modrm & 0x38) != 0x8))
            goto illegal_op;
#ifdef TARGET_X86_64
if (dflag == 2) {
            if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
gen_update_cc_op(s);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_helper_cmpxchg16b(cpu_env, cpu_A0);
} else
#endif
{
            if (!(s->cpuid_features & CPUID_CX8))
                goto illegal_op;
            gen_jmp_im(pc_start - s->cs_base);
gen_update_cc_op(s);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_helper_cmpxchg8b(cpu_env, cpu_A0);
        }
        set_cc_op(s, CC_OP_EFLAGS);
break;
/**************************/
/* push/pop */
case 0x50 ... 0x57: /* push */
gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
gen_push_T0(s);
break;
case 0x58 ... 0x5f: /* pop */
if (CODE64(s)) {
ot = dflag ? OT_QUAD : OT_WORD;
} else {
            ot = dflag + OT_WORD;
        }
        gen_pop_T0(s);
/* NOTE: order is important for pop %sp */
gen_pop_update(s);
gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
break;
case 0x60: /* pusha */
        if (CODE64(s))
            goto illegal_op;
        gen_pusha(s);
break;
case 0x61: /* popa */
        if (CODE64(s))
            goto illegal_op;
        gen_popa(s);
break;
case 0x68: /* push Iv */
case 0x6a:
if (CODE64(s)) {
ot = dflag ? OT_QUAD : OT_WORD;
} else {
            ot = dflag + OT_WORD;
        }
        if (b == 0x68)
val = insn_get(env, s, ot);
else
val = (int8_t)insn_get(env, s, OT_BYTE);
gen_op_movl_T0_im(val);
gen_push_T0(s);
break;
case 0x8f: /* pop Ev */
if (CODE64(s)) {
ot = dflag ? OT_QUAD : OT_WORD;
} else {
            ot = dflag + OT_WORD;
        }
        modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
gen_pop_T0(s);
if (mod == 3) {
/* NOTE: order is important for pop %sp */
gen_pop_update(s);
rm = (modrm & 7) | REX_B(s);
gen_op_mov_reg_T0(ot, rm);
} else {
/* NOTE: order is important too for MMU exceptions */
s->popl_esp_hack = 1 << ot;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
s->popl_esp_hack = 0;
            gen_pop_update(s);
        }
        break;
case 0xc8: /* enter */
{
int level;
val = cpu_lduw_code(env, s->pc);
s->pc += 2;
level = cpu_ldub_code(env, s->pc++);
            gen_enter(s, val, level);
        }
        break;
case 0xc9: /* leave */
/* XXX: exception not precise (ESP is updated before potential exception) */
if (CODE64(s)) {
gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
gen_op_mov_reg_T0(OT_QUAD, R_ESP);
} else if (s->ss32) {
gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
gen_op_mov_reg_T0(OT_LONG, R_ESP);
} else {
gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
            gen_op_mov_reg_T0(OT_WORD, R_ESP);
        }
        gen_pop_T0(s);
if (CODE64(s)) {
ot = dflag ? OT_QUAD : OT_WORD;
} else {
            ot = dflag + OT_WORD;
        }
        gen_op_mov_reg_T0(ot, R_EBP);
gen_pop_update(s);
break;
case 0x06: /* push es */
case 0x0e: /* push cs */
case 0x16: /* push ss */
case 0x1e: /* push ds */
        if (CODE64(s))
            goto illegal_op;
        gen_op_movl_T0_seg(b >> 3);
gen_push_T0(s);
break;
case 0x1a0: /* push fs */
case 0x1a8: /* push gs */
gen_op_movl_T0_seg((b >> 3) & 7);
gen_push_T0(s);
break;
case 0x07: /* pop es */
case 0x17: /* pop ss */
case 0x1f: /* pop ds */
        if (CODE64(s))
            goto illegal_op;
        reg = b >> 3;
gen_pop_T0(s);
gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
gen_pop_update(s);
if (reg == R_SS) {
/* if reg == SS, inhibit interrupts/trace. */
/* If several instructions disable interrupts, only the
_first_ does it */
if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
gen_helper_set_inhibit_irq(cpu_env);
            s->tf = 0;
        }
        if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
case 0x1a1: /* pop fs */
case 0x1a9: /* pop gs */
gen_pop_T0(s);
gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
gen_pop_update(s);
if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
/**************************/
/* mov */
case 0x88:
case 0x89: /* mov Gv, Ev */
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
/* generate a generic store */
gen_ldst_modrm(env, s, modrm, ot, reg, 1);
break;
case 0xc6:
case 0xc7: /* mov Ev, Iv */
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod != 3) {
s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        }
        val = insn_get(env, s, ot);
gen_op_movl_T0_im(val);
if (mod != 3)
gen_op_st_T0_A0(ot + s->mem_index);
else
gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
break;
case 0x8a:
case 0x8b: /* mov Ev, Gv */
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = OT_WORD + dflag;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_reg_T0(ot, reg);
break;
case 0x8e: /* mov seg, Gv */
modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
            goto illegal_op;
        gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
if (reg == R_SS) {
/* if reg == SS, inhibit interrupts/trace */
/* If several instructions disable interrupts, only the
_first_ does it */
if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
gen_helper_set_inhibit_irq(cpu_env);
            s->tf = 0;
        }
        if (s->is_jmp) {
gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
case 0x8c: /* mov Gv, seg */
modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
        if (reg >= 6)
            goto illegal_op;
        gen_op_movl_T0_seg(reg);
if (mod == 3)
ot = OT_WORD + dflag;
else
ot = OT_WORD;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 0x1b6: /* movzbS Gv, Eb */
case 0x1b7: /* movzwS Gv, Eb */
case 0x1be: /* movsbS Gv, Eb */
case 0x1bf: /* movswS Gv, Eb */
{
int d_ot;
/* d_ot is the size of destination */
d_ot = dflag + OT_WORD;
/* ot is the size of source */
ot = (b & 1) + OT_BYTE;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod == 3) {
gen_op_mov_TN_reg(ot, 0, rm);
switch(ot | (b & 8)) {
case OT_BYTE:
tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
break;
case OT_BYTE | 8:
tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
break;
case OT_WORD:
tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
break;
default:
case OT_WORD | 8:
tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
                    break;
                }
                gen_op_mov_reg_T0(d_ot, reg);
} else {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (b & 8) {
gen_op_lds_T0_A0(ot + s->mem_index);
} else {
                    gen_op_ldu_T0_A0(ot + s->mem_index);
                }
                gen_op_mov_reg_T0(d_ot, reg);
            }
        }
        break;
case 0x8d: /* lea */
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
/* we must ensure that no segment is added */
s->override = -1;
val = s->addseg;
s->addseg = 0;
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
s->addseg = val;
gen_op_mov_reg_A0(ot - OT_WORD, reg);
break;
case 0xa0: /* mov EAX, Ov */
case 0xa1:
case 0xa2: /* mov Ov, EAX */
case 0xa3:
{
target_ulong offset_addr;
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
#ifdef TARGET_X86_64
if (s->aflag == 2) {
offset_addr = cpu_ldq_code(env, s->pc);
s->pc += 8;
gen_op_movq_A0_im(offset_addr);
} else
#endif
{
if (s->aflag) {
offset_addr = insn_get(env, s, OT_LONG);
} else {
                    offset_addr = insn_get(env, s, OT_WORD);
                }
                gen_op_movl_A0_im(offset_addr);
            }
            gen_add_A0_ds_seg(s);
if ((b & 2) == 0) {
gen_op_ld_T0_A0(ot + s->mem_index);
gen_op_mov_reg_T0(ot, R_EAX);
} else {
gen_op_mov_TN_reg(ot, 0, R_EAX);
                gen_op_st_T0_A0(ot + s->mem_index);
            }
        }
        break;
case 0xd7: /* xlat */
#ifdef TARGET_X86_64
if (s->aflag == 2) {
gen_op_movq_A0_reg(R_EBX);
gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
} else
#endif
{
gen_op_movl_A0_reg(R_EBX);
gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
if (s->aflag == 0)
gen_op_andl_A0_ffff();
else
                tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        }
        gen_add_A0_ds_seg(s);
gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
gen_op_mov_reg_T0(OT_BYTE, R_EAX);
break;
case 0xb0 ... 0xb7: /* mov R, Ib */
val = insn_get(env, s, OT_BYTE);
gen_op_movl_T0_im(val);
gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
break;
case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
if (dflag == 2) {
uint64_t tmp;
/* 64 bit case */
tmp = cpu_ldq_code(env, s->pc);
s->pc += 8;
reg = (b & 7) | REX_B(s);
gen_movtl_T0_im(tmp);
gen_op_mov_reg_T0(OT_QUAD, reg);
} else
#endif
{
ot = dflag ? OT_LONG : OT_WORD;
val = insn_get(env, s, ot);
reg = (b & 7) | REX_B(s);
gen_op_movl_T0_im(val);
            gen_op_mov_reg_T0(ot, reg);
        }
        break;
case 0x91 ... 0x97: /* xchg R, EAX */
do_xchg_reg_eax:
ot = dflag + OT_WORD;
reg = (b & 7) | REX_B(s);
rm = R_EAX;
goto do_xchg_reg;
case 0x86:
case 0x87: /* xchg Ev, Gv */
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3) {
rm = (modrm & 7) | REX_B(s);
do_xchg_reg:
gen_op_mov_TN_reg(ot, 0, reg);
gen_op_mov_TN_reg(ot, 1, rm);
gen_op_mov_reg_T0(ot, rm);
gen_op_mov_reg_T1(ot, reg);
} else {
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_mov_TN_reg(ot, 0, reg);
/* for xchg, lock is implicit */
if (!(prefixes & PREFIX_LOCK))
gen_helper_lock();
gen_op_ld_T1_A0(ot + s->mem_index);
gen_op_st_T0_A0(ot + s->mem_index);
if (!(prefixes & PREFIX_LOCK))
gen_helper_unlock();
gen_op_mov_reg_T1(ot, reg);
break;
case 0xc4: /* les Gv */
/* In CODE64 this is VEX3; see above. */
op = R_ES;
goto do_lxx;
case 0xc5: /* lds Gv */
/* In CODE64 this is VEX2; see above. */
op = R_DS;
goto do_lxx;
case 0x1b2: /* lss Gv */
op = R_SS;
goto do_lxx;
case 0x1b4: /* lfs Gv */
op = R_FS;
goto do_lxx;
case 0x1b5: /* lgs Gv */
op = R_GS;
do_lxx:
ot = dflag ? OT_LONG : OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        gen_op_ld_T1_A0(ot + s->mem_index);
        gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
        /* load the segment first to handle exceptions properly */
        gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
        gen_movl_seg_T0(s, op, pc_start - s->cs_base);
        /* then put the data */
        gen_op_mov_reg_T1(ot, reg);
        if (s->is_jmp) {
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
/************************/
/* shifts */
case 0xc0:
case 0xc1:
/* shift Ev,Ib */
shift = 2;
grp2:
{
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
            if (mod != 3) {
                if (shift == 2) {
                    s->rip_offset = 1;
                }
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                opreg = OR_TMP0;
            } else {
                opreg = (modrm & 7) | REX_B(s);
            }
            /* simpler op */
            if (shift == 0) {
                gen_shift(s, op, ot, opreg, OR_ECX);
            } else {
                if (shift == 2) {
                    shift = cpu_ldub_code(env, s->pc++);
                }
                gen_shifti(s, op, ot, opreg, shift);
            }
        }
        break;
case 0xd0:
case 0xd1:
/* shift Ev,1 */
shift = 1;
goto grp2;
case 0xd2:
case 0xd3:
/* shift Ev,cl */
shift = 0;
goto grp2;
case 0x1a4: /* shld imm */
op = 0;
shift = 1;
goto do_shiftd;
case 0x1a5: /* shld cl */
op = 0;
shift = 0;
goto do_shiftd;
case 0x1ac: /* shrd imm */
op = 1;
shift = 1;
goto do_shiftd;
case 0x1ad: /* shrd cl */
op = 1;
shift = 0;
do_shiftd:
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            opreg = OR_TMP0;
        } else {
            opreg = rm;
        }
        gen_op_mov_TN_reg(ot, 1, reg);
        if (shift) {
            TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
            gen_shiftd_rm_T1(s, ot, opreg, op, imm);
            tcg_temp_free(imm);
        } else {
            gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
        }
        break;
/************************/
/* floats */
case 0xd8 ... 0xdf:
if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
/* if CR0.EM or CR0.TS are set, generate an FPU exception */
/* XXX: what to do if illegal op ? */
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
            break;
        }
        modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = modrm & 7;
op = ((b & 7) << 3) | ((modrm >> 3) & 7);
if (mod != 3) {
/* memory op */
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
switch(op) {
case 0x00 ... 0x07: /* fxxxs */
case 0x10 ... 0x17: /* fixxxl */
case 0x20 ... 0x27: /* fxxxl */
case 0x30 ... 0x37: /* fixxx */
{
int op1;
op1 = op & 7;
switch(op >> 4) {
case 0:
gen_op_ld_T0_A0(OT_LONG + s->mem_index);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
break;
case 1:
gen_op_ld_T0_A0(OT_LONG + s->mem_index);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
break;
case 2:
tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
break;
case 3:
default:
gen_op_lds_T0_A0(OT_WORD + s->mem_index);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
                        break;
                    }
                    gen_helper_fp_arith_ST0_FT0(op1);
                    if (op1 == 3) {
                        /* fcomp needs pop */
                        gen_helper_fpop(cpu_env);
                    }
                }
                break;
case 0x08: /* flds */
case 0x0a: /* fsts */
case 0x0b: /* fstps */
case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
switch(op & 7) {
case 0:
switch(op >> 4) {
case 0:
gen_op_ld_T0_A0(OT_LONG + s->mem_index);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
break;
case 1:
gen_op_ld_T0_A0(OT_LONG + s->mem_index);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
break;
case 2:
tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
break;
case 3:
default:
gen_op_lds_T0_A0(OT_WORD + s->mem_index);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
                        break;
                    }
                    break;
case 1:
/* XXX: the corresponding CPUID bit must be tested ! */
switch(op >> 4) {
case 1:
gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
gen_op_st_T0_A0(OT_LONG + s->mem_index);
break;
case 2:
gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
break;
case 3:
default:
gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
gen_op_st_T0_A0(OT_WORD + s->mem_index);
                        break;
                    }
                    gen_helper_fpop(cpu_env);
                    break;
default:
switch(op >> 4) {
case 0:
gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
gen_op_st_T0_A0(OT_LONG + s->mem_index);
break;
case 1:
gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
gen_op_st_T0_A0(OT_LONG + s->mem_index);
break;
case 2:
gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
break;
case 3:
default:
gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
gen_op_st_T0_A0(OT_WORD + s->mem_index);
                        break;
                    }
                    if ((op & 7) == 3)
                        gen_helper_fpop(cpu_env);
                    break;
                }
                break;
case 0x0c: /* fldenv mem */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
break;
case 0x0d: /* fldcw mem */
gen_op_ld_T0_A0(OT_WORD + s->mem_index);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
break;
case 0x0e: /* fnstenv mem */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
break;
case 0x0f: /* fnstcw mem */
gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
gen_op_st_T0_A0(OT_WORD + s->mem_index);
break;
case 0x1d: /* fldt mem */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_fldt_ST0(cpu_env, cpu_A0);
break;
case 0x1f: /* fstpt mem */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_fstt_ST0(cpu_env, cpu_A0);
gen_helper_fpop(cpu_env);
break;
case 0x2c: /* frstor mem */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
break;
case 0x2e: /* fnsave mem */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
break;
case 0x2f: /* fnstsw mem */
gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
gen_op_st_T0_A0(OT_WORD + s->mem_index);
break;
case 0x3c: /* fbld */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_fbld_ST0(cpu_env, cpu_A0);
break;
case 0x3e: /* fbstp */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_fbst_ST0(cpu_env, cpu_A0);
gen_helper_fpop(cpu_env);
break;
case 0x3d: /* fildll */
tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
break;
case 0x3f: /* fistpll */
gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
(s->mem_index >> 2) - 1);
gen_helper_fpop(cpu_env);
break;
            default:
                goto illegal_op;
            }
        } else {
/* register float ops */
opreg = rm;
switch(op) {
case 0x08: /* fld sti */
gen_helper_fpush(cpu_env);
gen_helper_fmov_ST0_STN(cpu_env,
tcg_const_i32((opreg + 1) & 7));
break;
case 0x09: /* fxchg sti */
case 0x29: /* fxchg4 sti, undocumented op */
case 0x39: /* fxchg7 sti, undocumented op */
gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
break;
case 0x0a: /* grp d9/2 */
switch(rm) {
case 0: /* fnop */
/* check exceptions (FreeBSD FPU probe) */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_fwait(cpu_env);
break;
                default:
                    goto illegal_op;
                }
                break;
case 0x0c: /* grp d9/4 */
switch(rm) {
case 0: /* fchs */
gen_helper_fchs_ST0(cpu_env);
break;
case 1: /* fabs */
gen_helper_fabs_ST0(cpu_env);
break;
case 4: /* ftst */
gen_helper_fldz_FT0(cpu_env);
gen_helper_fcom_ST0_FT0(cpu_env);
break;
case 5: /* fxam */
gen_helper_fxam_ST0(cpu_env);
break;
                default:
                    goto illegal_op;
                }
                break;
case 0x0d: /* grp d9/5 */
{
switch(rm) {
case 0:
gen_helper_fpush(cpu_env);
gen_helper_fld1_ST0(cpu_env);
break;
case 1:
gen_helper_fpush(cpu_env);
gen_helper_fldl2t_ST0(cpu_env);
break;
case 2:
gen_helper_fpush(cpu_env);
gen_helper_fldl2e_ST0(cpu_env);
break;
case 3:
gen_helper_fpush(cpu_env);
gen_helper_fldpi_ST0(cpu_env);
break;
case 4:
gen_helper_fpush(cpu_env);
gen_helper_fldlg2_ST0(cpu_env);
break;
case 5:
gen_helper_fpush(cpu_env);
gen_helper_fldln2_ST0(cpu_env);
break;
case 6:
gen_helper_fpush(cpu_env);
gen_helper_fldz_ST0(cpu_env);
break;
                    default:
                        goto illegal_op;
                    }
                }
                break;
case 0x0e: /* grp d9/6 */
switch(rm) {
case 0: /* f2xm1 */
gen_helper_f2xm1(cpu_env);
break;
case 1: /* fyl2x */
gen_helper_fyl2x(cpu_env);
break;
case 2: /* fptan */
gen_helper_fptan(cpu_env);
break;
case 3: /* fpatan */
gen_helper_fpatan(cpu_env);
break;
case 4: /* fxtract */
gen_helper_fxtract(cpu_env);
break;
case 5: /* fprem1 */
gen_helper_fprem1(cpu_env);
break;
case 6: /* fdecstp */
gen_helper_fdecstp(cpu_env);
break;
                default:
                    goto illegal_op;
                case 7: /* fincstp */
                    gen_helper_fincstp(cpu_env);
                    break;
                }
                break;
case 0x0f: /* grp d9/7 */
switch(rm) {
case 0: /* fprem */
gen_helper_fprem(cpu_env);
break;
case 1: /* fyl2xp1 */
gen_helper_fyl2xp1(cpu_env);
break;
case 2: /* fsqrt */
gen_helper_fsqrt(cpu_env);
break;
case 3: /* fsincos */
gen_helper_fsincos(cpu_env);
break;
case 5: /* fscale */
gen_helper_fscale(cpu_env);
break;
case 4: /* frndint */
gen_helper_frndint(cpu_env);
break;
case 6: /* fsin */
gen_helper_fsin(cpu_env);
break;
                default:
                    goto illegal_op;
                case 7: /* fcos */
                    gen_helper_fcos(cpu_env);
                    break;
                }
                break;
case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
{
int op1;
op1 = op & 7;
if (op >= 0x20) {
gen_helper_fp_arith_STN_ST0(op1, opreg);
if (op >= 0x30)
gen_helper_fpop(cpu_env);
} else {
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
                    gen_helper_fp_arith_ST0_FT0(op1);
                }
            }
            break;
case 0x02: /* fcom */
case 0x22: /* fcom2, undocumented op */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcom_ST0_FT0(cpu_env);
break;
case 0x03: /* fcomp */
case 0x23: /* fcomp3, undocumented op */
case 0x32: /* fcomp5, undocumented op */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
break;
case 0x15: /* da/5 */
switch(rm) {
case 1: /* fucompp */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
gen_helper_fucom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
gen_helper_fpop(cpu_env);
break;
            default:
                goto illegal_op;
            }
            break;
case 0x1c:
switch(rm) {
case 0: /* feni (287 only, just do nop here) */
break;
case 1: /* fdisi (287 only, just do nop here) */
break;
case 2: /* fclex */
gen_helper_fclex(cpu_env);
break;
case 3: /* fninit */
gen_helper_fninit(cpu_env);
break;
case 4: /* fsetpm (287 only, just do nop here) */
break;
            default:
                goto illegal_op;
            }
            break;
case 0x1d: /* fucomi */
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucomi_ST0_FT0(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x1e: /* fcomi */
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcomi_ST0_FT0(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x28: /* ffree sti */
gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
break;
case 0x2a: /* fst sti */
gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
break;
case 0x2b: /* fstp sti */
case 0x0b: /* fstp1 sti, undocumented op */
case 0x3a: /* fstp8 sti, undocumented op */
case 0x3b: /* fstp9 sti, undocumented op */
gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
gen_helper_fpop(cpu_env);
break;
case 0x2c: /* fucom st(i) */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucom_ST0_FT0(cpu_env);
break;
case 0x2d: /* fucomp st(i) */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
break;
case 0x33: /* de/3 */
switch(rm) {
case 1: /* fcompp */
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
gen_helper_fcom_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
gen_helper_fpop(cpu_env);
break;
            default:
                goto illegal_op;
            }
            break;
case 0x38: /* ffreep sti, undocumented op */
gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fpop(cpu_env);
break;
case 0x3c: /* df/4 */
switch(rm) {
case 0:
gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
gen_op_mov_reg_T0(OT_WORD, R_EAX);
break;
            default:
                goto illegal_op;
            }
            break;
case 0x3d: /* fucomip */
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fucomi_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x3e: /* fcomip */
gen_update_cc_op(s);
gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
gen_helper_fcomi_ST0_FT0(cpu_env);
gen_helper_fpop(cpu_env);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x10 ... 0x13: /* fcmovxx */
case 0x18 ... 0x1b:
{
int op1, l1;
static const uint8_t fcmov_cc[8] = {
(JCC_B << 1),
(JCC_Z << 1),
(JCC_BE << 1),
(JCC_P << 1),
};
op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
l1 = gen_new_label();
gen_jcc1_noeob(s, op1, l1);
gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
                gen_set_label(l1);
            }
            break;
            default:
                goto illegal_op;
            }
        }
        break;
/************************/
/* string ops */
    case 0xa4: /* movsS */
    case 0xa5:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_movs(s, ot);
        }
        break;
    case 0xaa: /* stosS */
    case 0xab:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_stos(s, ot);
        }
        break;
    case 0xac: /* lodsS */
    case 0xad:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_lods(s, ot);
        }
        break;
    case 0xae: /* scasS */
    case 0xaf:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
        } else {
            gen_scas(s, ot);
        }
        break;
    case 0xa6: /* cmpsS */
    case 0xa7:
        if ((b & 1) == 0)
            ot = OT_BYTE;
        else
            ot = dflag + OT_WORD;
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
        } else {
            gen_cmps(s, ot);
        }
        break;
case 0x6c: /* insS */
case 0x6d:
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag ? OT_LONG : OT_WORD;
gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
gen_op_andl_T0_ffff();
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_ins(s, ot);
            if (use_icount) {
                gen_jmp(s, s->pc - s->cs_base);
            }
        }
        break;
case 0x6e: /* outsS */
case 0x6f:
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag ? OT_LONG : OT_WORD;
gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
gen_op_andl_T0_ffff();
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes) | 4);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
        } else {
            gen_outs(s, ot);
            if (use_icount) {
                gen_jmp(s, s->pc - s->cs_base);
            }
        }
        break;
/************************/
/* port I/O */
case 0xe4:
case 0xe5:
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag ? OT_LONG : OT_WORD;
val = cpu_ldub_code(env, s->pc++);
gen_op_movl_T0_im(val);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
if (use_icount)
gen_io_start();
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
gen_op_mov_reg_T1(ot, R_EAX);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
case 0xe6:
case 0xe7:
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag ? OT_LONG : OT_WORD;
val = cpu_ldub_code(env, s->pc++);
gen_op_movl_T0_im(val);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
gen_op_mov_TN_reg(ot, 1, R_EAX);
if (use_icount)
gen_io_start();
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
case 0xec:
case 0xed:
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag ? OT_LONG : OT_WORD;
gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
gen_op_andl_T0_ffff();
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
if (use_icount)
gen_io_start();
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
gen_op_mov_reg_T1(ot, R_EAX);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
case 0xee:
case 0xef:
if ((b & 1) == 0)
ot = OT_BYTE;
else
ot = dflag ? OT_LONG : OT_WORD;
gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
gen_op_andl_T0_ffff();
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
gen_op_mov_TN_reg(ot, 1, R_EAX);
if (use_icount)
gen_io_start();
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
/************************/
/* control */
case 0xc2: /* ret im */
val = cpu_ldsw_code(env, s->pc);
s->pc += 2;
gen_pop_T0(s);
if (CODE64(s) && s->dflag)
s->dflag = 2;
gen_stack_update(s, val + (2 << s->dflag));
if (s->dflag == 0)
gen_op_andl_T0_ffff();
gen_op_jmp_T0();
gen_eob(s);
break;
case 0xc3: /* ret */
gen_pop_T0(s);
gen_pop_update(s);
if (s->dflag == 0)
gen_op_andl_T0_ffff();
gen_op_jmp_T0();
gen_eob(s);
break;
case 0xca: /* lret im */
val = cpu_ldsw_code(env, s->pc);
s->pc += 2;
do_lret:
if (s->pe && !s->vm86) {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
tcg_const_i32(val));
} else {
gen_stack_A0(s);
/* pop offset */
gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
if (s->dflag == 0)
gen_op_andl_T0_ffff();
/* NOTE: keeping EIP updated is not a problem in case of
exception */
gen_op_jmp_T0();
/* pop selector */
gen_op_addl_A0_im(2 << s->dflag);
gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
gen_op_movl_seg_T0_vm(R_CS);
/* add stack offset */
            gen_stack_update(s, val + (4 << s->dflag));
        }
        gen_eob(s);
        break;
case 0xcb: /* lret */
val = 0;
goto do_lret;
case 0xcf: /* iret */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
if (!s->pe) {
/* real mode */
gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
set_cc_op(s, CC_OP_EFLAGS);
} else if (s->vm86) {
if (s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
                set_cc_op(s, CC_OP_EFLAGS);
            }
        } else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
tcg_const_i32(s->pc - s->cs_base));
            set_cc_op(s, CC_OP_EFLAGS);
        }
        gen_eob(s);
        break;
case 0xe8: /* call im */
{
if (dflag)
tval = (int32_t)insn_get(env, s, OT_LONG);
else
tval = (int16_t)insn_get(env, s, OT_WORD);
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (s->dflag == 0)
tval &= 0xffff;
else if(!CODE64(s))
tval &= 0xffffffff;
gen_movtl_T0_im(next_eip);
gen_push_T0(s);
            gen_jmp(s, tval);
        }
        break;
case 0x9a: /* lcall im */
{
unsigned int selector, offset;
            if (CODE64(s))
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
offset = insn_get(env, s, ot);
selector = insn_get(env, s, OT_WORD);
gen_op_movl_T0_im(selector);
            gen_op_movl_T1_imu(offset);
        }
        goto do_lcall;
case 0xe9: /* jmp im */
if (dflag)
tval = (int32_t)insn_get(env, s, OT_LONG);
else
tval = (int16_t)insn_get(env, s, OT_WORD);
tval += s->pc - s->cs_base;
if (s->dflag == 0)
tval &= 0xffff;
else if(!CODE64(s))
tval &= 0xffffffff;
gen_jmp(s, tval);
break;
case 0xea: /* ljmp im */
{
unsigned int selector, offset;
            if (CODE64(s))
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
offset = insn_get(env, s, ot);
selector = insn_get(env, s, OT_WORD);
gen_op_movl_T0_im(selector);
            gen_op_movl_T1_imu(offset);
        }
        goto do_ljmp;
case 0xeb: /* jmp Jb */
tval = (int8_t)insn_get(env, s, OT_BYTE);
tval += s->pc - s->cs_base;
if (s->dflag == 0)
tval &= 0xffff;
gen_jmp(s, tval);
break;
case 0x70 ... 0x7f: /* jcc Jb */
tval = (int8_t)insn_get(env, s, OT_BYTE);
goto do_jcc;
case 0x180 ... 0x18f: /* jcc Jv */
if (dflag) {
tval = (int32_t)insn_get(env, s, OT_LONG);
} else {
            tval = (int16_t)insn_get(env, s, OT_WORD);
        }
    do_jcc:
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (s->dflag == 0)
tval &= 0xffff;
gen_jcc(s, b, tval, next_eip);
break;
case 0x190 ... 0x19f: /* setcc Gv */
modrm = cpu_ldub_code(env, s->pc++);
gen_setcc1(s, b, cpu_T[0]);
gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1);
break;
case 0x140 ... 0x14f: /* cmov Gv, Ev */
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_cmovcc1(env, s, ot, b, modrm, reg);
break;
/************************/
/* flags */
case 0x9c: /* pushf */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_helper_read_eflags(cpu_T[0], cpu_env);
            gen_push_T0(s);
        }
        break;
case 0x9d: /* popf */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
            gen_pop_T0(s);
            if (s->cpl == 0) {
                if (s->dflag) {
                    gen_helper_write_eflags(cpu_env, cpu_T[0],
                                            tcg_const_i32((TF_MASK | AC_MASK |
                                                           ID_MASK | NT_MASK |
                                                           IF_MASK |
                                                           IOPL_MASK)));
                } else {
                    gen_helper_write_eflags(cpu_env, cpu_T[0],
                                            tcg_const_i32((TF_MASK | AC_MASK |
                                                           ID_MASK | NT_MASK |
                                                           IF_MASK | IOPL_MASK)
                                                          & 0xffff));
                }
            } else {
                if (s->cpl <= s->iopl) {
                    if (s->dflag) {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK |
                                                               AC_MASK |
                                                               ID_MASK |
                                                               NT_MASK |
                                                               IF_MASK)));
                    } else {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK |
                                                               AC_MASK |
                                                               ID_MASK |
                                                               NT_MASK |
                                                               IF_MASK)
                                                              & 0xffff));
                    }
                } else {
                    if (s->dflag) {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK | AC_MASK |
                                                               ID_MASK | NT_MASK)));
                    } else {
                        gen_helper_write_eflags(cpu_env, cpu_T[0],
                                                tcg_const_i32((TF_MASK | AC_MASK |
                                                               ID_MASK | NT_MASK)
                                                              & 0xffff));
                    }
                }
            }
            gen_pop_update(s);
            set_cc_op(s, CC_OP_EFLAGS);
            /* abort translation because TF/AC flag may change */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
gen_compute_eflags(s);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
break;
case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_compute_eflags(s);
/* Note: gen_compute_eflags() only gives the condition codes */
tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
gen_op_mov_reg_T0(OT_BYTE, R_AH);
break;
case 0xf5: /* cmc */
gen_compute_eflags(s);
tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
break;
case 0xf8: /* clc */
gen_compute_eflags(s);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
break;
case 0xf9: /* stc */
gen_compute_eflags(s);
tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
break;
case 0xfc: /* cld */
tcg_gen_movi_i32(cpu_tmp2_i32, 1);
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
break;
case 0xfd: /* std */
tcg_gen_movi_i32(cpu_tmp2_i32, -1);
tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
break;
/************************/
/* bit operations */
case 0x1ba: /* bt/bts/btr/btc Gv, im */
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
op = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
s->rip_offset = 1;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }
        /* load shift */
        val = cpu_ldub_code(env, s->pc++);
        gen_op_movl_T1_im(val);
        if (op < 4)
            goto illegal_op;
        op -= 4;
goto bt_op;
case 0x1a3: /* bt Gv, Ev */
op = 0;
goto do_btx;
case 0x1ab: /* bts */
op = 1;
goto do_btx;
case 0x1b3: /* btr */
op = 2;
goto do_btx;
case 0x1bb: /* btc */
op = 3;
do_btx:
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
gen_op_mov_TN_reg(OT_LONG, 1, reg);
if (mod != 3) {
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
/* specific case: we need to add a displacement */
gen_exts(ot, cpu_T[1]);
tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
gen_op_ld_T0_A0(ot + s->mem_index);
} else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }
    bt_op:
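        /* cpu_T[1] holds the requested bit offset; the AND below reduces
           it modulo the operand width (8 << ot bits) before it is used
           as a shift count. */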
tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
switch(op) {
case 0:
tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
tcg_gen_movi_tl(cpu_cc_dst, 0);
break;
case 1:
tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
tcg_gen_movi_tl(cpu_tmp0, 1);
tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
break;
case 2:
tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
tcg_gen_movi_tl(cpu_tmp0, 1);
tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
break;
default:
case 3:
tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
tcg_gen_movi_tl(cpu_tmp0, 1);
tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        }
        set_cc_op(s, CC_OP_SARB + ot);
        if (op != 0) {
            if (mod != 3)
                gen_op_st_T0_A0(ot + s->mem_index);
            else
                gen_op_mov_reg_T0(ot, rm);
            tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
            tcg_gen_movi_tl(cpu_cc_dst, 0);
        }
        break;
case 0x1bc: /* bsf / tzcnt */
case 0x1bd: /* bsr / lzcnt */
ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_extu(ot, cpu_T[0]);
/* Note that lzcnt and tzcnt are in different extensions. */
if ((prefixes & PREFIX_REPZ)
&& (b & 1
? s->cpuid_ext3_features & CPUID_EXT3_ABM
: s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
int size = 8 << ot;
tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
if (b & 1) {
/* For lzcnt, reduce the target_ulong result by the
number of zeros that we expect to find at the top. */
gen_helper_clz(cpu_T[0], cpu_T[0]);
tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
} else {
/* For tzcnt, a zero input must return the operand size:
force all bits outside the operand size to 1. */
target_ulong mask = (target_ulong)-2 << (size - 1);
tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
                gen_helper_ctz(cpu_T[0], cpu_T[0]);
            }
            /* For lzcnt/tzcnt, C and Z bits are defined and are
               related to the result. */
            gen_op_update1_cc();
set_cc_op(s, CC_OP_BMILGB + ot);
} else {
/* For bsr/bsf, only the Z bit is defined and it is related
to the input and not the result. */
tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
set_cc_op(s, CC_OP_LOGICB + ot);
if (b & 1) {
/* For bsr, return the bit index of the first 1 bit,
not the count of leading zeros. */
gen_helper_clz(cpu_T[0], cpu_T[0]);
tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
} else {
                gen_helper_ctz(cpu_T[0], cpu_T[0]);
            }
            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that. */
tcg_gen_movi_tl(cpu_tmp0, 0);
tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
                               cpu_regs[reg], cpu_T[0]);
        }
        gen_op_mov_reg_T0(ot, reg);
        break;
/************************/
/* bcd */
    case 0x27: /* daa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_daa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x2f: /* das */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_das(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x37: /* aaa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aaa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x3f: /* aas */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aas(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0xd4: /* aam */
        if (CODE64(s))
            goto illegal_op;
        val = cpu_ldub_code(env, s->pc++);
        if (val == 0) {
            gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
        } else {
            gen_helper_aam(cpu_env, tcg_const_i32(val));
            set_cc_op(s, CC_OP_LOGICB);
        }
        break;
    case 0xd5: /* aad */
        if (CODE64(s))
            goto illegal_op;
        val = cpu_ldub_code(env, s->pc++);
        gen_helper_aad(cpu_env, tcg_const_i32(val));
        set_cc_op(s, CC_OP_LOGICB);
        break;
/************************/
/* misc */
case 0x90: /* nop */
/* XXX: correct lock test for all insn */
        if (prefixes & PREFIX_LOCK) {
            goto illegal_op;
        }
        /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
        if (REX_B(s)) {
            goto do_xchg_reg_eax;
        }
        if (prefixes & PREFIX_REPZ) {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
        }
        break;
case 0x9b: /* fwait */
if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
(HF_MP_MASK | HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fwait(cpu_env);
        }
        break;
case 0xcc: /* int3 */
gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
break;
case 0xcd: /* int N */
val = cpu_ldub_code(env, s->pc++);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
            gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
        }
        break;
case 0xce: /* into */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
break;
#ifdef WANT_ICEBP
case 0xf1: /* icebp (undocumented, exits to external debugger) */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
#if 1
gen_debug(s, pc_start - s->cs_base);
#else
/* start debug */
tb_flush(env);
qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
#endif
break;
#endif
    case 0xfa: /* cli */
        if (!s->vm86) {
            if (s->cpl <= s->iopl) {
                gen_helper_cli(cpu_env);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        } else {
            if (s->iopl == 3) {
                gen_helper_cli(cpu_env);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        }
        break;
    case 0xfb: /* sti */
        if (!s->vm86) {
            if (s->cpl <= s->iopl) {
            gen_sti:
                gen_helper_sti(cpu_env);
                /* interrupts are enabled only for the first insn after sti */
                /* if several instructions disable interrupts, only the
                   first one does it */
                if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                    gen_helper_set_inhibit_irq(cpu_env);
                /* give a chance to handle pending irqs */
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        } else {
            if (s->iopl == 3) {
                goto gen_sti;
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        }
        break;
    case 0x62: /* bound */
        if (CODE64(s))
            goto illegal_op;
        ot = dflag ? OT_LONG : OT_WORD;
        modrm = cpu_ldub_code(env, s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_op_mov_TN_reg(ot, 0, reg);
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        if (ot == OT_WORD) {
            gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
        } else {
            gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
        }
        break;
case 0x1c8 ... 0x1cf: /* bswap reg */
reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
if (dflag == 2) {
gen_op_mov_TN_reg(OT_QUAD, 0, reg);
tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
gen_op_mov_reg_T0(OT_QUAD, reg);
} else
#endif
{
gen_op_mov_TN_reg(OT_LONG, 0, reg);
tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
gen_op_mov_reg_T0(OT_LONG, reg);
break;
case 0xd6: /* salc */
        if (CODE64(s))
            goto illegal_op;
        gen_compute_eflags_c(s, cpu_T[0]);
tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
gen_op_mov_reg_T0(OT_BYTE, R_EAX);
break;
case 0xe0: /* loopnz */
case 0xe1: /* loopz */
case 0xe2: /* loop */
case 0xe3: /* jecxz */
{
int l1, l2, l3;
tval = (int8_t)insn_get(env, s, OT_BYTE);
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (s->dflag == 0)
tval &= 0xffff;
l1 = gen_new_label();
l2 = gen_new_label();
l3 = gen_new_label();
b &= 3;
switch(b) {
case 0: /* loopnz */
case 1: /* loopz */
gen_op_add_reg_im(s->aflag, R_ECX, -1);
gen_op_jz_ecx(s->aflag, l3);
gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
break;
case 2: /* loop */
gen_op_add_reg_im(s->aflag, R_ECX, -1);
gen_op_jnz_ecx(s->aflag, l1);
break;
default:
case 3: /* jcxz */
gen_op_jz_ecx(s->aflag, l1);
                break;
            }
            gen_set_label(l3);
gen_jmp_im(next_eip);
tcg_gen_br(l2);
gen_set_label(l1);
gen_jmp_im(tval);
gen_set_label(l2);
            gen_eob(s);
        }
        break;
case 0x130: /* wrmsr */
case 0x132: /* rdmsr */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
gen_helper_rdmsr(cpu_env);
} else {
                gen_helper_wrmsr(cpu_env);
            }
        }
        break;
case 0x131: /* rdtsc */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (use_icount)
gen_io_start();
gen_helper_rdtsc(cpu_env);
if (use_icount) {
gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
case 0x133: /* rdpmc */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_rdpmc(cpu_env);
break;
case 0x134: /* sysenter */
        /* On Intel CPUs, SYSENTER remains valid in 64-bit mode */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            goto illegal_op;
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_sysenter(cpu_env);
            gen_eob(s);
        }
        break;
case 0x135: /* sysexit */
        /* On Intel CPUs, SYSEXIT remains valid in 64-bit mode */
        if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            goto illegal_op;
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
            gen_eob(s);
        }
        break;
#ifdef TARGET_X86_64
case 0x105: /* syscall */
/* XXX: is it usable in real mode ? */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
gen_eob(s);
break;
case 0x107: /* sysret */
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
/* condition codes are modified only in long mode */
if (s->lma) {
                set_cc_op(s, CC_OP_EFLAGS);
            }
            gen_eob(s);
        }
        break;
#endif
case 0x1a2: /* cpuid */
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_cpuid(cpu_env);
break;
case 0xf4: /* hlt */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
            s->is_jmp = DISAS_TB_JUMP;
        }
        break;
case 0x100:
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
case 0: /* sldt */
if (!s->pe || s->vm86)
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
ot = OT_WORD;
if (mod == 3)
ot += s->dflag;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 2: /* lldt */
if (!s->pe || s->vm86)
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lldt(cpu_env, cpu_tmp2_i32);
break;
case 1: /* str */
if (!s->pe || s->vm86)
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
ot = OT_WORD;
if (mod == 3)
ot += s->dflag;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 3: /* ltr */
if (!s->pe || s->vm86)
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_ltr(cpu_env, cpu_tmp2_i32);
break;
case 4: /* verr */
case 5: /* verw */
if (!s->pe || s->vm86)
gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
gen_update_cc_op(s);
if (op == 4) {
gen_helper_verr(cpu_env, cpu_T[0]);
} else {
gen_helper_verw(cpu_env, cpu_T[0]);
set_cc_op(s, CC_OP_EFLAGS);
break;
default:
break;
case 0x101:
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
rm = modrm & 7;
switch(op) {
        case 0: /* sgdt */
            if (mod == 3)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
            if (!s->dflag)
                gen_op_andl_T0_im(0xffffff);
            gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
            break;
case 1:
if (mod == 3) {
switch (rm) {
case 0: /* monitor */
if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
s->cpl != 0)
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
#ifdef TARGET_X86_64
if (s->aflag == 2) {
gen_op_movq_A0_reg(R_EAX);
} else
#endif
{
gen_op_movl_A0_reg(R_EAX);
if (s->aflag == 0)
gen_op_andl_A0_ffff();
gen_add_A0_ds_seg(s);
gen_helper_monitor(cpu_env, cpu_A0);
break;
case 1: /* mwait */
if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
s->cpl != 0)
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
gen_eob(s);
break;
case 2: /* clac */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
s->cpl != 0) {
gen_helper_clac(cpu_env);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
case 3: /* stac */
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
s->cpl != 0) {
gen_helper_stac(cpu_env);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
default:
} else { /* sidt */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
                gen_add_A0_im(s, 2);
                tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
                if (!s->dflag)
                    gen_op_andl_T0_im(0xffffff);
                gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
            }
            break;
case 2: /* lgdt */
case 3: /* lidt */
if (mod == 3) {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
switch(rm) {
case 0: /* VMRUN */
if (!(s->flags & HF_SVME_MASK) || !s->pe)
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
} else {
gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
tcg_const_i32(s->pc - pc_start));
tcg_gen_exit_tb(0);
s->is_jmp = DISAS_TB_JUMP;
break;
case 1: /* VMMCALL */
if (!(s->flags & HF_SVME_MASK))
gen_helper_vmmcall(cpu_env);
break;
case 2: /* VMLOAD */
if (!(s->flags & HF_SVME_MASK) || !s->pe)
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
} else {
gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
break;
case 3: /* VMSAVE */
if (!(s->flags & HF_SVME_MASK) || !s->pe)
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
} else {
gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
break;
case 4: /* STGI */
if ((!(s->flags & HF_SVME_MASK) &&
!(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
!s->pe)
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
} else {
gen_helper_stgi(cpu_env);
break;
case 5: /* CLGI */
if (!(s->flags & HF_SVME_MASK) || !s->pe)
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
} else {
gen_helper_clgi(cpu_env);
break;
case 6: /* SKINIT */
if ((!(s->flags & HF_SVME_MASK) &&
!(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
!s->pe)
gen_helper_skinit(cpu_env);
break;
case 7: /* INVLPGA */
if (!(s->flags & HF_SVME_MASK) || !s->pe)
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
break;
} else {
gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
break;
default:
} else if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start,
op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_T1_A0(OT_WORD + s->mem_index);
                gen_add_A0_im(s, 2);
                gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
                if (!s->dflag)
                    gen_op_andl_T0_im(0xffffff);
                if (op == 2) {
                    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
                    tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State, gdt.limit));
                } else {
                    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
                    tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State, idt.limit));
                }
            }
            break;
case 4: /* smsw */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
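            /* big-endian host: the guest-visible low 32 bits of the 64-bit
               cr[0] field sit at byte offset 4 */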
tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
#else
tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
#endif
gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1);
break;
case 6: /* lmsw */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
gen_helper_lmsw(cpu_env, cpu_T[0]);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
case 7:
if (mod != 3) { /* invlpg */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
                    gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                    gen_helper_invlpg(cpu_env, cpu_A0);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                }
            } else {
switch (rm) {
case 0: /* swapgs */
#ifdef TARGET_X86_64
if (CODE64(s)) {
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
tcg_gen_ld_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,segs[R_GS].base));
tcg_gen_ld_tl(cpu_T[1], cpu_env,
offsetof(CPUX86State,kernelgsbase));
tcg_gen_st_tl(cpu_T[1], cpu_env,
offsetof(CPUX86State,segs[R_GS].base));
tcg_gen_st_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,kernelgsbase));
} else
#endif
{
break;
case 1: /* rdtscp */
if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (use_icount)
gen_io_start();
gen_helper_rdtscp(cpu_env);
if (use_icount) {
gen_io_end();
                    gen_jmp(s, s->pc - s->cs_base);
                }
                break;
default:
break;
default:
break;
case 0x108: /* invd */
case 0x109: /* wbinvd */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
/* nothing to do */
break;
case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
if (CODE64(s)) {
int d_ot;
/* d_ot is the size of destination */
d_ot = dflag + OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod == 3) {
gen_op_mov_TN_reg(OT_LONG, 0, rm);
/* sign extend */
if (d_ot == OT_QUAD)
tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
gen_op_mov_reg_T0(d_ot, reg);
} else {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                if (d_ot == OT_QUAD) {
                    gen_op_lds_T0_A0(OT_LONG + s->mem_index);
                } else {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                }
                gen_op_mov_reg_T0(d_ot, reg);
            }
        } else
#endif
{
int label1;
TCGv t0, t1, t2, a0;
            if (!s->pe || s->vm86)
                goto illegal_op;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            ot = OT_WORD;
            modrm = cpu_ldub_code(env, s->pc++);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
                a0 = tcg_temp_local_new();
                tcg_gen_mov_tl(a0, cpu_A0);
            } else {
                gen_op_mov_v_reg(ot, t0, rm);
                TCGV_UNUSED(a0);
            }
            gen_op_mov_v_reg(ot, t1, reg);
tcg_gen_andi_tl(cpu_tmp0, t0, 3);
tcg_gen_andi_tl(t1, t1, 3);
tcg_gen_movi_tl(t2, 0);
label1 = gen_new_label();
tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
tcg_gen_andi_tl(t0, t0, ~3);
tcg_gen_or_tl(t0, t0, t1);
tcg_gen_movi_tl(t2, CC_Z);
gen_set_label(label1);
if (mod != 3) {
gen_op_st_v(ot + s->mem_index, t0, a0);
tcg_temp_free(a0);
} else {
gen_op_mov_reg_v(ot, rm, t0);
gen_compute_eflags(s);
tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
tcg_temp_free(t0);
tcg_temp_free(t1);
tcg_temp_free(t2);
break;
case 0x102: /* lar */
case 0x103: /* lsl */
{
int label1;
TCGv t0;
            if (!s->pe || s->vm86)
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
t0 = tcg_temp_local_new();
gen_update_cc_op(s);
if (b == 0x102) {
gen_helper_lar(t0, cpu_env, cpu_T[0]);
} else {
                gen_helper_lsl(t0, cpu_env, cpu_T[0]);
            }
            tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
label1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
gen_op_mov_reg_v(ot, reg, t0);
gen_set_label(label1);
set_cc_op(s, CC_OP_EFLAGS);
                tcg_temp_free(t0);
            }
            break;
case 0x118:
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
            case 0: /* prefetchnta */
            case 1: /* prefetcht0 */
            case 2: /* prefetcht1 */
            case 3: /* prefetcht2 */
                if (mod == 3)
                    goto illegal_op;
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
                /* nothing more to do */
                break;
default: /* nop (multi byte) */
gen_nop_modrm(env, s, modrm);
break;
break;
case 0x119 ... 0x11f: /* nop (multi byte) */
modrm = cpu_ldub_code(env, s->pc++);
gen_nop_modrm(env, s, modrm);
break;
case 0x120: /* mov reg, crN */
case 0x122: /* mov crN, reg */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
modrm = cpu_ldub_code(env, s->pc++);
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
* AMD documentation (24594.pdf) and testing of
* intel 386 and 486 processors all show that the mod bits
* are assumed to be 1's, regardless of actual values.
*/
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (CODE64(s))
ot = OT_QUAD;
else
ot = OT_LONG;
if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
(s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
reg = 8;
switch(reg) {
case 0:
case 2:
case 3:
case 4:
case 8:
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
if (b & 2) {
gen_op_mov_TN_reg(ot, 0, rm);
gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
cpu_T[0]);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
gen_op_mov_reg_T0(ot, rm);
break;
default:
break;
case 0x121: /* mov reg, drN */
case 0x123: /* mov drN, reg */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
modrm = cpu_ldub_code(env, s->pc++);
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
* AMD documentation (24594.pdf) and testing of
* intel 386 and 486 processors all show that the mod bits
* are assumed to be 1's, regardless of actual values.
*/
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (CODE64(s))
ot = OT_QUAD;
else
ot = OT_LONG;
/* XXX: do it dynamically with CR4.DE bit */
if (reg == 4 || reg == 5 || reg >= 8)
if (b & 2) {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
gen_op_mov_TN_reg(ot, 0, rm);
gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
gen_op_mov_reg_T0(ot, rm);
break;
case 0x106: /* clts */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
gen_helper_clts(cpu_env);
/* abort block because static cpu state changed */
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
break;
/* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
/* generate a generic store */
gen_ldst_modrm(env, s, modrm, ot, reg, 1);
break;
case 0x1ae:
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
case 0: /* fxsave */
            if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
                (s->prefix & PREFIX_LOCK))
                goto illegal_op;
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
break;
case 1: /* fxrstor */
            if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
                (s->prefix & PREFIX_LOCK))
                goto illegal_op;
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_update_cc_op(s);
gen_jmp_im(pc_start - s->cs_base);
gen_helper_fxrstor(cpu_env, cpu_A0,
tcg_const_i32((s->dflag == 2)));
break;
case 2: /* ldmxcsr */
case 3: /* stmxcsr */
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
                mod == 3)
                goto illegal_op;
            gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            if (op == 2) {
                gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
            } else {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            }
            break;
case 5: /* lfence */
case 6: /* mfence */
if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
break;
case 7: /* sfence / clflush */
            if ((modrm & 0xc7) == 0xc0) {
                /* sfence */
                /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
                if (!(s->cpuid_features & CPUID_SSE))
                    goto illegal_op;
            } else {
                /* clflush */
                if (!(s->cpuid_features & CPUID_CLFLUSH))
                    goto illegal_op;
                gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
            }
            break;
default:
break;
case 0x10d: /* 3DNow! prefetch(w) */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
/* ignore for now */
break;
case 0x1aa: /* rsm */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
        gen_update_cc_op(s);
gen_jmp_im(s->pc - s->cs_base);
gen_helper_rsm(cpu_env);
gen_eob(s);
break;
case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;
        modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
if (s->prefix & PREFIX_DATA)
ot = OT_WORD;
else if (s->dflag != 2)
ot = OT_LONG;
else
ot = OT_QUAD;
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
gen_op_mov_reg_T0(ot, reg);
set_cc_op(s, CC_OP_EFLAGS);
break;
case 0x10e ... 0x10f:
/* 3DNow! instructions, ignore prefixes */
s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
case 0x110 ... 0x117:
case 0x128 ... 0x12f:
case 0x138 ... 0x13a:
case 0x150 ... 0x179:
case 0x17c ... 0x17f:
case 0x1c2:
case 0x1c4 ... 0x1c6:
case 0x1d0 ... 0x1fe:
gen_sse(env, s, b, pc_start, rex_r);
break;
    default:
        goto illegal_op;
    }
    /* lock generation */
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    return s->pc;
 illegal_op:
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
    return s->pc;
}
| true |
qemu
|
bff93281a75def2e3197005d72ad5cdc4719383f
|
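Every ModRM-based case in the translator above splits the same byte the same way: mod in bits 7-6 selects a register versus memory operand, reg/opcode sits in bits 5-3, and rm in bits 2-0, with REX bits folded in on 64-bit targets. A minimal standalone sketch of that split; the struct and helper names here are illustrative, not QEMU's:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical container for the three ModRM fields. */
struct modrm_fields { int mod, reg, rm; };

static struct modrm_fields decode_modrm(uint8_t modrm)
{
    struct modrm_fields f;
    f.mod = (modrm >> 6) & 3;   /* 3 = register operand, else memory form */
    f.reg = (modrm >> 3) & 7;   /* register number or opcode extension */
    f.rm  = modrm & 7;          /* base register / addressing form */
    return f;
}

int main(void)
{
    struct modrm_fields f = decode_modrm(0xd8); /* mod=3, reg=3, rm=0 */
    printf("mod=%d reg=%d rm=%d\n", f.mod, f.reg, f.rm);
    return 0;
}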
13,798 |
int h261_decode_picture_header(H261Context *h){
MpegEncContext * const s = &h->s;
int format, i;
static int h261_framecounter = 0;
uint32_t startcode;
align_get_bits(&s->gb);
startcode = (h->last_bits << (12 - (8-h->bits_left))) | get_bits(&s->gb, 20-8 - (8- h->bits_left));
for(i= s->gb.size_in_bits - get_bits_count(&s->gb); i>24; i-=1){
startcode = ((startcode << 1) | get_bits(&s->gb, 1)) & 0x000FFFFF;
if(startcode == 0x10)
break;
}
if (startcode != 0x10){
av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
return -1;
}
/* temporal reference */
s->picture_number = get_bits(&s->gb, 5); /* picture timestamp */
/* PTYPE starts here */
skip_bits1(&s->gb); /* split screen off */
skip_bits1(&s->gb); /* camera off */
skip_bits1(&s->gb); /* freeze picture release off */
format = get_bits1(&s->gb);
//only 2 formats possible
if (format == 0){//QCIF
s->width = 176;
s->height = 144;
s->mb_width = 11;
s->mb_height = 9;
}else{//CIF
s->width = 352;
s->height = 288;
s->mb_width = 22;
s->mb_height = 18;
}
s->mb_num = s->mb_width * s->mb_height;
skip_bits1(&s->gb); /* still image mode off */
skip_bits1(&s->gb); /* Reserved */
/* PEI */
while (get_bits1(&s->gb) != 0){
skip_bits(&s->gb, 8);
}
//h261 has no I-FRAMES, pass the test in MPV_frame_start in mpegvideo.c
if(h261_framecounter > 1)
s->pict_type = P_TYPE;
else
s->pict_type = I_TYPE;
h261_framecounter++;
h->gob_number = 0;
return 0;
}
| true |
FFmpeg
|
49e5dcbce5f9e08ec375fd54c413148beb81f1d7
|
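The header parser above hunts for the 20-bit H.261 picture start code (0x00010) by shifting one bit at a time through a masked 20-bit window. A self-contained sketch of that windowed scan over a plain byte buffer; the buffer handling here is illustrative and stands in for FFmpeg's GetBitContext:

#include <stdint.h>
#include <stddef.h>

/* Return the bit position just past the 20-bit PSC (0x00010),
   or -1 if it is not found within the given number of bits. */
static long find_h261_psc(const uint8_t *buf, size_t bits)
{
    uint32_t window = 0;
    for (size_t i = 0; i < bits; i++) {
        int bit = (buf[i >> 3] >> (7 - (i & 7))) & 1;
        window = ((window << 1) | bit) & 0x000FFFFF; /* keep 20 bits */
        if (i >= 19 && window == 0x10)
            return (long)(i + 1);
    }
    return -1;
}

Keeping the window masked to 20 bits makes the match test a single compare per input bit, which is the same shape as the loop in the record above.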
13,799 |
void HELPER(srst)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
uintptr_t ra = GETPC();
uint64_t end, str;
uint32_t len;
uint8_t v, c = env->regs[0];
/* Bits 32-55 must contain all 0. */
if (env->regs[0] & 0xffffff00u) {
cpu_restore_state(ENV_GET_CPU(env), ra);
program_interrupt(env, PGM_SPECIFICATION, 6);
}
str = get_address(env, r2);
end = get_address(env, r1);
/* Lest we fail to service interrupts in a timely manner, limit the
amount of work we're willing to do. For now, let's cap at 8k. */
for (len = 0; len < 0x2000; ++len) {
if (str + len == end) {
/* Character not found. R1 & R2 are unmodified. */
env->cc_op = 2;
return;
}
v = cpu_ldub_data_ra(env, str + len, ra);
if (v == c) {
/* Character found. Set R1 to the location; R2 is unmodified. */
env->cc_op = 1;
set_address(env, r1, str + len);
return;
}
}
/* CPU-determined bytes processed. Advance R2 to next byte to process. */
env->cc_op = 3;
set_address(env, r2, str + len);
}
| false |
qemu
|
8d2f850a5ab7579a852f23b28273940a47dfd7ff
|
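The helper above reduces to a bounded byte search with three outcomes that map onto condition codes 1, 2, and 3. A plain-C model of that contract; the 0x2000 cap mirrors the helper, while the enum and function names are illustrative:

#include <stdint.h>
#include <stddef.h>

enum srst_cc { SRST_FOUND = 1, SRST_NOT_FOUND = 2, SRST_CPU_LIMIT = 3 };

/* Search for byte c in [str, end); give up after 0x2000 bytes so a
   single call cannot run unbounded, just as the helper does. */
static enum srst_cc srst_model(const uint8_t *str, const uint8_t *end,
                               uint8_t c, const uint8_t **pos)
{
    for (size_t len = 0; len < 0x2000; len++) {
        if (str + len == end)
            return SRST_NOT_FOUND;          /* cc 2: hit the end address */
        if (str[len] == c) {
            *pos = str + len;
            return SRST_FOUND;              /* cc 1: character located */
        }
    }
    *pos = str + 0x2000;
    return SRST_CPU_LIMIT;                  /* cc 3: resume from here */
}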
13,800 |
void qemu_vfree(void *ptr)
{
/* may be useful some day, but currently we do not need to free */
}
| false |
qemu
|
6cb7ee859a1b28aae8eab7f88908c9c9262b8a5c
|
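A free that is deliberately a no-op usually pairs with an arena or bump allocator whose memory is reclaimed all at once rather than per allocation. A minimal sketch of that pairing, with hypothetical names; this is an assumption about the surrounding allocator, not code from QEMU:

#include <stddef.h>

/* Hypothetical bump arena: allocations advance a cursor, individual
   frees do nothing, and the whole arena is released in one reset. */
static unsigned char arena[1 << 20];
static size_t arena_used;

static void *arena_alloc(size_t size)
{
    size = (size + 15) & ~(size_t)15;       /* 16-byte alignment */
    if (arena_used + size > sizeof(arena))
        return NULL;
    void *p = arena + arena_used;
    arena_used += size;
    return p;
}

static void arena_free(void *p)
{
    (void)p;                                /* no-op, like qemu_vfree above */
}

static void arena_reset(void)
{
    arena_used = 0;                         /* reclaim everything at once */
}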
13,801 |
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
int rc = WaitForSingleObject(sem->sema, ms);
if (rc == WAIT_OBJECT_0) {
return 0;
}
if (rc != WAIT_TIMEOUT) {
error_exit(GetLastError(), __func__);
}
return -1;
}
| false |
qemu
|
c096358e747e88fc7364e40e3c354ee0bb683960
|
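A timed wait that returns 0 on acquisition and -1 on timeout composes naturally into a deadline loop. A sketch of such a caller; qemu_sem_timedwait's signature comes from the record above, while the slice size and housekeeping hook are assumptions:

#include <stdbool.h>

/* Prototypes matching the record above. */
typedef struct QemuSemaphore QemuSemaphore;
int qemu_sem_timedwait(QemuSemaphore *sem, int ms);

/* Wait up to total_ms, polling in slices so the caller can run
   housekeeping between attempts (the 10 ms slice is an assumption). */
static bool wait_with_housekeeping(QemuSemaphore *sem, int total_ms,
                                   void (*housekeeping)(void))
{
    const int slice_ms = 10;
    for (int waited = 0; waited < total_ms; waited += slice_ms) {
        if (qemu_sem_timedwait(sem, slice_ms) == 0)
            return true;    /* acquired */
        housekeeping();     /* timed out this slice; do other work */
    }
    return false;           /* overall deadline expired */
}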
13,803 |
static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
{
int is_q = extract32(insn, 30, 1);
int u = extract32(insn, 29, 1);
int size = extract32(insn, 22, 2);
int opcode = extract32(insn, 11, 5);
int rm = extract32(insn, 16, 5);
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
int pass;
switch (opcode) {
case 0x13: /* MUL, PMUL */
if (u && size != 0) {
unallocated_encoding(s);
return;
}
/* fall through */
case 0x0: /* SHADD, UHADD */
case 0x2: /* SRHADD, URHADD */
case 0x4: /* SHSUB, UHSUB */
case 0xc: /* SMAX, UMAX */
case 0xd: /* SMIN, UMIN */
case 0xe: /* SABD, UABD */
case 0xf: /* SABA, UABA */
case 0x12: /* MLA, MLS */
if (size == 3) {
unallocated_encoding(s);
return;
}
break;
case 0x16: /* SQDMULH, SQRDMULH */
if (size == 0 || size == 3) {
unallocated_encoding(s);
return;
}
break;
default:
if (size == 3 && !is_q) {
unallocated_encoding(s);
return;
}
break;
}
if (!fp_access_check(s)) {
return;
}
if (size == 3) {
for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
TCGv_i64 tcg_op1 = tcg_temp_new_i64();
TCGv_i64 tcg_op2 = tcg_temp_new_i64();
TCGv_i64 tcg_res = tcg_temp_new_i64();
read_vec_element(s, tcg_op1, rn, pass, MO_64);
read_vec_element(s, tcg_op2, rm, pass, MO_64);
handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
write_vec_element(s, tcg_res, rd, pass, MO_64);
tcg_temp_free_i64(tcg_res);
tcg_temp_free_i64(tcg_op1);
tcg_temp_free_i64(tcg_op2);
}
} else {
for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
TCGv_i32 tcg_op1 = tcg_temp_new_i32();
TCGv_i32 tcg_op2 = tcg_temp_new_i32();
TCGv_i32 tcg_res = tcg_temp_new_i32();
NeonGenTwoOpFn *genfn = NULL;
NeonGenTwoOpEnvFn *genenvfn = NULL;
read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
switch (opcode) {
case 0x0: /* SHADD, UHADD */
{
static NeonGenTwoOpFn * const fns[3][2] = {
{ gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
{ gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
{ gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
};
genfn = fns[size][u];
break;
}
case 0x1: /* SQADD, UQADD */
{
static NeonGenTwoOpEnvFn * const fns[3][2] = {
{ gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
{ gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
{ gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
};
genenvfn = fns[size][u];
break;
}
case 0x2: /* SRHADD, URHADD */
{
static NeonGenTwoOpFn * const fns[3][2] = {
{ gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
{ gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
{ gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
};
genfn = fns[size][u];
break;
}
case 0x4: /* SHSUB, UHSUB */
{
static NeonGenTwoOpFn * const fns[3][2] = {
{ gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
{ gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
{ gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
};
genfn = fns[size][u];
break;
}
case 0x5: /* SQSUB, UQSUB */
{
static NeonGenTwoOpEnvFn * const fns[3][2] = {
{ gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
{ gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
{ gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
};
genenvfn = fns[size][u];
break;
}
case 0x6: /* CMGT, CMHI */
{
static NeonGenTwoOpFn * const fns[3][2] = {
{ gen_helper_neon_cgt_s8, gen_helper_neon_cgt_u8 },
{ gen_helper_neon_cgt_s16, gen_helper_neon_cgt_u16 },
{ gen_helper_neon_cgt_s32, gen_helper_neon_cgt_u32 },
};
genfn = fns[size][u];
break;
}
case 0x7: /* CMGE, CMHS */
{
static NeonGenTwoOpFn * const fns[3][2] = {
{ gen_helper_neon_cge_s8, gen_helper_neon_cge_u8 },
{ gen_helper_neon_cge_s16, gen_helper_neon_cge_u16 },
{ gen_helper_neon_cge_s32, gen_helper_neon_cge_u32 },
};
genfn = fns[size][u];
break;
}
case 0x8: /* SSHL, USHL */
{
static NeonGenTwoOpFn * const fns[3][2] = {
{ gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
{ gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
{ gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
};
genfn = fns[size][u];
break;
}
case 0x9: /* SQSHL, UQSHL */
{
static NeonGenTwoOpEnvFn * const fns[3][2] = {
{ gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
{ gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
{ gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
};
genenvfn = fns[size][u];
break;
}
case 0xa: /* SRSHL, URSHL */
{
static NeonGenTwoOpFn * const fns[3][2] = {
{ gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
{ gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
{ gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
};
genfn = fns[size][u];
break;
}
case 0xb: /* SQRSHL, UQRSHL */
{
static NeonGenTwoOpEnvFn * const fns[3][2] = {
{ gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
{ gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
{ gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
};
genenvfn = fns[size][u];
break;
}
case 0xc: /* SMAX, UMAX */
{
static NeonGenTwoOpFn * const fns[3][2] = {
{ gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
{ gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
{ gen_max_s32, gen_max_u32 },
};
genfn = fns[size][u];
break;
}
case 0xd: /* SMIN, UMIN */
{
static NeonGenTwoOpFn * const fns[3][2] = {
{ gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
{ gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
{ gen_min_s32, gen_min_u32 },
};
genfn = fns[size][u];
break;
}
case 0xe: /* SABD, UABD */
case 0xf: /* SABA, UABA */
{
static NeonGenTwoOpFn * const fns[3][2] = {
{ gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
{ gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
{ gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
};
genfn = fns[size][u];
break;
}
case 0x10: /* ADD, SUB */
{
static NeonGenTwoOpFn * const fns[3][2] = {
{ gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
{ gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
{ tcg_gen_add_i32, tcg_gen_sub_i32 },
};
genfn = fns[size][u];
break;
}
case 0x11: /* CMTST, CMEQ */
{
static NeonGenTwoOpFn * const fns[3][2] = {
{ gen_helper_neon_tst_u8, gen_helper_neon_ceq_u8 },
{ gen_helper_neon_tst_u16, gen_helper_neon_ceq_u16 },
{ gen_helper_neon_tst_u32, gen_helper_neon_ceq_u32 },
};
genfn = fns[size][u];
break;
}
case 0x13: /* MUL, PMUL */
if (u) {
/* PMUL */
assert(size == 0);
genfn = gen_helper_neon_mul_p8;
break;
}
/* fall through : MUL */
case 0x12: /* MLA, MLS */
{
static NeonGenTwoOpFn * const fns[3] = {
gen_helper_neon_mul_u8,
gen_helper_neon_mul_u16,
tcg_gen_mul_i32,
};
genfn = fns[size];
break;
}
case 0x16: /* SQDMULH, SQRDMULH */
{
static NeonGenTwoOpEnvFn * const fns[2][2] = {
{ gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
{ gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
};
assert(size == 1 || size == 2);
genenvfn = fns[size - 1][u];
break;
}
default:
g_assert_not_reached();
}
if (genenvfn) {
genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
} else {
genfn(tcg_res, tcg_op1, tcg_op2);
}
if (opcode == 0xf || opcode == 0x12) {
/* SABA, UABA, MLA, MLS: accumulating ops */
static NeonGenTwoOpFn * const fns[3][2] = {
{ gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
{ gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
{ tcg_gen_add_i32, tcg_gen_sub_i32 },
};
bool is_sub = (opcode == 0x12 && u); /* MLS */
genfn = fns[size][is_sub];
read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
genfn(tcg_res, tcg_op1, tcg_res);
}
write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
tcg_temp_free_i32(tcg_res);
tcg_temp_free_i32(tcg_op1);
tcg_temp_free_i32(tcg_op2);
}
}
if (!is_q) {
clear_vec_high(s, rd);
}
}
| false |
qemu
|
220ad4ca846d8e0734dd2d2af38c61a6f5436d66
|
13,804 |
static ssize_t proxy_preadv(FsContext *ctx, V9fsFidOpenState *fs,
const struct iovec *iov,
int iovcnt, off_t offset)
{
ssize_t ret;
#ifdef CONFIG_PREADV
ret = preadv(fs->fd, iov, iovcnt, offset);
#else
ret = lseek(fs->fd, offset, SEEK_SET);
if (ret >= 0) {
ret = readv(fs->fd, iov, iovcnt);
}
#endif
return ret;
}
| false |
qemu
|
494a8ebe713055d3946183f4b395f85a18b43e9e
|
13,805 |
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
hwaddr *xlat, hwaddr *plen,
bool is_write)
{
IOMMUTLBEntry iotlb;
MemoryRegionSection *section;
MemoryRegion *mr;
hwaddr len = *plen;
for (;;) {
section = address_space_translate_internal(as->dispatch, addr, &addr, &len, true);
mr = section->mr;
if (!mr->iommu_ops) {
break;
}
iotlb = mr->iommu_ops->translate(mr, addr);
addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
| (addr & iotlb.addr_mask));
len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
if (!(iotlb.perm & (1 << is_write))) {
mr = &io_mem_unassigned;
break;
}
as = iotlb.target_as;
}
*plen = len;
*xlat = addr;
return mr;
}
| false |
qemu
|
a87f39543a9259f671c5413723311180ee2ad2a8
|
13,806 |
static void shpc_set_status(SHPCDevice *shpc,
int slot, uint8_t value, uint16_t msk)
{
uint8_t *status = shpc->config + SHPC_SLOT_STATUS(slot);
pci_word_test_and_clear_mask(status, msk);
pci_word_test_and_set_mask(status, value << (ffs(msk) - 1));
}
| false |
qemu
|
786a4ea82ec9c87e3a895cf41081029b285a5fe5
|
13,807 |
static void prom_set(uint32_t* prom_buf, int index, const char *string, ...)
{
va_list ap;
int32_t table_addr;
if (index >= ENVP_NB_ENTRIES)
return;
if (string == NULL) {
prom_buf[index] = 0;
return;
}
table_addr = sizeof(int32_t) * ENVP_NB_ENTRIES + index * ENVP_ENTRY_SIZE;
prom_buf[index] = tswap32(ENVP_ADDR + table_addr);
va_start(ap, string);
vsnprintf((char *)prom_buf + table_addr, ENVP_ENTRY_SIZE, string, ap);
va_end(ap);
}
| false |
qemu
|
8b7968f7c4ac8c07cad6a1a0891d38cf239a2839
|
13,808 |
DriveInfo *drive_get(BlockInterfaceType type, int bus, int unit)
{
DriveInfo *dinfo;
/* seek interface, bus and unit */
TAILQ_FOREACH(dinfo, &drives, next) {
if (dinfo->type == type &&
dinfo->bus == bus &&
dinfo->unit == unit)
return dinfo;
}
return NULL;
}
| false |
qemu
|
72cf2d4f0e181d0d3a3122e04129c58a95da713e
|
13,810 |
static void do_rematrixing(AC3DecodeContext *ctx)
{
ac3_audio_block *ab = &ctx->audio_block;
uint8_t bnd1 = 13, bnd2 = 25, bnd3 = 37, bnd4 = 61;
uint8_t bndend;
bndend = FFMIN(ab->endmant[0], ab->endmant[1]);
if (ab->rematflg & 1)
_do_rematrixing(ctx, bnd1, bnd2);
if (ab->rematflg & 2)
_do_rematrixing(ctx, bnd2, bnd3);
if (ab->rematflg & 4) {
if (ab->cplbegf > 0 && ab->cplbegf <= 2 && (ab->flags & AC3_AB_CPLINU))
_do_rematrixing(ctx, bnd3, bndend);
else {
_do_rematrixing(ctx, bnd3, bnd4);
if (ab->rematflg & 8)
_do_rematrixing(ctx, bnd4, bndend);
}
}
}
| false |
FFmpeg
|
0058584580b87feb47898e60e4b80c7f425882ad
|
13,811 |
static void acpi_table_install(const char unsigned *blob, size_t bloblen,
bool has_header,
const struct AcpiTableOptions *hdrs,
Error **errp)
{
size_t body_start;
const char unsigned *hdr_src;
size_t body_size, acpi_payload_size;
struct acpi_table_header *ext_hdr;
unsigned changed_fields;
/* Calculate where the ACPI table body starts within the blob, plus where
* to copy the ACPI table header from.
*/
if (has_header) {
/* _length | ACPI header in blob | blob body
* ^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^
* ACPI_TABLE_PFX_SIZE sizeof dfl_hdr body_size
* == body_start
*
* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* acpi_payload_size == bloblen
*/
body_start = sizeof dfl_hdr;
if (bloblen < body_start) {
error_setg(errp, "ACPI table claiming to have header is too "
"short, available: %zu, expected: %zu", bloblen,
body_start);
return;
}
hdr_src = blob;
} else {
/* _length | ACPI header in template | blob body
* ^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^
* ACPI_TABLE_PFX_SIZE sizeof dfl_hdr body_size
* == bloblen
*
* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* acpi_payload_size
*/
body_start = 0;
hdr_src = dfl_hdr;
}
body_size = bloblen - body_start;
acpi_payload_size = sizeof dfl_hdr + body_size;
if (acpi_payload_size > UINT16_MAX) {
error_setg(errp, "ACPI table too big, requested: %zu, max: %u",
acpi_payload_size, (unsigned)UINT16_MAX);
return;
}
/* We won't fail from here on. Initialize / extend the globals. */
if (acpi_tables == NULL) {
acpi_tables_len = sizeof(uint16_t);
acpi_tables = g_malloc0(acpi_tables_len);
}
acpi_tables = g_realloc(acpi_tables, acpi_tables_len +
ACPI_TABLE_PFX_SIZE +
sizeof dfl_hdr + body_size);
ext_hdr = (struct acpi_table_header *)(acpi_tables + acpi_tables_len);
acpi_tables_len += ACPI_TABLE_PFX_SIZE;
memcpy(acpi_tables + acpi_tables_len, hdr_src, sizeof dfl_hdr);
acpi_tables_len += sizeof dfl_hdr;
if (blob != NULL) {
memcpy(acpi_tables + acpi_tables_len, blob + body_start, body_size);
acpi_tables_len += body_size;
}
/* increase number of tables */
stw_le_p(acpi_tables, lduw_le_p(acpi_tables) + 1u);
/* Update the header fields. The strings need not be NUL-terminated. */
changed_fields = 0;
ext_hdr->_length = cpu_to_le16(acpi_payload_size);
if (hdrs->has_sig) {
strncpy(ext_hdr->sig, hdrs->sig, sizeof ext_hdr->sig);
++changed_fields;
}
if (has_header && le32_to_cpu(ext_hdr->length) != acpi_payload_size) {
fprintf(stderr,
"warning: ACPI table has wrong length, header says "
"%" PRIu32 ", actual size %zu bytes\n",
le32_to_cpu(ext_hdr->length), acpi_payload_size);
}
ext_hdr->length = cpu_to_le32(acpi_payload_size);
if (hdrs->has_rev) {
ext_hdr->revision = hdrs->rev;
++changed_fields;
}
ext_hdr->checksum = 0;
if (hdrs->has_oem_id) {
strncpy(ext_hdr->oem_id, hdrs->oem_id, sizeof ext_hdr->oem_id);
++changed_fields;
}
if (hdrs->has_oem_table_id) {
strncpy(ext_hdr->oem_table_id, hdrs->oem_table_id,
sizeof ext_hdr->oem_table_id);
++changed_fields;
}
if (hdrs->has_oem_rev) {
ext_hdr->oem_revision = cpu_to_le32(hdrs->oem_rev);
++changed_fields;
}
if (hdrs->has_asl_compiler_id) {
strncpy(ext_hdr->asl_compiler_id, hdrs->asl_compiler_id,
sizeof ext_hdr->asl_compiler_id);
++changed_fields;
}
if (hdrs->has_asl_compiler_rev) {
ext_hdr->asl_compiler_revision = cpu_to_le32(hdrs->asl_compiler_rev);
++changed_fields;
}
if (!has_header && changed_fields == 0) {
warn_report("ACPI table: no headers are specified");
}
/* recalculate checksum */
ext_hdr->checksum = acpi_checksum((const char unsigned *)ext_hdr +
ACPI_TABLE_PFX_SIZE, acpi_payload_size);
}
| false |
qemu
|
8297be80f7cf71e09617669a8bd8b2836dcfd4c3
|
13,813 |
static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
sPAPRDRConnectorType drc_type,
union drc_identifier *drc_id)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
struct hp_log_full *new_hp;
struct rtas_error_log *hdr;
struct rtas_event_log_v6 *v6hdr;
struct rtas_event_log_v6_maina *maina;
struct rtas_event_log_v6_mainb *mainb;
struct rtas_event_log_v6_hp *hp;
new_hp = g_malloc0(sizeof(struct hp_log_full));
hdr = &new_hp->hdr;
v6hdr = &new_hp->v6hdr;
maina = &new_hp->maina;
mainb = &new_hp->mainb;
hp = &new_hp->hp;
hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6
| RTAS_LOG_SEVERITY_EVENT
| RTAS_LOG_DISPOSITION_NOT_RECOVERED
| RTAS_LOG_OPTIONAL_PART_PRESENT
| RTAS_LOG_INITIATOR_HOTPLUG
| RTAS_LOG_TYPE_HOTPLUG);
hdr->extended_length = cpu_to_be32(sizeof(*new_hp)
- sizeof(new_hp->hdr));
spapr_init_v6hdr(v6hdr);
spapr_init_maina(maina, 3 /* Main-A, Main-B, HP */);
mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
mainb->subsystem_id = 0x80; /* External environment */
mainb->event_severity = 0x00; /* Informational / non-error */
mainb->event_subtype = 0x00; /* Normal shutdown */
hp->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_HOTPLUG);
hp->hdr.section_length = cpu_to_be16(sizeof(*hp));
hp->hdr.section_version = 1; /* includes extended modifier */
hp->hotplug_action = hp_action;
hp->hotplug_identifier = hp_id;
switch (drc_type) {
case SPAPR_DR_CONNECTOR_TYPE_PCI:
hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI;
if (hp->hotplug_action == RTAS_LOG_V6_HP_ACTION_ADD) {
spapr_hotplug_set_signalled(drc_id->index);
}
break;
case SPAPR_DR_CONNECTOR_TYPE_LMB:
hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_MEMORY;
break;
case SPAPR_DR_CONNECTOR_TYPE_CPU:
hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_CPU;
break;
default:
/* we shouldn't be signaling hotplug events for resources
* that don't support them
*/
g_assert(false);
return;
}
if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT) {
hp->drc_id.count = cpu_to_be32(drc_id->count);
} else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_INDEX) {
hp->drc_id.index = cpu_to_be32(drc_id->index);
} else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT_INDEXED) {
/* we should not be using count_indexed value unless the guest
* supports dedicated hotplug event source
*/
g_assert(spapr_ovec_test(spapr->ov5_cas, OV5_HP_EVT));
hp->drc_id.count_indexed.count =
cpu_to_be32(drc_id->count_indexed.count);
hp->drc_id.count_indexed.index =
cpu_to_be32(drc_id->count_indexed.index);
}
rtas_event_log_queue(RTAS_LOG_TYPE_HOTPLUG, new_hp, true);
qemu_irq_pulse(xics_get_qirq(XICS_FABRIC(spapr),
rtas_event_log_to_irq(spapr,
RTAS_LOG_TYPE_HOTPLUG)));
}
| false |
qemu
|
bff3063837a76b37a4bbbfe614324ca38e859f2b
|
13,814 |
void qemu_aio_flush(void)
{
AioHandler *node;
int ret;
do {
ret = 0;
/*
* If there are pending emulated aio start them now so flush
* will be able to return 1.
*/
qemu_aio_wait();
QLIST_FOREACH(node, &aio_handlers, node) {
if (node->io_flush) {
ret |= node->io_flush(node->opaque);
}
}
} while (qemu_bh_poll() || ret > 0);
}
| false |
qemu
|
bcdc18578d5b41180db2e17baa7563c5f05b39ee
|
13,815 |
static void gen_srs(DisasContext *s,
uint32_t mode, uint32_t amode, bool writeback)
{
int32_t offset;
TCGv_i32 addr = tcg_temp_new_i32();
TCGv_i32 tmp = tcg_const_i32(mode);
gen_helper_get_r13_banked(addr, cpu_env, tmp);
tcg_temp_free_i32(tmp);
switch (amode) {
case 0: /* DA */
offset = -4;
break;
case 1: /* IA */
offset = 0;
break;
case 2: /* DB */
offset = -8;
break;
case 3: /* IB */
offset = 4;
break;
default:
abort();
}
tcg_gen_addi_i32(addr, addr, offset);
tmp = load_reg(s, 14);
gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tmp = load_cpu_field(spsr);
tcg_gen_addi_i32(addr, addr, 4);
gen_aa32_st32(tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
if (writeback) {
switch (amode) {
case 0:
offset = -8;
break;
case 1:
offset = 4;
break;
case 2:
offset = -4;
break;
case 3:
offset = 0;
break;
default:
abort();
}
tcg_gen_addi_i32(addr, addr, offset);
tmp = tcg_const_i32(mode);
gen_helper_set_r13_banked(cpu_env, tmp, addr);
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
}
| false |
qemu
|
cbc0326b6fb905f80b7cef85b24571f7ebb62077
|
13,816 |
void kvm_mips_reset_vcpu(MIPSCPU *cpu)
{
CPUMIPSState *env = &cpu->env;
if (env->CP0_Config1 & (1 << CP0C1_FP)) {
fprintf(stderr, "Warning: FPU not supported with KVM, disabling\n");
env->CP0_Config1 &= ~(1 << CP0C1_FP);
}
DPRINTF("%s\n", __func__);
}
| false |
qemu
|
152db36ae63c70adc95afc3228f858ef6369519a
|
13,817 |
static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
BlockDriverAmendStatusCB *status_cb,
void *cb_opaque)
{
BDRVQcow2State *s = bs->opaque;
int old_version = s->qcow_version, new_version = old_version;
uint64_t new_size = 0;
const char *backing_file = NULL, *backing_format = NULL;
bool lazy_refcounts = s->use_lazy_refcounts;
const char *compat = NULL;
uint64_t cluster_size = s->cluster_size;
bool encrypt;
int refcount_bits = s->refcount_bits;
Error *local_err = NULL;
int ret;
QemuOptDesc *desc = opts->list->desc;
Qcow2AmendHelperCBInfo helper_cb_info;
while (desc && desc->name) {
if (!qemu_opt_find(opts, desc->name)) {
/* only change explicitly defined options */
desc++;
continue;
}
if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) {
compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL);
if (!compat) {
/* preserve default */
} else if (!strcmp(compat, "0.10")) {
new_version = 2;
} else if (!strcmp(compat, "1.1")) {
new_version = 3;
} else {
error_report("Unknown compatibility level %s", compat);
return -EINVAL;
}
} else if (!strcmp(desc->name, BLOCK_OPT_PREALLOC)) {
error_report("Cannot change preallocation mode");
return -ENOTSUP;
} else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) {
new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
} else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) {
backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
} else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) {
backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
} else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT)) {
encrypt = qemu_opt_get_bool(opts, BLOCK_OPT_ENCRYPT,
!!s->cipher);
if (encrypt != !!s->cipher) {
error_report("Changing the encryption flag is not supported");
return -ENOTSUP;
}
} else if (!strcmp(desc->name, BLOCK_OPT_CLUSTER_SIZE)) {
cluster_size = qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE,
cluster_size);
if (cluster_size != s->cluster_size) {
error_report("Changing the cluster size is not supported");
return -ENOTSUP;
}
} else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS,
lazy_refcounts);
} else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) {
refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS,
refcount_bits);
if (refcount_bits <= 0 || refcount_bits > 64 ||
!is_power_of_2(refcount_bits))
{
error_report("Refcount width must be a power of two and may "
"not exceed 64 bits");
return -EINVAL;
}
} else {
/* if this point is reached, a new option was probably added
 * without being handled here */
abort();
}
desc++;
}
helper_cb_info = (Qcow2AmendHelperCBInfo){
.original_status_cb = status_cb,
.original_cb_opaque = cb_opaque,
.total_operations = (new_version < old_version)
+ (s->refcount_bits != refcount_bits)
};
/* Upgrade first (some features may require compat=1.1) */
if (new_version > old_version) {
s->qcow_version = new_version;
ret = qcow2_update_header(bs);
if (ret < 0) {
s->qcow_version = old_version;
return ret;
}
}
if (s->refcount_bits != refcount_bits) {
int refcount_order = ctz32(refcount_bits);
if (new_version < 3 && refcount_bits != 16) {
error_report("Different refcount widths than 16 bits require "
"compatibility level 1.1 or above (use compat=1.1 or "
"greater)");
return -EINVAL;
}
helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER;
ret = qcow2_change_refcount_order(bs, refcount_order,
&qcow2_amend_helper_cb,
&helper_cb_info, &local_err);
if (ret < 0) {
error_report_err(local_err);
return ret;
}
}
if (backing_file || backing_format) {
ret = qcow2_change_backing_file(bs,
backing_file ?: s->image_backing_file,
backing_format ?: s->image_backing_format);
if (ret < 0) {
return ret;
}
}
if (s->use_lazy_refcounts != lazy_refcounts) {
if (lazy_refcounts) {
if (new_version < 3) {
error_report("Lazy refcounts only supported with compatibility "
"level 1.1 and above (use compat=1.1 or greater)");
return -EINVAL;
}
s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
ret = qcow2_update_header(bs);
if (ret < 0) {
s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
return ret;
}
s->use_lazy_refcounts = true;
} else {
/* make image clean first */
ret = qcow2_mark_clean(bs);
if (ret < 0) {
return ret;
}
/* now disallow lazy refcounts */
s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
ret = qcow2_update_header(bs);
if (ret < 0) {
s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
return ret;
}
s->use_lazy_refcounts = false;
}
}
if (new_size) {
BlockBackend *blk = blk_new(BLK_PERM_RESIZE, BLK_PERM_ALL);
ret = blk_insert_bs(blk, bs, &local_err);
if (ret < 0) {
error_report_err(local_err);
blk_unref(blk);
return ret;
}
ret = blk_truncate(blk, new_size, &local_err);
blk_unref(blk);
if (ret < 0) {
error_report_err(local_err);
return ret;
}
}
/* Downgrade last (so unsupported features can be removed before) */
if (new_version < old_version) {
helper_cb_info.current_operation = QCOW2_DOWNGRADING;
ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb,
&helper_cb_info);
if (ret < 0) {
return ret;
}
}
return 0;
}
| false |
qemu
|
b25b387fa5928e516cb2c9e7fde68e958bd7e50a
|
13,818 |
static void bmdma_map(PCIDevice *pci_dev, int region_num,
pcibus_t addr, pcibus_t size, int type)
{
PCIIDEState *d = DO_UPCAST(PCIIDEState, dev, pci_dev);
int i;
for(i = 0;i < 2; i++) {
BMDMAState *bm = &d->bmdma[i];
d->bus[i].bmdma = bm;
bm->bus = d->bus+i;
bm->pci_dev = d;
qemu_add_vm_change_state_handler(ide_dma_restart_cb, bm);
register_ioport_write(addr, 1, 1, bmdma_cmd_writeb, bm);
register_ioport_write(addr + 1, 3, 1, bmdma_writeb, bm);
register_ioport_read(addr, 4, 1, bmdma_readb, bm);
register_ioport_write(addr + 4, 4, 1, bmdma_addr_writeb, bm);
register_ioport_read(addr + 4, 4, 1, bmdma_addr_readb, bm);
register_ioport_write(addr + 4, 4, 2, bmdma_addr_writew, bm);
register_ioport_read(addr + 4, 4, 2, bmdma_addr_readw, bm);
register_ioport_write(addr + 4, 4, 4, bmdma_addr_writel, bm);
register_ioport_read(addr + 4, 4, 4, bmdma_addr_readl, bm);
addr += 8;
}
}
| false |
qemu
|
70ae65f5d91462e1905a53236179fde21cda3a2f
|
13,819 |
static void pcnet_transmit(PCNetState *s)
{
target_phys_addr_t xmit_cxda = 0;
int count = CSR_XMTRL(s)-1;
s->xmit_pos = -1;
if (!CSR_TXON(s)) {
s->csr[0] &= ~0x0008;
return;
}
s->tx_busy = 1;
txagain:
if (pcnet_tdte_poll(s)) {
struct pcnet_TMD tmd;
TMDLOAD(&tmd, PHYSADDR(s,CSR_CXDA(s)));
#ifdef PCNET_DEBUG_TMD
printf(" TMDLOAD 0x%08x\n", PHYSADDR(s,CSR_CXDA(s)));
PRINT_TMD(&tmd);
#endif
if (GET_FIELD(tmd.status, TMDS, STP)) {
s->xmit_pos = 0;
xmit_cxda = PHYSADDR(s,CSR_CXDA(s));
}
if (!GET_FIELD(tmd.status, TMDS, ENP)) {
int bcnt = 4096 - GET_FIELD(tmd.length, TMDL, BCNT);
s->phys_mem_read(s->dma_opaque, PHYSADDR(s, tmd.tbadr),
s->buffer + s->xmit_pos, bcnt, CSR_BSWP(s));
s->xmit_pos += bcnt;
} else if (s->xmit_pos >= 0) {
int bcnt = 4096 - GET_FIELD(tmd.length, TMDL, BCNT);
s->phys_mem_read(s->dma_opaque, PHYSADDR(s, tmd.tbadr),
s->buffer + s->xmit_pos, bcnt, CSR_BSWP(s));
s->xmit_pos += bcnt;
#ifdef PCNET_DEBUG
printf("pcnet_transmit size=%d\n", s->xmit_pos);
#endif
if (CSR_LOOP(s))
pcnet_receive(s, s->buffer, s->xmit_pos);
else
if (s->vc)
qemu_send_packet(s->vc, s->buffer, s->xmit_pos);
s->csr[0] &= ~0x0008; /* clear TDMD */
s->csr[4] |= 0x0004; /* set TXSTRT */
s->xmit_pos = -1;
}
SET_FIELD(&tmd.status, TMDS, OWN, 0);
TMDSTORE(&tmd, PHYSADDR(s,CSR_CXDA(s)));
if (!CSR_TOKINTD(s) || (CSR_LTINTEN(s) && GET_FIELD(tmd.status, TMDS, LTINT)))
s->csr[0] |= 0x0200; /* set TINT */
if (CSR_XMTRC(s)<=1)
CSR_XMTRC(s) = CSR_XMTRL(s);
else
CSR_XMTRC(s)--;
if (count--)
goto txagain;
} else
if (s->xmit_pos >= 0) {
struct pcnet_TMD tmd;
TMDLOAD(&tmd, PHYSADDR(s,xmit_cxda));
SET_FIELD(&tmd.misc, TMDM, BUFF, 1);
SET_FIELD(&tmd.misc, TMDM, UFLO, 1);
SET_FIELD(&tmd.status, TMDS, ERR, 1);
SET_FIELD(&tmd.status, TMDS, OWN, 0);
TMDSTORE(&tmd, PHYSADDR(s,xmit_cxda));
s->csr[0] |= 0x0200; /* set TINT */
if (!CSR_DXSUFLO(s)) {
s->csr[0] &= ~0x0010;
} else
if (count--)
goto txagain;
}
s->tx_busy = 0;
}
| false |
qemu
|
89b190a2bb82b1226b5cc05846e9a063c0d0efa3
|
13,821 |
static av_cold int decoder_init(AVCodecContext * avctx)
{
G729Context* ctx = avctx->priv_data;
int i,k;
if (avctx->channels != 1) {
av_log(avctx, AV_LOG_ERROR, "Only mono sound is supported (requested channels: %d).\n", avctx->channels);
return AVERROR(EINVAL);
}
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
/* Both the 8 kbit/s and 6.4 kbit/s modes use two subframes per frame. */
avctx->frame_size = SUBFRAME_SIZE << 1;
ctx->gain_coeff = 16384; // 1.0 in (1.14)
for (k = 0; k < MA_NP + 1; k++) {
ctx->past_quantizer_outputs[k] = ctx->past_quantizer_output_buf[k];
for (i = 1; i < 11; i++)
ctx->past_quantizer_outputs[k][i - 1] = (18717 * i) >> 3;
}
ctx->lsp[0] = ctx->lsp_buf[0];
ctx->lsp[1] = ctx->lsp_buf[1];
memcpy(ctx->lsp[0], lsp_init, 10 * sizeof(int16_t));
ctx->exc = &ctx->exc_base[PITCH_DELAY_MAX+INTERPOL_LEN];
/* random seed initialization */
ctx->rand_value = 21845;
/* quantized prediction error */
for(i=0; i<4; i++)
ctx->quant_energy[i] = -14336; // -14 in (5.10)
avctx->dsp_mask= ~AV_CPU_FLAG_FORCE;
dsputil_init(&ctx->dsp, avctx);
return 0;
}
| false |
FFmpeg
|
c3299726874829e8eb7a937c247956ab3c2ccae6
|
13,822 |
static void set_enum(Object *obj, Visitor *v, void *opaque,
const char *name, Error **errp)
{
DeviceState *dev = DEVICE(obj);
Property *prop = opaque;
int *ptr = qdev_get_prop_ptr(dev, prop);
if (dev->state != DEV_STATE_CREATED) {
error_set(errp, QERR_PERMISSION_DENIED);
return;
}
visit_type_enum(v, ptr, prop->info->enum_table,
prop->info->name, prop->name, errp);
}
| false |
qemu
|
d4d34b0d3f5af5c8e09980da0de2eebe9a27dc71
|
13,823 |
static void imx_timerg_write(void *opaque, target_phys_addr_t offset,
uint64_t value, unsigned size)
{
IMXTimerGState *s = (IMXTimerGState *)opaque;
DPRINTF("g-write(offset=%x, value = 0x%x)\n", (unsigned int)offset >> 2,
(unsigned int)value);
switch (offset >> 2) {
case 0: {
uint32_t oldcr = s->cr;
/* CR */
if (value & GPT_CR_SWR) { /* force reset */
value &= ~GPT_CR_SWR;
imx_timerg_reset(&s->busdev.qdev);
imx_timerg_update(s);
}
s->cr = value & ~0x7c00;
imx_timerg_set_freq(s);
if ((oldcr ^ value) & GPT_CR_EN) {
if (value & GPT_CR_EN) {
if (value & GPT_CR_ENMOD) {
ptimer_set_count(s->timer, s->ocr1);
s->cnt = 0;
}
ptimer_run(s->timer,
(value & GPT_CR_FRR) && (s->ocr1 != TIMER_MAX));
} else {
ptimer_stop(s->timer);
}
}
return;
}
case 1: /* Prescaler */
s->pr = value & 0xfff;
imx_timerg_set_freq(s);
return;
case 2: /* SR */
/*
* No point in implementing the status register bits to do with
* external interrupt sources.
*/
value &= GPT_SR_OF1 | GPT_SR_ROV;
s->sr &= ~value;
imx_timerg_update(s);
return;
case 3: /* IR -- interrupt register */
s->ir = value & 0x3f;
imx_timerg_update(s);
return;
case 4: /* OCR1 -- output compare register */
/* In non-freerun mode, reset count when this register is written */
if (!(s->cr & GPT_CR_FRR)) {
s->waiting_rov = 0;
ptimer_set_limit(s->timer, value, 1);
} else {
imx_timerg_update_counts(s);
if (value > s->cnt) {
s->waiting_rov = 0;
imx_timerg_reload(s, value);
} else {
s->waiting_rov = 1;
imx_timerg_reload(s, TIMER_MAX - s->cnt);
}
}
s->ocr1 = value;
return;
default:
IPRINTF("imx_timerg_write: Bad offset %x\n",
(int)offset >> 2);
}
}
| false |
qemu
|
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
|
13,824 |
GuestFileSeek *qmp_guest_file_seek(int64_t handle, int64_t offset,
int64_t whence, Error **errp)
{
GuestFileHandle *gfh;
GuestFileSeek *seek_data;
HANDLE fh;
LARGE_INTEGER new_pos, off_pos;
off_pos.QuadPart = offset;
BOOL res;
gfh = guest_file_handle_find(handle, errp);
if (!gfh) {
return NULL;
}
fh = gfh->fh;
res = SetFilePointerEx(fh, off_pos, &new_pos, whence);
if (!res) {
error_setg_win32(errp, GetLastError(), "failed to seek file");
return NULL;
}
seek_data = g_new0(GuestFileSeek, 1);
seek_data->position = new_pos.QuadPart;
return seek_data;
}
| false |
qemu
|
0a982b1bf3953dc8640c4d6e619fb1132ebbebc3
|
13,825 |
static void add_to_iovec(QEMUFile *f, const uint8_t *buf, int size)
{
/* check for adjacent buffers and coalesce them */
if (f->iovcnt > 0 && buf == f->iov[f->iovcnt - 1].iov_base +
f->iov[f->iovcnt - 1].iov_len) {
f->iov[f->iovcnt - 1].iov_len += size;
} else {
f->iov[f->iovcnt].iov_base = (uint8_t *)buf;
f->iov[f->iovcnt++].iov_len = size;
}
if (f->buf_index >= IO_BUF_SIZE || f->iovcnt >= MAX_IOV_SIZE) {
qemu_fflush(f);
}
}
| false |
qemu
|
4d1172472cdf28a444321ca8b165ce7326eb919e
|
13,826 |
static void qapi_clone_type_str(Visitor *v, const char *name, char **obj,
Error **errp)
{
QapiCloneVisitor *qcv = to_qcv(v);
assert(qcv->depth);
/*
* Pointer was already cloned by g_memdup; create fresh copy.
* Note that as long as qmp-output-visitor accepts NULL instead of
* "", then we must do likewise. However, we want to obey the
* input visitor semantics of never producing NULL when the empty
* string is intended.
*/
*obj = g_strdup(*obj ?: "");
}
| false |
qemu
|
b3db211f3c80bb996a704d665fe275619f728bd4
|
13,827 |
static int eth_can_receive(void *opaque)
{
return 1;
}
| false |
qemu
|
e3f5ec2b5e92706e3b807059f79b1fb5d936e567
|
13,828 |
static inline void terminate_compression_threads(void)
{
int idx, thread_count;
thread_count = migrate_compress_threads();
quit_comp_thread = true;
for (idx = 0; idx < thread_count; idx++) {
qemu_mutex_lock(&comp_param[idx].mutex);
qemu_cond_signal(&comp_param[idx].cond);
qemu_mutex_unlock(&comp_param[idx].mutex);
}
}
| false |
qemu
|
90e56fb46d0a7add88ed463efa4e723a6238f692
|
13,830 |
static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
{
VP56RangeCoder *c = &s->c;
int header_size, hscale, vscale, ret;
int width = s->avctx->width;
int height = s->avctx->height;
s->keyframe = !(buf[0] & 1);
s->profile = (buf[0]>>1) & 7;
s->invisible = !(buf[0] & 0x10);
header_size = AV_RL24(buf) >> 5;
buf += 3;
buf_size -= 3;
if (s->profile > 3)
av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
if (!s->profile)
memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab,
sizeof(s->put_pixels_tab));
else // profiles 1-3 use bilinear; 4+ aren't defined, so use bilinear for those too
memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab,
sizeof(s->put_pixels_tab));
if (header_size > buf_size - 7 * s->keyframe) {
av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
return AVERROR_INVALIDDATA;
}
if (s->keyframe) {
if (AV_RL24(buf) != 0x2a019d) {
av_log(s->avctx, AV_LOG_ERROR,
"Invalid start code 0x%x\n", AV_RL24(buf));
return AVERROR_INVALIDDATA;
}
width = AV_RL16(buf + 3) & 0x3fff;
height = AV_RL16(buf + 5) & 0x3fff;
hscale = buf[4] >> 6;
vscale = buf[6] >> 6;
buf += 7;
buf_size -= 7;
if (hscale || vscale)
avpriv_request_sample(s->avctx, "Upscaling");
s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
vp78_reset_probability_tables(s);
memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter,
sizeof(s->prob->pred16x16));
memcpy(s->prob->pred8x8c, vp8_pred8x8c_prob_inter,
sizeof(s->prob->pred8x8c));
memcpy(s->prob->mvc, vp8_mv_default_prob,
sizeof(s->prob->mvc));
memset(&s->segmentation, 0, sizeof(s->segmentation));
memset(&s->lf_delta, 0, sizeof(s->lf_delta));
}
ff_vp56_init_range_decoder(c, buf, header_size);
buf += header_size;
buf_size -= header_size;
if (s->keyframe) {
if (vp8_rac_get(c))
av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
vp8_rac_get(c); // whether we can skip clamping in dsp functions
}
if ((s->segmentation.enabled = vp8_rac_get(c)))
parse_segment_info(s);
else
s->segmentation.update_map = 0; // FIXME: move this to some init function?
s->filter.simple = vp8_rac_get(c);
s->filter.level = vp8_rac_get_uint(c, 6);
s->filter.sharpness = vp8_rac_get_uint(c, 3);
if ((s->lf_delta.enabled = vp8_rac_get(c)))
if (vp8_rac_get(c))
update_lf_deltas(s);
if (setup_partitions(s, buf, buf_size)) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
return AVERROR_INVALIDDATA;
}
if (!s->macroblocks_base || /* first frame */
width != s->avctx->width || height != s->avctx->height)
if ((ret = vp8_update_dimensions(s, width, height)) < 0)
return ret;
get_quants(s);
if (!s->keyframe) {
update_refs(s);
s->sign_bias[VP56_FRAME_GOLDEN] = vp8_rac_get(c);
s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
}
// if we aren't saving this frame's probabilities for future frames,
// make a copy of the current probabilities
if (!(s->update_probabilities = vp8_rac_get(c)))
s->prob[1] = s->prob[0];
s->update_last = s->keyframe || vp8_rac_get(c);
vp78_update_probability_tables(s);
if ((s->mbskip_enabled = vp8_rac_get(c)))
s->prob->mbskip = vp8_rac_get_uint(c, 8);
if (!s->keyframe) {
s->prob->intra = vp8_rac_get_uint(c, 8);
s->prob->last = vp8_rac_get_uint(c, 8);
s->prob->golden = vp8_rac_get_uint(c, 8);
vp78_update_pred16x16_pred8x8_mvc_probabilities(s, VP8_MVC_SIZE);
}
return 0;
}
| false |
FFmpeg
|
65875a8b3b079752da25a61ec188d2e3d90a569f
|
13,831 |
static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWidth, long srcHeight, long srcStride, long dstStride)
{
long x,y;
dst[0]= src[0];
// first line
for (x=0; x<srcWidth-1; x++) {
dst[2*x+1]= (3*src[x] + src[x+1])>>2;
dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
}
dst[2*srcWidth-1]= src[srcWidth-1];
dst+= dstStride;
for (y=1; y<srcHeight; y++) {
#if COMPILE_TEMPLATE_MMX2 || COMPILE_TEMPLATE_AMD3DNOW
const x86_reg mmxSize= srcWidth&~15;
__asm__ volatile(
"mov %4, %%"REG_a" \n\t"
"movq "MANGLE(mmx_ff)", %%mm0 \n\t"
"movq (%0, %%"REG_a"), %%mm4 \n\t"
"movq %%mm4, %%mm2 \n\t"
"psllq $8, %%mm4 \n\t"
"pand %%mm0, %%mm2 \n\t"
"por %%mm2, %%mm4 \n\t"
"movq (%1, %%"REG_a"), %%mm5 \n\t"
"movq %%mm5, %%mm3 \n\t"
"psllq $8, %%mm5 \n\t"
"pand %%mm0, %%mm3 \n\t"
"por %%mm3, %%mm5 \n\t"
"1: \n\t"
"movq (%0, %%"REG_a"), %%mm0 \n\t"
"movq (%1, %%"REG_a"), %%mm1 \n\t"
"movq 1(%0, %%"REG_a"), %%mm2 \n\t"
"movq 1(%1, %%"REG_a"), %%mm3 \n\t"
PAVGB" %%mm0, %%mm5 \n\t"
PAVGB" %%mm0, %%mm3 \n\t"
PAVGB" %%mm0, %%mm5 \n\t"
PAVGB" %%mm0, %%mm3 \n\t"
PAVGB" %%mm1, %%mm4 \n\t"
PAVGB" %%mm1, %%mm2 \n\t"
PAVGB" %%mm1, %%mm4 \n\t"
PAVGB" %%mm1, %%mm2 \n\t"
"movq %%mm5, %%mm7 \n\t"
"movq %%mm4, %%mm6 \n\t"
"punpcklbw %%mm3, %%mm5 \n\t"
"punpckhbw %%mm3, %%mm7 \n\t"
"punpcklbw %%mm2, %%mm4 \n\t"
"punpckhbw %%mm2, %%mm6 \n\t"
#if 1
MOVNTQ" %%mm5, (%2, %%"REG_a", 2) \n\t"
MOVNTQ" %%mm7, 8(%2, %%"REG_a", 2) \n\t"
MOVNTQ" %%mm4, (%3, %%"REG_a", 2) \n\t"
MOVNTQ" %%mm6, 8(%3, %%"REG_a", 2) \n\t"
#else
"movq %%mm5, (%2, %%"REG_a", 2) \n\t"
"movq %%mm7, 8(%2, %%"REG_a", 2) \n\t"
"movq %%mm4, (%3, %%"REG_a", 2) \n\t"
"movq %%mm6, 8(%3, %%"REG_a", 2) \n\t"
#endif
"add $8, %%"REG_a" \n\t"
"movq -1(%0, %%"REG_a"), %%mm4 \n\t"
"movq -1(%1, %%"REG_a"), %%mm5 \n\t"
" js 1b \n\t"
:: "r" (src + mmxSize ), "r" (src + srcStride + mmxSize ),
"r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
"g" (-mmxSize)
: "%"REG_a
);
#else
const x86_reg mmxSize=1;
dst[0 ]= (3*src[0] + src[srcStride])>>2;
dst[dstStride]= ( src[0] + 3*src[srcStride])>>2;
#endif
for (x=mmxSize-1; x<srcWidth-1; x++) {
dst[2*x +1]= (3*src[x+0] + src[x+srcStride+1])>>2;
dst[2*x+dstStride+2]= ( src[x+0] + 3*src[x+srcStride+1])>>2;
dst[2*x+dstStride+1]= ( src[x+1] + 3*src[x+srcStride ])>>2;
dst[2*x +2]= (3*src[x+1] + src[x+srcStride ])>>2;
}
dst[srcWidth*2 -1 ]= (3*src[srcWidth-1] + src[srcWidth-1 + srcStride])>>2;
dst[srcWidth*2 -1 + dstStride]= ( src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2;
dst+=dstStride*2;
src+=srcStride;
}
// last line
#if 1
dst[0]= src[0];
for (x=0; x<srcWidth-1; x++) {
dst[2*x+1]= (3*src[x] + src[x+1])>>2;
dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
}
dst[2*srcWidth-1]= src[srcWidth-1];
#else
for (x=0; x<srcWidth; x++) {
dst[2*x+0]=
dst[2*x+1]= src[x];
}
#endif
#if COMPILE_TEMPLATE_MMX
__asm__ volatile(EMMS" \n\t"
SFENCE" \n\t"
:::"memory");
#endif
}
| false |
FFmpeg
|
d1adad3cca407f493c3637e20ecd4f7124e69212
|
13,832 |
static av_cold void ffat_encode_flush(AVCodecContext *avctx)
{
ATDecodeContext *at = avctx->priv_data;
AudioConverterReset(at->converter);
av_frame_unref(&at->new_in_frame);
av_frame_unref(&at->in_frame);
}
| false |
FFmpeg
|
143685a42bbc8861b626457ce4cb8b1ce4b0c436
|