id (int32, 0-27.3k) | func (string, length 26-142k) | target (bool, 2 classes) | project (string, 2 classes) | commit_id (string, length 40)
---|---|---|---|---|
6,216 | static av_cold int fdk_aac_decode_init(AVCodecContext *avctx)
{
FDKAACDecContext *s = avctx->priv_data;
AAC_DECODER_ERROR err;
int ret;
s->handle = aacDecoder_Open(avctx->extradata_size ? TT_MP4_RAW : TT_MP4_ADTS, 1);
if (!s->handle) {
av_log(avctx, AV_LOG_ERROR, "Error opening decoder\n");
return AVERROR_UNKNOWN;
}
if (avctx->extradata_size) {
if ((err = aacDecoder_ConfigRaw(s->handle, &avctx->extradata,
&avctx->extradata_size)) != AAC_DEC_OK) {
av_log(avctx, AV_LOG_ERROR, "Unable to set extradata\n");
return AVERROR_INVALIDDATA;
}
}
if ((err = aacDecoder_SetParam(s->handle, AAC_CONCEAL_METHOD,
s->conceal_method)) != AAC_DEC_OK) {
av_log(avctx, AV_LOG_ERROR, "Unable to set error concealment method\n");
return AVERROR_UNKNOWN;
}
if (avctx->request_channel_layout > 0 &&
avctx->request_channel_layout != AV_CH_LAYOUT_NATIVE) {
int downmix_channels = -1;
switch (avctx->request_channel_layout) {
case AV_CH_LAYOUT_STEREO:
case AV_CH_LAYOUT_STEREO_DOWNMIX:
downmix_channels = 2;
break;
case AV_CH_LAYOUT_MONO:
downmix_channels = 1;
break;
default:
av_log(avctx, AV_LOG_WARNING, "Invalid request_channel_layout\n");
break;
}
if (downmix_channels != -1) {
if (aacDecoder_SetParam(s->handle, AAC_PCM_MAX_OUTPUT_CHANNELS,
downmix_channels) != AAC_DEC_OK) {
av_log(avctx, AV_LOG_WARNING, "Unable to set output channels in the decoder\n");
} else {
s->anc_buffer = av_malloc(DMX_ANC_BUFFSIZE);
if (!s->anc_buffer) {
av_log(avctx, AV_LOG_ERROR, "Unable to allocate ancillary buffer for the decoder\n");
ret = AVERROR(ENOMEM);
goto fail;
}
if (aacDecoder_AncDataInit(s->handle, s->anc_buffer, DMX_ANC_BUFFSIZE)) {
av_log(avctx, AV_LOG_ERROR, "Unable to register downmix ancillary buffer in the decoder\n");
ret = AVERROR_UNKNOWN;
goto fail;
}
}
}
}
if (s->drc_boost != -1) {
if (aacDecoder_SetParam(s->handle, AAC_DRC_BOOST_FACTOR, s->drc_boost) != AAC_DEC_OK) {
av_log(avctx, AV_LOG_ERROR, "Unable to set DRC boost factor in the decoder\n");
return AVERROR_UNKNOWN;
}
}
if (s->drc_cut != -1) {
if (aacDecoder_SetParam(s->handle, AAC_DRC_ATTENUATION_FACTOR, s->drc_cut) != AAC_DEC_OK) {
av_log(avctx, AV_LOG_ERROR, "Unable to set DRC attenuation factor in the decoder\n");
return AVERROR_UNKNOWN;
}
}
if (s->drc_level != -1) {
if (aacDecoder_SetParam(s->handle, AAC_DRC_REFERENCE_LEVEL, s->drc_level) != AAC_DEC_OK) {
av_log(avctx, AV_LOG_ERROR, "Unable to set DRC reference level in the decoder\n");
return AVERROR_UNKNOWN;
}
}
if (s->drc_heavy != -1) {
if (aacDecoder_SetParam(s->handle, AAC_DRC_HEAVY_COMPRESSION, s->drc_heavy) != AAC_DEC_OK) {
av_log(avctx, AV_LOG_ERROR, "Unable to set DRC heavy compression in the decoder\n");
return AVERROR_UNKNOWN;
}
}
#ifdef AACDECODER_LIB_VL0
if (aacDecoder_SetParam(s->handle, AAC_PCM_LIMITER_ENABLE, s->level_limit) != AAC_DEC_OK) {
av_log(avctx, AV_LOG_ERROR, "Unable to set in signal level limiting in the decoder\n");
return AVERROR_UNKNOWN;
}
#endif
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
s->decoder_buffer_size = DECODER_BUFFSIZE * DECODER_MAX_CHANNELS;
s->decoder_buffer = av_malloc(s->decoder_buffer_size);
if (!s->decoder_buffer) {
ret = AVERROR(ENOMEM);
goto fail;
}
return 0;
fail:
fdk_aac_decode_close(avctx);
return ret;
}
| true | FFmpeg | f34b152eb7b7e8d2aee57c710a072cf74173fbe1 |
6,218 | int ff_hevc_decode_extradata(const uint8_t *data, int size, HEVCParamSets *ps,
int *is_nalff, int *nal_length_size,
int err_recognition, void *logctx)
{
int ret = 0;
GetByteContext gb;
bytestream2_init(&gb, data, size);
if (size > 3 && (data[0] || data[1] || data[2] > 1)) {
/* It seems the extradata is encoded as hvcC format.
* Temporarily, we support configurationVersion==0 until 14496-15 3rd
* is finalized. When finalized, configurationVersion will be 1 and we
* can recognize hvcC by checking if avctx->extradata[0]==1 or not. */
int i, j, num_arrays, nal_len_size;
*is_nalff = 1;
bytestream2_skip(&gb, 21);
nal_len_size = (bytestream2_get_byte(&gb) & 3) + 1;
num_arrays = bytestream2_get_byte(&gb);
/* nal units in the hvcC always have length coded with 2 bytes,
* so put a fake nal_length_size = 2 while parsing them */
*nal_length_size = 2;
/* Decode nal units from hvcC. */
for (i = 0; i < num_arrays; i++) {
int type = bytestream2_get_byte(&gb) & 0x3f;
int cnt = bytestream2_get_be16(&gb);
for (j = 0; j < cnt; j++) {
// +2 for the nal size field
int nalsize = bytestream2_peek_be16(&gb) + 2;
if (bytestream2_get_bytes_left(&gb) < nalsize) {
av_log(logctx, AV_LOG_ERROR,
"Invalid NAL unit size in extradata.\n");
return AVERROR_INVALIDDATA;
}
ret = hevc_decode_nal_units(gb.buffer, nalsize, ps, *is_nalff, *nal_length_size, logctx);
if (ret < 0) {
av_log(logctx, AV_LOG_ERROR,
"Decoding nal unit %d %d from hvcC failed\n",
type, i);
return ret;
}
bytestream2_skip(&gb, nalsize);
}
}
/* Now store right nal length size, that will be used to parse
* all other nals */
*nal_length_size = nal_len_size;
} else {
*is_nalff = 0;
ret = hevc_decode_nal_units(data, size, ps, *is_nalff, *nal_length_size, logctx);
if (ret < 0)
return ret;
}
return ret;
}
| true | FFmpeg | 159ab4625bd3641e79b564335be8069dca881978 |
6,219 | static void frame_thread_free(AVCodecContext *avctx, int thread_count)
{
FrameThreadContext *fctx = avctx->thread_opaque;
AVCodec *codec = avctx->codec;
int i;
park_frame_worker_threads(fctx, thread_count);
if (fctx->prev_thread && fctx->prev_thread != fctx->threads)
update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0);
fctx->die = 1;
for (i = 0; i < thread_count; i++) {
PerThreadContext *p = &fctx->threads[i];
pthread_mutex_lock(&p->mutex);
pthread_cond_signal(&p->input_cond);
pthread_mutex_unlock(&p->mutex);
pthread_join(p->thread, NULL);
if (codec->close)
codec->close(p->avctx);
avctx->codec = NULL;
release_delayed_buffers(p);
}
for (i = 0; i < thread_count; i++) {
PerThreadContext *p = &fctx->threads[i];
avcodec_default_free_buffers(p->avctx);
pthread_mutex_destroy(&p->mutex);
pthread_mutex_destroy(&p->progress_mutex);
pthread_cond_destroy(&p->input_cond);
pthread_cond_destroy(&p->progress_cond);
pthread_cond_destroy(&p->output_cond);
av_freep(&p->avpkt.data);
if (i)
av_freep(&p->avctx->priv_data);
av_freep(&p->avctx);
}
av_freep(&fctx->threads);
pthread_mutex_destroy(&fctx->buffer_mutex);
av_freep(&avctx->thread_opaque);
avctx->has_b_frames -= avctx->thread_count - 1;
}
| true | FFmpeg | 26ae9a5d7c448a3eb42641b546ee8d585ab716e6 |
6,220 | int qio_dns_resolver_lookup_sync(QIODNSResolver *resolver,
SocketAddress *addr,
size_t *naddrs,
SocketAddress ***addrs,
Error **errp)
{
switch (addr->type) {
case SOCKET_ADDRESS_KIND_INET:
return qio_dns_resolver_lookup_sync_inet(resolver,
addr,
naddrs,
addrs,
errp);
case SOCKET_ADDRESS_KIND_UNIX:
case SOCKET_ADDRESS_KIND_VSOCK:
case SOCKET_ADDRESS_KIND_FD:
return qio_dns_resolver_lookup_sync_nop(resolver,
addr,
naddrs,
addrs,
errp);
default:
abort();
}
}
| false | qemu | dfd100f242370886bb6732f70f1f7cbd8eb9fedc |
6,221 | int fw_cfg_add_file(FWCfgState *s, const char *dir, const char *filename,
uint8_t *data, uint32_t len)
{
const char *basename;
int index;
if (!s->files) {
int dsize = sizeof(uint32_t) + sizeof(FWCfgFile) * FW_CFG_FILE_SLOTS;
s->files = qemu_mallocz(dsize);
fw_cfg_add_bytes(s, FW_CFG_FILE_DIR, (uint8_t*)s->files, dsize);
}
index = be32_to_cpu(s->files->count);
if (index == FW_CFG_FILE_SLOTS) {
fprintf(stderr, "fw_cfg: out of file slots\n");
return 0;
}
fw_cfg_add_bytes(s, FW_CFG_FILE_FIRST + index, data, len);
basename = strrchr(filename, '/');
if (basename) {
basename++;
} else {
basename = filename;
}
if (dir) {
snprintf(s->files->f[index].name, sizeof(s->files->f[index].name),
"%s/%s", dir, basename);
} else {
snprintf(s->files->f[index].name, sizeof(s->files->f[index].name),
"%s", basename);
}
s->files->f[index].size = cpu_to_be32(len);
s->files->f[index].select = cpu_to_be16(FW_CFG_FILE_FIRST + index);
FW_CFG_DPRINTF("%s: #%d: %s (%d bytes)\n", __FUNCTION__,
index, s->files->f[index].name, len);
s->files->count = cpu_to_be32(index+1);
return 1;
}
| false | qemu | de9352bcaed2452af1d2b06b829748676c691794 |
6,222 | static void mov_text_text_cb(void *priv, const char *text, int len)
{
MovTextContext *s = priv;
av_strlcpy(s->ptr, text, FFMIN(s->end - s->ptr, len + 1));
s->ptr += len;
}
| false | FFmpeg | b0635e2fcf80717dd618ef75d3317d62ed85c300 |
6,223 | void qmp_balloon(int64_t value, Error **errp)
{
if (kvm_enabled() && !kvm_has_sync_mmu()) {
error_set(errp, QERR_KVM_MISSING_CAP, "synchronous MMU", "balloon");
return;
}
if (value <= 0) {
error_set(errp, QERR_INVALID_PARAMETER_VALUE, "target", "a size");
return;
}
if (qemu_balloon(value) == 0) {
error_set(errp, QERR_DEVICE_NOT_ACTIVE, "balloon");
}
}
| false | qemu | 6502a14734e71b2f6dd079b0a1e546e6aa2d2f8d |
6,224 | struct GuestFileSeek *qmp_guest_file_seek(int64_t handle, int64_t offset,
int64_t whence, Error **errp)
{
GuestFileHandle *gfh = guest_file_handle_find(handle, errp);
GuestFileSeek *seek_data = NULL;
FILE *fh;
int ret;
if (!gfh) {
return NULL;
}
fh = gfh->fh;
ret = fseek(fh, offset, whence);
if (ret == -1) {
error_setg_errno(errp, errno, "failed to seek file");
if (errno == ESPIPE) {
/* file is non-seekable, stdio shouldn't be buffering anyways */
gfh->state = RW_STATE_NEW;
}
} else {
seek_data = g_new0(GuestFileSeek, 1);
seek_data->position = ftell(fh);
seek_data->eof = feof(fh);
gfh->state = RW_STATE_NEW;
}
clearerr(fh);
return seek_data;
}
| false | qemu | 0a982b1bf3953dc8640c4d6e619fb1132ebbebc3 |
6,225 | static bool virtio_device_endian_needed(void *opaque)
{
VirtIODevice *vdev = opaque;
assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
return vdev->device_endian != virtio_default_endian();
}
/* Devices conforming to VIRTIO 1.0 or later are always LE. */
return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}
| false | qemu | 95129d6fc9ead97155627a4ca0cfd37282883658 |
6,226 | static int uhci_handle_td(UHCIState *s, uint32_t addr, UHCI_TD *td, uint32_t *int_mask)
{
UHCIAsync *async;
int len = 0, max_len;
uint8_t pid;
/* Is active ? */
if (!(td->ctrl & TD_CTRL_ACTIVE))
return 1;
async = uhci_async_find_td(s, addr, td->token);
if (async) {
/* Already submitted */
async->valid = 32;
if (!async->done)
return 1;
uhci_async_unlink(s, async);
goto done;
}
/* Allocate new packet */
async = uhci_async_alloc(s);
if (!async)
return 1;
async->valid = 10;
async->td = addr;
async->token = td->token;
max_len = ((td->token >> 21) + 1) & 0x7ff;
pid = td->token & 0xff;
async->packet.pid = pid;
async->packet.devaddr = (td->token >> 8) & 0x7f;
async->packet.devep = (td->token >> 15) & 0xf;
async->packet.data = async->buffer;
async->packet.len = max_len;
async->packet.complete_cb = uhci_async_complete;
async->packet.complete_opaque = s;
switch(pid) {
case USB_TOKEN_OUT:
case USB_TOKEN_SETUP:
cpu_physical_memory_read(td->buffer, async->buffer, max_len);
len = uhci_broadcast_packet(s, &async->packet);
if (len >= 0)
len = max_len;
break;
case USB_TOKEN_IN:
len = uhci_broadcast_packet(s, &async->packet);
break;
default:
/* invalid pid : frame interrupted */
uhci_async_free(s, async);
s->status |= UHCI_STS_HCPERR;
uhci_update_irq(s);
return -1;
}
if (len == USB_RET_ASYNC) {
uhci_async_link(s, async);
return 2;
}
async->packet.len = len;
done:
len = uhci_complete_td(s, td, async, int_mask);
uhci_async_free(s, async);
return len;
}
| false | qemu | 8e65b7c04965c8355e4ce43211582b6b83054e3d |
6,227 | static void omap_pwt_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size)
{
struct omap_pwt_s *s = (struct omap_pwt_s *) opaque;
int offset = addr & OMAP_MPUI_REG_MASK;
if (size != 1) {
omap_badwidth_write8(opaque, addr, value);
return;
}
switch (offset) {
case 0x00: /* FRC */
s->frc = value & 0x3f;
break;
case 0x04: /* VRC */
if ((value ^ s->vrc) & 1) {
if (value & 1)
printf("%s: %iHz buzz on\n", __FUNCTION__, (int)
/* 1.5 MHz from a 12-MHz or 13-MHz PWT_CLK */
((omap_clk_getrate(s->clk) >> 3) /
/* Pre-multiplexer divider */
((s->gcr & 2) ? 1 : 154) /
/* Octave multiplexer */
(2 << (value & 3)) *
/* 101/107 divider */
((value & (1 << 2)) ? 101 : 107) *
/* 49/55 divider */
((value & (1 << 3)) ? 49 : 55) *
/* 50/63 divider */
((value & (1 << 4)) ? 50 : 63) *
/* 80/127 divider */
((value & (1 << 5)) ? 80 : 127) /
(107 * 55 * 63 * 127)));
else
printf("%s: silence!\n", __FUNCTION__);
}
s->vrc = value & 0x7f;
break;
case 0x08: /* GCR */
s->gcr = value & 3;
break;
default:
OMAP_BAD_REG(addr);
return;
}
}
| false | qemu | a89f364ae8740dfc31b321eed9ee454e996dc3c1 |
6,228 | START_TEST(unterminated_array_comma)
{
QObject *obj = qobject_from_json("[32,");
fail_unless(obj == NULL);
}
| false | qemu | ef76dc59fa5203d146a2acf85a0ad5a5971a4824 |
6,229 | uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
{
uint64_t token = 0;
hwaddr pte_offset;
pte_offset = pte_index * HASH_PTE_SIZE_64;
if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
/*
* HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
*/
token = kvmppc_hash64_read_pteg(cpu, pte_index);
} else if (cpu->env.external_htab) {
/*
* HTAB is controlled by QEMU. Just point to the internally
* accessible PTEG.
*/
token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
} else if (cpu->env.htab_base) {
token = cpu->env.htab_base + pte_offset;
}
return token;
}
| false | qemu | 1ad9f0a464fe78d30ee60b3629f7a825cf2fab13 |
6,230 | static inline void gen_neon_negl(TCGv var, int size)
{
switch (size) {
case 0: gen_helper_neon_negl_u16(var, var); break;
case 1: gen_helper_neon_negl_u32(var, var); break;
case 2: gen_helper_neon_negl_u64(var, var); break;
default: abort();
}
}
| false | qemu | a7812ae412311d7d47f8aa85656faadac9d64b56 |
6,231 | void curses_display_init(DisplayState *ds, int full_screen)
{
#ifndef _WIN32
if (!isatty(1)) {
fprintf(stderr, "We need a terminal output\n");
exit(1);
}
#endif
curses_setup();
curses_keyboard_setup();
atexit(curses_atexit);
#ifndef _WIN32
signal(SIGINT, SIG_DFL);
signal(SIGQUIT, SIG_DFL);
#if defined(SIGWINCH) && defined(KEY_RESIZE)
/* some curses implementations provide a handler, but we
* want to be sure this is handled regardless of the library */
signal(SIGWINCH, curses_winch_handler);
#endif
#endif
ds->data = (void *) screen;
ds->linesize = 0;
ds->depth = 0;
ds->width = 640;
ds->height = 400;
ds->dpy_update = curses_update;
ds->dpy_resize = curses_resize;
ds->dpy_refresh = curses_refresh;
ds->dpy_text_cursor = curses_cursor_position;
invalidate = 1;
/* Standard VGA initial text mode dimensions */
curses_resize(ds, 80, 25);
}
| false | qemu | 5b08fc106d3146ddc1447d82d4770fc402fc363b |
6,232 | void qio_channel_socket_dgram_async(QIOChannelSocket *ioc,
SocketAddress *localAddr,
SocketAddress *remoteAddr,
QIOTaskFunc callback,
gpointer opaque,
GDestroyNotify destroy)
{
QIOTask *task = qio_task_new(
OBJECT(ioc), callback, opaque, destroy);
struct QIOChannelSocketDGramWorkerData *data = g_new0(
struct QIOChannelSocketDGramWorkerData, 1);
data->localAddr = QAPI_CLONE(SocketAddress, localAddr);
data->remoteAddr = QAPI_CLONE(SocketAddress, remoteAddr);
trace_qio_channel_socket_dgram_async(ioc, localAddr, remoteAddr);
qio_task_run_in_thread(task,
qio_channel_socket_dgram_worker,
data,
qio_channel_socket_dgram_worker_free);
}
| false | qemu | dfd100f242370886bb6732f70f1f7cbd8eb9fedc |
6,233 | static av_cold int libx265_encode_init(AVCodecContext *avctx)
{
libx265Context *ctx = avctx->priv_data;
ctx->api = x265_api_get(av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth_minus1 + 1);
if (!ctx->api)
ctx->api = x265_api_get(0);
if (avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL &&
!av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_w) {
av_log(avctx, AV_LOG_ERROR,
"4:2:2 and 4:4:4 support is not fully defined for HEVC yet. "
"Set -strict experimental to encode anyway.\n");
return AVERROR(ENOSYS);
}
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
return AVERROR(ENOMEM);
}
ctx->params = ctx->api->param_alloc();
if (!ctx->params) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate x265 param structure.\n");
return AVERROR(ENOMEM);
}
if (ctx->api->param_default_preset(ctx->params, ctx->preset, ctx->tune) < 0) {
int i;
av_log(avctx, AV_LOG_ERROR, "Error setting preset/tune %s/%s.\n", ctx->preset, ctx->tune);
av_log(avctx, AV_LOG_INFO, "Possible presets:");
for (i = 0; x265_preset_names[i]; i++)
av_log(avctx, AV_LOG_INFO, " %s", x265_preset_names[i]);
av_log(avctx, AV_LOG_INFO, "\n");
av_log(avctx, AV_LOG_INFO, "Possible tunes:");
for (i = 0; x265_tune_names[i]; i++)
av_log(avctx, AV_LOG_INFO, " %s", x265_tune_names[i]);
av_log(avctx, AV_LOG_INFO, "\n");
return AVERROR(EINVAL);
}
ctx->params->frameNumThreads = avctx->thread_count;
ctx->params->fpsNum = avctx->time_base.den;
ctx->params->fpsDenom = avctx->time_base.num * avctx->ticks_per_frame;
ctx->params->sourceWidth = avctx->width;
ctx->params->sourceHeight = avctx->height;
ctx->params->bEnablePsnr = !!(avctx->flags & CODEC_FLAG_PSNR);
if ((avctx->color_primaries <= AVCOL_PRI_BT2020 &&
avctx->color_primaries != AVCOL_PRI_UNSPECIFIED) ||
(avctx->color_trc <= AVCOL_TRC_BT2020_12 &&
avctx->color_trc != AVCOL_TRC_UNSPECIFIED) ||
(avctx->colorspace <= AVCOL_SPC_BT2020_CL &&
avctx->colorspace != AVCOL_SPC_UNSPECIFIED)) {
ctx->params->vui.bEnableVideoSignalTypePresentFlag = 1;
ctx->params->vui.bEnableColorDescriptionPresentFlag = 1;
// x265 validates the parameters internally
ctx->params->vui.colorPrimaries = avctx->color_primaries;
ctx->params->vui.transferCharacteristics = avctx->color_trc;
ctx->params->vui.matrixCoeffs = avctx->colorspace;
}
if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0) {
char sar[12];
int sar_num, sar_den;
av_reduce(&sar_num, &sar_den,
avctx->sample_aspect_ratio.num,
avctx->sample_aspect_ratio.den, 65535);
snprintf(sar, sizeof(sar), "%d:%d", sar_num, sar_den);
if (ctx->api->param_parse(ctx->params, "sar", sar) == X265_PARAM_BAD_VALUE) {
av_log(avctx, AV_LOG_ERROR, "Invalid SAR: %d:%d.\n", sar_num, sar_den);
return AVERROR_INVALIDDATA;
}
}
switch (avctx->pix_fmt) {
case AV_PIX_FMT_YUV420P:
case AV_PIX_FMT_YUV420P10:
ctx->params->internalCsp = X265_CSP_I420;
break;
case AV_PIX_FMT_YUV422P:
case AV_PIX_FMT_YUV422P10:
ctx->params->internalCsp = X265_CSP_I422;
break;
case AV_PIX_FMT_YUV444P:
case AV_PIX_FMT_YUV444P10:
ctx->params->internalCsp = X265_CSP_I444;
break;
}
if (ctx->crf >= 0) {
char crf[6];
snprintf(crf, sizeof(crf), "%2.2f", ctx->crf);
if (ctx->api->param_parse(ctx->params, "crf", crf) == X265_PARAM_BAD_VALUE) {
av_log(avctx, AV_LOG_ERROR, "Invalid crf: %2.2f.\n", ctx->crf);
return AVERROR(EINVAL);
}
} else if (avctx->bit_rate > 0) {
ctx->params->rc.bitrate = avctx->bit_rate / 1000;
ctx->params->rc.rateControlMode = X265_RC_ABR;
}
if (!(avctx->flags & CODEC_FLAG_GLOBAL_HEADER))
ctx->params->bRepeatHeaders = 1;
if (ctx->x265_opts) {
AVDictionary *dict = NULL;
AVDictionaryEntry *en = NULL;
if (!av_dict_parse_string(&dict, ctx->x265_opts, "=", ":", 0)) {
while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
int parse_ret = ctx->api->param_parse(ctx->params, en->key, en->value);
switch (parse_ret) {
case X265_PARAM_BAD_NAME:
av_log(avctx, AV_LOG_WARNING,
"Unknown option: %s.\n", en->key);
break;
case X265_PARAM_BAD_VALUE:
av_log(avctx, AV_LOG_WARNING,
"Invalid value for %s: %s.\n", en->key, en->value);
break;
default:
break;
}
}
av_dict_free(&dict);
}
}
ctx->encoder = ctx->api->encoder_open(ctx->params);
if (!ctx->encoder) {
av_log(avctx, AV_LOG_ERROR, "Cannot open libx265 encoder.\n");
libx265_encode_close(avctx);
return AVERROR_INVALIDDATA;
}
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
x265_nal *nal;
int nnal;
avctx->extradata_size = ctx->api->encoder_headers(ctx->encoder, &nal, &nnal);
if (avctx->extradata_size <= 0) {
av_log(avctx, AV_LOG_ERROR, "Cannot encode headers.\n");
libx265_encode_close(avctx);
return AVERROR_INVALIDDATA;
}
avctx->extradata = av_malloc(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!avctx->extradata) {
av_log(avctx, AV_LOG_ERROR,
"Cannot allocate HEVC header of size %d.\n", avctx->extradata_size);
libx265_encode_close(avctx);
return AVERROR(ENOMEM);
}
memcpy(avctx->extradata, nal[0].payload, avctx->extradata_size);
}
return 0;
}
| false | FFmpeg | d6604b29ef544793479d7fb4e05ef6622bb3e534 |
6,234 | static uint32_t set_allocation_state(sPAPRDRConnector *drc,
sPAPRDRAllocationState state)
{
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
trace_spapr_drc_set_allocation_state(get_index(drc), state);
if (state == SPAPR_DR_ALLOCATION_STATE_USABLE) {
/* if there's no resource/device associated with the DRC, there's
* no way for us to put it in an allocation state consistent with
* being 'USABLE'. PAPR 2.7, 13.5.3.4 documents that this should
* result in an RTAS return code of -3 / "no such indicator"
*/
if (!drc->dev) {
return RTAS_OUT_NO_SUCH_INDICATOR;
}
if (drc->awaiting_release && drc->awaiting_allocation) {
/* kernel is acknowledging a previous hotplug event
* while we are already removing it.
* it's safe to ignore awaiting_allocation here since we know the
* situation is predicated on the guest either already having done
* so (boot-time hotplug), or never being able to acquire in the
* first place (hotplug followed by immediate unplug).
*/
drc->awaiting_allocation_skippable = true;
return RTAS_OUT_NO_SUCH_INDICATOR;
}
}
if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI) {
drc->allocation_state = state;
if (drc->awaiting_release &&
drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
trace_spapr_drc_set_allocation_state_finalizing(get_index(drc));
drck->detach(drc, DEVICE(drc->dev), drc->detach_cb,
drc->detach_cb_opaque, NULL);
} else if (drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE) {
drc->awaiting_allocation = false;
}
}
return RTAS_OUT_SUCCESS;
}
| false | qemu | 318347234d7069b62d38391dd27e269a3107d668 |
6,235 | static void vfio_pci_reset_handler(void *opaque)
{
VFIOGroup *group;
VFIOPCIDevice *vdev;
QLIST_FOREACH(group, &group_list, next) {
QLIST_FOREACH(vdev, &group->device_list, next) {
if (!vdev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
vdev->needs_reset = true;
}
}
}
QLIST_FOREACH(group, &group_list, next) {
QLIST_FOREACH(vdev, &group->device_list, next) {
if (vdev->needs_reset) {
vfio_pci_hot_reset_multi(vdev);
}
}
}
}
| false | qemu | b47d8efa9f430c332bf96ce6eede169eb48422ad |
6,236 | static void virt_machine_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
mc->init = machvirt_init;
/* Start max_cpus at the maximum QEMU supports. We'll further restrict
* it later in machvirt_init, where we have more information about the
* configuration of the particular instance.
*/
mc->max_cpus = MAX_CPUMASK_BITS;
mc->has_dynamic_sysbus = true;
mc->block_default_type = IF_VIRTIO;
mc->no_cdrom = 1;
mc->pci_allow_0_address = true;
}
| false | qemu | 079019f2e319bd1279681b6c1d7dde785d09e69e |
6,237 | static void xtensa_lx60_init(MachineState *machine)
{
static const LxBoardDesc lx60_board = {
.flash_base = 0xf8000000,
.flash_size = 0x00400000,
.flash_sector_size = 0x10000,
.sram_size = 0x20000,
};
lx_init(&lx60_board, machine);
}
| false | qemu | 68931a4082812f56657b39168e815c48f0ab0a8c |
6,238 | int udp_output2(struct socket *so, struct mbuf *m,
struct sockaddr_in *saddr, struct sockaddr_in *daddr,
int iptos)
{
register struct udpiphdr *ui;
int error = 0;
DEBUG_CALL("udp_output");
DEBUG_ARG("so = %p", so);
DEBUG_ARG("m = %p", m);
DEBUG_ARG("saddr = %lx", (long)saddr->sin_addr.s_addr);
DEBUG_ARG("daddr = %lx", (long)daddr->sin_addr.s_addr);
/*
* Adjust for header
*/
m->m_data -= sizeof(struct udpiphdr);
m->m_len += sizeof(struct udpiphdr);
/*
* Fill in mbuf with extended UDP header
* and addresses and length put into network format.
*/
ui = mtod(m, struct udpiphdr *);
memset(&ui->ui_i.ih_mbuf, 0 , sizeof(struct mbuf_ptr));
ui->ui_x1 = 0;
ui->ui_pr = IPPROTO_UDP;
ui->ui_len = htons(m->m_len - sizeof(struct ip));
/* XXXXX Check for from-one-location sockets, or from-any-location sockets */
ui->ui_src = saddr->sin_addr;
ui->ui_dst = daddr->sin_addr;
ui->ui_sport = saddr->sin_port;
ui->ui_dport = daddr->sin_port;
ui->ui_ulen = ui->ui_len;
/*
* Stuff checksum and output datagram.
*/
ui->ui_sum = 0;
if ((ui->ui_sum = cksum(m, m->m_len)) == 0)
ui->ui_sum = 0xffff;
((struct ip *)ui)->ip_len = m->m_len;
((struct ip *)ui)->ip_ttl = IPDEFTTL;
((struct ip *)ui)->ip_tos = iptos;
error = ip_output(so, m);
return (error);
}
| false | qemu | 5379229a2708df3a1506113315214c3ce5325859 |
6,239 | static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
S390CPU *cpu = S390_CPU(cs);
S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
SigpInfo *si = arg.host_ptr;
cpu_synchronize_state(cs);
scc->initial_cpu_reset(cs);
cpu_synchronize_post_reset(cs);
si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
| false | qemu | 74b4c74d5efb0a489bdf0acc5b5d0197167e7649 |
6,240 | void cris_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
unsigned int i;
(*cpu_fprintf)(f, "Available CPUs:\n");
for (i = 0; i < ARRAY_SIZE(cris_cores); i++) {
(*cpu_fprintf)(f, " %s\n", cris_cores[i].name);
}
}
| false | qemu | 9a78eead0c74333a394c0f7bbfc4423ac746fcd5 |
6,242 | void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
error_vreport(fmt, ap);
va_end(ap);
vdev->broken = true;
if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
virtio_notify_config(vdev);
}
}
| false | qemu | 66453cff9e5e75344c601cd7674c8ef5fefee8a6 |
6,244 | const char *avcodec_configuration(void)
{
return FFMPEG_CONFIGURATION;
}
| false | FFmpeg | 29ba091136a5e04574f7bfc1b17536c923958f6f |
6,245 | av_cold void ff_ps_ctx_init(PSContext *ps)
{
ipdopd_reset(ps->ipd_hist, ps->opd_hist);
}
| false | FFmpeg | e90e1f558a194ef75e396ac9ae5128be03e66362 |
6,246 | void omap_mpuio_key(struct omap_mpuio_s *s, int row, int col, int down)
{
if (row >= 5 || row < 0)
hw_error("%s: No key %i-%i\n", __FUNCTION__, col, row);
if (down)
s->buttons[row] |= 1 << col;
else
s->buttons[row] &= ~(1 << col);
omap_mpuio_kbd_update(s);
}
| false | qemu | a89f364ae8740dfc31b321eed9ee454e996dc3c1 |
6,247 | int ff_put_wav_header(AVIOContext *pb, AVCodecContext *enc)
{
int bps, blkalign, bytespersec;
int hdrsize = 18;
int waveformatextensible;
uint8_t temp[256];
uint8_t *riff_extradata= temp;
uint8_t *riff_extradata_start= temp;
if(!enc->codec_tag || enc->codec_tag > 0xffff)
return -1;
waveformatextensible = (enc->channels > 2 && enc->channel_layout)
|| enc->sample_rate > 48000
|| av_get_bits_per_sample(enc->codec_id) > 16;
if (waveformatextensible) {
avio_wl16(pb, 0xfffe);
} else {
avio_wl16(pb, enc->codec_tag);
}
avio_wl16(pb, enc->channels);
avio_wl32(pb, enc->sample_rate);
if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3 || enc->codec_id == CODEC_ID_GSM_MS) {
bps = 0;
} else if (enc->codec_id == CODEC_ID_ADPCM_G726) {
bps = 4;
} else {
if (!(bps = av_get_bits_per_sample(enc->codec_id)))
bps = 16; // default to 16
}
if(bps != enc->bits_per_coded_sample && enc->bits_per_coded_sample){
av_log(enc, AV_LOG_WARNING, "requested bits_per_coded_sample (%d) and actually stored (%d) differ\n", enc->bits_per_coded_sample, bps);
}
if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3) {
blkalign = enc->frame_size; //this is wrong, but it seems many demuxers do not work if this is set correctly
//blkalign = 144 * enc->bit_rate/enc->sample_rate;
} else if (enc->codec_id == CODEC_ID_AC3) {
blkalign = 3840; //maximum bytes per frame
} else if (enc->codec_id == CODEC_ID_ADPCM_G726) { //
blkalign = 1;
} else if (enc->block_align != 0) { /* specified by the codec */
blkalign = enc->block_align;
} else
blkalign = enc->channels*bps >> 3;
if (enc->codec_id == CODEC_ID_PCM_U8 ||
enc->codec_id == CODEC_ID_PCM_S24LE ||
enc->codec_id == CODEC_ID_PCM_S32LE ||
enc->codec_id == CODEC_ID_PCM_F32LE ||
enc->codec_id == CODEC_ID_PCM_F64LE ||
enc->codec_id == CODEC_ID_PCM_S16LE) {
bytespersec = enc->sample_rate * blkalign;
} else {
bytespersec = enc->bit_rate / 8;
}
avio_wl32(pb, bytespersec); /* bytes per second */
avio_wl16(pb, blkalign); /* block align */
avio_wl16(pb, bps); /* bits per sample */
if (enc->codec_id == CODEC_ID_MP3) {
hdrsize += 12;
bytestream_put_le16(&riff_extradata, 1); /* wID */
bytestream_put_le32(&riff_extradata, 2); /* fdwFlags */
bytestream_put_le16(&riff_extradata, 1152); /* nBlockSize */
bytestream_put_le16(&riff_extradata, 1); /* nFramesPerBlock */
bytestream_put_le16(&riff_extradata, 1393); /* nCodecDelay */
} else if (enc->codec_id == CODEC_ID_MP2) {
hdrsize += 22;
bytestream_put_le16(&riff_extradata, 2); /* fwHeadLayer */
bytestream_put_le32(&riff_extradata, enc->bit_rate); /* dwHeadBitrate */
bytestream_put_le16(&riff_extradata, enc->channels == 2 ? 1 : 8); /* fwHeadMode */
bytestream_put_le16(&riff_extradata, 0); /* fwHeadModeExt */
bytestream_put_le16(&riff_extradata, 1); /* wHeadEmphasis */
bytestream_put_le16(&riff_extradata, 16); /* fwHeadFlags */
bytestream_put_le32(&riff_extradata, 0); /* dwPTSLow */
bytestream_put_le32(&riff_extradata, 0); /* dwPTSHigh */
} else if (enc->codec_id == CODEC_ID_GSM_MS || enc->codec_id == CODEC_ID_ADPCM_IMA_WAV) {
hdrsize += 2;
bytestream_put_le16(&riff_extradata, enc->frame_size); /* wSamplesPerBlock */
} else if(enc->extradata_size){
riff_extradata_start= enc->extradata;
riff_extradata= enc->extradata + enc->extradata_size;
hdrsize += enc->extradata_size;
} else if (!waveformatextensible){
hdrsize -= 2;
}
if(waveformatextensible) { /* write WAVEFORMATEXTENSIBLE extensions */
hdrsize += 22;
avio_wl16(pb, riff_extradata - riff_extradata_start + 22); /* 22 is WAVEFORMATEXTENSIBLE size */
avio_wl16(pb, enc->bits_per_coded_sample); /* ValidBitsPerSample || SamplesPerBlock || Reserved */
avio_wl32(pb, enc->channel_layout); /* dwChannelMask */
avio_wl32(pb, enc->codec_tag); /* GUID + next 3 */
avio_wl32(pb, 0x00100000);
avio_wl32(pb, 0xAA000080);
avio_wl32(pb, 0x719B3800);
} else if(riff_extradata - riff_extradata_start) {
avio_wl16(pb, riff_extradata - riff_extradata_start);
}
avio_write(pb, riff_extradata_start, riff_extradata - riff_extradata_start);
if(hdrsize&1){
hdrsize++;
avio_w8(pb, 0);
}
return hdrsize;
}
| false | FFmpeg | 2c4e08d89327595f7f4be57dda4b3775e1198d5e |
6,248 | static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
CPUState *cpu = current_cpu;
CPUClass *cc = CPU_GET_CLASS(cpu);
CPUArchState *env = cpu->env_ptr;
target_ulong pc, cs_base;
target_ulong vaddr;
CPUWatchpoint *wp;
uint32_t cpu_flags;
assert(tcg_enabled());
if (cpu->watchpoint_hit) {
/* We re-entered the check after replacing the TB. Now raise
* the debug interrupt so that is will trigger after the
* current instruction. */
cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
return;
}
vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len);
QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
if (cpu_watchpoint_address_matches(wp, vaddr, len)
&& (wp->flags & flags)) {
if (flags == BP_MEM_READ) {
wp->flags |= BP_WATCHPOINT_HIT_READ;
} else {
wp->flags |= BP_WATCHPOINT_HIT_WRITE;
}
wp->hitaddr = vaddr;
wp->hitattrs = attrs;
if (!cpu->watchpoint_hit) {
if (wp->flags & BP_CPU &&
!cc->debug_check_watchpoint(cpu, wp)) {
wp->flags &= ~BP_WATCHPOINT_HIT;
continue;
}
cpu->watchpoint_hit = wp;
/* Both tb_lock and iothread_mutex will be reset when
* cpu_loop_exit or cpu_loop_exit_noexc longjmp
* back into the cpu_exec main loop.
*/
tb_lock();
tb_check_watchpoint(cpu);
if (wp->flags & BP_STOP_BEFORE_ACCESS) {
cpu->exception_index = EXCP_DEBUG;
cpu_loop_exit(cpu);
} else {
cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
tb_gen_code(cpu, pc, cs_base, cpu_flags, 1 | curr_cflags());
cpu_loop_exit_noexc(cpu);
}
}
} else {
wp->flags &= ~BP_WATCHPOINT_HIT;
}
}
}
| false | qemu | 9b990ee5a3cc6aa38f81266fb0c6ef37a36c45b9 |
6,249 | dbdma_control_write(DBDMA_channel *ch)
{
uint16_t mask, value;
uint32_t status;
mask = (ch->regs[DBDMA_CONTROL] >> 16) & 0xffff;
value = ch->regs[DBDMA_CONTROL] & 0xffff;
value &= (RUN | PAUSE | FLUSH | WAKE | DEVSTAT);
status = ch->regs[DBDMA_STATUS];
status = (value & mask) | (status & ~mask);
if (status & WAKE)
status |= ACTIVE;
if (status & RUN) {
status |= ACTIVE;
status &= ~DEAD;
}
if (status & PAUSE)
status &= ~ACTIVE;
if ((ch->regs[DBDMA_STATUS] & RUN) && !(status & RUN)) {
/* RUN is cleared */
status &= ~(ACTIVE|DEAD);
}
DBDMA_DPRINTF(" status 0x%08x\n", status);
ch->regs[DBDMA_STATUS] = status;
if (status & ACTIVE)
qemu_bh_schedule(dbdma_bh);
if (status & FLUSH)
ch->flush(&ch->io);
}
| false | qemu | a9ceb76d55abfed9426a819024aa3a4b87266c9f |
6,250 | static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
int access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr, uint32_t *fsr,
ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
/* Read an LPAE long-descriptor translation table. */
MMUFaultType fault_type = translation_fault;
uint32_t level;
uint32_t epd = 0;
int32_t t0sz, t1sz;
uint32_t tg;
uint64_t ttbr;
int ttbr_select;
hwaddr descaddr, indexmask, indexmask_grainsize;
uint32_t tableattrs;
target_ulong page_size;
uint32_t attrs;
int32_t stride = 9;
int32_t va_size;
int inputsize;
int32_t tbi = 0;
TCR *tcr = regime_tcr(env, mmu_idx);
int ap, ns, xn, pxn;
uint32_t el = regime_el(env, mmu_idx);
bool ttbr1_valid = true;
uint64_t descaddrmask;
/* TODO:
* This code does not handle the different format TCR for VTCR_EL2.
* This code also does not support shareability levels.
* Attribute and permission bit handling should also be checked when adding
* support for those page table walks.
*/
if (arm_el_is_aa64(env, el)) {
level = 0;
va_size = 64;
if (el > 1) {
if (mmu_idx != ARMMMUIdx_S2NS) {
tbi = extract64(tcr->raw_tcr, 20, 1);
}
} else {
if (extract64(address, 55, 1)) {
tbi = extract64(tcr->raw_tcr, 38, 1);
} else {
tbi = extract64(tcr->raw_tcr, 37, 1);
}
}
tbi *= 8;
/* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
* invalid.
*/
if (el > 1) {
ttbr1_valid = false;
}
} else {
level = 1;
va_size = 32;
/* There is no TTBR1 for EL2 */
if (el == 2) {
ttbr1_valid = false;
}
}
/* Determine whether this address is in the region controlled by
* TTBR0 or TTBR1 (or if it is in neither region and should fault).
* This is a Non-secure PL0/1 stage 1 translation, so controlled by
* TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
*/
if (va_size == 64) {
/* AArch64 translation. */
t0sz = extract32(tcr->raw_tcr, 0, 6);
t0sz = MIN(t0sz, 39);
t0sz = MAX(t0sz, 16);
} else if (mmu_idx != ARMMMUIdx_S2NS) {
/* AArch32 stage 1 translation. */
t0sz = extract32(tcr->raw_tcr, 0, 3);
} else {
/* AArch32 stage 2 translation. */
bool sext = extract32(tcr->raw_tcr, 4, 1);
bool sign = extract32(tcr->raw_tcr, 3, 1);
t0sz = sextract32(tcr->raw_tcr, 0, 4);
/* If the sign-extend bit is not the same as t0sz[3], the result
* is unpredictable. Flag this as a guest error. */
if (sign != sext) {
qemu_log_mask(LOG_GUEST_ERROR,
"AArch32: VTCR.S / VTCR.T0SZ[3] missmatch\n");
}
}
t1sz = extract32(tcr->raw_tcr, 16, 6);
if (va_size == 64) {
t1sz = MIN(t1sz, 39);
t1sz = MAX(t1sz, 16);
}
if (t0sz && !extract64(address, va_size - t0sz, t0sz - tbi)) {
/* there is a ttbr0 region and we are in it (high bits all zero) */
ttbr_select = 0;
} else if (ttbr1_valid && t1sz &&
!extract64(~address, va_size - t1sz, t1sz - tbi)) {
/* there is a ttbr1 region and we are in it (high bits all one) */
ttbr_select = 1;
} else if (!t0sz) {
/* ttbr0 region is "everything not in the ttbr1 region" */
ttbr_select = 0;
} else if (!t1sz && ttbr1_valid) {
/* ttbr1 region is "everything not in the ttbr0 region" */
ttbr_select = 1;
} else {
/* in the gap between the two regions, this is a Translation fault */
fault_type = translation_fault;
goto do_fault;
}
/* Note that QEMU ignores shareability and cacheability attributes,
* so we don't need to do anything with the SH, ORGN, IRGN fields
* in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
* ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
* implement any ASID-like capability so we can ignore it (instead
* we will always flush the TLB any time the ASID is changed).
*/
if (ttbr_select == 0) {
ttbr = regime_ttbr(env, mmu_idx, 0);
if (el < 2) {
epd = extract32(tcr->raw_tcr, 7, 1);
}
inputsize = va_size - t0sz;
tg = extract32(tcr->raw_tcr, 14, 2);
if (tg == 1) { /* 64KB pages */
stride = 13;
}
if (tg == 2) { /* 16KB pages */
stride = 11;
}
} else {
/* We should only be here if TTBR1 is valid */
assert(ttbr1_valid);
ttbr = regime_ttbr(env, mmu_idx, 1);
epd = extract32(tcr->raw_tcr, 23, 1);
inputsize = va_size - t1sz;
tg = extract32(tcr->raw_tcr, 30, 2);
if (tg == 3) { /* 64KB pages */
stride = 13;
}
if (tg == 1) { /* 16KB pages */
stride = 11;
}
}
/* Here we should have set up all the parameters for the translation:
* va_size, inputsize, ttbr, epd, stride, tbi
*/
if (epd) {
/* Translation table walk disabled => Translation fault on TLB miss
* Note: This is always 0 on 64-bit EL2 and EL3.
*/
goto do_fault;
}
if (mmu_idx != ARMMMUIdx_S2NS) {
/* The starting level depends on the virtual address size (which can
* be up to 48 bits) and the translation granule size. It indicates
* the number of strides (stride bits at a time) needed to
* consume the bits of the input address. In the pseudocode this is:
* level = 4 - RoundUp((inputsize - grainsize) / stride)
* where their 'inputsize' is our 'inputsize', 'grainsize' is
* our 'stride + 3' and 'stride' is our 'stride'.
* Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
* = 4 - (inputsize - stride - 3 + stride - 1) / stride
* = 4 - (inputsize - 4) / stride;
*/
level = 4 - (inputsize - 4) / stride;
} else {
/* For stage 2 translations the starting level is specified by the
* VTCR_EL2.SL0 field (whose interpretation depends on the page size)
*/
uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
uint32_t startlevel;
bool ok;
if (va_size == 32 || stride == 9) {
/* AArch32 or 4KB pages */
startlevel = 2 - sl0;
} else {
/* 16KB or 64KB pages */
startlevel = 3 - sl0;
}
/* Check that the starting level is valid. */
ok = check_s2_mmu_setup(cpu, va_size == 64, startlevel,
inputsize, stride);
if (!ok) {
fault_type = translation_fault;
goto do_fault;
}
level = startlevel;
}
indexmask_grainsize = (1ULL << (stride + 3)) - 1;
indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
/* Now we can extract the actual base address from the TTBR */
descaddr = extract64(ttbr, 0, 48);
descaddr &= ~indexmask;
/* The address field in the descriptor goes up to bit 39 for ARMv7
* but up to bit 47 for ARMv8, but we use the descaddrmask
* up to bit 39 for AArch32, because we don't need other bits in that case
* to construct next descriptor address (anyway they should be all zeroes).
*/
descaddrmask = ((1ull << (va_size == 64 ? 48 : 40)) - 1) &
~indexmask_grainsize;
/* Secure accesses start with the page table in secure memory and
* can be downgraded to non-secure at any step. Non-secure accesses
* remain non-secure. We implement this by just ORing in the NSTable/NS
* bits at each step.
*/
tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
for (;;) {
uint64_t descriptor;
bool nstable;
descaddr |= (address >> (stride * (4 - level))) & indexmask;
descaddr &= ~7ULL;
nstable = extract32(tableattrs, 4, 1);
descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fsr, fi);
if (fi->s1ptw) {
goto do_fault;
}
if (!(descriptor & 1) ||
(!(descriptor & 2) && (level == 3))) {
/* Invalid, or the Reserved level 3 encoding */
goto do_fault;
}
descaddr = descriptor & descaddrmask;
if ((descriptor & 2) && (level < 3)) {
/* Table entry. The top five bits are attributes which may
* propagate down through lower levels of the table (and
* which are all arranged so that 0 means "no effect", so
* we can gather them up by ORing in the bits at each level).
*/
tableattrs |= extract64(descriptor, 59, 5);
level++;
indexmask = indexmask_grainsize;
continue;
}
/* Block entry at level 1 or 2, or page entry at level 3.
* These are basically the same thing, although the number
* of bits we pull in from the vaddr varies.
*/
page_size = (1ULL << ((stride * (4 - level)) + 3));
descaddr |= (address & (page_size - 1));
/* Extract attributes from the descriptor */
attrs = extract64(descriptor, 2, 10)
| (extract64(descriptor, 52, 12) << 10);
if (mmu_idx == ARMMMUIdx_S2NS) {
/* Stage 2 table descriptors do not include any attribute fields */
break;
}
/* Merge in attributes from table descriptors */
attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
/* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
* means "force PL1 access only", which means forcing AP[1] to 0.
*/
if (extract32(tableattrs, 2, 1)) {
attrs &= ~(1 << 4);
}
attrs |= nstable << 3; /* NS */
break;
}
/* Here descaddr is the final physical address, and attributes
* are all in attrs.
*/
fault_type = access_fault;
if ((attrs & (1 << 8)) == 0) {
/* Access flag */
goto do_fault;
}
ap = extract32(attrs, 4, 2);
xn = extract32(attrs, 12, 1);
if (mmu_idx == ARMMMUIdx_S2NS) {
ns = true;
*prot = get_S2prot(env, ap, xn);
} else {
ns = extract32(attrs, 3, 1);
pxn = extract32(attrs, 11, 1);
*prot = get_S1prot(env, mmu_idx, va_size == 64, ap, ns, xn, pxn);
}
fault_type = permission_fault;
if (!(*prot & (1 << access_type))) {
goto do_fault;
}
if (ns) {
/* The NS bit will (as required by the architecture) have no effect if
* the CPU doesn't support TZ or this is a non-secure translation
* regime, because the attribute will already be non-secure.
*/
txattrs->secure = false;
}
*phys_ptr = descaddr;
*page_size_ptr = page_size;
return false;
do_fault:
/* Long-descriptor format IFSR/DFSR value */
*fsr = (1 << 9) | (fault_type << 2) | level;
/* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
return true;
}
| false | qemu | 6e99f762612827afeff54add2e4fc2c3b2657fed |
6,252 | static void monitor_parse(const char *optarg, const char *mode, bool pretty)
{
static int monitor_device_index = 0;
Error *local_err = NULL;
QemuOpts *opts;
const char *p;
char label[32];
int def = 0;
if (strstart(optarg, "chardev:", &p)) {
snprintf(label, sizeof(label), "%s", p);
} else {
snprintf(label, sizeof(label), "compat_monitor%d",
monitor_device_index);
if (monitor_device_index == 0) {
def = 1;
}
opts = qemu_chr_parse_compat(label, optarg);
if (!opts) {
fprintf(stderr, "parse error: %s\n", optarg);
exit(1);
}
}
opts = qemu_opts_create(qemu_find_opts("mon"), label, 1, &local_err);
if (!opts) {
error_report_err(local_err);
exit(1);
}
qemu_opt_set(opts, "mode", mode, &error_abort);
qemu_opt_set(opts, "chardev", label, &error_abort);
qemu_opt_set_bool(opts, "pretty", pretty, &error_abort);
if (def)
qemu_opt_set(opts, "default", "on", &error_abort);
monitor_device_index++;
}
| false | qemu | f61eddcb2bb5cbbdd1d911b7e937db9affc29028 |
6,253 | void drive_hot_add(Monitor *mon, const QDict *qdict)
{
int dom, pci_bus;
unsigned slot;
int type, bus;
int success = 0;
PCIDevice *dev;
DriveInfo *dinfo;
const char *pci_addr = qdict_get_str(qdict, "pci_addr");
const char *opts = qdict_get_str(qdict, "opts");
BusState *scsibus;
if (pci_read_devaddr(mon, pci_addr, &dom, &pci_bus, &slot)) {
return;
}
dev = pci_find_device(pci_bus, slot, 0);
if (!dev) {
monitor_printf(mon, "no pci device with address %s\n", pci_addr);
return;
}
dinfo = add_init_drive(opts);
if (!dinfo)
return;
if (dinfo->devaddr) {
monitor_printf(mon, "Parameter addr not supported\n");
return;
}
type = dinfo->type;
bus = drive_get_max_bus (type);
switch (type) {
case IF_SCSI:
success = 1;
scsibus = LIST_FIRST(&dev->qdev.child_bus);
scsi_bus_legacy_add_drive(DO_UPCAST(SCSIBus, qbus, scsibus),
dinfo, dinfo->unit);
break;
default:
monitor_printf(mon, "Can't hot-add drive to type %d\n", type);
}
if (success)
monitor_printf(mon, "OK bus %d, unit %d\n",
dinfo->bus,
dinfo->unit);
return;
}
| false | qemu | 72cf2d4f0e181d0d3a3122e04129c58a95da713e |
6,254 | build_append_notify(GArray *device, const char *name,
const char *format, int skip, int count)
{
int i;
GArray *method = build_alloc_array();
uint8_t op = 0x14; /* MethodOp */
build_append_nameseg(method, "%s", name);
build_append_byte(method, 0x02); /* MethodFlags: ArgCount */
for (i = skip; i < count; i++) {
GArray *target = build_alloc_array();
build_append_nameseg(target, format, i);
assert(i < 256); /* Fits in 1 byte */
build_append_notify_target(method, target, i, 1);
build_free_array(target);
}
build_package(method, op, 2);
build_append_array(device, method);
build_free_array(method);
}
| false | qemu | 99fd437dee468609de8218f0eb3b16621fb6a9c9 |
6,255 | void virtio_input_send(VirtIOInput *vinput, virtio_input_event *event)
{
VirtQueueElement *elem;
unsigned have, need;
int i, len;
if (!vinput->active) {
return;
}
/* queue up events ... */
if (vinput->qindex == vinput->qsize) {
vinput->qsize++;
vinput->queue = g_realloc(vinput->queue, vinput->qsize *
sizeof(virtio_input_event));
}
vinput->queue[vinput->qindex++] = *event;
/* ... until we see a report sync ... */
if (event->type != cpu_to_le16(EV_SYN) ||
event->code != cpu_to_le16(SYN_REPORT)) {
return;
}
/* ... then check available space ... */
need = sizeof(virtio_input_event) * vinput->qindex;
virtqueue_get_avail_bytes(vinput->evt, &have, NULL, need, 0);
if (have < need) {
vinput->qindex = 0;
trace_virtio_input_queue_full();
return;
}
/* ... and finally pass them to the guest */
for (i = 0; i < vinput->qindex; i++) {
elem = virtqueue_pop(vinput->evt, sizeof(VirtQueueElement));
if (!elem) {
/* should not happen, we've checked for space beforehand */
fprintf(stderr, "%s: Huh? No vq elem available ...\n", __func__);
return;
}
len = iov_from_buf(elem->in_sg, elem->in_num,
0, vinput->queue+i, sizeof(virtio_input_event));
virtqueue_push(vinput->evt, elem, len);
g_free(elem);
}
virtio_notify(VIRTIO_DEVICE(vinput), vinput->evt);
vinput->qindex = 0;
}
| false | qemu | 57094547dfea4ce784923b8abb53ac3ab4e3961a |
6,256 | int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
{
MultiwriteCB *mcb;
int i;
/* don't submit writes if we don't have a medium */
if (bs->drv == NULL) {
for (i = 0; i < num_reqs; i++) {
reqs[i].error = -ENOMEDIUM;
}
return -1;
}
if (num_reqs == 0) {
return 0;
}
// Create MultiwriteCB structure
mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
mcb->num_requests = 0;
mcb->num_callbacks = num_reqs;
for (i = 0; i < num_reqs; i++) {
mcb->callbacks[i].cb = reqs[i].cb;
mcb->callbacks[i].opaque = reqs[i].opaque;
}
// Check for mergable requests
num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
/* Run the aio requests. */
mcb->num_requests = num_reqs;
for (i = 0; i < num_reqs; i++) {
bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
reqs[i].nb_sectors, reqs[i].flags,
multiwrite_cb, mcb,
true);
}
return 0;
}
| false | qemu | 61007b316cd71ee7333ff7a0a749a8949527575f |
6,257 | static void omap_sti_fifo_write(void *opaque, target_phys_addr_t addr,
uint64_t value, unsigned size)
{
struct omap_sti_s *s = (struct omap_sti_s *) opaque;
int ch = addr >> 6;
uint8_t byte = value;
if (size != 1) {
return omap_badwidth_write8(opaque, addr, size);
}
if (ch == STI_TRACE_CONTROL_CHANNEL) {
/* Flush channel <i>value</i>. */
qemu_chr_fe_write(s->chr, (const uint8_t *) "\r", 1);
} else if (ch == STI_TRACE_CONSOLE_CHANNEL || 1) {
if (value == 0xc0 || value == 0xc3) {
/* Open channel <i>ch</i>. */
} else if (value == 0x00)
qemu_chr_fe_write(s->chr, (const uint8_t *) "\n", 1);
else
qemu_chr_fe_write(s->chr, &byte, 1);
}
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
6,258 | static void active_parameter_sets(HEVCContext *s)
{
GetBitContext *gb = &s->HEVClc->gb;
int num_sps_ids_minus1;
int i;
get_bits(gb, 4); // active_video_parameter_set_id
get_bits(gb, 1); // self_contained_cvs_flag
get_bits(gb, 1); // num_sps_ids_minus1
num_sps_ids_minus1 = get_ue_golomb_long(gb); // num_sps_ids_minus1
s->active_seq_parameter_set_id = get_ue_golomb_long(gb);
for (i = 1; i <= num_sps_ids_minus1; i++)
get_ue_golomb_long(gb); // active_seq_parameter_set_id[i]
}
| false | FFmpeg | 63a37d0e1ec59377af9fb7973ffc847f928ba851 |
6,259 | static void term_backward_char(void)
{
if (term_cmd_buf_index > 0) {
term_cmd_buf_index--;
}
}
| false | qemu | 7e2515e87c41e2e658aaed466e11cbdf1ea8bcb1 |
6,260 | static int omap_validate_tipb_mpui_addr(struct omap_mpu_state_s *s,
target_phys_addr_t addr)
{
return range_covers_byte(0xe1010000, 0xe1020004 - 0xe1010000, addr);
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
6,261 | static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
uint32_t val)
{
int dirty_flags;
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
tb_invalidate_phys_page_fast(ram_addr, 1);
dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
}
stb_p(qemu_get_ram_ptr(ram_addr), val);
#ifdef CONFIG_KQEMU
if (cpu_single_env->kqemu_enabled &&
(dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
kqemu_modify_page(cpu_single_env, ram_addr);
#endif
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
| false | qemu | 4a1418e07bdcfaa3177739e04707ecaec75d89e1 |
6,262 | BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
BlockDriverState *target, int64_t speed,
MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
bool compress,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
int creation_flags,
BlockCompletionFunc *cb, void *opaque,
BlockJobTxn *txn, Error **errp)
{
int64_t len;
BlockDriverInfo bdi;
BackupBlockJob *job = NULL;
int ret;
assert(bs);
assert(target);
if (bs == target) {
error_setg(errp, "Source and target cannot be the same");
return NULL;
}
if (!bdrv_is_inserted(bs)) {
error_setg(errp, "Device is not inserted: %s",
bdrv_get_device_name(bs));
return NULL;
}
if (!bdrv_is_inserted(target)) {
error_setg(errp, "Device is not inserted: %s",
bdrv_get_device_name(target));
return NULL;
}
if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
error_setg(errp, "Compression is not supported for this drive %s",
bdrv_get_device_name(target));
return NULL;
}
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
return NULL;
}
if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
return NULL;
}
if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
if (!sync_bitmap) {
error_setg(errp, "must provide a valid bitmap name for "
"\"incremental\" sync mode");
return NULL;
}
/* Create a new bitmap, and freeze/disable this one. */
if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
return NULL;
}
} else if (sync_bitmap) {
error_setg(errp,
"a sync_bitmap was provided to backup_run, "
"but received an incompatible sync_mode (%s)",
MirrorSyncMode_lookup[sync_mode]);
return NULL;
}
len = bdrv_getlength(bs);
if (len < 0) {
error_setg_errno(errp, -len, "unable to get length for '%s'",
bdrv_get_device_name(bs));
goto error;
}
/* job->common.len is fixed, so we can't allow resize */
job = block_job_create(job_id, &backup_job_driver, bs,
BLK_PERM_CONSISTENT_READ,
BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
speed, creation_flags, cb, opaque, errp);
if (!job) {
goto error;
}
/* The target must match the source in size, so no resize here either */
job->target = blk_new(BLK_PERM_WRITE,
BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
ret = blk_insert_bs(job->target, target, errp);
if (ret < 0) {
goto error;
}
job->on_source_error = on_source_error;
job->on_target_error = on_target_error;
job->sync_mode = sync_mode;
job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
sync_bitmap : NULL;
job->compress = compress;
/* If there is no backing file on the target, we cannot rely on COW if our
* backup cluster size is smaller than the target cluster size. Even for
* targets with a backing file, try to avoid COW if possible. */
ret = bdrv_get_info(target, &bdi);
if (ret == -ENOTSUP && !target->backing) {
/* Cluster size is not defined */
error_report("WARNING: The target block device doesn't provide "
"information about the block size and it doesn't have a "
"backing file. The default block size of %u bytes is "
"used. If the actual block size of the target exceeds "
"this default, the backup may be unusable",
BACKUP_CLUSTER_SIZE_DEFAULT);
job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
} else if (ret < 0 && !target->backing) {
error_setg_errno(errp, -ret,
"Couldn't determine the cluster size of the target image, "
"which has no backing file");
error_append_hint(errp,
"Aborting, since this may create an unusable destination image\n");
goto error;
} else if (ret < 0 && target->backing) {
/* Not fatal; just trudge on ahead. */
job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
} else {
job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}
/* Required permissions are already taken with target's blk_new() */
block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
&error_abort);
job->common.len = len;
block_job_txn_add_job(txn, &job->common);
return &job->common;
error:
if (sync_bitmap) {
bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
}
if (job) {
backup_clean(&job->common);
block_job_early_fail(&job->common);
}
return NULL;
}
| false | qemu | 3dc6f8693694a649a9c83f1e2746565b47683923 |
6,263 | void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
CPUState *cs = CPU(x86_env_get_cpu(env));
target_ulong addr;
uint32_t event_inj;
uint32_t int_ctl;
cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);
if (aflag == 2) {
addr = env->regs[R_EAX];
} else {
addr = (uint32_t)env->regs[R_EAX];
}
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
env->vm_vmcb = addr;
/* save the current CPU state in the hsave page */
stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
env->gdt.base);
stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
env->gdt.limit);
stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
env->idt.base);
stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
env->idt.limit);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.rflags),
cpu_compute_eflags(env));
svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
&env->segs[R_ES]);
svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
&env->segs[R_CS]);
svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
&env->segs[R_SS]);
svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
&env->segs[R_DS]);
stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip),
env->eip + next_eip_addend);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
stq_phys(cs->as,
env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
/* load the interception bitmaps so we do not need to access the
vmcb in svm mode */
env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
control.intercept));
env->intercept_cr_read = lduw_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_cr_read));
env->intercept_cr_write = lduw_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_cr_write));
env->intercept_dr_read = lduw_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_dr_read));
env->intercept_dr_write = lduw_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_dr_write));
env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb,
control.intercept_exceptions
));
/* enable intercepts */
env->hflags |= HF_SVMI_MASK;
env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb, control.tsc_offset));
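    /* load the guest descriptor tables and control registers from the VMCB */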
env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
save.gdtr.base));
env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
save.gdtr.limit));
env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
save.idtr.base));
env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
save.idtr.limit));
/* clear exit_info_2 so we behave like the real hardware */
stq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
cpu_x86_update_cr0(env, ldq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb,
save.cr0)));
cpu_x86_update_cr4(env, ldq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb,
save.cr4)));
cpu_x86_update_cr3(env, ldq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb,
save.cr3)));
env->cr[2] = ldq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.cr2));
int_ctl = ldl_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
if (int_ctl & V_INTR_MASKING_MASK) {
env->v_tpr = int_ctl & V_TPR_MASK;
env->hflags2 |= HF2_VINTR_MASK;
if (env->eflags & IF_MASK) {
env->hflags2 |= HF2_HIF_MASK;
}
}
cpu_load_efer(env,
ldq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.efer)));
env->eflags = 0;
cpu_load_eflags(env, ldq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb,
save.rflags)),
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
CC_OP = CC_OP_EFLAGS;
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
R_ES);
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
R_CS);
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
R_SS);
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
R_DS);
env->eip = ldq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.rip));
env->regs[R_ESP] = ldq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.rsp));
env->regs[R_EAX] = ldq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.rax));
env->dr[7] = ldq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.dr7));
env->dr[6] = ldq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.dr6));
cpu_x86_set_cpl(env, ldub_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb,
save.cpl)));
/* FIXME: guest state consistency checks */
switch (ldub_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
case TLB_CONTROL_DO_NOTHING:
break;
case TLB_CONTROL_FLUSH_ALL_ASID:
/* FIXME: this is not 100% correct but should work for now */
tlb_flush(cs, 1);
break;
}
env->hflags2 |= HF2_GIF_MASK;
if (int_ctl & V_IRQ_MASK) {
CPUState *cs = CPU(x86_env_get_cpu(env));
cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
}
/* maybe we need to inject an event */
event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
control.event_inj));
if (event_inj & SVM_EVTINJ_VALID) {
uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb +
offsetof(struct vmcb,
control.event_inj_err));
qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
/* FIXME: need to implement valid_err */
switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
case SVM_EVTINJ_TYPE_INTR:
cs->exception_index = vector;
env->error_code = event_inj_err;
env->exception_is_int = 0;
env->exception_next_eip = -1;
qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
/* XXX: is it always correct? */
do_interrupt_x86_hardirq(env, vector, 1);
break;
case SVM_EVTINJ_TYPE_NMI:
cs->exception_index = EXCP02_NMI;
env->error_code = event_inj_err;
env->exception_is_int = 0;
env->exception_next_eip = env->eip;
qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
cpu_loop_exit(cs);
break;
case SVM_EVTINJ_TYPE_EXEPT:
cs->exception_index = vector;
env->error_code = event_inj_err;
env->exception_is_int = 0;
env->exception_next_eip = -1;
qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
cpu_loop_exit(cs);
break;
case SVM_EVTINJ_TYPE_SOFT:
cs->exception_index = vector;
env->error_code = event_inj_err;
env->exception_is_int = 1;
env->exception_next_eip = env->eip;
qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
cpu_loop_exit(cs);
break;
}
qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
env->error_code);
}
}
| false | qemu | 7848c8d19f8556666df25044bbd5d8b29439c368 |
6,264 | static void spr_write_ibatu (void *opaque, int sprn)
{
DisasContext *ctx = opaque;
gen_op_store_ibatu((sprn - SPR_IBAT0U) / 2);
RET_STOP(ctx);
}
| false | qemu | e1833e1f96456fd8fc17463246fe0b2050e68efb |
6,265 | static void machine_numa_validate(MachineState *machine)
{
int i;
GString *s = g_string_new(NULL);
MachineClass *mc = MACHINE_GET_CLASS(machine);
const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(machine);
assert(nb_numa_nodes);
for (i = 0; i < possible_cpus->len; i++) {
const CPUArchId *cpu_slot = &possible_cpus->cpus[i];
        /* at this point numa mappings are initialized by CLI options
         * or with default mappings, so it's sufficient to list
         * all not yet mapped CPUs here */
        /* TODO: make this a hard error in the future */
if (!cpu_slot->props.has_node_id) {
char *cpu_str = cpu_slot_to_string(cpu_slot);
g_string_append_printf(s, "%sCPU %d [%s]", s->len ? ", " : "", i,
cpu_str);
g_free(cpu_str);
}
}
if (s->len) {
error_report("warning: CPU(s) not present in any NUMA nodes: %s",
s->str);
error_report("warning: All CPU(s) up to maxcpus should be described "
"in NUMA config, ability to start up with partial NUMA "
"mappings is obsoleted and will be removed in future");
}
g_string_free(s, true);
}
| false | qemu | c6ff347c8078bb86f75d38955641cb73e9d5b309 |
6,266 | int qcow2_refcount_init(BlockDriverState *bs)
{
BDRVQcowState *s = bs->opaque;
unsigned int refcount_table_size2, i;
int ret;
assert(s->refcount_table_size <= INT_MAX / sizeof(uint64_t));
refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
s->refcount_table = g_try_malloc(refcount_table_size2);
if (s->refcount_table_size > 0) {
if (s->refcount_table == NULL) {
goto fail;
}
BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
ret = bdrv_pread(bs->file, s->refcount_table_offset,
s->refcount_table, refcount_table_size2);
if (ret != refcount_table_size2)
goto fail;
for(i = 0; i < s->refcount_table_size; i++)
be64_to_cpus(&s->refcount_table[i]);
}
return 0;
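    /* note: every failure path reports -ENOMEM, even when bdrv_pread
     * failed for a different reason */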
fail:
return -ENOMEM;
}
| true | qemu | 8fcffa9853473ab148d36858f15c5531161a1824 |
6,267 | static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
{
int j, k, l, sb_x, sb_y;
int coding_mode;
int motion_x[6];
int motion_y[6];
int last_motion_x = 0;
int last_motion_y = 0;
int prior_last_motion_x = 0;
int prior_last_motion_y = 0;
int current_macroblock;
int current_fragment;
if (s->keyframe)
return 0;
memset(motion_x, 0, 6 * sizeof(int));
memset(motion_y, 0, 6 * sizeof(int));
/* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
coding_mode = get_bits1(gb);
/* iterate through all of the macroblocks that contain 1 or more
* coded fragments */
for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
for (j = 0; j < 4; j++) {
int mb_x = 2*sb_x + (j>>1);
int mb_y = 2*sb_y + (((j>>1)+j)&1);
current_macroblock = mb_y * s->macroblock_width + mb_x;
if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height ||
(s->macroblock_coding[current_macroblock] == MODE_COPY))
continue;
switch (s->macroblock_coding[current_macroblock]) {
case MODE_INTER_PLUS_MV:
case MODE_GOLDEN_MV:
/* all 6 fragments use the same motion vector */
if (coding_mode == 0) {
motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
} else {
motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
}
/* vector maintenance, only on MODE_INTER_PLUS_MV */
if (s->macroblock_coding[current_macroblock] ==
MODE_INTER_PLUS_MV) {
prior_last_motion_x = last_motion_x;
prior_last_motion_y = last_motion_y;
last_motion_x = motion_x[0];
last_motion_y = motion_y[0];
}
break;
case MODE_INTER_FOURMV:
/* vector maintenance */
prior_last_motion_x = last_motion_x;
prior_last_motion_y = last_motion_y;
/* fetch 4 vectors from the bitstream, one for each
* Y fragment, then average for the C fragment vectors */
motion_x[4] = motion_y[4] = 0;
for (k = 0; k < 4; k++) {
current_fragment = BLOCK_Y*s->fragment_width + BLOCK_X;
for (l = 0; l < s->coded_fragment_list_index; l++)
if (s->coded_fragment_list[l] == current_fragment)
break;
if (l < s->coded_fragment_list_index) {
if (coding_mode == 0) {
motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
} else {
motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
}
last_motion_x = motion_x[k];
last_motion_y = motion_y[k];
} else {
motion_x[k] = 0;
motion_y[k] = 0;
}
motion_x[4] += motion_x[k];
motion_y[4] += motion_y[k];
}
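                    /* chroma vectors are the rounded average of the four luma vectors */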
motion_x[5]=
motion_x[4]= RSHIFT(motion_x[4], 2);
motion_y[5]=
motion_y[4]= RSHIFT(motion_y[4], 2);
break;
case MODE_INTER_LAST_MV:
/* all 6 fragments use the last motion vector */
motion_x[0] = last_motion_x;
motion_y[0] = last_motion_y;
/* no vector maintenance (last vector remains the
* last vector) */
break;
case MODE_INTER_PRIOR_LAST:
/* all 6 fragments use the motion vector prior to the
* last motion vector */
motion_x[0] = prior_last_motion_x;
motion_y[0] = prior_last_motion_y;
/* vector maintenance */
prior_last_motion_x = last_motion_x;
prior_last_motion_y = last_motion_y;
last_motion_x = motion_x[0];
last_motion_y = motion_y[0];
break;
default:
/* covers intra, inter without MV, golden without MV */
motion_x[0] = 0;
motion_y[0] = 0;
/* no vector maintenance */
break;
}
/* assign the motion vectors to the correct fragments */
for (k = 0; k < 4; k++) {
current_fragment =
BLOCK_Y*s->fragment_width + BLOCK_X;
if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
s->all_fragments[current_fragment].motion_x = motion_x[k];
s->all_fragments[current_fragment].motion_y = motion_y[k];
} else {
s->all_fragments[current_fragment].motion_x = motion_x[0];
s->all_fragments[current_fragment].motion_y = motion_y[0];
}
}
for (k = 0; k < 2; k++) {
current_fragment = s->fragment_start[k+1] +
mb_y*(s->fragment_width>>1) + mb_x;
if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
s->all_fragments[current_fragment].motion_x = motion_x[k+4];
s->all_fragments[current_fragment].motion_y = motion_y[k+4];
} else {
s->all_fragments[current_fragment].motion_x = motion_x[0];
s->all_fragments[current_fragment].motion_y = motion_y[0];
}
}
}
}
}
return 0;
}
| false | FFmpeg | eb691ef219860fa6432d460cdd8edc9dff4886db |
6,268 | void visit_type_int32(Visitor *v, int32_t *obj, const char *name, Error **errp)
{
int64_t value;
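    /* with no dedicated 32-bit handler, fall back to the generic int
     * visitor and range-check the result */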
if (!error_is_set(errp)) {
if (v->type_int32) {
v->type_int32(v, obj, name, errp);
} else {
value = *obj;
v->type_int(v, &value, name, errp);
if (value < INT32_MIN || value > INT32_MAX) {
error_set(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null",
"int32_t");
return;
}
*obj = value;
}
}
}
| true | qemu | 297a3646c2947ee64a6d42ca264039732c6218e0 |
6,270 | static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
TCGv tmp;
s->is_jmp = DISAS_UPDATE;
if (s->thumb != (addr & 1)) {
tmp = new_tmp();
tcg_gen_movi_i32(tmp, addr & 1);
tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
dead_tmp(tmp);
}
tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}
| true | qemu | 7d1b0095bff7157e856d1d0e6c4295641ced2752 |
6,271 | int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size)
{
int pending = size;
int done = 0;
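    /* peek-then-skip loop: copy up to 'pending' bytes per pass and stop
     * early if the stream runs out */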
while (pending > 0) {
int res;
res = qemu_peek_buffer(f, buf, pending, 0);
if (res == 0) {
return done;
}
qemu_file_skip(f, res);
buf += res;
pending -= res;
done += res;
}
return done;
}
| true | qemu | 548f52ea06951c20f0b91cae6cde0512ec073c83 |
6,273 | static void buffer_append(Buffer *buffer, const void *data, size_t len)
{
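    /* note: no bounds check here; the caller must have reserved at least
     * 'len' bytes past 'offset' in buffer->buffer */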
memcpy(buffer->buffer + buffer->offset, data, len);
buffer->offset += len;
}
| true | qemu | 2f9606b3736c3be4dbd606c46525c7b770ced119 |
6,276 | void av_dump_format(AVFormatContext *ic,
int index,
const char *url,
int is_output)
{
int i;
uint8_t *printed = av_mallocz(ic->nb_streams);
if (ic->nb_streams && !printed)
return;
av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
is_output ? "Output" : "Input",
index,
is_output ? ic->oformat->name : ic->iformat->name,
is_output ? "to" : "from", url);
dump_metadata(NULL, ic->metadata, " ");
if (!is_output) {
av_log(NULL, AV_LOG_INFO, " Duration: ");
if (ic->duration != AV_NOPTS_VALUE) {
int hours, mins, secs, us;
secs = ic->duration / AV_TIME_BASE;
us = ic->duration % AV_TIME_BASE;
mins = secs / 60;
secs %= 60;
hours = mins / 60;
mins %= 60;
av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
(100 * us) / AV_TIME_BASE);
} else {
av_log(NULL, AV_LOG_INFO, "N/A");
}
if (ic->start_time != AV_NOPTS_VALUE) {
int secs, us;
av_log(NULL, AV_LOG_INFO, ", start: ");
secs = ic->start_time / AV_TIME_BASE;
us = abs(ic->start_time % AV_TIME_BASE);
av_log(NULL, AV_LOG_INFO, "%d.%06d",
secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
}
av_log(NULL, AV_LOG_INFO, ", bitrate: ");
if (ic->bit_rate) {
av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
} else {
av_log(NULL, AV_LOG_INFO, "N/A");
}
av_log(NULL, AV_LOG_INFO, "\n");
}
for (i = 0; i < ic->nb_chapters; i++) {
AVChapter *ch = ic->chapters[i];
av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
dump_metadata(NULL, ch->metadata, " ");
}
if(ic->nb_programs) {
int j, k, total = 0;
for(j=0; j<ic->nb_programs; j++) {
AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
"name", NULL, 0);
av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
name ? name->value : "");
dump_metadata(NULL, ic->programs[j]->metadata, " ");
for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
printed[ic->programs[j]->stream_index[k]] = 1;
}
total += ic->programs[j]->nb_stream_indexes;
}
if (total < ic->nb_streams)
av_log(NULL, AV_LOG_INFO, " No Program\n");
}
for(i=0;i<ic->nb_streams;i++)
if (!printed[i])
dump_stream_format(ic, i, index, is_output);
av_free(printed);
}
| true | FFmpeg | e81e5e8ad2bb5746df0c343c396019aca165cf66 |
6,278 | static int r3d_read_redv(AVFormatContext *s, AVPacket *pkt, Atom *atom)
{
AVStream *st = s->streams[0];
int tmp;
int av_unused tmp2;
uint64_t pos = avio_tell(s->pb);
unsigned dts;
int ret;
dts = avio_rb32(s->pb);
tmp = avio_rb32(s->pb);
av_dlog(s, "frame num %d\n", tmp);
tmp = avio_r8(s->pb); // major version
tmp2 = avio_r8(s->pb); // minor version
av_dlog(s, "version %d.%d\n", tmp, tmp2);
tmp = avio_rb16(s->pb); // unknown
av_dlog(s, "unknown %d\n", tmp);
if (tmp > 4) {
tmp = avio_rb16(s->pb); // unknown
av_dlog(s, "unknown %d\n", tmp);
tmp = avio_rb16(s->pb); // unknown
av_dlog(s, "unknown %d\n", tmp);
tmp = avio_rb32(s->pb);
av_dlog(s, "width %d\n", tmp);
tmp = avio_rb32(s->pb);
av_dlog(s, "height %d\n", tmp);
tmp = avio_rb32(s->pb);
av_dlog(s, "metadata len %d\n", tmp);
}
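    /* the rest of the atom (size minus the 8-byte header and the fields
       parsed above) is the video payload */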
tmp = atom->size - 8 - (avio_tell(s->pb) - pos);
if (tmp < 0)
return -1;
ret = av_get_packet(s->pb, pkt, tmp);
if (ret < 0) {
av_log(s, AV_LOG_ERROR, "error reading video packet\n");
return -1;
}
pkt->stream_index = 0;
pkt->dts = dts;
if (st->avg_frame_rate.num)
pkt->duration = (uint64_t)st->time_base.den*
st->avg_frame_rate.den/st->avg_frame_rate.num;
av_dlog(s, "pkt dts %"PRId64" duration %d\n", pkt->dts, pkt->duration);
return 0;
}
| false | FFmpeg | 898276c16b1683ac77723e97574a3bfdb29507fd |
6,279 | void vnc_sent_lossy_rect(VncState *vs, int x, int y, int w, int h)
{
int i, j;
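    /* convert the pixel rectangle to VNC_STAT_RECT grid coordinates, then
       mark the affected cells as sent with a lossy encoding */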
w = (x + w) / VNC_STAT_RECT;
h = (y + h) / VNC_STAT_RECT;
x /= VNC_STAT_RECT;
y /= VNC_STAT_RECT;
for (j = y; j <= y + h; j++) {
for (i = x; i <= x + w; i++) {
vs->lossy_rect[j][i] = 1;
}
}
}
| false | qemu | 207f328afc2137d422f59293ba37b8be5d3e1617 |
6,280 | static void slow_bar_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
AssignedDevRegion *d = opaque;
uint8_t *out = d->u.r_virtbase + addr;
DEBUG("slow_bar_writeb addr=0x" TARGET_FMT_plx " val=0x%02x\n", addr, val);
*out = val;
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
6,281 | static void exynos4210_i2c_write(void *opaque, target_phys_addr_t offset,
uint64_t value, unsigned size)
{
Exynos4210I2CState *s = (Exynos4210I2CState *)opaque;
uint8_t v = value & 0xff;
DPRINT("write %s [0x%02x] <- 0x%02x\n", exynos4_i2c_get_regname(offset),
(unsigned int)offset, v);
switch (offset) {
case I2CCON_ADDR:
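        /* INT_PEND is preserved on write; clearing it acks the interrupt
         * and resumes a stalled transfer */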
s->i2ccon = (v & ~I2CCON_INT_PEND) | (s->i2ccon & I2CCON_INT_PEND);
if ((s->i2ccon & I2CCON_INT_PEND) && !(v & I2CCON_INT_PEND)) {
s->i2ccon &= ~I2CCON_INT_PEND;
qemu_irq_lower(s->irq);
if (!(s->i2ccon & I2CCON_INTRS_EN)) {
s->i2cstat &= ~I2CSTAT_START_BUSY;
}
if (s->i2cstat & I2CSTAT_START_BUSY) {
if (s->scl_free) {
if (EXYNOS4_I2C_MODE(s->i2cstat) == I2CMODE_MASTER_Tx) {
exynos4210_i2c_data_send(s);
} else if (EXYNOS4_I2C_MODE(s->i2cstat) ==
I2CMODE_MASTER_Rx) {
exynos4210_i2c_data_receive(s);
}
} else {
s->i2ccon |= I2CCON_INT_PEND;
qemu_irq_raise(s->irq);
}
}
}
break;
case I2CSTAT_ADDR:
s->i2cstat =
(s->i2cstat & I2CSTAT_START_BUSY) | (v & ~I2CSTAT_START_BUSY);
if (!(s->i2cstat & I2CSTAT_OUTPUT_EN)) {
s->i2cstat &= ~I2CSTAT_START_BUSY;
s->scl_free = true;
qemu_irq_lower(s->irq);
break;
}
/* Nothing to do if in i2c slave mode */
if (!I2C_IN_MASTER_MODE(s->i2cstat)) {
break;
}
if (v & I2CSTAT_START_BUSY) {
s->i2cstat &= ~I2CSTAT_LAST_BIT;
s->i2cstat |= I2CSTAT_START_BUSY; /* Line is busy */
s->scl_free = false;
/* Generate start bit and send slave address */
if (i2c_start_transfer(s->bus, s->i2cds >> 1, s->i2cds & 0x1) &&
(s->i2ccon & I2CCON_ACK_GEN)) {
s->i2cstat |= I2CSTAT_LAST_BIT;
} else if (EXYNOS4_I2C_MODE(s->i2cstat) == I2CMODE_MASTER_Rx) {
exynos4210_i2c_data_receive(s);
}
exynos4210_i2c_raise_interrupt(s);
} else {
i2c_end_transfer(s->bus);
if (!(s->i2ccon & I2CCON_INT_PEND)) {
s->i2cstat &= ~I2CSTAT_START_BUSY;
}
s->scl_free = true;
}
break;
case I2CADD_ADDR:
if ((s->i2cstat & I2CSTAT_OUTPUT_EN) == 0) {
s->i2cadd = v;
}
break;
case I2CDS_ADDR:
if (s->i2cstat & I2CSTAT_OUTPUT_EN) {
s->i2cds = v;
s->scl_free = true;
if (EXYNOS4_I2C_MODE(s->i2cstat) == I2CMODE_MASTER_Tx &&
(s->i2cstat & I2CSTAT_START_BUSY) &&
!(s->i2ccon & I2CCON_INT_PEND)) {
exynos4210_i2c_data_send(s);
}
}
break;
case I2CLC_ADDR:
s->i2clc = v;
break;
default:
DPRINT("ERROR: Bad write offset 0x%x\n", (unsigned int)offset);
break;
}
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
6,283 | static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
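    /* expand the guest offload feature bitmap into individual
     * csum/TSO4/TSO6/ECN/UFO flags for the peer */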
qemu_peer_set_offload(qemu_get_subqueue(n->nic, 0),
!!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
!!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
!!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
!!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
!!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}
| false | qemu | d6085e3ace20bc9b0fa625d8d79b22668710e217 |
6,284 | static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
XenPTReg *cfg_entry, uint16_t *val,
uint16_t dev_value, uint16_t valid_mask)
{
XenPTRegInfo *reg = cfg_entry->reg;
uint16_t writable_mask = 0;
uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
int debug_msix_enabled_old;
/* modify emulate register */
writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
/* create value for writing to I/O device register */
*val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
/* update MSI-X */
if ((*val & PCI_MSIX_FLAGS_ENABLE)
&& !(*val & PCI_MSIX_FLAGS_MASKALL)) {
xen_pt_msix_update(s);
} else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) {
xen_pt_msix_disable(s);
}
debug_msix_enabled_old = s->msix->enabled;
s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
if (s->msix->enabled != debug_msix_enabled_old) {
XEN_PT_LOG(&s->dev, "%s MSI-X\n",
s->msix->enabled ? "enable" : "disable");
}
return 0;
}
| false | qemu | e2779de053b64f023de382fd87b3596613d47d1e |
6,285 | int v9fs_co_mknod(V9fsState *s, V9fsString *path, uid_t uid,
gid_t gid, dev_t dev, mode_t mode)
{
int err;
FsCred cred;
cred_init(&cred);
cred.fc_uid = uid;
cred.fc_gid = gid;
cred.fc_mode = mode;
cred.fc_rdev = dev;
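    /* run the mknod on a worker thread so this coroutine does not block */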
v9fs_co_run_in_worker(
{
err = s->ops->mknod(&s->ctx, path->data, &cred);
if (err < 0) {
err = -errno;
}
});
return err;
}
| false | qemu | 02cb7f3a256517cbf3136caff2863fbafc57b540 |
6,288 | static uint64_t icp_control_read(void *opaque, target_phys_addr_t offset,
unsigned size)
{
switch (offset >> 2) {
case 0: /* CP_IDFIELD */
return 0x41034003;
case 1: /* CP_FLASHPROG */
return 0;
case 2: /* CP_INTREG */
return 0;
case 3: /* CP_DECODE */
return 0x11;
default:
hw_error("icp_control_read: Bad offset %x\n", (int)offset);
return 0;
}
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
6,289 | int64_t bdrv_get_block_status_above(BlockDriverState *bs,
BlockDriverState *base,
int64_t sector_num,
int nb_sectors, int *pnum,
BlockDriverState **file)
{
Coroutine *co;
BdrvCoGetBlockStatusData data = {
.bs = bs,
.base = base,
.file = file,
.sector_num = sector_num,
.nb_sectors = nb_sectors,
.pnum = pnum,
.done = false,
};
if (qemu_in_coroutine()) {
/* Fast-path if already in coroutine context */
bdrv_get_block_status_above_co_entry(&data);
} else {
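        /* not in coroutine context: spawn one and poll the AioContext
         * until it finishes */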
AioContext *aio_context = bdrv_get_aio_context(bs);
co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
&data);
qemu_coroutine_enter(co);
while (!data.done) {
aio_poll(aio_context, true);
}
}
return data.ret;
}
| false | qemu | 88b062c2036cfd05b5111147736a08ba05ea05a9 |
6,290 | static int block_save_complete(QEMUFile *f, void *opaque)
{
int ret;
DPRINTF("Enter save live complete submitted %d transferred %d\n",
block_mig_state.submitted, block_mig_state.transferred);
ret = flush_blks(f);
if (ret) {
blk_mig_cleanup();
return ret;
}
blk_mig_reset_dirty_cursor();
    /* we know for sure that the bulk save has completed and
       all async reads have completed */
assert(block_mig_state.submitted == 0);
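    /* synchronously flush any remaining dirty blocks */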
do {
ret = blk_mig_save_dirty_block(f, 0);
} while (ret == 0);
blk_mig_cleanup();
if (ret) {
return ret;
}
/* report completion */
qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);
DPRINTF("Block migration completed\n");
qemu_put_be64(f, BLK_MIG_FLAG_EOS);
return 0;
}
| false | qemu | 9ee0cb201e6bfe03549a649fd165a85cfed34d05 |
6,291 | static void omap_pwl_write(void *opaque, target_phys_addr_t addr,
uint64_t value, unsigned size)
{
struct omap_pwl_s *s = (struct omap_pwl_s *) opaque;
int offset = addr & OMAP_MPUI_REG_MASK;
if (size != 1) {
return omap_badwidth_write8(opaque, addr, value);
}
switch (offset) {
case 0x00: /* PWL_LEVEL */
s->level = value;
omap_pwl_update(s);
break;
case 0x04: /* PWL_CTRL */
s->enable = value & 1;
omap_pwl_update(s);
break;
default:
OMAP_BAD_REG(addr);
return;
}
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
6,292 | static int id_wellformed(const char *id)
{
int i;
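    /* a well-formed id starts with a letter and continues with letters,
     * digits, '-', '.' or '_' */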
if (!qemu_isalpha(id[0])) {
return 0;
}
for (i = 1; id[i]; i++) {
if (!qemu_isalnum(id[i]) && !strchr("-._", id[i])) {
return 0;
}
}
return 1;
}
| false | qemu | 9aebf3b89281a173d2dfeee379b800be5e3f363e |
6,294 | static void memory_region_iorange_destructor(IORange *iorange)
{
g_free(container_of(iorange, MemoryRegionIORange, iorange));
}
| false | qemu | b40acf99bef69fa8ab0f9092ff162fde945eec12 |
6,295 | static int qemu_rbd_open(BlockDriverState *bs, const char *filename, int flags)
{
BDRVRBDState *s = bs->opaque;
char pool[RBD_MAX_POOL_NAME_SIZE];
char snap_buf[RBD_MAX_SNAP_NAME_SIZE];
char conf[RBD_MAX_CONF_SIZE];
int r;
if (qemu_rbd_parsename(filename, pool, sizeof(pool),
snap_buf, sizeof(snap_buf),
s->name, sizeof(s->name),
conf, sizeof(conf)) < 0) {
return -EINVAL;
}
s->snap = NULL;
if (snap_buf[0] != '\0') {
s->snap = g_strdup(snap_buf);
}
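    /* bring up the RADOS connection: create the handle, load the config,
     * connect, then open the pool and the image */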
r = rados_create(&s->cluster, NULL);
if (r < 0) {
error_report("error initializing");
return r;
}
if (strstr(conf, "conf=") == NULL) {
r = rados_conf_read_file(s->cluster, NULL);
if (r < 0) {
error_report("error reading config file");
rados_shutdown(s->cluster);
return r;
}
}
if (conf[0] != '\0') {
r = qemu_rbd_set_conf(s->cluster, conf);
if (r < 0) {
error_report("error setting config options");
rados_shutdown(s->cluster);
return r;
}
}
r = rados_connect(s->cluster);
if (r < 0) {
error_report("error connecting");
rados_shutdown(s->cluster);
return r;
}
r = rados_ioctx_create(s->cluster, pool, &s->io_ctx);
if (r < 0) {
error_report("error opening pool %s", pool);
rados_shutdown(s->cluster);
return r;
}
r = rbd_open(s->io_ctx, s->name, &s->image, s->snap);
if (r < 0) {
error_report("error reading header from %s", s->name);
rados_ioctx_destroy(s->io_ctx);
rados_shutdown(s->cluster);
return r;
}
bs->read_only = (s->snap != NULL);
s->event_reader_pos = 0;
r = qemu_pipe(s->fds);
if (r < 0) {
error_report("error opening eventfd");
goto failed;
}
fcntl(s->fds[0], F_SETFL, O_NONBLOCK);
fcntl(s->fds[1], F_SETFL, O_NONBLOCK);
qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader,
NULL, qemu_rbd_aio_flush_cb, NULL, s);
return 0;
failed:
rbd_close(s->image);
rados_ioctx_destroy(s->io_ctx);
rados_shutdown(s->cluster);
return r;
}
| false | qemu | 7c7e9df0232a1ce5c411f0f348038d2e72097ae1 |
6,296 | static void net_socket_receive_dgram(void *opaque, const uint8_t *buf, size_t size)
{
NetSocketState *s = opaque;
sendto(s->fd, buf, size, 0,
(struct sockaddr *)&s->dgram_dst, sizeof(s->dgram_dst));
}
| false | qemu | e3f5ec2b5e92706e3b807059f79b1fb5d936e567 |
6,297 | int cpu_ppc_register (CPUPPCState *env, ppc_def_t *def)
{
env->msr_mask = def->msr_mask;
env->mmu_model = def->mmu_model;
env->excp_model = def->excp_model;
env->bus_model = def->bus_model;
env->bfd_mach = def->bfd_mach;
if (create_ppc_opcodes(env, def) < 0)
return -1;
init_ppc_proc(env, def);
#if defined(PPC_DUMP_CPU)
{
const unsigned char *mmu_model, *excp_model, *bus_model;
switch (env->mmu_model) {
case POWERPC_MMU_32B:
mmu_model = "PowerPC 32";
break;
case POWERPC_MMU_64B:
mmu_model = "PowerPC 64";
break;
case POWERPC_MMU_601:
mmu_model = "PowerPC 601";
break;
case POWERPC_MMU_SOFT_6xx:
mmu_model = "PowerPC 6xx/7xx with software driven TLBs";
break;
case POWERPC_MMU_SOFT_74xx:
mmu_model = "PowerPC 74xx with software driven TLBs";
break;
case POWERPC_MMU_SOFT_4xx:
mmu_model = "PowerPC 4xx with software driven TLBs";
break;
case POWERPC_MMU_SOFT_4xx_Z:
mmu_model = "PowerPC 4xx with software driven TLBs "
"and zones protections";
break;
case POWERPC_MMU_REAL_4xx:
mmu_model = "PowerPC 4xx real mode only";
break;
case POWERPC_MMU_BOOKE:
mmu_model = "PowerPC BookE";
break;
case POWERPC_MMU_BOOKE_FSL:
mmu_model = "PowerPC BookE FSL";
break;
case POWERPC_MMU_64BRIDGE:
mmu_model = "PowerPC 64 bridge";
break;
default:
mmu_model = "Unknown or invalid";
break;
}
switch (env->excp_model) {
case POWERPC_EXCP_STD:
excp_model = "PowerPC";
break;
case POWERPC_EXCP_40x:
excp_model = "PowerPC 40x";
break;
case POWERPC_EXCP_601:
excp_model = "PowerPC 601";
break;
case POWERPC_EXCP_602:
excp_model = "PowerPC 602";
break;
case POWERPC_EXCP_603:
excp_model = "PowerPC 603";
break;
case POWERPC_EXCP_603E:
excp_model = "PowerPC 603e";
break;
case POWERPC_EXCP_604:
excp_model = "PowerPC 604";
break;
case POWERPC_EXCP_7x0:
excp_model = "PowerPC 740/750";
break;
case POWERPC_EXCP_7x5:
excp_model = "PowerPC 745/755";
break;
case POWERPC_EXCP_74xx:
excp_model = "PowerPC 74xx";
break;
case POWERPC_EXCP_970:
excp_model = "PowerPC 970";
break;
case POWERPC_EXCP_BOOKE:
excp_model = "PowerPC BookE";
break;
default:
excp_model = "Unknown or invalid";
break;
}
switch (env->bus_model) {
case PPC_FLAGS_INPUT_6xx:
bus_model = "PowerPC 6xx";
break;
case PPC_FLAGS_INPUT_BookE:
bus_model = "PowerPC BookE";
break;
case PPC_FLAGS_INPUT_405:
bus_model = "PowerPC 405";
break;
case PPC_FLAGS_INPUT_970:
bus_model = "PowerPC 970";
break;
case PPC_FLAGS_INPUT_401:
bus_model = "PowerPC 401/403";
break;
default:
bus_model = "Unknown or invalid";
break;
}
printf("PowerPC %-12s : PVR %08x MSR %016" PRIx64 "\n"
" MMU model : %s\n",
def->name, def->pvr, def->msr_mask, mmu_model);
if (env->tlb != NULL) {
printf(" %d %s TLB in %d ways\n",
env->nb_tlb, env->id_tlbs ? "splitted" : "merged",
env->nb_ways);
}
printf(" Exceptions model : %s\n"
" Bus model : %s\n",
excp_model, bus_model);
}
dump_ppc_insns(env);
dump_ppc_sprs(env);
fflush(stdout);
#endif
return 0;
}
| false | qemu | 00af685fc974e4941ef2d309a2e8818d311a370c |
6,298 | static char *get_human_readable_size(char *buf, int buf_size, int64_t size)
{
static const char suffixes[NB_SUFFIXES] = "KMGT";
int64_t base;
int i;
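    /* values up to 999 print verbatim; larger ones are scaled by powers
     * of 1024 and given a K/M/G/T suffix */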
if (size <= 999) {
snprintf(buf, buf_size, "%" PRId64, size);
} else {
base = 1024;
for (i = 0; i < NB_SUFFIXES; i++) {
if (size < (10 * base)) {
snprintf(buf, buf_size, "%0.1f%c",
(double)size / base,
suffixes[i]);
break;
} else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
snprintf(buf, buf_size, "%" PRId64 "%c",
((size + (base >> 1)) / base),
suffixes[i]);
break;
}
base = base * 1024;
}
}
return buf;
}
| false | qemu | 2c20fa2cc26fd203a1260bb5251a523320faa905 |
6,299 | static coroutine_fn int cloop_co_read(BlockDriverState *bs, int64_t sector_num,
uint8_t *buf, int nb_sectors)
{
int ret;
BDRVCloopState *s = bs->opaque;
qemu_co_mutex_lock(&s->lock);
ret = cloop_read(bs, sector_num, buf, nb_sectors);
qemu_co_mutex_unlock(&s->lock);
return ret;
}
| false | qemu | 5cd230819ec26caf199bf73d38cf2407344e4443 |
6,300 | static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
{
unsigned int opc, rs1, rs2, rd;
TCGv cpu_src1, cpu_src2;
TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
target_long simm;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(dc->pc);
}
opc = GET_FIELD(insn, 0, 1);
rd = GET_FIELD(insn, 2, 6);
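    /* the two top bits of the instruction select the major format:
       0 = branches/sethi, 1 = call, 2 = arithmetic/logical/FPU ops */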
switch (opc) {
case 0: /* branches/sethi */
{
unsigned int xop = GET_FIELD(insn, 7, 9);
int32_t target;
switch (xop) {
#ifdef TARGET_SPARC64
case 0x1: /* V9 BPcc */
{
int cc;
target = GET_FIELD_SP(insn, 0, 18);
target = sign_extend(target, 19);
target <<= 2;
cc = GET_FIELD_SP(insn, 20, 21);
if (cc == 0)
do_branch(dc, target, insn, 0);
else if (cc == 2)
do_branch(dc, target, insn, 1);
else
goto illegal_insn;
goto jmp_insn;
}
case 0x3: /* V9 BPr */
{
target = GET_FIELD_SP(insn, 0, 13) |
(GET_FIELD_SP(insn, 20, 21) << 14);
target = sign_extend(target, 16);
target <<= 2;
cpu_src1 = get_src1(dc, insn);
do_branch_reg(dc, target, insn, cpu_src1);
goto jmp_insn;
}
case 0x5: /* V9 FBPcc */
{
int cc = GET_FIELD_SP(insn, 20, 21);
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
target = GET_FIELD_SP(insn, 0, 18);
target = sign_extend(target, 19);
target <<= 2;
do_fbranch(dc, target, insn, cc);
goto jmp_insn;
}
#else
case 0x7: /* CBN+x */
{
goto ncp_insn;
}
#endif
case 0x2: /* BN+x */
{
target = GET_FIELD(insn, 10, 31);
target = sign_extend(target, 22);
target <<= 2;
do_branch(dc, target, insn, 0);
goto jmp_insn;
}
case 0x6: /* FBN+x */
{
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
target = GET_FIELD(insn, 10, 31);
target = sign_extend(target, 22);
target <<= 2;
do_fbranch(dc, target, insn, 0);
goto jmp_insn;
}
case 0x4: /* SETHI */
/* Special-case %g0 because that's the canonical nop. */
if (rd) {
uint32_t value = GET_FIELD(insn, 10, 31);
TCGv t = gen_dest_gpr(dc, rd);
tcg_gen_movi_tl(t, value << 10);
gen_store_gpr(dc, rd, t);
}
break;
case 0x0: /* UNIMPL */
default:
goto illegal_insn;
}
break;
}
break;
case 1: /*CALL*/
{
target_long target = GET_FIELDs(insn, 2, 31) << 2;
TCGv o7 = gen_dest_gpr(dc, 15);
tcg_gen_movi_tl(o7, dc->pc);
gen_store_gpr(dc, 15, o7);
target += dc->pc;
gen_mov_pc_npc(dc);
#ifdef TARGET_SPARC64
if (unlikely(AM_CHECK(dc))) {
target &= 0xffffffffULL;
}
#endif
dc->npc = target;
}
goto jmp_insn;
case 2: /* FPU & Logical Operations */
{
unsigned int xop = GET_FIELD(insn, 7, 12);
if (xop == 0x3a) { /* generate trap */
int cond = GET_FIELD(insn, 3, 6);
TCGv_i32 trap;
int l1 = -1, mask;
if (cond == 0) {
/* Trap never. */
break;
}
save_state(dc);
if (cond != 8) {
/* Conditional trap. */
DisasCompare cmp;
#ifdef TARGET_SPARC64
/* V9 icc/xcc */
int cc = GET_FIELD_SP(insn, 11, 12);
if (cc == 0) {
gen_compare(&cmp, 0, cond, dc);
} else if (cc == 2) {
gen_compare(&cmp, 1, cond, dc);
} else {
goto illegal_insn;
}
#else
gen_compare(&cmp, 0, cond, dc);
#endif
l1 = gen_new_label();
tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
cmp.c1, cmp.c2, l1);
free_compare(&cmp);
}
mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
? UA2005_HTRAP_MASK : V8_TRAP_MASK);
/* Don't use the normal temporaries, as they may well have
gone out of scope with the branch above. While we're
doing that we might as well pre-truncate to 32-bit. */
trap = tcg_temp_new_i32();
rs1 = GET_FIELD_SP(insn, 14, 18);
if (IS_IMM) {
rs2 = GET_FIELD_SP(insn, 0, 6);
if (rs1 == 0) {
tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
/* Signal that the trap value is fully constant. */
mask = 0;
} else {
TCGv t1 = gen_load_gpr(dc, rs1);
tcg_gen_trunc_tl_i32(trap, t1);
tcg_gen_addi_i32(trap, trap, rs2);
}
} else {
TCGv t1, t2;
rs2 = GET_FIELD_SP(insn, 0, 4);
t1 = gen_load_gpr(dc, rs1);
t2 = gen_load_gpr(dc, rs2);
tcg_gen_add_tl(t1, t1, t2);
tcg_gen_trunc_tl_i32(trap, t1);
}
if (mask != 0) {
tcg_gen_andi_i32(trap, trap, mask);
tcg_gen_addi_i32(trap, trap, TT_TRAP);
}
gen_helper_raise_exception(cpu_env, trap);
tcg_temp_free_i32(trap);
if (cond == 8) {
/* An unconditional trap ends the TB. */
dc->is_br = 1;
goto jmp_insn;
} else {
/* A conditional trap falls through to the next insn. */
gen_set_label(l1);
break;
}
} else if (xop == 0x28) {
rs1 = GET_FIELD(insn, 13, 17);
switch(rs1) {
case 0: /* rdy */
#ifndef TARGET_SPARC64
case 0x01 ... 0x0e: /* undefined in the SPARCv8
manual, rdy on the microSPARC
II */
case 0x0f: /* stbar in the SPARCv8 manual,
rdy on the microSPARC II */
case 0x10 ... 0x1f: /* implementation-dependent in the
SPARCv8 manual, rdy on the
microSPARC II */
/* Read Asr17 */
if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
TCGv t = gen_dest_gpr(dc, rd);
/* Read Asr17 for a Leon3 monoprocessor */
tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
gen_store_gpr(dc, rd, t);
break;
}
#endif
gen_store_gpr(dc, rd, cpu_y);
break;
#ifdef TARGET_SPARC64
case 0x2: /* V9 rdccr */
update_psr(dc);
gen_helper_rdccr(cpu_dst, cpu_env);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x3: /* V9 rdasi */
tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x4: /* V9 rdtick */
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_get_count(cpu_dst, r_tickptr);
tcg_temp_free_ptr(r_tickptr);
gen_store_gpr(dc, rd, cpu_dst);
}
break;
case 0x5: /* V9 rdpc */
{
TCGv t = gen_dest_gpr(dc, rd);
if (unlikely(AM_CHECK(dc))) {
tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
} else {
tcg_gen_movi_tl(t, dc->pc);
}
gen_store_gpr(dc, rd, t);
}
break;
case 0x6: /* V9 rdfprs */
tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0xf: /* V9 membar */
break; /* no effect */
case 0x13: /* Graphics Status */
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_store_gpr(dc, rd, cpu_gsr);
break;
case 0x16: /* Softint */
tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x17: /* Tick compare */
gen_store_gpr(dc, rd, cpu_tick_cmpr);
break;
case 0x18: /* System tick */
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
gen_helper_tick_get_count(cpu_dst, r_tickptr);
tcg_temp_free_ptr(r_tickptr);
gen_store_gpr(dc, rd, cpu_dst);
}
break;
case 0x19: /* System tick compare */
gen_store_gpr(dc, rd, cpu_stick_cmpr);
break;
case 0x10: /* Performance Control */
case 0x11: /* Performance Instrumentation Counter */
case 0x12: /* Dispatch Control */
case 0x14: /* Softint set, WO */
case 0x15: /* Softint clear, WO */
#endif
default:
goto illegal_insn;
}
#if !defined(CONFIG_USER_ONLY)
} else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
if (!supervisor(dc)) {
goto priv_insn;
}
update_psr(dc);
gen_helper_rdpsr(cpu_dst, cpu_env);
#else
CHECK_IU_FEATURE(dc, HYPV);
if (!hypervisor(dc))
goto priv_insn;
rs1 = GET_FIELD(insn, 13, 17);
switch (rs1) {
case 0: // hpstate
// gen_op_rdhpstate();
break;
case 1: // htstate
// gen_op_rdhtstate();
break;
case 3: // hintp
tcg_gen_mov_tl(cpu_dst, cpu_hintp);
break;
case 5: // htba
tcg_gen_mov_tl(cpu_dst, cpu_htba);
break;
case 6: // hver
tcg_gen_mov_tl(cpu_dst, cpu_hver);
break;
case 31: // hstick_cmpr
tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
break;
default:
goto illegal_insn;
}
#endif
gen_store_gpr(dc, rd, cpu_dst);
break;
} else if (xop == 0x2a) { /* rdwim / V9 rdpr */
if (!supervisor(dc))
goto priv_insn;
#ifdef TARGET_SPARC64
rs1 = GET_FIELD(insn, 13, 17);
switch (rs1) {
case 0: // tpc
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 1: // tnpc
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tnpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 2: // tstate
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tstate));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 3: // tt
{
TCGv_ptr r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tt));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 4: // tick
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 5: // tba
tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
break;
case 6: // pstate
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, pstate));
break;
case 7: // tl
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, tl));
break;
case 8: // pil
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, psrpil));
break;
case 9: // cwp
gen_helper_rdcwp(cpu_tmp0, cpu_env);
break;
case 10: // cansave
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, cansave));
break;
case 11: // canrestore
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, canrestore));
break;
case 12: // cleanwin
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, cleanwin));
break;
case 13: // otherwin
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, otherwin));
break;
case 14: // wstate
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, wstate));
break;
case 16: // UA2005 gl
CHECK_IU_FEATURE(dc, GL);
tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, gl));
break;
case 26: // UA2005 strand status
CHECK_IU_FEATURE(dc, HYPV);
if (!hypervisor(dc))
goto priv_insn;
tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
break;
case 31: // ver
tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
break;
case 15: // fq
default:
goto illegal_insn;
}
#else
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
#endif
gen_store_gpr(dc, rd, cpu_tmp0);
break;
} else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
save_state(dc);
gen_helper_flushw(cpu_env);
#else
if (!supervisor(dc))
goto priv_insn;
gen_store_gpr(dc, rd, cpu_tbr);
#endif
break;
#endif
} else if (xop == 0x34) { /* FPU Operations */
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_op_clear_ieee_excp_and_FTT();
rs1 = GET_FIELD(insn, 13, 17);
rs2 = GET_FIELD(insn, 27, 31);
xop = GET_FIELD(insn, 18, 26);
save_state(dc);
switch (xop) {
case 0x1: /* fmovs */
cpu_src1_32 = gen_load_fpr_F(dc, rs2);
gen_store_fpr_F(dc, rd, cpu_src1_32);
break;
case 0x5: /* fnegs */
gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
break;
case 0x9: /* fabss */
gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
break;
case 0x29: /* fsqrts */
CHECK_FPU_FEATURE(dc, FSQRT);
gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
break;
case 0x2a: /* fsqrtd */
CHECK_FPU_FEATURE(dc, FSQRT);
gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
break;
case 0x2b: /* fsqrtq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
break;
case 0x41: /* fadds */
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
break;
case 0x42: /* faddd */
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
break;
case 0x43: /* faddq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
break;
case 0x45: /* fsubs */
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
break;
case 0x46: /* fsubd */
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
break;
case 0x47: /* fsubq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
break;
case 0x49: /* fmuls */
CHECK_FPU_FEATURE(dc, FMUL);
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
break;
case 0x4a: /* fmuld */
CHECK_FPU_FEATURE(dc, FMUL);
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
break;
case 0x4b: /* fmulq */
CHECK_FPU_FEATURE(dc, FLOAT128);
CHECK_FPU_FEATURE(dc, FMUL);
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
break;
case 0x4d: /* fdivs */
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
break;
case 0x4e: /* fdivd */
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
break;
case 0x4f: /* fdivq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
break;
case 0x69: /* fsmuld */
CHECK_FPU_FEATURE(dc, FSMULD);
gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
break;
case 0x6e: /* fdmulq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
break;
case 0xc4: /* fitos */
gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
break;
case 0xc6: /* fdtos */
gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
break;
case 0xc7: /* fqtos */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
break;
case 0xc8: /* fitod */
gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
break;
case 0xc9: /* fstod */
gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
break;
case 0xcb: /* fqtod */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
break;
case 0xcc: /* fitoq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
break;
case 0xcd: /* fstoq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
break;
case 0xce: /* fdtoq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
break;
case 0xd1: /* fstoi */
gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
break;
case 0xd2: /* fdtoi */
gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
break;
case 0xd3: /* fqtoi */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
break;
#ifdef TARGET_SPARC64
case 0x2: /* V9 fmovd */
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
gen_store_fpr_D(dc, rd, cpu_src1_64);
break;
case 0x3: /* V9 fmovq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_move_Q(rd, rs2);
break;
case 0x6: /* V9 fnegd */
gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
break;
case 0x7: /* V9 fnegq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
break;
case 0xa: /* V9 fabsd */
gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
break;
case 0xb: /* V9 fabsq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
break;
case 0x81: /* V9 fstox */
gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
break;
case 0x82: /* V9 fdtox */
gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
break;
case 0x83: /* V9 fqtox */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
break;
case 0x84: /* V9 fxtos */
gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
break;
case 0x88: /* V9 fxtod */
gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
break;
case 0x8c: /* V9 fxtoq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
break;
#endif
default:
goto illegal_insn;
}
} else if (xop == 0x35) { /* FPU Operations */
#ifdef TARGET_SPARC64
int cond;
#endif
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_op_clear_ieee_excp_and_FTT();
rs1 = GET_FIELD(insn, 13, 17);
rs2 = GET_FIELD(insn, 27, 31);
xop = GET_FIELD(insn, 18, 26);
save_state(dc);
#ifdef TARGET_SPARC64
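/* FMOVR: conditionally move an FP register based on comparing an integer
   register against zero */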
#define FMOVR(sz) \
do { \
DisasCompare cmp; \
cond = GET_FIELD_SP(insn, 14, 17); \
cpu_src1 = get_src1(dc, insn); \
gen_compare_reg(&cmp, cond, cpu_src1); \
gen_fmov##sz(dc, &cmp, rd, rs2); \
free_compare(&cmp); \
} while (0)
if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
FMOVR(s);
break;
} else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
FMOVR(d);
break;
} else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVR(q);
break;
}
#undef FMOVR
#endif
switch (xop) {
#ifdef TARGET_SPARC64
#define FMOVCC(fcc, sz) \
do { \
DisasCompare cmp; \
cond = GET_FIELD_SP(insn, 14, 17); \
gen_fcompare(&cmp, fcc, cond); \
gen_fmov##sz(dc, &cmp, rd, rs2); \
free_compare(&cmp); \
} while (0)
case 0x001: /* V9 fmovscc %fcc0 */
FMOVCC(0, s);
break;
case 0x002: /* V9 fmovdcc %fcc0 */
FMOVCC(0, d);
break;
case 0x003: /* V9 fmovqcc %fcc0 */
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(0, q);
break;
case 0x041: /* V9 fmovscc %fcc1 */
FMOVCC(1, s);
break;
case 0x042: /* V9 fmovdcc %fcc1 */
FMOVCC(1, d);
break;
case 0x043: /* V9 fmovqcc %fcc1 */
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(1, q);
break;
case 0x081: /* V9 fmovscc %fcc2 */
FMOVCC(2, s);
break;
case 0x082: /* V9 fmovdcc %fcc2 */
FMOVCC(2, d);
break;
case 0x083: /* V9 fmovqcc %fcc2 */
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(2, q);
break;
case 0x0c1: /* V9 fmovscc %fcc3 */
FMOVCC(3, s);
break;
case 0x0c2: /* V9 fmovdcc %fcc3 */
FMOVCC(3, d);
break;
case 0x0c3: /* V9 fmovqcc %fcc3 */
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(3, q);
break;
#undef FMOVCC
#define FMOVCC(xcc, sz) \
do { \
DisasCompare cmp; \
cond = GET_FIELD_SP(insn, 14, 17); \
gen_compare(&cmp, xcc, cond, dc); \
gen_fmov##sz(dc, &cmp, rd, rs2); \
free_compare(&cmp); \
} while (0)
case 0x101: /* V9 fmovscc %icc */
FMOVCC(0, s);
break;
case 0x102: /* V9 fmovdcc %icc */
FMOVCC(0, d);
break;
case 0x103: /* V9 fmovqcc %icc */
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(0, q);
break;
case 0x181: /* V9 fmovscc %xcc */
FMOVCC(1, s);
break;
case 0x182: /* V9 fmovdcc %xcc */
FMOVCC(1, d);
break;
case 0x183: /* V9 fmovqcc %xcc */
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(1, q);
break;
#undef FMOVCC
#endif
case 0x51: /* fcmps, V9 %fcc */
cpu_src1_32 = gen_load_fpr_F(dc, rs1);
cpu_src2_32 = gen_load_fpr_F(dc, rs2);
gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
break;
case 0x52: /* fcmpd, V9 %fcc */
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
break;
case 0x53: /* fcmpq, V9 %fcc */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_op_load_fpr_QT0(QFPREG(rs1));
gen_op_load_fpr_QT1(QFPREG(rs2));
gen_op_fcmpq(rd & 3);
break;
case 0x55: /* fcmpes, V9 %fcc */
cpu_src1_32 = gen_load_fpr_F(dc, rs1);
cpu_src2_32 = gen_load_fpr_F(dc, rs2);
gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
break;
case 0x56: /* fcmped, V9 %fcc */
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
break;
case 0x57: /* fcmpeq, V9 %fcc */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_op_load_fpr_QT0(QFPREG(rs1));
gen_op_load_fpr_QT1(QFPREG(rs2));
gen_op_fcmpeq(rd & 3);
break;
default:
goto illegal_insn;
}
} else if (xop == 0x2) {
TCGv dst = gen_dest_gpr(dc, rd);
rs1 = GET_FIELD(insn, 13, 17);
if (rs1 == 0) {
/* clr/mov shortcut : or %g0, x, y -> mov x, y */
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 19, 31);
tcg_gen_movi_tl(dst, simm);
gen_store_gpr(dc, rd, dst);
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
if (rs2 == 0) {
tcg_gen_movi_tl(dst, 0);
gen_store_gpr(dc, rd, dst);
} else {
cpu_src2 = gen_load_gpr(dc, rs2);
gen_store_gpr(dc, rd, cpu_src2);
}
}
} else {
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 19, 31);
tcg_gen_ori_tl(dst, cpu_src1, simm);
gen_store_gpr(dc, rd, dst);
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
if (rs2 == 0) {
/* mov shortcut: or x, %g0, y -> mov x, y */
gen_store_gpr(dc, rd, cpu_src1);
} else {
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, dst);
}
}
}
#ifdef TARGET_SPARC64
} else if (xop == 0x25) { /* sll, V9 sllx */
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 20, 31);
if (insn & (1 << 12)) {
tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
} else {
tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
}
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
cpu_src2 = gen_load_gpr(dc, rs2);
if (insn & (1 << 12)) {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
} else {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
}
tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(dc, rd, cpu_dst);
} else if (xop == 0x26) { /* srl, V9 srlx */
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 20, 31);
if (insn & (1 << 12)) {
tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
} else {
tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
}
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
cpu_src2 = gen_load_gpr(dc, rs2);
if (insn & (1 << 12)) {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
} else {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
}
}
gen_store_gpr(dc, rd, cpu_dst);
} else if (xop == 0x27) { /* sra, V9 srax */
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 20, 31);
if (insn & (1 << 12)) {
tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
} else {
tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
}
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
cpu_src2 = gen_load_gpr(dc, rs2);
if (insn & (1 << 12)) {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
} else {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
}
}
gen_store_gpr(dc, rd, cpu_dst);
#endif
} else if (xop < 0x36) {
if (xop < 0x20) {
cpu_src1 = get_src1(dc, insn);
cpu_src2 = get_src2(dc, insn);
switch (xop & ~0x10) {
case 0x0: /* add */
if (xop & 0x10) {
gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
dc->cc_op = CC_OP_ADD;
} else {
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
}
break;
case 0x1: /* and */
tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x2: /* or */
tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x3: /* xor */
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x4: /* sub */
if (xop & 0x10) {
gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
dc->cc_op = CC_OP_SUB;
} else {
tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
}
break;
case 0x5: /* andn */
tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x6: /* orn */
tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x7: /* xorn */
tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x8: /* addx, V9 addc */
gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
(xop & 0x10));
break;
#ifdef TARGET_SPARC64
case 0x9: /* V9 mulx */
tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
break;
#endif
case 0xa: /* umul */
CHECK_IU_FEATURE(dc, MUL);
gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0xb: /* smul */
CHECK_IU_FEATURE(dc, MUL);
gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0xc: /* subx, V9 subc */
gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
(xop & 0x10));
break;
#ifdef TARGET_SPARC64
case 0xd: /* V9 udivx */
gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
break;
#endif
case 0xe: /* udiv */
CHECK_IU_FEATURE(dc, DIV);
if (xop & 0x10) {
gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
dc->cc_op = CC_OP_DIV;
} else {
gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
}
break;
case 0xf: /* sdiv */
CHECK_IU_FEATURE(dc, DIV);
if (xop & 0x10) {
gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
dc->cc_op = CC_OP_DIV;
} else {
gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
}
break;
default:
goto illegal_insn;
}
gen_store_gpr(dc, rd, cpu_dst);
} else {
cpu_src1 = get_src1(dc, insn);
cpu_src2 = get_src2(dc, insn);
switch (xop) {
case 0x20: /* taddcc */
gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
dc->cc_op = CC_OP_TADD;
break;
case 0x21: /* tsubcc */
gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
dc->cc_op = CC_OP_TSUB;
break;
case 0x22: /* taddcctv */
gen_helper_taddcctv(cpu_dst, cpu_env,
cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
dc->cc_op = CC_OP_TADDTV;
break;
case 0x23: /* tsubcctv */
gen_helper_tsubcctv(cpu_dst, cpu_env,
cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
dc->cc_op = CC_OP_TSUBTV;
break;
case 0x24: /* mulscc */
update_psr(dc);
gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
dc->cc_op = CC_OP_ADD;
break;
#ifndef TARGET_SPARC64
case 0x25: /* sll */
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 20, 31);
tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
} else { /* register */
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x26: /* srl */
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 20, 31);
tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
} else { /* register */
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x27: /* sra */
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 20, 31);
tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
} else { /* register */
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(dc, rd, cpu_dst);
break;
#endif
case 0x30:
{
switch(rd) {
case 0: /* wry */
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
break;
#ifndef TARGET_SPARC64
case 0x01 ... 0x0f: /* undefined in the
SPARCv8 manual, nop
on the microSPARC
II */
case 0x10 ... 0x1f: /* implementation-dependent
in the SPARCv8
manual, nop on the
microSPARC II */
break;
#else
case 0x2: /* V9 wrccr */
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
gen_helper_wrccr(cpu_env, cpu_tmp0);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
dc->cc_op = CC_OP_FLAGS;
break;
case 0x3: /* V9 wrasi */
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
tcg_gen_trunc_tl_i32(cpu_asi, cpu_tmp0);
break;
case 0x6: /* V9 wrfprs */
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
save_state(dc);
gen_op_next_insn();
tcg_gen_exit_tb(0);
dc->is_br = 1;
break;
case 0xf: /* V9 sir, nop if user */
#if !defined(CONFIG_USER_ONLY)
if (supervisor(dc)) {
; // XXX
}
#endif
break;
case 0x13: /* Graphics Status */
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
break;
case 0x14: /* Softint set */
if (!supervisor(dc))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
gen_helper_set_softint(cpu_env, cpu_tmp0);
break;
case 0x15: /* Softint clear */
if (!supervisor(dc))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
gen_helper_clear_softint(cpu_env, cpu_tmp0);
break;
case 0x16: /* Softint write */
if (!supervisor(dc))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
gen_helper_write_softint(cpu_env, cpu_tmp0);
break;
case 0x17: /* Tick compare */
#if !defined(CONFIG_USER_ONLY)
if (!supervisor(dc))
goto illegal_insn;
#endif
{
TCGv_ptr r_tickptr;
tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_set_limit(r_tickptr,
cpu_tick_cmpr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 0x18: /* System tick */
#if !defined(CONFIG_USER_ONLY)
if (!supervisor(dc))
goto illegal_insn;
#endif
{
TCGv_ptr r_tickptr;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
gen_helper_tick_set_count(r_tickptr,
cpu_tmp0);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 0x19: /* System tick compare */
#if !defined(CONFIG_USER_ONLY)
if (!supervisor(dc))
goto illegal_insn;
#endif
{
TCGv_ptr r_tickptr;
tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
gen_helper_tick_set_limit(r_tickptr,
cpu_stick_cmpr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 0x10: /* Performance Control */
case 0x11: /* Performance Instrumentation
Counter */
case 0x12: /* Dispatch Control */
#endif
default:
goto illegal_insn;
}
}
break;
#if !defined(CONFIG_USER_ONLY)
case 0x31: /* wrpsr, V9 saved, restored */
{
if (!supervisor(dc))
goto priv_insn;
#ifdef TARGET_SPARC64
switch (rd) {
case 0:
gen_helper_saved(cpu_env);
break;
case 1:
gen_helper_restored(cpu_env);
break;
case 2: /* UA2005 allclean */
case 3: /* UA2005 otherw */
case 4: /* UA2005 normalw */
case 5: /* UA2005 invalw */
// XXX
default:
goto illegal_insn;
}
#else
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
gen_helper_wrpsr(cpu_env, cpu_tmp0);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
dc->cc_op = CC_OP_FLAGS;
save_state(dc);
gen_op_next_insn();
tcg_gen_exit_tb(0);
dc->is_br = 1;
#endif
}
break;
case 0x32: /* wrwim, V9 wrpr */
{
if (!supervisor(dc))
goto priv_insn;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
#ifdef TARGET_SPARC64
switch (rd) {
case 0: // tpc
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 1: // tnpc
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tnpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 2: // tstate
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state,
tstate));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 3: // tt
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tt));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 4: // tick
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_set_count(r_tickptr,
cpu_tmp0);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 5: // tba
tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
break;
case 6: // pstate
save_state(dc);
gen_helper_wrpstate(cpu_env, cpu_tmp0);
dc->npc = DYNAMIC_PC;
break;
case 7: // tl
save_state(dc);
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, tl));
dc->npc = DYNAMIC_PC;
break;
case 8: // pil
gen_helper_wrpil(cpu_env, cpu_tmp0);
break;
case 9: // cwp
gen_helper_wrcwp(cpu_env, cpu_tmp0);
break;
case 10: // cansave
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState,
cansave));
break;
case 11: // canrestore
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState,
canrestore));
break;
case 12: // cleanwin
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState,
cleanwin));
break;
case 13: // otherwin
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState,
otherwin));
break;
case 14: // wstate
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState,
wstate));
break;
case 16: // UA2005 gl
CHECK_IU_FEATURE(dc, GL);
tcg_gen_st32_tl(cpu_tmp0, cpu_env,
offsetof(CPUSPARCState, gl));
break;
case 26: // UA2005 strand status
CHECK_IU_FEATURE(dc, HYPV);
if (!hypervisor(dc))
goto priv_insn;
tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
break;
default:
goto illegal_insn;
}
#else
tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
if (dc->def->nwindows != 32) {
tcg_gen_andi_tl(cpu_wim, cpu_wim,
(1 << dc->def->nwindows) - 1);
}
#endif
}
break;
case 0x33: /* wrtbr, UA2005 wrhpr */
{
#ifndef TARGET_SPARC64
if (!supervisor(dc))
goto priv_insn;
tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
#else
CHECK_IU_FEATURE(dc, HYPV);
if (!hypervisor(dc))
goto priv_insn;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
switch (rd) {
case 0: // hpstate
// XXX gen_op_wrhpstate();
save_state(dc);
gen_op_next_insn();
tcg_gen_exit_tb(0);
dc->is_br = 1;
break;
case 1: // htstate
// XXX gen_op_wrhtstate();
break;
case 3: // hintp
tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
break;
case 5: // htba
tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
break;
case 31: // hstick_cmpr
{
TCGv_ptr r_tickptr;
tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, hstick));
gen_helper_tick_set_limit(r_tickptr,
cpu_hstick_cmpr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 6: // hver readonly
default:
goto illegal_insn;
}
#endif
}
break;
#endif
#ifdef TARGET_SPARC64
case 0x2c: /* V9 movcc */
{
int cc = GET_FIELD_SP(insn, 11, 12);
int cond = GET_FIELD_SP(insn, 14, 17);
DisasCompare cmp;
TCGv dst;
if (insn & (1 << 18)) {
if (cc == 0) {
gen_compare(&cmp, 0, cond, dc);
} else if (cc == 2) {
gen_compare(&cmp, 1, cond, dc);
} else {
goto illegal_insn;
}
} else {
gen_fcompare(&cmp, cc, cond);
}
/* The get_src2 above loaded the normal 13-bit
immediate field, not the 11-bit field we have
in movcc. But it did handle the reg case. */
if (IS_IMM) {
simm = GET_FIELD_SPs(insn, 0, 10);
tcg_gen_movi_tl(cpu_src2, simm);
}
dst = gen_load_gpr(dc, rd);
tcg_gen_movcond_tl(cmp.cond, dst,
cmp.c1, cmp.c2,
cpu_src2, dst);
free_compare(&cmp);
gen_store_gpr(dc, rd, dst);
break;
}
case 0x2d: /* V9 sdivx */
gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x2e: /* V9 popc */
gen_helper_popc(cpu_dst, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x2f: /* V9 movr */
{
int cond = GET_FIELD_SP(insn, 10, 12);
DisasCompare cmp;
TCGv dst;
gen_compare_reg(&cmp, cond, cpu_src1);
/* The get_src2 above loaded the normal 13-bit
immediate field, not the 10-bit field we have
in movr. But it did handle the reg case. */
if (IS_IMM) {
simm = GET_FIELD_SPs(insn, 0, 9);
tcg_gen_movi_tl(cpu_src2, simm);
}
dst = gen_load_gpr(dc, rd);
tcg_gen_movcond_tl(cmp.cond, dst,
cmp.c1, cmp.c2,
cpu_src2, dst);
free_compare(&cmp);
gen_store_gpr(dc, rd, dst);
break;
}
#endif
default:
goto illegal_insn;
}
}
} else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
#ifdef TARGET_SPARC64
int opf = GET_FIELD_SP(insn, 5, 13);
rs1 = GET_FIELD(insn, 13, 17);
rs2 = GET_FIELD(insn, 27, 31);
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
switch (opf) {
case 0x000: /* VIS I edge8cc */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x001: /* VIS II edge8n */
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x002: /* VIS I edge8lcc */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x003: /* VIS II edge8ln */
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x004: /* VIS I edge16cc */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x005: /* VIS II edge16n */
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x006: /* VIS I edge16lcc */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x007: /* VIS II edge16ln */
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x008: /* VIS I edge32cc */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x009: /* VIS II edge32n */
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x00a: /* VIS I edge32lcc */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x00b: /* VIS II edge32ln */
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x010: /* VIS I array8 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x012: /* VIS I array16 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x014: /* VIS I array32 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x018: /* VIS I alignaddr */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x01a: /* VIS I alignaddrl */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x019: /* VIS II bmask */
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x020: /* VIS I fcmple16 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x022: /* VIS I fcmpne16 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x024: /* VIS I fcmple32 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x026: /* VIS I fcmpne32 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x028: /* VIS I fcmpgt16 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x02a: /* VIS I fcmpeq16 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x02c: /* VIS I fcmpgt32 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x02e: /* VIS I fcmpeq32 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x031: /* VIS I fmul8x16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
break;
case 0x033: /* VIS I fmul8x16au */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
break;
case 0x035: /* VIS I fmul8x16al */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
break;
case 0x036: /* VIS I fmul8sux16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
break;
case 0x037: /* VIS I fmul8ulx16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
break;
case 0x038: /* VIS I fmuld8sux16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
break;
case 0x039: /* VIS I fmuld8ulx16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
break;
case 0x03a: /* VIS I fpack32 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
break;
case 0x03b: /* VIS I fpack16 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
cpu_dst_32 = gen_dest_fpr_F(dc);
gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x03d: /* VIS I fpackfix */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
cpu_dst_32 = gen_dest_fpr_F(dc);
gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x03e: /* VIS I pdist */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
break;
case 0x048: /* VIS I faligndata */
CHECK_FPU_FEATURE(dc, VIS1);
gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
break;
case 0x04b: /* VIS I fpmerge */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
break;
case 0x04c: /* VIS II bshuffle */
CHECK_FPU_FEATURE(dc, VIS2);
gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
break;
case 0x04d: /* VIS I fexpand */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
break;
case 0x050: /* VIS I fpadd16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
break;
case 0x051: /* VIS I fpadd16s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
break;
case 0x052: /* VIS I fpadd32 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
break;
case 0x053: /* VIS I fpadd32s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
break;
case 0x054: /* VIS I fpsub16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
break;
case 0x055: /* VIS I fpsub16s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
break;
case 0x056: /* VIS I fpsub32 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
break;
case 0x057: /* VIS I fpsub32s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
break;
case 0x060: /* VIS I fzero */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_dst_64 = gen_dest_fpr_D(dc, rd);
tcg_gen_movi_i64(cpu_dst_64, 0);
gen_store_fpr_D(dc, rd, cpu_dst_64);
break;
case 0x061: /* VIS I fzeros */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_dst_32 = gen_dest_fpr_F(dc);
tcg_gen_movi_i32(cpu_dst_32, 0);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x062: /* VIS I fnor */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
break;
case 0x063: /* VIS I fnors */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
break;
case 0x064: /* VIS I fandnot2 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
break;
case 0x065: /* VIS I fandnot2s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
break;
case 0x066: /* VIS I fnot2 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
break;
case 0x067: /* VIS I fnot2s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
break;
case 0x068: /* VIS I fandnot1 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
break;
case 0x069: /* VIS I fandnot1s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
break;
case 0x06a: /* VIS I fnot1 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
break;
case 0x06b: /* VIS I fnot1s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
break;
case 0x06c: /* VIS I fxor */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
break;
case 0x06d: /* VIS I fxors */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
break;
case 0x06e: /* VIS I fnand */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
break;
case 0x06f: /* VIS I fnands */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
break;
case 0x070: /* VIS I fand */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
break;
case 0x071: /* VIS I fands */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
break;
case 0x072: /* VIS I fxnor */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
break;
case 0x073: /* VIS I fxnors */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
break;
case 0x074: /* VIS I fsrc1 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
gen_store_fpr_D(dc, rd, cpu_src1_64);
break;
case 0x075: /* VIS I fsrc1s */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_32 = gen_load_fpr_F(dc, rs1);
gen_store_fpr_F(dc, rd, cpu_src1_32);
break;
case 0x076: /* VIS I fornot2 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
break;
case 0x077: /* VIS I fornot2s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
break;
case 0x078: /* VIS I fsrc2 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
gen_store_fpr_D(dc, rd, cpu_src1_64);
break;
case 0x079: /* VIS I fsrc2s */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_32 = gen_load_fpr_F(dc, rs2);
gen_store_fpr_F(dc, rd, cpu_src1_32);
break;
case 0x07a: /* VIS I fornot1 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
break;
case 0x07b: /* VIS I fornot1s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
break;
case 0x07c: /* VIS I for */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
break;
case 0x07d: /* VIS I fors */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
break;
case 0x07e: /* VIS I fone */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_dst_64 = gen_dest_fpr_D(dc, rd);
tcg_gen_movi_i64(cpu_dst_64, -1);
gen_store_fpr_D(dc, rd, cpu_dst_64);
break;
case 0x07f: /* VIS I fones */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_dst_32 = gen_dest_fpr_F(dc);
tcg_gen_movi_i32(cpu_dst_32, -1);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x080: /* VIS I shutdown */
case 0x081: /* VIS II siam */
// XXX
goto illegal_insn;
default:
goto illegal_insn;
}
#else
goto ncp_insn;
#endif
} else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
goto illegal_insn;
#else
goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
} else if (xop == 0x39) { /* V9 return */
TCGv_i32 r_const;
save_state(dc);
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 19, 31);
tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
if (rs2) {
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
} else {
tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
}
}
gen_helper_restore(cpu_env);
gen_mov_pc_npc(dc);
r_const = tcg_const_i32(3);
gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
tcg_temp_free_i32(r_const);
tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
dc->npc = DYNAMIC_PC;
goto jmp_insn;
#endif
} else {
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 19, 31);
tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
if (rs2) {
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
} else {
tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
}
}
switch (xop) {
case 0x38: /* jmpl */
{
TCGv t;
TCGv_i32 r_const;
t = gen_dest_gpr(dc, rd);
tcg_gen_movi_tl(t, dc->pc);
gen_store_gpr(dc, rd, t);
gen_mov_pc_npc(dc);
r_const = tcg_const_i32(3);
gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
tcg_temp_free_i32(r_const);
gen_address_mask(dc, cpu_tmp0);
tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
dc->npc = DYNAMIC_PC;
}
goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
case 0x39: /* rett, V9 return */
{
TCGv_i32 r_const;
if (!supervisor(dc))
goto priv_insn;
gen_mov_pc_npc(dc);
r_const = tcg_const_i32(3);
gen_helper_check_align(cpu_env, cpu_tmp0, r_const);
tcg_temp_free_i32(r_const);
tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
dc->npc = DYNAMIC_PC;
gen_helper_rett(cpu_env);
}
goto jmp_insn;
#endif
case 0x3b: /* flush */
if (!((dc)->def->features & CPU_FEATURE_FLUSH))
goto unimp_flush;
/* nop */
break;
case 0x3c: /* save */
save_state(dc);
gen_helper_save(cpu_env);
gen_store_gpr(dc, rd, cpu_tmp0);
break;
case 0x3d: /* restore */
save_state(dc);
gen_helper_restore(cpu_env);
gen_store_gpr(dc, rd, cpu_tmp0);
break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
case 0x3e: /* V9 done/retry */
{
switch (rd) {
case 0:
if (!supervisor(dc))
goto priv_insn;
dc->npc = DYNAMIC_PC;
dc->pc = DYNAMIC_PC;
gen_helper_done(cpu_env);
goto jmp_insn;
case 1:
if (!supervisor(dc))
goto priv_insn;
dc->npc = DYNAMIC_PC;
dc->pc = DYNAMIC_PC;
gen_helper_retry(cpu_env);
goto jmp_insn;
default:
goto illegal_insn;
}
}
break;
#endif
default:
goto illegal_insn;
}
}
break;
}
break;
case 3: /* load/store instructions */
{
unsigned int xop = GET_FIELD(insn, 7, 12);
/* ??? gen_address_mask prevents us from using a source
register directly. Always generate a temporary. */
TCGv cpu_addr = get_temp_tl(dc);
tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
if (xop == 0x3c || xop == 0x3e) {
/* V9 casa/casxa : no offset */
} else if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 19, 31);
if (simm != 0) {
tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
}
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
if (rs2 != 0) {
tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
}
}
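            /* The xop ranges below select the integer load (and atomic
               load-store) forms that deposit a value in rd; FP loads and
               the store forms are handled in the later branches. */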
if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
(xop > 0x17 && xop <= 0x1d ) ||
(xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
TCGv cpu_val = gen_dest_gpr(dc, rd);
switch (xop) {
case 0x0: /* ld, V9 lduw, load unsigned word */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x1: /* ldub, load unsigned byte */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x2: /* lduh, load unsigned halfword */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x3: /* ldd, load double word */
if (rd & 1)
goto illegal_insn;
else {
TCGv_i32 r_const;
TCGv_i64 t64;
save_state(dc);
r_const = tcg_const_i32(7);
/* XXX remove alignment check */
gen_helper_check_align(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
gen_address_mask(dc, cpu_addr);
t64 = tcg_temp_new_i64();
tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
tcg_gen_trunc_i64_tl(cpu_tmp0, t64);
tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffffULL);
gen_store_gpr(dc, rd + 1, cpu_tmp0);
tcg_gen_shri_i64(t64, t64, 32);
tcg_gen_trunc_i64_tl(cpu_val, t64);
tcg_temp_free_i64(t64);
tcg_gen_andi_tl(cpu_val, cpu_val, 0xffffffffULL);
}
break;
case 0x9: /* ldsb, load signed byte */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0xa: /* ldsh, load signed halfword */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
break;
            case 0xd: /* ldstub -- XXX: should be done atomically */
{
TCGv r_const;
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
r_const = tcg_const_tl(0xff);
tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
tcg_temp_free(r_const);
}
break;
            case 0x0f: /* swap: swap register with memory, also done atomically */
CHECK_IU_FEATURE(dc, SWAP);
cpu_src1 = gen_load_gpr(dc, rd);
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
tcg_gen_mov_tl(cpu_val, cpu_tmp0);
break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
case 0x10: /* lda, V9 lduwa, load word alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
break;
case 0x11: /* lduba, load unsigned byte alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
break;
case 0x12: /* lduha, load unsigned halfword alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
break;
case 0x13: /* ldda, load double word alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
if (rd & 1)
goto illegal_insn;
save_state(dc);
gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
goto skip_move;
case 0x19: /* ldsba, load signed byte alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
break;
case 0x1a: /* ldsha, load signed halfword alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
break;
            case 0x1d: /* ldstuba -- XXX: should be done atomically */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ldstub_asi(cpu_val, cpu_addr, insn);
break;
            case 0x1f: /* swapa: swap register with alternate-space memory, also done atomically */
CHECK_IU_FEATURE(dc, SWAP);
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
cpu_src1 = gen_load_gpr(dc, rd);
gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
break;
#ifndef TARGET_SPARC64
case 0x30: /* ldc */
case 0x31: /* ldcsr */
case 0x33: /* lddc */
goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
case 0x08: /* V9 ldsw */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x0b: /* V9 ldx */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x18: /* V9 ldswa */
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
break;
case 0x1b: /* V9 ldxa */
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
break;
case 0x2d: /* V9 prefetch, no effect */
goto skip_move;
case 0x30: /* V9 ldfa */
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
gen_ldf_asi(cpu_addr, insn, 4, rd);
gen_update_fprs_dirty(rd);
goto skip_move;
case 0x33: /* V9 lddfa */
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
gen_update_fprs_dirty(DFPREG(rd));
goto skip_move;
case 0x3d: /* V9 prefetcha, no effect */
goto skip_move;
case 0x32: /* V9 ldqfa */
CHECK_FPU_FEATURE(dc, FLOAT128);
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
gen_update_fprs_dirty(QFPREG(rd));
goto skip_move;
#endif
default:
goto illegal_insn;
}
gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
skip_move: ;
#endif
} else if (xop >= 0x20 && xop < 0x24) {
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
switch (xop) {
case 0x20: /* ldf, load fpreg */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
cpu_dst_32 = gen_dest_fpr_F(dc);
tcg_gen_trunc_tl_i32(cpu_dst_32, cpu_tmp0);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x21: /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
gen_address_mask(dc, cpu_addr);
if (rd == 1) {
TCGv_i64 t64 = tcg_temp_new_i64();
tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
gen_helper_ldxfsr(cpu_env, t64);
tcg_temp_free_i64(t64);
break;
}
#endif
{
TCGv_i32 t32 = get_temp_i32(dc);
tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
tcg_gen_trunc_tl_i32(t32, cpu_tmp0);
gen_helper_ldfsr(cpu_env, t32);
}
break;
case 0x22: /* ldqf, load quad fpreg */
{
TCGv_i32 r_const;
CHECK_FPU_FEATURE(dc, FLOAT128);
r_const = tcg_const_i32(dc->mem_idx);
gen_address_mask(dc, cpu_addr);
gen_helper_ldqf(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
gen_op_store_QT0_fpr(QFPREG(rd));
gen_update_fprs_dirty(QFPREG(rd));
}
break;
case 0x23: /* lddf, load double fpreg */
gen_address_mask(dc, cpu_addr);
cpu_dst_64 = gen_dest_fpr_D(dc, rd);
tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
gen_store_fpr_D(dc, rd, cpu_dst_64);
break;
default:
goto illegal_insn;
}
} else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
xop == 0xe || xop == 0x1e) {
TCGv cpu_val = gen_load_gpr(dc, rd);
switch (xop) {
case 0x4: /* st, store word */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x5: /* stb, store byte */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x6: /* sth, store halfword */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x7: /* std, store double word */
if (rd & 1)
goto illegal_insn;
else {
TCGv_i32 r_const;
TCGv_i64 t64;
TCGv lo;
save_state(dc);
gen_address_mask(dc, cpu_addr);
r_const = tcg_const_i32(7);
/* XXX remove alignment check */
gen_helper_check_align(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
lo = gen_load_gpr(dc, rd + 1);
t64 = tcg_temp_new_i64();
tcg_gen_concat_tl_i64(t64, lo, cpu_val);
tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
tcg_temp_free_i64(t64);
}
break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
case 0x14: /* sta, V9 stwa, store word alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_st_asi(cpu_val, cpu_addr, insn, 4);
dc->npc = DYNAMIC_PC;
break;
case 0x15: /* stba, store byte alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_st_asi(cpu_val, cpu_addr, insn, 1);
dc->npc = DYNAMIC_PC;
break;
case 0x16: /* stha, store halfword alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_st_asi(cpu_val, cpu_addr, insn, 2);
dc->npc = DYNAMIC_PC;
break;
case 0x17: /* stda, store double word alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
if (rd & 1)
goto illegal_insn;
else {
save_state(dc);
gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
}
break;
#endif
#ifdef TARGET_SPARC64
case 0x0e: /* V9 stx */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x1e: /* V9 stxa */
save_state(dc);
gen_st_asi(cpu_val, cpu_addr, insn, 8);
dc->npc = DYNAMIC_PC;
break;
#endif
default:
goto illegal_insn;
}
} else if (xop > 0x23 && xop < 0x28) {
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
switch (xop) {
case 0x24: /* stf, store fpreg */
gen_address_mask(dc, cpu_addr);
cpu_src1_32 = gen_load_fpr_F(dc, rd);
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_src1_32);
tcg_gen_qemu_st32(cpu_tmp0, cpu_addr, dc->mem_idx);
break;
case 0x25: /* stfsr, V9 stxfsr */
{
TCGv t = get_temp_tl(dc);
tcg_gen_ld_tl(t, cpu_env, offsetof(CPUSPARCState, fsr));
#ifdef TARGET_SPARC64
gen_address_mask(dc, cpu_addr);
if (rd == 1) {
tcg_gen_qemu_st64(t, cpu_addr, dc->mem_idx);
break;
}
#endif
tcg_gen_qemu_st32(t, cpu_addr, dc->mem_idx);
}
break;
case 0x26:
#ifdef TARGET_SPARC64
/* V9 stqf, store quad fpreg */
{
TCGv_i32 r_const;
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_op_load_fpr_QT0(QFPREG(rd));
r_const = tcg_const_i32(dc->mem_idx);
gen_address_mask(dc, cpu_addr);
gen_helper_stqf(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
}
break;
#else /* !TARGET_SPARC64 */
/* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
goto illegal_insn;
#else
if (!supervisor(dc))
goto priv_insn;
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
goto nfq_insn;
#endif
#endif
case 0x27: /* stdf, store double fpreg */
gen_address_mask(dc, cpu_addr);
cpu_src1_64 = gen_load_fpr_D(dc, rd);
tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
break;
default:
goto illegal_insn;
}
} else if (xop > 0x33 && xop < 0x3f) {
save_state(dc);
switch (xop) {
#ifdef TARGET_SPARC64
case 0x34: /* V9 stfa */
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_stf_asi(cpu_addr, insn, 4, rd);
break;
case 0x36: /* V9 stqfa */
{
TCGv_i32 r_const;
CHECK_FPU_FEATURE(dc, FLOAT128);
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
r_const = tcg_const_i32(7);
gen_helper_check_align(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
}
break;
case 0x37: /* V9 stdfa */
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
break;
case 0x3c: /* V9 casa */
rs2 = GET_FIELD(insn, 27, 31);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
break;
case 0x3e: /* V9 casxa */
rs2 = GET_FIELD(insn, 27, 31);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
break;
#else
case 0x34: /* stc */
case 0x35: /* stcsr */
case 0x36: /* stdcq */
case 0x37: /* stdc */
goto ncp_insn;
#endif
default:
goto illegal_insn;
}
} else {
goto illegal_insn;
}
}
break;
}
    /* default case for non-jump instructions */
if (dc->npc == DYNAMIC_PC) {
dc->pc = DYNAMIC_PC;
gen_op_next_insn();
} else if (dc->npc == JUMP_PC) {
/* we can do a static jump */
gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
dc->is_br = 1;
} else {
dc->pc = dc->npc;
dc->npc = dc->npc + 4;
}
jmp_insn:
goto egress;
illegal_insn:
{
TCGv_i32 r_const;
save_state(dc);
r_const = tcg_const_i32(TT_ILL_INSN);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free_i32(r_const);
dc->is_br = 1;
}
goto egress;
unimp_flush:
{
TCGv_i32 r_const;
save_state(dc);
r_const = tcg_const_i32(TT_UNIMP_FLUSH);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free_i32(r_const);
dc->is_br = 1;
}
goto egress;
#if !defined(CONFIG_USER_ONLY)
priv_insn:
{
TCGv_i32 r_const;
save_state(dc);
r_const = tcg_const_i32(TT_PRIV_INSN);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free_i32(r_const);
dc->is_br = 1;
}
goto egress;
#endif
nfpu_insn:
save_state(dc);
gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
dc->is_br = 1;
goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
nfq_insn:
save_state(dc);
gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
dc->is_br = 1;
goto egress;
#endif
#ifndef TARGET_SPARC64
ncp_insn:
{
TCGv r_const;
save_state(dc);
r_const = tcg_const_i32(TT_NCP_INSN);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free(r_const);
dc->is_br = 1;
}
goto egress;
#endif
egress:
if (dc->n_t32 != 0) {
int i;
for (i = dc->n_t32 - 1; i >= 0; --i) {
tcg_temp_free_i32(dc->t32[i]);
}
dc->n_t32 = 0;
}
if (dc->n_ttl != 0) {
int i;
for (i = dc->n_ttl - 1; i >= 0; --i) {
tcg_temp_free(dc->ttl[i]);
}
dc->n_ttl = 0;
}
}
| false | qemu | 5793f2a47e201d251856c7956d6f7907ec0d9f1f |
6,301 | int use_gdb_syscalls(void)
{
if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
gdb_syscall_mode = (gdb_syscall_state ? GDB_SYS_ENABLED
: GDB_SYS_DISABLED);
}
return gdb_syscall_mode == GDB_SYS_ENABLED;
}
| false | qemu | 880a7578381d1c7ed4d41c7599ae3cc06567a824 |
6,302 | static void s390_hot_add_cpu(const int64_t id, Error **errp)
{
MachineState *machine = MACHINE(qdev_get_machine());
s390x_new_cpu(machine->cpu_model, id, errp);
}
| false | qemu | 524d18d8bd463431b120eeb5f9f3d1064a1c19e4 |
6,303 | static bool virtio_pci_modern_state_needed(void *opaque)
{
VirtIOPCIProxy *proxy = opaque;
return !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
}
| false | qemu | 9a4c0e220d8a4f82b5665d0ee95ef94d8e1509d5 |
6,304 | static int on2avc_decode_band_types(On2AVCContext *c, GetBitContext *gb)
{
int bits_per_sect = c->is_long ? 5 : 3;
int esc_val = (1 << bits_per_sect) - 1;
int num_bands = c->num_bands * c->num_windows;
int band = 0, i, band_type, run_len, run;
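    /* Band types are run-length coded: a 4-bit type is followed by one or
       more run fields, and a field equal to esc_val means the run length
       continues in the next field. */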
while (band < num_bands) {
band_type = get_bits(gb, 4);
run_len = 1;
do {
run = get_bits(gb, bits_per_sect);
run_len += run;
} while (run == esc_val);
if (band + run_len > num_bands) {
av_log(c->avctx, AV_LOG_ERROR, "Invalid band type run\n");
return AVERROR_INVALIDDATA;
}
for (i = band; i < band + run_len; i++) {
c->band_type[i] = band_type;
c->band_run_end[i] = band + run_len;
}
band += run_len;
}
return 0;
}
| false | FFmpeg | 22f15f5735389e992ec9aed43b0680e75746b3a1 |
6,305 | static float pvq_band_cost(CeltPVQ *pvq, CeltFrame *f, OpusRangeCoder *rc, int band,
float *bits, float lambda)
{
int i, b = 0;
uint32_t cm[2] = { (1 << f->blocks) - 1, (1 << f->blocks) - 1 };
const int band_size = ff_celt_freq_range[band] << f->size;
float buf[176 * 2], lowband_scratch[176], norm1[176], norm2[176];
float dist, cost, err_x = 0.0f, err_y = 0.0f;
float *X = buf;
float *X_orig = f->block[0].coeffs + (ff_celt_freq_bands[band] << f->size);
float *Y = (f->channels == 2) ? &buf[176] : NULL;
float *Y_orig = f->block[1].coeffs + (ff_celt_freq_bands[band] << f->size);
OPUS_RC_CHECKPOINT_SPAWN(rc);
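    /* Quantize a copy of the band, tally the distortion and the bits the
       range coder consumed, then roll the coder back so this trial leaves
       no trace in the bitstream. */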
memcpy(X, X_orig, band_size*sizeof(float));
if (Y)
memcpy(Y, Y_orig, band_size*sizeof(float));
f->remaining2 = ((f->framebits << 3) - f->anticollapse_needed) - opus_rc_tell_frac(rc) - 1;
if (band <= f->coded_bands - 1) {
int curr_balance = f->remaining / FFMIN(3, f->coded_bands - band);
b = av_clip_uintp2(FFMIN(f->remaining2 + 1, f->pulses[band] + curr_balance), 14);
}
if (f->dual_stereo) {
pvq->quant_band(pvq, f, rc, band, X, NULL, band_size, b / 2, f->blocks, NULL,
f->size, norm1, 0, 1.0f, lowband_scratch, cm[0]);
pvq->quant_band(pvq, f, rc, band, Y, NULL, band_size, b / 2, f->blocks, NULL,
f->size, norm2, 0, 1.0f, lowband_scratch, cm[1]);
} else {
pvq->quant_band(pvq, f, rc, band, X, Y, band_size, b, f->blocks, NULL, f->size,
norm1, 0, 1.0f, lowband_scratch, cm[0] | cm[1]);
}
for (i = 0; i < band_size; i++) {
err_x += (X[i] - X_orig[i])*(X[i] - X_orig[i]);
if (Y)
err_y += (Y[i] - Y_orig[i])*(Y[i] - Y_orig[i]);
}
dist = sqrtf(err_x) + sqrtf(err_y);
cost = OPUS_RC_CHECKPOINT_BITS(rc)/8.0f;
*bits += cost;
OPUS_RC_CHECKPOINT_ROLLBACK(rc);
return lambda*dist*cost;
}
| false | FFmpeg | 7b46add7257628bffac96d3002308d1f9e1ed172 |
6,306 | static int mpegts_read_header(AVFormatContext *s)
{
MpegTSContext *ts = s->priv_data;
AVIOContext *pb = s->pb;
uint8_t buf[8 * 1024] = {0};
int len;
int64_t pos, probesize =
#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI
s->probesize ? s->probesize : s->probesize2;
#else
s->probesize;
#endif
if (ffio_ensure_seekback(pb, probesize) < 0)
av_log(s, AV_LOG_WARNING, "Failed to allocate buffers for seekback\n");
/* read the first 8192 bytes to get packet size */
pos = avio_tell(pb);
len = avio_read(pb, buf, sizeof(buf));
ts->raw_packet_size = get_packet_size(buf, len);
if (ts->raw_packet_size <= 0) {
av_log(s, AV_LOG_WARNING, "Could not detect TS packet size, defaulting to non-FEC/DVHS\n");
ts->raw_packet_size = TS_PACKET_SIZE;
}
ts->stream = s;
ts->auto_guess = 0;
if (s->iformat == &ff_mpegts_demuxer) {
/* normal demux */
/* first do a scan to get all the services */
seek_back(s, pb, pos);
mpegts_open_section_filter(ts, SDT_PID, sdt_cb, ts, 1);
mpegts_open_section_filter(ts, PAT_PID, pat_cb, ts, 1);
handle_packets(ts, probesize / ts->raw_packet_size);
/* if could not find service, enable auto_guess */
ts->auto_guess = 1;
av_log(ts->stream, AV_LOG_TRACE, "tuning done\n");
s->ctx_flags |= AVFMTCTX_NOHEADER;
} else {
AVStream *st;
int pcr_pid, pid, nb_packets, nb_pcrs, ret, pcr_l;
int64_t pcrs[2], pcr_h;
int packet_count[2];
uint8_t packet[TS_PACKET_SIZE];
const uint8_t *data;
/* only read packets */
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 60, 1, 27000000);
st->codec->codec_type = AVMEDIA_TYPE_DATA;
st->codec->codec_id = AV_CODEC_ID_MPEG2TS;
/* we iterate until we find two PCRs to estimate the bitrate */
pcr_pid = -1;
nb_pcrs = 0;
nb_packets = 0;
for (;;) {
ret = read_packet(s, packet, ts->raw_packet_size, &data);
if (ret < 0)
return ret;
pid = AV_RB16(data + 1) & 0x1fff;
if ((pcr_pid == -1 || pcr_pid == pid) &&
parse_pcr(&pcr_h, &pcr_l, data) == 0) {
finished_reading_packet(s, ts->raw_packet_size);
pcr_pid = pid;
packet_count[nb_pcrs] = nb_packets;
pcrs[nb_pcrs] = pcr_h * 300 + pcr_l;
nb_pcrs++;
if (nb_pcrs >= 2)
break;
} else {
finished_reading_packet(s, ts->raw_packet_size);
}
nb_packets++;
}
/* NOTE1: the bitrate is computed without the FEC */
/* NOTE2: it is only the bitrate of the start of the stream */
ts->pcr_incr = (pcrs[1] - pcrs[0]) / (packet_count[1] - packet_count[0]);
ts->cur_pcr = pcrs[0] - ts->pcr_incr * packet_count[0];
s->bit_rate = TS_PACKET_SIZE * 8 * 27000000LL / ts->pcr_incr;
st->codec->bit_rate = s->bit_rate;
st->start_time = ts->cur_pcr;
av_log(ts->stream, AV_LOG_TRACE, "start=%0.3f pcr=%0.3f incr=%d\n",
st->start_time / 1000000.0, pcrs[0] / 27e6, ts->pcr_incr);
}
seek_back(s, pb, pos);
return 0;
}
| false | FFmpeg | 655b6dcb34b25d591e15ede17673ea6cb8074711 |
6,309 | static void apply_tns(float coef[1024], TemporalNoiseShaping *tns,
IndividualChannelStream *ics, int decode)
{
const int mmm = FFMIN(ics->tns_max_bands, ics->max_sfb);
int w, filt, m, i;
int bottom, top, order, start, end, size, inc;
float lpc[TNS_MAX_ORDER];
float tmp[TNS_MAX_ORDER];
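    /* Walk each window's TNS filters from the top band downward: convert
       the transmitted reflection coefficients to LPC, then run an all-pole
       filter when decoding or an all-zero filter when encoding over the
       covered spectral coefficients. */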
for (w = 0; w < ics->num_windows; w++) {
bottom = ics->num_swb;
for (filt = 0; filt < tns->n_filt[w]; filt++) {
top = bottom;
bottom = FFMAX(0, top - tns->length[w][filt]);
order = tns->order[w][filt];
if (order == 0)
continue;
// tns_decode_coef
compute_lpc_coefs(tns->coef[w][filt], order, lpc, 0, 0, 0);
start = ics->swb_offset[FFMIN(bottom, mmm)];
end = ics->swb_offset[FFMIN( top, mmm)];
if ((size = end - start) <= 0)
continue;
if (tns->direction[w][filt]) {
inc = -1;
start = end - 1;
} else {
inc = 1;
}
start += w * 128;
if (decode) {
// ar filter
for (m = 0; m < size; m++, start += inc)
for (i = 1; i <= FFMIN(m, order); i++)
coef[start] -= coef[start - i * inc] * lpc[i - 1];
} else {
// ma filter
for (m = 0; m < size; m++, start += inc) {
tmp[0] = coef[start];
for (i = 1; i <= FFMIN(m, order); i++)
coef[start] += tmp[i] * lpc[i - 1];
for (i = order; i > 0; i--)
tmp[i] = tmp[i - 1];
}
}
}
}
}
| true | FFmpeg | 6d5b0092678b2a95dfe209a207550bd2fe9ef646 |
6,310 | void do_info_snapshots(Monitor *mon)
{
DriveInfo *dinfo;
BlockDriverState *bs, *bs1;
QEMUSnapshotInfo *sn_tab, *sn;
int nb_sns, i;
char buf[256];
bs = get_bs_snapshots();
if (!bs) {
monitor_printf(mon, "No available block device supports snapshots\n");
return;
}
monitor_printf(mon, "Snapshot devices:");
QTAILQ_FOREACH(dinfo, &drives, next) {
bs1 = dinfo->bdrv;
if (bdrv_has_snapshot(bs1)) {
if (bs == bs1)
monitor_printf(mon, " %s", bdrv_get_device_name(bs1));
}
}
monitor_printf(mon, "\n");
nb_sns = bdrv_snapshot_list(bs, &sn_tab);
if (nb_sns < 0) {
monitor_printf(mon, "bdrv_snapshot_list: error %d\n", nb_sns);
return;
}
monitor_printf(mon, "Snapshot list (from %s):\n",
bdrv_get_device_name(bs));
monitor_printf(mon, "%s\n", bdrv_snapshot_dump(buf, sizeof(buf), NULL));
for(i = 0; i < nb_sns; i++) {
sn = &sn_tab[i];
monitor_printf(mon, "%s\n", bdrv_snapshot_dump(buf, sizeof(buf), sn));
}
qemu_free(sn_tab);
}
| true | qemu | feeee5aca765606818e00f5a19d19f141f4ae365 |
6,311 | static int activate(AVFilterContext *ctx)
{
PreMultiplyContext *s = ctx->priv;
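    /* In-place mode: the single input frame is passed as both arguments to
       filter_frame(), so the input link is driven by hand instead of going
       through the framesync machinery. */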
if (s->inplace) {
AVFrame *frame = NULL;
AVFrame *out = NULL;
int ret, status;
int64_t pts;
if ((ret = ff_inlink_consume_frame(ctx->inputs[0], &frame)) > 0) {
if ((ret = filter_frame(ctx, &out, frame, frame)) < 0)
return ret;
av_frame_free(&frame);
ret = ff_filter_frame(ctx->outputs[0], out);
}
if (ret < 0) {
return ret;
} else if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
ff_outlink_set_status(ctx->outputs[0], status, pts);
return 0;
} else {
if (ff_outlink_frame_wanted(ctx->outputs[0]))
ff_inlink_request_frame(ctx->inputs[0]);
return 0;
}
} else {
return ff_framesync_activate(&s->fs);
}
}
| true | FFmpeg | c7ded42d5dfc0124008b5b9b13a1a342324885ed |
6,312 | int kvm_arch_remove_sw_breakpoint(CPUState *cpu, struct kvm_sw_breakpoint *bp)
{
return -EINVAL;
}
| true | qemu | 8a0548f94edecb96acb9b7fb9106ccc821c4996f |
6,313 | static int unix_open(URLContext *h, const char *filename, int flags)
{
UnixContext *s = h->priv_data;
int fd, ret;
av_strstart(filename, "unix:", &filename);
s->addr.sun_family = AF_UNIX;
av_strlcpy(s->addr.sun_path, filename, sizeof(s->addr.sun_path));
if ((fd = ff_socket(AF_UNIX, s->type, 0)) < 0)
return ff_neterrno();
if (s->listen) {
fd = ff_listen_bind(fd, (struct sockaddr *)&s->addr,
sizeof(s->addr), s->timeout, h);
if (fd < 0) {
ret = fd;
goto fail;
}
} else {
ret = ff_listen_connect(fd, (struct sockaddr *)&s->addr,
sizeof(s->addr), s->timeout, h, 0);
if (ret < 0)
goto fail;
}
s->fd = fd;
return 0;
fail:
if (s->listen && AVUNERROR(ret) != EADDRINUSE)
unlink(s->addr.sun_path);
if (fd >= 0)
closesocket(fd);
return ret;
}
| true | FFmpeg | 27852f2f1dec3749ea79883b70484c841169f747 |
6,315 | static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
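    /* Return from exception: restore the full CPSR from the loaded SPSR
       value, then branch to the loaded PC. */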
gen_set_cpsr(cpsr, 0xffffffff);
dead_tmp(cpsr);
store_reg(s, 15, pc);
s->is_jmp = DISAS_UPDATE;
}
| true | qemu | 7d1b0095bff7157e856d1d0e6c4295641ced2752 |
6,316 | static inline void RENAME(rgb16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
int i;
assert(src1 == src2);
for(i=0; i<width; i++)
{
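        /* d0 holds two horizontally adjacent RGB565 pixels; the masked
           shift/add trick below sums the pair's colour components in
           parallel before the chroma conversion. */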
int d0= ((uint32_t*)src1)[i];
int dl= (d0&0x07E0F81F);
int dh= ((d0>>5)&0x07C0F83F);
int dh2= (dh>>11) + (dh<<21);
int d= dh2 + dl;
int r= d&0x7F;
int b= (d>>11)&0x7F;
int g= d>>21;
dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
}
}
| true | FFmpeg | 2da0d70d5eebe42f9fcd27ee554419ebe2a5da06 |
6,317 | static int rtl8139_cplus_transmit_one(RTL8139State *s)
{
if (!rtl8139_transmitter_enabled(s))
{
DPRINTF("+++ C+ mode: transmitter disabled\n");
return 0;
}
if (!rtl8139_cp_transmitter_enabled(s))
{
DPRINTF("+++ C+ mode: C+ transmitter disabled\n");
return 0 ;
}
int descriptor = s->currCPlusTxDesc;
dma_addr_t cplus_tx_ring_desc = rtl8139_addr64(s->TxAddr[0], s->TxAddr[1]);
/* Normal priority ring */
cplus_tx_ring_desc += 16 * descriptor;
DPRINTF("+++ C+ mode reading TX descriptor %d from host memory at "
"%08x %08x = 0x"DMA_ADDR_FMT"\n", descriptor, s->TxAddr[1],
s->TxAddr[0], cplus_tx_ring_desc);
uint32_t val, txdw0,txdw1,txbufLO,txbufHI;
pci_dma_read(&s->dev, cplus_tx_ring_desc, (uint8_t *)&val, 4);
txdw0 = le32_to_cpu(val);
pci_dma_read(&s->dev, cplus_tx_ring_desc+4, (uint8_t *)&val, 4);
txdw1 = le32_to_cpu(val);
pci_dma_read(&s->dev, cplus_tx_ring_desc+8, (uint8_t *)&val, 4);
txbufLO = le32_to_cpu(val);
pci_dma_read(&s->dev, cplus_tx_ring_desc+12, (uint8_t *)&val, 4);
txbufHI = le32_to_cpu(val);
DPRINTF("+++ C+ mode TX descriptor %d %08x %08x %08x %08x\n", descriptor,
txdw0, txdw1, txbufLO, txbufHI);
/* w0 ownership flag */
#define CP_TX_OWN (1<<31)
/* w0 end of ring flag */
#define CP_TX_EOR (1<<30)
    /* first segment of transmitted packet flag */
#define CP_TX_FS (1<<29)
    /* last segment of transmitted packet flag */
#define CP_TX_LS (1<<28)
/* large send packet flag */
#define CP_TX_LGSEN (1<<27)
/* large send MSS mask, bits 16...25 */
#define CP_TC_LGSEN_MSS_MASK ((1 << 12) - 1)
/* IP checksum offload flag */
#define CP_TX_IPCS (1<<18)
/* UDP checksum offload flag */
#define CP_TX_UDPCS (1<<17)
/* TCP checksum offload flag */
#define CP_TX_TCPCS (1<<16)
/* w0 bits 0...15 : buffer size */
#define CP_TX_BUFFER_SIZE (1<<16)
#define CP_TX_BUFFER_SIZE_MASK (CP_TX_BUFFER_SIZE - 1)
/* w1 add tag flag */
#define CP_TX_TAGC (1<<17)
/* w1 bits 0...15 : VLAN tag (big endian) */
#define CP_TX_VLAN_TAG_MASK ((1<<16) - 1)
    /* w2 low 32bit of Tx buffer ptr */
    /* w3 high 32bit of Tx buffer ptr */
/* set after transmission */
/* FIFO underrun flag */
#define CP_TX_STATUS_UNF (1<<25)
/* transmit error summary flag, valid if set any of three below */
#define CP_TX_STATUS_TES (1<<23)
/* out-of-window collision flag */
#define CP_TX_STATUS_OWC (1<<22)
/* link failure flag */
#define CP_TX_STATUS_LNKF (1<<21)
/* excessive collisions flag */
#define CP_TX_STATUS_EXC (1<<20)
if (!(txdw0 & CP_TX_OWN))
{
DPRINTF("C+ Tx mode : descriptor %d is owned by host\n", descriptor);
return 0 ;
}
DPRINTF("+++ C+ Tx mode : transmitting from descriptor %d\n", descriptor);
if (txdw0 & CP_TX_FS)
{
DPRINTF("+++ C+ Tx mode : descriptor %d is first segment "
"descriptor\n", descriptor);
/* reset internal buffer offset */
s->cplus_txbuffer_offset = 0;
}
int txsize = txdw0 & CP_TX_BUFFER_SIZE_MASK;
dma_addr_t tx_addr = rtl8139_addr64(txbufLO, txbufHI);
/* make sure we have enough space to assemble the packet */
if (!s->cplus_txbuffer)
{
s->cplus_txbuffer_len = CP_TX_BUFFER_SIZE;
s->cplus_txbuffer = g_malloc(s->cplus_txbuffer_len);
s->cplus_txbuffer_offset = 0;
DPRINTF("+++ C+ mode transmission buffer allocated space %d\n",
s->cplus_txbuffer_len);
}
while (s->cplus_txbuffer && s->cplus_txbuffer_offset + txsize >= s->cplus_txbuffer_len)
{
s->cplus_txbuffer_len += CP_TX_BUFFER_SIZE;
s->cplus_txbuffer = g_realloc(s->cplus_txbuffer, s->cplus_txbuffer_len);
DPRINTF("+++ C+ mode transmission buffer space changed to %d\n",
s->cplus_txbuffer_len);
}
if (!s->cplus_txbuffer)
{
/* out of memory */
DPRINTF("+++ C+ mode transmiter failed to reallocate %d bytes\n",
s->cplus_txbuffer_len);
/* update tally counter */
++s->tally_counters.TxERR;
++s->tally_counters.TxAbt;
return 0;
}
/* append more data to the packet */
DPRINTF("+++ C+ mode transmit reading %d bytes from host memory at "
DMA_ADDR_FMT" to offset %d\n", txsize, tx_addr,
s->cplus_txbuffer_offset);
pci_dma_read(&s->dev, tx_addr,
s->cplus_txbuffer + s->cplus_txbuffer_offset, txsize);
s->cplus_txbuffer_offset += txsize;
    /* seek to next Tx descriptor */
if (txdw0 & CP_TX_EOR)
{
s->currCPlusTxDesc = 0;
}
else
{
++s->currCPlusTxDesc;
if (s->currCPlusTxDesc >= 64)
s->currCPlusTxDesc = 0;
}
/* transfer ownership to target */
txdw0 &= ~CP_RX_OWN;
/* reset error indicator bits */
txdw0 &= ~CP_TX_STATUS_UNF;
txdw0 &= ~CP_TX_STATUS_TES;
txdw0 &= ~CP_TX_STATUS_OWC;
txdw0 &= ~CP_TX_STATUS_LNKF;
txdw0 &= ~CP_TX_STATUS_EXC;
/* update ring data */
val = cpu_to_le32(txdw0);
pci_dma_write(&s->dev, cplus_tx_ring_desc, (uint8_t *)&val, 4);
/* Now decide if descriptor being processed is holding the last segment of packet */
if (txdw0 & CP_TX_LS)
{
uint8_t dot1q_buffer_space[VLAN_HLEN];
uint16_t *dot1q_buffer;
DPRINTF("+++ C+ Tx mode : descriptor %d is last segment descriptor\n",
descriptor);
/* can transfer fully assembled packet */
uint8_t *saved_buffer = s->cplus_txbuffer;
int saved_size = s->cplus_txbuffer_offset;
int saved_buffer_len = s->cplus_txbuffer_len;
/* create vlan tag */
if (txdw1 & CP_TX_TAGC) {
/* the vlan tag is in BE byte order in the descriptor
* BE + le_to_cpu() + ~swap()~ = cpu */
DPRINTF("+++ C+ Tx mode : inserting vlan tag with ""tci: %u\n",
bswap16(txdw1 & CP_TX_VLAN_TAG_MASK));
dot1q_buffer = (uint16_t *) dot1q_buffer_space;
dot1q_buffer[0] = cpu_to_be16(ETH_P_8021Q);
/* BE + le_to_cpu() + ~cpu_to_le()~ = BE */
dot1q_buffer[1] = cpu_to_le16(txdw1 & CP_TX_VLAN_TAG_MASK);
} else {
dot1q_buffer = NULL;
}
/* reset the card space to protect from recursive call */
s->cplus_txbuffer = NULL;
s->cplus_txbuffer_offset = 0;
s->cplus_txbuffer_len = 0;
if (txdw0 & (CP_TX_IPCS | CP_TX_UDPCS | CP_TX_TCPCS | CP_TX_LGSEN))
{
DPRINTF("+++ C+ mode offloaded task checksum\n");
/* ip packet header */
ip_header *ip = NULL;
int hlen = 0;
uint8_t ip_protocol = 0;
uint16_t ip_data_len = 0;
uint8_t *eth_payload_data = NULL;
size_t eth_payload_len = 0;
int proto = be16_to_cpu(*(uint16_t *)(saved_buffer + 12));
if (proto == ETH_P_IP)
{
DPRINTF("+++ C+ mode has IP packet\n");
/* not aligned */
eth_payload_data = saved_buffer + ETH_HLEN;
eth_payload_len = saved_size - ETH_HLEN;
ip = (ip_header*)eth_payload_data;
if (IP_HEADER_VERSION(ip) != IP_HEADER_VERSION_4) {
DPRINTF("+++ C+ mode packet has bad IP version %d "
"expected %d\n", IP_HEADER_VERSION(ip),
IP_HEADER_VERSION_4);
ip = NULL;
} else {
hlen = IP_HEADER_LENGTH(ip);
ip_protocol = ip->ip_p;
ip_data_len = be16_to_cpu(ip->ip_len) - hlen;
}
}
if (ip)
{
if (txdw0 & CP_TX_IPCS)
{
DPRINTF("+++ C+ mode need IP checksum\n");
if (hlen<sizeof(ip_header) || hlen>eth_payload_len) {/* min header length */
/* bad packet header len */
/* or packet too short */
}
else
{
ip->ip_sum = 0;
ip->ip_sum = ip_checksum(ip, hlen);
DPRINTF("+++ C+ mode IP header len=%d checksum=%04x\n",
hlen, ip->ip_sum);
}
}
if ((txdw0 & CP_TX_LGSEN) && ip_protocol == IP_PROTO_TCP)
{
int large_send_mss = (txdw0 >> 16) & CP_TC_LGSEN_MSS_MASK;
DPRINTF("+++ C+ mode offloaded task TSO MTU=%d IP data %d "
"frame data %d specified MSS=%d\n", ETH_MTU,
ip_data_len, saved_size - ETH_HLEN, large_send_mss);
int tcp_send_offset = 0;
int send_count = 0;
/* maximum IP header length is 60 bytes */
uint8_t saved_ip_header[60];
/* save IP header template; data area is used in tcp checksum calculation */
memcpy(saved_ip_header, eth_payload_data, hlen);
/* a placeholder for checksum calculation routine in tcp case */
uint8_t *data_to_checksum = eth_payload_data + hlen - 12;
// size_t data_to_checksum_len = eth_payload_len - hlen + 12;
/* pointer to TCP header */
tcp_header *p_tcp_hdr = (tcp_header*)(eth_payload_data + hlen);
int tcp_hlen = TCP_HEADER_DATA_OFFSET(p_tcp_hdr);
/* ETH_MTU = ip header len + tcp header len + payload */
int tcp_data_len = ip_data_len - tcp_hlen;
int tcp_chunk_size = ETH_MTU - hlen - tcp_hlen;
DPRINTF("+++ C+ mode TSO IP data len %d TCP hlen %d TCP "
"data len %d TCP chunk size %d\n", ip_data_len,
tcp_hlen, tcp_data_len, tcp_chunk_size);
/* note the cycle below overwrites IP header data,
but restores it from saved_ip_header before sending packet */
int is_last_frame = 0;
for (tcp_send_offset = 0; tcp_send_offset < tcp_data_len; tcp_send_offset += tcp_chunk_size)
{
uint16_t chunk_size = tcp_chunk_size;
/* check if this is the last frame */
if (tcp_send_offset + tcp_chunk_size >= tcp_data_len)
{
is_last_frame = 1;
chunk_size = tcp_data_len - tcp_send_offset;
}
DPRINTF("+++ C+ mode TSO TCP seqno %08x\n",
be32_to_cpu(p_tcp_hdr->th_seq));
/* add 4 TCP pseudoheader fields */
/* copy IP source and destination fields */
memcpy(data_to_checksum, saved_ip_header + 12, 8);
DPRINTF("+++ C+ mode TSO calculating TCP checksum for "
"packet with %d bytes data\n", tcp_hlen +
chunk_size);
if (tcp_send_offset)
{
memcpy((uint8_t*)p_tcp_hdr + tcp_hlen, (uint8_t*)p_tcp_hdr + tcp_hlen + tcp_send_offset, chunk_size);
}
/* keep PUSH and FIN flags only for the last frame */
if (!is_last_frame)
{
TCP_HEADER_CLEAR_FLAGS(p_tcp_hdr, TCP_FLAG_PUSH|TCP_FLAG_FIN);
}
/* recalculate TCP checksum */
ip_pseudo_header *p_tcpip_hdr = (ip_pseudo_header *)data_to_checksum;
p_tcpip_hdr->zeros = 0;
p_tcpip_hdr->ip_proto = IP_PROTO_TCP;
p_tcpip_hdr->ip_payload = cpu_to_be16(tcp_hlen + chunk_size);
p_tcp_hdr->th_sum = 0;
int tcp_checksum = ip_checksum(data_to_checksum, tcp_hlen + chunk_size + 12);
DPRINTF("+++ C+ mode TSO TCP checksum %04x\n",
tcp_checksum);
p_tcp_hdr->th_sum = tcp_checksum;
/* restore IP header */
memcpy(eth_payload_data, saved_ip_header, hlen);
/* set IP data length and recalculate IP checksum */
ip->ip_len = cpu_to_be16(hlen + tcp_hlen + chunk_size);
/* increment IP id for subsequent frames */
ip->ip_id = cpu_to_be16(tcp_send_offset/tcp_chunk_size + be16_to_cpu(ip->ip_id));
ip->ip_sum = 0;
ip->ip_sum = ip_checksum(eth_payload_data, hlen);
DPRINTF("+++ C+ mode TSO IP header len=%d "
"checksum=%04x\n", hlen, ip->ip_sum);
int tso_send_size = ETH_HLEN + hlen + tcp_hlen + chunk_size;
DPRINTF("+++ C+ mode TSO transferring packet size "
"%d\n", tso_send_size);
rtl8139_transfer_frame(s, saved_buffer, tso_send_size,
0, (uint8_t *) dot1q_buffer);
/* add transferred count to TCP sequence number */
p_tcp_hdr->th_seq = cpu_to_be32(chunk_size + be32_to_cpu(p_tcp_hdr->th_seq));
++send_count;
}
/* Stop sending this frame */
saved_size = 0;
}
else if (txdw0 & (CP_TX_TCPCS|CP_TX_UDPCS))
{
DPRINTF("+++ C+ mode need TCP or UDP checksum\n");
/* maximum IP header length is 60 bytes */
uint8_t saved_ip_header[60];
memcpy(saved_ip_header, eth_payload_data, hlen);
uint8_t *data_to_checksum = eth_payload_data + hlen - 12;
// size_t data_to_checksum_len = eth_payload_len - hlen + 12;
/* add 4 TCP pseudoheader fields */
/* copy IP source and destination fields */
memcpy(data_to_checksum, saved_ip_header + 12, 8);
if ((txdw0 & CP_TX_TCPCS) && ip_protocol == IP_PROTO_TCP)
{
DPRINTF("+++ C+ mode calculating TCP checksum for "
"packet with %d bytes data\n", ip_data_len);
ip_pseudo_header *p_tcpip_hdr = (ip_pseudo_header *)data_to_checksum;
p_tcpip_hdr->zeros = 0;
p_tcpip_hdr->ip_proto = IP_PROTO_TCP;
p_tcpip_hdr->ip_payload = cpu_to_be16(ip_data_len);
tcp_header* p_tcp_hdr = (tcp_header *) (data_to_checksum+12);
p_tcp_hdr->th_sum = 0;
int tcp_checksum = ip_checksum(data_to_checksum, ip_data_len + 12);
DPRINTF("+++ C+ mode TCP checksum %04x\n",
tcp_checksum);
p_tcp_hdr->th_sum = tcp_checksum;
}
else if ((txdw0 & CP_TX_UDPCS) && ip_protocol == IP_PROTO_UDP)
{
DPRINTF("+++ C+ mode calculating UDP checksum for "
"packet with %d bytes data\n", ip_data_len);
ip_pseudo_header *p_udpip_hdr = (ip_pseudo_header *)data_to_checksum;
p_udpip_hdr->zeros = 0;
p_udpip_hdr->ip_proto = IP_PROTO_UDP;
p_udpip_hdr->ip_payload = cpu_to_be16(ip_data_len);
udp_header *p_udp_hdr = (udp_header *) (data_to_checksum+12);
p_udp_hdr->uh_sum = 0;
int udp_checksum = ip_checksum(data_to_checksum, ip_data_len + 12);
DPRINTF("+++ C+ mode UDP checksum %04x\n",
udp_checksum);
p_udp_hdr->uh_sum = udp_checksum;
}
/* restore IP header */
memcpy(eth_payload_data, saved_ip_header, hlen);
}
}
}
/* update tally counter */
++s->tally_counters.TxOk;
DPRINTF("+++ C+ mode transmitting %d bytes packet\n", saved_size);
rtl8139_transfer_frame(s, saved_buffer, saved_size, 1,
(uint8_t *) dot1q_buffer);
/* restore card space if there was no recursion and reset offset */
if (!s->cplus_txbuffer)
{
s->cplus_txbuffer = saved_buffer;
s->cplus_txbuffer_len = saved_buffer_len;
s->cplus_txbuffer_offset = 0;
}
else
{
g_free(saved_buffer);
}
}
else
{
DPRINTF("+++ C+ mode transmission continue to next descriptor\n");
}
return 1;
}
| true | qemu | cde31a0e3dc0e4ac83e454d6096350cec584adf1 |
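The row above leans on ip_checksum() for both the IP header sum and the TCP/UDP pseudo-header sums. Below is a minimal standalone sketch of the RFC 1071 ones'-complement checksum that routine implements; the function name and byte-order handling here are illustrative, not the emulator's exact code.

#include <stddef.h>
#include <stdint.h>

/* RFC 1071 Internet checksum over an arbitrary byte span. */
static uint16_t inet_checksum(const void *data, size_t len)
{
    const uint8_t *p = data;
    uint32_t sum = 0;

    while (len > 1) {               /* sum 16-bit big-endian words */
        sum += (uint32_t)((p[0] << 8) | p[1]);
        p   += 2;
        len -= 2;
    }
    if (len)                        /* pad an odd trailing byte with zero */
        sum += (uint32_t)(p[0] << 8);

    while (sum >> 16)               /* fold carries back into 16 bits */
        sum = (sum & 0xffff) + (sum >> 16);

    return (uint16_t)~sum;          /* ones' complement of the sum */
}

For the TCP/UDP cases above, the same routine runs over the 12-byte pseudo-header (source and destination IPs, a zero byte, the protocol, and the payload length) immediately followed by the transport header and data, which is exactly why data_to_checksum is positioned 12 bytes before the TCP header.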
6,318 | static int adpcm_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
ADPCMDecodeContext *c = avctx->priv_data;
ADPCMChannelStatus *cs;
int n, m, channel, i;
int block_predictor[2];
short *samples;
short *samples_end;
const uint8_t *src;
int st; /* stereo */
/* DK3 ADPCM accounting variables */
unsigned char last_byte = 0;
unsigned char nibble;
int decode_top_nibble_next = 0;
int diff_channel;
/* EA ADPCM state variables */
uint32_t samples_in_chunk;
int32_t previous_left_sample, previous_right_sample;
int32_t current_left_sample, current_right_sample;
int32_t next_left_sample, next_right_sample;
int32_t coeff1l, coeff2l, coeff1r, coeff2r;
uint8_t shift_left, shift_right;
int count1, count2;
int coeff[2][2], shift[2];//used in EA MAXIS ADPCM
if (!buf_size)
return 0;
// should protect all 4-bit ADPCM variants
// 8 is needed for CODEC_ID_ADPCM_IMA_WAV with 2 channels
if(*data_size/4 < buf_size + 8)
return -1;
samples = data;
samples_end= samples + *data_size/2;
*data_size= 0;
src = buf;
st = avctx->channels == 2 ? 1 : 0;
switch(avctx->codec->id) {
case CODEC_ID_ADPCM_IMA_QT:
n = buf_size - 2*avctx->channels;
for (channel = 0; channel < avctx->channels; channel++) {
int16_t predictor;
int step_index;
cs = &(c->status[channel]);
/* (pppppp) (piiiiiii) */
/* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
predictor = AV_RB16(src);
step_index = predictor & 0x7F;
predictor &= 0xFF80;
src += 2;
if (cs->step_index == step_index) {
int diff = (int)predictor - cs->predictor;
if (diff < 0)
diff = - diff;
if (diff > 0x7f)
goto update;
} else {
update:
cs->step_index = step_index;
cs->predictor = predictor;
}
if (cs->step_index > 88){
av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
cs->step_index = 88;
}
samples = (short*)data + channel;
for(m=32; n>0 && m>0; n--, m--) { /* in QuickTime, IMA is encoded in chunks of 34 bytes (=64 samples) */
*samples = adpcm_ima_qt_expand_nibble(cs, src[0] & 0x0F, 3);
samples += avctx->channels;
*samples = adpcm_ima_qt_expand_nibble(cs, src[0] >> 4 , 3);
samples += avctx->channels;
src ++;
}
}
if (st)
samples--;
break;
case CODEC_ID_ADPCM_IMA_WAV:
if (avctx->block_align != 0 && buf_size > avctx->block_align)
buf_size = avctx->block_align;
// samples_per_block = (block_align - 4*channels)*8 / (bits_per_sample * channels) + 1;
for(i=0; i<avctx->channels; i++){
cs = &(c->status[i]);
cs->predictor = *samples++ = (int16_t)bytestream_get_le16(&src);
cs->step_index = *src++;
if (cs->step_index > 88){
av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
cs->step_index = 88;
}
if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null but is %d!!\n", src[-1]); /* unused */
}
while(src < buf + buf_size){
for(m=0; m<4; m++){
for(i=0; i<=st; i++)
*samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] & 0x0F, 3);
for(i=0; i<=st; i++)
*samples++ = adpcm_ima_expand_nibble(&c->status[i], src[4*i] >> 4 , 3);
src++;
}
src += 4*st;
}
break;
case CODEC_ID_ADPCM_4XM:
cs = &(c->status[0]);
c->status[0].predictor= (int16_t)bytestream_get_le16(&src);
if(st){
c->status[1].predictor= (int16_t)bytestream_get_le16(&src);
}
c->status[0].step_index= (int16_t)bytestream_get_le16(&src);
if(st){
c->status[1].step_index= (int16_t)bytestream_get_le16(&src);
}
if (cs->step_index < 0) cs->step_index = 0;
if (cs->step_index > 88) cs->step_index = 88;
m= (buf_size - (src - buf))>>st;
for(i=0; i<m; i++) {
*samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] & 0x0F, 4);
if (st)
*samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] & 0x0F, 4);
*samples++ = adpcm_ima_expand_nibble(&c->status[0], src[i] >> 4, 4);
if (st)
*samples++ = adpcm_ima_expand_nibble(&c->status[1], src[i+m] >> 4, 4);
}
src += m<<st;
break;
case CODEC_ID_ADPCM_MS:
if (avctx->block_align != 0 && buf_size > avctx->block_align)
buf_size = avctx->block_align;
n = buf_size - 7 * avctx->channels;
if (n < 0)
return -1;
block_predictor[0] = av_clip(*src++, 0, 6);
block_predictor[1] = 0;
if (st)
block_predictor[1] = av_clip(*src++, 0, 6);
c->status[0].idelta = (int16_t)bytestream_get_le16(&src);
if (st){
c->status[1].idelta = (int16_t)bytestream_get_le16(&src);
}
c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor[0]];
c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor[0]];
c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor[1]];
c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor[1]];
c->status[0].sample1 = bytestream_get_le16(&src);
if (st) c->status[1].sample1 = bytestream_get_le16(&src);
c->status[0].sample2 = bytestream_get_le16(&src);
if (st) c->status[1].sample2 = bytestream_get_le16(&src);
*samples++ = c->status[0].sample2;
if (st) *samples++ = c->status[1].sample2;
*samples++ = c->status[0].sample1;
if (st) *samples++ = c->status[1].sample1;
for(;n>0;n--) {
*samples++ = adpcm_ms_expand_nibble(&c->status[0 ], src[0] >> 4 );
*samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F);
src ++;
}
break;
case CODEC_ID_ADPCM_IMA_DK4:
if (avctx->block_align != 0 && buf_size > avctx->block_align)
buf_size = avctx->block_align;
c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
c->status[0].step_index = *src++;
src++;
*samples++ = c->status[0].predictor;
if (st) {
c->status[1].predictor = (int16_t)bytestream_get_le16(&src);
c->status[1].step_index = *src++;
src++;
*samples++ = c->status[1].predictor;
}
while (src < buf + buf_size) {
/* take care of the top nibble (always left or mono channel) */
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] >> 4, 3);
/* take care of the bottom nibble, which is right sample for
* stereo, or another mono sample */
if (st)
*samples++ = adpcm_ima_expand_nibble(&c->status[1],
src[0] & 0x0F, 3);
else
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] & 0x0F, 3);
src++;
}
break;
case CODEC_ID_ADPCM_IMA_DK3:
if (avctx->block_align != 0 && buf_size > avctx->block_align)
buf_size = avctx->block_align;
if(buf_size + 16 > (samples_end - samples)*3/8)
return -1;
c->status[0].predictor = (int16_t)AV_RL16(src + 10);
c->status[1].predictor = (int16_t)AV_RL16(src + 12);
c->status[0].step_index = src[14];
c->status[1].step_index = src[15];
/* sign extend the predictors */
src += 16;
diff_channel = c->status[1].predictor;
/* the DK3_GET_NEXT_NIBBLE macro issues the break statement when
* the buffer is consumed */
while (1) {
/* for this algorithm, c->status[0] is the sum channel and
* c->status[1] is the diff channel */
/* process the first predictor of the sum channel */
DK3_GET_NEXT_NIBBLE();
adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
/* process the diff channel predictor */
DK3_GET_NEXT_NIBBLE();
adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
/* process the first pair of stereo PCM samples */
diff_channel = (diff_channel + c->status[1].predictor) / 2;
*samples++ = c->status[0].predictor + c->status[1].predictor;
*samples++ = c->status[0].predictor - c->status[1].predictor;
/* process the second predictor of the sum channel */
DK3_GET_NEXT_NIBBLE();
adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
/* process the second pair of stereo PCM samples */
diff_channel = (diff_channel + c->status[1].predictor) / 2;
*samples++ = c->status[0].predictor + c->status[1].predictor;
*samples++ = c->status[0].predictor - c->status[1].predictor;
}
break;
case CODEC_ID_ADPCM_IMA_ISS:
c->status[0].predictor = (int16_t)AV_RL16(src + 0);
c->status[0].step_index = src[2];
src += 4;
if(st) {
c->status[1].predictor = (int16_t)AV_RL16(src + 0);
c->status[1].step_index = src[2];
src += 4;
}
while (src < buf + buf_size) {
if (st) {
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] >> 4 , 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[1],
src[0] & 0x0F, 3);
} else {
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] & 0x0F, 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] >> 4 , 3);
}
src++;
}
break;
case CODEC_ID_ADPCM_IMA_WS:
/* no per-block initialization; just start decoding the data */
while (src < buf + buf_size) {
if (st) {
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] >> 4 , 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[1],
src[0] & 0x0F, 3);
} else {
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] >> 4 , 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
src[0] & 0x0F, 3);
}
src++;
}
break;
case CODEC_ID_ADPCM_XA:
while (buf_size >= 128) {
xa_decode(samples, src, &c->status[0], &c->status[1],
avctx->channels);
src += 128;
samples += 28 * 8;
buf_size -= 128;
}
break;
case CODEC_ID_ADPCM_IMA_EA_EACS:
samples_in_chunk = bytestream_get_le32(&src) >> (1-st);
if (samples_in_chunk > buf_size-4-(8<<st)) {
src += buf_size - 4;
break;
}
for (i=0; i<=st; i++)
c->status[i].step_index = bytestream_get_le32(&src);
for (i=0; i<=st; i++)
c->status[i].predictor = bytestream_get_le32(&src);
for (; samples_in_chunk; samples_in_chunk--, src++) {
*samples++ = adpcm_ima_expand_nibble(&c->status[0], *src>>4, 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3);
}
break;
case CODEC_ID_ADPCM_IMA_EA_SEAD:
for (; src < buf+buf_size; src++) {
*samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6);
*samples++ = adpcm_ima_expand_nibble(&c->status[st],src[0]&0x0F, 6);
}
break;
case CODEC_ID_ADPCM_EA:
/* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
each coding 28 stereo samples. */
if (buf_size < 12) {
av_log(avctx, AV_LOG_ERROR, "frame too small\n");
return AVERROR(EINVAL);
}
samples_in_chunk = AV_RL32(src);
if (samples_in_chunk / 28 > (buf_size - 12) / 30) {
av_log(avctx, AV_LOG_ERROR, "invalid frame\n");
return AVERROR(EINVAL);
}
src += 4;
current_left_sample = (int16_t)bytestream_get_le16(&src);
previous_left_sample = (int16_t)bytestream_get_le16(&src);
current_right_sample = (int16_t)bytestream_get_le16(&src);
previous_right_sample = (int16_t)bytestream_get_le16(&src);
for (count1 = 0; count1 < samples_in_chunk/28;count1++) {
coeff1l = ea_adpcm_table[ *src >> 4 ];
coeff2l = ea_adpcm_table[(*src >> 4 ) + 4];
coeff1r = ea_adpcm_table[*src & 0x0F];
coeff2r = ea_adpcm_table[(*src & 0x0F) + 4];
src++;
shift_left = (*src >> 4 ) + 8;
shift_right = (*src & 0x0F) + 8;
src++;
for (count2 = 0; count2 < 28; count2++) {
next_left_sample = (int32_t)((*src & 0xF0) << 24) >> shift_left;
next_right_sample = (int32_t)((*src & 0x0F) << 28) >> shift_right;
src++;
next_left_sample = (next_left_sample +
(current_left_sample * coeff1l) +
(previous_left_sample * coeff2l) + 0x80) >> 8;
next_right_sample = (next_right_sample +
(current_right_sample * coeff1r) +
(previous_right_sample * coeff2r) + 0x80) >> 8;
previous_left_sample = current_left_sample;
current_left_sample = av_clip_int16(next_left_sample);
previous_right_sample = current_right_sample;
current_right_sample = av_clip_int16(next_right_sample);
*samples++ = (unsigned short)current_left_sample;
*samples++ = (unsigned short)current_right_sample;
}
}
if (src - buf == buf_size - 2)
src += 2; // Skip terminating 0x0000
break;
case CODEC_ID_ADPCM_EA_MAXIS_XA:
for(channel = 0; channel < avctx->channels; channel++) {
for (i=0; i<2; i++)
coeff[channel][i] = ea_adpcm_table[(*src >> 4) + 4*i];
shift[channel] = (*src & 0x0F) + 8;
src++;
}
for (count1 = 0; count1 < (buf_size - avctx->channels) / avctx->channels; count1++) {
for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
for(channel = 0; channel < avctx->channels; channel++) {
int32_t sample = (int32_t)(((*(src+channel) >> i) & 0x0F) << 0x1C) >> shift[channel];
sample = (sample +
c->status[channel].sample1 * coeff[channel][0] +
c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
c->status[channel].sample2 = c->status[channel].sample1;
c->status[channel].sample1 = av_clip_int16(sample);
*samples++ = c->status[channel].sample1;
}
}
src+=avctx->channels;
}
break;
case CODEC_ID_ADPCM_EA_R1:
case CODEC_ID_ADPCM_EA_R2:
case CODEC_ID_ADPCM_EA_R3: {
/* channel numbering
2chan: 0=fl, 1=fr
4chan: 0=fl, 1=rl, 2=fr, 3=rr
6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3;
int32_t previous_sample, current_sample, next_sample;
int32_t coeff1, coeff2;
uint8_t shift;
unsigned int channel;
uint16_t *samplesC;
const uint8_t *srcC;
const uint8_t *src_end = buf + buf_size;
samples_in_chunk = (big_endian ? bytestream_get_be32(&src)
: bytestream_get_le32(&src)) / 28;
if (samples_in_chunk > UINT32_MAX/(28*avctx->channels) ||
28*samples_in_chunk*avctx->channels > samples_end-samples) {
src += buf_size - 4;
break;
}
for (channel=0; channel<avctx->channels; channel++) {
int32_t offset = (big_endian ? bytestream_get_be32(&src)
: bytestream_get_le32(&src))
+ (avctx->channels-channel-1) * 4;
if ((offset < 0) || (offset >= src_end - src - 4)) break;
srcC = src + offset;
samplesC = samples + channel;
if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) {
current_sample = (int16_t)bytestream_get_le16(&srcC);
previous_sample = (int16_t)bytestream_get_le16(&srcC);
} else {
current_sample = c->status[channel].predictor;
previous_sample = c->status[channel].prev_sample;
}
for (count1=0; count1<samples_in_chunk; count1++) {
if (*srcC == 0xEE) { /* only seen in R2 and R3 */
srcC++;
if (srcC > src_end - 30*2) break;
current_sample = (int16_t)bytestream_get_be16(&srcC);
previous_sample = (int16_t)bytestream_get_be16(&srcC);
for (count2=0; count2<28; count2++) {
*samplesC = (int16_t)bytestream_get_be16(&srcC);
samplesC += avctx->channels;
}
} else {
coeff1 = ea_adpcm_table[ *srcC>>4 ];
coeff2 = ea_adpcm_table[(*srcC>>4) + 4];
shift = (*srcC++ & 0x0F) + 8;
if (srcC > src_end - 14) break;
for (count2=0; count2<28; count2++) {
if (count2 & 1)
next_sample = (int32_t)((*srcC++ & 0x0F) << 28) >> shift;
else
next_sample = (int32_t)((*srcC & 0xF0) << 24) >> shift;
next_sample += (current_sample * coeff1) +
(previous_sample * coeff2);
next_sample = av_clip_int16(next_sample >> 8);
previous_sample = current_sample;
current_sample = next_sample;
*samplesC = current_sample;
samplesC += avctx->channels;
}
}
}
if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) {
c->status[channel].predictor = current_sample;
c->status[channel].prev_sample = previous_sample;
}
}
src = src + buf_size - (4 + 4*avctx->channels);
samples += 28 * samples_in_chunk * avctx->channels;
break;
}
case CODEC_ID_ADPCM_EA_XAS:
if (samples_end-samples < 32*4*avctx->channels
|| buf_size < (4+15)*4*avctx->channels) {
src += buf_size;
break;
}
for (channel=0; channel<avctx->channels; channel++) {
int coeff[2][4], shift[4];
short *s2, *s = &samples[channel];
for (n=0; n<4; n++, s+=32*avctx->channels) {
for (i=0; i<2; i++)
coeff[i][n] = ea_adpcm_table[(src[0]&0x0F)+4*i];
shift[n] = (src[2]&0x0F) + 8;
for (s2=s, i=0; i<2; i++, src+=2, s2+=avctx->channels)
s2[0] = (src[0]&0xF0) + (src[1]<<8);
}
for (m=2; m<32; m+=2) {
s = &samples[m*avctx->channels + channel];
for (n=0; n<4; n++, src++, s+=32*avctx->channels) {
for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) {
int level = (int32_t)((*src & (0xF0>>i)) << (24+i)) >> shift[n];
int pred = s2[-1*avctx->channels] * coeff[0][n]
+ s2[-2*avctx->channels] * coeff[1][n];
s2[0] = av_clip_int16((level + pred + 0x80) >> 8);
}
}
}
}
samples += 32*4*avctx->channels;
break;
case CODEC_ID_ADPCM_IMA_AMV:
case CODEC_ID_ADPCM_IMA_SMJPEG:
c->status[0].predictor = (int16_t)bytestream_get_le16(&src);
c->status[0].step_index = bytestream_get_le16(&src);
if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
src+=4;
while (src < buf + buf_size) {
char hi, lo;
lo = *src & 0x0F;
hi = *src >> 4;
if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
FFSWAP(char, hi, lo);
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
lo, 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
hi, 3);
src++;
}
break;
case CODEC_ID_ADPCM_CT:
while (src < buf + buf_size) {
if (st) {
*samples++ = adpcm_ct_expand_nibble(&c->status[0],
src[0] >> 4);
*samples++ = adpcm_ct_expand_nibble(&c->status[1],
src[0] & 0x0F);
} else {
*samples++ = adpcm_ct_expand_nibble(&c->status[0],
src[0] >> 4);
*samples++ = adpcm_ct_expand_nibble(&c->status[0],
src[0] & 0x0F);
}
src++;
}
break;
case CODEC_ID_ADPCM_SBPRO_4:
case CODEC_ID_ADPCM_SBPRO_3:
case CODEC_ID_ADPCM_SBPRO_2:
if (!c->status[0].step_index) {
/* the first byte is a raw sample */
*samples++ = 128 * (*src++ - 0x80);
if (st)
*samples++ = 128 * (*src++ - 0x80);
c->status[0].step_index = 1;
}
if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) {
while (src < buf + buf_size) {
*samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
src[0] >> 4, 4, 0);
*samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
src[0] & 0x0F, 4, 0);
src++;
}
} else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) {
while (src < buf + buf_size && samples + 2 < samples_end) {
*samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
src[0] >> 5 , 3, 0);
*samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
(src[0] >> 2) & 0x07, 3, 0);
*samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
src[0] & 0x03, 2, 0);
src++;
}
} else {
while (src < buf + buf_size && samples + 3 < samples_end) {
*samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
src[0] >> 6 , 2, 2);
*samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
(src[0] >> 4) & 0x03, 2, 2);
*samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
(src[0] >> 2) & 0x03, 2, 2);
*samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
src[0] & 0x03, 2, 2);
src++;
}
}
break;
case CODEC_ID_ADPCM_SWF:
{
GetBitContext gb;
const int *table;
int k0, signmask, nb_bits, count;
int size = buf_size*8;
init_get_bits(&gb, buf, size);
//read bits & initial values
nb_bits = get_bits(&gb, 2)+2;
//av_log(NULL,AV_LOG_INFO,"nb_bits: %d\n", nb_bits);
table = swf_index_tables[nb_bits-2];
k0 = 1 << (nb_bits-2);
signmask = 1 << (nb_bits-1);
while (get_bits_count(&gb) <= size - 22*avctx->channels) {
for (i = 0; i < avctx->channels; i++) {
*samples++ = c->status[i].predictor = get_sbits(&gb, 16);
c->status[i].step_index = get_bits(&gb, 6);
}
for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
int i;
for (i = 0; i < avctx->channels; i++) {
// similar to IMA adpcm
int delta = get_bits(&gb, nb_bits);
int step = ff_adpcm_step_table[c->status[i].step_index];
long vpdiff = 0; // vpdiff = (delta+0.5)*step/4
int k = k0;
do {
if (delta & k)
vpdiff += step;
step >>= 1;
k >>= 1;
} while(k);
vpdiff += step;
if (delta & signmask)
c->status[i].predictor -= vpdiff;
else
c->status[i].predictor += vpdiff;
c->status[i].step_index += table[delta & (~signmask)];
c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
c->status[i].predictor = av_clip_int16(c->status[i].predictor);
*samples++ = c->status[i].predictor;
if (samples >= samples_end) {
av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
return -1;
}
}
}
}
src += buf_size;
break;
}
case CODEC_ID_ADPCM_YAMAHA:
while (src < buf + buf_size) {
if (st) {
*samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
src[0] & 0x0F);
*samples++ = adpcm_yamaha_expand_nibble(&c->status[1],
src[0] >> 4 );
} else {
*samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
src[0] & 0x0F);
*samples++ = adpcm_yamaha_expand_nibble(&c->status[0],
src[0] >> 4 );
}
src++;
}
break;
case CODEC_ID_ADPCM_THP:
{
int table[2][16];
unsigned int samplecnt;
int prev[2][2];
int ch;
if (buf_size < 80) {
av_log(avctx, AV_LOG_ERROR, "frame too small\n");
return -1;
}
src+=4;
samplecnt = bytestream_get_be32(&src);
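/* table[2][16] and prev[2][2] are deliberately filled through their
   first row: the loop indices run past 16 (resp. 2) and spill
   contiguously into the second row. */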
for (i = 0; i < 32; i++)
table[0][i] = (int16_t)bytestream_get_be16(&src);
/* Initialize the previous sample. */
for (i = 0; i < 4; i++)
prev[0][i] = (int16_t)bytestream_get_be16(&src);
if (samplecnt >= (samples_end - samples) / (st + 1)) {
av_log(avctx, AV_LOG_ERROR, "allocated output buffer is too small\n");
return -1;
}
for (ch = 0; ch <= st; ch++) {
samples = (unsigned short *) data + ch;
/* Read in every sample for this channel. */
for (i = 0; i < samplecnt / 14; i++) {
int index = (*src >> 4) & 7;
unsigned int exp = 28 - (*src++ & 15);
int factor1 = table[ch][index * 2];
int factor2 = table[ch][index * 2 + 1];
/* Decode 14 samples. */
for (n = 0; n < 14; n++) {
int32_t sampledat;
if(n&1) sampledat= *src++ <<28;
else sampledat= (*src&0xF0)<<24;
sampledat = ((prev[ch][0]*factor1
+ prev[ch][1]*factor2) >> 11) + (sampledat>>exp);
*samples = av_clip_int16(sampledat);
prev[ch][1] = prev[ch][0];
prev[ch][0] = *samples++;
/* In the stereo case, skip one sample; it belongs
   to the other channel. */
samples += st;
}
}
}
/* In the stereo case, the previous loop advances 'samples'
   exactly one position too far. */
samples -= st;
break;
}
default:
return -1;
}
*data_size = (uint8_t *)samples - (uint8_t *)data;
return src - buf;
}
| true | FFmpeg | 346876ec168affe7c21be88d8f1acf1a75cc8409 |
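Most of the IMA variants in the row above funnel through adpcm_ima_expand_nibble(). The following is a self-contained sketch of the textbook IMA/DVI expansion that function is based on; the FFmpeg version additionally takes a shift parameter and differs in table layout, so treat this as the classic algorithm rather than the exact library code.

#include <stdint.h>

static const int16_t ima_step_table[89] = {
        7,     8,     9,    10,    11,    12,    13,    14,
       16,    17,    19,    21,    23,    25,    28,    31,
       34,    37,    41,    45,    50,    55,    60,    66,
       73,    80,    88,    97,   107,   118,   130,   143,
      157,   173,   190,   209,   230,   253,   279,   307,
      337,   371,   408,   449,   494,   544,   598,   658,
      724,   796,   876,   963,  1060,  1166,  1282,  1411,
     1552,  1707,  1878,  2066,  2272,  2499,  2749,  3024,
     3327,  3660,  4026,  4428,  4871,  5358,  5894,  6484,
     7132,  7845,  8630,  9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794,
    32767
};
static const int8_t ima_index_table[8] = { -1, -1, -1, -1, 2, 4, 6, 8 };

typedef struct { int predictor, step_index; } ImaState;

static int16_t ima_expand_nibble(ImaState *s, uint8_t nibble)
{
    int step = ima_step_table[s->step_index];
    int diff = step >> 3;                     /* rounding term */

    if (nibble & 4) diff += step;
    if (nibble & 2) diff += step >> 1;
    if (nibble & 1) diff += step >> 2;
    if (nibble & 8) s->predictor -= diff;     /* bit 3 is the sign */
    else            s->predictor += diff;

    if (s->predictor >  32767) s->predictor =  32767;  /* clamp to int16 */
    if (s->predictor < -32768) s->predictor = -32768;

    s->step_index += ima_index_table[nibble & 7];
    if (s->step_index < 0)  s->step_index = 0;
    if (s->step_index > 88) s->step_index = 88;

    return (int16_t)s->predictor;
}

The per-codec differences in the big switch above are then mostly framing: where the initial predictor/step_index pair is stored, whether the high or low nibble comes first, and how stereo channels are interleaved.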
6,319 | int DCT_common_init(MpegEncContext *s)
{
int i;
ff_put_pixels_clamped = s->dsp.put_pixels_clamped;
ff_add_pixels_clamped = s->dsp.add_pixels_clamped;
s->dct_unquantize_h263 = dct_unquantize_h263_c;
s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;
s->dct_quantize= dct_quantize_c;
if(s->avctx->dct_algo==FF_DCT_FASTINT)
s->fdct = fdct_ifast;
else
s->fdct = ff_jpeg_fdct_islow; //slow/accurate/default
if(s->avctx->idct_algo==FF_IDCT_INT){
s->idct_put= ff_jref_idct_put;
s->idct_add= ff_jref_idct_add;
s->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
}else{ //accurate/default
s->idct_put= simple_idct_put;
s->idct_add= simple_idct_add;
s->idct_permutation_type= FF_NO_IDCT_PERM;
}
#ifdef HAVE_MMX
MPV_common_init_mmx(s);
#endif
#ifdef ARCH_ALPHA
MPV_common_init_axp(s);
#endif
#ifdef HAVE_MLIB
MPV_common_init_mlib(s);
#endif
#ifdef HAVE_MMI
MPV_common_init_mmi(s);
#endif
#ifdef ARCH_ARMV4L
MPV_common_init_armv4l();
#endif
#ifdef ARCH_POWERPC
MPV_common_init_ppc(s);
#endif
switch(s->idct_permutation_type){
case FF_NO_IDCT_PERM:
for(i=0; i<64; i++)
s->idct_permutation[i]= i;
break;
case FF_LIBMPEG2_IDCT_PERM:
for(i=0; i<64; i++)
s->idct_permutation[i]= (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
break;
case FF_SIMPLE_IDCT_PERM:
for(i=0; i<64; i++)
s->idct_permutation[i]= simple_mmx_permutation[i];
break;
case FF_TRANSPOSE_IDCT_PERM:
for(i=0; i<64; i++)
s->idct_permutation[i]= ((i&7)<<3) | (i>>3);
break;
default:
fprintf(stderr, "Internal error, IDCT permutation not set\n");
return -1;
}
/* load & permute scantables
   note: only WMV uses different ones
*/
ff_init_scantable(s, &s->inter_scantable , ff_zigzag_direct);
ff_init_scantable(s, &s->intra_scantable , ff_zigzag_direct);
ff_init_scantable(s, &s->intra_h_scantable, ff_alternate_horizontal_scan);
ff_init_scantable(s, &s->intra_v_scantable, ff_alternate_vertical_scan);
return 0;
}
| false | FFmpeg | 83f238cbf0c038245d2b2dffa5beb0916e7c36d2 |
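The permutation formulas in the row above remap coefficient index i so each IDCT can read coefficients in its preferred memory layout; ff_init_scantable() then composes that permutation with a scan order. A hedged sketch of the composition, with illustrative names:

#include <stdint.h>

/* Compose an IDCT permutation with a scan order: scan position k maps
 * to permuted coefficient index perm[scan[k]]. */
static void init_scantable(uint8_t permutated[64],
                           const uint8_t perm[64],
                           const uint8_t scan[64])
{
    for (int k = 0; k < 64; k++)
        permutated[k] = perm[scan[k]];
}

/* The libmpeg2 permutation from the function above: keeps the row
 * bits (i & 0x38) and rotates the three column bits. */
static void libmpeg2_perm(uint8_t perm[64])
{
    for (int i = 0; i < 64; i++)
        perm[i] = (uint8_t)((i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2));
}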
6,320 | static void yuv2yuvX_c(SwsContext *c, const int16_t *lumFilter,
const int16_t **lumSrc, int lumFilterSize,
const int16_t *chrFilter, const int16_t **chrUSrc,
const int16_t **chrVSrc,
int chrFilterSize, const int16_t **alpSrc,
uint8_t *dest, uint8_t *uDest, uint8_t *vDest,
uint8_t *aDest, int dstW, int chrDstW)
{
//FIXME Optimize (just quickly written not optimized..)
int i;
for (i=0; i<dstW; i++) {
int val=1<<18;
int j;
for (j=0; j<lumFilterSize; j++)
val += lumSrc[j][i] * lumFilter[j];
dest[i]= av_clip_uint8(val>>19);
}
if (uDest)
for (i=0; i<chrDstW; i++) {
int u=1<<18;
int v=1<<18;
int j;
for (j=0; j<chrFilterSize; j++) {
u += chrUSrc[j][i] * chrFilter[j];
v += chrVSrc[j][i] * chrFilter[j];
}
uDest[i]= av_clip_uint8(u>>19);
vDest[i]= av_clip_uint8(v>>19);
}
if (CONFIG_SWSCALE_ALPHA && aDest)
for (i=0; i<dstW; i++) {
int val=1<<18;
int j;
for (j=0; j<lumFilterSize; j++)
val += alpSrc[j][i] * lumFilter[j];
aDest[i]= av_clip_uint8(val>>19);
}
}
| false | FFmpeg | 13a099799e89a76eb921ca452e1b04a7a28a9855 |
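The fixed-point scheme in the row above is easy to miss: the accumulator starts at 1<<18, which is the 0.5 rounding bias for the >>19 that converts the filtered sum back to 8-bit range. A distilled scalar sketch of one plane, assuming (as in the original) that the taps are fixed-point weights scaled so a full-range result lands at <<19:

#include <stdint.h>

static uint8_t clip_uint8(int v)
{
    return v < 0 ? 0 : (v > 255 ? 255 : (uint8_t)v);
}

/* Vertical pass over one plane: per column, accumulate the weighted
 * 16-bit source rows, round, and shift back down to 8 bits. */
static void vfilter_plane(const int16_t **rows, const int16_t *taps,
                          int ntaps, uint8_t *dst, int width)
{
    for (int i = 0; i < width; i++) {
        int acc = 1 << 18;              /* 0.5 in the <<19 domain */
        for (int j = 0; j < ntaps; j++)
            acc += rows[j][i] * taps[j];
        dst[i] = clip_uint8(acc >> 19);
    }
}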
6,321 | static int dxva2_mpeg2_start_frame(AVCodecContext *avctx,
av_unused const uint8_t *buffer,
av_unused uint32_t size)
{
const struct MpegEncContext *s = avctx->priv_data;
AVDXVAContext *ctx = avctx->hwaccel_context;
struct dxva2_picture_context *ctx_pic =
s->current_picture_ptr->hwaccel_picture_private;
if (!DXVA_CONTEXT_VALID(avctx, ctx))
return -1;
assert(ctx_pic);
fill_picture_parameters(avctx, ctx, s, &ctx_pic->pp);
fill_quantization_matrices(avctx, ctx, s, &ctx_pic->qm);
ctx_pic->slice_count = 0;
ctx_pic->bitstream_size = 0;
ctx_pic->bitstream = NULL;
return 0;
}
| false | FFmpeg | ab28108a361196134704071b7b34c42fc7d747c7 |
6,323 | static void avc_luma_vt_and_aver_dst_4x4_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst, int32_t dst_stride)
{
int16_t filt_const0 = 0xfb01;
int16_t filt_const1 = 0x1414;
int16_t filt_const2 = 0x1fb;
v16u8 dst0, dst1, dst2, dst3;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
v16i8 src10_r, src32_r, src54_r, src76_r, src21_r, src43_r, src65_r;
v16i8 src87_r, src2110, src4332, src6554, src8776;
v8i16 out10, out32;
v16i8 filt0, filt1, filt2;
v16u8 res;
filt0 = (v16i8) __msa_fill_h(filt_const0);
filt1 = (v16i8) __msa_fill_h(filt_const1);
filt2 = (v16i8) __msa_fill_h(filt_const2);
LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
src += (5 * src_stride);
ILVR_B4_SB(src1, src0, src2, src1, src3, src2, src4, src3,
src10_r, src21_r, src32_r, src43_r);
ILVR_D2_SB(src21_r, src10_r, src43_r, src32_r, src2110, src4332);
XORI_B2_128_SB(src2110, src4332);
LD_SB4(src, src_stride, src5, src6, src7, src8);
ILVR_B4_SB(src5, src4, src6, src5, src7, src6, src8, src7,
src54_r, src65_r, src76_r, src87_r);
ILVR_D2_SB(src65_r, src54_r, src87_r, src76_r, src6554, src8776);
XORI_B2_128_SB(src6554, src8776);
out10 = DPADD_SH3_SH(src2110, src4332, src6554, filt0, filt1, filt2);
out32 = DPADD_SH3_SH(src4332, src6554, src8776, filt0, filt1, filt2);
SRARI_H2_SH(out10, out32, 5);
SAT_SH2_SH(out10, out32, 7);
LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
res = PCKEV_XORI128_UB(out10, out32);
ILVR_W2_UB(dst1, dst0, dst3, dst2, dst0, dst1);
dst0 = (v16u8) __msa_pckev_d((v2i64) dst1, (v2i64) dst0);
dst0 = __msa_aver_u_b(res, dst0);
ST4x4_UB(dst0, dst0, 0, 1, 2, 3, dst, dst_stride);
}
| false | FFmpeg | 72dbc610be3272ba36603f78a39cc2d2d8fe0cc3 |
6,324 | static int decode_frame_mp3on4(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
MP3On4DecodeContext *s = avctx->priv_data;
MPADecodeContext *m;
int fsize, len = buf_size, out_size = 0;
uint32_t header;
OUT_INT **out_samples;
OUT_INT *outptr[2];
int fr, ch, ret;
/* get output buffer */
frame->nb_samples = MPA_FRAME_SIZE;
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
out_samples = (OUT_INT **)frame->extended_data;
// Discard too short frames
if (buf_size < HEADER_SIZE)
return AVERROR_INVALIDDATA;
avctx->bit_rate = 0;
ch = 0;
for (fr = 0; fr < s->frames; fr++) {
fsize = AV_RB16(buf) >> 4;
fsize = FFMIN3(fsize, len, MPA_MAX_CODED_FRAME_SIZE);
m = s->mp3decctx[fr];
assert(m != NULL);
if (fsize < HEADER_SIZE) {
av_log(avctx, AV_LOG_ERROR, "Frame size smaller than header size\n");
return AVERROR_INVALIDDATA;
}
header = (AV_RB32(buf) & 0x000fffff) | s->syncword; // patch header
if (ff_mpa_check_header(header) < 0) // Bad header, discard block
break;
avpriv_mpegaudio_decode_header((MPADecodeHeader *)m, header);
if (ch + m->nb_channels > avctx->channels ||
s->coff[fr] + m->nb_channels > avctx->channels) {
av_log(avctx, AV_LOG_ERROR, "frame channel count exceeds codec "
"channel count\n");
return AVERROR_INVALIDDATA;
}
ch += m->nb_channels;
outptr[0] = out_samples[s->coff[fr]];
if (m->nb_channels > 1)
outptr[1] = out_samples[s->coff[fr] + 1];
if ((ret = mp_decode_frame(m, outptr, buf, fsize)) < 0)
return ret;
out_size += ret;
buf += fsize;
len -= fsize;
avctx->bit_rate += m->bit_rate;
}
/* update codec info */
avctx->sample_rate = s->mp3decctx[0]->sample_rate;
frame->nb_samples = out_size / (avctx->channels * sizeof(OUT_INT));
*got_frame_ptr = 1;
return buf_size;
}
| false | FFmpeg | 955aec3c7c7be39b659197e1ec379a09f2b7c41c |
6,325 | static int dxva2_h264_start_frame(AVCodecContext *avctx,
av_unused const uint8_t *buffer,
av_unused uint32_t size)
{
const H264Context *h = avctx->priv_data;
AVDXVAContext *ctx = avctx->hwaccel_context;
struct dxva2_picture_context *ctx_pic = h->cur_pic_ptr->hwaccel_picture_private;
if (DXVA_CONTEXT_DECODER(avctx, ctx) == NULL ||
DXVA_CONTEXT_CFG(avctx, ctx) == NULL ||
DXVA_CONTEXT_COUNT(avctx, ctx) <= 0)
return -1;
assert(ctx_pic);
/* Fill up DXVA_PicParams_H264 */
fill_picture_parameters(avctx, ctx, h, &ctx_pic->pp);
/* Fill up DXVA_Qmatrix_H264 */
fill_scaling_lists(avctx, ctx, h, &ctx_pic->qm);
ctx_pic->slice_count = 0;
ctx_pic->bitstream_size = 0;
ctx_pic->bitstream = NULL;
return 0;
}
| false | FFmpeg | 0ac2d86c4758e1419934905b6c092910296aa16a |
6,326 | static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
{
#ifdef HAVE_MMX
if(uDest != NULL)
{
asm volatile(
YSCALEYUV2YV12X(0, CHR_MMX_FILTER_OFFSET)
:: "r" (&c->redDither),
"r" (uDest), "p" (chrDstW)
: "%"REG_a, "%"REG_d, "%"REG_S
);
asm volatile(
YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET)
:: "r" (&c->redDither),
"r" (vDest), "p" (chrDstW)
: "%"REG_a, "%"REG_d, "%"REG_S
);
}
asm volatile(
YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET)
:: "r" (&c->redDither),
"r" (dest), "p" (dstW)
: "%"REG_a, "%"REG_d, "%"REG_S
);
#else
#ifdef HAVE_ALTIVEC
yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
chrFilter, chrSrc, chrFilterSize,
dest, uDest, vDest, dstW, chrDstW);
#else //HAVE_ALTIVEC
yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
chrFilter, chrSrc, chrFilterSize,
dest, uDest, vDest, dstW, chrDstW);
#endif //!HAVE_ALTIVEC
#endif
}
| false | FFmpeg | bca11e75fbc6b922438670733c6cb418c70433b4 |
6,327 | static inline void dv_encode_video_segment(DVVideoContext *s,
uint8_t *dif,
const uint16_t *mb_pos_ptr)
{
int mb_index, i, j, v;
int mb_x, mb_y, c_offset, linesize;
uint8_t* y_ptr;
uint8_t* data;
uint8_t* ptr;
int do_edge_wrap;
DCTELEM block[64] __align8;
DCTELEM sblock[5*6][64] __align8;
EncBlockInfo enc_blks[5*6];
PutBitContext pbs[5*6];
PutBitContext* pb;
EncBlockInfo* enc_blk;
int vs_bit_size = 0;
int qnos[5];
enc_blk = &enc_blks[0];
pb = &pbs[0];
for(mb_index = 0; mb_index < 5; mb_index++) {
v = *mb_pos_ptr++;
mb_x = v & 0xff;
mb_y = v >> 8;
y_ptr = s->picture.data[0] + (mb_y * s->picture.linesize[0] * 8) + (mb_x * 8);
c_offset = (s->sys->pix_fmt == PIX_FMT_YUV411P) ?
((mb_y * s->picture.linesize[1] * 8) + ((mb_x >> 2) * 8)) :
(((mb_y >> 1) * s->picture.linesize[1] * 8) + ((mb_x >> 1) * 8));
do_edge_wrap = 0;
qnos[mb_index] = 15; /* No quantization */
ptr = dif + mb_index*80 + 4;
for(j = 0;j < 6; j++) {
if (j < 4) { /* Four Y blocks */
/* NOTE: at end of line, the macroblock is handled as 420 */
if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x < (704 / 8)) {
data = y_ptr + (j * 8);
} else {
data = y_ptr + ((j & 1) * 8) + ((j >> 1) * 8 * s->picture.linesize[0]);
}
linesize = s->picture.linesize[0];
} else { /* Cr and Cb blocks */
/* don't ask Fabrice why they inverted Cb and Cr ! */
data = s->picture.data[6 - j] + c_offset;
linesize = s->picture.linesize[6 - j];
if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8))
do_edge_wrap = 1;
}
/* Everything is set up -- now just copy data -> DCT block */
if (do_edge_wrap) { /* Edge wrap copy: 4x16 -> 8x8 */
uint8_t* d;
DCTELEM *b = block;
for (i=0;i<8;i++) {
d = data + 8 * linesize;
b[0] = data[0]; b[1] = data[1]; b[2] = data[2]; b[3] = data[3];
b[4] = d[0]; b[5] = d[1]; b[6] = d[2]; b[7] = d[3];
data += linesize;
b += 8;
}
} else { /* Simple copy: 8x8 -> 8x8 */
s->get_pixels(block, data, linesize);
}
enc_blk->dct_mode = dv_guess_dct_mode(block);
enc_blk->mb = &sblock[mb_index*6+j][0];
enc_blk->area_q[0] = enc_blk->area_q[1] = enc_blk->area_q[2] = enc_blk->area_q[3] = 0;
enc_blk->partial_bit_count = 0;
enc_blk->partial_bit_buffer = 0;
enc_blk->cur_ac = 1;
s->fdct[enc_blk->dct_mode](block);
dv_set_class_number(block, enc_blk,
enc_blk->dct_mode ? ff_zigzag248_direct : ff_zigzag_direct,
j/4*(j%2));
init_put_bits(pb, ptr, block_sizes[j]/8);
put_bits(pb, 9, (uint16_t)(((enc_blk->mb[0] >> 3) - 1024) >> 2));
put_bits(pb, 1, enc_blk->dct_mode);
put_bits(pb, 2, enc_blk->cno);
vs_bit_size += enc_blk->bit_size[0] + enc_blk->bit_size[1] +
enc_blk->bit_size[2] + enc_blk->bit_size[3];
++enc_blk;
++pb;
ptr += block_sizes[j]/8;
}
}
if (vs_total_ac_bits < vs_bit_size)
dv_guess_qnos(&enc_blks[0], &qnos[0]);
for (i=0; i<5; i++) {
dif[i*80 + 3] = qnos[i];
}
/* First pass over individual cells only */
for (j=0; j<5*6; j++)
dv_encode_ac(&enc_blks[j], &pbs[j], 1);
/* Second pass over each MB space */
for (j=0; j<5*6; j++) {
if (enc_blks[j].cur_ac < 65 || enc_blks[j].partial_bit_count)
dv_encode_ac(&enc_blks[j], &pbs[(j/6)*6], 6);
}
/* Third and final pass over the whole video segment space */
for (j=0; j<5*6; j++) {
if (enc_blks[j].cur_ac < 65 || enc_blks[j].partial_bit_count)
dv_encode_ac(&enc_blks[j], &pbs[0], 6*5);
}
for (j=0; j<5*6; j++)
flush_put_bits(&pbs[j]);
}
| false | FFmpeg | c619ff6daf93a8f3c03decf2d3345d2474c3db91 |
6,328 | static double fade_gain(int curve, int64_t index, int range)
{
double gain;
gain = av_clipd(1.0 * index / range, 0, 1.0);
switch (curve) {
case QSIN:
gain = sin(gain * M_PI / 2.0);
break;
case IQSIN:
gain = 0.636943 * asin(gain);
break;
case ESIN:
gain = 1.0 - cos(M_PI / 4.0 * (pow(2.0*gain - 1, 3) + 1));
break;
case HSIN:
gain = (1.0 - cos(gain * M_PI)) / 2.0;
break;
case IHSIN:
gain = 0.318471 * acos(1 - 2 * gain);
break;
case EXP:
gain = pow(0.1, (1 - gain) * 5.0);
break;
case LOG:
gain = av_clipd(0.0868589 * log(100000 * gain), 0, 1.0);
break;
case PAR:
gain = 1 - sqrt(1 - gain);
break;
case IPAR:
gain = (1 - (1 - gain) * (1 - gain));
break;
case QUA:
gain *= gain;
break;
case CUB:
gain = gain * gain * gain;
break;
case SQU:
gain = sqrt(gain);
break;
case CBR:
gain = cbrt(gain);
break;
case DESE:
gain = gain <= 0.5 ? pow(2 * gain, 1/3.) / 2: 1 - pow(2 * (1 - gain), 1/3.) / 2;
break;
case DESI:
gain = gain <= 0.5 ? pow(2 * gain, 3) / 2: 1 - pow(2 * (1 - gain), 3) / 2;
break;
}
return gain;
}
| false | FFmpeg | 9ee1feaa7c2822240b93d4640ec6e8d3e5b6139b |
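A small usage sketch for the row above: applying the half-sine (HSIN) curve as a fade-in over a short buffer. The helper reproduces only that one case of fade_gain() and clamps like the original; the buffer and surrounding code are illustrative.

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* HSIN case of fade_gain(): 0 -> 1 along half a cosine period. */
static double hsin_gain(long index, long range)
{
    double g = (double)index / (double)range;
    g = g < 0.0 ? 0.0 : (g > 1.0 ? 1.0 : g);
    return (1.0 - cos(g * M_PI)) / 2.0;
}

int main(void)
{
    float buf[8] = { 1, 1, 1, 1, 1, 1, 1, 1 };

    for (long i = 0; i < 8; i++)          /* fade the buffer in */
        buf[i] *= (float)hsin_gain(i, 8);

    for (int i = 0; i < 8; i++)
        printf("%.3f ", buf[i]);
    putchar('\n');
    return 0;
}

The S-shaped curves (HSIN, QSIN, CUB, ...) differ only in how the normalized position is warped before it is used as a gain, which is why the function clamps the position once up front and then dispatches on the curve.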
6,329 | static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
switch(avctx->colorspace) {
case AVCOL_SPC_BT709:
*matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
break;
case AVCOL_SPC_UNSPECIFIED:
*matrix = NULL;
break;
case AVCOL_SPC_BT470BG:
case AVCOL_SPC_SMPTE170M:
*matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
break;
case AVCOL_SPC_SMPTE240M:
*matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
break;
case AVCOL_SPC_BT2020_NCL:
*matrix = kCVImageBufferYCbCrMatrix_ITU_R_2020;
break;
default:
av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
return -1;
}
return 0;
}
| false | FFmpeg | dcd3418a35aab7ef283b68ed9997ce4ac204094e |
6,330 | static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec, int copy)
{
AVStream *fst;
fst = av_mallocz(sizeof(AVStream));
if (!fst)
return NULL;
if (copy) {
fst->codec= avcodec_alloc_context();
memcpy(fst->codec, codec, sizeof(AVCodecContext));
if (codec->extradata_size) {
fst->codec->extradata = av_malloc(codec->extradata_size);
memcpy(fst->codec->extradata, codec->extradata,
codec->extradata_size);
}
} else {
/* live streams must use the actual feed's codec since it may be
* updated later to carry extradata needed by the streams.
*/
fst->codec = codec;
}
fst->priv_data = av_mallocz(sizeof(FeedData));
fst->index = stream->nb_streams;
av_set_pts_info(fst, 33, 1, 90000);
fst->sample_aspect_ratio = (AVRational){0,1};
stream->streams[stream->nb_streams++] = fst;
return fst;
}
| false | FFmpeg | 34c340d49f2e6ca5190f16ced82da32561a7bef2 |
6,331 | static void buffer_reserve(Buffer *buffer, size_t len)
{
if ((buffer->capacity - buffer->offset) < len) {
buffer->capacity += (len + 1024);
buffer->buffer = qemu_realloc(buffer->buffer, buffer->capacity);
if (buffer->buffer == NULL) {
fprintf(stderr, "vnc: out of memory\n");
exit(1);
}
}
}
| true | qemu | 2f9606b3736c3be4dbd606c46525c7b770ced119 |
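The exit(1) in the row above is the notable part; QEMU's allocation wrappers (qemu_realloc, and g_realloc in later code) are generally documented to abort on failure themselves, which makes the NULL check unreachable. A hedged sketch of the same grow-on-demand helper using plain libc with an error return instead, so a caller could drop one client rather than the whole process (names are illustrative):

#include <stdint.h>
#include <stdlib.h>

typedef struct {
    uint8_t *buffer;
    size_t   capacity;
    size_t   offset;
} GrowBuf;

/* Ensure at least 'len' free bytes; return 0 on success, -1 on OOM. */
static int growbuf_reserve(GrowBuf *b, size_t len)
{
    if (b->capacity - b->offset >= len)
        return 0;

    size_t new_cap = b->offset + len + 1024;   /* same growth policy */
    uint8_t *p = realloc(b->buffer, new_cap);
    if (!p)
        return -1;                             /* caller decides how to fail */

    b->buffer   = p;
    b->capacity = new_cap;
    return 0;
}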
6,332 | static void qmp_input_end_struct(Visitor *v, Error **errp)
{
QmpInputVisitor *qiv = to_qiv(v);
qmp_input_pop(qiv, errp);
}
| true | qemu | 15c2f669e3fb2bc97f7b42d1871f595c0ac24af8 |