label: int64 (0-1)
func1: string (lengths 23-97k)
id: int64 (0-27.3k)
1
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN) { int code, i, j, level, val, run; if (*EOBRUN) { (*EOBRUN)--; return 0; } { OPEN_READER(re, &s->gb); for (i = ss; ; i++) { UPDATE_CACHE(re, &s->gb); GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2); run = ((unsigned) code) >> 4; code &= 0xF; if (code) { i += run; if (code > MIN_CACHE_BITS - 16) UPDATE_CACHE(re, &s->gb); { int cache = GET_CACHE(re, &s->gb); int sign = (~cache) >> 31; level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign; } LAST_SKIP_BITS(re, &s->gb, code); if (i >= se) { if (i == se) { j = s->scantable.permutated[se]; block[j] = level * (quant_matrix[se] << Al); break; } av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); return AVERROR_INVALIDDATA; } j = s->scantable.permutated[i]; block[j] = level * (quant_matrix[i] << Al); } else { if (run == 0xF) {// ZRL - skip 15 coefficients i += 15; if (i >= se) { av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i); return AVERROR_INVALIDDATA; } } else { val = (1 << run); if (run) { UPDATE_CACHE(re, &s->gb); val += NEG_USR32(GET_CACHE(re, &s->gb), run); LAST_SKIP_BITS(re, &s->gb, run); } *EOBRUN = val - 1; break; } } } CLOSE_READER(re, &s->gb); } if (i > *last_nnz) *last_nnz = i; return 0; }
15,691
0
static int asf_read_header(AVFormatContext *s) { ASFContext *asf = s->priv_data; ff_asf_guid g; AVIOContext *pb = s->pb; int i; int64_t gsize; ff_get_guid(pb, &g); if (ff_guidcmp(&g, &ff_asf_header)) return AVERROR_INVALIDDATA; avio_rl64(pb); avio_rl32(pb); avio_r8(pb); avio_r8(pb); memset(&asf->asfid2avid, -1, sizeof(asf->asfid2avid)); for (i = 0; i<128; i++) asf->streams[i].stream_language_index = 128; // invalid stream index means no language info for (;;) { uint64_t gpos = avio_tell(pb); ff_get_guid(pb, &g); gsize = avio_rl64(pb); print_guid(&g); if (!ff_guidcmp(&g, &ff_asf_data_header)) { asf->data_object_offset = avio_tell(pb); /* If not streaming, gsize is not unlimited (how?), * and there is enough space in the file.. */ if (!(asf->hdr.flags & 0x01) && gsize >= 100) asf->data_object_size = gsize - 24; else asf->data_object_size = (uint64_t)-1; break; } if (gsize < 24) return AVERROR_INVALIDDATA; if (!ff_guidcmp(&g, &ff_asf_file_header)) { int ret = asf_read_file_properties(s, gsize); if (ret < 0) return ret; } else if (!ff_guidcmp(&g, &ff_asf_stream_header)) { int ret = asf_read_stream_properties(s, gsize); if (ret < 0) return ret; } else if (!ff_guidcmp(&g, &ff_asf_comment_header)) { asf_read_content_desc(s, gsize); } else if (!ff_guidcmp(&g, &ff_asf_language_guid)) { asf_read_language_list(s, gsize); } else if (!ff_guidcmp(&g, &ff_asf_extended_content_header)) { asf_read_ext_content_desc(s, gsize); } else if (!ff_guidcmp(&g, &ff_asf_metadata_header)) { asf_read_metadata(s, gsize); } else if (!ff_guidcmp(&g, &ff_asf_metadata_library_header)) { asf_read_metadata(s, gsize); } else if (!ff_guidcmp(&g, &ff_asf_ext_stream_header)) { asf_read_ext_stream_properties(s, gsize); // there could be a optional stream properties object to follow // if so the next iteration will pick it up continue; } else if (!ff_guidcmp(&g, &ff_asf_head1_guid)) { ff_get_guid(pb, &g); avio_skip(pb, 6); continue; } else if (!ff_guidcmp(&g, &ff_asf_marker_header)) { asf_read_marker(s, gsize); } else if (avio_feof(pb)) { return AVERROR_EOF; } else { if (!s->keylen) { if (!ff_guidcmp(&g, &ff_asf_content_encryption)) { unsigned int len; int ret; AVPacket pkt; av_log(s, AV_LOG_WARNING, "DRM protected stream detected, decoding will likely fail!\n"); len= avio_rl32(pb); av_log(s, AV_LOG_DEBUG, "Secret data:\n"); if ((ret = av_get_packet(pb, &pkt, len)) < 0) return ret; av_hex_dump_log(s, AV_LOG_DEBUG, pkt.data, pkt.size); av_free_packet(&pkt); len= avio_rl32(pb); get_tag(s, "ASF_Protection_Type", -1, len, 32); len= avio_rl32(pb); get_tag(s, "ASF_Key_ID", -1, len, 32); len= avio_rl32(pb); get_tag(s, "ASF_License_URL", -1, len, 32); } else if (!ff_guidcmp(&g, &ff_asf_ext_content_encryption)) { av_log(s, AV_LOG_WARNING, "Ext DRM protected stream detected, decoding will likely fail!\n"); av_dict_set(&s->metadata, "encryption", "ASF Extended Content Encryption", 0); } else if (!ff_guidcmp(&g, &ff_asf_digital_signature)) { av_log(s, AV_LOG_INFO, "Digital signature detected!\n"); } } } if (avio_tell(pb) != gpos + gsize) av_log(s, AV_LOG_DEBUG, "gpos mismatch our pos=%"PRIu64", end=%"PRId64"\n", avio_tell(pb) - gpos, gsize); avio_seek(pb, gpos + gsize, SEEK_SET); } ff_get_guid(pb, &g); avio_rl64(pb); avio_r8(pb); avio_r8(pb); if (avio_feof(pb)) return AVERROR_EOF; asf->data_offset = avio_tell(pb); asf->packet_size_left = 0; for (i = 0; i < 128; i++) { int stream_num = asf->asfid2avid[i]; if (stream_num >= 0) { AVStream *st = s->streams[stream_num]; if (!st->codec->bit_rate) st->codec->bit_rate = asf->stream_bitrates[i]; if (asf->dar[i].num > 0 && asf->dar[i].den > 0) { av_reduce(&st->sample_aspect_ratio.num, &st->sample_aspect_ratio.den, asf->dar[i].num, asf->dar[i].den, INT_MAX); } else if ((asf->dar[0].num > 0) && (asf->dar[0].den > 0) && // Use ASF container value if the stream doesn't set AR. (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)) av_reduce(&st->sample_aspect_ratio.num, &st->sample_aspect_ratio.den, asf->dar[0].num, asf->dar[0].den, INT_MAX); av_log(s, AV_LOG_TRACE, "i=%d, st->codec->codec_type:%d, asf->dar %d:%d sar=%d:%d\n", i, st->codec->codec_type, asf->dar[i].num, asf->dar[i].den, st->sample_aspect_ratio.num, st->sample_aspect_ratio.den); // copy and convert language codes to the frontend if (asf->streams[i].stream_language_index < 128) { const char *rfc1766 = asf->stream_languages[asf->streams[i].stream_language_index]; if (rfc1766 && strlen(rfc1766) > 1) { const char primary_tag[3] = { rfc1766[0], rfc1766[1], '\0' }; // ignore country code if any const char *iso6392 = av_convert_lang_to(primary_tag, AV_LANG_ISO639_2_BIBL); if (iso6392) av_dict_set(&st->metadata, "language", iso6392, 0); } } } } ff_metadata_conv(&s->metadata, NULL, ff_asf_metadata_conv); return 0; }
15,692
0
int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush) { AVPacketList *pktl; int stream_count = 0; int i; if (pkt) { ff_interleave_add_packet(s, pkt, interleave_compare_dts); } if (s->max_interleave_delta > 0 && s->packet_buffer && !flush) { AVPacket *top_pkt = &s->packet_buffer->pkt; int64_t delta_dts = INT64_MIN; int64_t top_dts = av_rescale_q(top_pkt->dts, s->streams[top_pkt->stream_index]->time_base, AV_TIME_BASE_Q); for (i = 0; i < s->nb_streams; i++) { int64_t last_dts; const AVPacketList *last = s->streams[i]->last_in_packet_buffer; if (!last) continue; last_dts = av_rescale_q(last->pkt.dts, s->streams[i]->time_base, AV_TIME_BASE_Q); delta_dts = FFMAX(delta_dts, last_dts - top_dts); stream_count++; } if (delta_dts > s->max_interleave_delta) { av_log(s, AV_LOG_DEBUG, "Delay between the first packet and last packet in the " "muxing queue is %"PRId64" > %"PRId64": forcing output\n", delta_dts, s->max_interleave_delta); flush = 1; } } else { for (i = 0; i < s->nb_streams; i++) stream_count += !!s->streams[i]->last_in_packet_buffer; } if (stream_count && (s->internal->nb_interleaved_streams == stream_count || flush)) { pktl = s->packet_buffer; *out = pktl->pkt; s->packet_buffer = pktl->next; if (!s->packet_buffer) s->packet_buffer_end = NULL; if (s->streams[out->stream_index]->last_in_packet_buffer == pktl) s->streams[out->stream_index]->last_in_packet_buffer = NULL; av_freep(&pktl); return 1; } else { av_init_packet(out); return 0; } }
15,694
0
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type) { H264Context *h = &s->h; int i, j, k, m, dir, mode; int cbp = 0; uint32_t vlc; int8_t *top, *left; const int mb_xy = h->mb_xy; const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride; h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF; h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF; h->topright_samples_available = 0xFFFF; if (mb_type == 0) { /* SKIP */ if (h->pict_type == AV_PICTURE_TYPE_P || s->next_pic->mb_type[mb_xy] == -1) { svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16, 0, 0, 0, 0, 0, 0); if (h->pict_type == AV_PICTURE_TYPE_B) svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16, 0, 0, 0, 0, 1, 1); mb_type = MB_TYPE_SKIP; } else { mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6); if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0) return -1; if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0) return -1; mb_type = MB_TYPE_16x16; } } else if (mb_type < 8) { /* INTER */ if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb)) mode = THIRDPEL_MODE; else if (s->halfpel_flag && s->thirdpel_flag == !get_bits1(&h->gb)) mode = HALFPEL_MODE; else mode = FULLPEL_MODE; /* fill caches */ /* note ref_cache should contain here: * ???????? * ???11111 * N??11111 * N??11111 * N??11111 */ for (m = 0; m < 2; m++) { if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) { for (i = 0; i < 4; i++) AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8], h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]); } else { for (i = 0; i < 4; i++) AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]); } if (h->mb_y > 0) { memcpy(h->mv_cache[m][scan8[0] - 1 * 8], h->cur_pic.motion_val[m][b_xy - h->b_stride], 4 * 2 * sizeof(int16_t)); memset(&h->ref_cache[m][scan8[0] - 1 * 8], (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4); if (h->mb_x < h->mb_width - 1) { AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8], h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]); h->ref_cache[m][scan8[0] + 4 - 1 * 8] = (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 || h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1; } else h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE; if (h->mb_x > 0) { AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8], h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]); h->ref_cache[m][scan8[0] - 1 - 1 * 8] = (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1; } else h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE; } else memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1], PART_NOT_AVAILABLE, 8); if (h->pict_type != AV_PICTURE_TYPE_B) break; } /* decode motion vector(s) and form prediction(s) */ if (h->pict_type == AV_PICTURE_TYPE_P) { if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0) return -1; } else { /* AV_PICTURE_TYPE_B */ if (mb_type != 2) { if (svq3_mc_dir(s, 0, mode, 0, 0) < 0) return -1; } else { for (i = 0; i < 4; i++) memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride], 0, 4 * 2 * sizeof(int16_t)); } if (mb_type != 1) { if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0) return -1; } else { for (i = 0; i < 4; i++) memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride], 0, 4 * 2 * sizeof(int16_t)); } } mb_type = MB_TYPE_16x16; } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */ memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t)); if (mb_type == 8) { if (h->mb_x > 0) { for (i = 0; i < 4; i++) h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i]; if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) h->left_samples_available = 0x5F5F; } if (h->mb_y > 0) { h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0]; h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1]; h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2]; h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3]; if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1) h->top_samples_available = 0x33FF; } /* decode prediction codes for luma blocks */ for (i = 0; i < 16; i += 2) { vlc = svq3_get_ue_golomb(&h->gb); if (vlc >= 25U) { av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc); return -1; } left = &h->intra4x4_pred_mode_cache[scan8[i] - 1]; top = &h->intra4x4_pred_mode_cache[scan8[i] - 8]; left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]]; left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]]; if (left[1] == -1 || left[2] == -1) { av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n"); return -1; } } } else { /* mb_type == 33, DC_128_PRED block type */ for (i = 0; i < 4; i++) memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4); } write_back_intra_pred_mode(h); if (mb_type == 8) { ff_h264_check_intra4x4_pred_mode(h); h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF; h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF; } else { for (i = 0; i < 4; i++) memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4); h->top_samples_available = 0x33FF; h->left_samples_available = 0x5F5F; } mb_type = MB_TYPE_INTRA4x4; } else { /* INTRA16x16 */ dir = i_mb_type_info[mb_type - 8].pred_mode; dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1; if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1) { av_log(h->avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n"); return -1; } cbp = i_mb_type_info[mb_type - 8].cbp; mb_type = MB_TYPE_INTRA16x16; } if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) { for (i = 0; i < 4; i++) memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride], 0, 4 * 2 * sizeof(int16_t)); if (h->pict_type == AV_PICTURE_TYPE_B) { for (i = 0; i < 4; i++) memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride], 0, 4 * 2 * sizeof(int16_t)); } } if (!IS_INTRA4x4(mb_type)) { memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8); } if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) { memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t)); } if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) { if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){ av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc); return -1; } cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc]; } if (IS_INTRA16x16(mb_type) || (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) { h->qscale += svq3_get_se_golomb(&h->gb); if (h->qscale > 31u) { av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale); return -1; } } if (IS_INTRA16x16(mb_type)) { AV_ZERO128(h->mb_luma_dc[0] + 0); AV_ZERO128(h->mb_luma_dc[0] + 8); if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) { av_log(h->avctx, AV_LOG_ERROR, "error while decoding intra luma dc\n"); return -1; } } if (cbp) { const int index = IS_INTRA16x16(mb_type) ? 1 : 0; const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1); for (i = 0; i < 4; i++) if ((cbp & (1 << i))) { for (j = 0; j < 4; j++) { k = index ? (1 * (j & 1) + 2 * (i & 1) + 2 * (j & 2) + 4 * (i & 2)) : (4 * i + j); h->non_zero_count_cache[scan8[k]] = 1; if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) { av_log(h->avctx, AV_LOG_ERROR, "error while decoding block\n"); return -1; } } } if ((cbp & 0x30)) { for (i = 1; i < 3; ++i) if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) { av_log(h->avctx, AV_LOG_ERROR, "error while decoding chroma dc block\n"); return -1; } if ((cbp & 0x20)) { for (i = 1; i < 3; i++) { for (j = 0; j < 4; j++) { k = 16 * i + j; h->non_zero_count_cache[scan8[k]] = 1; if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) { av_log(h->avctx, AV_LOG_ERROR, "error while decoding chroma ac block\n"); return -1; } } } } } } h->cbp = cbp; h->cur_pic.mb_type[mb_xy] = mb_type; if (IS_INTRA(mb_type)) h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1); return 0; }
15,695
0
static void do_getfd(Monitor *mon, const QDict *qdict) { const char *fdname = qdict_get_str(qdict, "fdname"); mon_fd_t *monfd; int fd; fd = qemu_chr_get_msgfd(mon->chr); if (fd == -1) { monitor_printf(mon, "getfd: no file descriptor supplied via SCM_RIGHTS\n"); return; } if (qemu_isdigit(fdname[0])) { monitor_printf(mon, "getfd: monitor names may not begin with a number\n"); return; } fd = dup(fd); if (fd == -1) { monitor_printf(mon, "Failed to dup() file descriptor: %s\n", strerror(errno)); return; } LIST_FOREACH(monfd, &mon->fds, next) { if (strcmp(monfd->name, fdname) != 0) { continue; } close(monfd->fd); monfd->fd = fd; return; } monfd = qemu_mallocz(sizeof(mon_fd_t)); monfd->name = qemu_strdup(fdname); monfd->fd = fd; LIST_INSERT_HEAD(&mon->fds, monfd, next); }
15,697
0
hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash) { CPUPPCState *env = &cpu->env; return (hash * HASH_PTEG_SIZE_32) & env->htab_mask; }
15,698
0
static void mv88w8618_register_devices(void) { #ifdef HAS_AUDIO sysbus_register_withprop(&mv88w8618_audio_info); #endif }
15,699
0
static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev) { struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) }; struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) }; struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) }; int ret, i; ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name); if (ret < 0) { error_report("vfio: error getting device %s from group %d: %m", name, group->groupid); error_printf("Verify all devices in group %d are bound to vfio-pci " "or pci-stub and not already in use\n", group->groupid); return ret; } vdev->fd = ret; vdev->group = group; QLIST_INSERT_HEAD(&group->device_list, vdev, next); /* Sanity check device */ ret = ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &dev_info); if (ret) { error_report("vfio: error getting device info: %m"); goto error; } DPRINTF("Device %s flags: %u, regions: %u, irgs: %u\n", name, dev_info.flags, dev_info.num_regions, dev_info.num_irqs); if (!(dev_info.flags & VFIO_DEVICE_FLAGS_PCI)) { error_report("vfio: Um, this isn't a PCI device"); goto error; } vdev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET); if (!vdev->reset_works) { error_report("Warning, device %s does not support reset", name); } if (dev_info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) { error_report("vfio: unexpected number of io regions %u", dev_info.num_regions); goto error; } if (dev_info.num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) { error_report("vfio: unexpected number of irqs %u", dev_info.num_irqs); goto error; } for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) { reg_info.index = i; ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info); if (ret) { error_report("vfio: Error getting region %d info: %m", i); goto error; } DPRINTF("Device %s region %d:\n", name, i); DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n", (unsigned long)reg_info.size, (unsigned long)reg_info.offset, (unsigned long)reg_info.flags); vdev->bars[i].flags = reg_info.flags; vdev->bars[i].size = reg_info.size; vdev->bars[i].fd_offset = reg_info.offset; vdev->bars[i].fd = vdev->fd; vdev->bars[i].nr = i; QLIST_INIT(&vdev->bars[i].quirks); } reg_info.index = VFIO_PCI_ROM_REGION_INDEX; ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info); if (ret) { error_report("vfio: Error getting ROM info: %m"); goto error; } DPRINTF("Device %s ROM:\n", name); DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n", (unsigned long)reg_info.size, (unsigned long)reg_info.offset, (unsigned long)reg_info.flags); vdev->rom_size = reg_info.size; vdev->rom_offset = reg_info.offset; reg_info.index = VFIO_PCI_CONFIG_REGION_INDEX; ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info); if (ret) { error_report("vfio: Error getting config info: %m"); goto error; } DPRINTF("Device %s config:\n", name); DPRINTF(" size: 0x%lx, offset: 0x%lx, flags: 0x%lx\n", (unsigned long)reg_info.size, (unsigned long)reg_info.offset, (unsigned long)reg_info.flags); vdev->config_size = reg_info.size; if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) { vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS; } vdev->config_offset = reg_info.offset; if ((vdev->features & VFIO_FEATURE_ENABLE_VGA) && dev_info.num_regions > VFIO_PCI_VGA_REGION_INDEX) { struct vfio_region_info vga_info = { .argsz = sizeof(vga_info), .index = VFIO_PCI_VGA_REGION_INDEX, }; ret = ioctl(vdev->fd, VFIO_DEVICE_GET_REGION_INFO, &vga_info); if (ret) { error_report( "vfio: Device does not support requested feature x-vga"); goto error; } if (!(vga_info.flags & VFIO_REGION_INFO_FLAG_READ) || !(vga_info.flags & VFIO_REGION_INFO_FLAG_WRITE) || vga_info.size < 0xbffff + 1) { error_report("vfio: Unexpected VGA info, flags 0x%lx, size 0x%lx", (unsigned long)vga_info.flags, (unsigned long)vga_info.size); goto error; } vdev->vga.fd_offset = vga_info.offset; vdev->vga.fd = vdev->fd; vdev->vga.region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE; vdev->vga.region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM; QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_MEM].quirks); vdev->vga.region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE; vdev->vga.region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO; QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_LO].quirks); vdev->vga.region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE; vdev->vga.region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI; QLIST_INIT(&vdev->vga.region[QEMU_PCI_VGA_IO_HI].quirks); vdev->has_vga = true; } irq_info.index = VFIO_PCI_ERR_IRQ_INDEX; ret = ioctl(vdev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info); if (ret) { /* This can fail for an old kernel or legacy PCI dev */ DPRINTF("VFIO_DEVICE_GET_IRQ_INFO failure ret=%d\n", ret); ret = 0; } else if (irq_info.count == 1) { vdev->pci_aer = true; } else { error_report("vfio: Warning: " "Could not enable error recovery for the device\n"); } error: if (ret) { QLIST_REMOVE(vdev, next); vdev->group = NULL; close(vdev->fd); } return ret; }
15,700
0
static void loop_filter(H264Context *h, H264SliceContext *sl, int start_x, int end_x) { uint8_t *dest_y, *dest_cb, *dest_cr; int linesize, uvlinesize, mb_x, mb_y; const int end_mb_y = h->mb_y + FRAME_MBAFF(h); const int old_slice_type = sl->slice_type; const int pixel_shift = h->pixel_shift; const int block_h = 16 >> h->chroma_y_shift; if (h->deblocking_filter) { for (mb_x = start_x; mb_x < end_x; mb_x++) for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) { int mb_xy, mb_type; mb_xy = h->mb_xy = mb_x + mb_y * h->mb_stride; sl->slice_num = h->slice_table[mb_xy]; mb_type = h->cur_pic.mb_type[mb_xy]; sl->list_count = h->list_counts[mb_xy]; if (FRAME_MBAFF(h)) h->mb_mbaff = h->mb_field_decoding_flag = !!IS_INTERLACED(mb_type); h->mb_x = mb_x; h->mb_y = mb_y; dest_y = h->cur_pic.f.data[0] + ((mb_x << pixel_shift) + mb_y * h->linesize) * 16; dest_cb = h->cur_pic.f.data[1] + (mb_x << pixel_shift) * (8 << CHROMA444(h)) + mb_y * h->uvlinesize * block_h; dest_cr = h->cur_pic.f.data[2] + (mb_x << pixel_shift) * (8 << CHROMA444(h)) + mb_y * h->uvlinesize * block_h; // FIXME simplify above if (MB_FIELD(h)) { linesize = sl->mb_linesize = h->linesize * 2; uvlinesize = sl->mb_uvlinesize = h->uvlinesize * 2; if (mb_y & 1) { // FIXME move out of this function? dest_y -= h->linesize * 15; dest_cb -= h->uvlinesize * (block_h - 1); dest_cr -= h->uvlinesize * (block_h - 1); } } else { linesize = sl->mb_linesize = h->linesize; uvlinesize = sl->mb_uvlinesize = h->uvlinesize; } backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0); if (fill_filter_caches(h, sl, mb_type)) continue; sl->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.qscale_table[mb_xy]); sl->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.qscale_table[mb_xy]); if (FRAME_MBAFF(h)) { ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize); } else { ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize); } } } sl->slice_type = old_slice_type; h->mb_x = end_x; h->mb_y = end_mb_y - FRAME_MBAFF(h); sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale); sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale); }
15,701
0
static void xenfb_update(void *opaque) { struct XenFB *xenfb = opaque; DisplaySurface *surface; int i; if (xenfb->c.xendev.be_state != XenbusStateConnected) return; if (!xenfb->feature_update) { /* we don't get update notifications, thus use the * sledge hammer approach ... */ xenfb->up_fullscreen = 1; } /* resize if needed */ if (xenfb->do_resize) { pixman_format_code_t format; xenfb->do_resize = 0; switch (xenfb->depth) { case 16: case 32: /* console.c supported depth -> buffer can be used directly */ format = qemu_default_pixman_format(xenfb->depth, true); surface = qemu_create_displaysurface_from (xenfb->width, xenfb->height, format, xenfb->row_stride, xenfb->pixels + xenfb->offset); break; default: /* we must convert stuff */ surface = qemu_create_displaysurface(xenfb->width, xenfb->height); break; } dpy_gfx_replace_surface(xenfb->c.con, surface); xen_pv_printf(&xenfb->c.xendev, 1, "update: resizing: %dx%d @ %d bpp%s\n", xenfb->width, xenfb->height, xenfb->depth, is_buffer_shared(surface) ? " (shared)" : ""); xenfb->up_fullscreen = 1; } /* run queued updates */ if (xenfb->up_fullscreen) { xen_pv_printf(&xenfb->c.xendev, 3, "update: fullscreen\n"); xenfb_guest_copy(xenfb, 0, 0, xenfb->width, xenfb->height); } else if (xenfb->up_count) { xen_pv_printf(&xenfb->c.xendev, 3, "update: %d rects\n", xenfb->up_count); for (i = 0; i < xenfb->up_count; i++) xenfb_guest_copy(xenfb, xenfb->up_rects[i].x, xenfb->up_rects[i].y, xenfb->up_rects[i].w, xenfb->up_rects[i].h); } else { xen_pv_printf(&xenfb->c.xendev, 3, "update: nothing\n"); } xenfb->up_count = 0; xenfb->up_fullscreen = 0; }
15,702
0
petalogix_s3adsp1800_init(MachineState *machine) { ram_addr_t ram_size = machine->ram_size; const char *cpu_model = machine->cpu_model; DeviceState *dev; MicroBlazeCPU *cpu; DriveInfo *dinfo; int i; hwaddr ddr_base = MEMORY_BASEADDR; MemoryRegion *phys_lmb_bram = g_new(MemoryRegion, 1); MemoryRegion *phys_ram = g_new(MemoryRegion, 1); qemu_irq irq[32]; MemoryRegion *sysmem = get_system_memory(); /* init CPUs */ if (cpu_model == NULL) { cpu_model = "microblaze"; } cpu = cpu_mb_init(cpu_model); /* Attach emulated BRAM through the LMB. */ memory_region_init_ram(phys_lmb_bram, NULL, "petalogix_s3adsp1800.lmb_bram", LMB_BRAM_SIZE, &error_abort); vmstate_register_ram_global(phys_lmb_bram); memory_region_add_subregion(sysmem, 0x00000000, phys_lmb_bram); memory_region_init_ram(phys_ram, NULL, "petalogix_s3adsp1800.ram", ram_size, &error_abort); vmstate_register_ram_global(phys_ram); memory_region_add_subregion(sysmem, ddr_base, phys_ram); dinfo = drive_get(IF_PFLASH, 0, 0); pflash_cfi01_register(FLASH_BASEADDR, NULL, "petalogix_s3adsp1800.flash", FLASH_SIZE, dinfo ? blk_bs(blk_by_legacy_dinfo(dinfo)) : NULL, (64 * 1024), FLASH_SIZE >> 16, 1, 0x89, 0x18, 0x0000, 0x0, 1); dev = qdev_create(NULL, "xlnx.xps-intc"); qdev_prop_set_uint32(dev, "kind-of-intr", 1 << ETHLITE_IRQ | 1 << UARTLITE_IRQ); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(DEVICE(cpu), MB_CPU_IRQ)); for (i = 0; i < 32; i++) { irq[i] = qdev_get_gpio_in(dev, i); } sysbus_create_simple("xlnx.xps-uartlite", UARTLITE_BASEADDR, irq[UARTLITE_IRQ]); /* 2 timers at irq 2 @ 62 Mhz. */ dev = qdev_create(NULL, "xlnx.xps-timer"); qdev_prop_set_uint32(dev, "one-timer-only", 0); qdev_prop_set_uint32(dev, "clock-frequency", 62 * 1000000); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, TIMER_BASEADDR); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ]); qemu_check_nic_model(&nd_table[0], "xlnx.xps-ethernetlite"); dev = qdev_create(NULL, "xlnx.xps-ethernetlite"); qdev_set_nic_properties(dev, &nd_table[0]); qdev_prop_set_uint32(dev, "tx-ping-pong", 0); qdev_prop_set_uint32(dev, "rx-ping-pong", 0); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, ETHLITE_BASEADDR); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[ETHLITE_IRQ]); microblaze_load_kernel(cpu, ddr_base, ram_size, machine->initrd_filename, BINARY_DEVICE_TREE_FILE, machine_cpu_reset); }
15,703
0
static void mainstone_common_init(ram_addr_t ram_size, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model, enum mainstone_model_e model, int arm_id) { uint32_t sector_len = 256 * 1024; target_phys_addr_t mainstone_flash_base[] = { MST_FLASH_0, MST_FLASH_1 }; PXA2xxState *cpu; DeviceState *mst_irq; DriveInfo *dinfo; int i; int be; if (!cpu_model) cpu_model = "pxa270-c5"; /* Setup CPU & memory */ cpu = pxa270_init(mainstone_binfo.ram_size, cpu_model); cpu_register_physical_memory(0, MAINSTONE_ROM, qemu_ram_alloc(NULL, "mainstone.rom", MAINSTONE_ROM) | IO_MEM_ROM); #ifdef TARGET_WORDS_BIGENDIAN be = 1; #else be = 0; #endif /* There are two 32MiB flash devices on the board */ for (i = 0; i < 2; i ++) { dinfo = drive_get(IF_PFLASH, 0, i); if (!dinfo) { fprintf(stderr, "Two flash images must be given with the " "'pflash' parameter\n"); exit(1); } if (!pflash_cfi01_register(mainstone_flash_base[i], qemu_ram_alloc(NULL, i ? "mainstone.flash1" : "mainstone.flash0", MAINSTONE_FLASH), dinfo->bdrv, sector_len, MAINSTONE_FLASH / sector_len, 4, 0, 0, 0, 0, be)) { fprintf(stderr, "qemu: Error registering flash memory.\n"); exit(1); } } mst_irq = sysbus_create_simple("mainstone-fpga", MST_FPGA_PHYS, cpu->pic[PXA2XX_PIC_GPIO_0]); /* setup keypad */ printf("map addr %p\n", &map); pxa27x_register_keypad(cpu->kp, map, 0xe0); /* MMC/SD host */ pxa2xx_mmci_handlers(cpu->mmc, NULL, qdev_get_gpio_in(mst_irq, MMC_IRQ)); smc91c111_init(&nd_table[0], MST_ETH_PHYS, qdev_get_gpio_in(mst_irq, ETHERNET_IRQ)); mainstone_binfo.kernel_filename = kernel_filename; mainstone_binfo.kernel_cmdline = kernel_cmdline; mainstone_binfo.initrd_filename = initrd_filename; mainstone_binfo.board_id = arm_id; arm_load_kernel(cpu->env, &mainstone_binfo); }
15,704
0
static void vmxnet3_pci_realize(PCIDevice *pci_dev, Error **errp) { DeviceState *dev = DEVICE(pci_dev); VMXNET3State *s = VMXNET3(pci_dev); int ret; VMW_CBPRN("Starting init..."); memory_region_init_io(&s->bar0, OBJECT(s), &b0_ops, s, "vmxnet3-b0", VMXNET3_PT_REG_SIZE); pci_register_bar(pci_dev, VMXNET3_BAR0_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar0); memory_region_init_io(&s->bar1, OBJECT(s), &b1_ops, s, "vmxnet3-b1", VMXNET3_VD_REG_SIZE); pci_register_bar(pci_dev, VMXNET3_BAR1_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar1); memory_region_init(&s->msix_bar, OBJECT(s), "vmxnet3-msix-bar", VMXNET3_MSIX_BAR_SIZE); pci_register_bar(pci_dev, VMXNET3_MSIX_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->msix_bar); vmxnet3_reset_interrupt_states(s); /* Interrupt pin A */ pci_dev->config[PCI_INTERRUPT_PIN] = 0x01; ret = msi_init(pci_dev, VMXNET3_MSI_OFFSET(s), VMXNET3_MAX_NMSIX_INTRS, VMXNET3_USE_64BIT, VMXNET3_PER_VECTOR_MASK, NULL); /* Any error other than -ENOTSUP(board's MSI support is broken) * is a programming error. Fall back to INTx silently on -ENOTSUP */ assert(!ret || ret == -ENOTSUP); if (!vmxnet3_init_msix(s)) { VMW_WRPRN("Failed to initialize MSI-X, configuration is inconsistent."); } vmxnet3_net_init(s); if (pci_is_express(pci_dev)) { if (pci_bus_is_express(pci_dev->bus)) { pcie_endpoint_cap_init(pci_dev, VMXNET3_EXP_EP_OFFSET); } pcie_dev_ser_num_init(pci_dev, VMXNET3_DSN_OFFSET, vmxnet3_device_serial_num(s)); } register_savevm_live(dev, "vmxnet3-msix", -1, 1, &savevm_vmxnet3_msix, s); }
15,705
0
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { ARMCPU *cpu = arm_env_get_cpu(env); if (env->cp15.c1_sys == value) { /* Skip the TLB flush if nothing actually changed; Linux likes * to do a lot of pointless SCTLR writes. */ return; } env->cp15.c1_sys = value; /* ??? Lots of these bits are not implemented. */ /* This may enable/disable the MMU, so do a TLB flush. */ tlb_flush(CPU(cpu), 1); }
15,706
0
static Visitor *validate_test_init_internal(TestInputVisitorData *data, const char *json_string, va_list *ap) { validate_teardown(data, NULL); data->obj = qobject_from_jsonv(json_string, ap); g_assert(data->obj); data->qiv = qmp_input_visitor_new(data->obj, true); g_assert(data->qiv); return data->qiv; }
15,707
0
static void monitor_find_completion(Monitor *mon, const char *cmdline) { char *args[MAX_ARGS]; int nb_args, len; /* 1. parse the cmdline */ if (parse_cmdline(cmdline, &nb_args, args) < 0) { return; } #ifdef DEBUG_COMPLETION for (i = 0; i < nb_args; i++) { monitor_printf(mon, "arg%d = '%s'\n", i, args[i]); } #endif /* if the line ends with a space, it means we want to complete the next arg */ len = strlen(cmdline); if (len > 0 && qemu_isspace(cmdline[len - 1])) { if (nb_args >= MAX_ARGS) { goto cleanup; } args[nb_args++] = g_strdup(""); } /* 2. auto complete according to args */ monitor_find_completion_by_table(mon, mon->cmd_table, args, nb_args); cleanup: free_cmdline_args(args, nb_args); }
15,708
0
static void gdb_set_cpu_pc(GDBState *s, target_ulong pc) { #if defined(TARGET_I386) cpu_synchronize_state(s->c_cpu); s->c_cpu->eip = pc; #elif defined (TARGET_PPC) s->c_cpu->nip = pc; #elif defined (TARGET_SPARC) s->c_cpu->pc = pc; s->c_cpu->npc = pc + 4; #elif defined (TARGET_ARM) s->c_cpu->regs[15] = pc; #elif defined (TARGET_SH4) s->c_cpu->pc = pc; #elif defined (TARGET_MIPS) s->c_cpu->active_tc.PC = pc; #elif defined (TARGET_MICROBLAZE) s->c_cpu->sregs[SR_PC] = pc; #elif defined (TARGET_CRIS) s->c_cpu->pc = pc; #elif defined (TARGET_ALPHA) s->c_cpu->pc = pc; #elif defined (TARGET_S390X) cpu_synchronize_state(s->c_cpu); s->c_cpu->psw.addr = pc; #endif }
15,709
0
bool hvf_inject_interrupts(CPUState *cpu_state) { int allow_nmi = !(rvmcs(cpu_state->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) & VMCS_INTERRUPTIBILITY_NMI_BLOCKING); X86CPU *x86cpu = X86_CPU(cpu_state); CPUX86State *env = &x86cpu->env; uint64_t idt_info = rvmcs(cpu_state->hvf_fd, VMCS_IDT_VECTORING_INFO); uint64_t info = 0; if (idt_info & VMCS_IDT_VEC_VALID) { uint8_t vector = idt_info & 0xff; uint64_t intr_type = idt_info & VMCS_INTR_T_MASK; info = idt_info; uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON); if (intr_type == VMCS_INTR_T_NMI && reason != EXIT_REASON_TASK_SWITCH) { allow_nmi = 1; vmx_clear_nmi_blocking(cpu_state); } if ((allow_nmi || intr_type != VMCS_INTR_T_NMI)) { info &= ~(1 << 12); /* clear undefined bit */ if (intr_type == VMCS_INTR_T_SWINTR || intr_type == VMCS_INTR_T_PRIV_SWEXCEPTION || intr_type == VMCS_INTR_T_SWEXCEPTION) { uint64_t ins_len = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH); wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, ins_len); } if (vector == EXCEPTION_BP || vector == EXCEPTION_OF) { /* * VT-x requires #BP and #OF to be injected as software * exceptions. */ info &= ~VMCS_INTR_T_MASK; info |= VMCS_INTR_T_SWEXCEPTION; uint64_t ins_len = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH); wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, ins_len); } uint64_t err = 0; if (idt_info & VMCS_INTR_DEL_ERRCODE) { err = rvmcs(cpu_state->hvf_fd, VMCS_IDT_VECTORING_ERROR); wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR, err); } /*printf("reinject %lx err %d\n", info, err);*/ wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info); }; } if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) { if (allow_nmi && !(info & VMCS_INTR_VALID)) { cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI; info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | NMI_VEC; wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info); } else { vmx_set_nmi_window_exiting(cpu_state); } } if (env->hvf_emul->interruptable && (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) && (EFLAGS(env) & IF_MASK) && !(info & VMCS_INTR_VALID)) { int line = cpu_get_pic_interrupt(&x86cpu->env); cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD; if (line >= 0) { wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line | VMCS_INTR_VALID | VMCS_INTR_T_HWINTR); } } if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) { vmx_set_int_window_exiting(cpu_state); } }
15,710
0
static void jpeg_prepare_row(VncState *vs, uint8_t *dst, int x, int y, int count) { if (vs->tight_pixel24) jpeg_prepare_row24(vs, dst, x, y, count); else if (ds_get_bytes_per_pixel(vs->ds) == 4) jpeg_prepare_row32(vs, dst, x, y, count); else jpeg_prepare_row16(vs, dst, x, y, count); }
15,711
0
static int detect_stream_specific(AVFormatContext *avf, int idx) { ConcatContext *cat = avf->priv_data; AVStream *st = cat->avf->streams[idx]; ConcatStream *cs = &cat->cur_file->streams[idx]; AVBitStreamFilterContext *bsf; int ret; if (cat->auto_convert && st->codecpar->codec_id == AV_CODEC_ID_H264 && (st->codecpar->extradata_size < 4 || AV_RB32(st->codecpar->extradata) != 1)) { av_log(cat->avf, AV_LOG_INFO, "Auto-inserting h264_mp4toannexb bitstream filter\n"); if (!(bsf = av_bitstream_filter_init("h264_mp4toannexb"))) { av_log(avf, AV_LOG_ERROR, "h264_mp4toannexb bitstream filter " "required for H.264 streams\n"); return AVERROR_BSF_NOT_FOUND; } cs->bsf = bsf; cs->avctx = avcodec_alloc_context3(NULL); if (!cs->avctx) return AVERROR(ENOMEM); /* This really should be part of the bsf work. Note: input bitstream filtering will not work with bsf that create extradata from the first packet. */ av_freep(&st->codecpar->extradata); st->codecpar->extradata_size = 0; ret = avcodec_parameters_to_context(cs->avctx, st->codecpar); if (ret < 0) { avcodec_free_context(&cs->avctx); return ret; } } return 0; }
15,712
0
void pc_basic_device_init(qemu_irq *isa_irq, FDCtrl **floppy_controller, ISADevice **rtc_state) { int i; DriveInfo *fd[MAX_FD]; PITState *pit; qemu_irq rtc_irq = NULL; qemu_irq *a20_line; ISADevice *i8042, *port92, *vmmouse; qemu_irq *cpu_exit_irq; register_ioport_write(0x80, 1, 1, ioport80_write, NULL); register_ioport_write(0xf0, 1, 1, ioportF0_write, NULL); if (!no_hpet) { DeviceState *hpet = sysbus_try_create_simple("hpet", HPET_BASE, NULL); if (hpet) { for (i = 0; i < 24; i++) { sysbus_connect_irq(sysbus_from_qdev(hpet), i, isa_irq[i]); } rtc_irq = qdev_get_gpio_in(hpet, 0); } } *rtc_state = rtc_init(2000, rtc_irq); qemu_register_boot_set(pc_boot_set, *rtc_state); pit = pit_init(0x40, isa_reserve_irq(0)); pcspk_init(pit); for(i = 0; i < MAX_SERIAL_PORTS; i++) { if (serial_hds[i]) { serial_isa_init(i, serial_hds[i]); } } for(i = 0; i < MAX_PARALLEL_PORTS; i++) { if (parallel_hds[i]) { parallel_init(i, parallel_hds[i]); } } a20_line = qemu_allocate_irqs(handle_a20_line_change, first_cpu, 2); i8042 = isa_create_simple("i8042"); i8042_setup_a20_line(i8042, &a20_line[0]); vmport_init(); vmmouse = isa_try_create("vmmouse"); if (vmmouse) { qdev_prop_set_ptr(&vmmouse->qdev, "ps2_mouse", i8042); } port92 = isa_create_simple("port92"); port92_init(port92, &a20_line[1]); cpu_exit_irq = qemu_allocate_irqs(cpu_request_exit, NULL, 1); DMA_init(0, cpu_exit_irq); for(i = 0; i < MAX_FD; i++) { fd[i] = drive_get(IF_FLOPPY, 0, i); } *floppy_controller = fdctrl_init_isa(fd); }
15,713
0
static void dump_json_image_check(ImageCheck *check, bool quiet) { QString *str; QObject *obj; Visitor *v = qmp_output_visitor_new(&obj); visit_type_ImageCheck(v, NULL, &check, &error_abort); visit_complete(v, &obj); str = qobject_to_json_pretty(obj); assert(str != NULL); qprintf(quiet, "%s\n", qstring_get_str(str)); qobject_decref(obj); visit_free(v); QDECREF(str); }
15,714
0
INLINE bits32 extractFloat32Frac( float32 a ) { return a & 0x007FFFFF; }
15,715
0
int rom_load_fw(void *fw_cfg) { Rom *rom; QTAILQ_FOREACH(rom, &roms, next) { if (!rom->fw_file) { continue; } fw_cfg_add_file(fw_cfg, rom->fw_dir, rom->fw_file, rom->data, rom->romsize); } return 0; }
15,716
0
static inline void gen_branch2(DisasContext *dc, target_ulong pc1, target_ulong pc2, TCGv r_cond) { int l1; l1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1); gen_goto_tb(dc, 0, pc1, pc1 + 4); gen_set_label(l1); gen_goto_tb(dc, 1, pc2, pc2 + 4); }
15,717
0
static int u3_agp_pci_host_init(PCIDevice *d) { pci_config_set_vendor_id(d->config, PCI_VENDOR_ID_APPLE); pci_config_set_device_id(d->config, PCI_DEVICE_ID_APPLE_U3_AGP); /* revision */ d->config[0x08] = 0x00; pci_config_set_class(d->config, PCI_CLASS_BRIDGE_HOST); /* cache line size */ d->config[0x0C] = 0x08; /* latency timer */ d->config[0x0D] = 0x10; return 0; }
15,718
0
static void nographic_update(void *opaque) { uint64_t interval = GUI_REFRESH_INTERVAL; qemu_flush_coalesced_mmio_buffer(); qemu_mod_timer(nographic_timer, interval + qemu_get_clock(rt_clock)); }
15,720
0
static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num, int remaining_sectors, QEMUIOVector *qiov) { BDRVQcowState *s = bs->opaque; int index_in_cluster, n1; int ret; int cur_nr_sectors; /* number of sectors in current iteration */ uint64_t cluster_offset = 0; uint64_t bytes_done = 0; QEMUIOVector hd_qiov; uint8_t *cluster_data = NULL; qemu_iovec_init(&hd_qiov, qiov->niov); qemu_co_mutex_lock(&s->lock); while (remaining_sectors != 0) { /* prepare next request */ cur_nr_sectors = remaining_sectors; if (s->crypt_method) { cur_nr_sectors = MIN(cur_nr_sectors, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors); } ret = qcow2_get_cluster_offset(bs, sector_num << 9, &cur_nr_sectors, &cluster_offset); if (ret < 0) { goto fail; } index_in_cluster = sector_num & (s->cluster_sectors - 1); qemu_iovec_reset(&hd_qiov); qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_nr_sectors * 512); switch (ret) { case QCOW2_CLUSTER_UNALLOCATED: if (bs->backing_hd) { /* read from the base image */ n1 = qcow2_backing_read1(bs->backing_hd, &hd_qiov, sector_num, cur_nr_sectors); if (n1 > 0) { BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); qemu_co_mutex_unlock(&s->lock); ret = bdrv_co_readv(bs->backing_hd, sector_num, n1, &hd_qiov); qemu_co_mutex_lock(&s->lock); if (ret < 0) { goto fail; } } } else { /* Note: in this case, no need to wait */ qemu_iovec_memset(&hd_qiov, 0, 0, 512 * cur_nr_sectors); } break; case QCOW2_CLUSTER_ZERO: if (s->qcow_version < 3) { ret = -EIO; goto fail; } qemu_iovec_memset(&hd_qiov, 0, 0, 512 * cur_nr_sectors); break; case QCOW2_CLUSTER_COMPRESSED: /* add AIO support for compressed blocks ? */ ret = qcow2_decompress_cluster(bs, cluster_offset); if (ret < 0) { goto fail; } qemu_iovec_from_buf(&hd_qiov, 0, s->cluster_cache + index_in_cluster * 512, 512 * cur_nr_sectors); break; case QCOW2_CLUSTER_NORMAL: if ((cluster_offset & 511) != 0) { ret = -EIO; goto fail; } if (s->crypt_method) { /* * For encrypted images, read everything into a temporary * contiguous buffer on which the AES functions can work. */ if (!cluster_data) { cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); } assert(cur_nr_sectors <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors); qemu_iovec_reset(&hd_qiov); qemu_iovec_add(&hd_qiov, cluster_data, 512 * cur_nr_sectors); } BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); qemu_co_mutex_unlock(&s->lock); ret = bdrv_co_readv(bs->file, (cluster_offset >> 9) + index_in_cluster, cur_nr_sectors, &hd_qiov); qemu_co_mutex_lock(&s->lock); if (ret < 0) { goto fail; } if (s->crypt_method) { qcow2_encrypt_sectors(s, sector_num, cluster_data, cluster_data, cur_nr_sectors, 0, &s->aes_decrypt_key); qemu_iovec_from_buf(qiov, bytes_done, cluster_data, 512 * cur_nr_sectors); } break; default: g_assert_not_reached(); ret = -EIO; goto fail; } remaining_sectors -= cur_nr_sectors; sector_num += cur_nr_sectors; bytes_done += cur_nr_sectors * 512; } ret = 0; fail: qemu_co_mutex_unlock(&s->lock); qemu_iovec_destroy(&hd_qiov); qemu_vfree(cluster_data); return ret; }
15,721
0
static int sh_pci_init_device(SysBusDevice *dev) { SHPCIState *s; int i; s = FROM_SYSBUS(SHPCIState, dev); for (i = 0; i < 4; i++) { sysbus_init_irq(dev, &s->irq[i]); } s->bus = pci_register_bus(&s->busdev.qdev, "pci", sh_pci_set_irq, sh_pci_map_irq, s->irq, get_system_memory(), get_system_io(), PCI_DEVFN(0, 0), 4); memory_region_init_io(&s->memconfig_p4, &sh_pci_reg_ops, s, "sh_pci", 0x224); memory_region_init_alias(&s->memconfig_a7, "sh_pci.2", &s->memconfig_a7, 0, 0x224); isa_mmio_setup(&s->isa, 0x40000); sysbus_init_mmio_cb2(dev, sh_pci_map, sh_pci_unmap); sysbus_init_mmio_region(dev, &s->memconfig_a7); sysbus_init_mmio_region(dev, &s->isa); s->dev = pci_create_simple(s->bus, PCI_DEVFN(0, 0), "sh_pci_host"); return 0; }
15,722
0
static uint64_t pxa2xx_lcdc_read(void *opaque, hwaddr offset, unsigned size) { PXA2xxLCDState *s = (PXA2xxLCDState *) opaque; int ch; switch (offset) { case LCCR0: return s->control[0]; case LCCR1: return s->control[1]; case LCCR2: return s->control[2]; case LCCR3: return s->control[3]; case LCCR4: return s->control[4]; case LCCR5: return s->control[5]; case OVL1C1: return s->ovl1c[0]; case OVL1C2: return s->ovl1c[1]; case OVL2C1: return s->ovl2c[0]; case OVL2C2: return s->ovl2c[1]; case CCR: return s->ccr; case CMDCR: return s->cmdcr; case TRGBR: return s->trgbr; case TCR: return s->tcr; case 0x200 ... 0x1000: /* DMA per-channel registers */ ch = (offset - 0x200) >> 4; if (!(ch >= 0 && ch < PXA_LCDDMA_CHANS)) goto fail; switch (offset & 0xf) { case DMA_FDADR: return s->dma_ch[ch].descriptor; case DMA_FSADR: return s->dma_ch[ch].source; case DMA_FIDR: return s->dma_ch[ch].id; case DMA_LDCMD: return s->dma_ch[ch].command; default: goto fail; } case FBR0: return s->dma_ch[0].branch; case FBR1: return s->dma_ch[1].branch; case FBR2: return s->dma_ch[2].branch; case FBR3: return s->dma_ch[3].branch; case FBR4: return s->dma_ch[4].branch; case FBR5: return s->dma_ch[5].branch; case FBR6: return s->dma_ch[6].branch; case BSCNTR: return s->bscntr; case PRSR: return 0; case LCSR0: return s->status[0]; case LCSR1: return s->status[1]; case LIIDR: return s->liidr; default: fail: hw_error("%s: Bad offset " REG_FMT "\n", __FUNCTION__, offset); } return 0; }
15,724
0
int main_loop(void) { #ifndef _WIN32 struct pollfd ufds[MAX_IO_HANDLERS + 1], *pf; IOHandlerRecord *ioh, *ioh_next; uint8_t buf[4096]; int n, max_size; #endif int ret, timeout; CPUState *env = global_env; for(;;) { if (vm_running) { ret = cpu_exec(env); if (shutdown_requested) { ret = EXCP_INTERRUPT; break; } if (reset_requested) { reset_requested = 0; qemu_system_reset(); ret = EXCP_INTERRUPT; } if (ret == EXCP_DEBUG) { vm_stop(EXCP_DEBUG); } /* if hlt instruction, we wait until the next IRQ */ /* XXX: use timeout computed from timers */ if (ret == EXCP_HLT) timeout = 10; else timeout = 0; } else { timeout = 10; } #ifdef _WIN32 if (timeout > 0) Sleep(timeout); #else /* poll any events */ /* XXX: separate device handlers from system ones */ pf = ufds; for(ioh = first_io_handler; ioh != NULL; ioh = ioh->next) { if (!ioh->fd_can_read) { max_size = 0; pf->fd = ioh->fd; pf->events = POLLIN; ioh->ufd = pf; pf++; } else { max_size = ioh->fd_can_read(ioh->opaque); if (max_size > 0) { if (max_size > sizeof(buf)) max_size = sizeof(buf); pf->fd = ioh->fd; pf->events = POLLIN; ioh->ufd = pf; pf++; } else { ioh->ufd = NULL; } } ioh->max_size = max_size; } ret = poll(ufds, pf - ufds, timeout); if (ret > 0) { /* XXX: better handling of removal */ for(ioh = first_io_handler; ioh != NULL; ioh = ioh_next) { ioh_next = ioh->next; pf = ioh->ufd; if (pf) { if (pf->revents & POLLIN) { if (ioh->max_size == 0) { /* just a read event */ ioh->fd_read(ioh->opaque, NULL, 0); } else { n = read(ioh->fd, buf, ioh->max_size); if (n >= 0) { ioh->fd_read(ioh->opaque, buf, n); } else if (errno != EAGAIN) { ioh->fd_read(ioh->opaque, NULL, -errno); } } } } } } #if defined(CONFIG_SLIRP) /* XXX: merge with poll() */ if (slirp_inited) { fd_set rfds, wfds, xfds; int nfds; struct timeval tv; nfds = -1; FD_ZERO(&rfds); FD_ZERO(&wfds); FD_ZERO(&xfds); slirp_select_fill(&nfds, &rfds, &wfds, &xfds); tv.tv_sec = 0; tv.tv_usec = 0; ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv); if (ret >= 0) { slirp_select_poll(&rfds, &wfds, &xfds); } } #endif #endif if (vm_running) { qemu_run_timers(&active_timers[QEMU_TIMER_VIRTUAL], qemu_get_clock(vm_clock)); if (audio_enabled) { /* XXX: add explicit timer */ SB16_run(); } /* run dma transfers, if any */ DMA_run(); } /* real time timers */ qemu_run_timers(&active_timers[QEMU_TIMER_REALTIME], qemu_get_clock(rt_clock)); } cpu_disable_ticks(); return ret; }
15,725
0
static void intel_hda_corb_run(IntelHDAState *d) { hwaddr addr; uint32_t rp, verb; if (d->ics & ICH6_IRS_BUSY) { dprint(d, 2, "%s: [icw] verb 0x%08x\n", __FUNCTION__, d->icw); intel_hda_send_command(d, d->icw); return; } for (;;) { if (!(d->corb_ctl & ICH6_CORBCTL_RUN)) { dprint(d, 2, "%s: !run\n", __FUNCTION__); return; } if ((d->corb_rp & 0xff) == d->corb_wp) { dprint(d, 2, "%s: corb ring empty\n", __FUNCTION__); return; } if (d->rirb_count == d->rirb_cnt) { dprint(d, 2, "%s: rirb count reached\n", __FUNCTION__); return; } rp = (d->corb_rp + 1) & 0xff; addr = intel_hda_addr(d->corb_lbase, d->corb_ubase); verb = ldl_le_pci_dma(&d->pci, addr + 4*rp); d->corb_rp = rp; dprint(d, 2, "%s: [rp 0x%x] verb 0x%08x\n", __FUNCTION__, rp, verb); intel_hda_send_command(d, verb); } }
15,726
0
static inline int valid_flags(int flag) { if (flag & O_NOCTTY || flag & O_NONBLOCK || flag & O_ASYNC || flag & O_CLOEXEC) return 0; else return 1; }
15,727
0
static void vfio_map_bar(VFIOPCIDevice *vdev, int nr) { VFIOBAR *bar = &vdev->bars[nr]; uint64_t size = bar->region.size; char name[64]; uint32_t pci_bar; uint8_t type; int ret; /* Skip both unimplemented BARs and the upper half of 64bit BARS. */ if (!size) { return; } snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d", vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function, nr); /* Determine what type of BAR this is for registration */ ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar), vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr)); if (ret != sizeof(pci_bar)) { error_report("vfio: Failed to read BAR %d (%m)", nr); return; } pci_bar = le32_to_cpu(pci_bar); bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO); bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64); type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK : ~PCI_BASE_ADDRESS_MEM_MASK); /* A "slow" read/write mapping underlies all BARs */ memory_region_init_io(&bar->region.mem, OBJECT(vdev), &vfio_region_ops, bar, name, size); pci_register_bar(&vdev->pdev, nr, type, &bar->region.mem); /* * We can't mmap areas overlapping the MSIX vector table, so we * potentially insert a direct-mapped subregion before and after it. */ if (vdev->msix && vdev->msix->table_bar == nr) { size = vdev->msix->table_offset & qemu_real_host_page_mask; } strncat(name, " mmap", sizeof(name) - strlen(name) - 1); if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem, &bar->region.mmap_mem, &bar->region.mmap, size, 0, name)) { error_report("%s unsupported. Performance may be slow", name); } if (vdev->msix && vdev->msix->table_bar == nr) { uint64_t start; start = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset + (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE)); size = start < bar->region.size ? bar->region.size - start : 0; strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1); /* VFIOMSIXInfo contains another MemoryRegion for this mapping */ if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem, &vdev->msix->mmap_mem, &vdev->msix->mmap, size, start, name)) { error_report("%s unsupported. Performance may be slow", name); } } vfio_bar_quirk_setup(vdev, nr); }
15,729
0
static void ich9_cc_write(void *opaque, hwaddr addr, uint64_t val, unsigned len) { ICH9LPCState *lpc = (ICH9LPCState *)opaque; ich9_cc_addr_len(&addr, &len); memcpy(lpc->chip_config + addr, &val, len); pci_bus_fire_intx_routing_notifier(lpc->d.bus); ich9_cc_update(lpc); }
15,730
0
opts_type_int(Visitor *v, int64_t *obj, const char *name, Error **errp) { OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v); const QemuOpt *opt; const char *str; long long val; char *endptr; if (ov->list_mode == LM_SIGNED_INTERVAL) { *obj = ov->range_next.s; return; } opt = lookup_scalar(ov, name, errp); if (!opt) { return; } str = opt->str ? opt->str : ""; /* we've gotten past lookup_scalar() */ assert(ov->list_mode == LM_NONE || ov->list_mode == LM_IN_PROGRESS); errno = 0; val = strtoll(str, &endptr, 0); if (errno == 0 && endptr > str && INT64_MIN <= val && val <= INT64_MAX) { if (*endptr == '\0') { *obj = val; processed(ov, name); return; } if (*endptr == '-' && ov->list_mode == LM_IN_PROGRESS) { long long val2; str = endptr + 1; val2 = strtoll(str, &endptr, 0); if (errno == 0 && endptr > str && *endptr == '\0' && INT64_MIN <= val2 && val2 <= INT64_MAX && val <= val2) { ov->range_next.s = val; ov->range_limit.s = val2; ov->list_mode = LM_SIGNED_INTERVAL; /* as if entering on the top */ *obj = ov->range_next.s; return; } } } error_set(errp, QERR_INVALID_PARAMETER_VALUE, opt->name, (ov->list_mode == LM_NONE) ? "an int64 value" : "an int64 value or range"); }
15,732
0
float32 HELPER(ucf64_df2sf)(float64 x, CPUUniCore32State *env) { return float64_to_float32(x, &env->ucf64.fp_status); }
15,733
0
static int rm_read_audio_stream_info(AVFormatContext *s, ByteIOContext *pb, AVStream *st, int read_all) { RMDemuxContext *rm = s->priv_data; char buf[256]; uint32_t version; int i; /* ra type header */ version = get_be32(pb); /* version */ if (((version >> 16) & 0xff) == 3) { int64_t startpos = url_ftell(pb); /* very old version */ for(i = 0; i < 14; i++) get_byte(pb); get_str8(pb, s->title, sizeof(s->title)); get_str8(pb, s->author, sizeof(s->author)); get_str8(pb, s->copyright, sizeof(s->copyright)); get_str8(pb, s->comment, sizeof(s->comment)); if ((startpos + (version & 0xffff)) >= url_ftell(pb) + 2) { // fourcc (should always be "lpcJ") get_byte(pb); get_str8(pb, buf, sizeof(buf)); } // Skip extra header crap (this should never happen) if ((startpos + (version & 0xffff)) > url_ftell(pb)) url_fskip(pb, (version & 0xffff) + startpos - url_ftell(pb)); st->codec->sample_rate = 8000; st->codec->channels = 1; st->codec->codec_type = CODEC_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_RA_144; } else { int flavor, sub_packet_h, coded_framesize, sub_packet_size; /* old version (4) */ get_be32(pb); /* .ra4 */ get_be32(pb); /* data size */ get_be16(pb); /* version2 */ get_be32(pb); /* header size */ flavor= get_be16(pb); /* add codec info / flavor */ rm->coded_framesize = coded_framesize = get_be32(pb); /* coded frame size */ get_be32(pb); /* ??? */ get_be32(pb); /* ??? */ get_be32(pb); /* ??? */ rm->sub_packet_h = sub_packet_h = get_be16(pb); /* 1 */ st->codec->block_align= get_be16(pb); /* frame size */ rm->sub_packet_size = sub_packet_size = get_be16(pb); /* sub packet size */ get_be16(pb); /* ??? */ if (((version >> 16) & 0xff) == 5) { get_be16(pb); get_be16(pb); get_be16(pb); } st->codec->sample_rate = get_be16(pb); get_be32(pb); st->codec->channels = get_be16(pb); if (((version >> 16) & 0xff) == 5) { get_be32(pb); buf[0] = get_byte(pb); buf[1] = get_byte(pb); buf[2] = get_byte(pb); buf[3] = get_byte(pb); buf[4] = 0; } else { get_str8(pb, buf, sizeof(buf)); /* desc */ get_str8(pb, buf, sizeof(buf)); /* desc */ } st->codec->codec_type = CODEC_TYPE_AUDIO; if (!strcmp(buf, "dnet")) { st->codec->codec_id = CODEC_ID_AC3; st->need_parsing = AVSTREAM_PARSE_FULL; } else if (!strcmp(buf, "28_8")) { st->codec->codec_id = CODEC_ID_RA_288; st->codec->extradata_size= 0; rm->audio_framesize = st->codec->block_align; st->codec->block_align = coded_framesize; if(rm->audio_framesize >= UINT_MAX / sub_packet_h){ av_log(s, AV_LOG_ERROR, "rm->audio_framesize * sub_packet_h too large\n"); return -1; } rm->audiobuf = av_malloc(rm->audio_framesize * sub_packet_h); } else if ((!strcmp(buf, "cook")) || (!strcmp(buf, "atrc")) || (!strcmp(buf, "sipr"))) { int codecdata_length, i; get_be16(pb); get_byte(pb); if (((version >> 16) & 0xff) == 5) get_byte(pb); codecdata_length = get_be32(pb); if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){ av_log(s, AV_LOG_ERROR, "codecdata_length too large\n"); return -1; } if(sub_packet_size <= 0){ av_log(s, AV_LOG_ERROR, "sub_packet_size is invalid\n"); return -1; } if (!strcmp(buf, "cook")) st->codec->codec_id = CODEC_ID_COOK; else if (!strcmp(buf, "sipr")) st->codec->codec_id = CODEC_ID_SIPR; else st->codec->codec_id = CODEC_ID_ATRAC3; st->codec->extradata_size= codecdata_length; st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); for(i = 0; i < codecdata_length; i++) ((uint8_t*)st->codec->extradata)[i] = get_byte(pb); rm->audio_framesize = st->codec->block_align; st->codec->block_align = rm->sub_packet_size; if(rm->audio_framesize >= UINT_MAX / sub_packet_h){ av_log(s, AV_LOG_ERROR, "rm->audio_framesize * sub_packet_h too large\n"); return -1; } rm->audiobuf = av_malloc(rm->audio_framesize * sub_packet_h); } else if (!strcmp(buf, "raac") || !strcmp(buf, "racp")) { int codecdata_length, i; get_be16(pb); get_byte(pb); if (((version >> 16) & 0xff) == 5) get_byte(pb); st->codec->codec_id = CODEC_ID_AAC; codecdata_length = get_be32(pb); if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){ av_log(s, AV_LOG_ERROR, "codecdata_length too large\n"); return -1; } if (codecdata_length >= 1) { st->codec->extradata_size = codecdata_length - 1; st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); get_byte(pb); for(i = 0; i < st->codec->extradata_size; i++) ((uint8_t*)st->codec->extradata)[i] = get_byte(pb); } } else { st->codec->codec_id = CODEC_ID_NONE; av_strlcpy(st->codec->codec_name, buf, sizeof(st->codec->codec_name)); } if (read_all) { get_byte(pb); get_byte(pb); get_byte(pb); get_str8(pb, s->title, sizeof(s->title)); get_str8(pb, s->author, sizeof(s->author)); get_str8(pb, s->copyright, sizeof(s->copyright)); get_str8(pb, s->comment, sizeof(s->comment)); } } return 0; }
15,734
0
void ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number) { int full_frame= 0; avpriv_align_put_bits(&s->pb); put_bits(&s->pb, 1, 1); /* marker */ put_bits(&s->pb, 1, (s->pict_type == AV_PICTURE_TYPE_P)); put_bits(&s->pb, 1, 0); /* not PB frame */ put_bits(&s->pb, 5, s->qscale); if (s->pict_type == AV_PICTURE_TYPE_I) { /* specific MPEG like DC coding not used */ } /* if multiple packets per frame are sent, the position at which to display the macroblocks is coded here */ if(!full_frame){ put_bits(&s->pb, 6, 0); /* mb_x */ put_bits(&s->pb, 6, 0); /* mb_y */ put_bits(&s->pb, 12, s->mb_width * s->mb_height); } put_bits(&s->pb, 3, 0); /* ignored */ }
15,735
0
int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal) { int count, i; if (avctx->bits_per_coded_sample > 8) { av_log(avctx, AV_LOG_ERROR, "bit_per_coded_sample > 8 not supported\n"); return AVERROR_INVALIDDATA; } count = 1 << avctx->bits_per_coded_sample; if (avctx->extradata_size < count * 3) { av_log(avctx, AV_LOG_ERROR, "palette data underflow\n"); return AVERROR_INVALIDDATA; } for (i=0; i < count; i++) { pal[i] = 0xFF000000 | AV_RB24( avctx->extradata + i*3 ); } return 0; }
15,736
1
static CharDriverState* create_eventfd_chr_device(IVShmemState *s, EventNotifier *n, int vector) { /* create a event character device based on the passed eventfd */ PCIDevice *pdev = PCI_DEVICE(s); int eventfd = event_notifier_get_fd(n); CharDriverState *chr; s->msi_vectors[vector].pdev = pdev; chr = qemu_chr_open_eventfd(eventfd); if (chr == NULL) { error_report("creating chardriver for eventfd %d failed", eventfd); return NULL; } qemu_chr_fe_claim_no_fail(chr); /* if MSI is supported we need multiple interrupts */ if (ivshmem_has_feature(s, IVSHMEM_MSI)) { s->msi_vectors[vector].pdev = PCI_DEVICE(s); qemu_chr_add_handlers(chr, ivshmem_can_receive, fake_irqfd, ivshmem_event, &s->msi_vectors[vector]); } else { qemu_chr_add_handlers(chr, ivshmem_can_receive, ivshmem_receive, ivshmem_event, s); } return chr; }
15,737
1
void ff_hcscale_fast_mmxext(SwsContext *c, int16_t *dst1, int16_t *dst2, int dstWidth, const uint8_t *src1, const uint8_t *src2, int srcW, int xInc) { int32_t *filterPos = c->hChrFilterPos; int16_t *filter = c->hChrFilter; void *mmxextFilterCode = c->chrMmxextFilterCode; int i; #if ARCH_X86_64 DECLARE_ALIGNED(8, uint64_t, retsave); #else #if defined(PIC) DECLARE_ALIGNED(8, uint64_t, ebxsave); #endif #endif __asm__ volatile( #if ARCH_X86_64 "mov -8(%%rsp), %%"FF_REG_a" \n\t" "mov %%"FF_REG_a", %7 \n\t" // retsave #else #if defined(PIC) "mov %%"FF_REG_b", %7 \n\t" // ebxsave #endif #endif "pxor %%mm7, %%mm7 \n\t" "mov %0, %%"FF_REG_c" \n\t" "mov %1, %%"FF_REG_D" \n\t" "mov %2, %%"FF_REG_d" \n\t" "mov %3, %%"FF_REG_b" \n\t" "xor %%"FF_REG_a", %%"FF_REG_a" \n\t" // i PREFETCH" (%%"FF_REG_c") \n\t" PREFETCH" 32(%%"FF_REG_c") \n\t" PREFETCH" 64(%%"FF_REG_c") \n\t" CALL_MMXEXT_FILTER_CODE CALL_MMXEXT_FILTER_CODE CALL_MMXEXT_FILTER_CODE CALL_MMXEXT_FILTER_CODE "xor %%"FF_REG_a", %%"FF_REG_a" \n\t" // i "mov %5, %%"FF_REG_c" \n\t" // src2 "mov %6, %%"FF_REG_D" \n\t" // dst2 PREFETCH" (%%"FF_REG_c") \n\t" PREFETCH" 32(%%"FF_REG_c") \n\t" PREFETCH" 64(%%"FF_REG_c") \n\t" CALL_MMXEXT_FILTER_CODE CALL_MMXEXT_FILTER_CODE CALL_MMXEXT_FILTER_CODE CALL_MMXEXT_FILTER_CODE #if ARCH_X86_64 "mov %7, %%"FF_REG_a" \n\t" "mov %%"FF_REG_a", -8(%%rsp) \n\t" #else #if defined(PIC) "mov %7, %%"FF_REG_b" \n\t" #endif #endif :: "m" (src1), "m" (dst1), "m" (filter), "m" (filterPos), "m" (mmxextFilterCode), "m" (src2), "m"(dst2) #if ARCH_X86_64 ,"m"(retsave) #else #if defined(PIC) ,"m" (ebxsave) #endif #endif : "%"FF_REG_a, "%"FF_REG_c, "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_D #if ARCH_X86_64 || !defined(PIC) ,"%"FF_REG_b #endif ); for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) { dst1[i] = src1[srcW-1]*128; dst2[i] = src2[srcW-1]*128; } }
15,738
1
static always_inline void gen_sradi (DisasContext *ctx, int n) { int sh = SH(ctx->opcode) + (n << 5); if (sh != 0) { int l1, l2; TCGv t0; l1 = gen_new_label(); l2 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1); t0 = tcg_temp_new(TCG_TYPE_TL); tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1ULL << sh) - 1); tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1); tcg_gen_ori_tl(cpu_xer, cpu_xer, 1 << XER_CA); tcg_gen_br(l2); gen_set_label(l1); tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA)); gen_set_label(l2); tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh); } else { tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); tcg_gen_andi_tl(cpu_xer, cpu_xer, ~(1 << XER_CA)); } if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); }
15,739
1
static inline int wnv1_get_code(WNV1Context *w, int base_value) { int v = get_vlc2(&w->gb, code_vlc.table, CODE_VLC_BITS, 1); if (v == 15) return ff_reverse[get_bits(&w->gb, 8 - w->shift)]; else return base_value + ((v - 7) << w->shift); }
15,740
1
static void cirrus_mem_writeb_mode4and5_16bpp(CirrusVGAState * s, unsigned mode, unsigned offset, uint32_t mem_value) { int x; unsigned val = mem_value; uint8_t *dst; dst = s->vram_ptr + offset; for (x = 0; x < 8; x++) { if (val & 0x80) { *dst = s->cirrus_shadow_gr1; *(dst + 1) = s->gr[0x11]; } else if (mode == 5) { *dst = s->cirrus_shadow_gr0; *(dst + 1) = s->gr[0x10]; } val <<= 1; dst += 2; } cpu_physical_memory_set_dirty(s->vram_offset + offset); cpu_physical_memory_set_dirty(s->vram_offset + offset + 15); }
15,741
1
static void raw_refresh_limits(BlockDriverState *bs, Error **errp) { BDRVRawState *s = bs->opaque; struct stat st; if (!fstat(s->fd, &st)) { if (S_ISBLK(st.st_mode) || S_ISCHR(st.st_mode)) { int ret = hdev_get_max_transfer_length(bs, s->fd); if (ret > 0 && ret <= BDRV_REQUEST_MAX_BYTES) { bs->bl.max_transfer = pow2floor(ret); raw_probe_alignment(bs, s->fd, errp); bs->bl.min_mem_alignment = s->buf_align; bs->bl.opt_mem_alignment = MAX(s->buf_align, getpagesize());
15,742
1
int av_image_get_linesize(enum PixelFormat pix_fmt, int width, int plane) { const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[pix_fmt]; int max_step [4]; /* max pixel step for each plane */ int max_step_comp[4]; /* the component for each plane which has the max pixel step */ int s; if (desc->flags & PIX_FMT_BITSTREAM) return (width * (desc->comp[0].step_minus1+1) + 7) >> 3; av_image_fill_max_pixsteps(max_step, max_step_comp, desc); s = (max_step_comp[plane] == 1 || max_step_comp[plane] == 2) ? desc->log2_chroma_w : 0; return max_step[plane] * (((width + (1 << s) - 1)) >> s); }
15,743
1
rdt_parse_packet (AVFormatContext *ctx, PayloadContext *rdt, AVStream *st, AVPacket *pkt, uint32_t *timestamp, const uint8_t *buf, int len, uint16_t rtp_seq, int flags) { int seq = 1, res; AVIOContext pb; if (!rdt->rmctx) return AVERROR(EINVAL); if (rdt->audio_pkt_cnt == 0) { int pos; ffio_init_context(&pb, buf, len, 0, NULL, NULL, NULL, NULL); flags = (flags & RTP_FLAG_KEY) ? 2 : 0; res = ff_rm_parse_packet (rdt->rmctx, &pb, st, rdt->rmst[st->index], len, pkt, &seq, flags, *timestamp); pos = avio_tell(&pb); if (res < 0) return res; if (res > 0) { if (st->codec->codec_id == AV_CODEC_ID_AAC) { memcpy (rdt->buffer, buf + pos, len - pos); rdt->rmctx->pb = avio_alloc_context (rdt->buffer, len - pos, 0, NULL, NULL, NULL, NULL); } goto get_cache; } } else { get_cache: rdt->audio_pkt_cnt = ff_rm_retrieve_cache (rdt->rmctx, rdt->rmctx->pb, st, rdt->rmst[st->index], pkt); if (rdt->audio_pkt_cnt == 0 && st->codec->codec_id == AV_CODEC_ID_AAC) av_freep(&rdt->rmctx->pb); } pkt->stream_index = st->index; pkt->pts = *timestamp; return rdt->audio_pkt_cnt > 0; }
15,744
1
static BlockBackend *img_open_file(const char *filename, QDict *options, const char *fmt, int flags, bool writethrough, bool quiet, bool force_share) { BlockBackend *blk; Error *local_err = NULL; if (!options) { options = qdict_new(); } if (fmt) { qdict_put_str(options, "driver", fmt); } if (force_share) { qdict_put_bool(options, BDRV_OPT_FORCE_SHARE, true); } blk = blk_new_open(filename, NULL, options, flags, &local_err); if (!blk) { error_reportf_err(local_err, "Could not open '%s': ", filename); return NULL; } blk_set_enable_write_cache(blk, !writethrough); if (img_open_password(blk, filename, flags, quiet) < 0) { blk_unref(blk); return NULL; } return blk; }
15,745
0
ff_rm_retrieve_cache (AVFormatContext *s, AVIOContext *pb, AVStream *st, RMStream *ast, AVPacket *pkt) { RMDemuxContext *rm = s->priv_data; assert (rm->audio_pkt_cnt > 0); if (ast->deint_id == DEINT_ID_VBRF || ast->deint_id == DEINT_ID_VBRS) av_get_packet(pb, pkt, ast->sub_packet_lengths[ast->sub_packet_cnt - rm->audio_pkt_cnt]); else { av_new_packet(pkt, st->codec->block_align); memcpy(pkt->data, ast->pkt.data + st->codec->block_align * //FIXME avoid this (ast->sub_packet_h * ast->audio_framesize / st->codec->block_align - rm->audio_pkt_cnt), st->codec->block_align); } rm->audio_pkt_cnt--; if ((pkt->pts = ast->audiotimestamp) != AV_NOPTS_VALUE) { ast->audiotimestamp = AV_NOPTS_VALUE; pkt->flags = AV_PKT_FLAG_KEY; } else pkt->flags = 0; pkt->stream_index = st->index; return rm->audio_pkt_cnt; }
15,746
0
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt) { if (!pkt) { ctx->internal->eof = 1; return 0; } if (ctx->internal->eof) { av_log(ctx, AV_LOG_ERROR, "A non-NULL packet sent after an EOF.\n"); return AVERROR(EINVAL); } if (ctx->internal->buffer_pkt->data || ctx->internal->buffer_pkt->side_data_elems) return AVERROR(EAGAIN); av_packet_move_ref(ctx->internal->buffer_pkt, pkt); return 0; }
15,747
0
static int refresh_thread(void *opaque) { VideoState *is= opaque; while (!is->abort_request) { SDL_Event event; event.type = FF_REFRESH_EVENT; event.user.data1 = opaque; if (!is->refresh && (!is->paused || is->force_refresh)) { is->refresh = 1; SDL_PushEvent(&event); } //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000); } return 0; }
15,749
0
void ff_hpeldsp_init_x86(HpelDSPContext *c, int flags) { int cpu_flags = av_get_cpu_flags(); if (INLINE_MMX(cpu_flags)) hpeldsp_init_mmx(c, flags, cpu_flags); if (EXTERNAL_MMXEXT(cpu_flags)) hpeldsp_init_mmxext(c, flags, cpu_flags); if (EXTERNAL_AMD3DNOW(cpu_flags)) hpeldsp_init_3dnow(c, flags, cpu_flags); if (EXTERNAL_SSE2(cpu_flags)) hpeldsp_init_sse2(c, flags, cpu_flags); }
15,751
0
int av_opt_get(void *obj, const char *name, int search_flags, uint8_t **out_val) { void *dst, *target_obj; const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj); uint8_t *bin, buf[128]; int len, i, ret; if (!o || !target_obj) return AVERROR_OPTION_NOT_FOUND; dst = (uint8_t*)target_obj + o->offset; buf[0] = 0; switch (o->type) { case AV_OPT_TYPE_FLAGS: ret = snprintf(buf, sizeof(buf), "0x%08X", *(int *)dst);break; case AV_OPT_TYPE_INT: ret = snprintf(buf, sizeof(buf), "%d" , *(int *)dst);break; case AV_OPT_TYPE_INT64: ret = snprintf(buf, sizeof(buf), "%"PRId64, *(int64_t*)dst);break; case AV_OPT_TYPE_FLOAT: ret = snprintf(buf, sizeof(buf), "%f" , *(float *)dst);break; case AV_OPT_TYPE_DOUBLE: ret = snprintf(buf, sizeof(buf), "%f" , *(double *)dst);break; case AV_OPT_TYPE_RATIONAL: ret = snprintf(buf, sizeof(buf), "%d/%d", ((AVRational*)dst)->num, ((AVRational*)dst)->den);break; case AV_OPT_TYPE_STRING: if (*(uint8_t**)dst) *out_val = av_strdup(*(uint8_t**)dst); else *out_val = av_strdup(""); return 0; case AV_OPT_TYPE_BINARY: len = *(int*)(((uint8_t *)dst) + sizeof(uint8_t *)); if ((uint64_t)len*2 + 1 > INT_MAX) return AVERROR(EINVAL); if (!(*out_val = av_malloc(len*2 + 1))) return AVERROR(ENOMEM); bin = *(uint8_t**)dst; for (i = 0; i < len; i++) snprintf(*out_val + i*2, 3, "%02X", bin[i]); return 0; default: return AVERROR(EINVAL); } if (ret >= sizeof(buf)) return AVERROR(EINVAL); *out_val = av_strdup(buf); return 0; }
15,752
1
static int alac_set_info(ALACContext *alac) { const unsigned char *ptr = alac->avctx->extradata; ptr += 4; /* size */ ptr += 4; /* alac */ ptr += 4; /* 0 ? */ if(AV_RB32(ptr) >= UINT_MAX/4){ av_log(alac->avctx, AV_LOG_ERROR, "setinfo_max_samples_per_frame too large\n"); return -1; } /* buffer size / 2 ? */ alac->setinfo_max_samples_per_frame = bytestream_get_be32(&ptr); ptr++; /* ??? */ alac->setinfo_sample_size = *ptr++; alac->setinfo_rice_historymult = *ptr++; alac->setinfo_rice_initialhistory = *ptr++; alac->setinfo_rice_kmodifier = *ptr++; alac->numchannels = *ptr++; bytestream_get_be16(&ptr); /* ??? */ bytestream_get_be32(&ptr); /* max coded frame size */ bytestream_get_be32(&ptr); /* bitrate ? */ bytestream_get_be32(&ptr); /* samplerate */ return 0; }
15,753
1
int ff_ivi_decode_blocks(GetBitContext *gb, IVIBandDesc *band, IVITile *tile) { int mbn, blk, num_blocks, num_coeffs, blk_size, scan_pos, run, val, pos, is_intra, mc_type, mv_x, mv_y, col_mask; uint8_t col_flags[8]; int32_t prev_dc, trvec[64]; uint32_t cbp, sym, lo, hi, quant, buf_offs, q; IVIMbInfo *mb; RVMapDesc *rvmap = band->rv_map; void (*mc_with_delta_func)(int16_t *buf, const int16_t *ref_buf, uint32_t pitch, int mc_type); void (*mc_no_delta_func) (int16_t *buf, const int16_t *ref_buf, uint32_t pitch, int mc_type); const uint16_t *base_tab; const uint8_t *scale_tab; prev_dc = 0; /* init intra prediction for the DC coefficient */ blk_size = band->blk_size; col_mask = blk_size - 1; /* column mask for tracking non-zero coeffs */ num_blocks = (band->mb_size != blk_size) ? 4 : 1; /* number of blocks per mb */ num_coeffs = blk_size * blk_size; if (blk_size == 8) { mc_with_delta_func = ff_ivi_mc_8x8_delta; mc_no_delta_func = ff_ivi_mc_8x8_no_delta; } else { mc_with_delta_func = ff_ivi_mc_4x4_delta; mc_no_delta_func = ff_ivi_mc_4x4_no_delta; } for (mbn = 0, mb = tile->mbs; mbn < tile->num_MBs; mb++, mbn++) { is_intra = !mb->type; cbp = mb->cbp; buf_offs = mb->buf_offs; quant = av_clip(band->glob_quant + mb->q_delta, 0, 23); base_tab = is_intra ? band->intra_base : band->inter_base; scale_tab = is_intra ? band->intra_scale : band->inter_scale; if (scale_tab) quant = scale_tab[quant]; if (!is_intra) { mv_x = mb->mv_x; mv_y = mb->mv_y; if (!band->is_halfpel) { mc_type = 0; /* we have only fullpel vectors */ } else { mc_type = ((mv_y & 1) << 1) | (mv_x & 1); mv_x >>= 1; mv_y >>= 1; /* convert halfpel vectors into fullpel ones */ } } for (blk = 0; blk < num_blocks; blk++) { /* adjust block position in the buffer according to its number */ if (blk & 1) { buf_offs += blk_size; } else if (blk == 2) { buf_offs -= blk_size; buf_offs += blk_size * band->pitch; } if (cbp & 1) { /* block coded ? */ scan_pos = -1; memset(trvec, 0, num_coeffs*sizeof(trvec[0])); /* zero transform vector */ memset(col_flags, 0, sizeof(col_flags)); /* zero column flags */ while (scan_pos <= num_coeffs) { sym = get_vlc2(gb, band->blk_vlc.tab->table, IVI_VLC_BITS, 1); if (sym == rvmap->eob_sym) break; /* End of block */ if (sym == rvmap->esc_sym) { /* Escape - run/val explicitly coded using 3 vlc codes */ run = get_vlc2(gb, band->blk_vlc.tab->table, IVI_VLC_BITS, 1) + 1; lo = get_vlc2(gb, band->blk_vlc.tab->table, IVI_VLC_BITS, 1); hi = get_vlc2(gb, band->blk_vlc.tab->table, IVI_VLC_BITS, 1); val = IVI_TOSIGNED((hi << 6) | lo); /* merge them and convert into signed val */ } else { if (sym >= 256U) { av_log(NULL, AV_LOG_ERROR, "Invalid sym encountered: %d.\n", sym); return -1; } run = rvmap->runtab[sym]; val = rvmap->valtab[sym]; } /* de-zigzag and dequantize */ scan_pos += run; if (scan_pos >= num_coeffs) break; pos = band->scan[scan_pos]; if (!val) av_dlog(NULL, "Val = 0 encountered!\n"); q = (base_tab[pos] * quant) >> 9; if (q > 1) val = val * q + FFSIGN(val) * (((q ^ 1) - 1) >> 1); trvec[pos] = val; col_flags[pos & col_mask] |= !!val; /* track columns containing non-zero coeffs */ }// while if (scan_pos >= num_coeffs && sym != rvmap->eob_sym) return -1; /* corrupt block data */ /* undoing DC coeff prediction for intra-blocks */ if (is_intra && band->is_2d_trans) { prev_dc += trvec[0]; trvec[0] = prev_dc; col_flags[0] |= !!prev_dc; } /* apply inverse transform */ band->inv_transform(trvec, band->buf + buf_offs, band->pitch, col_flags); /* apply motion compensation */ if (!is_intra) mc_with_delta_func(band->buf + buf_offs, band->ref_buf + buf_offs + mv_y * band->pitch + mv_x, band->pitch, mc_type); } else { /* block not coded */ /* for intra blocks apply the dc slant transform */ /* for inter - perform the motion compensation without delta */ if (is_intra && band->dc_transform) { band->dc_transform(&prev_dc, band->buf + buf_offs, band->pitch, blk_size); } else mc_no_delta_func(band->buf + buf_offs, band->ref_buf + buf_offs + mv_y * band->pitch + mv_x, band->pitch, mc_type); } cbp >>= 1; }// for blk }// for mbn align_get_bits(gb); return 0; }
15,755
1
void test_clone(void) { uint8_t *stack1, *stack2; int pid1, pid2, status1, status2; stack1 = malloc(STACK_SIZE); pid1 = chk_error(clone(thread1_func, stack1 + STACK_SIZE, CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD, "hello1")); stack2 = malloc(STACK_SIZE); pid2 = chk_error(clone(thread2_func, stack2 + STACK_SIZE, CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD, "hello2")); while (waitpid(pid1, &status1, 0) != pid1); while (waitpid(pid2, &status2, 0) != pid2); if (thread1_res != 5 || thread2_res != 6) error("clone"); }
15,756
1
static int h264_handle_packet(AVFormatContext *ctx, PayloadContext *data, AVStream *st, AVPacket * pkt, uint32_t * timestamp, const uint8_t * buf, int len, int flags) { uint8_t nal = buf[0]; uint8_t type = (nal & 0x1f); int result= 0; uint8_t start_sequence[] = { 0, 0, 0, 1 }; #ifdef DEBUG assert(data); assert(data->cookie == MAGIC_COOKIE); #endif assert(buf); if (type >= 1 && type <= 23) type = 1; // simplify the case. (these are all the nal types used internally by the h264 codec) switch (type) { case 0: // undefined, but pass them through case 1: av_new_packet(pkt, len+sizeof(start_sequence)); memcpy(pkt->data, start_sequence, sizeof(start_sequence)); memcpy(pkt->data+sizeof(start_sequence), buf, len); #ifdef DEBUG data->packet_types_received[nal & 0x1f]++; #endif break; case 24: // STAP-A (one packet, multiple nals) // consume the STAP-A NAL buf++; len--; // first we are going to figure out the total size.... { int pass= 0; int total_length= 0; uint8_t *dst= NULL; for(pass= 0; pass<2; pass++) { const uint8_t *src= buf; int src_len= len; do { uint16_t nal_size = AV_RB16(src); // this going to be a problem if unaligned (can it be?) // consume the length of the aggregate... src += 2; src_len -= 2; if (nal_size <= src_len) { if(pass==0) { // counting... total_length+= sizeof(start_sequence)+nal_size; } else { // copying assert(dst); memcpy(dst, start_sequence, sizeof(start_sequence)); dst+= sizeof(start_sequence); memcpy(dst, src, nal_size); #ifdef DEBUG data->packet_types_received[*src & 0x1f]++; #endif dst+= nal_size; } } else { av_log(ctx, AV_LOG_ERROR, "nal size exceeds length: %d %d\n", nal_size, src_len); } // eat what we handled... src += nal_size; src_len -= nal_size; if (src_len < 0) av_log(ctx, AV_LOG_ERROR, "Consumed more bytes than we got! (%d)\n", src_len); } while (src_len > 2); // because there could be rtp padding.. if(pass==0) { // now we know the total size of the packet (with the start sequences added) av_new_packet(pkt, total_length); dst= pkt->data; } else { assert(dst-pkt->data==total_length); } } } break; case 25: // STAP-B case 26: // MTAP-16 case 27: // MTAP-24 case 29: // FU-B av_log(ctx, AV_LOG_ERROR, "Unhandled type (%d) (See RFC for implementation details\n", type); result= -1; break; case 28: // FU-A (fragmented nal) buf++; len--; // skip the fu_indicator { // these are the same as above, we just redo them here for clarity... uint8_t fu_indicator = nal; uint8_t fu_header = *buf; // read the fu_header. uint8_t start_bit = fu_header >> 7; // uint8_t end_bit = (fu_header & 0x40) >> 6; uint8_t nal_type = (fu_header & 0x1f); uint8_t reconstructed_nal; // reconstruct this packet's true nal; only the data follows.. reconstructed_nal = fu_indicator & (0xe0); // the original nal forbidden bit and NRI are stored in this packet's nal; reconstructed_nal |= nal_type; // skip the fu_header... buf++; len--; #ifdef DEBUG if (start_bit) data->packet_types_received[nal_type]++; #endif if(start_bit) { // copy in the start sequence, and the reconstructed nal.... av_new_packet(pkt, sizeof(start_sequence)+sizeof(nal)+len); memcpy(pkt->data, start_sequence, sizeof(start_sequence)); pkt->data[sizeof(start_sequence)]= reconstructed_nal; memcpy(pkt->data+sizeof(start_sequence)+sizeof(nal), buf, len); } else { av_new_packet(pkt, len); memcpy(pkt->data, buf, len); } } break; case 30: // undefined case 31: // undefined default: av_log(ctx, AV_LOG_ERROR, "Undefined type (%d)", type); result= -1; break; } pkt->stream_index = st->index; return result; }
15,757
1
static void wiener_denoise(WMAVoiceContext *s, int fcb_type, float *synth_pf, int size, const float *lpcs) { int remainder, lim, n; if (fcb_type != FCB_TYPE_SILENCE) { float *tilted_lpcs = s->tilted_lpcs_pf, *coeffs = s->denoise_coeffs_pf, tilt_mem = 0; tilted_lpcs[0] = 1.0; memcpy(&tilted_lpcs[1], lpcs, sizeof(lpcs[0]) * s->lsps); memset(&tilted_lpcs[s->lsps + 1], 0, sizeof(tilted_lpcs[0]) * (128 - s->lsps - 1)); ff_tilt_compensation(&tilt_mem, 0.7 * tilt_factor(lpcs, s->lsps), tilted_lpcs, s->lsps + 2); /* The IRDFT output (127 samples for 7-bit filter) beyond the frame * size is applied to the next frame. All input beyond this is zero, * and thus all output beyond this will go towards zero, hence we can * limit to min(size-1, 127-size) as a performance consideration. */ remainder = FFMIN(127 - size, size - 1); calc_input_response(s, tilted_lpcs, fcb_type, coeffs, remainder); /* apply coefficients (in frequency spectrum domain), i.e. complex * number multiplication */ memset(&synth_pf[size], 0, sizeof(synth_pf[0]) * (128 - size)); ff_rdft_calc(&s->rdft, synth_pf); ff_rdft_calc(&s->rdft, coeffs); synth_pf[0] *= coeffs[0]; synth_pf[1] *= coeffs[1]; for (n = 1; n < 128; n++) { float v1 = synth_pf[n * 2], v2 = synth_pf[n * 2 + 1]; synth_pf[n * 2] = v1 * coeffs[n * 2] - v2 * coeffs[n * 2 + 1]; synth_pf[n * 2 + 1] = v2 * coeffs[n * 2] + v1 * coeffs[n * 2 + 1]; } ff_rdft_calc(&s->irdft, synth_pf); } /* merge filter output with the history of previous runs */ if (s->denoise_filter_cache_size) { lim = FFMIN(s->denoise_filter_cache_size, size); for (n = 0; n < lim; n++) synth_pf[n] += s->denoise_filter_cache[n]; s->denoise_filter_cache_size -= lim; memmove(s->denoise_filter_cache, &s->denoise_filter_cache[size], sizeof(s->denoise_filter_cache[0]) * s->denoise_filter_cache_size); } /* move remainder of filter output into a cache for future runs */ if (fcb_type != FCB_TYPE_SILENCE) { lim = FFMIN(remainder, s->denoise_filter_cache_size); for (n = 0; n < lim; n++) s->denoise_filter_cache[n] += synth_pf[size + n]; if (lim < remainder) { memcpy(&s->denoise_filter_cache[lim], &synth_pf[size + lim], sizeof(s->denoise_filter_cache[0]) * (remainder - lim)); s->denoise_filter_cache_size = remainder; } } }
15,758
1
static av_always_inline int check_4block_inter(SnowContext *s, int mb_x, int mb_y, int p0, int p1, int ref, int *best_rd){ const int b_stride= s->b_width << s->block_max_depth; BlockNode *block= &s->block[mb_x + mb_y * b_stride]; BlockNode backup[4]= {block[0], block[1], block[b_stride], block[b_stride+1]}; int rd, index, value; assert(mb_x>=0 && mb_y>=0); assert(mb_x<b_stride); assert(((mb_x|mb_y)&1) == 0); index= (p0 + 31*p1) & (ME_CACHE_SIZE-1); value= s->me_cache_generation + (p0>>10) + (p1<<6) + (block->ref<<12); if(s->me_cache[index] == value) return 0; s->me_cache[index]= value; block->mx= p0; block->my= p1; block->ref= ref; block->type &= ~BLOCK_INTRA; block[1]= block[b_stride]= block[b_stride+1]= *block; rd= get_4block_rd(s, mb_x, mb_y, 0); //FIXME chroma if(rd < *best_rd){ *best_rd= rd; return 1; }else{ block[0]= backup[0]; block[1]= backup[1]; block[b_stride]= backup[2]; block[b_stride+1]= backup[3]; return 0; } }
15,759
1
int qemu_signalfd(const sigset_t *mask) { #if defined(CONFIG_signalfd) int ret; ret = syscall(SYS_signalfd, -1, mask, _NSIG / 8); if (ret != -1) return ret; #endif return qemu_signalfd_compat(mask); }
15,760
1
static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num, uint8_t devfn, VTDContextEntry *ce) { VTDRootEntry re; int ret_fr; ret_fr = vtd_get_root_entry(s, bus_num, &re); if (ret_fr) { return ret_fr; } if (!vtd_root_entry_present(&re)) { /* Not error - it's okay we don't have root entry. */ trace_vtd_re_not_present(bus_num); return -VTD_FR_ROOT_ENTRY_P; } else if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) { trace_vtd_re_invalid(re.rsvd, re.val); return -VTD_FR_ROOT_ENTRY_RSVD; } ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce); if (ret_fr) { return ret_fr; } if (!vtd_ce_present(ce)) { /* Not error - it's okay we don't have context entry. */ trace_vtd_ce_not_present(bus_num, devfn); return -VTD_FR_CONTEXT_ENTRY_P; } else if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) || (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) { trace_vtd_ce_invalid(ce->hi, ce->lo); return -VTD_FR_CONTEXT_ENTRY_RSVD; } /* Check if the programming of context-entry is valid */ if (!vtd_is_level_supported(s, vtd_ce_get_level(ce))) { trace_vtd_ce_invalid(ce->hi, ce->lo); return -VTD_FR_CONTEXT_ENTRY_INV; } else { switch (vtd_ce_get_type(ce)) { case VTD_CONTEXT_TT_MULTI_LEVEL: /* fall through */ case VTD_CONTEXT_TT_DEV_IOTLB: break; default: trace_vtd_ce_invalid(ce->hi, ce->lo); return -VTD_FR_CONTEXT_ENTRY_INV; } } return 0; }
15,761
1
static int get_buffer(AVCodecContext *avctx, AVFrame *pic) { pic->type = FF_BUFFER_TYPE_USER; pic->data[0] = (void *)1; return 0; }
15,762
1
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) { ARMCPU *cpu = arm_env_get_cpu(env); switch (reg) { case 0: /* APSR */ return xpsr_read(env) & 0xf8000000; case 1: /* IAPSR */ return xpsr_read(env) & 0xf80001ff; case 2: /* EAPSR */ return xpsr_read(env) & 0xff00fc00; case 3: /* xPSR */ return xpsr_read(env) & 0xff00fdff; case 5: /* IPSR */ return xpsr_read(env) & 0x000001ff; case 6: /* EPSR */ return xpsr_read(env) & 0x0700fc00; case 7: /* IEPSR */ return xpsr_read(env) & 0x0700edff; case 8: /* MSP */ return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13]; case 9: /* PSP */ return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp; case 16: /* PRIMASK */ return (env->daif & PSTATE_I) != 0; case 17: /* BASEPRI */ case 18: /* BASEPRI_MAX */ return env->v7m.basepri; case 19: /* FAULTMASK */ return (env->daif & PSTATE_F) != 0; case 20: /* CONTROL */ return env->v7m.control; default: /* ??? For debugging only. */ cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg); return 0; } }
15,763
0
static inline int RENAME(yuv420_rgb32)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[]){ int y, h_size; if(c->srcFormat == PIX_FMT_YUV422P){ srcStride[1] *= 2; srcStride[2] *= 2; } h_size= (c->dstW+7)&~7; if(h_size*4 > dstStride[0]) h_size-=8; __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ ); for (y= 0; y<srcSliceH; y++ ) { uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0]; uint8_t *_py = src[0] + y*srcStride[0]; uint8_t *_pu = src[1] + (y>>1)*srcStride[1]; uint8_t *_pv = src[2] + (y>>1)*srcStride[2]; long index= -h_size/2; /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8 pixels in each iteration */ __asm__ __volatile__ ( /* load data for start of next scan line */ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ // ".balign 16 \n\t" "1: \n\t" YUV2RGB /* convert RGB plane to RGB packed format, mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> 0, mm4 -> GB, mm5 -> AR pixel 4-7, mm6 -> GB, mm7 -> AR pixel 0-3 */ "pxor %%mm3, %%mm3;" /* zero mm3 */ "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */ "movq %%mm1, %%mm7;" /* R7 R6 R5 R4 R3 R2 R1 R0 */ "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */ "movq %%mm1, %%mm5;" /* R7 R6 R5 R4 R3 R2 R1 R0 */ "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */ "punpcklbw %%mm3, %%mm7;" /* 00 R3 00 R2 00 R1 00 R0 */ "punpcklwd %%mm7, %%mm6;" /* 00 R1 B1 G1 00 R0 B0 G0 */ MOVNTQ " %%mm6, (%1);" /* Store ARGB1 ARGB0 */ "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */ "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */ "punpckhwd %%mm7, %%mm6;" /* 00 R3 G3 B3 00 R2 B3 G2 */ MOVNTQ " %%mm6, 8 (%1);" /* Store ARGB3 ARGB2 */ "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */ "punpckhbw %%mm3, %%mm5;" /* 00 R7 00 R6 00 R5 00 R4 */ "punpcklwd %%mm5, %%mm4;" /* 00 R5 B5 G5 00 R4 B4 G4 */ MOVNTQ " %%mm4, 16 (%1);" /* Store ARGB5 ARGB4 */ "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */ "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */ "punpckhwd %%mm5, %%mm4;" /* 00 R7 G7 B7 00 R6 B6 G6 */ MOVNTQ " %%mm4, 24 (%1);" /* Store ARGB7 ARGB6 */ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ "pxor %%mm4, %%mm4;" /* zero mm4 */ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ "add $32, %1 \n\t" "add $4, %0 \n\t" " js 1b \n\t" : "+r" (index), "+r" (_image) : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index) ); } __asm__ __volatile__ (EMMS); return srcSliceH; }
15,764
1
static void qmp_tmp105_set_temperature(const char *id, int value) { QDict *response; response = qmp("{ 'execute': 'qom-set', 'arguments': { 'path': '%s', " "'property': 'temperature', 'value': %d } }", id, value); g_assert(qdict_haskey(response, "return")); QDECREF(response); }
15,765
1
void AUD_del_capture (CaptureVoiceOut *cap, void *cb_opaque) { struct capture_callback *cb; for (cb = cap->cb_head.lh_first; cb; cb = cb->entries.le_next) { if (cb->opaque == cb_opaque) { cb->ops.destroy (cb_opaque); QLIST_REMOVE (cb, entries); g_free (cb); if (!cap->cb_head.lh_first) { SWVoiceOut *sw = cap->hw.sw_head.lh_first, *sw1; while (sw) { SWVoiceCap *sc = (SWVoiceCap *) sw; #ifdef DEBUG_CAPTURE dolog ("freeing %s\n", sw->name); #endif sw1 = sw->entries.le_next; if (sw->rate) { st_rate_stop (sw->rate); sw->rate = NULL; } QLIST_REMOVE (sw, entries); QLIST_REMOVE (sc, entries); g_free (sc); sw = sw1; } QLIST_REMOVE (cap, entries); g_free (cap); } return; } } }
15,766
1
struct omap_sdrc_s *omap_sdrc_init(MemoryRegion *sysmem, hwaddr base) { struct omap_sdrc_s *s = (struct omap_sdrc_s *) g_malloc0(sizeof(struct omap_sdrc_s)); omap_sdrc_reset(s); memory_region_init_io(&s->iomem, NULL, &omap_sdrc_ops, s, "omap.sdrc", 0x1000); memory_region_add_subregion(sysmem, base, &s->iomem); return s; }
15,767
1
static int oggvorbis_decode_init(AVCodecContext *avccontext) { OggVorbisDecContext *context = avccontext->priv_data ; uint8_t *p= avccontext->extradata; int i, hsizes[3]; unsigned char *headers[3], *extradata = avccontext->extradata; vorbis_info_init(&context->vi) ; vorbis_comment_init(&context->vc) ; if(! avccontext->extradata_size || ! p) { av_log(avccontext, AV_LOG_ERROR, "vorbis extradata absent\n"); return -1; } if(p[0] == 0 && p[1] == 30) { for(i = 0; i < 3; i++){ hsizes[i] = bytestream_get_be16((const uint8_t **)&p); headers[i] = p; p += hsizes[i]; } } else if(*p == 2) { unsigned int offset = 1; p++; for(i=0; i<2; i++) { hsizes[i] = 0; while((*p == 0xFF) && (offset < avccontext->extradata_size)) { hsizes[i] += 0xFF; offset++; p++; } if(offset >= avccontext->extradata_size - 1) { av_log(avccontext, AV_LOG_ERROR, "vorbis header sizes damaged\n"); return -1; } hsizes[i] += *p; offset++; p++; } hsizes[2] = avccontext->extradata_size - hsizes[0]-hsizes[1]-offset; #if 0 av_log(avccontext, AV_LOG_DEBUG, "vorbis header sizes: %d, %d, %d, / extradata_len is %d \n", hsizes[0], hsizes[1], hsizes[2], avccontext->extradata_size); #endif headers[0] = extradata + offset; headers[1] = extradata + offset + hsizes[0]; headers[2] = extradata + offset + hsizes[0] + hsizes[1]; } else { av_log(avccontext, AV_LOG_ERROR, "vorbis initial header len is wrong: %d\n", *p); return -1; } for(i=0; i<3; i++){ context->op.b_o_s= i==0; context->op.bytes = hsizes[i]; context->op.packet = headers[i]; if(vorbis_synthesis_headerin(&context->vi, &context->vc, &context->op)<0){ av_log(avccontext, AV_LOG_ERROR, "%d. vorbis header damaged\n", i+1); return -1; } } avccontext->channels = context->vi.channels; avccontext->sample_rate = context->vi.rate; avccontext->sample_fmt = AV_SAMPLE_FMT_S16; avccontext->time_base= (AVRational){1, avccontext->sample_rate}; vorbis_synthesis_init(&context->vd, &context->vi); vorbis_block_init(&context->vd, &context->vb); return 0 ; }
15,769
1
static CharDriverState *qemu_chr_open_pipe(QemuOpts *opts) { int fd_in, fd_out; char filename_in[256], filename_out[256]; const char *filename = qemu_opt_get(opts, "path"); if (filename == NULL) { fprintf(stderr, "chardev: pipe: no filename given\n"); return NULL; } snprintf(filename_in, 256, "%s.in", filename); snprintf(filename_out, 256, "%s.out", filename); TFR(fd_in = open(filename_in, O_RDWR | O_BINARY)); TFR(fd_out = open(filename_out, O_RDWR | O_BINARY)); if (fd_in < 0 || fd_out < 0) { if (fd_in >= 0) close(fd_in); if (fd_out >= 0) close(fd_out); TFR(fd_in = fd_out = open(filename, O_RDWR | O_BINARY)); if (fd_in < 0) return NULL; } return qemu_chr_open_fd(fd_in, fd_out); }
15,770
0
yuv2rgb48_2_c_template(SwsContext *c, const int32_t *buf[2], const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf[2], uint16_t *dest, int dstW, int yalpha, int uvalpha, int y, enum AVPixelFormat target) { const int32_t *buf0 = buf[0], *buf1 = buf[1], *ubuf0 = ubuf[0], *ubuf1 = ubuf[1], *vbuf0 = vbuf[0], *vbuf1 = vbuf[1]; int yalpha1 = 4096 - yalpha; int uvalpha1 = 4096 - uvalpha; int i; for (i = 0; i < ((dstW + 1) >> 1); i++) { int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14; int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14; int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + (-128 << 23)) >> 14; int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha + (-128 << 23)) >> 14; int R, G, B; Y1 -= c->yuv2rgb_y_offset; Y2 -= c->yuv2rgb_y_offset; Y1 *= c->yuv2rgb_y_coeff; Y2 *= c->yuv2rgb_y_coeff; Y1 += 1 << 13; Y2 += 1 << 13; R = V * c->yuv2rgb_v2r_coeff; G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; B = U * c->yuv2rgb_u2b_coeff; output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); dest += 6; } }
15,771
0
static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) { MpegEncContext * const s = &h->s; const int mb_xy= mb_x + mb_y*s->mb_stride; const int mb_type = s->current_picture.mb_type[mb_xy]; const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4; int first_vertical_edge_done = 0; int dir; //for sufficiently low qp, filtering wouldn't do anything //this is a conservative estimate: could also check beta_offset and more accurate chroma_qp if(!FRAME_MBAFF){ int qp_thresh = 15 - h->slice_alpha_c0_offset - FFMAX3(0, h->pps.chroma_qp_index_offset[0], h->pps.chroma_qp_index_offset[1]); int qp = s->current_picture.qscale_table[mb_xy]; if(qp <= qp_thresh && (mb_x == 0 || ((qp + s->current_picture.qscale_table[mb_xy-1] + 1)>>1) <= qp_thresh) && (mb_y == 0 || ((qp + s->current_picture.qscale_table[h->top_mb_xy] + 1)>>1) <= qp_thresh)){ return; } } if (FRAME_MBAFF // left mb is in picture && h->slice_table[mb_xy-1] != 255 // and current and left pair do not have the same interlaced type && (IS_INTERLACED(mb_type) != IS_INTERLACED(s->current_picture.mb_type[mb_xy-1])) // and left mb is in the same slice if deblocking_filter == 2 && (h->deblocking_filter!=2 || h->slice_table[mb_xy-1] == h->slice_table[mb_xy])) { /* First vertical edge is different in MBAFF frames * There are 8 different bS to compute and 2 different Qp */ const int pair_xy = mb_x + (mb_y&~1)*s->mb_stride; const int left_mb_xy[2] = { pair_xy-1, pair_xy-1+s->mb_stride }; int16_t bS[8]; int qp[2]; int bqp[2]; int rqp[2]; int mb_qp, mbn0_qp, mbn1_qp; int i; first_vertical_edge_done = 1; if( IS_INTRA(mb_type) ) bS[0] = bS[1] = bS[2] = bS[3] = bS[4] = bS[5] = bS[6] = bS[7] = 4; else { for( i = 0; i < 8; i++ ) { int mbn_xy = MB_FIELD ? left_mb_xy[i>>2] : left_mb_xy[i&1]; if( IS_INTRA( s->current_picture.mb_type[mbn_xy] ) ) bS[i] = 4; else if( h->non_zero_count_cache[12+8*(i>>1)] != 0 || /* FIXME: with 8x8dct + cavlc, should check cbp instead of nnz */ h->non_zero_count[mbn_xy][MB_FIELD ? i&3 : (i>>2)+(mb_y&1)*2] ) bS[i] = 2; else bS[i] = 1; } } mb_qp = s->current_picture.qscale_table[mb_xy]; mbn0_qp = s->current_picture.qscale_table[left_mb_xy[0]]; mbn1_qp = s->current_picture.qscale_table[left_mb_xy[1]]; qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1; bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) + get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1; rqp[0] = ( get_chroma_qp( h, 1, mb_qp ) + get_chroma_qp( h, 1, mbn0_qp ) + 1 ) >> 1; qp[1] = ( mb_qp + mbn1_qp + 1 ) >> 1; bqp[1] = ( get_chroma_qp( h, 0, mb_qp ) + get_chroma_qp( h, 0, mbn1_qp ) + 1 ) >> 1; rqp[1] = ( get_chroma_qp( h, 1, mb_qp ) + get_chroma_qp( h, 1, mbn1_qp ) + 1 ) >> 1; /* Filter edge */ tprintf(s->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize); { int i; for (i = 0; i < 8; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } filter_mb_mbaff_edgev ( h, &img_y [0], linesize, bS, qp ); filter_mb_mbaff_edgecv( h, &img_cb[0], uvlinesize, bS, bqp ); filter_mb_mbaff_edgecv( h, &img_cr[0], uvlinesize, bS, rqp ); } /* dir : 0 -> vertical edge, 1 -> horizontal edge */ for( dir = 0; dir < 2; dir++ ) { int edge; const int mbm_xy = dir == 0 ? mb_xy -1 : h->top_mb_xy; const int mbm_type = s->current_picture.mb_type[mbm_xy]; int start = h->slice_table[mbm_xy] == 255 ? 1 : 0; const int edges = (mb_type & (MB_TYPE_16x16|MB_TYPE_SKIP)) == (MB_TYPE_16x16|MB_TYPE_SKIP) ? 1 : 4; // how often to recheck mv-based bS when iterating between edges const int mask_edge = (mb_type & (MB_TYPE_16x16 | (MB_TYPE_16x8 << dir))) ? 3 : (mb_type & (MB_TYPE_8x16 >> dir)) ? 1 : 0; // how often to recheck mv-based bS when iterating along each edge const int mask_par0 = mb_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)); if (first_vertical_edge_done) { start = 1; first_vertical_edge_done = 0; } if (h->deblocking_filter==2 && h->slice_table[mbm_xy] != h->slice_table[mb_xy]) start = 1; if (FRAME_MBAFF && (dir == 1) && ((mb_y&1) == 0) && start == 0 && !IS_INTERLACED(mb_type) && IS_INTERLACED(mbm_type) ) { // This is a special case in the norm where the filtering must // be done twice (one each of the field) even if we are in a // frame macroblock. // static const int nnz_idx[4] = {4,5,6,3}; unsigned int tmp_linesize = 2 * linesize; unsigned int tmp_uvlinesize = 2 * uvlinesize; int mbn_xy = mb_xy - 2 * s->mb_stride; int qp; int i, j; int16_t bS[4]; for(j=0; j<2; j++, mbn_xy += s->mb_stride){ if( IS_INTRA(mb_type) || IS_INTRA(s->current_picture.mb_type[mbn_xy]) ) { bS[0] = bS[1] = bS[2] = bS[3] = 3; } else { const uint8_t *mbn_nnz = h->non_zero_count[mbn_xy]; for( i = 0; i < 4; i++ ) { if( h->non_zero_count_cache[scan8[0]+i] != 0 || mbn_nnz[nnz_idx[i]] != 0 ) bS[i] = 2; else bS[i] = 1; } } // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1; tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize); { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } filter_mb_edgeh( h, &img_y[j*linesize], tmp_linesize, bS, qp ); filter_mb_edgech( h, &img_cb[j*uvlinesize], tmp_uvlinesize, bS, ( h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1); filter_mb_edgech( h, &img_cr[j*uvlinesize], tmp_uvlinesize, bS, ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1); } start = 1; } /* Calculate bS */ for( edge = start; edge < edges; edge++ ) { /* mbn_xy: neighbor macroblock */ const int mbn_xy = edge > 0 ? mb_xy : mbm_xy; const int mbn_type = s->current_picture.mb_type[mbn_xy]; int16_t bS[4]; int qp; if( (edge&1) && IS_8x8DCT(mb_type) ) continue; if( IS_INTRA(mb_type) || IS_INTRA(mbn_type) ) { int value; if (edge == 0) { if ( (!IS_INTERLACED(mb_type) && !IS_INTERLACED(mbm_type)) || ((FRAME_MBAFF || (s->picture_structure != PICT_FRAME)) && (dir == 0)) ) { value = 4; } else { value = 3; } } else { value = 3; } bS[0] = bS[1] = bS[2] = bS[3] = value; } else { int i, l; int mv_done; if( edge & mask_edge ) { bS[0] = bS[1] = bS[2] = bS[3] = 0; mv_done = 1; } else if( FRAME_MBAFF && IS_INTERLACED(mb_type ^ mbn_type)) { bS[0] = bS[1] = bS[2] = bS[3] = 1; mv_done = 1; } else if( mask_par0 && (edge || (mbn_type & (MB_TYPE_16x16 | (MB_TYPE_8x16 >> dir)))) ) { int b_idx= 8 + 4 + edge * (dir ? 8:1); int bn_idx= b_idx - (dir ? 8:1); int v = 0; int xn= h->slice_type_nos == FF_B_TYPE && h->ref2frm[0][h->ref_cache[0][b_idx]+2] != h->ref2frm[0][h->ref_cache[0][bn_idx]+2]; for( l = 0; !v && l < 1 + (h->slice_type_nos == FF_B_TYPE); l++ ) { int ln= l^xn; v |= h->ref2frm[l][h->ref_cache[l][b_idx]+2] != h->ref2frm[ln][h->ref_cache[ln][bn_idx]+2] || FFABS( h->mv_cache[l][b_idx][0] - h->mv_cache[ln][bn_idx][0] ) >= 4 || FFABS( h->mv_cache[l][b_idx][1] - h->mv_cache[ln][bn_idx][1] ) >= mvy_limit; } bS[0] = bS[1] = bS[2] = bS[3] = v; mv_done = 1; } else mv_done = 0; for( i = 0; i < 4; i++ ) { int x = dir == 0 ? edge : i; int y = dir == 0 ? i : edge; int b_idx= 8 + 4 + x + 8*y; int bn_idx= b_idx - (dir ? 8:1); if( h->non_zero_count_cache[b_idx] != 0 || h->non_zero_count_cache[bn_idx] != 0 ) { bS[i] = 2; } else if(!mv_done) { int xn= h->slice_type_nos == FF_B_TYPE && h->ref2frm[0][h->ref_cache[0][b_idx]+2] != h->ref2frm[0][h->ref_cache[0][bn_idx]+2]; bS[i] = 0; for( l = 0; l < 1 + (h->slice_type_nos == FF_B_TYPE); l++ ) { int ln= l^xn; if( h->ref2frm[l][h->ref_cache[l][b_idx]+2] != h->ref2frm[ln][h->ref_cache[ln][bn_idx]+2] || FFABS( h->mv_cache[l][b_idx][0] - h->mv_cache[ln][bn_idx][0] ) >= 4 || FFABS( h->mv_cache[l][b_idx][1] - h->mv_cache[ln][bn_idx][1] ) >= mvy_limit ) { bS[i] = 1; break; } } } } if(bS[0]+bS[1]+bS[2]+bS[3] == 0) continue; } /* Filter edge */ // Do not use s->qscale as luma quantizer because it has not the same // value in IPCM macroblocks. qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1; //tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp, s->current_picture.qscale_table[mbn_xy]); tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize); { int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); } if( dir == 0 ) { filter_mb_edgev( h, &img_y[4*edge], linesize, bS, qp ); if( (edge&1) == 0 ) { filter_mb_edgecv( h, &img_cb[2*edge], uvlinesize, bS, ( h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1); filter_mb_edgecv( h, &img_cr[2*edge], uvlinesize, bS, ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1); } } else { filter_mb_edgeh( h, &img_y[4*edge*linesize], linesize, bS, qp ); if( (edge&1) == 0 ) { filter_mb_edgech( h, &img_cb[2*edge*uvlinesize], uvlinesize, bS, ( h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1); filter_mb_edgech( h, &img_cr[2*edge*uvlinesize], uvlinesize, bS, ( h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1); } } } } }
15,772
0
static int twolame_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { TWOLAMEContext *s = avctx->priv_data; int ret; if ((ret = ff_alloc_packet(avpkt, MPA_MAX_CODED_FRAME_SIZE)) < 0) return ret; if (frame) { switch (avctx->sample_fmt) { case AV_SAMPLE_FMT_FLT: ret = twolame_encode_buffer_float32_interleaved(s->glopts, (const float *)frame->data[0], frame->nb_samples, avpkt->data, avpkt->size); break; case AV_SAMPLE_FMT_FLTP: ret = twolame_encode_buffer_float32(s->glopts, (const float *)frame->data[0], (const float *)frame->data[1], frame->nb_samples, avpkt->data, avpkt->size); break; case AV_SAMPLE_FMT_S16: ret = twolame_encode_buffer_interleaved(s->glopts, (const short int *)frame->data[0], frame->nb_samples, avpkt->data, avpkt->size); break; case AV_SAMPLE_FMT_S16P: ret = twolame_encode_buffer(s->glopts, (const short int *)frame->data[0], (const short int *)frame->data[1], frame->nb_samples, avpkt->data, avpkt->size); break; default: av_log(avctx, AV_LOG_ERROR, "Unsupported sample format %d.\n", avctx->sample_fmt); return AVERROR_BUG; } } else { ret = twolame_encode_flush(s->glopts, avpkt->data, avpkt->size); } if (!ret) // no bytes written return 0; if (ret < 0) // twolame error return AVERROR_UNKNOWN; avpkt->duration = ff_samples_to_time_base(avctx, frame->nb_samples); if (frame) { if (frame->pts != AV_NOPTS_VALUE) avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->delay); } else { avpkt->pts = s->next_pts; } // this is for setting pts for flushed packet(s). if (avpkt->pts != AV_NOPTS_VALUE) s->next_pts = avpkt->pts + avpkt->duration; av_shrink_packet(avpkt, ret); *got_packet_ptr = 1; return 0; }
15,773
0
av_cold void ff_vc1dsp_init_x86(VC1DSPContext *dsp) { int cpu_flags = av_get_cpu_flags(); if (INLINE_MMX(cpu_flags)) ff_vc1dsp_init_mmx(dsp); if (INLINE_MMXEXT(cpu_flags)) ff_vc1dsp_init_mmxext(dsp); #define ASSIGN_LF(EXT) \ dsp->vc1_v_loop_filter4 = ff_vc1_v_loop_filter4_ ## EXT; \ dsp->vc1_h_loop_filter4 = ff_vc1_h_loop_filter4_ ## EXT; \ dsp->vc1_v_loop_filter8 = ff_vc1_v_loop_filter8_ ## EXT; \ dsp->vc1_h_loop_filter8 = ff_vc1_h_loop_filter8_ ## EXT; \ dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_ ## EXT; \ dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_ ## EXT #if HAVE_YASM if (cpu_flags & AV_CPU_FLAG_MMX) { dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_nornd_mmx; } if (cpu_flags & AV_CPU_FLAG_MMXEXT) { ASSIGN_LF(mmxext); dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_mmxext; dsp->avg_vc1_mspel_pixels_tab[0] = avg_vc1_mspel_mc00_mmxext; } else if (cpu_flags & AV_CPU_FLAG_3DNOW) { dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_3dnow; } if (cpu_flags & AV_CPU_FLAG_SSE2) { dsp->vc1_v_loop_filter8 = ff_vc1_v_loop_filter8_sse2; dsp->vc1_h_loop_filter8 = ff_vc1_h_loop_filter8_sse2; dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_sse2; dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse2; } if (cpu_flags & AV_CPU_FLAG_SSSE3) { ASSIGN_LF(ssse3); dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_nornd_ssse3; dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_ssse3; } if (cpu_flags & AV_CPU_FLAG_SSE4) { dsp->vc1_h_loop_filter8 = ff_vc1_h_loop_filter8_sse4; dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse4; } #endif /* HAVE_YASM */ }
15,774
0
static inline void neon_store_reg64(TCGv var, int reg) { tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg)); }
15,776
0
static DisplaySurface *qemu_create_message_surface(int w, int h, const char *msg) { DisplaySurface *surface = qemu_create_displaysurface(w, h); pixman_color_t bg = color_table_rgb[0][COLOR_BLACK]; pixman_color_t fg = color_table_rgb[0][COLOR_WHITE]; pixman_image_t *glyph; int len, x, y, i; len = strlen(msg); x = (w / FONT_WIDTH - len) / 2; y = (h / FONT_HEIGHT - 1) / 2; for (i = 0; i < len; i++) { glyph = qemu_pixman_glyph_from_vgafont(FONT_HEIGHT, vgafont16, msg[i]); qemu_pixman_glyph_render(glyph, surface->image, &fg, &bg, x+i, y, FONT_WIDTH, FONT_HEIGHT); qemu_pixman_image_unref(glyph); } return surface; }
15,777
0
static bool vregs_needed(void *opaque) { #ifdef CONFIG_KVM if (kvm_enabled()) { return kvm_check_extension(kvm_state, KVM_CAP_S390_VECTOR_REGISTERS); } #endif return 0; }
15,780
0
static unsigned int dec_abs_r(DisasContext *dc) { TCGv t0; DIS(fprintf (logfile, "abs $r%u, $r%u\n", dc->op1, dc->op2)); cris_cc_mask(dc, CC_MASK_NZ); t0 = tcg_temp_new(TCG_TYPE_TL); tcg_gen_sari_tl(t0, cpu_R[dc->op1], 31); tcg_gen_xor_tl(cpu_R[dc->op2], cpu_R[dc->op1], t0); tcg_gen_sub_tl(cpu_R[dc->op2], cpu_R[dc->op2], t0); tcg_temp_free(t0); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4); return 2; }
15,781
0
static int calculate_new_instance_id(const char *idstr) { SaveStateEntry *se; int instance_id = 0; TAILQ_FOREACH(se, &savevm_handlers, entry) { if (strcmp(idstr, se->idstr) == 0 && instance_id <= se->instance_id) { instance_id = se->instance_id + 1; } } return instance_id; }
15,782
0
static int resample(ResampleContext *c, void *dst, const void *src, int *consumed, int src_size, int dst_size, int update_ctx, int nearest_neighbour) { int dst_index; int index = c->index; int frac = c->frac; int dst_incr_frac = c->dst_incr % c->src_incr; int dst_incr = c->dst_incr / c->src_incr; int compensation_distance = c->compensation_distance; if (!dst != !src) return AVERROR(EINVAL); if (nearest_neighbour) { int64_t index2 = ((int64_t)index) << 32; int64_t incr = (1LL << 32) * c->dst_incr / c->src_incr; dst_size = FFMIN(dst_size, (src_size-1-index) * (int64_t)c->src_incr / c->dst_incr); if (dst) { for(dst_index = 0; dst_index < dst_size; dst_index++) { c->resample_nearest(dst, dst_index, src, index2 >> 32); index2 += incr; } } else { dst_index = dst_size; } index += dst_index * dst_incr; index += (frac + dst_index * (int64_t)dst_incr_frac) / c->src_incr; frac = (frac + dst_index * (int64_t)dst_incr_frac) % c->src_incr; } else { for (dst_index = 0; dst_index < dst_size; dst_index++) { int sample_index = index >> c->phase_shift; if (sample_index + c->filter_length > src_size || -sample_index >= src_size) break; if (dst) c->resample_one(c, dst, dst_index, src, src_size, index, frac); frac += dst_incr_frac; index += dst_incr; if (frac >= c->src_incr) { frac -= c->src_incr; index++; } if (dst_index + 1 == compensation_distance) { compensation_distance = 0; dst_incr_frac = c->ideal_dst_incr % c->src_incr; dst_incr = c->ideal_dst_incr / c->src_incr; } } } if (consumed) *consumed = FFMAX(index, 0) >> c->phase_shift; if (update_ctx) { if (index >= 0) index &= c->phase_mask; if (compensation_distance) { compensation_distance -= dst_index; if (compensation_distance <= 0) return AVERROR_BUG; } c->frac = frac; c->index = index; c->dst_incr = dst_incr_frac + c->src_incr*dst_incr; c->compensation_distance = compensation_distance; } return dst_index; }
15,783
0
static void l2cap_frame_in(struct l2cap_instance_s *l2cap, const l2cap_hdr *frame) { uint16_t cid = le16_to_cpu(frame->cid); uint16_t len = le16_to_cpu(frame->len); if (unlikely(cid >= L2CAP_CID_MAX || !l2cap->cid[cid])) { fprintf(stderr, "%s: frame addressed to a non-existent L2CAP " "channel %04x received.\n", __FUNCTION__, cid); return; } l2cap->cid[cid]->frame_in(l2cap->cid[cid], cid, frame, len); }
15,784
0
static uint32_t superio_ioport_readb(void *opaque, uint32_t addr) { SuperIOConfig *superio_conf = opaque; DPRINTF("superio_ioport_readb address 0x%x \n", addr); return (superio_conf->config[superio_conf->index]); }
15,785
0
void aio_notify_accept(AioContext *ctx) { if (atomic_xchg(&ctx->notified, false)) { event_notifier_test_and_clear(&ctx->notifier); } }
15,786
0
static inline void code_gen_alloc(size_t tb_size) { code_gen_buffer_size = size_code_gen_buffer(tb_size); code_gen_buffer = alloc_code_gen_buffer(); if (code_gen_buffer == NULL) { fprintf(stderr, "Could not allocate dynamic translator buffer\n"); exit(1); } map_exec(code_gen_prologue, sizeof(code_gen_prologue)); code_gen_buffer_max_size = code_gen_buffer_size - (TCG_MAX_OP_SIZE * OPC_BUF_SIZE); code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE; tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock)); }
15,787
0
static void qemu_chr_parse_parallel(QemuOpts *opts, ChardevBackend *backend, Error **errp) { const char *device = qemu_opt_get(opts, "path"); if (device == NULL) { error_setg(errp, "chardev: parallel: no device path given"); return; } backend->parallel = g_new0(ChardevHostdev, 1); backend->parallel->device = g_strdup(device); }
15,789
0
static void guess_chs_for_size(BlockDriverState *bs, uint32_t *pcyls, uint32_t *pheads, uint32_t *psecs) { uint64_t nb_sectors; int cylinders; bdrv_get_geometry(bs, &nb_sectors); cylinders = nb_sectors / (16 * 63); if (cylinders > 16383) { cylinders = 16383; } else if (cylinders < 2) { cylinders = 2; } *pcyls = cylinders; *pheads = 16; *psecs = 63; }
15,790
0
static void disas_xtensa_insn(DisasContext *dc) { #define HAS_OPTION_BITS(opt) do { \ if (!option_bits_enabled(dc, opt)) { \ qemu_log("Option is not enabled %s:%d\n", \ __FILE__, __LINE__); \ goto invalid_opcode; \ } \ } while (0) #define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt)) #define TBD() qemu_log("TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__) #define RESERVED() do { \ qemu_log("RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \ dc->pc, b0, b1, b2, __FILE__, __LINE__); \ goto invalid_opcode; \ } while (0) #ifdef TARGET_WORDS_BIGENDIAN #define OP0 (((b0) & 0xf0) >> 4) #define OP1 (((b2) & 0xf0) >> 4) #define OP2 ((b2) & 0xf) #define RRR_R ((b1) & 0xf) #define RRR_S (((b1) & 0xf0) >> 4) #define RRR_T ((b0) & 0xf) #else #define OP0 (((b0) & 0xf)) #define OP1 (((b2) & 0xf)) #define OP2 (((b2) & 0xf0) >> 4) #define RRR_R (((b1) & 0xf0) >> 4) #define RRR_S (((b1) & 0xf)) #define RRR_T (((b0) & 0xf0) >> 4) #endif #define RRR_X ((RRR_R & 0x4) >> 2) #define RRR_Y ((RRR_T & 0x4) >> 2) #define RRR_W (RRR_R & 0x3) #define RRRN_R RRR_R #define RRRN_S RRR_S #define RRRN_T RRR_T #define RRI8_R RRR_R #define RRI8_S RRR_S #define RRI8_T RRR_T #define RRI8_IMM8 (b2) #define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8) #ifdef TARGET_WORDS_BIGENDIAN #define RI16_IMM16 (((b1) << 8) | (b2)) #else #define RI16_IMM16 (((b2) << 8) | (b1)) #endif #ifdef TARGET_WORDS_BIGENDIAN #define CALL_N (((b0) & 0xc) >> 2) #define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2)) #else #define CALL_N (((b0) & 0x30) >> 4) #define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10)) #endif #define CALL_OFFSET_SE \ (((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET) #define CALLX_N CALL_N #ifdef TARGET_WORDS_BIGENDIAN #define CALLX_M ((b0) & 0x3) #else #define CALLX_M (((b0) & 0xc0) >> 6) #endif #define CALLX_S RRR_S #define BRI12_M CALLX_M #define BRI12_S RRR_S #ifdef TARGET_WORDS_BIGENDIAN #define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2)) #else #define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4)) #endif #define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12) #define BRI8_M BRI12_M #define BRI8_R RRI8_R #define BRI8_S RRI8_S #define BRI8_IMM8 RRI8_IMM8 #define BRI8_IMM8_SE RRI8_IMM8_SE #define RSR_SR (b1) uint8_t b0 = ldub_code(dc->pc); uint8_t b1 = ldub_code(dc->pc + 1); uint8_t b2 = 0; static const uint32_t B4CONST[] = { 0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 }; static const uint32_t B4CONSTU[] = { 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 }; if (OP0 >= 8) { dc->next_pc = dc->pc + 2; HAS_OPTION(XTENSA_OPTION_CODE_DENSITY); } else { dc->next_pc = dc->pc + 3; b2 = ldub_code(dc->pc + 2); } switch (OP0) { case 0: /*QRST*/ switch (OP1) { case 0: /*RST0*/ switch (OP2) { case 0: /*ST0*/ if ((RRR_R & 0xc) == 0x8) { HAS_OPTION(XTENSA_OPTION_BOOLEAN); } switch (RRR_R) { case 0: /*SNM0*/ switch (CALLX_M) { case 0: /*ILL*/ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); break; case 1: /*reserved*/ RESERVED(); break; case 2: /*JR*/ switch (CALLX_N) { case 0: /*RET*/ case 2: /*JX*/ gen_window_check1(dc, CALLX_S); gen_jump(dc, cpu_R[CALLX_S]); break; case 1: /*RETWw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 tmp = tcg_const_i32(dc->pc); gen_advance_ccount(dc); gen_helper_retw(tmp, tmp); gen_jump(dc, tmp); tcg_temp_free(tmp); } break; case 3: /*reserved*/ RESERVED(); break; } break; case 3: /*CALLX*/ gen_window_check2(dc, CALLX_S, CALLX_N << 2); switch (CALLX_N) { case 0: /*CALLX0*/ { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]); tcg_gen_movi_i32(cpu_R[0], dc->next_pc); gen_jump(dc, tmp); tcg_temp_free(tmp); } break; case 1: /*CALLX4w*/ case 2: /*CALLX8w*/ case 3: /*CALLX12w*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]); gen_callw(dc, CALLX_N, tmp); tcg_temp_free(tmp); } break; } break; } break; case 1: /*MOVSPw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_window_check2(dc, RRR_T, RRR_S); { TCGv_i32 pc = tcg_const_i32(dc->pc); gen_advance_ccount(dc); gen_helper_movsp(pc); tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]); tcg_temp_free(pc); } break; case 2: /*SYNC*/ switch (RRR_T) { case 0: /*ISYNC*/ break; case 1: /*RSYNC*/ break; case 2: /*ESYNC*/ break; case 3: /*DSYNC*/ break; case 8: /*EXCW*/ HAS_OPTION(XTENSA_OPTION_EXCEPTION); break; case 12: /*MEMW*/ break; case 13: /*EXTW*/ break; case 15: /*NOP*/ break; default: /*reserved*/ RESERVED(); break; } break; case 3: /*RFEIx*/ switch (RRR_T) { case 0: /*RFETx*/ HAS_OPTION(XTENSA_OPTION_EXCEPTION); switch (RRR_S) { case 0: /*RFEx*/ gen_check_privilege(dc); tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); gen_helper_check_interrupts(cpu_env); gen_jump(dc, cpu_SR[EPC1]); break; case 1: /*RFUEx*/ RESERVED(); break; case 2: /*RFDEx*/ gen_check_privilege(dc); gen_jump(dc, cpu_SR[ dc->config->ndepc ? DEPC : EPC1]); break; case 4: /*RFWOw*/ case 5: /*RFWUw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_check_privilege(dc); { TCGv_i32 tmp = tcg_const_i32(1); tcg_gen_andi_i32( cpu_SR[PS], cpu_SR[PS], ~PS_EXCM); tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]); if (RRR_S == 4) { tcg_gen_andc_i32(cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], tmp); } else { tcg_gen_or_i32(cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], tmp); } gen_helper_restore_owb(); gen_helper_check_interrupts(cpu_env); gen_jump(dc, cpu_SR[EPC1]); tcg_temp_free(tmp); } break; default: /*reserved*/ RESERVED(); break; } break; case 1: /*RFIx*/ HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT); if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) { gen_check_privilege(dc); tcg_gen_mov_i32(cpu_SR[PS], cpu_SR[EPS2 + RRR_S - 2]); gen_helper_check_interrupts(cpu_env); gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]); } else { qemu_log("RFI %d is illegal\n", RRR_S); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); } break; case 2: /*RFME*/ TBD(); break; default: /*reserved*/ RESERVED(); break; } break; case 4: /*BREAKx*/ HAS_OPTION(XTENSA_OPTION_DEBUG); if (dc->debug) { gen_debug_exception(dc, DEBUGCAUSE_BI); } break; case 5: /*SYSCALLx*/ HAS_OPTION(XTENSA_OPTION_EXCEPTION); switch (RRR_S) { case 0: /*SYSCALLx*/ gen_exception_cause(dc, SYSCALL_CAUSE); break; case 1: /*SIMCALL*/ if (semihosting_enabled) { gen_check_privilege(dc); gen_helper_simcall(cpu_env); } else { qemu_log("SIMCALL but semihosting is disabled\n"); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); } break; default: RESERVED(); break; } break; case 6: /*RSILx*/ HAS_OPTION(XTENSA_OPTION_INTERRUPT); gen_check_privilege(dc); gen_window_check1(dc, RRR_T); tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]); tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL); tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S); gen_helper_check_interrupts(cpu_env); gen_jumpi_check_loop_end(dc, 0); break; case 7: /*WAITIx*/ HAS_OPTION(XTENSA_OPTION_INTERRUPT); gen_check_privilege(dc); gen_waiti(dc, RRR_S); break; case 8: /*ANY4p*/ case 9: /*ALL4p*/ case 10: /*ANY8p*/ case 11: /*ALL8p*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); { const unsigned shift = (RRR_R & 2) ? 8 : 4; TCGv_i32 mask = tcg_const_i32( ((1 << shift) - 1) << RRR_S); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, cpu_SR[BR], mask); if (RRR_R & 1) { /*ALL*/ tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S); } else { /*ANY*/ tcg_gen_add_i32(tmp, tmp, mask); } tcg_gen_shri_i32(tmp, tmp, RRR_S + shift); tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp, RRR_T, 1); tcg_temp_free(mask); tcg_temp_free(tmp); } break; default: /*reserved*/ RESERVED(); break; } break; case 1: /*AND*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 2: /*OR*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 3: /*XOR*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 4: /*ST1*/ switch (RRR_R) { case 0: /*SSR*/ gen_window_check1(dc, RRR_S); gen_right_shift_sar(dc, cpu_R[RRR_S]); break; case 1: /*SSL*/ gen_window_check1(dc, RRR_S); gen_left_shift_sar(dc, cpu_R[RRR_S]); break; case 2: /*SSA8L*/ gen_window_check1(dc, RRR_S); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3); gen_right_shift_sar(dc, tmp); tcg_temp_free(tmp); } break; case 3: /*SSA8B*/ gen_window_check1(dc, RRR_S); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3); gen_left_shift_sar(dc, tmp); tcg_temp_free(tmp); } break; case 4: /*SSAI*/ { TCGv_i32 tmp = tcg_const_i32( RRR_S | ((RRR_T & 1) << 4)); gen_right_shift_sar(dc, tmp); tcg_temp_free(tmp); } break; case 6: /*RER*/ TBD(); break; case 7: /*WER*/ TBD(); break; case 8: /*ROTWw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_check_privilege(dc); { TCGv_i32 tmp = tcg_const_i32( RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0)); gen_helper_rotw(tmp); tcg_temp_free(tmp); reset_used_window(dc); } break; case 14: /*NSAu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA); gen_window_check2(dc, RRR_S, RRR_T); gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]); break; case 15: /*NSAUu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA); gen_window_check2(dc, RRR_S, RRR_T); gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]); break; default: /*reserved*/ RESERVED(); break; } break; case 5: /*TLB*/ HAS_OPTION_BITS( XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) | XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) | XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION)); gen_check_privilege(dc); gen_window_check2(dc, RRR_S, RRR_T); { TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0); switch (RRR_R & 7) { case 3: /*RITLB0*/ /*RDTLB0*/ gen_helper_rtlb0(cpu_R[RRR_T], cpu_R[RRR_S], dtlb); break; case 4: /*IITLB*/ /*IDTLB*/ gen_helper_itlb(cpu_R[RRR_S], dtlb); /* This could change memory mapping, so exit tb */ gen_jumpi_check_loop_end(dc, -1); break; case 5: /*PITLB*/ /*PDTLB*/ tcg_gen_movi_i32(cpu_pc, dc->pc); gen_helper_ptlb(cpu_R[RRR_T], cpu_R[RRR_S], dtlb); break; case 6: /*WITLB*/ /*WDTLB*/ gen_helper_wtlb(cpu_R[RRR_T], cpu_R[RRR_S], dtlb); /* This could change memory mapping, so exit tb */ gen_jumpi_check_loop_end(dc, -1); break; case 7: /*RITLB1*/ /*RDTLB1*/ gen_helper_rtlb1(cpu_R[RRR_T], cpu_R[RRR_S], dtlb); break; default: tcg_temp_free(dtlb); RESERVED(); break; } tcg_temp_free(dtlb); } break; case 6: /*RT0*/ gen_window_check2(dc, RRR_R, RRR_T); switch (RRR_S) { case 0: /*NEG*/ tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]); break; case 1: /*ABS*/ { int label = gen_new_label(); tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]); tcg_gen_brcondi_i32( TCG_COND_GE, cpu_R[RRR_R], 0, label); tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
gen_set_label(label); } break; default: /*reserved*/ RESERVED(); break; } break; case 7: /*reserved*/ RESERVED(); break; case 8: /*ADD*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 9: /*ADD**/ case 10: case 11: gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8); tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]); tcg_temp_free(tmp); } break; case 12: /*SUB*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 13: /*SUB**/ case 14: case 15: gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12); tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]); tcg_temp_free(tmp); } break; } break; case 1: /*RST1*/ switch (OP2) { case 0: /*SLLI*/ case 1: gen_window_check2(dc, RRR_R, RRR_S); tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S], 32 - (RRR_T | ((OP2 & 1) << 4))); break; case 2: /*SRAI*/ case 3: gen_window_check2(dc, RRR_R, RRR_T); tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S | ((OP2 & 1) << 4)); break; case 4: /*SRLI*/ gen_window_check2(dc, RRR_R, RRR_T); tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S); break; case 6: /*XSR*/ { TCGv_i32 tmp = tcg_temp_new_i32(); if (RSR_SR >= 64) { gen_check_privilege(dc); } gen_window_check1(dc, RRR_T); tcg_gen_mov_i32(tmp, cpu_R[RRR_T]); gen_rsr(dc, cpu_R[RRR_T], RSR_SR); gen_wsr(dc, RSR_SR, tmp); tcg_temp_free(tmp); if (!sregnames[RSR_SR]) { TBD(); } } break; /* * Note: 64 bit ops are used here solely because SAR values * have range 0..63 */ #define gen_shift_reg(cmd, reg) do { \ TCGv_i64 tmp = tcg_temp_new_i64(); \ tcg_gen_extu_i32_i64(tmp, reg); \ tcg_gen_##cmd##_i64(v, v, tmp); \ tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \ tcg_temp_free_i64(v); \ tcg_temp_free_i64(tmp); \ } while (0) #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR]) case 8: /*SRC*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { TCGv_i64 v = tcg_temp_new_i64(); tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]); gen_shift(shr); } break; case 9: /*SRL*/ gen_window_check2(dc, RRR_R, RRR_T); if (dc->sar_5bit) { tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]); } else { TCGv_i64 v = tcg_temp_new_i64(); tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]); gen_shift(shr); } break; case 10: /*SLL*/ gen_window_check2(dc, RRR_R, RRR_S); if (dc->sar_m32_5bit) { tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32); } else { TCGv_i64 v = tcg_temp_new_i64(); TCGv_i32 s = tcg_const_i32(32); tcg_gen_sub_i32(s, s, cpu_SR[SAR]); tcg_gen_andi_i32(s, s, 0x3f); tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]); gen_shift_reg(shl, s); tcg_temp_free(s); } break; case 11: /*SRA*/ gen_window_check2(dc, RRR_R, RRR_T); if (dc->sar_5bit) { tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]); } else { TCGv_i64 v = tcg_temp_new_i64(); tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]); gen_shift(sar); } break; #undef gen_shift #undef gen_shift_reg case 12: /*MUL16U*/ HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL); gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { TCGv_i32 v1 = tcg_temp_new_i32(); TCGv_i32 v2 = tcg_temp_new_i32(); tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]); tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]); tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2); tcg_temp_free(v2); tcg_temp_free(v1); } break; case 13: /*MUL16S*/ HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL); gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { TCGv_i32 v1 = tcg_temp_new_i32(); TCGv_i32 v2 = tcg_temp_new_i32(); 
tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]); tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]); tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2); tcg_temp_free(v2); tcg_temp_free(v1); } break; default: /*reserved*/ RESERVED(); break; } break; case 2: /*RST2*/ if (OP2 >= 8) { gen_window_check3(dc, RRR_R, RRR_S, RRR_T); } if (OP2 >= 12) { HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV); int label = gen_new_label(); tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label); gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE); gen_set_label(label); } switch (OP2) { #define BOOLEAN_LOGIC(fn, r, s, t) \ do { \ HAS_OPTION(XTENSA_OPTION_BOOLEAN); \ TCGv_i32 tmp1 = tcg_temp_new_i32(); \ TCGv_i32 tmp2 = tcg_temp_new_i32(); \ \ tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \ tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \ tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \ tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \ tcg_temp_free(tmp1); \ tcg_temp_free(tmp2); \ } while (0) case 0: /*ANDBp*/ BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T); break; case 1: /*ANDBCp*/ BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T); break; case 2: /*ORBp*/ BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T); break; case 3: /*ORBCp*/ BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T); break; case 4: /*XORBp*/ BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T); break; #undef BOOLEAN_LOGIC case 8: /*MULLi*/ HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL); tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 10: /*MULUHi*/ case 11: /*MULSHi*/ HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH); { TCGv_i64 r = tcg_temp_new_i64(); TCGv_i64 s = tcg_temp_new_i64(); TCGv_i64 t = tcg_temp_new_i64(); if (OP2 == 10) { tcg_gen_extu_i32_i64(s, cpu_R[RRR_S]); tcg_gen_extu_i32_i64(t, cpu_R[RRR_T]); } else { tcg_gen_ext_i32_i64(s, cpu_R[RRR_S]); tcg_gen_ext_i32_i64(t, cpu_R[RRR_T]); } tcg_gen_mul_i64(r, s, t); tcg_gen_shri_i64(r, r, 32); tcg_gen_trunc_i64_i32(cpu_R[RRR_R], r); tcg_temp_free_i64(r); tcg_temp_free_i64(s); tcg_temp_free_i64(t); } break; case 12: /*QUOUi*/ tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; case 13: /*QUOSi*/ case 15: /*REMSi*/ { int label1 = gen_new_label(); int label2 = gen_new_label(); tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000, label1); tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff, label1); tcg_gen_movi_i32(cpu_R[RRR_R], OP2 == 13 ? 
0x80000000 : 0); tcg_gen_br(label2); gen_set_label(label1); if (OP2 == 13) { tcg_gen_div_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } else { tcg_gen_rem_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); } gen_set_label(label2); } break; case 14: /*REMUi*/ tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]); break; default: /*reserved*/ RESERVED(); break; } break; case 3: /*RST3*/ switch (OP2) { case 0: /*RSR*/ if (RSR_SR >= 64) { gen_check_privilege(dc); } gen_window_check1(dc, RRR_T); gen_rsr(dc, cpu_R[RRR_T], RSR_SR); if (!sregnames[RSR_SR]) { TBD(); } break; case 1: /*WSR*/ if (RSR_SR >= 64) { gen_check_privilege(dc); } gen_window_check1(dc, RRR_T); gen_wsr(dc, RSR_SR, cpu_R[RRR_T]); if (!sregnames[RSR_SR]) { TBD(); } break; case 2: /*SEXTu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT); gen_window_check2(dc, RRR_R, RRR_S); { int shift = 24 - RRR_T; if (shift == 24) { tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]); } else if (shift == 16) { tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]); } else { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift); tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift); tcg_temp_free(tmp); } } break; case 3: /*CLAMPSu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS); gen_window_check2(dc, RRR_R, RRR_S); { TCGv_i32 tmp1 = tcg_temp_new_i32(); TCGv_i32 tmp2 = tcg_temp_new_i32(); int label = gen_new_label(); tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T); tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]); tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7)); tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp2, 0, label); tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31); tcg_gen_xori_i32(cpu_R[RRR_R], tmp1, 0xffffffff >> (25 - RRR_T)); gen_set_label(label); tcg_temp_free(tmp1); tcg_temp_free(tmp2); } break; case 4: /*MINu*/ case 5: /*MAXu*/ case 6: /*MINUu*/ case 7: /*MAXUu*/ HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX); gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { static const TCGCond cond[] = { TCG_COND_LE, TCG_COND_GE, TCG_COND_LEU, TCG_COND_GEU }; int label = gen_new_label(); if (RRR_R != RRR_T) { tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); tcg_gen_brcond_i32(cond[OP2 - 4], cpu_R[RRR_S], cpu_R[RRR_T], label); tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_T]); } else { tcg_gen_brcond_i32(cond[OP2 - 4], cpu_R[RRR_T], cpu_R[RRR_S], label); tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); } gen_set_label(label); } break; case 8: /*MOVEQZ*/ case 9: /*MOVNEZ*/ case 10: /*MOVLTZ*/ case 11: /*MOVGEZ*/ gen_window_check3(dc, RRR_R, RRR_S, RRR_T); { static const TCGCond cond[] = { TCG_COND_NE, TCG_COND_EQ, TCG_COND_GE, TCG_COND_LT }; int label = gen_new_label(); tcg_gen_brcondi_i32(cond[OP2 - 8], cpu_R[RRR_T], 0, label); tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); gen_set_label(label); } break; case 12: /*MOVFp*/ case 13: /*MOVTp*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); gen_window_check2(dc, RRR_R, RRR_S); { int label = gen_new_label(); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T); tcg_gen_brcondi_i32( OP2 & 1 ? 
TCG_COND_EQ : TCG_COND_NE, tmp, 0, label); tcg_gen_mov_i32(cpu_R[RRR_R], cpu_R[RRR_S]); gen_set_label(label); tcg_temp_free(tmp); } break; case 14: /*RUR*/ gen_window_check1(dc, RRR_R); { int st = (RRR_S << 4) + RRR_T; if (uregnames[st]) { tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]); } else { qemu_log("RUR %d not implemented, ", st); TBD(); } } break; case 15: /*WUR*/ gen_window_check1(dc, RRR_T); { if (uregnames[RSR_SR]) { tcg_gen_mov_i32(cpu_UR[RSR_SR], cpu_R[RRR_T]); } else { qemu_log("WUR %d not implemented, ", RSR_SR); TBD(); } } break; } break; case 4: /*EXTUI*/ case 5: gen_window_check2(dc, RRR_R, RRR_T); { int shiftimm = RRR_S | (OP1 << 4); int maskimm = (1 << (OP2 + 1)) - 1; TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm); tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm); tcg_temp_free(tmp); } break; case 6: /*CUST0*/ RESERVED(); break; case 7: /*CUST1*/ RESERVED(); break; case 8: /*LSCXp*/ HAS_OPTION(XTENSA_OPTION_COPROCESSOR); TBD(); break; case 9: /*LSC4*/ gen_window_check2(dc, RRR_S, RRR_T); switch (OP2) { case 0: /*L32E*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_check_privilege(dc); { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_addi_i32(addr, cpu_R[RRR_S], (0xffffffc0 | (RRR_R << 2))); tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring); tcg_temp_free(addr); } break; case 4: /*S32E*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_check_privilege(dc); { TCGv_i32 addr = tcg_temp_new_i32(); tcg_gen_addi_i32(addr, cpu_R[RRR_S], (0xffffffc0 | (RRR_R << 2))); tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring); tcg_temp_free(addr); } break; default: RESERVED(); break; } break; case 10: /*FP0*/ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); TBD(); break; case 11: /*FP1*/ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR); TBD(); break; default: /*reserved*/ RESERVED(); break; } break; case 1: /*L32R*/ gen_window_check1(dc, RRR_T); { TCGv_i32 tmp = tcg_const_i32( ((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ? 
0 : ((dc->pc + 3) & ~3)) + (0xfffc0000 | (RI16_IMM16 << 2))); if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) { tcg_gen_add_i32(tmp, tmp, dc->litbase); } tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring); tcg_temp_free(tmp); } break; case 2: /*LSAI*/ #define gen_load_store(type, shift) do { \ TCGv_i32 addr = tcg_temp_new_i32(); \ gen_window_check2(dc, RRI8_S, RRI8_T); \ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \ if (shift) { \ gen_load_store_alignment(dc, shift, addr, false); \ } \ tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \ tcg_temp_free(addr); \ } while (0) switch (RRI8_R) { case 0: /*L8UI*/ gen_load_store(ld8u, 0); break; case 1: /*L16UI*/ gen_load_store(ld16u, 1); break; case 2: /*L32I*/ gen_load_store(ld32u, 2); break; case 4: /*S8I*/ gen_load_store(st8, 0); break; case 5: /*S16I*/ gen_load_store(st16, 1); break; case 6: /*S32I*/ gen_load_store(st32, 2); break; case 7: /*CACHEc*/ if (RRI8_T < 8) { HAS_OPTION(XTENSA_OPTION_DCACHE); } switch (RRI8_T) { case 0: /*DPFRc*/ break; case 1: /*DPFWc*/ break; case 2: /*DPFROc*/ break; case 3: /*DPFWOc*/ break; case 4: /*DHWBc*/ break; case 5: /*DHWBIc*/ break; case 6: /*DHIc*/ break; case 7: /*DIIc*/ break; case 8: /*DCEc*/ switch (OP1) { case 0: /*DPFLl*/ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); break; case 2: /*DHUl*/ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); break; case 3: /*DIUl*/ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK); break; case 4: /*DIWBc*/ HAS_OPTION(XTENSA_OPTION_DCACHE); break; case 5: /*DIWBIc*/ HAS_OPTION(XTENSA_OPTION_DCACHE); break; default: /*reserved*/ RESERVED(); break; } break; case 12: /*IPFc*/ HAS_OPTION(XTENSA_OPTION_ICACHE); break; case 13: /*ICEc*/ switch (OP1) { case 0: /*IPFLl*/ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); break; case 2: /*IHUl*/ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); break; case 3: /*IIUl*/ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK); break; default: /*reserved*/ RESERVED(); break; } break; case 14: /*IHIc*/ HAS_OPTION(XTENSA_OPTION_ICACHE); break; case 15: /*IIIc*/ HAS_OPTION(XTENSA_OPTION_ICACHE); break; default: /*reserved*/ RESERVED(); break; } break; case 9: /*L16SI*/ gen_load_store(ld16s, 1); break; #undef gen_load_store case 10: /*MOVI*/ gen_window_check1(dc, RRI8_T); tcg_gen_movi_i32(cpu_R[RRI8_T], RRI8_IMM8 | (RRI8_S << 8) | ((RRI8_S & 0x8) ? 
0xfffff000 : 0)); break; #define gen_load_store_no_hw_align(type) do { \ TCGv_i32 addr = tcg_temp_local_new_i32(); \ gen_window_check2(dc, RRI8_S, RRI8_T); \ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \ gen_load_store_alignment(dc, 2, addr, true); \ tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \ tcg_temp_free(addr); \ } while (0) case 11: /*L32AIy*/ HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO); gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/ break; case 12: /*ADDI*/ gen_window_check2(dc, RRI8_S, RRI8_T); tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE); break; case 13: /*ADDMI*/ gen_window_check2(dc, RRI8_S, RRI8_T); tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE << 8); break; case 14: /*S32C1Iy*/ HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE); gen_window_check2(dc, RRI8_S, RRI8_T); { int label = gen_new_label(); TCGv_i32 tmp = tcg_temp_local_new_i32(); TCGv_i32 addr = tcg_temp_local_new_i32(); tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]); tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); gen_load_store_alignment(dc, 2, addr, true); tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring); tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T], cpu_SR[SCOMPARE1], label); tcg_gen_qemu_st32(tmp, addr, dc->cring); gen_set_label(label); tcg_temp_free(addr); tcg_temp_free(tmp); } break; case 15: /*S32RIy*/ HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO); gen_load_store_no_hw_align(st32); /*TODO release?*/ break; #undef gen_load_store_no_hw_align default: /*reserved*/ RESERVED(); break; } break; case 3: /*LSCIp*/ HAS_OPTION(XTENSA_OPTION_COPROCESSOR); TBD(); break; case 4: /*MAC16d*/ HAS_OPTION(XTENSA_OPTION_MAC16); { enum { MAC16_UMUL = 0x0, MAC16_MUL = 0x4, MAC16_MULA = 0x8, MAC16_MULS = 0xc, MAC16_NONE = 0xf, } op = OP1 & 0xc; bool is_m1_sr = (OP2 & 0x3) == 2; bool is_m2_sr = (OP2 & 0xc) == 0; uint32_t ld_offset = 0; if (OP2 > 9) { RESERVED(); } switch (OP2 & 2) { case 0: /*MACI?/MACC?*/ is_m1_sr = true; ld_offset = (OP2 & 1) ? -4 : 4; if (OP2 >= 8) { /*MACI/MACC*/ if (OP1 == 0) { /*LDINC/LDDEC*/ op = MAC16_NONE; } else { RESERVED(); } } else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/ RESERVED(); } break; case 2: /*MACD?/MACA?*/ if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/ RESERVED(); } break; } if (op != MAC16_NONE) { if (!is_m1_sr) { gen_window_check1(dc, RRR_S); } if (!is_m2_sr) { gen_window_check1(dc, RRR_T); } } { TCGv_i32 vaddr = tcg_temp_new_i32(); TCGv_i32 mem32 = tcg_temp_new_i32(); if (ld_offset) { gen_window_check1(dc, RRR_S); tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset); gen_load_store_alignment(dc, 2, vaddr, false); tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring); } if (op != MAC16_NONE) { TCGv_i32 m1 = gen_mac16_m( is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S], OP1 & 1, op == MAC16_UMUL); TCGv_i32 m2 = gen_mac16_m( is_m2_sr ? 
cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T], OP1 & 2, op == MAC16_UMUL); if (op == MAC16_MUL || op == MAC16_UMUL) { tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2); if (op == MAC16_UMUL) { tcg_gen_movi_i32(cpu_SR[ACCHI], 0); } else { tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31); } } else { TCGv_i32 res = tcg_temp_new_i32(); TCGv_i64 res64 = tcg_temp_new_i64(); TCGv_i64 tmp = tcg_temp_new_i64(); tcg_gen_mul_i32(res, m1, m2); tcg_gen_ext_i32_i64(res64, res); tcg_gen_concat_i32_i64(tmp, cpu_SR[ACCLO], cpu_SR[ACCHI]); if (op == MAC16_MULA) { tcg_gen_add_i64(tmp, tmp, res64); } else { tcg_gen_sub_i64(tmp, tmp, res64); } tcg_gen_trunc_i64_i32(cpu_SR[ACCLO], tmp); tcg_gen_shri_i64(tmp, tmp, 32); tcg_gen_trunc_i64_i32(cpu_SR[ACCHI], tmp); tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]); tcg_temp_free(res); tcg_temp_free_i64(res64); tcg_temp_free_i64(tmp); } tcg_temp_free(m1); tcg_temp_free(m2); } if (ld_offset) { tcg_gen_mov_i32(cpu_R[RRR_S], vaddr); tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32); } tcg_temp_free(vaddr); tcg_temp_free(mem32); } } break; case 5: /*CALLN*/ switch (CALL_N) { case 0: /*CALL0*/ tcg_gen_movi_i32(cpu_R[0], dc->next_pc); gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0); break; case 1: /*CALL4w*/ case 2: /*CALL8w*/ case 3: /*CALL12w*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); gen_window_check1(dc, CALL_N << 2); gen_callwi(dc, CALL_N, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0); break; } break; case 6: /*SI*/ switch (CALL_N) { case 0: /*J*/ gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0); break; case 1: /*BZ*/ gen_window_check1(dc, BRI12_S); { static const TCGCond cond[] = { TCG_COND_EQ, /*BEQZ*/ TCG_COND_NE, /*BNEZ*/ TCG_COND_LT, /*BLTZ*/ TCG_COND_GE, /*BGEZ*/ }; gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0, 4 + BRI12_IMM12_SE); } break; case 2: /*BI0*/ gen_window_check1(dc, BRI8_S); { static const TCGCond cond[] = { TCG_COND_EQ, /*BEQI*/ TCG_COND_NE, /*BNEI*/ TCG_COND_LT, /*BLTI*/ TCG_COND_GE, /*BGEI*/ }; gen_brcondi(dc, cond[BRI8_M & 3], cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE); } break; case 3: /*BI1*/ switch (BRI8_M) { case 0: /*ENTRYw*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 pc = tcg_const_i32(dc->pc); TCGv_i32 s = tcg_const_i32(BRI12_S); TCGv_i32 imm = tcg_const_i32(BRI12_IMM12); gen_advance_ccount(dc); gen_helper_entry(pc, s, imm); tcg_temp_free(imm); tcg_temp_free(s); tcg_temp_free(pc); reset_used_window(dc); } break; case 1: /*B1*/ switch (BRI8_R) { case 0: /*BFp*/ case 1: /*BTp*/ HAS_OPTION(XTENSA_OPTION_BOOLEAN); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S); gen_brcondi(dc, BRI8_R == 1 ? TCG_COND_NE : TCG_COND_EQ, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; case 8: /*LOOP*/ case 9: /*LOOPNEZ*/ case 10: /*LOOPGTZ*/ HAS_OPTION(XTENSA_OPTION_LOOP); gen_window_check1(dc, RRI8_S); { uint32_t lend = dc->pc + RRI8_IMM8 + 4; TCGv_i32 tmp = tcg_const_i32(lend); tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1); tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc); gen_wsr_lend(dc, LEND, tmp); tcg_temp_free(tmp); if (BRI8_R > 8) { int label = gen_new_label(); tcg_gen_brcondi_i32( BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT, cpu_R[RRI8_S], 0, label); gen_jumpi(dc, lend, 1); gen_set_label(label); } gen_jumpi(dc, dc->next_pc, 0); } break; default: /*reserved*/ RESERVED(); break; } break; case 2: /*BLTUI*/ case 3: /*BGEUI*/ gen_window_check1(dc, BRI8_S); gen_brcondi(dc, BRI8_M == 2 ? 
TCG_COND_LTU : TCG_COND_GEU, cpu_R[BRI8_S], B4CONSTU[BRI8_R], 4 + BRI8_IMM8_SE); break; } break; } break; case 7: /*B*/ { TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ; switch (RRI8_R & 7) { case 0: /*BNONE*/ /*BANY*/ gen_window_check2(dc, RRI8_S, RRI8_T); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]); gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; case 1: /*BEQ*/ /*BNE*/ case 2: /*BLT*/ /*BGE*/ case 3: /*BLTU*/ /*BGEU*/ gen_window_check2(dc, RRI8_S, RRI8_T); { static const TCGCond cond[] = { [1] = TCG_COND_EQ, [2] = TCG_COND_LT, [3] = TCG_COND_LTU, [9] = TCG_COND_NE, [10] = TCG_COND_GE, [11] = TCG_COND_GEU, }; gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T], 4 + RRI8_IMM8_SE); } break; case 4: /*BALL*/ /*BNALL*/ gen_window_check2(dc, RRI8_S, RRI8_T); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]); gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T], 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; case 5: /*BBC*/ /*BBS*/ gen_window_check2(dc, RRI8_S, RRI8_T); { TCGv_i32 bit = tcg_const_i32(1); TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f); tcg_gen_shl_i32(bit, bit, tmp); tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit); gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); tcg_temp_free(bit); } break; case 6: /*BBCI*/ /*BBSI*/ case 7: gen_window_check1(dc, RRI8_S); { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_R[RRI8_S], 1 << (((RRI8_R & 1) << 4) | RRI8_T)); gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE); tcg_temp_free(tmp); } break; } } break; #define gen_narrow_load_store(type) do { \ TCGv_i32 addr = tcg_temp_new_i32(); \ gen_window_check2(dc, RRRN_S, RRRN_T); \ tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \ gen_load_store_alignment(dc, 2, addr, false); \ tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \ tcg_temp_free(addr); \ } while (0) case 8: /*L32I.Nn*/ gen_narrow_load_store(ld32u); break; case 9: /*S32I.Nn*/ gen_narrow_load_store(st32); break; #undef gen_narrow_load_store case 10: /*ADD.Nn*/ gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T); tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]); break; case 11: /*ADDI.Nn*/ gen_window_check2(dc, RRRN_R, RRRN_S); tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], RRRN_T ? RRRN_T : -1); break; case 12: /*ST2n*/ gen_window_check1(dc, RRRN_S); if (RRRN_T < 8) { /*MOVI.Nn*/ tcg_gen_movi_i32(cpu_R[RRRN_S], RRRN_R | (RRRN_T << 4) | ((RRRN_T & 6) == 6 ? 0xffffff80 : 0)); } else { /*BEQZ.Nn*/ /*BNEZ.Nn*/ TCGCond eq_ne = (RRRN_T & 4) ? 
TCG_COND_NE : TCG_COND_EQ; gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0, 4 + (RRRN_R | ((RRRN_T & 3) << 4))); } break; case 13: /*ST3n*/ switch (RRRN_R) { case 0: /*MOV.Nn*/ gen_window_check2(dc, RRRN_S, RRRN_T); tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]); break; case 15: /*S3*/ switch (RRRN_T) { case 0: /*RET.Nn*/ gen_jump(dc, cpu_R[0]); break; case 1: /*RETW.Nn*/ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER); { TCGv_i32 tmp = tcg_const_i32(dc->pc); gen_advance_ccount(dc); gen_helper_retw(tmp, tmp); gen_jump(dc, tmp); tcg_temp_free(tmp); } break; case 2: /*BREAK.Nn*/ HAS_OPTION(XTENSA_OPTION_DEBUG); if (dc->debug) { gen_debug_exception(dc, DEBUGCAUSE_BN); } break; case 3: /*NOP.Nn*/ break; case 6: /*ILL.Nn*/ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); break; default: /*reserved*/ RESERVED(); break; } break; default: /*reserved*/ RESERVED(); break; } break; default: /*reserved*/ RESERVED(); break; } gen_check_loop_end(dc, 0); dc->pc = dc->next_pc; return; invalid_opcode: qemu_log("INVALID(pc = %08x)\n", dc->pc); gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE); #undef HAS_OPTION }
15,792
0
static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp opc,
                               TCGReg addrlo, TCGReg addrhi,
                               int mem_index, bool is_read)
{
    int cmp_off
        = (is_read
           ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
           : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
    int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    TCGReg base = TCG_AREG0;
    TCGMemOp s_bits = opc & MO_SIZE;

    /* Extract the page index, shifted into place for tlb index.  */
    if (TCG_TARGET_REG_BITS == 64) {
        if (TARGET_LONG_BITS == 32) {
            /* Zero-extend the address into a place helpful for further use. */
            tcg_out_ext32u(s, TCG_REG_R4, addrlo);
            addrlo = TCG_REG_R4;
        } else {
            tcg_out_rld(s, RLDICL, TCG_REG_R3, addrlo,
                        64 - TARGET_PAGE_BITS, 64 - CPU_TLB_BITS);
        }
    }

    /* Compensate for very large offsets.  */
    if (add_off >= 0x8000) {
        /* Most target env are smaller than 32k; none are larger than 64k.
           Simplify the logic here merely to offset by 0x7ff0, giving us a
           range just shy of 64k.  Check this assumption.  */
        QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
                                   tlb_table[NB_MMU_MODES - 1][1])
                          > 0x7ff0 + 0x7fff);

        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, base, 0x7ff0));
        base = TCG_REG_TMP1;
        cmp_off -= 0x7ff0;
        add_off -= 0x7ff0;
    }

    /* Extraction and shifting, part 2.  */
    if (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32) {
        tcg_out_rlw(s, RLWINM, TCG_REG_R3, addrlo,
                    32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                    32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS),
                    31 - CPU_TLB_ENTRY_BITS);
    } else {
        tcg_out_shli64(s, TCG_REG_R3, TCG_REG_R3, CPU_TLB_ENTRY_BITS);
    }

    tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, base));

    /* Load the tlb comparator.  */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
        tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
    } else {
        tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
    }

    /* Load the TLB addend for use on the fast path.  Do this asap
       to minimize any load use delay.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3, add_off);

    /* Clear the non-page, non-alignment bits from the address */
    if (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32) {
        /* We don't support unaligned accesses on 32-bits, preserve
         * the bottom bits and thus trigger a comparison failure on
         * unaligned accesses
         */
        tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
                    (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS);
    } else if (s_bits) {
        /* > byte access, we need to handle alignment */
        if ((opc & MO_AMASK) == MO_ALIGN) {
            /* Alignment required by the front-end, same as 32-bits */
            tcg_out_rld(s, RLDICL, TCG_REG_R0, addrlo,
                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits);
            tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
        } else {
            /* We support unaligned accesses, we need to make sure we fail
             * if we cross a page boundary. The trick is to add the
             * access_size-1 to the address before masking the low bits.
             * That will make the address overflow to the next page if we
             * cross a page boundary which will then force a mismatch of
             * the TLB compare since the next page cannot possibly be in
             * the same TLB index.
             */
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, (1 << s_bits) - 1));
            tcg_out_rld(s, RLDICR, TCG_REG_R0, TCG_REG_R0,
                        0, 63 - TARGET_PAGE_BITS);
        }
    } else {
        /* Byte access, just chop off the bits below the page index */
        tcg_out_rld(s, RLDICR, TCG_REG_R0, addrlo, 0, 63 - TARGET_PAGE_BITS);
    }

    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
                    0, 7, TCG_TYPE_I32);
        tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
        tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
    } else {
        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
                    0, 7, TCG_TYPE_TL);
    }

    return addrlo;
}
15,793
0
static av_always_inline int dnxhd_decode_dct_block(const DNXHDContext *ctx,
                                                   RowContext *row,
                                                   int n,
                                                   int index_bits,
                                                   int level_bias,
                                                   int level_shift,
                                                   int dc_shift)
{
    int i, j, index1, index2, len, flags;
    int level, component, sign;
    const int *scale;
    const uint8_t *weight_matrix;
    const uint8_t *ac_info = ctx->cid_table->ac_info;
    int16_t *block = row->blocks[n];
    const int eob_index = ctx->cid_table->eob_index;
    int ret = 0;

    OPEN_READER(bs, &row->gb);

    ctx->bdsp.clear_block(block);

    if (!ctx->is_444) {
        if (n & 2) {
            component     = 1 + (n & 1);
            scale         = row->chroma_scale;
            weight_matrix = ctx->cid_table->chroma_weight;
        } else {
            component     = 0;
            scale         = row->luma_scale;
            weight_matrix = ctx->cid_table->luma_weight;
        }
    } else {
        component = (n >> 1) % 3;
        if (component) {
            scale         = row->chroma_scale;
            weight_matrix = ctx->cid_table->chroma_weight;
        } else {
            scale         = row->luma_scale;
            weight_matrix = ctx->cid_table->luma_weight;
        }
    }

    UPDATE_CACHE(bs, &row->gb);
    GET_VLC(len, bs, &row->gb, ctx->dc_vlc.table, DNXHD_DC_VLC_BITS, 1);
    if (len) {
        level = GET_CACHE(bs, &row->gb);
        LAST_SKIP_BITS(bs, &row->gb, len);
        sign  = ~level >> 31;
        level = (NEG_USR32(sign ^ level, len) ^ sign) - sign;
        row->last_dc[component] += level << dc_shift;
    }
    block[0] = row->last_dc[component];

    i = 0;

    UPDATE_CACHE(bs, &row->gb);
    GET_VLC(index1, bs, &row->gb, ctx->ac_vlc.table,
            DNXHD_VLC_BITS, 2);

    while (index1 != eob_index) {
        level = ac_info[2 * index1 + 0];
        flags = ac_info[2 * index1 + 1];

        sign = SHOW_SBITS(bs, &row->gb, 1);
        SKIP_BITS(bs, &row->gb, 1);

        if (flags & 1) {
            level += SHOW_UBITS(bs, &row->gb, index_bits) << 7;
            SKIP_BITS(bs, &row->gb, index_bits);
        }

        if (flags & 2) {
            UPDATE_CACHE(bs, &row->gb);
            GET_VLC(index2, bs, &row->gb, ctx->run_vlc.table,
                    DNXHD_VLC_BITS, 2);
            i += ctx->cid_table->run[index2];
        }

        if (++i > 63) {
            av_log(ctx->avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", n, i);
            ret = -1;
            break;
        }

        j      = ctx->scantable.permutated[i];
        level *= scale[i];
        level += scale[i] >> 1;
        if (level_bias < 32 || weight_matrix[i] != level_bias)
            level += level_bias; // 1<<(level_shift-1)
        level >>= level_shift;

        block[j] = (level ^ sign) - sign;

        UPDATE_CACHE(bs, &row->gb);
        GET_VLC(index1, bs, &row->gb, ctx->ac_vlc.table,
                DNXHD_VLC_BITS, 2);
    }

    CLOSE_READER(bs, &row->gb);
    return ret;
}
15,794
0
static uint64_t build_channel_report_mcic(void)
{
    uint64_t mcic;

    /* subclass: indicate channel report pending */
    mcic = MCIC_SC_CP |
        /* subclass modifiers: none */
        /* storage errors: none */
        /* validity bits: no damage */
        MCIC_VB_WP | MCIC_VB_MS | MCIC_VB_PM | MCIC_VB_IA | MCIC_VB_FP |
        MCIC_VB_GR | MCIC_VB_CR | MCIC_VB_ST | MCIC_VB_AR | MCIC_VB_PR |
        MCIC_VB_FC | MCIC_VB_CT | MCIC_VB_CC;
    if (s390_has_feat(S390_FEAT_VECTOR)) {
        mcic |= MCIC_VB_VR;
    }
    if (s390_has_feat(S390_FEAT_GUARDED_STORAGE)) {
        mcic |= MCIC_VB_GS;
    }
    return mcic;
}
15,795
0
static int do_virtio_net_can_receive(VirtIONet *n, int bufsize)
{
    if (!virtio_queue_ready(n->rx_vq) ||
        !(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return 0;

    if (virtio_queue_empty(n->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(n->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(n->rx_vq, 1);
        return 0;
    }

    virtio_queue_set_notification(n->rx_vq, 0);
    return 1;
}
15,796
0
static inline void gen_bcond(DisasContext *ctx, int type)
{
    uint32_t bo = BO(ctx->opcode);
    int l1;
    TCGv target;

    ctx->exception = POWERPC_EXCP_BRANCH;
    if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
        target = tcg_temp_local_new();
        if (type == BCOND_CTR)
            tcg_gen_mov_tl(target, cpu_ctr);
        else if (type == BCOND_TAR)
            gen_load_spr(target, SPR_TAR);
        else
            tcg_gen_mov_tl(target, cpu_lr);
    } else {
        TCGV_UNUSED(target);
    }
    if (LK(ctx->opcode))
        gen_setlr(ctx, ctx->nip);
    l1 = gen_new_label();
    if ((bo & 0x4) == 0) {
        /* Decrement and test CTR */
        TCGv temp = tcg_temp_new();
        if (unlikely(type == BCOND_CTR)) {
            gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
            return;
        }
        tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
        if (NARROW_MODE(ctx)) {
            tcg_gen_ext32u_tl(temp, cpu_ctr);
        } else {
            tcg_gen_mov_tl(temp, cpu_ctr);
        }
        if (bo & 0x2) {
            tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
        } else {
            tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
        }
        tcg_temp_free(temp);
    }
    if ((bo & 0x10) == 0) {
        /* Test CR */
        uint32_t bi = BI(ctx->opcode);
        uint32_t mask = 0x08 >> (bi & 0x03);
        TCGv_i32 temp = tcg_temp_new_i32();

        if (bo & 0x8) {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
        } else {
            tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
            tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
        }
        tcg_temp_free_i32(temp);
    }
    gen_update_cfar(ctx, ctx->nip);
    if (type == BCOND_IM) {
        target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
        if (likely(AA(ctx->opcode) == 0)) {
            gen_goto_tb(ctx, 0, ctx->nip + li - 4);
        } else {
            gen_goto_tb(ctx, 0, li);
        }
        gen_set_label(l1);
        gen_goto_tb(ctx, 1, ctx->nip);
    } else {
        if (NARROW_MODE(ctx)) {
            tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
        } else {
            tcg_gen_andi_tl(cpu_nip, target, ~3);
        }
        tcg_gen_exit_tb(0);
        gen_set_label(l1);
        gen_update_nip(ctx, ctx->nip);
        tcg_gen_exit_tb(0);
    }
    if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
        tcg_temp_free(target);
    }
}
15,797
0
static void menelaus_pre_save(void *opaque)
{
    MenelausState *s = opaque;

    /* Should be <= 1000 */
    s->rtc_next_vmstate = s->rtc.next - qemu_get_clock(rt_clock);
}
15,798
0
static inline uint64_t bdrv_get_align(BlockDriverState *bs)
{
    /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
    return MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
}
15,799
0
static int mipsnet_can_receive(void *opaque)
{
    MIPSnetState *s = opaque;

    if (s->busy)
        return 0;
    return !mipsnet_buffer_full(s);
}
15,800
0
void hmp_host_net_remove(Monitor *mon, const QDict *qdict)
{
    NetClientState *nc;
    int vlan_id = qdict_get_int(qdict, "vlan_id");
    const char *device = qdict_get_str(qdict, "device");

    nc = net_hub_find_client_by_name(vlan_id, device);
    if (!nc) {
        error_report("Host network device '%s' on hub '%d' not found",
                     device, vlan_id);
        return;
    }
    if (!net_host_check_device(nc->model)) {
        error_report("invalid host network device '%s'", device);
        return;
    }

    qemu_del_net_client(nc->peer);
    qemu_del_net_client(nc);
}
15,801
0
static int ssd0303_init(I2CSlave *i2c)
{
    ssd0303_state *s = FROM_I2C_SLAVE(ssd0303_state, i2c);

    s->con = graphic_console_init(ssd0303_update_display,
                                  ssd0303_invalidate_display,
                                  NULL, NULL, s);
    qemu_console_resize(s->con, 96 * MAGNIFY, 16 * MAGNIFY);
    return 0;
}
15,802
0
static enum AVPixelFormat webp_get_format(AVCodecContext *avctx,
                                          const enum AVPixelFormat *formats)
{
    WebPContext *s = avctx->priv_data;

    if (s->has_alpha)
        return AV_PIX_FMT_YUVA420P;
    else
        return AV_PIX_FMT_YUV420P;
}
15,803
0
static void coded_frame_add(void *list, struct FrameListData *cx_frame)
{
    struct FrameListData **p = list;

    while (*p != NULL)
        p = &(*p)->next;
    *p = cx_frame;
    cx_frame->next = NULL;
}
15,805
0
void swri_resample_dsp_x86_init(ResampleContext *c)
{
    int av_unused mm_flags = av_get_cpu_flags();

    switch(c->format){
    case AV_SAMPLE_FMT_S16P:
        if (ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL && mm_flags & AV_CPU_FLAG_MMX2) {
            c->dsp.resample = c->linear ? ff_resample_linear_int16_mmxext
                                        : ff_resample_common_int16_mmxext;
        }
        if (HAVE_SSE2_EXTERNAL && mm_flags & AV_CPU_FLAG_SSE2) {
            c->dsp.resample = c->linear ? ff_resample_linear_int16_sse2
                                        : ff_resample_common_int16_sse2;
        }
        if (HAVE_XOP_EXTERNAL && mm_flags & AV_CPU_FLAG_XOP) {
            c->dsp.resample = c->linear ? ff_resample_linear_int16_xop
                                        : ff_resample_common_int16_xop;
        }
        break;
    case AV_SAMPLE_FMT_FLTP:
        if (HAVE_SSE_EXTERNAL && mm_flags & AV_CPU_FLAG_SSE) {
            c->dsp.resample = c->linear ? ff_resample_linear_float_sse
                                        : ff_resample_common_float_sse;
        }
        if (HAVE_AVX_EXTERNAL && mm_flags & AV_CPU_FLAG_AVX) {
            c->dsp.resample = c->linear ? ff_resample_linear_float_avx
                                        : ff_resample_common_float_avx;
        }
        if (HAVE_FMA3_EXTERNAL && mm_flags & AV_CPU_FLAG_FMA3) {
            c->dsp.resample = c->linear ? ff_resample_linear_float_fma3
                                        : ff_resample_common_float_fma3;
        }
        if (HAVE_FMA4_EXTERNAL && mm_flags & AV_CPU_FLAG_FMA4) {
            c->dsp.resample = c->linear ? ff_resample_linear_float_fma4
                                        : ff_resample_common_float_fma4;
        }
        break;
    case AV_SAMPLE_FMT_DBLP:
        if (HAVE_SSE2_EXTERNAL && mm_flags & AV_CPU_FLAG_SSE2) {
            c->dsp.resample = c->linear ? ff_resample_linear_double_sse2
                                        : ff_resample_common_double_sse2;
        }
        break;
    }
}
15,806