label (int64, 0-1) | func1 (string, 23-97k chars) | id (int64, 0-27.3k) |
---|---|---|
0 | static int rm_read_audio_stream_info(AVFormatContext *s, AVIOContext *pb, AVStream *st, RMStream *ast, int read_all) { char buf[256]; uint32_t version; int ret; /* ra type header */ version = avio_rb16(pb); /* version */ if (version == 3) { int header_size = avio_rb16(pb); int64_t startpos = avio_tell(pb); avio_skip(pb, 14); rm_read_metadata(s, 0); if ((startpos + header_size) >= avio_tell(pb) + 2) { // fourcc (should always be "lpcJ") avio_r8(pb); get_str8(pb, buf, sizeof(buf)); } // Skip extra header crap (this should never happen) if ((startpos + header_size) > avio_tell(pb)) avio_skip(pb, header_size + startpos - avio_tell(pb)); st->codec->sample_rate = 8000; st->codec->channels = 1; st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = CODEC_ID_RA_144; ast->deint_id = DEINT_ID_INT0; } else { int flavor, sub_packet_h, coded_framesize, sub_packet_size; int codecdata_length; /* old version (4) */ avio_skip(pb, 2); /* unused */ avio_rb32(pb); /* .ra4 */ avio_rb32(pb); /* data size */ avio_rb16(pb); /* version2 */ avio_rb32(pb); /* header size */ flavor= avio_rb16(pb); /* add codec info / flavor */ ast->coded_framesize = coded_framesize = avio_rb32(pb); /* coded frame size */ avio_rb32(pb); /* ??? */ avio_rb32(pb); /* ??? */ avio_rb32(pb); /* ??? */ ast->sub_packet_h = sub_packet_h = avio_rb16(pb); /* 1 */ st->codec->block_align= avio_rb16(pb); /* frame size */ ast->sub_packet_size = sub_packet_size = avio_rb16(pb); /* sub packet size */ avio_rb16(pb); /* ??? */ if (version == 5) { avio_rb16(pb); avio_rb16(pb); avio_rb16(pb); } st->codec->sample_rate = avio_rb16(pb); avio_rb32(pb); st->codec->channels = avio_rb16(pb); if (version == 5) { ast->deint_id = avio_rl32(pb); avio_read(pb, buf, 4); buf[4] = 0; } else { get_str8(pb, buf, sizeof(buf)); /* desc */ ast->deint_id = AV_RL32(buf); get_str8(pb, buf, sizeof(buf)); /* desc */ } st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_tag = AV_RL32(buf); st->codec->codec_id = ff_codec_get_id(ff_rm_codec_tags, st->codec->codec_tag); switch (ast->deint_id) { case DEINT_ID_GENR: case DEINT_ID_INT0: case DEINT_ID_INT4: case DEINT_ID_SIPR: case DEINT_ID_VBRS: case DEINT_ID_VBRF: break; default: av_log(NULL,0,"Unknown interleaver %X\n", ast->deint_id); return AVERROR_INVALIDDATA; } switch (st->codec->codec_id) { case CODEC_ID_AC3: st->need_parsing = AVSTREAM_PARSE_FULL; break; case CODEC_ID_RA_288: st->codec->extradata_size= 0; ast->audio_framesize = st->codec->block_align; st->codec->block_align = coded_framesize; if(ast->audio_framesize >= UINT_MAX / sub_packet_h){ av_log(s, AV_LOG_ERROR, "ast->audio_framesize * sub_packet_h too large\n"); return -1; } av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h); break; case CODEC_ID_COOK: case CODEC_ID_ATRAC3: case CODEC_ID_SIPR: avio_rb16(pb); avio_r8(pb); if (version == 5) avio_r8(pb); codecdata_length = avio_rb32(pb); if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){ av_log(s, AV_LOG_ERROR, "codecdata_length too large\n"); return -1; } ast->audio_framesize = st->codec->block_align; if (st->codec->codec_id == CODEC_ID_SIPR) { if (flavor > 3) { av_log(s, AV_LOG_ERROR, "bad SIPR file flavor %d\n", flavor); return -1; } st->codec->block_align = ff_sipr_subpk_size[flavor]; } else { if(sub_packet_size <= 0){ av_log(s, AV_LOG_ERROR, "sub_packet_size is invalid\n"); return -1; } st->codec->block_align = ast->sub_packet_size; } if ((ret = rm_read_extradata(pb, st->codec, codecdata_length)) < 0) return ret; if(ast->audio_framesize >= UINT_MAX / 
sub_packet_h){ av_log(s, AV_LOG_ERROR, "rm->audio_framesize * sub_packet_h too large\n"); return -1; } av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h); break; case CODEC_ID_AAC: avio_rb16(pb); avio_r8(pb); if (version == 5) avio_r8(pb); codecdata_length = avio_rb32(pb); if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){ av_log(s, AV_LOG_ERROR, "codecdata_length too large\n"); return -1; } if (codecdata_length >= 1) { avio_r8(pb); if ((ret = rm_read_extradata(pb, st->codec, codecdata_length - 1)) < 0) return ret; } break; default: av_strlcpy(st->codec->codec_name, buf, sizeof(st->codec->codec_name)); } if (read_all) { avio_r8(pb); avio_r8(pb); avio_r8(pb); rm_read_metadata(s, 0); } } return 0; } | 14,441 |
1 | SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun, uint8_t *buf, void *hba_private) { SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus); SCSIRequest *req; SCSICommand cmd; if (scsi_req_parse(&cmd, d, buf) != 0) { trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]); req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private); } else { trace_scsi_req_parsed(d->id, lun, tag, buf[0], cmd.mode, cmd.xfer); if (req->cmd.lba != -1) { trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0], cmd.lba); } if ((d->unit_attention.key == UNIT_ATTENTION || bus->unit_attention.key == UNIT_ATTENTION) && (buf[0] != INQUIRY && buf[0] != REPORT_LUNS && buf[0] != GET_CONFIGURATION && buf[0] != GET_EVENT_STATUS_NOTIFICATION)) { req = scsi_req_alloc(&reqops_unit_attention, d, tag, lun, hba_private); } else if (lun != d->lun || buf[0] == REPORT_LUNS || buf[0] == REQUEST_SENSE) { req = scsi_req_alloc(&reqops_target_command, d, tag, lun, hba_private); } else { req = d->info->alloc_req(d, tag, lun, hba_private); } } req->cmd = cmd; switch (buf[0]) { case INQUIRY: trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]); break; case TEST_UNIT_READY: trace_scsi_test_unit_ready(d->id, lun, tag); break; case REPORT_LUNS: trace_scsi_report_luns(d->id, lun, tag); break; case REQUEST_SENSE: trace_scsi_request_sense(d->id, lun, tag); break; default: break; } return req; } | 14,443 |
1 | av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; static volatile int done = 0; int i; MVTable *mv; if (ff_h263_decode_init(avctx) < 0) ff_msmpeg4_common_init(s); if (!done) { for(i=0;i<NB_RL_TABLES;i++) { ff_init_rl(&ff_rl_table[i], ff_static_rl_table_store[i]); INIT_VLC_RL(ff_rl_table[0], 642); INIT_VLC_RL(ff_rl_table[1], 1104); INIT_VLC_RL(ff_rl_table[2], 554); INIT_VLC_RL(ff_rl_table[3], 940); INIT_VLC_RL(ff_rl_table[4], 962); INIT_VLC_RL(ff_rl_table[5], 554); mv = &ff_mv_tables[0]; INIT_VLC_STATIC(&mv->vlc, MV_VLC_BITS, mv->n + 1, mv->table_mv_bits, 1, 1, mv->table_mv_code, 2, 2, 3714); mv = &ff_mv_tables[1]; INIT_VLC_STATIC(&mv->vlc, MV_VLC_BITS, mv->n + 1, mv->table_mv_bits, 1, 1, mv->table_mv_code, 2, 2, 2694); INIT_VLC_STATIC(&ff_msmp4_dc_luma_vlc[0], DC_VLC_BITS, 120, &ff_table0_dc_lum[0][1], 8, 4, &ff_table0_dc_lum[0][0], 8, 4, 1158); INIT_VLC_STATIC(&ff_msmp4_dc_chroma_vlc[0], DC_VLC_BITS, 120, &ff_table0_dc_chroma[0][1], 8, 4, &ff_table0_dc_chroma[0][0], 8, 4, 1118); INIT_VLC_STATIC(&ff_msmp4_dc_luma_vlc[1], DC_VLC_BITS, 120, &ff_table1_dc_lum[0][1], 8, 4, &ff_table1_dc_lum[0][0], 8, 4, 1476); INIT_VLC_STATIC(&ff_msmp4_dc_chroma_vlc[1], DC_VLC_BITS, 120, &ff_table1_dc_chroma[0][1], 8, 4, &ff_table1_dc_chroma[0][0], 8, 4, 1216); INIT_VLC_STATIC(&v2_dc_lum_vlc, DC_VLC_BITS, 512, &ff_v2_dc_lum_table[0][1], 8, 4, &ff_v2_dc_lum_table[0][0], 8, 4, 1472); INIT_VLC_STATIC(&v2_dc_chroma_vlc, DC_VLC_BITS, 512, &ff_v2_dc_chroma_table[0][1], 8, 4, &ff_v2_dc_chroma_table[0][0], 8, 4, 1506); INIT_VLC_STATIC(&v2_intra_cbpc_vlc, V2_INTRA_CBPC_VLC_BITS, 4, &ff_v2_intra_cbpc[0][1], 2, 1, &ff_v2_intra_cbpc[0][0], 2, 1, 8); INIT_VLC_STATIC(&v2_mb_type_vlc, V2_MB_TYPE_VLC_BITS, 8, &ff_v2_mb_type[0][1], 2, 1, &ff_v2_mb_type[0][0], 2, 1, 128); INIT_VLC_STATIC(&v2_mv_vlc, V2_MV_VLC_BITS, 33, &ff_mvtab[0][1], 2, 1, &ff_mvtab[0][0], 2, 1, 538); INIT_VLC_STATIC(&ff_mb_non_intra_vlc[0], MB_NON_INTRA_VLC_BITS, 128, &ff_wmv2_inter_table[0][0][1], 8, 4, &ff_wmv2_inter_table[0][0][0], 8, 4, 1636); INIT_VLC_STATIC(&ff_mb_non_intra_vlc[1], MB_NON_INTRA_VLC_BITS, 128, &ff_wmv2_inter_table[1][0][1], 8, 4, &ff_wmv2_inter_table[1][0][0], 8, 4, 2648); INIT_VLC_STATIC(&ff_mb_non_intra_vlc[2], MB_NON_INTRA_VLC_BITS, 128, &ff_wmv2_inter_table[2][0][1], 8, 4, &ff_wmv2_inter_table[2][0][0], 8, 4, 1532); INIT_VLC_STATIC(&ff_mb_non_intra_vlc[3], MB_NON_INTRA_VLC_BITS, 128, &ff_wmv2_inter_table[3][0][1], 8, 4, &ff_wmv2_inter_table[3][0][0], 8, 4, 2488); INIT_VLC_STATIC(&ff_msmp4_mb_i_vlc, MB_INTRA_VLC_BITS, 64, &ff_msmp4_mb_i_table[0][1], 4, 2, &ff_msmp4_mb_i_table[0][0], 4, 2, 536); INIT_VLC_STATIC(&ff_inter_intra_vlc, INTER_INTRA_VLC_BITS, 4, &ff_table_inter_intra[0][1], 2, 1, &ff_table_inter_intra[0][0], 2, 1, 8); done = 1; switch(s->msmpeg4_version){ case 1: case 2: s->decode_mb= msmpeg4v12_decode_mb; break; case 3: case 4: s->decode_mb= msmpeg4v34_decode_mb; break; case 5: if (CONFIG_WMV2_DECODER) s->decode_mb= ff_wmv2_decode_mb; case 6: //FIXME + TODO VC1 decode mb break; s->slice_height= s->mb_height; //to avoid 1/0 if the first frame is not a keyframe return 0; | 14,444 |
1 | void qemu_thread_exit(void *arg) { QemuThread *thread = TlsGetValue(qemu_thread_tls_index); thread->ret = arg; CloseHandle(thread->thread); thread->thread = NULL; ExitThread(0); } | 14,446 |
1 | int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt); AVPicture dummy_pict; int ret; if ((ret = av_image_check_size(width, height, 0, NULL)) < 0) return ret; if (desc->flags & PIX_FMT_PSEUDOPAL) // do not include palette for these pseudo-paletted formats return width * height; return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height); } | 14,447 |
1 | static int qcow_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVQcowState *s = bs->opaque; int len, i, shift, ret; QCowHeader header; ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); if (ret < 0) { be32_to_cpus(&header.magic); be32_to_cpus(&header.version); be64_to_cpus(&header.backing_file_offset); be32_to_cpus(&header.backing_file_size); be32_to_cpus(&header.mtime); be64_to_cpus(&header.size); be32_to_cpus(&header.crypt_method); be64_to_cpus(&header.l1_table_offset); if (header.magic != QCOW_MAGIC) { error_setg(errp, "Image not in qcow format"); if (header.version != QCOW_VERSION) { char version[64]; snprintf(version, sizeof(version), "QCOW version %" PRIu32, header.version); error_set(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE, bs->device_name, "qcow", version); ret = -ENOTSUP; if (header.size <= 1) { error_setg(errp, "Image size is too small (must be at least 2 bytes)"); if (header.cluster_bits < 9 || header.cluster_bits > 16) { error_setg(errp, "Cluster size must be between 512 and 64k"); if (header.crypt_method > QCOW_CRYPT_AES) { error_setg(errp, "invalid encryption method in qcow header"); s->crypt_method_header = header.crypt_method; if (s->crypt_method_header) { bs->encrypted = 1; s->cluster_bits = header.cluster_bits; s->cluster_size = 1 << s->cluster_bits; s->cluster_sectors = 1 << (s->cluster_bits - 9); s->l2_bits = header.l2_bits; s->l2_size = 1 << s->l2_bits; bs->total_sectors = header.size / 512; s->cluster_offset_mask = (1LL << (63 - s->cluster_bits)) - 1; /* read the level 1 table */ shift = s->cluster_bits + s->l2_bits; s->l1_size = (header.size + (1LL << shift) - 1) >> shift; s->l1_table_offset = header.l1_table_offset; s->l1_table = g_malloc(s->l1_size * sizeof(uint64_t)); ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, s->l1_size * sizeof(uint64_t)); if (ret < 0) { for(i = 0;i < s->l1_size; i++) { be64_to_cpus(&s->l1_table[i]); /* alloc L2 cache */ s->l2_cache = g_malloc(s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t)); s->cluster_cache = g_malloc(s->cluster_size); s->cluster_data = g_malloc(s->cluster_size); s->cluster_cache_offset = -1; /* read the backing file name */ if (header.backing_file_offset != 0) { len = header.backing_file_size; if (len > 1023) { len = 1023; ret = bdrv_pread(bs->file, header.backing_file_offset, bs->backing_file, len); if (ret < 0) { bs->backing_file[len] = '\0'; /* Disable migration when qcow images are used */ error_set(&s->migration_blocker, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, "qcow", bs->device_name, "live migration"); migrate_add_blocker(s->migration_blocker); qemu_co_mutex_init(&s->lock); return 0; fail: g_free(s->l1_table); g_free(s->l2_cache); g_free(s->cluster_cache); g_free(s->cluster_data); return ret; | 14,448 |
1 | void OPPROTO op_idivw_AX_T0(void) { int num, den, q, r; num = (EAX & 0xffff) | ((EDX & 0xffff) << 16); den = (int16_t)T0; if (den == 0) { raise_exception(EXCP00_DIVZ); } q = (num / den) & 0xffff; r = (num % den) & 0xffff; EAX = (EAX & ~0xffff) | q; EDX = (EDX & ~0xffff) | r; } | 14,449 |
1 | static always_inline void gen_op_subfco_64 (void) { gen_op_move_T2_T0(); gen_op_subf(); gen_op_check_subfc_64(); gen_op_check_subfo_64(); } | 14,450 |
1 | static av_cold int wmv2_encode_init(AVCodecContext *avctx){ Wmv2Context * const w= avctx->priv_data; if(ff_MPV_encode_init(avctx) < 0) return -1; ff_wmv2_common_init(w); avctx->extradata_size= 4; avctx->extradata= av_mallocz(avctx->extradata_size + 10); encode_ext_header(w); return 0; } | 14,451 |
1 | static void kvm_arm_gic_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); ARMGICCommonClass *agcc = ARM_GIC_COMMON_CLASS(klass); KVMARMGICClass *kgc = KVM_ARM_GIC_CLASS(klass); agcc->pre_save = kvm_arm_gic_get; agcc->post_load = kvm_arm_gic_put; kgc->parent_realize = dc->realize; kgc->parent_reset = dc->reset; dc->realize = kvm_arm_gic_realize; dc->reset = kvm_arm_gic_reset; dc->no_user = 1; } | 14,452 |
1 | test_tls_get_ipaddr(const char *addrstr, char **data, int *datalen) { struct addrinfo *res; struct addrinfo hints; memset(&hints, 0, sizeof(hints)); hints.ai_flags = AI_NUMERICHOST; g_assert(getaddrinfo(addrstr, NULL, &hints, &res) == 0); *datalen = res->ai_addrlen; *data = g_new(char, *datalen); memcpy(*data, res->ai_addr, *datalen); } | 14,453 |
1 | static int adaptive_cb_search(const int16_t *adapt_cb, float *work, const float *coefs, float *data) { int i, best_vect; float score, gain, best_score, best_gain; float exc[BLOCKSIZE]; gain = best_score = 0; for (i = BLOCKSIZE / 2; i <= BUFFERSIZE; i++) { create_adapt_vect(exc, adapt_cb, i); get_match_score(work, coefs, exc, NULL, NULL, data, &score, &gain); if (score > best_score) { best_score = score; best_vect = i; best_gain = gain; } } if (!best_score) return 0; /** * Re-calculate the filtered vector from the vector with maximum match score * and remove its contribution from input data. */ create_adapt_vect(exc, adapt_cb, best_vect); ff_celp_lp_synthesis_filterf(work, coefs, exc, BLOCKSIZE, LPC_ORDER); for (i = 0; i < BLOCKSIZE; i++) data[i] -= best_gain * work[i]; return best_vect - BLOCKSIZE / 2 + 1; } | 14,454 |
1 | static void pc_cpu_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { CPUArchId *found_cpu; HotplugHandlerClass *hhc; Error *local_err = NULL; PCMachineState *pcms = PC_MACHINE(hotplug_dev); if (pcms->acpi_dev) { hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev); hhc->plug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err); if (local_err) { goto out; } } /* increment the number of CPUs */ pcms->boot_cpus++; if (dev->hotplugged) { rtc_set_cpus_count(pcms->rtc, pcms->boot_cpus); fw_cfg_modify_i16(pcms->fw_cfg, FW_CFG_NB_CPUS, pcms->boot_cpus); } found_cpu = pc_find_cpu_slot(pcms, CPU(dev), NULL); found_cpu->cpu = CPU(dev); out: error_propagate(errp, local_err); } | 14,455 |
1 | static int transcode(void) { int ret, i; AVFormatContext *is, *os; OutputStream *ost; InputStream *ist; uint8_t *no_packet; int no_packet_count = 0; int64_t timer_start; if (!(no_packet = av_mallocz(nb_input_files))) exit_program(1); ret = transcode_init(); if (ret < 0) goto fail; av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n"); term_init(); timer_start = av_gettime(); for (; received_sigterm == 0;) { int file_index, ist_index, past_recording_time = 1; AVPacket pkt; int64_t ipts_min; ipts_min = INT64_MAX; /* check if there's any stream where output is still needed */ for (i = 0; i < nb_output_streams; i++) { OutputFile *of; ost = output_streams[i]; of = output_files[ost->file_index]; os = output_files[ost->file_index]->ctx; if (ost->is_past_recording_time || (os->pb && avio_tell(os->pb) >= of->limit_filesize)) continue; if (ost->frame_number > ost->max_frames) { int j; for (j = 0; j < of->ctx->nb_streams; j++) output_streams[of->ost_index + j]->is_past_recording_time = 1; continue; } past_recording_time = 0; } if (past_recording_time) break; /* select the stream that we must read now by looking at the smallest output pts */ file_index = -1; for (i = 0; i < nb_input_streams; i++) { int64_t ipts; ist = input_streams[i]; ipts = ist->last_dts; if (ist->discard || no_packet[ist->file_index]) continue; if (!input_files[ist->file_index]->eof_reached) { if (ipts < ipts_min) { ipts_min = ipts; file_index = ist->file_index; } } } /* if none, if is finished */ if (file_index < 0) { if (no_packet_count) { no_packet_count = 0; memset(no_packet, 0, nb_input_files); usleep(10000); continue; } break; } /* read a frame from it and output it in the fifo */ is = input_files[file_index]->ctx; ret = av_read_frame(is, &pkt); if (ret == AVERROR(EAGAIN)) { no_packet[file_index] = 1; no_packet_count++; continue; } if (ret < 0) { input_files[file_index]->eof_reached = 1; for (i = 0; i < input_files[file_index]->nb_streams; i++) { ist = input_streams[input_files[file_index]->ist_index + i]; if (ist->decoding_needed) output_packet(ist, NULL); } if (opt_shortest) break; else continue; } no_packet_count = 0; memset(no_packet, 0, nb_input_files); if (do_pkt_dump) { av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump, is->streams[pkt.stream_index]); } /* the following test is needed in case new streams appear dynamically in stream : we ignore them */ if (pkt.stream_index >= input_files[file_index]->nb_streams) goto discard_packet; ist_index = input_files[file_index]->ist_index + pkt.stream_index; ist = input_streams[ist_index]; if (ist->discard) goto discard_packet; if (pkt.dts != AV_NOPTS_VALUE) pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (pkt.pts != AV_NOPTS_VALUE) pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (pkt.pts != AV_NOPTS_VALUE) pkt.pts *= ist->ts_scale; if (pkt.dts != AV_NOPTS_VALUE) pkt.dts *= ist->ts_scale; //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", // ist->next_dts, // pkt.dts, input_files[ist->file_index].ts_offset, // ist->st->codec->codec_type); if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE && (is->iformat->flags & AVFMT_TS_DISCONT)) { int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q); int64_t delta = pkt_dts - ist->next_dts; if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) { input_files[ist->file_index]->ts_offset -= delta; av_log(NULL, 
AV_LOG_DEBUG, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", delta, input_files[ist->file_index]->ts_offset); pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); if (pkt.pts != AV_NOPTS_VALUE) pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); } } // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size); if (output_packet(ist, &pkt) < 0 || poll_filters() < 0) { av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n", ist->file_index, ist->st->index); if (exit_on_error) exit_program(1); av_free_packet(&pkt); continue; } discard_packet: av_free_packet(&pkt); /* dump report by using the output first video and audio streams */ print_report(0, timer_start); } /* at the end of stream, we must flush the decoder buffers */ for (i = 0; i < nb_input_streams; i++) { ist = input_streams[i]; if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) { output_packet(ist, NULL); } } poll_filters(); flush_encoders(); term_exit(); /* write the trailer if needed and close file */ for (i = 0; i < nb_output_files; i++) { os = output_files[i]->ctx; av_write_trailer(os); } /* dump report by using the first video and audio streams */ print_report(1, timer_start); /* close each encoder */ for (i = 0; i < nb_output_streams; i++) { ost = output_streams[i]; if (ost->encoding_needed) { av_freep(&ost->st->codec->stats_in); avcodec_close(ost->st->codec); } } /* close each decoder */ for (i = 0; i < nb_input_streams; i++) { ist = input_streams[i]; if (ist->decoding_needed) { avcodec_close(ist->st->codec); } } /* finished ! */ ret = 0; fail: av_freep(&no_packet); if (output_streams) { for (i = 0; i < nb_output_streams; i++) { ost = output_streams[i]; if (ost) { if (ost->stream_copy) av_freep(&ost->st->codec->extradata); if (ost->logfile) { fclose(ost->logfile); ost->logfile = NULL; } av_fifo_free(ost->fifo); /* works even if fifo is not initialized but set to zero */ av_freep(&ost->st->codec->subtitle_header); av_free(ost->forced_kf_pts); if (ost->avr) avresample_free(&ost->avr); av_dict_free(&ost->opts); } } } return ret; } | 14,456 |
1 | static void pc_dimm_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { HotplugHandlerClass *hhc; Error *local_err = NULL; PCMachineState *pcms = PC_MACHINE(hotplug_dev); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); PCDIMMDevice *dimm = PC_DIMM(dev); PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm); MemoryRegion *mr = ddc->get_memory_region(dimm); uint64_t align = TARGET_PAGE_SIZE; bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); if (memory_region_get_alignment(mr) && pcmc->enforce_aligned_dimm) { align = memory_region_get_alignment(mr); } if (!pcms->acpi_dev) { error_setg(&local_err, "memory hotplug is not enabled: missing acpi device"); goto out; } if (is_nvdimm && !pcms->acpi_nvdimm_state.is_enabled) { error_setg(&local_err, "nvdimm is not enabled: missing 'nvdimm' in '-M'"); goto out; } pc_dimm_memory_plug(dev, &pcms->hotplug_memory, mr, align, &local_err); if (local_err) { goto out; } if (is_nvdimm) { nvdimm_plug(&pcms->acpi_nvdimm_state); } hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev); hhc->plug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &error_abort); out: error_propagate(errp, local_err); } | 14,457 |
1 | static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) { /* this code makes no assumption on src or stride. One could remove the recomputation of the perm vector by assuming (stride % 16) == 0, unfortunately this is not always true. Quite a lot of load/stores can be removed by assuming proper alignment of src & stride :-( */ uint8_t *src2 = src; const vector signed int zero = vec_splat_s32(0); const int properStride = (stride % 16); const int srcAlign = ((unsigned long)src2 % 16); DECLARE_ALIGNED(16, short, qp[8]) = {c->QP}; vector signed short vqp = vec_ld(0, qp); vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9; vector unsigned char vbA0, vbA1, vbA2, vbA3, vbA4, vbA5, vbA6, vbA7, vbA8, vbA9; vector unsigned char vbB0, vbB1, vbB2, vbB3, vbB4, vbB5, vbB6, vbB7, vbB8, vbB9; vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9; vector unsigned char perml0, perml1, perml2, perml3, perml4, perml5, perml6, perml7, perml8, perml9; register int j0 = 0, j1 = stride, j2 = 2 * stride, j3 = 3 * stride, j4 = 4 * stride, j5 = 5 * stride, j6 = 6 * stride, j7 = 7 * stride, j8 = 8 * stride, j9 = 9 * stride; vqp = vec_splat(vqp, 0); src2 += stride*3; #define LOAD_LINE(i) \ perml##i = vec_lvsl(i * stride, src2); \ vbA##i = vec_ld(i * stride, src2); \ vbB##i = vec_ld(i * stride + 16, src2); \ vbT##i = vec_perm(vbA##i, vbB##i, perml##i); \ vb##i = \ (vector signed short)vec_mergeh((vector unsigned char)zero, \ (vector unsigned char)vbT##i) #define LOAD_LINE_ALIGNED(i) \ vbT##i = vec_ld(j##i, src2); \ vb##i = \ (vector signed short)vec_mergeh((vector signed char)zero, \ (vector signed char)vbT##i) /* Special-casing the aligned case is worthwhile, as all calls from * the (transposed) horizontable deblocks will be aligned, in addition * to the naturally aligned vertical deblocks. 
*/ if (properStride && srcAlign) { LOAD_LINE_ALIGNED(0); LOAD_LINE_ALIGNED(1); LOAD_LINE_ALIGNED(2); LOAD_LINE_ALIGNED(3); LOAD_LINE_ALIGNED(4); LOAD_LINE_ALIGNED(5); LOAD_LINE_ALIGNED(6); LOAD_LINE_ALIGNED(7); LOAD_LINE_ALIGNED(8); LOAD_LINE_ALIGNED(9); } else { LOAD_LINE(0); LOAD_LINE(1); LOAD_LINE(2); LOAD_LINE(3); LOAD_LINE(4); LOAD_LINE(5); LOAD_LINE(6); LOAD_LINE(7); LOAD_LINE(8); LOAD_LINE(9); } #undef LOAD_LINE #undef LOAD_LINE_ALIGNED { const vector unsigned short v_2 = vec_splat_u16(2); const vector unsigned short v_4 = vec_splat_u16(4); const vector signed short v_diff01 = vec_sub(vb0, vb1); const vector unsigned short v_cmp01 = (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp); const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01); const vector signed short v_diff89 = vec_sub(vb8, vb9); const vector unsigned short v_cmp89 = (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp); const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89); const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1); const vector signed short temp02 = vec_add(vb2, vb3); const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4); const vector signed short v_sumsB0 = vec_add(temp02, temp03); const vector signed short temp11 = vec_sub(v_sumsB0, v_first); const vector signed short v_sumsB1 = vec_add(temp11, vb4); const vector signed short temp21 = vec_sub(v_sumsB1, v_first); const vector signed short v_sumsB2 = vec_add(temp21, vb5); const vector signed short temp31 = vec_sub(v_sumsB2, v_first); const vector signed short v_sumsB3 = vec_add(temp31, vb6); const vector signed short temp41 = vec_sub(v_sumsB3, v_first); const vector signed short v_sumsB4 = vec_add(temp41, vb7); const vector signed short temp51 = vec_sub(v_sumsB4, vb1); const vector signed short v_sumsB5 = vec_add(temp51, vb8); const vector signed short temp61 = vec_sub(v_sumsB5, vb2); const vector signed short v_sumsB6 = vec_add(temp61, v_last); const vector signed short temp71 = vec_sub(v_sumsB6, vb3); const vector signed short v_sumsB7 = vec_add(temp71, v_last); const vector signed short temp81 = vec_sub(v_sumsB7, vb4); const vector signed short v_sumsB8 = vec_add(temp81, v_last); const vector signed short temp91 = vec_sub(v_sumsB8, vb5); const vector signed short v_sumsB9 = vec_add(temp91, v_last); #define COMPUTE_VR(i, j, k) \ const vector signed short temps1##i = \ vec_add(v_sumsB##i, v_sumsB##k); \ const vector signed short temps2##i = \ vec_mladd(vb##j, (vector signed short)v_2, temps1##i); \ const vector signed short vr##j = vec_sra(temps2##i, v_4) COMPUTE_VR(0, 1, 2); COMPUTE_VR(1, 2, 3); COMPUTE_VR(2, 3, 4); COMPUTE_VR(3, 4, 5); COMPUTE_VR(4, 5, 6); COMPUTE_VR(5, 6, 7); COMPUTE_VR(6, 7, 8); COMPUTE_VR(7, 8, 9); const vector signed char neg1 = vec_splat_s8(-1); const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; #define PACK_AND_STORE(i) \ { const vector unsigned char perms##i = \ vec_lvsr(i * stride, src2); \ const vector unsigned char vf##i = \ vec_packsu(vr##i, (vector signed short)zero); \ const vector unsigned char vg##i = \ vec_perm(vf##i, vbT##i, permHH); \ const vector unsigned char mask##i = \ vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \ const vector unsigned char vg2##i = \ vec_perm(vg##i, vg##i, perms##i); \ const vector unsigned char svA##i = \ vec_sel(vbA##i, vg2##i, mask##i); \ const vector unsigned 
char svB##i = \ vec_sel(vg2##i, vbB##i, mask##i); \ vec_st(svA##i, i * stride, src2); \ vec_st(svB##i, i * stride + 16, src2);} #define PACK_AND_STORE_ALIGNED(i) \ { const vector unsigned char vf##i = \ vec_packsu(vr##i, (vector signed short)zero); \ const vector unsigned char vg##i = \ vec_perm(vf##i, vbT##i, permHH); \ vec_st(vg##i, i * stride, src2);} /* Special-casing the aligned case is worthwhile, as all calls from * the (transposed) horizontable deblocks will be aligned, in addition * to the naturally aligned vertical deblocks. */ if (properStride && srcAlign) { PACK_AND_STORE_ALIGNED(1) PACK_AND_STORE_ALIGNED(2) PACK_AND_STORE_ALIGNED(3) PACK_AND_STORE_ALIGNED(4) PACK_AND_STORE_ALIGNED(5) PACK_AND_STORE_ALIGNED(6) PACK_AND_STORE_ALIGNED(7) PACK_AND_STORE_ALIGNED(8) } else { PACK_AND_STORE(1) PACK_AND_STORE(2) PACK_AND_STORE(3) PACK_AND_STORE(4) PACK_AND_STORE(5) PACK_AND_STORE(6) PACK_AND_STORE(7) PACK_AND_STORE(8) } #undef PACK_AND_STORE #undef PACK_AND_STORE_ALIGNED } } | 14,458 |
1 | static void decode_hrd(HEVCContext *s, int common_inf_present, int max_sublayers) { GetBitContext *gb = &s->HEVClc.gb; int nal_params_present = 0, vcl_params_present = 0; int subpic_params_present = 0; int i; if (common_inf_present) { nal_params_present = get_bits1(gb); vcl_params_present = get_bits1(gb); if (nal_params_present || vcl_params_present) { subpic_params_present = get_bits1(gb); if (subpic_params_present) { skip_bits(gb, 8); // tick_divisor_minus2 skip_bits(gb, 5); // du_cpb_removal_delay_increment_length_minus1 skip_bits(gb, 1); // sub_pic_cpb_params_in_pic_timing_sei_flag skip_bits(gb, 5); // dpb_output_delay_du_length_minus1 } skip_bits(gb, 4); // bit_rate_scale skip_bits(gb, 4); // cpb_size_scale if (subpic_params_present) skip_bits(gb, 4); // cpb_size_du_scale skip_bits(gb, 5); // initial_cpb_removal_delay_length_minus1 skip_bits(gb, 5); // au_cpb_removal_delay_length_minus1 skip_bits(gb, 5); // dpb_output_delay_length_minus1 } } for (i = 0; i < max_sublayers; i++) { int low_delay = 0; int nb_cpb = 1; int fixed_rate = get_bits1(gb); if (!fixed_rate) fixed_rate = get_bits1(gb); if (fixed_rate) get_ue_golomb_long(gb); // elemental_duration_in_tc_minus1 else low_delay = get_bits1(gb); if (!low_delay) nb_cpb = get_ue_golomb_long(gb) + 1; if (nal_params_present) decode_sublayer_hrd(s, nb_cpb, subpic_params_present); if (vcl_params_present) decode_sublayer_hrd(s, nb_cpb, subpic_params_present); } } | 14,460 |
1 | static uint8_t eepro100_read1(EEPRO100State * s, uint32_t addr) { uint8_t val; if (addr <= sizeof(s->mem) - sizeof(val)) { memcpy(&val, &s->mem[addr], sizeof(val)); } switch (addr) { case SCBStatus: case SCBAck: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBCmd: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); #if 0 val = eepro100_read_command(s); #endif break; case SCBIntmask: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBPort + 3: TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBeeprom: val = eepro100_read_eeprom(s); break; case SCBpmdr: /* Power Management Driver Register */ val = 0; TRACE(OTHER, logout("addr=%s val=0x%02x\n", regname(addr), val)); break; case SCBgstat: /* General Status Register */ /* 100 Mbps full duplex, valid link */ val = 0x07; TRACE(OTHER, logout("addr=General Status val=%02x\n", val)); break; default: logout("addr=%s val=0x%02x\n", regname(addr), val); missing("unknown byte read"); } return val; } | 14,461 |
1 | void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp) { MigrationState *s = migrate_get_current(); if (params->has_compress_level && (params->compress_level < 0 || params->compress_level > 9)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level", "is invalid, it should be in the range of 0 to 9"); } if (params->has_compress_threads && (params->compress_threads < 1 || params->compress_threads > 255)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_threads", "is invalid, it should be in the range of 1 to 255"); } if (params->has_decompress_threads && (params->decompress_threads < 1 || params->decompress_threads > 255)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "decompress_threads", "is invalid, it should be in the range of 1 to 255"); } if (params->has_cpu_throttle_initial && (params->cpu_throttle_initial < 1 || params->cpu_throttle_initial > 99)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu_throttle_initial", "an integer in the range of 1 to 99"); } if (params->has_cpu_throttle_increment && (params->cpu_throttle_increment < 1 || params->cpu_throttle_increment > 99)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu_throttle_increment", "an integer in the range of 1 to 99"); } if (params->has_compress_level) { s->parameters.compress_level = params->compress_level; } if (params->has_compress_threads) { s->parameters.compress_threads = params->compress_threads; } if (params->has_decompress_threads) { s->parameters.decompress_threads = params->decompress_threads; } if (params->has_cpu_throttle_initial) { s->parameters.cpu_throttle_initial = params->cpu_throttle_initial; } if (params->has_cpu_throttle_increment) { s->parameters.cpu_throttle_increment = params->cpu_throttle_increment; } if (params->has_tls_creds) { g_free(s->parameters.tls_creds); s->parameters.tls_creds = g_strdup(params->tls_creds); } if (params->has_tls_hostname) { g_free(s->parameters.tls_hostname); s->parameters.tls_hostname = g_strdup(params->tls_hostname); } } | 14,462 |
1 | static int vorbis_parse_setup_hdr_floors(vorbis_context *vc) { GetBitContext *gb = &vc->gb; int i,j,k; vc->floor_count = get_bits(gb, 6) + 1; vc->floors = av_mallocz(vc->floor_count * sizeof(*vc->floors)); for (i = 0; i < vc->floor_count; ++i) { vorbis_floor *floor_setup = &vc->floors[i]; floor_setup->floor_type = get_bits(gb, 16); av_dlog(NULL, " %d. floor type %d \n", i, floor_setup->floor_type); if (floor_setup->floor_type == 1) { int maximum_class = -1; unsigned rangebits, rangemax, floor1_values = 2; floor_setup->decode = vorbis_floor1_decode; floor_setup->data.t1.partitions = get_bits(gb, 5); av_dlog(NULL, " %d.floor: %d partitions \n", i, floor_setup->data.t1.partitions); for (j = 0; j < floor_setup->data.t1.partitions; ++j) { floor_setup->data.t1.partition_class[j] = get_bits(gb, 4); if (floor_setup->data.t1.partition_class[j] > maximum_class) maximum_class = floor_setup->data.t1.partition_class[j]; av_dlog(NULL, " %d. floor %d partition class %d \n", i, j, floor_setup->data.t1.partition_class[j]); av_dlog(NULL, " maximum class %d \n", maximum_class); for (j = 0; j <= maximum_class; ++j) { floor_setup->data.t1.class_dimensions[j] = get_bits(gb, 3) + 1; floor_setup->data.t1.class_subclasses[j] = get_bits(gb, 2); av_dlog(NULL, " %d floor %d class dim: %d subclasses %d \n", i, j, floor_setup->data.t1.class_dimensions[j], floor_setup->data.t1.class_subclasses[j]); if (floor_setup->data.t1.class_subclasses[j]) { GET_VALIDATED_INDEX(floor_setup->data.t1.class_masterbook[j], 8, vc->codebook_count) av_dlog(NULL, " masterbook: %d \n", floor_setup->data.t1.class_masterbook[j]); for (k = 0; k < (1 << floor_setup->data.t1.class_subclasses[j]); ++k) { int16_t bits = get_bits(gb, 8) - 1; if (bits != -1) VALIDATE_INDEX(bits, vc->codebook_count) floor_setup->data.t1.subclass_books[j][k] = bits; av_dlog(NULL, " book %d. : %d \n", k, floor_setup->data.t1.subclass_books[j][k]); floor_setup->data.t1.multiplier = get_bits(gb, 2) + 1; floor_setup->data.t1.x_list_dim = 2; for (j = 0; j < floor_setup->data.t1.partitions; ++j) floor_setup->data.t1.x_list_dim+=floor_setup->data.t1.class_dimensions[floor_setup->data.t1.partition_class[j]]; floor_setup->data.t1.list = av_mallocz(floor_setup->data.t1.x_list_dim * sizeof(*floor_setup->data.t1.list)); rangebits = get_bits(gb, 4); rangemax = (1 << rangebits); if (rangemax > vc->blocksize[1] / 2) { "Floor value is too large for blocksize: %u (%"PRIu32")\n", rangemax, vc->blocksize[1] / 2); floor_setup->data.t1.list[0].x = 0; floor_setup->data.t1.list[1].x = rangemax; for (j = 0; j < floor_setup->data.t1.partitions; ++j) { for (k = 0; k < floor_setup->data.t1.class_dimensions[floor_setup->data.t1.partition_class[j]]; ++k, ++floor1_values) { floor_setup->data.t1.list[floor1_values].x = get_bits(gb, rangebits); av_dlog(NULL, " %u. floor1 Y coord. 
%d\n", floor1_values, floor_setup->data.t1.list[floor1_values].x); // Precalculate order of x coordinates - needed for decode if (ff_vorbis_ready_floor1_list(vc->avccontext, floor_setup->data.t1.list, floor_setup->data.t1.x_list_dim)) { } else if (floor_setup->floor_type == 0) { unsigned max_codebook_dim = 0; floor_setup->decode = vorbis_floor0_decode; floor_setup->data.t0.order = get_bits(gb, 8); floor_setup->data.t0.rate = get_bits(gb, 16); floor_setup->data.t0.bark_map_size = get_bits(gb, 16); floor_setup->data.t0.amplitude_bits = get_bits(gb, 6); /* zero would result in a div by zero later * * 2^0 - 1 == 0 */ if (floor_setup->data.t0.amplitude_bits == 0) { "Floor 0 amplitude bits is 0.\n"); floor_setup->data.t0.amplitude_offset = get_bits(gb, 8); floor_setup->data.t0.num_books = get_bits(gb, 4) + 1; /* allocate mem for booklist */ floor_setup->data.t0.book_list = av_malloc(floor_setup->data.t0.num_books); if (!floor_setup->data.t0.book_list) return AVERROR(ENOMEM); /* read book indexes */ { int idx; unsigned book_idx; for (idx = 0; idx < floor_setup->data.t0.num_books; ++idx) { GET_VALIDATED_INDEX(book_idx, 8, vc->codebook_count) floor_setup->data.t0.book_list[idx] = book_idx; if (vc->codebooks[book_idx].dimensions > max_codebook_dim) max_codebook_dim = vc->codebooks[book_idx].dimensions; create_map(vc, i); /* codebook dim is for padding if codebook dim doesn't * * divide order+1 then we need to read more data */ floor_setup->data.t0.lsp = av_malloc((floor_setup->data.t0.order + 1 + max_codebook_dim) * sizeof(*floor_setup->data.t0.lsp)); if (!floor_setup->data.t0.lsp) return AVERROR(ENOMEM); /* debug output parsed headers */ av_dlog(NULL, "floor0 order: %u\n", floor_setup->data.t0.order); av_dlog(NULL, "floor0 rate: %u\n", floor_setup->data.t0.rate); av_dlog(NULL, "floor0 bark map size: %u\n", floor_setup->data.t0.bark_map_size); av_dlog(NULL, "floor0 amplitude bits: %u\n", floor_setup->data.t0.amplitude_bits); av_dlog(NULL, "floor0 amplitude offset: %u\n", floor_setup->data.t0.amplitude_offset); av_dlog(NULL, "floor0 number of books: %u\n", floor_setup->data.t0.num_books); av_dlog(NULL, "floor0 book list pointer: %p\n", floor_setup->data.t0.book_list); { int idx; for (idx = 0; idx < floor_setup->data.t0.num_books; ++idx) { av_dlog(NULL, " Book %d: %u\n", idx + 1, floor_setup->data.t0.book_list[idx]); } else { av_log(vc->avccontext, AV_LOG_ERROR, "Invalid floor type!\n"); return 0; | 14,463 |
1 | static void ppc_core99_init(MachineState *machine) { ram_addr_t ram_size = machine->ram_size; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; const char *boot_device = machine->boot_order; PowerPCCPU *cpu = NULL; CPUPPCState *env = NULL; char *filename; qemu_irq *pic, **openpic_irqs; MemoryRegion *isa = g_new(MemoryRegion, 1); MemoryRegion *unin_memory = g_new(MemoryRegion, 1); MemoryRegion *unin2_memory = g_new(MemoryRegion, 1); int linux_boot, i, j, k; MemoryRegion *ram = g_new(MemoryRegion, 1), *bios = g_new(MemoryRegion, 1); hwaddr kernel_base, initrd_base, cmdline_base = 0; long kernel_size, initrd_size; PCIBus *pci_bus; PCIDevice *macio; MACIOIDEState *macio_ide; BusState *adb_bus; MacIONVRAMState *nvr; int bios_size, ndrv_size; uint8_t *ndrv_file; MemoryRegion *pic_mem, *escc_mem; MemoryRegion *escc_bar = g_new(MemoryRegion, 1); int ppc_boot_device; DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; void *fw_cfg; int machine_arch; SysBusDevice *s; DeviceState *dev; int *token = g_new(int, 1); hwaddr nvram_addr = 0xFFF04000; uint64_t tbfreq; linux_boot = (kernel_filename != NULL); /* init CPUs */ if (machine->cpu_model == NULL) { #ifdef TARGET_PPC64 machine->cpu_model = "970fx"; #else machine->cpu_model = "G4"; #endif } for (i = 0; i < smp_cpus; i++) { cpu = POWERPC_CPU(cpu_generic_init(TYPE_POWERPC_CPU, machine->cpu_model)); if (cpu == NULL) { fprintf(stderr, "Unable to find PowerPC CPU definition\n"); exit(1); } env = &cpu->env; /* Set time-base frequency to 100 Mhz */ cpu_ppc_tb_init(env, TBFREQ); qemu_register_reset(ppc_core99_reset, cpu); } /* allocate RAM */ memory_region_allocate_system_memory(ram, NULL, "ppc_core99.ram", ram_size); memory_region_add_subregion(get_system_memory(), 0, ram); /* allocate and load BIOS */ memory_region_init_ram(bios, NULL, "ppc_core99.bios", BIOS_SIZE, &error_fatal); if (bios_name == NULL) bios_name = PROM_FILENAME; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); memory_region_set_readonly(bios, true); memory_region_add_subregion(get_system_memory(), PROM_ADDR, bios); /* Load OpenBIOS (ELF) */ if (filename) { bios_size = load_elf(filename, NULL, NULL, NULL, NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); g_free(filename); } else { bios_size = -1; } if (bios_size < 0 || bios_size > BIOS_SIZE) { error_report("could not load PowerPC bios '%s'", bios_name); exit(1); } if (linux_boot) { uint64_t lowaddr = 0; int bswap_needed; #ifdef BSWAP_NEEDED bswap_needed = 1; #else bswap_needed = 0; #endif kernel_base = KERNEL_LOAD_ADDR; kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, NULL, &lowaddr, NULL, 1, PPC_ELF_MACHINE, 0, 0); if (kernel_size < 0) kernel_size = load_aout(kernel_filename, kernel_base, ram_size - kernel_base, bswap_needed, TARGET_PAGE_SIZE); if (kernel_size < 0) kernel_size = load_image_targphys(kernel_filename, kernel_base, ram_size - kernel_base); if (kernel_size < 0) { error_report("could not load kernel '%s'", kernel_filename); exit(1); } /* load initrd */ if (initrd_filename) { initrd_base = round_page(kernel_base + kernel_size + KERNEL_GAP); initrd_size = load_image_targphys(initrd_filename, initrd_base, ram_size - initrd_base); if (initrd_size < 0) { error_report("could not load initial ram disk '%s'", initrd_filename); exit(1); } cmdline_base = round_page(initrd_base + initrd_size); } else { initrd_base = 0; initrd_size = 0; cmdline_base = round_page(kernel_base + kernel_size + KERNEL_GAP); } 
ppc_boot_device = 'm'; } else { kernel_base = 0; kernel_size = 0; initrd_base = 0; initrd_size = 0; ppc_boot_device = '\0'; /* We consider that NewWorld PowerMac never have any floppy drive * For now, OHW cannot boot from the network. */ for (i = 0; boot_device[i] != '\0'; i++) { if (boot_device[i] >= 'c' && boot_device[i] <= 'f') { ppc_boot_device = boot_device[i]; break; } } if (ppc_boot_device == '\0') { fprintf(stderr, "No valid boot device for Mac99 machine\n"); exit(1); } } /* Register 8 MB of ISA IO space */ memory_region_init_alias(isa, NULL, "isa_mmio", get_system_io(), 0, 0x00800000); memory_region_add_subregion(get_system_memory(), 0xf2000000, isa); /* UniN init: XXX should be a real device */ memory_region_init_io(unin_memory, NULL, &unin_ops, token, "unin", 0x1000); memory_region_add_subregion(get_system_memory(), 0xf8000000, unin_memory); memory_region_init_io(unin2_memory, NULL, &unin_ops, token, "unin", 0x1000); memory_region_add_subregion(get_system_memory(), 0xf3000000, unin2_memory); openpic_irqs = g_malloc0(smp_cpus * sizeof(qemu_irq *)); openpic_irqs[0] = g_malloc0(smp_cpus * sizeof(qemu_irq) * OPENPIC_OUTPUT_NB); for (i = 0; i < smp_cpus; i++) { /* Mac99 IRQ connection between OpenPIC outputs pins * and PowerPC input pins */ switch (PPC_INPUT(env)) { case PPC_FLAGS_INPUT_6xx: openpic_irqs[i] = openpic_irqs[0] + (i * OPENPIC_OUTPUT_NB); openpic_irqs[i][OPENPIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; openpic_irqs[i][OPENPIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT]; openpic_irqs[i][OPENPIC_OUTPUT_MCK] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_MCP]; /* Not connected ? */ openpic_irqs[i][OPENPIC_OUTPUT_DEBUG] = NULL; /* Check this */ openpic_irqs[i][OPENPIC_OUTPUT_RESET] = ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_HRESET]; break; #if defined(TARGET_PPC64) case PPC_FLAGS_INPUT_970: openpic_irqs[i] = openpic_irqs[0] + (i * OPENPIC_OUTPUT_NB); openpic_irqs[i][OPENPIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT]; openpic_irqs[i][OPENPIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT]; openpic_irqs[i][OPENPIC_OUTPUT_MCK] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_MCP]; /* Not connected ? 
*/ openpic_irqs[i][OPENPIC_OUTPUT_DEBUG] = NULL; /* Check this */ openpic_irqs[i][OPENPIC_OUTPUT_RESET] = ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_HRESET]; break; #endif /* defined(TARGET_PPC64) */ default: error_report("Bus model not supported on mac99 machine"); exit(1); } } pic = g_new0(qemu_irq, 64); dev = qdev_create(NULL, TYPE_OPENPIC); qdev_prop_set_uint32(dev, "model", OPENPIC_MODEL_RAVEN); qdev_init_nofail(dev); s = SYS_BUS_DEVICE(dev); pic_mem = s->mmio[0].memory; k = 0; for (i = 0; i < smp_cpus; i++) { for (j = 0; j < OPENPIC_OUTPUT_NB; j++) { sysbus_connect_irq(s, k++, openpic_irqs[i][j]); } } for (i = 0; i < 64; i++) { pic[i] = qdev_get_gpio_in(dev, i); } if (PPC_INPUT(env) == PPC_FLAGS_INPUT_970) { /* 970 gets a U3 bus */ pci_bus = pci_pmac_u3_init(pic, get_system_memory(), get_system_io()); machine_arch = ARCH_MAC99_U3; } else { pci_bus = pci_pmac_init(pic, get_system_memory(), get_system_io()); machine_arch = ARCH_MAC99; } object_property_set_bool(OBJECT(pci_bus), true, "realized", &error_abort); machine->usb |= defaults_enabled() && !machine->usb_disabled; /* Timebase Frequency */ if (kvm_enabled()) { tbfreq = kvmppc_get_tbfreq(); } else { tbfreq = TBFREQ; } /* init basic PC hardware */ escc_mem = escc_init(0, pic[0x25], pic[0x24], serial_hds[0], serial_hds[1], ESCC_CLOCK, 4); memory_region_init_alias(escc_bar, NULL, "escc-bar", escc_mem, 0, memory_region_size(escc_mem)); macio = pci_create(pci_bus, -1, TYPE_NEWWORLD_MACIO); dev = DEVICE(macio); qdev_connect_gpio_out(dev, 0, pic[0x19]); /* CUDA */ qdev_connect_gpio_out(dev, 1, pic[0x0d]); /* IDE */ qdev_connect_gpio_out(dev, 2, pic[0x02]); /* IDE DMA */ qdev_connect_gpio_out(dev, 3, pic[0x0e]); /* IDE */ qdev_connect_gpio_out(dev, 4, pic[0x03]); /* IDE DMA */ qdev_prop_set_uint64(dev, "frequency", tbfreq); macio_init(macio, pic_mem, escc_bar); /* We only emulate 2 out of 3 IDE controllers for now */ ide_drive_get(hd, ARRAY_SIZE(hd)); macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio), "ide[0]")); macio_ide_init_drives(macio_ide, hd); macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio), "ide[1]")); macio_ide_init_drives(macio_ide, &hd[MAX_IDE_DEVS]); dev = DEVICE(object_resolve_path_component(OBJECT(macio), "cuda")); adb_bus = qdev_get_child_bus(dev, "adb.0"); dev = qdev_create(adb_bus, TYPE_ADB_KEYBOARD); qdev_init_nofail(dev); dev = qdev_create(adb_bus, TYPE_ADB_MOUSE); qdev_init_nofail(dev); if (machine->usb) { pci_create_simple(pci_bus, -1, "pci-ohci"); /* U3 needs to use USB for input because Linux doesn't support via-cuda on PPC64 */ if (machine_arch == ARCH_MAC99_U3) { USBBus *usb_bus = usb_bus_find(-1); usb_create_simple(usb_bus, "usb-kbd"); usb_create_simple(usb_bus, "usb-mouse"); } } pci_vga_init(pci_bus); if (graphic_depth != 15 && graphic_depth != 32 && graphic_depth != 8) { graphic_depth = 15; } for (i = 0; i < nb_nics; i++) { pci_nic_init_nofail(&nd_table[i], pci_bus, "ne2k_pci", NULL); } /* The NewWorld NVRAM is not located in the MacIO device */ #ifdef CONFIG_KVM if (kvm_enabled() && getpagesize() > 4096) { /* We can't combine read-write and read-only in a single page, so move the NVRAM out of ROM again for KVM */ nvram_addr = 0xFFE00000; } #endif dev = qdev_create(NULL, TYPE_MACIO_NVRAM); qdev_prop_set_uint32(dev, "size", 0x2000); qdev_prop_set_uint32(dev, "it_shift", 1); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, nvram_addr); nvr = MACIO_NVRAM(dev); pmac_format_nvram_partition(nvr, 0x2000); /* No PCI init: the BIOS will do it */ fw_cfg = 
fw_cfg_init_mem(CFG_ADDR, CFG_ADDR + 2); fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus); fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, machine_arch); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); if (kernel_cmdline) { fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, cmdline_base); pstrcpy_targphys("cmdline", cmdline_base, TARGET_PAGE_SIZE, kernel_cmdline); } else { fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, 0); } fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_base); fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ppc_boot_device); fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_WIDTH, graphic_width); fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_HEIGHT, graphic_height); fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_DEPTH, graphic_depth); fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled()); if (kvm_enabled()) { #ifdef CONFIG_KVM uint8_t *hypercall; hypercall = g_malloc(16); kvmppc_get_hypercall(env, hypercall, 16); fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16); fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid()); #endif } fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, tbfreq); /* Mac OS X requires a "known good" clock-frequency value; pass it one. */ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_CLOCKFREQ, CLOCKFREQ); fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_BUSFREQ, BUSFREQ); fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_NVRAM_ADDR, nvram_addr); /* MacOS NDRV VGA driver */ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, NDRV_VGA_FILENAME); if (filename) { ndrv_size = get_image_size(filename); if (ndrv_size != -1) { ndrv_file = g_malloc(ndrv_size); ndrv_size = load_image(filename, ndrv_file); fw_cfg_add_file(fw_cfg, "ndrv/qemu_vga.ndrv", ndrv_file, ndrv_size); } g_free(filename); } qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); } | 14,465 |
1 | static void test_qemu_strtoull_empty(void) { const char *str = ""; char f = 'X'; const char *endptr = &f; uint64_t res = 999; int err; err = qemu_strtoull(str, &endptr, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, 0); g_assert(endptr == str); } | 14,466 |
1 | void pcmcia_socket_unregister(PCMCIASocket *socket) { struct pcmcia_socket_entry_s *entry, **ptr; ptr = &pcmcia_sockets; for (entry = *ptr; entry; ptr = &entry->next, entry = *ptr) if (entry->socket == socket) { *ptr = entry->next; g_free(entry); } } | 14,467 |
1 | static ssize_t qsb_get_iovec(const QEMUSizedBuffer *qsb, off_t pos, off_t *d_off) { ssize_t i; off_t curr = 0; if (pos > qsb->used) { return -1; } for (i = 0; i < qsb->n_iov; i++) { if (curr + qsb->iov[i].iov_len > pos) { *d_off = pos - curr; return i; } curr += qsb->iov[i].iov_len; } return -1; } | 14,468 |
1 | static gboolean fd_chr_read(GIOChannel *chan, GIOCondition cond, void *opaque) { CharDriverState *chr = opaque; FDCharDriver *s = chr->opaque; int len; uint8_t buf[READ_BUF_LEN]; GIOStatus status; gsize bytes_read; len = sizeof(buf); if (len > s->max_size) { len = s->max_size; } if (len == 0) { return TRUE; } status = g_io_channel_read_chars(chan, (gchar *)buf, len, &bytes_read, NULL); if (status == G_IO_STATUS_EOF) { if (s->fd_in_tag) { g_source_remove(s->fd_in_tag); s->fd_in_tag = 0; } qemu_chr_be_event(chr, CHR_EVENT_CLOSED); return FALSE; } if (status == G_IO_STATUS_NORMAL) { qemu_chr_be_write(chr, buf, bytes_read); } return TRUE; } | 14,470 |
1 | void hmp_drive_add_node(Monitor *mon, const char *optstr) { QemuOpts *opts; QDict *qdict; Error *local_err = NULL; opts = qemu_opts_parse_noisily(&qemu_drive_opts, optstr, false); if (!opts) { return; } qdict = qemu_opts_to_qdict(opts, NULL); if (!qdict_get_try_str(qdict, "node-name")) { error_report("'node-name' needs to be specified"); goto out; } BlockDriverState *bs = bds_tree_init(qdict, &local_err); if (!bs) { error_report_err(local_err); goto out; } QTAILQ_INSERT_TAIL(&monitor_bdrv_states, bs, monitor_list); out: qemu_opts_del(opts); } | 14,471 |
0 | void cpu_breakpoint_remove_all(CPUState *cpu, int mask) { #if defined(TARGET_HAS_ICE) CPUBreakpoint *bp, *next; QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) { if (bp->flags & mask) { cpu_breakpoint_remove_by_ref(cpu, bp); } } #endif } | 14,473 |
0 | solisten(port, laddr, lport, flags) u_int port; u_int32_t laddr; u_int lport; int flags; { struct sockaddr_in addr; struct socket *so; int s, addrlen = sizeof(addr), opt = 1; DEBUG_CALL("solisten"); DEBUG_ARG("port = %d", port); DEBUG_ARG("laddr = %x", laddr); DEBUG_ARG("lport = %d", lport); DEBUG_ARG("flags = %x", flags); if ((so = socreate()) == NULL) { /* free(so); Not sofree() ??? free(NULL) == NOP */ return NULL; } /* Don't tcp_attach... we don't need so_snd nor so_rcv */ if ((so->so_tcpcb = tcp_newtcpcb(so)) == NULL) { free(so); return NULL; } insque(so,&tcb); /* * SS_FACCEPTONCE sockets must time out. */ if (flags & SS_FACCEPTONCE) so->so_tcpcb->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT*2; so->so_state = (SS_FACCEPTCONN|flags); so->so_lport = lport; /* Kept in network format */ so->so_laddr.s_addr = laddr; /* Ditto */ addr.sin_family = AF_INET; addr.sin_addr.s_addr = INADDR_ANY; addr.sin_port = port; if (((s = socket(AF_INET,SOCK_STREAM,0)) < 0) || (setsockopt(s,SOL_SOCKET,SO_REUSEADDR,(char *)&opt,sizeof(int)) < 0) || (bind(s,(struct sockaddr *)&addr, sizeof(addr)) < 0) || (listen(s,1) < 0)) { int tmperrno = errno; /* Don't clobber the real reason we failed */ close(s); sofree(so); /* Restore the real errno */ #ifdef _WIN32 WSASetLastError(tmperrno); #else errno = tmperrno; #endif return NULL; } setsockopt(s,SOL_SOCKET,SO_OOBINLINE,(char *)&opt,sizeof(int)); getsockname(s,(struct sockaddr *)&addr,&addrlen); so->so_fport = addr.sin_port; if (addr.sin_addr.s_addr == 0 || addr.sin_addr.s_addr == loopback_addr.s_addr) so->so_faddr = alias_addr; else so->so_faddr = addr.sin_addr; so->s = s; return so; } | 14,474 |
0 | static void *qemu_tcg_cpu_thread_fn(void *arg) { CPUState *cpu = arg; qemu_tcg_init_cpu_signals(); qemu_thread_get_self(cpu->thread); qemu_mutex_lock(&qemu_global_mutex); CPU_FOREACH(cpu) { cpu->thread_id = qemu_get_thread_id(); cpu->created = true; cpu->exception_index = -1; cpu->can_do_io = 1; } qemu_cond_signal(&qemu_cpu_cond); /* wait for initial kick-off after machine start */ while (QTAILQ_FIRST(&cpus)->stopped) { qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex); /* process any pending work */ CPU_FOREACH(cpu) { qemu_wait_io_event_common(cpu); } } while (1) { tcg_exec_all(); if (use_icount) { int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL); if (deadline == 0) { qemu_clock_notify(QEMU_CLOCK_VIRTUAL); } } qemu_tcg_wait_io_event(); } return NULL; } | 14,475 |
0 | int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) { int ret; AVPacket user_pkt = *avpkt; *got_packet_ptr = 0; if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) { av_free_packet(avpkt); av_init_packet(avpkt); avpkt->size = 0; return 0; } if (av_image_check_size(avctx->width, avctx->height, 0, avctx)) return AVERROR(EINVAL); av_assert0(avctx->codec->encode2); ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr); av_assert0(ret <= 0); if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) { if (user_pkt.data) { if (user_pkt.size >= avpkt->size) { memcpy(user_pkt.data, avpkt->data, avpkt->size); } else { av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size); avpkt->size = user_pkt.size; ret = -1; } avpkt->data = user_pkt.data; avpkt->destruct = user_pkt.destruct; } else { if (av_dup_packet(avpkt) < 0) { ret = AVERROR(ENOMEM); } } } if (!ret) { if (!*got_packet_ptr) avpkt->size = 0; else if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) avpkt->pts = avpkt->dts = frame->pts; if (!user_pkt.data && avpkt->data && avpkt->destruct == av_destruct_packet) { uint8_t *new_data = av_realloc(avpkt->data, avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE); if (new_data) avpkt->data = new_data; } avctx->frame_number++; } if (ret < 0 || !*got_packet_ptr) av_free_packet(avpkt); emms_c(); return ret; } | 14,477 |
0 | static int net_rx_ok(void *opaque) { struct XenNetDev *netdev = opaque; RING_IDX rc, rp; if (netdev->xendev.be_state != XenbusStateConnected) return 0; rc = netdev->rx_ring.req_cons; rp = netdev->rx_ring.sring->req_prod; xen_rmb(); if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) { xen_be_printf(&netdev->xendev, 2, "%s: no rx buffers (%d/%d)\n", __FUNCTION__, rc, rp); return 0; } return 1; } | 14,478 |
0 | uint64_t migrate_max_downtime(void) { return max_downtime; } | 14,479 |
0 | static void tc6393xb_gpio_set(void *opaque, int line, int level) { // TC6393xbState *s = opaque; if (line > TC6393XB_GPIOS) { printf("%s: No GPIO pin %i\n", __FUNCTION__, line); return; } // FIXME: how does the chip reflect the GPIO input level change? } | 14,480 |
0 | static void ne2000_receive(void *opaque, const uint8_t *buf, int size) { NE2000State *s = opaque; uint8_t *p; int total_len, next, avail, len, index, mcast_idx; uint8_t buf1[60]; static const uint8_t broadcast_macaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; #if defined(DEBUG_NE2000) printf("NE2000: received len=%d\n", size); #endif if (s->cmd & E8390_STOP || ne2000_buffer_full(s)) return; /* XXX: check this */ if (s->rxcr & 0x10) { /* promiscuous: receive all */ } else { if (!memcmp(buf, broadcast_macaddr, 6)) { /* broadcast address */ if (!(s->rxcr & 0x04)) return; } else if (buf[0] & 0x01) { /* multicast */ if (!(s->rxcr & 0x08)) return; mcast_idx = compute_mcast_idx(buf); if (!(s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7)))) return; } else if (s->mem[0] == buf[0] && s->mem[2] == buf[1] && s->mem[4] == buf[2] && s->mem[6] == buf[3] && s->mem[8] == buf[4] && s->mem[10] == buf[5]) { /* match */ } else { return; } } /* if too small buffer, then expand it */ if (size < MIN_BUF_SIZE) { memcpy(buf1, buf, size); memset(buf1 + size, 0, MIN_BUF_SIZE - size); buf = buf1; size = MIN_BUF_SIZE; } index = s->curpag << 8; /* 4 bytes for header */ total_len = size + 4; /* address for next packet (4 bytes for CRC) */ next = index + ((total_len + 4 + 255) & ~0xff); if (next >= s->stop) next -= (s->stop - s->start); /* prepare packet header */ p = s->mem + index; s->rsr = ENRSR_RXOK; /* receive status */ /* XXX: check this */ if (buf[0] & 0x01) s->rsr |= ENRSR_PHY; p[0] = s->rsr; p[1] = next >> 8; p[2] = total_len; p[3] = total_len >> 8; index += 4; /* write packet data */ while (size > 0) { avail = s->stop - index; len = size; if (len > avail) len = avail; memcpy(s->mem + index, buf, len); buf += len; index += len; if (index == s->stop) index = s->start; size -= len; } s->curpag = next >> 8; /* now we can signal we have received something */ s->isr |= ENISR_RX; ne2000_update_irq(s); } | 14,481 |
0 | static TCGv gen_muls_i64_i32(TCGv a, TCGv b) { TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64); TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64); tcg_gen_ext_i32_i64(tmp1, a); dead_tmp(a); tcg_gen_ext_i32_i64(tmp2, b); dead_tmp(b); tcg_gen_mul_i64(tmp1, tmp1, tmp2); return tmp1; } | 14,482 |
0 | void runstate_set(RunState new_state) { if (new_state >= RUN_STATE_MAX || !runstate_valid_transitions[current_run_state][new_state]) { fprintf(stderr, "invalid runstate transition\n"); abort(); } current_run_state = new_state; } | 14,483 |
0 | static int kvmppc_put_books_sregs(PowerPCCPU *cpu) { CPUPPCState *env = &cpu->env; struct kvm_sregs sregs; int i; sregs.pvr = env->spr[SPR_PVR]; sregs.u.s.sdr1 = env->spr[SPR_SDR1]; /* Sync SLB */ #ifdef TARGET_PPC64 for (i = 0; i < ARRAY_SIZE(env->slb); i++) { sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid; if (env->slb[i].esid & SLB_ESID_V) { sregs.u.s.ppc64.slb[i].slbe |= i; } sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid; } #endif /* Sync SRs */ for (i = 0; i < 16; i++) { sregs.u.s.ppc32.sr[i] = env->sr[i]; } /* Sync BATs */ for (i = 0; i < 8; i++) { /* Beware. We have to swap upper and lower bits here */ sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32) | env->DBAT[1][i]; sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32) | env->IBAT[1][i]; } return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs); } | 14,484 |
0 | int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t window_size) { long len; if (fd < 0) { return -1; } len = (window_size / SPAPR_TCE_PAGE_SIZE)*sizeof(sPAPRTCE); if ((munmap(table, len) < 0) || (close(fd) < 0)) { fprintf(stderr, "KVM: Unexpected error removing TCE table: %s", strerror(errno)); /* Leak the table */ } return 0; } | 14,485 |
0 | static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc) { int insn_len = 2; int i; if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) { tcg_gen_debug_insn_start(dc->pc); } /* Load a halfword onto the instruction register. */ dc->ir = cris_fetch(env, dc, dc->pc, 2, 0); /* Now decode it. */ dc->opcode = EXTRACT_FIELD(dc->ir, 4, 11); dc->op1 = EXTRACT_FIELD(dc->ir, 0, 3); dc->op2 = EXTRACT_FIELD(dc->ir, 12, 15); dc->zsize = EXTRACT_FIELD(dc->ir, 4, 4); dc->zzsize = EXTRACT_FIELD(dc->ir, 4, 5); dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10); /* Large switch for all insns. */ for (i = 0; i < ARRAY_SIZE(decinfo); i++) { if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) { insn_len = decinfo[i].dec(env, dc); break; } } #if !defined(CONFIG_USER_ONLY) /* Single-stepping ? */ if (dc->tb_flags & S_FLAG) { int l1; l1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_NE, cpu_PR[PR_SPC], dc->pc, l1); /* We treat SPC as a break with an odd trap vector. */ cris_evaluate_flags(dc); t_gen_mov_env_TN(trap_vector, tcg_const_tl(3)); tcg_gen_movi_tl(env_pc, dc->pc + insn_len); tcg_gen_movi_tl(cpu_PR[PR_SPC], dc->pc + insn_len); t_gen_raise_exception(EXCP_BREAK); gen_set_label(l1); } #endif return insn_len; } | 14,486 |
0 | static int64_t coroutine_fn cow_co_get_block_status(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *num_same) { BDRVCowState *s = bs->opaque; int ret = cow_co_is_allocated(bs, sector_num, nb_sectors, num_same); int64_t offset = s->cow_sectors_offset + (sector_num << BDRV_SECTOR_BITS); if (ret < 0) { return ret; } return (ret ? BDRV_BLOCK_DATA : 0) | offset | BDRV_BLOCK_OFFSET_VALID; } | 14,487 |
0 | static int rle_unpack(unsigned char *src, unsigned char *dest, int len) { unsigned char *ps; unsigned char *pd; int i, l; ps = src; pd = dest; if (len & 1) *pd++ = *ps++; len >>= 1; i = 0; do { l = *ps++; if (l & 0x80) { l = (l & 0x7F) * 2; memcpy(pd, ps, l); ps += l; pd += l; } else { for (i = 0; i < l; i++) { *pd++ = ps[0]; *pd++ = ps[1]; } ps += 2; } i += l; } while (i < len); return (ps - src); } | 14,488 |
0 | void scsi_bus_legacy_handle_cmdline(SCSIBus *bus, Error **errp) { Location loc; DriveInfo *dinfo; int unit; Error *err = NULL; loc_push_none(&loc); for (unit = 0; unit <= bus->info->max_target; unit++) { dinfo = drive_get(IF_SCSI, bus->busnr, unit); if (dinfo == NULL) { continue; } qemu_opts_loc_restore(dinfo->opts); scsi_bus_legacy_add_drive(bus, blk_bs(blk_by_legacy_dinfo(dinfo)), unit, false, -1, NULL, &err); if (err != NULL) { error_report("%s", error_get_pretty(err)); error_propagate(errp, err); break; } } loc_pop(&loc); } | 14,489 |
0 | static void netfilter_print_info(Monitor *mon, NetFilterState *nf) { char *str; ObjectProperty *prop; ObjectPropertyIterator iter; StringOutputVisitor *ov; /* generate info str */ object_property_iter_init(&iter, OBJECT(nf)); while ((prop = object_property_iter_next(&iter))) { if (!strcmp(prop->name, "type")) { continue; } ov = string_output_visitor_new(false); object_property_get(OBJECT(nf), string_output_get_visitor(ov), prop->name, NULL); str = string_output_get_string(ov); visit_free(string_output_get_visitor(ov)); monitor_printf(mon, ",%s=%s", prop->name, str); g_free(str); } monitor_printf(mon, "\n"); } | 14,490 |
0 | static int restore_user_regs(CPUPPCState *env, struct target_mcontext *frame, int sig) { target_ulong save_r2 = 0; target_ulong msr; target_ulong ccr; int i; if (!sig) { save_r2 = env->gpr[2]; } /* Restore general registers. */ for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { if (__get_user(env->gpr[i], &frame->mc_gregs[i])) { return 1; } } if (__get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]) || __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]) || __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]) || __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER])) return 1; if (__get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR])) return 1; for (i = 0; i < ARRAY_SIZE(env->crf); i++) { env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; } if (!sig) { env->gpr[2] = save_r2; } /* Restore MSR. */ if (__get_user(msr, &frame->mc_gregs[TARGET_PT_MSR])) return 1; /* If doing signal return, restore the previous little-endian mode. */ if (sig) env->msr = (env->msr & ~MSR_LE) | (msr & MSR_LE); /* Restore Altivec registers if necessary. */ if (env->insns_flags & PPC_ALTIVEC) { for (i = 0; i < ARRAY_SIZE(env->avr); i++) { ppc_avr_t *avr = &env->avr[i]; ppc_avr_t *vreg = &frame->mc_vregs.altivec[i]; if (__get_user(avr->u64[0], &vreg->u64[0]) || __get_user(avr->u64[1], &vreg->u64[1])) { return 1; } } /* Set MSR_VEC in the saved MSR value to indicate that frame->mc_vregs contains valid data. */ if (__get_user(env->spr[SPR_VRSAVE], (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3]))) return 1; } /* Restore floating point registers. */ if (env->insns_flags & PPC_FLOAT) { uint64_t fpscr; for (i = 0; i < ARRAY_SIZE(env->fpr); i++) { if (__get_user(env->fpr[i], &frame->mc_fregs[i])) { return 1; } } if (__get_user(fpscr, &frame->mc_fregs[32])) return 1; env->fpscr = (uint32_t) fpscr; } /* Save SPE registers. The kernel only saves the high half. */ if (env->insns_flags & PPC_SPE) { #if defined(TARGET_PPC64) for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { uint32_t hi; if (__get_user(hi, &frame->mc_vregs.spe[i])) { return 1; } env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]); } #else for (i = 0; i < ARRAY_SIZE(env->gprh); i++) { if (__get_user(env->gprh[i], &frame->mc_vregs.spe[i])) { return 1; } } #endif if (__get_user(env->spe_fscr, &frame->mc_vregs.spe[32])) return 1; } return 0; } | 14,491 |
0 | static void virtio_pci_config_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { VirtIOPCIProxy *proxy = opaque; uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev); if (addr < config) { virtio_ioport_write(proxy, addr, val); return; } addr -= config; /* * Virtio-PCI is odd. Ioports are LE but config space is target native * endian. */ switch (size) { case 1: virtio_config_writeb(proxy->vdev, addr, val); break; case 2: if (virtio_is_big_endian()) { val = bswap16(val); } virtio_config_writew(proxy->vdev, addr, val); break; case 4: if (virtio_is_big_endian()) { val = bswap32(val); } virtio_config_writel(proxy->vdev, addr, val); break; } } | 14,492 |
0 | int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m) { BDRVQcowState *s = bs->opaque; int i, j = 0, l2_index, ret; uint64_t *old_cluster, start_sect, l2_offset, *l2_table; uint64_t cluster_offset = m->cluster_offset; bool cow = false; trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters); if (m->nb_clusters == 0) return 0; old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t)); /* copy content of unmodified sectors */ start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9; if (m->n_start) { cow = true; qemu_co_mutex_unlock(&s->lock); ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start); qemu_co_mutex_lock(&s->lock); if (ret < 0) goto err; } if (m->nb_available & (s->cluster_sectors - 1)) { uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1); cow = true; qemu_co_mutex_unlock(&s->lock); ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9), m->nb_available - end, s->cluster_sectors); qemu_co_mutex_lock(&s->lock); if (ret < 0) goto err; } /* * Update L2 table. * * Before we update the L2 table to actually point to the new cluster, we * need to be sure that the refcounts have been increased and COW was * handled. */ if (cow) { qcow2_cache_depends_on_flush(s->l2_table_cache); } qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache); ret = get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index); if (ret < 0) { goto err; } qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table); for (i = 0; i < m->nb_clusters; i++) { /* if two concurrent writes happen to the same unallocated cluster * each write allocates separate cluster and writes data concurrently. * The first one to complete updates l2 table with pointer to its * cluster the second one has to do RMW (which is done above by * copy_sectors()), update l2 table with its cluster pointer and free * old cluster. This is what this loop does */ if(l2_table[l2_index + i] != 0) old_cluster[j++] = l2_table[l2_index + i]; l2_table[l2_index + i] = cpu_to_be64((cluster_offset + (i << s->cluster_bits)) | QCOW_OFLAG_COPIED); } ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); if (ret < 0) { goto err; } /* * If this was a COW, we need to decrease the refcount of the old cluster. * Also flush bs->file to get the right order for L2 and refcount update. */ if (j != 0) { for (i = 0; i < j; i++) { qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1); } } ret = 0; err: g_free(old_cluster); return ret; } | 14,493 |
0 | MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, uint8_t *buf, int len, hwaddr addr1, hwaddr l, MemoryRegion *mr) { uint8_t *ptr; uint64_t val; MemTxResult result = MEMTX_OK; bool release_lock = false; for (;;) { if (!memory_access_is_direct(mr, false)) { /* I/O case */ release_lock |= prepare_mmio_access(mr); l = memory_access_size(mr, l, addr1); switch (l) { case 8: /* 64 bit read access */ result |= memory_region_dispatch_read(mr, addr1, &val, 8, attrs); stq_p(buf, val); break; case 4: /* 32 bit read access */ result |= memory_region_dispatch_read(mr, addr1, &val, 4, attrs); stl_p(buf, val); break; case 2: /* 16 bit read access */ result |= memory_region_dispatch_read(mr, addr1, &val, 2, attrs); stw_p(buf, val); break; case 1: /* 8 bit read access */ result |= memory_region_dispatch_read(mr, addr1, &val, 1, attrs); stb_p(buf, val); break; default: abort(); } } else { /* RAM case */ ptr = qemu_map_ram_ptr(mr->ram_block, addr1); memcpy(buf, ptr, l); } if (release_lock) { qemu_mutex_unlock_iothread(); release_lock = false; } len -= l; buf += l; addr += l; if (!len) { break; } l = len; mr = address_space_translate(as, addr, &addr1, &l, false); } return result; } | 14,494 |
0 | FsTypeEntry *get_fsdev_fsentry(char *id) { struct FsTypeListEntry *fsle; QTAILQ_FOREACH(fsle, &fstype_entries, next) { if (strcmp(fsle->fse.fsdev_id, id) == 0) { return &fsle->fse; } } return NULL; } | 14,495 |
0 | bool eth_parse_ipv6_hdr(struct iovec *pkt, int pkt_frags, size_t ip6hdr_off, uint8_t *l4proto, size_t *full_hdr_len) { struct ip6_header ip6_hdr; struct ip6_ext_hdr ext_hdr; size_t bytes_read; bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off, &ip6_hdr, sizeof(ip6_hdr)); if (bytes_read < sizeof(ip6_hdr)) { return false; } *full_hdr_len = sizeof(struct ip6_header); if (!eth_is_ip6_extension_header_type(ip6_hdr.ip6_nxt)) { *l4proto = ip6_hdr.ip6_nxt; return true; } do { bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off + *full_hdr_len, &ext_hdr, sizeof(ext_hdr)); *full_hdr_len += (ext_hdr.ip6r_len + 1) * IP6_EXT_GRANULARITY; } while (eth_is_ip6_extension_header_type(ext_hdr.ip6r_nxt)); *l4proto = ext_hdr.ip6r_nxt; return true; } | 14,497 |
0 | static void kvmppc_timer_hack(void *opaque) { qemu_service_io(); qemu_mod_timer(kvmppc_timer, qemu_get_clock_ns(vm_clock) + kvmppc_timer_rate); } | 14,498 |
0 | static void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUPPCState *env) { struct target_rt_sigframe *rt_sf; struct target_mcontext *frame; target_ulong rt_sf_addr, newsp = 0; int i, err = 0; int signal; rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf)); if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1)) goto sigsegv; signal = current_exec_domain_sig(sig); copy_siginfo_to_user(&rt_sf->info, info); __put_user(0, &rt_sf->uc.tuc_flags); __put_user(0, &rt_sf->uc.tuc_link); __put_user((target_ulong)target_sigaltstack_used.ss_sp, &rt_sf->uc.tuc_stack.ss_sp); __put_user(sas_ss_flags(env->gpr[1]), &rt_sf->uc.tuc_stack.ss_flags); __put_user(target_sigaltstack_used.ss_size, &rt_sf->uc.tuc_stack.ss_size); __put_user(h2g (&rt_sf->uc.tuc_mcontext), &rt_sf->uc.tuc_regs); for(i = 0; i < TARGET_NSIG_WORDS; i++) { __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]); } frame = &rt_sf->uc.tuc_mcontext; err |= save_user_regs(env, frame, TARGET_NR_rt_sigreturn); /* The kernel checks for the presence of a VDSO here. We don't emulate a vdso, so use a sigreturn system call. */ env->lr = (target_ulong) h2g(frame->tramp); /* Turn off all fp exceptions. */ env->fpscr = 0; /* Create a stack frame for the caller of the handler. */ newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16); __put_user(env->gpr[1], (target_ulong *)(uintptr_t) newsp); if (err) goto sigsegv; /* Set up registers for signal handler. */ env->gpr[1] = newsp; env->gpr[3] = (target_ulong) signal; env->gpr[4] = (target_ulong) h2g(&rt_sf->info); env->gpr[5] = (target_ulong) h2g(&rt_sf->uc); env->gpr[6] = (target_ulong) h2g(rt_sf); env->nip = (target_ulong) ka->_sa_handler; /* Signal handlers are entered in big-endian mode. */ env->msr &= ~MSR_LE; unlock_user_struct(rt_sf, rt_sf_addr, 1); return; sigsegv: unlock_user_struct(rt_sf, rt_sf_addr, 1); qemu_log("segfaulting from setup_rt_frame\n"); force_sig(TARGET_SIGSEGV); } | 14,500 |
0 | static void gen_test_cc(int cc, int label) { TCGv tmp; TCGv tmp2; int inv; switch (cc) { case 0: /* eq: Z */ tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); break; case 1: /* ne: !Z */ tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); break; case 2: /* cs: C */ tmp = load_cpu_field(CF); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); break; case 3: /* cc: !C */ tmp = load_cpu_field(CF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); break; case 4: /* mi: N */ tmp = load_cpu_field(NF); tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); break; case 5: /* pl: !N */ tmp = load_cpu_field(NF); tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); break; case 6: /* vs: V */ tmp = load_cpu_field(VF); tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); break; case 7: /* vc: !V */ tmp = load_cpu_field(VF); tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); break; case 8: /* hi: C && !Z */ inv = gen_new_label(); tmp = load_cpu_field(CF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv); dead_tmp(tmp); tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label); gen_set_label(inv); break; case 9: /* ls: !C || Z */ tmp = load_cpu_field(CF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); dead_tmp(tmp); tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); break; case 10: /* ge: N == V -> N ^ V == 0 */ tmp = load_cpu_field(VF); tmp2 = load_cpu_field(NF); tcg_gen_xor_i32(tmp, tmp, tmp2); dead_tmp(tmp2); tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); break; case 11: /* lt: N != V -> N ^ V != 0 */ tmp = load_cpu_field(VF); tmp2 = load_cpu_field(NF); tcg_gen_xor_i32(tmp, tmp, tmp2); dead_tmp(tmp2); tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); break; case 12: /* gt: !Z && N == V */ inv = gen_new_label(); tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv); dead_tmp(tmp); tmp = load_cpu_field(VF); tmp2 = load_cpu_field(NF); tcg_gen_xor_i32(tmp, tmp, tmp2); dead_tmp(tmp2); tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label); gen_set_label(inv); break; case 13: /* le: Z || N != V */ tmp = load_cpu_field(ZF); tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); dead_tmp(tmp); tmp = load_cpu_field(VF); tmp2 = load_cpu_field(NF); tcg_gen_xor_i32(tmp, tmp, tmp2); dead_tmp(tmp2); tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label); break; default: fprintf(stderr, "Bad condition code 0x%x\n", cc); abort(); } dead_tmp(tmp); } | 14,501 |
0 | static int kvm_put_xcrs(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct kvm_xcrs xcrs = {}; if (!kvm_has_xcrs()) { return 0; } xcrs.nr_xcrs = 1; xcrs.flags = 0; xcrs.xcrs[0].xcr = 0; xcrs.xcrs[0].value = env->xcr0; return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs); } | 14,502 |
0 | START_TEST(qint_destroy_test) { QInt *qi = qint_from_int(0); QDECREF(qi); } | 14,503 |
0 | static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend, Error **errp) { bool is_listen = qemu_opt_get_bool(opts, "server", false); bool is_waitconnect = is_listen && qemu_opt_get_bool(opts, "wait", true); bool is_telnet = qemu_opt_get_bool(opts, "telnet", false); bool is_tn3270 = qemu_opt_get_bool(opts, "tn3270", false); bool do_nodelay = !qemu_opt_get_bool(opts, "delay", true); int64_t reconnect = qemu_opt_get_number(opts, "reconnect", 0); const char *path = qemu_opt_get(opts, "path"); const char *host = qemu_opt_get(opts, "host"); const char *port = qemu_opt_get(opts, "port"); const char *tls_creds = qemu_opt_get(opts, "tls-creds"); SocketAddress *addr; ChardevSocket *sock; backend->type = CHARDEV_BACKEND_KIND_SOCKET; if (!path) { if (!host) { error_setg(errp, "chardev: socket: no host given"); return; } if (!port) { error_setg(errp, "chardev: socket: no port given"); return; } } else { if (tls_creds) { error_setg(errp, "TLS can only be used over TCP socket"); return; } } sock = backend->u.socket.data = g_new0(ChardevSocket, 1); qemu_chr_parse_common(opts, qapi_ChardevSocket_base(sock)); sock->has_nodelay = true; sock->nodelay = do_nodelay; sock->has_server = true; sock->server = is_listen; sock->has_telnet = true; sock->telnet = is_telnet; sock->has_tn3270 = true; sock->tn3270 = is_tn3270; sock->has_wait = true; sock->wait = is_waitconnect; sock->has_reconnect = true; sock->reconnect = reconnect; sock->tls_creds = g_strdup(tls_creds); addr = g_new0(SocketAddress, 1); if (path) { UnixSocketAddress *q_unix; addr->type = SOCKET_ADDRESS_KIND_UNIX; q_unix = addr->u.q_unix.data = g_new0(UnixSocketAddress, 1); q_unix->path = g_strdup(path); } else { addr->type = SOCKET_ADDRESS_KIND_INET; addr->u.inet.data = g_new(InetSocketAddress, 1); *addr->u.inet.data = (InetSocketAddress) { .host = g_strdup(host), .port = g_strdup(port), .has_to = qemu_opt_get(opts, "to"), .to = qemu_opt_get_number(opts, "to", 0), .has_ipv4 = qemu_opt_get(opts, "ipv4"), .ipv4 = qemu_opt_get_bool(opts, "ipv4", 0), .has_ipv6 = qemu_opt_get(opts, "ipv6"), .ipv6 = qemu_opt_get_bool(opts, "ipv6", 0), }; } sock->addr = addr; } | 14,504 |
0 | void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr) { tcg_insn_unit i1, i2; uint64_t pair; intptr_t diff = addr - jmp_addr; if (in_range_b(diff)) { i1 = B | (diff & 0x3fffffc); i2 = NOP; } else if (USE_REG_RA) { intptr_t lo, hi; diff = addr - (uintptr_t)tb_ret_addr; lo = (int16_t)diff; hi = (int32_t)(diff - lo); assert(diff == hi + lo); i1 = ADDIS | TAI(TCG_REG_TMP1, TCG_REG_RA, hi >> 16); i2 = ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, lo); } else { assert(TCG_TARGET_REG_BITS == 32 || addr == (int32_t)addr); i1 = ADDIS | TAI(TCG_REG_TMP1, 0, addr >> 16); i2 = ORI | SAI(TCG_REG_TMP1, TCG_REG_TMP1, addr); } #ifdef HOST_WORDS_BIGENDIAN pair = (uint64_t)i1 << 32 | i2; #else pair = (uint64_t)i2 << 32 | i1; #endif /* ??? __atomic_store_8, presuming there's some way to do that for 32-bit, otherwise this is good enough for 64-bit. */ *(uint64_t *)jmp_addr = pair; flush_icache_range(jmp_addr, jmp_addr + 8); } | 14,505 |
0 | int qcow2_update_header(BlockDriverState *bs) { BDRVQcowState *s = bs->opaque; QCowHeader *header; char *buf; size_t buflen = s->cluster_size; int ret; uint64_t total_size; uint32_t refcount_table_clusters; size_t header_length; Qcow2UnknownHeaderExtension *uext; buf = qemu_blockalign(bs, buflen); /* Header structure */ header = (QCowHeader*) buf; if (buflen < sizeof(*header)) { ret = -ENOSPC; goto fail; } header_length = sizeof(*header) + s->unknown_header_fields_size; total_size = bs->total_sectors * BDRV_SECTOR_SIZE; refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); *header = (QCowHeader) { /* Version 2 fields */ .magic = cpu_to_be32(QCOW_MAGIC), .version = cpu_to_be32(s->qcow_version), .backing_file_offset = 0, .backing_file_size = 0, .cluster_bits = cpu_to_be32(s->cluster_bits), .size = cpu_to_be64(total_size), .crypt_method = cpu_to_be32(s->crypt_method_header), .l1_size = cpu_to_be32(s->l1_size), .l1_table_offset = cpu_to_be64(s->l1_table_offset), .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), .nb_snapshots = cpu_to_be32(s->nb_snapshots), .snapshots_offset = cpu_to_be64(s->snapshots_offset), /* Version 3 fields */ .incompatible_features = cpu_to_be64(s->incompatible_features), .compatible_features = cpu_to_be64(s->compatible_features), .autoclear_features = cpu_to_be64(s->autoclear_features), .refcount_order = cpu_to_be32(s->refcount_order), .header_length = cpu_to_be32(header_length), }; /* For older versions, write a shorter header */ switch (s->qcow_version) { case 2: ret = offsetof(QCowHeader, incompatible_features); break; case 3: ret = sizeof(*header); break; default: ret = -EINVAL; goto fail; } buf += ret; buflen -= ret; memset(buf, 0, buflen); /* Preserve any unknown field in the header */ if (s->unknown_header_fields_size) { if (buflen < s->unknown_header_fields_size) { ret = -ENOSPC; goto fail; } memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); buf += s->unknown_header_fields_size; buflen -= s->unknown_header_fields_size; } /* Backing file format header extension */ if (*bs->backing_format) { ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, bs->backing_format, strlen(bs->backing_format), buflen); if (ret < 0) { goto fail; } buf += ret; buflen -= ret; } /* Feature table */ Qcow2Feature features[] = { { .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, .bit = QCOW2_INCOMPAT_DIRTY_BITNR, .name = "dirty bit", }, { .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, .bit = QCOW2_INCOMPAT_CORRUPT_BITNR, .name = "corrupt bit", }, { .type = QCOW2_FEAT_TYPE_COMPATIBLE, .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR, .name = "lazy refcounts", }, }; ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, features, sizeof(features), buflen); if (ret < 0) { goto fail; } buf += ret; buflen -= ret; /* Keep unknown header extensions */ QLIST_FOREACH(uext, &s->unknown_header_ext, next) { ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); if (ret < 0) { goto fail; } buf += ret; buflen -= ret; } /* End of header extensions */ ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); if (ret < 0) { goto fail; } buf += ret; buflen -= ret; /* Backing file name */ if (*bs->backing_file) { size_t backing_file_len = strlen(bs->backing_file); if (buflen < backing_file_len) { ret = -ENOSPC; goto fail; } /* Using strncpy is ok here, since buf is not NUL-terminated. */ strncpy(buf, bs->backing_file, buflen); header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); header->backing_file_size = cpu_to_be32(backing_file_len); } /* Write the new header */ ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size); if (ret < 0) { goto fail; } ret = 0; fail: qemu_vfree(header); return ret; } | 14,506
0 | static void gd_update_cursor(GtkDisplayState *s, gboolean override) { GdkWindow *window; bool on_vga; window = gtk_widget_get_window(GTK_WIDGET(s->drawing_area)); on_vga = (gtk_notebook_get_current_page(GTK_NOTEBOOK(s->notebook)) == 0); if ((override || on_vga) && kbd_mouse_is_absolute()) { gdk_window_set_cursor(window, s->null_cursor); } else { gdk_window_set_cursor(window, NULL); } } | 14,507 |
0 | static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg) { Picture *pic = NULL; int64_t pts; int i, display_picture_number = 0, ret; const int encoding_delay = s->max_b_frames ? s->max_b_frames : (s->low_delay ? 0 : 1); int direct = 1; if (pic_arg) { pts = pic_arg->pts; display_picture_number = s->input_picture_number++; if (pts != AV_NOPTS_VALUE) { if (s->user_specified_pts != AV_NOPTS_VALUE) { int64_t time = pts; int64_t last = s->user_specified_pts; if (time <= last) { av_log(s->avctx, AV_LOG_ERROR, "Error, Invalid timestamp=%"PRId64", " "last=%"PRId64"\n", pts, s->user_specified_pts); return -1; } if (!s->low_delay && display_picture_number == 1) s->dts_delta = time - last; } s->user_specified_pts = pts; } else { if (s->user_specified_pts != AV_NOPTS_VALUE) { s->user_specified_pts = pts = s->user_specified_pts + 1; av_log(s->avctx, AV_LOG_INFO, "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n", pts); } else { pts = display_picture_number; } } } if (pic_arg) { if (!pic_arg->buf[0]) direct = 0; if (pic_arg->linesize[0] != s->linesize) direct = 0; if (pic_arg->linesize[1] != s->uvlinesize) direct = 0; if (pic_arg->linesize[2] != s->uvlinesize) direct = 0; av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize); if (direct) { i = ff_find_unused_picture(s, 1); if (i < 0) return i; pic = &s->picture[i]; pic->reference = 3; if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0) return ret; if (ff_alloc_picture(s, pic, 1) < 0) { return -1; } } else { i = ff_find_unused_picture(s, 0); if (i < 0) return i; pic = &s->picture[i]; pic->reference = 3; if (ff_alloc_picture(s, pic, 0) < 0) { return -1; } if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] && pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] && pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) { // empty } else { int h_chroma_shift, v_chroma_shift; av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift); for (i = 0; i < 3; i++) { int src_stride = pic_arg->linesize[i]; int dst_stride = i ? s->uvlinesize : s->linesize; int h_shift = i ? h_chroma_shift : 0; int v_shift = i ? v_chroma_shift : 0; int w = s->width >> h_shift; int h = s->height >> v_shift; uint8_t *src = pic_arg->data[i]; uint8_t *dst = pic->f.data[i]; if (s->codec_id == AV_CODEC_ID_AMV && !(s->avctx->flags & CODEC_FLAG_EMU_EDGE)) { h = ((s->height + 15)/16*16) >> v_shift; } if (!s->avctx->rc_buffer_size) dst += INPLACE_OFFSET; if (src_stride == dst_stride) memcpy(dst, src, src_stride * h); else { int h2 = h; uint8_t *dst2 = dst; while (h2--) { memcpy(dst2, src, w); dst2 += dst_stride; src += src_stride; } } if ((s->width & 15) || (s->height & 15)) { s->dsp.draw_edges(dst, dst_stride, w, h, 16>>h_shift, 16>>v_shift, EDGE_BOTTOM); } } } } copy_picture_attributes(s, &pic->f, pic_arg); pic->f.display_picture_number = display_picture_number; pic->f.pts = pts; // we set this here to avoid modifiying pic_arg } /* shift buffer entries */ for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++) s->input_picture[i - 1] = s->input_picture[i]; s->input_picture[encoding_delay] = (Picture*) pic; return 0; } | 14,510 |
1 | static void pc_i440fx_2_4_machine_options(MachineClass *m) { PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_i440fx_2_5_machine_options(m); m->alias = NULL; m->is_default = 0; pcmc->broken_reserved_end = true; pcmc->inter_dimm_gap = false; SET_MACHINE_COMPAT(m, PC_COMPAT_2_4); } | 14,511 |
1 | static int vc1_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size, n_slices = 0, i; VC1Context *v = avctx->priv_data; MpegEncContext *s = &v->s; AVFrame *pict = data; uint8_t *buf2 = NULL; const uint8_t *buf_start = buf, *buf_start_second_field = NULL; int mb_height, n_slices1=-1; struct { uint8_t *buf; GetBitContext gb; int mby_start; } *slices = NULL, *tmp; v->second_field = 0; if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay = 1; /* no supplementary picture */ if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) { /* special case for last picture */ if (s->low_delay == 0 && s->next_picture_ptr) { *pict = s->next_picture_ptr->f; s->next_picture_ptr = NULL; *data_size = sizeof(AVFrame); return buf_size; if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) { if (v->profile < PROFILE_ADVANCED) avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3; else avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1; //for advanced profile we may need to parse and unescape data if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { int buf_size2 = 0; buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */ const uint8_t *start, *end, *next; int size; next = buf; for (start = buf, end = buf + buf_size; next < end; start = next) { next = find_next_marker(start + 4, end); size = next - start - 4; if (size <= 0) continue; switch (AV_RB32(start)) { case VC1_CODE_FRAME: if (avctx->hwaccel || s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) buf_start = start; buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); break; case VC1_CODE_FIELD: { int buf_size3; if (avctx->hwaccel || s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) buf_start_second_field = start; tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1)); if (!tmp) slices = tmp; slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!slices[n_slices].buf) buf_size3 = vc1_unescape_buffer(start + 4, size, slices[n_slices].buf); init_get_bits(&slices[n_slices].gb, slices[n_slices].buf, buf_size3 << 3); /* assuming that the field marker is at the exact middle, hope it's correct */ slices[n_slices].mby_start = s->mb_height >> 1; n_slices1 = n_slices - 1; // index of the last slice of the first field n_slices++; break; case VC1_CODE_ENTRYPOINT: /* it should be before frame data */ buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); init_get_bits(&s->gb, buf2, buf_size2 * 8); ff_vc1_decode_entry_point(avctx, v, &s->gb); break; case VC1_CODE_SLICE: { int buf_size3; tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1)); if (!tmp) slices = tmp; slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!slices[n_slices].buf) buf_size3 = vc1_unescape_buffer(start + 4, size, slices[n_slices].buf); init_get_bits(&slices[n_slices].gb, slices[n_slices].buf, buf_size3 << 3); slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9); n_slices++; break; } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */ const uint8_t *divider; int buf_size3; divider = find_next_marker(buf, buf + buf_size); if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) { av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n"); } else { // found field marker, unescape second field if (avctx->hwaccel || s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) buf_start_second_field = divider; tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1)); if (!tmp) slices = tmp; slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); if (!slices[n_slices].buf) buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf); init_get_bits(&slices[n_slices].gb, slices[n_slices].buf, buf_size3 << 3); slices[n_slices].mby_start = s->mb_height >> 1; n_slices1 = n_slices - 1; n_slices++; buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2); } else { buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2); init_get_bits(&s->gb, buf2, buf_size2*8); } else init_get_bits(&s->gb, buf, buf_size*8); if (v->res_sprite) { v->new_sprite = !get_bits1(&s->gb); v->two_sprites = get_bits1(&s->gb); /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means we're using the sprite compositor. These are intentionally kept separate so you can get the raw sprites by using the wmv3 decoder for WMVP or the vc1 one for WVP2 */ if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { if (v->new_sprite) { // switch AVCodecContext parameters to those of the sprites avctx->width = avctx->coded_width = v->sprite_width; avctx->height = avctx->coded_height = v->sprite_height; } else { goto image; if (s->context_initialized && (s->width != avctx->coded_width || s->height != avctx->coded_height)) { ff_vc1_decode_end(avctx); if (!s->context_initialized) { if (ff_msmpeg4_decode_init(avctx) < 0 || ff_vc1_decode_init_alloc_tables(v) < 0) s->low_delay = !avctx->has_b_frames || v->res_sprite; if (v->profile == PROFILE_ADVANCED) { s->h_edge_pos = avctx->coded_width; s->v_edge_pos = avctx->coded_height; /* We need to set current_picture_ptr before reading the header, * otherwise we cannot store anything in there. */ if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) { int i = ff_find_unused_picture(s, 0); if (i < 0) s->current_picture_ptr = &s->picture[i]; // do parse frame header v->pic_header_flag = 0; if (v->profile < PROFILE_ADVANCED) { if (ff_vc1_parse_frame_header(v, &s->gb) < 0) { } else { if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) { if (avctx->debug & FF_DEBUG_PICT_INFO) av_log(v->s.avctx, AV_LOG_DEBUG, "pict_type: %c\n", av_get_picture_type_char(s->pict_type)); if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) && s->pict_type != AV_PICTURE_TYPE_I) { av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n"); // process pulldown flags s->current_picture_ptr->f.repeat_pict = 0; // Pulldown flags are only valid when 'broadcast' has been set. // So ticks_per_frame will be 2 if (v->rff) { // repeat field s->current_picture_ptr->f.repeat_pict = 1; } else if (v->rptfrm) { // repeat frames s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2; // for skipping the frame s->current_picture.f.pict_type = s->pict_type; s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; /* skip B-frames if we don't have reference frames */ if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->dropable)) { if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) || avctx->skip_frame >= AVDISCARD_ALL) { goto end; if (s->next_p_frame_damaged) { if (s->pict_type == AV_PICTURE_TYPE_B) goto end; else s->next_p_frame_damaged = 0; if (ff_MPV_frame_start(s, avctx) < 0) { v->s.current_picture_ptr->f.interlaced_frame = (v->fcm != PROGRESSIVE); v->s.current_picture_ptr->f.top_field_first = v->tff; s->me.qpel_put = s->dsp.put_qpel_pixels_tab; s->me.qpel_avg = s->dsp.avg_qpel_pixels_tab; if ((CONFIG_VC1_VDPAU_DECODER) &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start); else if (avctx->hwaccel) { if (v->field_mode && buf_start_second_field) { // decode first field s->picture_structure = PICT_BOTTOM_FIELD - v->tff; if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0) if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0) if (avctx->hwaccel->end_frame(avctx) < 0) // decode second field s->gb = slices[n_slices1 + 1].gb; s->picture_structure = PICT_TOP_FIELD + v->tff; v->second_field = 1; v->pic_header_flag = 0; if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) { av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed"); v->s.current_picture_ptr->f.pict_type = v->s.pict_type; if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0) if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0) if (avctx->hwaccel->end_frame(avctx) < 0) } else { s->picture_structure = PICT_FRAME; if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0) if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0) if (avctx->hwaccel->end_frame(avctx) < 0) } else { if (v->fcm == ILACE_FRAME && s->pict_type == AV_PICTURE_TYPE_B) goto err; // This codepath is still incomplete thus it is disabled ff_er_frame_start(s); v->bits = buf_size * 8; v->end_mb_x = s->mb_width; if (v->field_mode) { uint8_t *tmp[2]; s->current_picture.f.linesize[0] <<= 1; s->current_picture.f.linesize[1] <<= 1; s->current_picture.f.linesize[2] <<= 1; s->linesize <<= 1; s->uvlinesize <<= 1; tmp[0] = v->mv_f_last[0]; tmp[1] = v->mv_f_last[1]; v->mv_f_last[0] = v->mv_f_next[0]; v->mv_f_last[1] = v->mv_f_next[1]; v->mv_f_next[0] = v->mv_f[0]; v->mv_f_next[1] = v->mv_f[1]; v->mv_f[0] = tmp[0]; v->mv_f[1] = tmp[1]; mb_height = s->mb_height >> v->field_mode; for (i = 0; i <= n_slices; i++) { if (i > 0 && slices[i - 1].mby_start >= mb_height) { if (v->field_mode <= 0) { av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond " "picture boundary (%d >= %d)\n", i, slices[i - 1].mby_start, mb_height); continue; v->second_field = 1; v->blocks_off = s->mb_width * s->mb_height << 1; v->mb_off = s->mb_stride * s->mb_height >> 1; } else { v->second_field = 0; v->blocks_off = 0; v->mb_off = 0; if (i) { v->pic_header_flag = 0; if (v->field_mode && i == n_slices1 + 2) { if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) { av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n"); continue; } else if (get_bits1(&s->gb)) { v->pic_header_flag = 1; if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) { av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n"); continue; s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height); if (!v->field_mode || v->second_field) s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height); else s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height); if (s->end_mb_y <= s->start_mb_y) { av_log(v->s.avctx, AV_LOG_ERROR, "end mb y %d %d invalid\n", s->end_mb_y, s->start_mb_y); continue; ff_vc1_decode_blocks(v); if (i != n_slices) s->gb = slices[i].gb; if (v->field_mode) { v->second_field = 0; if (s->pict_type == AV_PICTURE_TYPE_B) { memcpy(v->mv_f_base, v->mv_f_next_base, 2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2)); s->current_picture.f.linesize[0] >>= 1; s->current_picture.f.linesize[1] >>= 1; s->current_picture.f.linesize[2] >>= 1; s->linesize >>= 1; s->uvlinesize >>= 1; av_dlog(s->avctx, "Consumed %i/%i bits\n", get_bits_count(&s->gb), s->gb.size_in_bits); // if (get_bits_count(&s->gb) > buf_size * 8) // return -1; if(s->error_occurred && s->pict_type == AV_PICTURE_TYPE_B) if(!v->field_mode) ff_er_frame_end(s); ff_MPV_frame_end(s); if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { image: avctx->width = avctx->coded_width = v->output_width; avctx->height = avctx->coded_height = v->output_height; if (avctx->skip_frame >= AVDISCARD_NONREF) goto end; #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER if (vc1_decode_sprites(v, &s->gb)) #endif *pict = v->sprite_output_frame; *data_size = sizeof(AVFrame); } else { if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) { *pict = s->current_picture_ptr->f; } else if (s->last_picture_ptr != NULL) { *pict = s->last_picture_ptr->f; if (s->last_picture_ptr || s->low_delay) { *data_size = sizeof(AVFrame); ff_print_debug_info(s, pict); end: av_free(buf2); for (i = 0; i < n_slices; i++) av_free(slices[i].buf); av_free(slices); return buf_size; err: av_free(buf2); for (i = 0; i < n_slices; i++) av_free(slices[i].buf); av_free(slices); return -1; | 14,512
1 | static int local_mknod(FsContext *fs_ctx, V9fsPath *dir_path, const char *name, FsCred *credp) { char *path; int err = -1; int serrno = 0; V9fsString fullname; char *buffer; v9fs_string_init(&fullname); v9fs_string_sprintf(&fullname, "%s/%s", dir_path->data, name); path = fullname.data; /* Determine the security model */ if (fs_ctx->export_flags & V9FS_SM_MAPPED) { buffer = rpath(fs_ctx, path); err = mknod(buffer, SM_LOCAL_MODE_BITS|S_IFREG, 0); if (err == -1) { g_free(buffer); goto out; } err = local_set_xattr(buffer, credp); if (err == -1) { serrno = errno; goto err_end; } } else if (fs_ctx->export_flags & V9FS_SM_MAPPED_FILE) { buffer = rpath(fs_ctx, path); err = mknod(buffer, SM_LOCAL_MODE_BITS|S_IFREG, 0); if (err == -1) { g_free(buffer); goto out; } err = local_set_mapped_file_attr(fs_ctx, path, credp); if (err == -1) { serrno = errno; goto err_end; } } else if ((fs_ctx->export_flags & V9FS_SM_PASSTHROUGH) || (fs_ctx->export_flags & V9FS_SM_NONE)) { buffer = rpath(fs_ctx, path); err = mknod(buffer, credp->fc_mode, credp->fc_rdev); if (err == -1) { g_free(buffer); goto out; } err = local_post_create_passthrough(fs_ctx, path, credp); if (err == -1) { serrno = errno; goto err_end; } } goto out; err_end: remove(buffer); errno = serrno; g_free(buffer); out: v9fs_string_free(&fullname); return err; } | 14,513 |
0 | static int flv_set_video_codec(AVFormatContext *s, AVStream *vstream, int flv_codecid) { FLVContext *flv = s->priv_data; AVCodecContext *vcodec = vstream->codec; switch(flv_codecid) { case FLV_CODECID_H263 : vcodec->codec_id = CODEC_ID_FLV1 ; break; case FLV_CODECID_SCREEN: vcodec->codec_id = CODEC_ID_FLASHSV; break; case FLV_CODECID_VP6A : if (!flv->alpha_stream) { AVCodecContext *alpha_codec; flv->alpha_stream = av_new_stream(s, 2); if (flv->alpha_stream) { av_set_pts_info(flv->alpha_stream, 24, 1, 1000); alpha_codec = flv->alpha_stream->codec; alpha_codec->codec_type = CODEC_TYPE_VIDEO; alpha_codec->codec_id = CODEC_ID_VP6F; alpha_codec->extradata_size = 1; alpha_codec->extradata = av_malloc(1); } } case FLV_CODECID_VP6 : vcodec->codec_id = CODEC_ID_VP6F ; if(vcodec->extradata_size != 1) { vcodec->extradata_size = 1; vcodec->extradata = av_malloc(1); } vcodec->extradata[0] = get_byte(&s->pb); if (flv->alpha_stream) flv->alpha_stream->codec->extradata[0] = vcodec->extradata[0]; return 1; // 1 byte body size adjustment for flv_read_packet() default: av_log(s, AV_LOG_INFO, "Unsupported video codec (%x)\n", flv_codecid); vcodec->codec_tag = flv_codecid; } return 0; } | 14,514 |
1 | void wdt_ib700_init(void) { watchdog_add_model(&model); timer = qemu_new_timer(vm_clock, ib700_timer_expired, NULL); } | 14,515 |
0 | static int sub2video_prepare(InputStream *ist, InputFilter *ifilter) { AVFormatContext *avf = input_files[ist->file_index]->ctx; int i, w, h; /* Compute the size of the canvas for the subtitles stream. If the subtitles codecpar has set a size, use it. Otherwise use the maximum dimensions of the video streams in the same file. */ w = ifilter->width; h = ifilter->height; if (!(w && h)) { for (i = 0; i < avf->nb_streams; i++) { if (avf->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { w = FFMAX(w, avf->streams[i]->codecpar->width); h = FFMAX(h, avf->streams[i]->codecpar->height); } } if (!(w && h)) { w = FFMAX(w, 720); h = FFMAX(h, 576); } av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h); } ist->sub2video.w = ist->resample_width = w; ist->sub2video.h = ist->resample_height = h; /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the palettes for all rectangles are identical or compatible */ ist->resample_pix_fmt = ifilter->format = AV_PIX_FMT_RGB32; ist->sub2video.frame = av_frame_alloc(); if (!ist->sub2video.frame) return AVERROR(ENOMEM); ist->sub2video.last_pts = INT64_MIN; return 0; } | 14,516 |
0 | static int parse_inputs(const char **buf, AVFilterInOut **curr_inputs, AVFilterInOut **open_outputs, AVClass *log_ctx) { int pad = 0; while (**buf == '[') { char *name = parse_link_name(buf, log_ctx); AVFilterInOut *match; if (!name) return -1; /* First check if the label is not in the open_outputs list */ match = extract_inout(name, open_outputs); if (match) { av_free(name); } else { /* Not in the list, so add it as an input */ match = av_mallocz(sizeof(AVFilterInOut)); match->name = name; match->pad_idx = pad; } insert_inout(curr_inputs, match); *buf += strspn(*buf, WHITESPACES); pad++; } return pad; } | 14,517 |
1 | static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr) { DNXHDEncContext *ctx = avctx->priv_data; int mb_y = jobnr, mb_x; ctx = ctx->thread[threadnr]; if (ctx->cid_table->bit_depth == 8) { uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y<<4) * ctx->m.linesize); for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x, pix += 16) { unsigned mb = mb_y * ctx->m.mb_width + mb_x; int sum = ctx->m.dsp.pix_sum(pix, ctx->m.linesize); int varc = (ctx->m.dsp.pix_norm1(pix, ctx->m.linesize) - (((unsigned)(sum*sum))>>8)+128)>>8; ctx->mb_cmp[mb].value = varc; ctx->mb_cmp[mb].mb = mb; } } else { // 10-bit int const linesize = ctx->m.linesize >> 1; for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x) { uint16_t *pix = (uint16_t*)ctx->thread[0]->src[0] + ((mb_y << 4) * linesize) + (mb_x << 4); unsigned mb = mb_y * ctx->m.mb_width + mb_x; int sum = 0; int sqsum = 0; int mean, sqmean; int i, j; // Macroblocks are 16x16 pixels, unlike DCT blocks which are 8x8. for (i = 0; i < 16; ++i) { for (j = 0; j < 16; ++j) { // Turn 16-bit pixels into 10-bit ones. int const sample = (unsigned)pix[j] >> 6; sum += sample; sqsum += sample * sample; // 2^10 * 2^10 * 16 * 16 = 2^28, which is less than INT_MAX } pix += linesize; } mean = sum >> 8; // 16*16 == 2^8 sqmean = sqsum >> 8; ctx->mb_cmp[mb].value = sqmean - mean * mean; ctx->mb_cmp[mb].mb = mb; } } return 0; } | 14,518 |
1 | static void virtio_blk_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); dc->exit = virtio_blk_device_exit; dc->props = virtio_blk_properties; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); vdc->init = virtio_blk_device_init; vdc->get_config = virtio_blk_update_config; vdc->set_config = virtio_blk_set_config; vdc->get_features = virtio_blk_get_features; vdc->set_status = virtio_blk_set_status; vdc->reset = virtio_blk_reset; } | 14,519 |
1 | static int cirrus_vga_load(QEMUFile *f, void *opaque, int version_id) { CirrusVGAState *s = opaque; int ret; if (version_id > 2) return -EINVAL; if (s->pci_dev && version_id >= 2) { ret = pci_device_load(s->pci_dev, f); if (ret < 0) return ret; } qemu_get_be32s(f, &s->latch); qemu_get_8s(f, &s->sr_index); qemu_get_buffer(f, s->sr, 256); qemu_get_8s(f, &s->gr_index); qemu_get_8s(f, &s->cirrus_shadow_gr0); qemu_get_8s(f, &s->cirrus_shadow_gr1); s->gr[0x00] = s->cirrus_shadow_gr0 & 0x0f; s->gr[0x01] = s->cirrus_shadow_gr1 & 0x0f; qemu_get_buffer(f, s->gr + 2, 254); qemu_get_8s(f, &s->ar_index); qemu_get_buffer(f, s->ar, 21); s->ar_flip_flop=qemu_get_be32(f); qemu_get_8s(f, &s->cr_index); qemu_get_buffer(f, s->cr, 256); qemu_get_8s(f, &s->msr); qemu_get_8s(f, &s->fcr); qemu_get_8s(f, &s->st00); qemu_get_8s(f, &s->st01); qemu_get_8s(f, &s->dac_state); qemu_get_8s(f, &s->dac_sub_index); qemu_get_8s(f, &s->dac_read_index); qemu_get_8s(f, &s->dac_write_index); qemu_get_buffer(f, s->dac_cache, 3); qemu_get_buffer(f, s->palette, 768); s->bank_offset=qemu_get_be32(f); qemu_get_8s(f, &s->cirrus_hidden_dac_lockindex); qemu_get_8s(f, &s->cirrus_hidden_dac_data); qemu_get_be32s(f, &s->hw_cursor_x); qemu_get_be32s(f, &s->hw_cursor_y); cirrus_update_memory_access(s); /* force refresh */ s->graphic_mode = -1; cirrus_update_bank_ptr(s, 0); cirrus_update_bank_ptr(s, 1); return 0; } | 14,520 |
1 | static int ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind) { IDEBus *bus = DO_UPCAST(IDEBus, qbus, dev->qdev.parent_bus); IDEState *s = bus->ifs + dev->unit; Error *err = NULL; if (dev->conf.discard_granularity == -1) { dev->conf.discard_granularity = 512; } else if (dev->conf.discard_granularity && dev->conf.discard_granularity != 512) { error_report("discard_granularity must be 512 for ide"); return -1; } blkconf_serial(&dev->conf, &dev->serial); if (kind != IDE_CD) { blkconf_geometry(&dev->conf, &dev->chs_trans, 65536, 16, 255, &err); if (err) { error_report("%s", error_get_pretty(err)); error_free(err); return -1; } } if (ide_init_drive(s, dev->conf.blk, kind, dev->version, dev->serial, dev->model, dev->wwn, dev->conf.cyls, dev->conf.heads, dev->conf.secs, dev->chs_trans) < 0) { return -1; } if (!dev->version) { dev->version = g_strdup(s->version); } if (!dev->serial) { dev->serial = g_strdup(s->drive_serial_str); } add_boot_device_path(dev->conf.bootindex, &dev->qdev, dev->unit ? "/disk@1" : "/disk@0"); return 0; } | 14,522
1 | static int writer_open(WriterContext **wctx, const Writer *writer, const char *args, const struct section *sections, int nb_sections) { int i, ret = 0; if (!(*wctx = av_malloc(sizeof(WriterContext)))) { ret = AVERROR(ENOMEM); goto fail; } if (!((*wctx)->priv = av_mallocz(writer->priv_size))) { ret = AVERROR(ENOMEM); goto fail; } (*wctx)->class = &writer_class; (*wctx)->writer = writer; (*wctx)->level = -1; (*wctx)->sections = sections; (*wctx)->nb_sections = nb_sections; if (writer->priv_class) { void *priv_ctx = (*wctx)->priv; *((const AVClass **)priv_ctx) = writer->priv_class; av_opt_set_defaults(priv_ctx); if (args && (ret = av_set_options_string(priv_ctx, args, "=", ":")) < 0) goto fail; } for (i = 0; i < SECTION_MAX_NB_LEVELS; i++) av_bprint_init(&(*wctx)->section_pbuf[i], 1, AV_BPRINT_SIZE_UNLIMITED); if ((*wctx)->writer->init) ret = (*wctx)->writer->init(*wctx); if (ret < 0) goto fail; return 0; fail: writer_close(wctx); return ret; } | 14,523 |
1 | int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw, int is_user, int is_softmmu) { mmu_ctx_t ctx; int exception = 0, error_code = 0; int access_type; int ret = 0; if (rw == 2) { /* code access */ rw = 0; access_type = ACCESS_CODE; } else { /* data access */ /* XXX: put correct access by using cpu_restore_state() correctly */ access_type = ACCESS_INT; // access_type = env->access_type; } ret = get_physical_address(env, &ctx, address, rw, access_type, 1); if (ret == 0) { ret = tlb_set_page(env, address & TARGET_PAGE_MASK, ctx.raddr & TARGET_PAGE_MASK, ctx.prot, is_user, is_softmmu); } else if (ret < 0) { #if defined (DEBUG_MMU) if (loglevel > 0) cpu_dump_state(env, logfile, fprintf, 0); #endif if (access_type == ACCESS_CODE) { exception = EXCP_ISI; switch (ret) { case -1: /* No matches in page tables or TLB */ if (unlikely(PPC_MMU(env) == PPC_FLAGS_MMU_SOFT_6xx)) { exception = EXCP_I_TLBMISS; env->spr[SPR_IMISS] = address; env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; error_code = 1 << 18; goto tlb_miss; } else if (unlikely(PPC_MMU(env) == PPC_FLAGS_MMU_SOFT_4xx)) { /* XXX: TODO */ } else { error_code = 0x40000000; } break; case -2: /* Access rights violation */ error_code = 0x08000000; break; case -3: /* No execute protection violation */ error_code = 0x10000000; break; case -4: /* Direct store exception */ /* No code fetch is allowed in direct-store areas */ error_code = 0x10000000; break; case -5: /* No match in segment table */ exception = EXCP_ISEG; error_code = 0; break; } } else { exception = EXCP_DSI; switch (ret) { case -1: /* No matches in page tables or TLB */ if (unlikely(PPC_MMU(env) == PPC_FLAGS_MMU_SOFT_6xx)) { if (rw == 1) { exception = EXCP_DS_TLBMISS; error_code = 1 << 16; } else { exception = EXCP_DL_TLBMISS; error_code = 0; } env->spr[SPR_DMISS] = address; env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; tlb_miss: error_code |= ctx.key << 19; env->spr[SPR_HASH1] = ctx.pg_addr[0]; env->spr[SPR_HASH2] = ctx.pg_addr[1]; /* Do not alter DAR nor DSISR */ goto out; } else if (unlikely(PPC_MMU(env) == PPC_FLAGS_MMU_SOFT_4xx)) { /* XXX: TODO */ } else { error_code = 0x40000000; } break; case -2: /* Access rights violation */ error_code = 0x08000000; break; case -4: /* Direct store exception */ switch (access_type) { case ACCESS_FLOAT: /* Floating point load/store */ exception = EXCP_ALIGN; error_code = EXCP_ALIGN_FP; break; case ACCESS_RES: /* lwarx, ldarx or srwcx. */ error_code = 0x04000000; break; case ACCESS_EXT: /* eciwx or ecowx */ error_code = 0x04100000; break; default: printf("DSI: invalid exception (%d)\n", ret); exception = EXCP_PROGRAM; error_code = EXCP_INVAL | EXCP_INVAL_INVAL; break; } break; case -5: /* No match in segment table */ exception = EXCP_DSEG; error_code = 0; break; } if (exception == EXCP_DSI && rw == 1) error_code |= 0x02000000; /* Store fault address */ env->spr[SPR_DAR] = address; env->spr[SPR_DSISR] = error_code; } out: #if 0 printf("%s: set exception to %d %02x\n", __func__, exception, error_code); #endif env->exception_index = exception; env->error_code = error_code; ret = 1; } return ret; } | 14,524 |
0 | static PCIINTxRoute gpex_route_intx_pin_to_irq(void *opaque, int pin) { PCIINTxRoute route; GPEXHost *s = opaque; route.mode = PCI_INTX_ENABLED; route.irq = s->irq_num[pin]; return route; } | 14,525 |
0 | static void cuda_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { CUDAState *s = opaque; addr = (addr >> 9) & 0xf; CUDA_DPRINTF("write: reg=0x%x val=%02x\n", (int)addr, val); switch(addr) { case 0: s->b = val; cuda_update(s); break; case 1: s->a = val; break; case 2: s->dirb = val; break; case 3: s->dira = val; break; case 4: s->timers[0].latch = (s->timers[0].latch & 0xff00) | val; cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock)); break; case 5: s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8); s->ifr &= ~T1_INT; set_counter(s, &s->timers[0], s->timers[0].latch); break; case 6: s->timers[0].latch = (s->timers[0].latch & 0xff00) | val; cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock)); break; case 7: s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8); s->ifr &= ~T1_INT; cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock)); break; case 8: s->timers[1].latch = val; set_counter(s, &s->timers[1], val); break; case 9: set_counter(s, &s->timers[1], (val << 8) | s->timers[1].latch); break; case 10: s->sr = val; break; case 11: s->acr = val; cuda_timer_update(s, &s->timers[0], qemu_get_clock_ns(vm_clock)); cuda_update(s); break; case 12: s->pcr = val; break; case 13: /* reset bits */ s->ifr &= ~val; cuda_update_irq(s); break; case 14: if (val & IER_SET) { /* set bits */ s->ier |= val & 0x7f; } else { /* reset bits */ s->ier &= ~val; } cuda_update_irq(s); break; default: case 15: s->anh = val; break; } } | 14,526 |
0 | int pci_add_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t offset, uint8_t size, Error **errp) { uint8_t *config; int i, overlapping_cap; if (!offset) { offset = pci_find_space(pdev, size); /* out of PCI config space is programming error */ assert(offset); } else { /* Verify that capabilities don't overlap. Note: device assignment * depends on this check to verify that the device is not broken. * Should never trigger for emulated devices, but it's helpful * for debugging these. */ for (i = offset; i < offset + size; i++) { overlapping_cap = pci_find_capability_at_offset(pdev, i); if (overlapping_cap) { error_setg(errp, "%s:%02x:%02x.%x " "Attempt to add PCI capability %x at offset " "%x overlaps existing capability %x at offset %x", pci_root_bus_path(pdev), pci_bus_num(pdev->bus), PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), cap_id, offset, overlapping_cap, i); return -EINVAL; } } } config = pdev->config + offset; config[PCI_CAP_LIST_ID] = cap_id; config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST]; pdev->config[PCI_CAPABILITY_LIST] = offset; pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST; memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4)); /* Make capability read-only by default */ memset(pdev->wmask + offset, 0, size); /* Check capability by default */ memset(pdev->cmask + offset, 0xFF, size); return offset; } | 14,527 |
0 | void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) { MigrationParameters *params; params = qmp_query_migrate_parameters(NULL); if (params) { assert(params->has_compress_level); monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_str(MIGRATION_PARAMETER_COMPRESS_LEVEL), params->compress_level); assert(params->has_compress_threads); monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_str(MIGRATION_PARAMETER_COMPRESS_THREADS), params->compress_threads); assert(params->has_decompress_threads); monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_str(MIGRATION_PARAMETER_DECOMPRESS_THREADS), params->decompress_threads); assert(params->has_cpu_throttle_initial); monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_str(MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL), params->cpu_throttle_initial); assert(params->has_cpu_throttle_increment); monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_str(MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT), params->cpu_throttle_increment); assert(params->has_tls_creds); monitor_printf(mon, "%s: '%s'\n", MigrationParameter_str(MIGRATION_PARAMETER_TLS_CREDS), params->tls_creds); assert(params->has_tls_hostname); monitor_printf(mon, "%s: '%s'\n", MigrationParameter_str(MIGRATION_PARAMETER_TLS_HOSTNAME), params->tls_hostname); assert(params->has_max_bandwidth); monitor_printf(mon, "%s: %" PRId64 " bytes/second\n", MigrationParameter_str(MIGRATION_PARAMETER_MAX_BANDWIDTH), params->max_bandwidth); assert(params->has_downtime_limit); monitor_printf(mon, "%s: %" PRId64 " milliseconds\n", MigrationParameter_str(MIGRATION_PARAMETER_DOWNTIME_LIMIT), params->downtime_limit); assert(params->has_x_checkpoint_delay); monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_str(MIGRATION_PARAMETER_X_CHECKPOINT_DELAY), params->x_checkpoint_delay); assert(params->has_block_incremental); monitor_printf(mon, "%s: %s\n", MigrationParameter_str(MIGRATION_PARAMETER_BLOCK_INCREMENTAL), params->block_incremental ? "on" : "off"); monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_str(MIGRATION_PARAMETER_X_MULTIFD_CHANNELS), params->x_multifd_channels); monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_str(MIGRATION_PARAMETER_X_MULTIFD_PAGE_COUNT), params->x_multifd_page_count); monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_str(MIGRATION_PARAMETER_XBZRLE_CACHE_SIZE), params->xbzrle_cache_size); } qapi_free_MigrationParameters(params); } | 14,528 |
0 | static void pxb_dev_realize_common(PCIDevice *dev, bool pcie, Error **errp) { PXBDev *pxb = convert_to_pxb(dev); DeviceState *ds, *bds = NULL; PCIBus *bus; const char *dev_name = NULL; Error *local_err = NULL; if (pxb->numa_node != NUMA_NODE_UNASSIGNED && pxb->numa_node >= nb_numa_nodes) { error_setg(errp, "Illegal numa node %d", pxb->numa_node); return; } if (dev->qdev.id && *dev->qdev.id) { dev_name = dev->qdev.id; } ds = qdev_create(NULL, TYPE_PXB_HOST); if (pcie) { bus = pci_root_bus_new(ds, dev_name, NULL, NULL, 0, TYPE_PXB_PCIE_BUS); } else { bus = pci_root_bus_new(ds, "pxb-internal", NULL, NULL, 0, TYPE_PXB_BUS); bds = qdev_create(BUS(bus), "pci-bridge"); bds->id = dev_name; qdev_prop_set_uint8(bds, PCI_BRIDGE_DEV_PROP_CHASSIS_NR, pxb->bus_nr); qdev_prop_set_bit(bds, PCI_BRIDGE_DEV_PROP_SHPC, false); } bus->parent_dev = dev; bus->address_space_mem = dev->bus->address_space_mem; bus->address_space_io = dev->bus->address_space_io; bus->map_irq = pxb_map_irq_fn; PCI_HOST_BRIDGE(ds)->bus = bus; pxb_register_bus(dev, bus, &local_err); if (local_err) { error_propagate(errp, local_err); goto err_register_bus; } qdev_init_nofail(ds); if (bds) { qdev_init_nofail(bds); } pci_word_test_and_set_mask(dev->config + PCI_STATUS, PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK); pci_config_set_class(dev->config, PCI_CLASS_BRIDGE_HOST); pxb_dev_list = g_list_insert_sorted(pxb_dev_list, pxb, pxb_compare); return; err_register_bus: object_unref(OBJECT(bds)); object_unparent(OBJECT(bus)); object_unref(OBJECT(ds)); } | 14,529 |
0 | int qemu_read_config_file(const char *filename) { FILE *f = fopen(filename, "r"); int ret; if (f == NULL) { return -errno; } ret = qemu_config_parse(f, vm_config_groups, filename); fclose(f); if (ret == 0) { return 0; } else { return -EINVAL; } } | 14,530 |
0 | SocketAddress *socket_parse(const char *str, Error **errp) { SocketAddress *addr; addr = g_new0(SocketAddress, 1); if (strstart(str, "unix:", NULL)) { if (str[5] == '\0') { error_setg(errp, "invalid Unix socket address"); goto fail; } else { addr->type = SOCKET_ADDRESS_KIND_UNIX; addr->u.q_unix.data = g_new(UnixSocketAddress, 1); addr->u.q_unix.data->path = g_strdup(str + 5); } } else if (strstart(str, "fd:", NULL)) { if (str[3] == '\0') { error_setg(errp, "invalid file descriptor address"); goto fail; } else { addr->type = SOCKET_ADDRESS_KIND_FD; addr->u.fd.data = g_new(String, 1); addr->u.fd.data->str = g_strdup(str + 3); } } else if (strstart(str, "vsock:", NULL)) { addr->type = SOCKET_ADDRESS_KIND_VSOCK; addr->u.vsock.data = g_new(VsockSocketAddress, 1); if (vsock_parse(addr->u.vsock.data, str + strlen("vsock:"), errp)) { goto fail; } } else { addr->type = SOCKET_ADDRESS_KIND_INET; addr->u.inet.data = g_new(InetSocketAddress, 1); if (inet_parse(addr->u.inet.data, str, errp)) { goto fail; } } return addr; fail: qapi_free_SocketAddress(addr); return NULL; } | 14,533 |
0 | static always_inline int translate_one (DisasContext *ctx, uint32_t insn) { uint32_t palcode; int32_t disp21, disp16, disp12; uint16_t fn11, fn16; uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit; uint8_t lit; int ret; /* Decode all instruction fields */ opc = insn >> 26; ra = (insn >> 21) & 0x1F; rb = (insn >> 16) & 0x1F; rc = insn & 0x1F; sbz = (insn >> 13) & 0x07; islit = (insn >> 12) & 1; if (rb == 31 && !islit) { islit = 1; lit = 0; } else lit = (insn >> 13) & 0xFF; palcode = insn & 0x03FFFFFF; disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11; disp16 = (int16_t)(insn & 0x0000FFFF); disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20; fn16 = insn & 0x0000FFFF; fn11 = (insn >> 5) & 0x000007FF; fpfn = fn11 & 0x3F; fn7 = (insn >> 5) & 0x0000007F; fn2 = (insn >> 5) & 0x00000003; ret = 0; LOG_DISAS("opc %02x ra %d rb %d rc %d disp16 %04x\n", opc, ra, rb, rc, disp16); switch (opc) { case 0x00: /* CALL_PAL */ if (palcode >= 0x80 && palcode < 0xC0) { /* Unprivileged PAL call */ gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x1F) << 6), 0); #if !defined (CONFIG_USER_ONLY) } else if (palcode < 0x40) { /* Privileged PAL code */ if (ctx->mem_idx & 1) goto invalid_opc; else gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0); #endif } else { /* Invalid PAL call */ goto invalid_opc; } ret = 3; break; case 0x01: /* OPC01 */ goto invalid_opc; case 0x02: /* OPC02 */ goto invalid_opc; case 0x03: /* OPC03 */ goto invalid_opc; case 0x04: /* OPC04 */ goto invalid_opc; case 0x05: /* OPC05 */ goto invalid_opc; case 0x06: /* OPC06 */ goto invalid_opc; case 0x07: /* OPC07 */ goto invalid_opc; case 0x08: /* LDA */ if (likely(ra != 31)) { if (rb != 31) tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16); else tcg_gen_movi_i64(cpu_ir[ra], disp16); } break; case 0x09: /* LDAH */ if (likely(ra != 31)) { if (rb != 31) tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16); else tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16); } break; case 0x0A: /* LDBU */ if (!(ctx->amask & AMASK_BWX)) goto invalid_opc; gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0); break; case 0x0B: /* LDQ_U */ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1); break; case 0x0C: /* LDWU */ if (!(ctx->amask & AMASK_BWX)) goto invalid_opc; gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0); break; case 0x0D: /* STW */ gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0); break; case 0x0E: /* STB */ gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0); break; case 0x0F: /* STQ_U */ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0); break; case 0x10: switch (fn7) { case 0x00: /* ADDL */ if (likely(rc != 31)) { if (ra != 31) { if (islit) { tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit); tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); } else { tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); } } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], lit); else tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]); } } break; case 0x02: /* S4ADDL */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); if (islit) tcg_gen_addi_i64(tmp, tmp, lit); else tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]); tcg_gen_ext32s_i64(cpu_ir[rc], tmp); tcg_temp_free(tmp); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], lit); else tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]); } } break; case 0x09: /* SUBL */ if (likely(rc != 31)) { if (ra != 31) { if (islit) tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit); else 
tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], -lit); else { tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); } } break; case 0x0B: /* S4SUBL */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); if (islit) tcg_gen_subi_i64(tmp, tmp, lit); else tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]); tcg_gen_ext32s_i64(cpu_ir[rc], tmp); tcg_temp_free(tmp); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], -lit); else { tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); } } } break; case 0x0F: /* CMPBGE */ gen_cmpbge(ra, rb, rc, islit, lit); break; case 0x12: /* S8ADDL */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); if (islit) tcg_gen_addi_i64(tmp, tmp, lit); else tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]); tcg_gen_ext32s_i64(cpu_ir[rc], tmp); tcg_temp_free(tmp); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], lit); else tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]); } } break; case 0x1B: /* S8SUBL */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); if (islit) tcg_gen_subi_i64(tmp, tmp, lit); else tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]); tcg_gen_ext32s_i64(cpu_ir[rc], tmp); tcg_temp_free(tmp); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], -lit); else tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); } } } break; case 0x1D: /* CMPULT */ gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit); break; case 0x20: /* ADDQ */ if (likely(rc != 31)) { if (ra != 31) { if (islit) tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit); else tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], lit); else tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); } } break; case 0x22: /* S4ADDQ */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); if (islit) tcg_gen_addi_i64(cpu_ir[rc], tmp, lit); else tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]); tcg_temp_free(tmp); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], lit); else tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); } } break; case 0x29: /* SUBQ */ if (likely(rc != 31)) { if (ra != 31) { if (islit) tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit); else tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], -lit); else tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); } } break; case 0x2B: /* S4SUBQ */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 2); if (islit) tcg_gen_subi_i64(cpu_ir[rc], tmp, lit); else tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]); tcg_temp_free(tmp); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], -lit); else tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); } } break; case 0x2D: /* CMPEQ */ gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit); break; case 0x32: /* S8ADDQ */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); if (islit) tcg_gen_addi_i64(cpu_ir[rc], tmp, lit); else tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]); tcg_temp_free(tmp); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], lit); else tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); } } break; case 0x3B: /* S8SUBQ */ if (likely(rc != 31)) { if (ra != 31) { TCGv tmp = tcg_temp_new(); tcg_gen_shli_i64(tmp, cpu_ir[ra], 3); if (islit) 
tcg_gen_subi_i64(cpu_ir[rc], tmp, lit); else tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]); tcg_temp_free(tmp); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], -lit); else tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]); } } break; case 0x3D: /* CMPULE */ gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit); break; case 0x40: /* ADDL/V */ gen_addlv(ra, rb, rc, islit, lit); break; case 0x49: /* SUBL/V */ gen_sublv(ra, rb, rc, islit, lit); break; case 0x4D: /* CMPLT */ gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit); break; case 0x60: /* ADDQ/V */ gen_addqv(ra, rb, rc, islit, lit); break; case 0x69: /* SUBQ/V */ gen_subqv(ra, rb, rc, islit, lit); break; case 0x6D: /* CMPLE */ gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit); break; default: goto invalid_opc; } break; case 0x11: switch (fn7) { case 0x00: /* AND */ if (likely(rc != 31)) { if (ra == 31) tcg_gen_movi_i64(cpu_ir[rc], 0); else if (islit) tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit); else tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } break; case 0x08: /* BIC */ if (likely(rc != 31)) { if (ra != 31) { if (islit) tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit); else tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } else tcg_gen_movi_i64(cpu_ir[rc], 0); } break; case 0x14: /* CMOVLBS */ gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1); break; case 0x16: /* CMOVLBC */ gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1); break; case 0x20: /* BIS */ if (likely(rc != 31)) { if (ra != 31) { if (islit) tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit); else tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], lit); else tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); } } break; case 0x24: /* CMOVEQ */ gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0); break; case 0x26: /* CMOVNE */ gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0); break; case 0x28: /* ORNOT */ if (likely(rc != 31)) { if (ra != 31) { if (islit) tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit); else tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], ~lit); else tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]); } } break; case 0x40: /* XOR */ if (likely(rc != 31)) { if (ra != 31) { if (islit) tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit); else tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], lit); else tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]); } } break; case 0x44: /* CMOVLT */ gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0); break; case 0x46: /* CMOVGE */ gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0); break; case 0x48: /* EQV */ if (likely(rc != 31)) { if (ra != 31) { if (islit) tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit); else tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } else { if (islit) tcg_gen_movi_i64(cpu_ir[rc], ~lit); else tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]); } } break; case 0x61: /* AMASK */ if (likely(rc != 31)) { if (islit) tcg_gen_movi_i64(cpu_ir[rc], helper_amask(lit)); else gen_helper_amask(cpu_ir[rc], cpu_ir[rb]); } break; case 0x64: /* CMOVLE */ gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0); break; case 0x66: /* CMOVGT */ gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0); break; case 0x6C: /* IMPLVER */ if (rc != 31) gen_helper_load_implver(cpu_ir[rc]); break; default: goto invalid_opc; } break; case 0x12: switch (fn7) { case 0x02: /* MSKBL */ gen_mskbl(ra, rb, rc, islit, lit); break; case 0x06: /* EXTBL */ gen_ext_l(&tcg_gen_ext8u_i64, ra, rb, rc, islit, lit); break; case 0x0B: /* INSBL */ gen_insbl(ra, rb, rc, islit, lit); 
break; case 0x12: /* MSKWL */ gen_mskwl(ra, rb, rc, islit, lit); break; case 0x16: /* EXTWL */ gen_ext_l(&tcg_gen_ext16u_i64, ra, rb, rc, islit, lit); break; case 0x1B: /* INSWL */ gen_inswl(ra, rb, rc, islit, lit); break; case 0x22: /* MSKLL */ gen_mskll(ra, rb, rc, islit, lit); break; case 0x26: /* EXTLL */ gen_ext_l(&tcg_gen_ext32u_i64, ra, rb, rc, islit, lit); break; case 0x2B: /* INSLL */ gen_insll(ra, rb, rc, islit, lit); break; case 0x30: /* ZAP */ gen_zap(ra, rb, rc, islit, lit); break; case 0x31: /* ZAPNOT */ gen_zapnot(ra, rb, rc, islit, lit); break; case 0x32: /* MSKQL */ gen_mskql(ra, rb, rc, islit, lit); break; case 0x34: /* SRL */ if (likely(rc != 31)) { if (ra != 31) { if (islit) tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f); else { TCGv shift = tcg_temp_new(); tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f); tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift); tcg_temp_free(shift); } } else tcg_gen_movi_i64(cpu_ir[rc], 0); } break; case 0x36: /* EXTQL */ gen_ext_l(NULL, ra, rb, rc, islit, lit); break; case 0x39: /* SLL */ if (likely(rc != 31)) { if (ra != 31) { if (islit) tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f); else { TCGv shift = tcg_temp_new(); tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f); tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift); tcg_temp_free(shift); } } else tcg_gen_movi_i64(cpu_ir[rc], 0); } break; case 0x3B: /* INSQL */ gen_insql(ra, rb, rc, islit, lit); break; case 0x3C: /* SRA */ if (likely(rc != 31)) { if (ra != 31) { if (islit) tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f); else { TCGv shift = tcg_temp_new(); tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f); tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift); tcg_temp_free(shift); } } else tcg_gen_movi_i64(cpu_ir[rc], 0); } break; case 0x52: /* MSKWH */ gen_mskwh(ra, rb, rc, islit, lit); break; case 0x57: /* INSWH */ gen_inswh(ra, rb, rc, islit, lit); break; case 0x5A: /* EXTWH */ gen_ext_h(&tcg_gen_ext16u_i64, ra, rb, rc, islit, lit); break; case 0x62: /* MSKLH */ gen_msklh(ra, rb, rc, islit, lit); break; case 0x67: /* INSLH */ gen_inslh(ra, rb, rc, islit, lit); break; case 0x6A: /* EXTLH */ gen_ext_h(&tcg_gen_ext16u_i64, ra, rb, rc, islit, lit); break; case 0x72: /* MSKQH */ gen_mskqh(ra, rb, rc, islit, lit); break; case 0x77: /* INSQH */ gen_insqh(ra, rb, rc, islit, lit); break; case 0x7A: /* EXTQH */ gen_ext_h(NULL, ra, rb, rc, islit, lit); break; default: goto invalid_opc; } break; case 0x13: switch (fn7) { case 0x00: /* MULL */ if (likely(rc != 31)) { if (ra == 31) tcg_gen_movi_i64(cpu_ir[rc], 0); else { if (islit) tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit); else tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]); } } break; case 0x20: /* MULQ */ if (likely(rc != 31)) { if (ra == 31) tcg_gen_movi_i64(cpu_ir[rc], 0); else if (islit) tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit); else tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); } break; case 0x30: /* UMULH */ gen_umulh(ra, rb, rc, islit, lit); break; case 0x40: /* MULL/V */ gen_mullv(ra, rb, rc, islit, lit); break; case 0x60: /* MULQ/V */ gen_mulqv(ra, rb, rc, islit, lit); break; default: goto invalid_opc; } break; case 0x14: switch (fpfn) { /* f11 & 0x3F */ case 0x04: /* ITOFS */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; if (likely(rc != 31)) { if (ra != 31) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]); gen_helper_memory_to_s(cpu_fir[rc], tmp); tcg_temp_free_i32(tmp); } else tcg_gen_movi_i64(cpu_fir[rc], 0); } break; case 0x0A: /* SQRTF */ if (!(ctx->amask 
& AMASK_FIX)) goto invalid_opc; gen_fsqrtf(rb, rc); break; case 0x0B: /* SQRTS */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; gen_fsqrts(rb, rc); break; case 0x14: /* ITOFF */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; if (likely(rc != 31)) { if (ra != 31) { TCGv_i32 tmp = tcg_temp_new_i32(); tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]); gen_helper_memory_to_f(cpu_fir[rc], tmp); tcg_temp_free_i32(tmp); } else tcg_gen_movi_i64(cpu_fir[rc], 0); } break; case 0x24: /* ITOFT */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; if (likely(rc != 31)) { if (ra != 31) tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]); else tcg_gen_movi_i64(cpu_fir[rc], 0); } break; case 0x2A: /* SQRTG */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; gen_fsqrtg(rb, rc); break; case 0x02B: /* SQRTT */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; gen_fsqrtt(rb, rc); break; default: goto invalid_opc; } break; case 0x15: /* VAX floating point */ /* XXX: rounding mode and trap are ignored (!) */ switch (fpfn) { /* f11 & 0x3F */ case 0x00: /* ADDF */ gen_faddf(ra, rb, rc); break; case 0x01: /* SUBF */ gen_fsubf(ra, rb, rc); break; case 0x02: /* MULF */ gen_fmulf(ra, rb, rc); break; case 0x03: /* DIVF */ gen_fdivf(ra, rb, rc); break; case 0x1E: /* CVTDG */ #if 0 // TODO gen_fcvtdg(rb, rc); #else goto invalid_opc; #endif break; case 0x20: /* ADDG */ gen_faddg(ra, rb, rc); break; case 0x21: /* SUBG */ gen_fsubg(ra, rb, rc); break; case 0x22: /* MULG */ gen_fmulg(ra, rb, rc); break; case 0x23: /* DIVG */ gen_fdivg(ra, rb, rc); break; case 0x25: /* CMPGEQ */ gen_fcmpgeq(ra, rb, rc); break; case 0x26: /* CMPGLT */ gen_fcmpglt(ra, rb, rc); break; case 0x27: /* CMPGLE */ gen_fcmpgle(ra, rb, rc); break; case 0x2C: /* CVTGF */ gen_fcvtgf(rb, rc); break; case 0x2D: /* CVTGD */ #if 0 // TODO gen_fcvtgd(rb, rc); #else goto invalid_opc; #endif break; case 0x2F: /* CVTGQ */ gen_fcvtgq(rb, rc); break; case 0x3C: /* CVTQF */ gen_fcvtqf(rb, rc); break; case 0x3E: /* CVTQG */ gen_fcvtqg(rb, rc); break; default: goto invalid_opc; } break; case 0x16: /* IEEE floating-point */ /* XXX: rounding mode and traps are ignored (!) 
*/ switch (fpfn) { /* f11 & 0x3F */ case 0x00: /* ADDS */ gen_fadds(ra, rb, rc); break; case 0x01: /* SUBS */ gen_fsubs(ra, rb, rc); break; case 0x02: /* MULS */ gen_fmuls(ra, rb, rc); break; case 0x03: /* DIVS */ gen_fdivs(ra, rb, rc); break; case 0x20: /* ADDT */ gen_faddt(ra, rb, rc); break; case 0x21: /* SUBT */ gen_fsubt(ra, rb, rc); break; case 0x22: /* MULT */ gen_fmult(ra, rb, rc); break; case 0x23: /* DIVT */ gen_fdivt(ra, rb, rc); break; case 0x24: /* CMPTUN */ gen_fcmptun(ra, rb, rc); break; case 0x25: /* CMPTEQ */ gen_fcmpteq(ra, rb, rc); break; case 0x26: /* CMPTLT */ gen_fcmptlt(ra, rb, rc); break; case 0x27: /* CMPTLE */ gen_fcmptle(ra, rb, rc); break; case 0x2C: /* XXX: incorrect */ if (fn11 == 0x2AC || fn11 == 0x6AC) { /* CVTST */ gen_fcvtst(rb, rc); } else { /* CVTTS */ gen_fcvtts(rb, rc); } break; case 0x2F: /* CVTTQ */ gen_fcvttq(rb, rc); break; case 0x3C: /* CVTQS */ gen_fcvtqs(rb, rc); break; case 0x3E: /* CVTQT */ gen_fcvtqt(rb, rc); break; default: goto invalid_opc; } break; case 0x17: switch (fn11) { case 0x010: /* CVTLQ */ gen_fcvtlq(rb, rc); break; case 0x020: if (likely(rc != 31)) { if (ra == rb) /* FMOV */ tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]); else /* CPYS */ gen_fcpys(ra, rb, rc); } break; case 0x021: /* CPYSN */ gen_fcpysn(ra, rb, rc); break; case 0x022: /* CPYSE */ gen_fcpyse(ra, rb, rc); break; case 0x024: /* MT_FPCR */ if (likely(ra != 31)) gen_helper_store_fpcr(cpu_fir[ra]); else { TCGv tmp = tcg_const_i64(0); gen_helper_store_fpcr(tmp); tcg_temp_free(tmp); } break; case 0x025: /* MF_FPCR */ if (likely(ra != 31)) gen_helper_load_fpcr(cpu_fir[ra]); break; case 0x02A: /* FCMOVEQ */ gen_fcmpfeq(ra, rb, rc); break; case 0x02B: /* FCMOVNE */ gen_fcmpfne(ra, rb, rc); break; case 0x02C: /* FCMOVLT */ gen_fcmpflt(ra, rb, rc); break; case 0x02D: /* FCMOVGE */ gen_fcmpfge(ra, rb, rc); break; case 0x02E: /* FCMOVLE */ gen_fcmpfle(ra, rb, rc); break; case 0x02F: /* FCMOVGT */ gen_fcmpfgt(ra, rb, rc); break; case 0x030: /* CVTQL */ gen_fcvtql(rb, rc); break; case 0x130: /* CVTQL/V */ gen_fcvtqlv(rb, rc); break; case 0x530: /* CVTQL/SV */ gen_fcvtqlsv(rb, rc); break; default: goto invalid_opc; } break; case 0x18: switch ((uint16_t)disp16) { case 0x0000: /* TRAPB */ /* No-op. Just exit from the current tb */ ret = 2; break; case 0x0400: /* EXCB */ /* No-op. 
Just exit from the current tb */ ret = 2; break; case 0x4000: /* MB */ /* No-op */ break; case 0x4400: /* WMB */ /* No-op */ break; case 0x8000: /* FETCH */ /* No-op */ break; case 0xA000: /* FETCH_M */ /* No-op */ break; case 0xC000: /* RPCC */ if (ra != 31) gen_helper_load_pcc(cpu_ir[ra]); break; case 0xE000: /* RC */ if (ra != 31) gen_helper_rc(cpu_ir[ra]); break; case 0xE800: /* ECB */ /* XXX: TODO: evict tb cache at address rb */ #if 0 ret = 2; #else goto invalid_opc; #endif break; case 0xF000: /* RS */ if (ra != 31) gen_helper_rs(cpu_ir[ra]); break; case 0xF800: /* WH64 */ /* No-op */ break; default: goto invalid_opc; } break; case 0x19: /* HW_MFPR (PALcode) */ #if defined (CONFIG_USER_ONLY) goto invalid_opc; #else if (!ctx->pal_mode) goto invalid_opc; if (ra != 31) { TCGv tmp = tcg_const_i32(insn & 0xFF); gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]); tcg_temp_free(tmp); } break; #endif case 0x1A: if (rb != 31) tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3); else tcg_gen_movi_i64(cpu_pc, 0); if (ra != 31) tcg_gen_movi_i64(cpu_ir[ra], ctx->pc); /* Those four jumps only differ by the branch prediction hint */ switch (fn2) { case 0x0: /* JMP */ break; case 0x1: /* JSR */ break; case 0x2: /* RET */ break; case 0x3: /* JSR_COROUTINE */ break; } ret = 1; break; case 0x1B: /* HW_LD (PALcode) */ #if defined (CONFIG_USER_ONLY) goto invalid_opc; #else if (!ctx->pal_mode) goto invalid_opc; if (ra != 31) { TCGv addr = tcg_temp_new(); if (rb != 31) tcg_gen_addi_i64(addr, cpu_ir[rb], disp12); else tcg_gen_movi_i64(addr, disp12); switch ((insn >> 12) & 0xF) { case 0x0: /* Longword physical access */ gen_helper_ldl_raw(cpu_ir[ra], addr); break; case 0x1: /* Quadword physical access */ gen_helper_ldq_raw(cpu_ir[ra], addr); break; case 0x2: /* Longword physical access with lock */ gen_helper_ldl_l_raw(cpu_ir[ra], addr); break; case 0x3: /* Quadword physical access with lock */ gen_helper_ldq_l_raw(cpu_ir[ra], addr); break; case 0x4: /* Longword virtual PTE fetch */ gen_helper_ldl_kernel(cpu_ir[ra], addr); break; case 0x5: /* Quadword virtual PTE fetch */ gen_helper_ldq_kernel(cpu_ir[ra], addr); break; case 0x6: /* Invalid */ goto invalid_opc; case 0x7: /* Invalid */ goto invalid_opc; case 0x8: /* Longword virtual access */ gen_helper_st_virt_to_phys(addr, addr); gen_helper_ldl_raw(cpu_ir[ra], addr); break; case 0x9: /* Quadword virtual access */ gen_helper_st_virt_to_phys(addr, addr); gen_helper_ldq_raw(cpu_ir[ra], addr); break; case 0xA: /* Longword virtual access with protection check */ tcg_gen_qemu_ld32s(cpu_ir[ra], addr, ctx->flags); break; case 0xB: /* Quadword virtual access with protection check */ tcg_gen_qemu_ld64(cpu_ir[ra], addr, ctx->flags); break; case 0xC: /* Longword virtual access with alternate access mode */ gen_helper_set_alt_mode(); gen_helper_st_virt_to_phys(addr, addr); gen_helper_ldl_raw(cpu_ir[ra], addr); gen_helper_restore_mode(); break; case 0xD: /* Quadword virtual access with alternate access mode */ gen_helper_set_alt_mode(); gen_helper_st_virt_to_phys(addr, addr); gen_helper_ldq_raw(cpu_ir[ra], addr); gen_helper_restore_mode(); break; case 0xE: /* Longword virtual access with alternate access mode and * protection checks */ gen_helper_set_alt_mode(); gen_helper_ldl_data(cpu_ir[ra], addr); gen_helper_restore_mode(); break; case 0xF: /* Quadword virtual access with alternate access mode and * protection checks */ gen_helper_set_alt_mode(); gen_helper_ldq_data(cpu_ir[ra], addr); gen_helper_restore_mode(); break; } tcg_temp_free(addr); } break; #endif
case 0x1C: switch (fn7) { case 0x00: /* SEXTB */ if (!(ctx->amask & AMASK_BWX)) goto invalid_opc; if (likely(rc != 31)) { if (islit) tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit)); else tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]); } break; case 0x01: /* SEXTW */ if (!(ctx->amask & AMASK_BWX)) goto invalid_opc; if (likely(rc != 31)) { if (islit) tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit)); else tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]); } break; case 0x30: /* CTPOP */ if (!(ctx->amask & AMASK_CIX)) goto invalid_opc; if (likely(rc != 31)) { if (islit) tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit)); else gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]); } break; case 0x31: /* PERR */ if (!(ctx->amask & AMASK_MVI)) goto invalid_opc; /* XXX: TODO */ goto invalid_opc; break; case 0x32: /* CTLZ */ if (!(ctx->amask & AMASK_CIX)) goto invalid_opc; if (likely(rc != 31)) { if (islit) tcg_gen_movi_i64(cpu_ir[rc], clz64(lit)); else gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]); } break; case 0x33: /* CTTZ */ if (!(ctx->amask & AMASK_CIX)) goto invalid_opc; if (likely(rc != 31)) { if (islit) tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit)); else gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]); } break; case 0x34: /* UNPKBW */ if (!(ctx->amask & AMASK_MVI)) goto invalid_opc; /* XXX: TODO */ goto invalid_opc; break; case 0x35: /* UNPKWL */ if (!(ctx->amask & AMASK_MVI)) goto invalid_opc; /* XXX: TODO */ goto invalid_opc; break; case 0x36: /* PKWB */ if (!(ctx->amask & AMASK_MVI)) goto invalid_opc; /* XXX: TODO */ goto invalid_opc; break; case 0x37: /* PKLB */ if (!(ctx->amask & AMASK_MVI)) goto invalid_opc; /* XXX: TODO */ goto invalid_opc; break; case 0x38: /* MINSB8 */ if (!(ctx->amask & AMASK_MVI)) goto invalid_opc; /* XXX: TODO */ goto invalid_opc; break; case 0x39: /* MINSW4 */ if (!(ctx->amask & AMASK_MVI)) goto invalid_opc; /* XXX: TODO */ goto invalid_opc; break; case 0x3A: /* MINUB8 */ if (!(ctx->amask & AMASK_MVI)) goto invalid_opc; /* XXX: TODO */ goto invalid_opc; break; case 0x3B: /* MINUW4 */ if (!(ctx->amask & AMASK_MVI)) goto invalid_opc; /* XXX: TODO */ goto invalid_opc; break; case 0x3C: /* MAXUB8 */ if (!(ctx->amask & AMASK_MVI)) goto invalid_opc; /* XXX: TODO */ goto invalid_opc; break; case 0x3D: /* MAXUW4 */ if (!(ctx->amask & AMASK_MVI)) goto invalid_opc; /* XXX: TODO */ goto invalid_opc; break; case 0x3E: /* MAXSB8 */ if (!(ctx->amask & AMASK_MVI)) goto invalid_opc; /* XXX: TODO */ goto invalid_opc; break; case 0x3F: /* MAXSW4 */ if (!(ctx->amask & AMASK_MVI)) goto invalid_opc; /* XXX: TODO */ goto invalid_opc; break; case 0x70: /* FTOIT */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; if (likely(rc != 31)) { if (ra != 31) tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]); else tcg_gen_movi_i64(cpu_ir[rc], 0); } break; case 0x78: /* FTOIS */ if (!(ctx->amask & AMASK_FIX)) goto invalid_opc; if (rc != 31) { TCGv_i32 tmp1 = tcg_temp_new_i32(); if (ra != 31) gen_helper_s_to_memory(tmp1, cpu_fir[ra]); else { TCGv tmp2 = tcg_const_i64(0); gen_helper_s_to_memory(tmp1, tmp2); tcg_temp_free(tmp2); } tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1); tcg_temp_free_i32(tmp1); } break; default: goto invalid_opc; } break; case 0x1D: /* HW_MTPR (PALcode) */ #if defined (CONFIG_USER_ONLY) goto invalid_opc; #else if (!ctx->pal_mode) goto invalid_opc; else { TCGv tmp1 = tcg_const_i32(insn & 0xFF); if (ra != 31) gen_helper_mtpr(tmp1, cpu_ir[ra]); else { TCGv tmp2 = tcg_const_i64(0); gen_helper_mtpr(tmp1, tmp2); tcg_temp_free(tmp2); } tcg_temp_free(tmp1); ret = 2; } break; #endif case 0x1E: /* HW_REI (PALcode) */ #if defined 
(CONFIG_USER_ONLY) goto invalid_opc; #else if (!ctx->pal_mode) goto invalid_opc; if (rb == 31) { /* "Old" alpha */ gen_helper_hw_rei(); } else { TCGv tmp; if (ra != 31) { tmp = tcg_temp_new(); tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51)); } else tmp = tcg_const_i64(((int64_t)insn << 51) >> 51); gen_helper_hw_ret(tmp); tcg_temp_free(tmp); } ret = 2; break; #endif case 0x1F: /* HW_ST (PALcode) */ #if defined (CONFIG_USER_ONLY) goto invalid_opc; #else if (!ctx->pal_mode) goto invalid_opc; else { TCGv addr, val; addr = tcg_temp_new(); if (rb != 31) tcg_gen_addi_i64(addr, cpu_ir[rb], disp12); else tcg_gen_movi_i64(addr, disp12); if (ra != 31) val = cpu_ir[ra]; else { val = tcg_temp_new(); tcg_gen_movi_i64(val, 0); } switch ((insn >> 12) & 0xF) { case 0x0: /* Longword physical access */ gen_helper_stl_raw(val, addr); break; case 0x1: /* Quadword physical access */ gen_helper_stq_raw(val, addr); break; case 0x2: /* Longword physical access with lock */ gen_helper_stl_c_raw(val, val, addr); break; case 0x3: /* Quadword physical access with lock */ gen_helper_stq_c_raw(val, val, addr); break; case 0x4: /* Longword virtual access */ gen_helper_st_virt_to_phys(addr, addr); gen_helper_stl_raw(val, addr); break; case 0x5: /* Quadword virtual access */ gen_helper_st_virt_to_phys(addr, addr); gen_helper_stq_raw(val, addr); break; case 0x6: /* Invalid */ goto invalid_opc; case 0x7: /* Invalid */ goto invalid_opc; case 0x8: /* Invalid */ goto invalid_opc; case 0x9: /* Invalid */ goto invalid_opc; case 0xA: /* Invalid */ goto invalid_opc; case 0xB: /* Invalid */ goto invalid_opc; case 0xC: /* Longword virtual access with alternate access mode */ gen_helper_set_alt_mode(); gen_helper_st_virt_to_phys(addr, addr); gen_helper_stl_raw(val, addr); gen_helper_restore_mode(); break; case 0xD: /* Quadword virtual access with alternate access mode */ gen_helper_set_alt_mode(); gen_helper_st_virt_to_phys(addr, addr); gen_helper_stl_raw(val, addr); gen_helper_restore_mode(); break; case 0xE: /* Invalid */ goto invalid_opc; case 0xF: /* Invalid */ goto invalid_opc; } if (ra == 31) tcg_temp_free(val); tcg_temp_free(addr); } break; #endif case 0x20: /* LDF */ gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0); break; case 0x21: /* LDG */ gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0); break; case 0x22: /* LDS */ gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0); break; case 0x23: /* LDT */ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0); break; case 0x24: /* STF */ gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0); break; case 0x25: /* STG */ gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0); break; case 0x26: /* STS */ gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0); break; case 0x27: /* STT */ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0); break; case 0x28: /* LDL */ gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0); break; case 0x29: /* LDQ */ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0); break; case 0x2A: /* LDL_L */ gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0); break; case 0x2B: /* LDQ_L */ gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0); break; case 0x2C: /* STL */ gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0); break; case 0x2D: /* STQ */ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0); break; case 0x2E: /* STL_C */ gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1); break; case 0x2F: /* STQ_C */ gen_store_mem(ctx, &gen_qemu_stq_c, 
ra, rb, disp16, 0, 0, 1); break; case 0x30: /* BR */ if (ra != 31) tcg_gen_movi_i64(cpu_ir[ra], ctx->pc); tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2)); ret = 1; break; case 0x31: /* FBEQ */ case 0x32: /* FBLT */ case 0x33: /* FBLE */ gen_fbcond(ctx, opc, ra, disp16); ret = 1; break; case 0x34: /* BSR */ if (ra != 31) tcg_gen_movi_i64(cpu_ir[ra], ctx->pc); tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2)); ret = 1; break; case 0x35: /* FBNE */ case 0x36: /* FBGE */ case 0x37: /* FBGT */ gen_fbcond(ctx, opc, ra, disp16); ret = 1; break; case 0x38: /* BLBC */ gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1); ret = 1; break; case 0x39: /* BEQ */ gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0); ret = 1; break; case 0x3A: /* BLT */ gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0); ret = 1; break; case 0x3B: /* BLE */ gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0); ret = 1; break; case 0x3C: /* BLBS */ gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1); ret = 1; break; case 0x3D: /* BNE */ gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0); ret = 1; break; case 0x3E: /* BGE */ gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0); ret = 1; break; case 0x3F: /* BGT */ gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0); ret = 1; break; invalid_opc: gen_invalid(ctx); ret = 3; break; } return ret; } | 14,534 |
0 | void helper_restore_mode (void) { env->ps = (env->ps & ~0xC) | env->saved_mode; } | 14,536 |
0 | void pc_init_pci64_hole(PcPciInfo *pci_info, uint64_t pci_hole64_start, uint64_t pci_hole64_size) { if ((sizeof(hwaddr) == 4) || (!pci_hole64_size)) { return; } /* * BIOS does not set MTRR entries for the 64 bit window, so no need to * align address to power of two. Align address at 1G, this makes sure * it can be exactly covered with a PAT entry even when using huge * pages. */ pci_info->w64.begin = ROUND_UP(pci_hole64_start, 0x1ULL << 30); pci_info->w64.end = pci_info->w64.begin + pci_hole64_size; assert(pci_info->w64.begin <= pci_info->w64.end); } | 14,537 |
0 | static ssize_t mp_user_getxattr(FsContext *ctx, const char *path, const char *name, void *value, size_t size) { char buffer[PATH_MAX]; if (strncmp(name, "user.virtfs.", 12) == 0) { /* * Don't allow fetch of user.virtfs namespace * in case of mapped security */ errno = ENOATTR; return -1; } return lgetxattr(rpath(ctx, path, buffer), name, value, size); } | 14,538
0 | void arm_load_kernel(CPUState *env, struct arm_boot_info *info) { int kernel_size; int initrd_size; int n; int is_linux = 0; uint64_t elf_entry; target_phys_addr_t entry; int big_endian; /* Load the kernel. */ if (!info->kernel_filename) { fprintf(stderr, "Kernel image must be specified\n"); exit(1); } if (!info->secondary_cpu_reset_hook) { info->secondary_cpu_reset_hook = default_reset_secondary; } if (!info->write_secondary_boot) { info->write_secondary_boot = default_write_secondary; } if (info->nb_cpus == 0) info->nb_cpus = 1; #ifdef TARGET_WORDS_BIGENDIAN big_endian = 1; #else big_endian = 0; #endif /* Assume that raw images are linux kernels, and ELF images are not. */ kernel_size = load_elf(info->kernel_filename, NULL, NULL, &elf_entry, NULL, NULL, big_endian, ELF_MACHINE, 1); entry = elf_entry; if (kernel_size < 0) { kernel_size = load_uimage(info->kernel_filename, &entry, NULL, &is_linux); } if (kernel_size < 0) { entry = info->loader_start + KERNEL_LOAD_ADDR; kernel_size = load_image_targphys(info->kernel_filename, entry, ram_size - KERNEL_LOAD_ADDR); is_linux = 1; } if (kernel_size < 0) { fprintf(stderr, "qemu: could not load kernel '%s'\n", info->kernel_filename); exit(1); } info->entry = entry; if (is_linux) { if (info->initrd_filename) { initrd_size = load_image_targphys(info->initrd_filename, info->loader_start + INITRD_LOAD_ADDR, ram_size - INITRD_LOAD_ADDR); if (initrd_size < 0) { fprintf(stderr, "qemu: could not load initrd '%s'\n", info->initrd_filename); exit(1); } } else { initrd_size = 0; } bootloader[1] |= info->board_id & 0xff; bootloader[2] |= (info->board_id >> 8) & 0xff; bootloader[5] = info->loader_start + KERNEL_ARGS_ADDR; bootloader[6] = entry; for (n = 0; n < sizeof(bootloader) / 4; n++) { bootloader[n] = tswap32(bootloader[n]); } rom_add_blob_fixed("bootloader", bootloader, sizeof(bootloader), info->loader_start); if (info->nb_cpus > 1) { info->write_secondary_boot(env, info); } info->initrd_size = initrd_size; } info->is_linux = is_linux; for (; env; env = env->next_cpu) { env->boot_info = info; qemu_register_reset(do_cpu_reset, env); } } | 14,539 |
0 | static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s, uint16_t domain_id, hwaddr addr, uint8_t am) { IntelIOMMUNotifierNode *node; VTDContextEntry ce; int ret; QLIST_FOREACH(node, &(s->notifiers_list), next) { VTDAddressSpace *vtd_as = node->vtd_as; ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), vtd_as->devfn, &ce); if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) { vtd_page_walk(&ce, addr, addr + (1 << am) * VTD_PAGE_SIZE, vtd_page_invalidate_notify_hook, (void *)&vtd_as->iommu, true); } } } | 14,540 |
0 | int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq) { int rc; uint8_t byte; if (!dev->crq.qsize) { fprintf(stderr, "spapr_vio_send_crq on uninitialized queue\n"); return -1; } /* Maybe do a fast path for KVM just writing to the pages */ rc = spapr_vio_dma_read(dev, dev->crq.qladdr + dev->crq.qnext, &byte, 1); if (rc) { return rc; } if (byte != 0) { return 1; } rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext + 8, &crq[8], 8); if (rc) { return rc; } kvmppc_eieio(); rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext, crq, 8); if (rc) { return rc; } dev->crq.qnext = (dev->crq.qnext + 16) % dev->crq.qsize; if (dev->signal_state & 1) { qemu_irq_pulse(dev->qirq); } return 0; } | 14,541
0 | static void qnull_visit_test(void) { QObject *obj; QmpOutputVisitor *qov; Visitor *v; /* * Most tests of interactions between QObject and visitors are in * test-qmp-*-visitor; but these tests live here because they * depend on layering violations to check qnull_ refcnt. */ g_assert(qnull_.refcnt == 1); obj = qnull(); v = qmp_input_visitor_new(obj, true); qobject_decref(obj); visit_type_null(v, NULL, &error_abort); visit_free(v); qov = qmp_output_visitor_new(); visit_type_null(qmp_output_get_visitor(qov), NULL, &error_abort); obj = qmp_output_get_qobject(qov); g_assert(obj == &qnull_); qobject_decref(obj); qmp_output_visitor_cleanup(qov); g_assert(qnull_.refcnt == 1); } | 14,542 |
0 | vcard_emul_find_vreader_from_slot(PK11SlotInfo *slot) { VReaderList *reader_list = vreader_get_reader_list(); VReaderListEntry *current_entry = NULL; if (reader_list == NULL) { return NULL; } for (current_entry = vreader_list_get_first(reader_list); current_entry; current_entry = vreader_list_get_next(current_entry)) { VReader *reader = vreader_list_get_reader(current_entry); VReaderEmul *reader_emul = vreader_get_private(reader); if (reader_emul->slot == slot) { vreader_list_delete(reader_list); return reader; } vreader_free(reader); } vreader_list_delete(reader_list); return NULL; } | 14,544 |
0 | void qemu_del_polling_cb(PollingFunc *func, void *opaque) { PollingEntry **ppe, *pe; for(ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next) { pe = *ppe; if (pe->func == func && pe->opaque == opaque) { *ppe = pe->next; g_free(pe); break; } } } | 14,545 |
0 | static void test_vector_dmul_scalar(const double *src0, const double *src1) { LOCAL_ALIGNED_32(double, cdst, [LEN]); LOCAL_ALIGNED_32(double, odst, [LEN]); int i; declare_func(void, double *dst, const double *src, double mul, int len); call_ref(cdst, src0, src1[0], LEN); call_new(odst, src0, src1[0], LEN); for (i = 0; i < LEN; i++) { if (!double_near_abs_eps(cdst[i], odst[i], DBL_EPSILON)) { fprintf(stderr, "%d: %- .12f - %- .12f = % .12g\n", i, cdst[i], odst[i], cdst[i] - odst[i]); fail(); break; } } bench_new(odst, src0, src1[0], LEN); } | 14,546 |
0 | static void show_packet(WriterContext *w, AVFormatContext *fmt_ctx, AVPacket *pkt, int packet_idx) { char val_str[128]; AVStream *st = fmt_ctx->streams[pkt->stream_index]; struct print_buf pbuf = {.s = NULL}; print_section_header("packet"); print_str("codec_type", av_x_if_null(av_get_media_type_string(st->codec->codec_type), "unknown")); print_int("stream_index", pkt->stream_index); print_ts ("pts", pkt->pts); print_time("pts_time", pkt->pts, &st->time_base); print_ts ("dts", pkt->dts); print_time("dts_time", pkt->dts, &st->time_base); print_ts ("duration", pkt->duration); print_time("duration_time", pkt->duration, &st->time_base); print_val("size", pkt->size, unit_byte_str); print_fmt("pos", "%"PRId64, pkt->pos); print_fmt("flags", "%c", pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_'); print_section_footer("packet"); av_free(pbuf.s); fflush(stdout); } | 14,547 |
0 | static int event_thread(void *arg) { AVFormatContext *s = arg; SDLContext *sdl = s->priv_data; int flags = SDL_BASE_FLAGS | (sdl->window_fullscreen ? SDL_FULLSCREEN : 0); AVStream *st = s->streams[0]; AVCodecContext *encctx = st->codec; /* initialization */ if (SDL_Init(SDL_INIT_VIDEO) != 0) { av_log(s, AV_LOG_ERROR, "Unable to initialize SDL: %s\n", SDL_GetError()); sdl->init_ret = AVERROR(EINVAL); goto init_end; } SDL_WM_SetCaption(sdl->window_title, sdl->icon_title); sdl->surface = SDL_SetVideoMode(sdl->window_width, sdl->window_height, 24, flags); if (!sdl->surface) { av_log(sdl, AV_LOG_ERROR, "Unable to set video mode: %s\n", SDL_GetError()); sdl->init_ret = AVERROR(EINVAL); goto init_end; } sdl->overlay = SDL_CreateYUVOverlay(encctx->width, encctx->height, sdl->overlay_fmt, sdl->surface); if (!sdl->overlay || sdl->overlay->pitches[0] < encctx->width) { av_log(s, AV_LOG_ERROR, "SDL does not support an overlay with size of %dx%d pixels\n", encctx->width, encctx->height); sdl->init_ret = AVERROR(EINVAL); goto init_end; } sdl->init_ret = 0; av_log(s, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s -> w:%d h:%d\n", encctx->width, encctx->height, av_get_pix_fmt_name(encctx->pix_fmt), sdl->overlay_rect.w, sdl->overlay_rect.h); init_end: SDL_LockMutex(sdl->mutex); sdl->inited = 1; SDL_UnlockMutex(sdl->mutex); SDL_CondSignal(sdl->init_cond); if (sdl->init_ret < 0) return sdl->init_ret; /* event loop */ while (!sdl->quit) { int ret; SDL_Event event; SDL_PumpEvents(); ret = SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_ALLEVENTS); if (ret < 0) av_log(s, AV_LOG_ERROR, "Error when getting SDL event: %s\n", SDL_GetError()); if (ret <= 0) continue; switch (event.type) { case SDL_KEYDOWN: switch (event.key.keysym.sym) { case SDLK_ESCAPE: case SDLK_q: sdl->quit = 1; break; } break; case SDL_QUIT: sdl->quit = 1; break; case SDL_VIDEORESIZE: sdl->window_width = event.resize.w; sdl->window_height = event.resize.h; SDL_LockMutex(sdl->mutex); sdl->surface = SDL_SetVideoMode(sdl->window_width, sdl->window_height, 24, SDL_BASE_FLAGS); if (!sdl->surface) { av_log(s, AV_LOG_ERROR, "Failed to set SDL video mode: %s\n", SDL_GetError()); sdl->quit = 1; } else { compute_overlay_rect(s); } SDL_UnlockMutex(sdl->mutex); break; default: break; } } return 0; } | 14,548 |
0 | static int write_elf_loads(DumpState *s) { hwaddr offset; MemoryMapping *memory_mapping; uint32_t phdr_index = 1; int ret; uint32_t max_index; if (s->have_section) { max_index = s->sh_info; } else { max_index = s->phdr_num; } QTAILQ_FOREACH(memory_mapping, &s->list.head, next) { offset = get_offset(memory_mapping->phys_addr, s); if (s->dump_info.d_class == ELFCLASS64) { ret = write_elf64_load(s, memory_mapping, phdr_index++, offset); } else { ret = write_elf32_load(s, memory_mapping, phdr_index++, offset); } if (ret < 0) { return -1; } if (phdr_index >= max_index) { break; } } return 0; } | 14,549 |
0 | static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc) { int addr_regl, addr_reg1, addr_meml; int data_regl, data_regh, data_reg1, data_reg2; int mem_index, s_bits; #if defined(CONFIG_SOFTMMU) void *label1_ptr, *label2_ptr; int sp_args; #endif #if TARGET_LONG_BITS == 64 # if defined(CONFIG_SOFTMMU) uint8_t *label3_ptr; # endif int addr_regh, addr_reg2, addr_memh; #endif data_regl = *args++; if (opc == 3) data_regh = *args++; else data_regh = 0; addr_regl = *args++; #if TARGET_LONG_BITS == 64 addr_regh = *args++; #endif mem_index = *args; s_bits = opc & 3; if (opc == 3) { #if defined(TCG_TARGET_WORDS_BIGENDIAN) data_reg1 = data_regh; data_reg2 = data_regl; #else data_reg1 = data_regl; data_reg2 = data_regh; #endif } else { data_reg1 = data_regl; data_reg2 = 0; } #if TARGET_LONG_BITS == 64 # if defined(TCG_TARGET_WORDS_BIGENDIAN) addr_reg1 = addr_regh; addr_reg2 = addr_regl; addr_memh = 0; addr_meml = 4; # else addr_reg1 = addr_regl; addr_reg2 = addr_regh; addr_memh = 4; addr_meml = 0; # endif #else addr_reg1 = addr_regl; addr_meml = 0; #endif #if defined(CONFIG_SOFTMMU) tcg_out_opc_sa(s, OPC_SRL, TCG_REG_A0, addr_regl, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_A0, TCG_REG_A0, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_A0, TCG_REG_A0, TCG_AREG0); tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0, offsetof(CPUState, tlb_table[mem_index][0].addr_read) + addr_meml); tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T0, TARGET_PAGE_MASK | ((1 << s_bits) - 1)); tcg_out_opc_reg(s, OPC_AND, TCG_REG_T0, TCG_REG_T0, addr_regl); # if TARGET_LONG_BITS == 64 label3_ptr = s->code_ptr; tcg_out_opc_br(s, OPC_BNE, TCG_REG_T0, TCG_REG_AT); tcg_out_nop(s); tcg_out_opc_imm(s, OPC_LW, TCG_REG_AT, TCG_REG_A0, offsetof(CPUState, tlb_table[mem_index][0].addr_read) + addr_memh); label1_ptr = s->code_ptr; tcg_out_opc_br(s, OPC_BEQ, addr_regh, TCG_REG_AT); tcg_out_nop(s); reloc_pc16(label3_ptr, (tcg_target_long) s->code_ptr); # else label1_ptr = s->code_ptr; tcg_out_opc_br(s, OPC_BEQ, TCG_REG_T0, TCG_REG_AT); tcg_out_nop(s); # endif /* slow path */ sp_args = TCG_REG_A0; tcg_out_mov(s, sp_args++, addr_reg1); # if TARGET_LONG_BITS == 64 tcg_out_mov(s, sp_args++, addr_reg2); # endif tcg_out_movi(s, TCG_TYPE_I32, sp_args++, mem_index); tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T9, (tcg_target_long)qemu_ld_helpers[s_bits]); tcg_out_opc_reg(s, OPC_JALR, TCG_REG_RA, TCG_REG_T9, 0); tcg_out_nop(s); switch(opc) { case 0: tcg_out_opc_imm(s, OPC_ANDI, data_reg1, TCG_REG_V0, 0xff); break; case 0 | 4: tcg_out_opc_sa(s, OPC_SLL, TCG_REG_V0, TCG_REG_V0, 24); tcg_out_opc_sa(s, OPC_SRA, data_reg1, TCG_REG_V0, 24); break; case 1: tcg_out_opc_imm(s, OPC_ANDI, data_reg1, TCG_REG_V0, 0xffff); break; case 1 | 4: tcg_out_opc_sa(s, OPC_SLL, TCG_REG_V0, TCG_REG_V0, 16); tcg_out_opc_sa(s, OPC_SRA, data_reg1, TCG_REG_V0, 16); break; case 2: tcg_out_mov(s, data_reg1, TCG_REG_V0); break; case 3: tcg_out_mov(s, data_reg2, TCG_REG_V1); tcg_out_mov(s, data_reg1, TCG_REG_V0); break; default: tcg_abort(); } label2_ptr = s->code_ptr; tcg_out_opc_br(s, OPC_BEQ, TCG_REG_ZERO, TCG_REG_ZERO); tcg_out_nop(s); /* label1: fast path */ reloc_pc16(label1_ptr, (tcg_target_long) s->code_ptr); tcg_out_opc_imm(s, OPC_LW, TCG_REG_A0, TCG_REG_A0, offsetof(CPUState, tlb_table[mem_index][0].addend) + addr_meml); tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_V0, TCG_REG_A0, addr_regl); #else if (GUEST_BASE == (int16_t)GUEST_BASE) { tcg_out_opc_imm(s, OPC_ADDIU, TCG_REG_V0, addr_reg1, GUEST_BASE); } 
else { tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_V0, GUEST_BASE); tcg_out_opc_reg(s, OPC_ADDU, TCG_REG_V0, TCG_REG_V0, addr_reg1); } #endif switch(opc) { case 0: tcg_out_opc_imm(s, OPC_LBU, data_reg1, TCG_REG_V0, 0); break; case 0 | 4: tcg_out_opc_imm(s, OPC_LB, data_reg1, TCG_REG_V0, 0); break; case 1: if (TCG_NEED_BSWAP) { tcg_out_opc_imm(s, OPC_LHU, TCG_REG_T0, TCG_REG_V0, 0); tcg_out_bswap16(s, data_reg1, TCG_REG_T0); } else { tcg_out_opc_imm(s, OPC_LHU, data_reg1, TCG_REG_V0, 0); } break; case 1 | 4: if (TCG_NEED_BSWAP) { tcg_out_opc_imm(s, OPC_LHU, TCG_REG_T0, TCG_REG_V0, 0); tcg_out_bswap16s(s, data_reg1, TCG_REG_T0); } else { tcg_out_opc_imm(s, OPC_LH, data_reg1, TCG_REG_V0, 0); } break; case 2: if (TCG_NEED_BSWAP) { tcg_out_opc_imm(s, OPC_LW, TCG_REG_T0, TCG_REG_V0, 0); tcg_out_bswap32(s, data_reg1, TCG_REG_T0); } else { tcg_out_opc_imm(s, OPC_LW, data_reg1, TCG_REG_V0, 0); } break; case 3: if (TCG_NEED_BSWAP) { tcg_out_opc_imm(s, OPC_LW, TCG_REG_T0, TCG_REG_V0, 4); tcg_out_bswap32(s, data_reg1, TCG_REG_T0); tcg_out_opc_imm(s, OPC_LW, TCG_REG_T0, TCG_REG_V0, 0); tcg_out_bswap32(s, data_reg2, TCG_REG_T0); } else { tcg_out_opc_imm(s, OPC_LW, data_reg1, TCG_REG_V0, 0); tcg_out_opc_imm(s, OPC_LW, data_reg2, TCG_REG_V0, 4); } break; default: tcg_abort(); } #if defined(CONFIG_SOFTMMU) reloc_pc16(label2_ptr, (tcg_target_long) s->code_ptr); #endif } | 14,550 |
0 | static int ram_save_setup(QEMUFile *f, void *opaque) { RAMBlock *block; bytes_transferred = 0; reset_ram_globals(); if (migrate_use_xbzrle()) { XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() / TARGET_PAGE_SIZE, TARGET_PAGE_SIZE); if (!XBZRLE.cache) { DPRINTF("Error creating cache\n"); return -1; } XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE); XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE); acct_clear(); } /* Make sure all dirty bits are set */ QLIST_FOREACH(block, &ram_list.blocks, next) { migration_bitmap_set_dirty(block->mr, block->length); } memory_global_dirty_log_start(); memory_global_sync_dirty_bitmap(get_system_memory()); qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE); QLIST_FOREACH(block, &ram_list.blocks, next) { qemu_put_byte(f, strlen(block->idstr)); qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr)); qemu_put_be64(f, block->length); } qemu_put_be64(f, RAM_SAVE_FLAG_EOS); return 0; } | 14,551 |
0 | static int pci_pbm_map_irq(PCIDevice *pci_dev, int irq_num) { int bus_offset; if (pci_dev->devfn & 1) bus_offset = 16; else bus_offset = 0; return (bus_offset + (PCI_SLOT(pci_dev->devfn) << 2) + irq_num) & 0x1f; } | 14,552 |
0 | static void xics_reset(void *opaque) { struct icp_state *icp = (struct icp_state *)opaque; struct ics_state *ics = icp->ics; int i; for (i = 0; i < icp->nr_servers; i++) { icp->ss[i].xirr = 0; icp->ss[i].pending_priority = 0; icp->ss[i].mfrr = 0xff; /* Make sure all outputs are deasserted */ qemu_set_irq(icp->ss[i].output, 0); } for (i = 0; i < ics->nr_irqs; i++) { /* Reset everything *except* the type */ ics->irqs[i].server = 0; ics->irqs[i].status = 0; ics->irqs[i].priority = 0xff; ics->irqs[i].saved_priority = 0xff; } } | 14,553
0 | static uint64_t assigned_dev_ioport_rw(AssignedDevRegion *dev_region, target_phys_addr_t addr, int size, uint64_t *data) { uint64_t val = 0; int fd = dev_region->region->resource_fd; if (fd >= 0) { if (data) { DEBUG("pwrite data=%" PRIx64 ", size=%d, e_phys=" TARGET_FMT_plx ", addr="TARGET_FMT_plx"\n", *data, size, addr, addr); if (pwrite(fd, data, size, addr) != size) { error_report("%s - pwrite failed %s", __func__, strerror(errno)); } } else { if (pread(fd, &val, size, addr) != size) { error_report("%s - pread failed %s", __func__, strerror(errno)); val = (1UL << (size * 8)) - 1; } DEBUG("pread val=%" PRIx64 ", size=%d, e_phys=" TARGET_FMT_plx ", addr=" TARGET_FMT_plx "\n", val, size, addr, addr); } } else { uint32_t port = addr + dev_region->u.r_baseport; if (data) { DEBUG("out data=%" PRIx64 ", size=%d, e_phys=" TARGET_FMT_plx ", host=%x\n", *data, size, addr, port); switch (size) { case 1: outb(*data, port); break; case 2: outw(*data, port); break; case 4: outl(*data, port); break; } } else { switch (size) { case 1: val = inb(port); break; case 2: val = inw(port); break; case 4: val = inl(port); break; } DEBUG("in data=%" PRIx64 ", size=%d, e_phys=" TARGET_FMT_plx ", host=%x\n", val, size, addr, port); } } return val; } | 14,554 |
0 | int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) { NbdClientSession *client = nbd_get_client_session(bs); struct nbd_request request = { .type = NBD_CMD_WRITE, .from = offset, .len = bytes, }; struct nbd_reply reply; ssize_t ret; if (flags & BDRV_REQ_FUA) { assert(client->nbdflags & NBD_FLAG_SEND_FUA); request.type |= NBD_CMD_FLAG_FUA; } assert(bytes <= NBD_MAX_BUFFER_SIZE); nbd_coroutine_start(client, &request); ret = nbd_co_send_request(bs, &request, qiov); if (ret < 0) { reply.error = -ret; } else { nbd_co_receive_reply(client, &request, &reply, NULL); } nbd_coroutine_end(client, &request); return -reply.error; } | 14,555 |
0 | static void gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd) { TCGv_i32 r_asi, r_size, r_rd; r_asi = gen_get_asi(dc, insn); r_size = tcg_const_i32(size); r_rd = tcg_const_i32(rd); gen_helper_ldf_asi(cpu_env, addr, r_asi, r_size, r_rd); tcg_temp_free_i32(r_rd); tcg_temp_free_i32(r_size); tcg_temp_free_i32(r_asi); } | 14,558 |