Columns: label (int64, values 0-1) | func1 (string, lengths 23-97k characters) | id (int64, range 0-27.3k)
Each record below lists these three fields in order: label, func1 (the source of one C function), id.
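As a minimal loading sketch, assuming this dump comes from a Hugging Face dataset with the schema above (the identifier "user/dataset-name" is a placeholder, not a confirmed path):

from datasets import load_dataset

# Placeholder dataset path; substitute the actual repository name.
ds = load_dataset("user/dataset-name", split="train")

row = ds[0]
print(row["label"], row["id"])  # the two int64 fields described above
print(row["func1"][:80])        # start of the stored C function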
0
void bdrv_flush_io_queue(BlockDriverState *bs) { BlockDriver *drv = bs->drv; if (drv && drv->bdrv_flush_io_queue) { drv->bdrv_flush_io_queue(bs); } else if (bs->file) { bdrv_flush_io_queue(bs->file); } }
15,438
0
static void rtas_event_log_queue(int log_type, void *data, bool exception) { sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); sPAPREventLogEntry *entry = g_new(sPAPREventLogEntry, 1); g_assert(data); entry->log_type = log_type; entry->exception = exception; entry->data = data; QTAILQ_INSERT_TAIL(&spapr->pending_events, entry, next); }
15,439
0
static int pci_set_default_subsystem_id(PCIDevice *pci_dev) { uint16_t *id; id = (void*)(&pci_dev->config[PCI_SUBSYSTEM_VENDOR_ID]); id[0] = cpu_to_le16(pci_default_sub_vendor_id); id[1] = cpu_to_le16(pci_default_sub_device_id); return 0; }
15,440
0
static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2) { return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)); }
15,441
0
int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx, H264ParamSets *ps, int ignore_truncation) { AVBufferRef *sps_buf; int profile_idc, level_idc, constraint_set_flags = 0; unsigned int sps_id; int i, log2_max_frame_num_minus4; SPS *sps; sps_buf = av_buffer_allocz(sizeof(*sps)); if (!sps_buf) return AVERROR(ENOMEM); sps = (SPS*)sps_buf->data; sps->data_size = gb->buffer_end - gb->buffer; if (sps->data_size > sizeof(sps->data)) { av_log(avctx, AV_LOG_WARNING, "Truncating likely oversized SPS\n"); sps->data_size = sizeof(sps->data); } memcpy(sps->data, gb->buffer, sps->data_size); profile_idc = get_bits(gb, 8); constraint_set_flags |= get_bits1(gb) << 0; // constraint_set0_flag constraint_set_flags |= get_bits1(gb) << 1; // constraint_set1_flag constraint_set_flags |= get_bits1(gb) << 2; // constraint_set2_flag constraint_set_flags |= get_bits1(gb) << 3; // constraint_set3_flag constraint_set_flags |= get_bits1(gb) << 4; // constraint_set4_flag constraint_set_flags |= get_bits1(gb) << 5; // constraint_set5_flag skip_bits(gb, 2); // reserved_zero_2bits level_idc = get_bits(gb, 8); sps_id = get_ue_golomb_31(gb); if (sps_id >= MAX_SPS_COUNT) { av_log(avctx, AV_LOG_ERROR, "sps_id %u out of range\n", sps_id); goto fail; } sps->sps_id = sps_id; sps->time_offset_length = 24; sps->profile_idc = profile_idc; sps->constraint_set_flags = constraint_set_flags; sps->level_idc = level_idc; sps->full_range = -1; memset(sps->scaling_matrix4, 16, sizeof(sps->scaling_matrix4)); memset(sps->scaling_matrix8, 16, sizeof(sps->scaling_matrix8)); sps->scaling_matrix_present = 0; sps->colorspace = 2; //AVCOL_SPC_UNSPECIFIED if (sps->profile_idc == 100 || // High profile sps->profile_idc == 110 || // High10 profile sps->profile_idc == 122 || // High422 profile sps->profile_idc == 244 || // High444 Predictive profile sps->profile_idc == 44 || // Cavlc444 profile sps->profile_idc == 83 || // Scalable Constrained High profile (SVC) sps->profile_idc == 86 || // Scalable High Intra profile (SVC) sps->profile_idc == 118 || // Stereo High profile (MVC) sps->profile_idc == 128 || // Multiview High profile (MVC) sps->profile_idc == 138 || // Multiview Depth High profile (MVCD) sps->profile_idc == 144) { // old High444 profile sps->chroma_format_idc = get_ue_golomb_31(gb); if (sps->chroma_format_idc > 3U) { avpriv_request_sample(avctx, "chroma_format_idc %u", sps->chroma_format_idc); goto fail; } else if (sps->chroma_format_idc == 3) { sps->residual_color_transform_flag = get_bits1(gb); if (sps->residual_color_transform_flag) { av_log(avctx, AV_LOG_ERROR, "separate color planes are not supported\n"); goto fail; } } sps->bit_depth_luma = get_ue_golomb(gb) + 8; sps->bit_depth_chroma = get_ue_golomb(gb) + 8; if (sps->bit_depth_chroma != sps->bit_depth_luma) { avpriv_request_sample(avctx, "Different chroma and luma bit depth"); goto fail; } if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 || sps->bit_depth_chroma < 8 || sps->bit_depth_chroma > 14) { av_log(avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n", sps->bit_depth_luma, sps->bit_depth_chroma); goto fail; } sps->transform_bypass = get_bits1(gb); sps->scaling_matrix_present |= decode_scaling_matrices(gb, sps, NULL, 1, sps->scaling_matrix4, sps->scaling_matrix8); } else { sps->chroma_format_idc = 1; sps->bit_depth_luma = 8; sps->bit_depth_chroma = 8; } log2_max_frame_num_minus4 = get_ue_golomb(gb); if (log2_max_frame_num_minus4 < MIN_LOG2_MAX_FRAME_NUM - 4 || log2_max_frame_num_minus4 > MAX_LOG2_MAX_FRAME_NUM - 4) { av_log(avctx, 
AV_LOG_ERROR, "log2_max_frame_num_minus4 out of range (0-12): %d\n", log2_max_frame_num_minus4); goto fail; } sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4; sps->poc_type = get_ue_golomb_31(gb); if (sps->poc_type == 0) { // FIXME #define unsigned t = get_ue_golomb(gb); if (t>12) { av_log(avctx, AV_LOG_ERROR, "log2_max_poc_lsb (%d) is out of range\n", t); goto fail; } sps->log2_max_poc_lsb = t + 4; } else if (sps->poc_type == 1) { // FIXME #define sps->delta_pic_order_always_zero_flag = get_bits1(gb); sps->offset_for_non_ref_pic = get_se_golomb(gb); sps->offset_for_top_to_bottom_field = get_se_golomb(gb); sps->poc_cycle_length = get_ue_golomb(gb); if ((unsigned)sps->poc_cycle_length >= FF_ARRAY_ELEMS(sps->offset_for_ref_frame)) { av_log(avctx, AV_LOG_ERROR, "poc_cycle_length overflow %d\n", sps->poc_cycle_length); goto fail; } for (i = 0; i < sps->poc_cycle_length; i++) sps->offset_for_ref_frame[i] = get_se_golomb(gb); } else if (sps->poc_type != 2) { av_log(avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type); goto fail; } sps->ref_frame_count = get_ue_golomb_31(gb); if (avctx->codec_tag == MKTAG('S', 'M', 'V', '2')) sps->ref_frame_count = FFMAX(2, sps->ref_frame_count); if (sps->ref_frame_count > H264_MAX_PICTURE_COUNT - 2 || sps->ref_frame_count > 16U) { av_log(avctx, AV_LOG_ERROR, "too many reference frames %d\n", sps->ref_frame_count); goto fail; } sps->gaps_in_frame_num_allowed_flag = get_bits1(gb); sps->mb_width = get_ue_golomb(gb) + 1; sps->mb_height = get_ue_golomb(gb) + 1; if ((unsigned)sps->mb_width >= INT_MAX / 16 || (unsigned)sps->mb_height >= INT_MAX / 16 || av_image_check_size(16 * sps->mb_width, 16 * sps->mb_height, 0, avctx)) { av_log(avctx, AV_LOG_ERROR, "mb_width/height overflow\n"); goto fail; } sps->frame_mbs_only_flag = get_bits1(gb); if (!sps->frame_mbs_only_flag) sps->mb_aff = get_bits1(gb); else sps->mb_aff = 0; sps->direct_8x8_inference_flag = get_bits1(gb); #ifndef ALLOW_INTERLACE if (sps->mb_aff) av_log(avctx, AV_LOG_ERROR, "MBAFF support not included; enable it at compile-time.\n"); #endif sps->crop = get_bits1(gb); if (sps->crop) { unsigned int crop_left = get_ue_golomb(gb); unsigned int crop_right = get_ue_golomb(gb); unsigned int crop_top = get_ue_golomb(gb); unsigned int crop_bottom = get_ue_golomb(gb); int width = 16 * sps->mb_width; int height = 16 * sps->mb_height * (2 - sps->frame_mbs_only_flag); if (avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) { av_log(avctx, AV_LOG_DEBUG, "discarding sps cropping, original " "values are l:%d r:%d t:%d b:%d\n", crop_left, crop_right, crop_top, crop_bottom); sps->crop_left = sps->crop_right = sps->crop_top = sps->crop_bottom = 0; } else { int vsub = (sps->chroma_format_idc == 1) ? 1 : 0; int hsub = (sps->chroma_format_idc == 1 || sps->chroma_format_idc == 2) ? 
1 : 0; int step_x = 1 << hsub; int step_y = (2 - sps->frame_mbs_only_flag) << vsub; if (crop_left & (0x1F >> (sps->bit_depth_luma > 8)) && !(avctx->flags & AV_CODEC_FLAG_UNALIGNED)) { crop_left &= ~(0x1F >> (sps->bit_depth_luma > 8)); av_log(avctx, AV_LOG_WARNING, "Reducing left cropping to %d " "chroma samples to preserve alignment.\n", crop_left); } if (crop_left > (unsigned)INT_MAX / 4 / step_x || crop_right > (unsigned)INT_MAX / 4 / step_x || crop_top > (unsigned)INT_MAX / 4 / step_y || crop_bottom> (unsigned)INT_MAX / 4 / step_y || (crop_left + crop_right ) * step_x >= width || (crop_top + crop_bottom) * step_y >= height ) { av_log(avctx, AV_LOG_ERROR, "crop values invalid %d %d %d %d / %d %d\n", crop_left, crop_right, crop_top, crop_bottom, width, height); goto fail; } sps->crop_left = crop_left * step_x; sps->crop_right = crop_right * step_x; sps->crop_top = crop_top * step_y; sps->crop_bottom = crop_bottom * step_y; } } else { sps->crop_left = sps->crop_right = sps->crop_top = sps->crop_bottom = sps->crop = 0; } sps->vui_parameters_present_flag = get_bits1(gb); if (sps->vui_parameters_present_flag) { int ret = decode_vui_parameters(gb, avctx, sps); if (ret < 0) goto fail; } if (get_bits_left(gb) < 0) { av_log(avctx, ignore_truncation ? AV_LOG_WARNING : AV_LOG_ERROR, "Overread %s by %d bits\n", sps->vui_parameters_present_flag ? "VUI" : "SPS", -get_bits_left(gb)); if (!ignore_truncation) goto fail; } /* if the maximum delay is not stored in the SPS, derive it based on the * level */ if (!sps->bitstream_restriction_flag) { sps->num_reorder_frames = MAX_DELAYED_PIC_COUNT - 1; for (i = 0; i < FF_ARRAY_ELEMS(level_max_dpb_mbs); i++) { if (level_max_dpb_mbs[i][0] == sps->level_idc) { sps->num_reorder_frames = FFMIN(level_max_dpb_mbs[i][1] / (sps->mb_width * sps->mb_height), sps->num_reorder_frames); break; } } } if (!sps->sar.den) sps->sar.den = 1; if (avctx->debug & FF_DEBUG_PICT_INFO) { static const char csp[4][5] = { "Gray", "420", "422", "444" }; av_log(avctx, AV_LOG_DEBUG, "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%u/%u/%u/%u %s %s %"PRId32"/%"PRId32" b%d reo:%d\n", sps_id, sps->profile_idc, sps->level_idc, sps->poc_type, sps->ref_frame_count, sps->mb_width, sps->mb_height, sps->frame_mbs_only_flag ? "FRM" : (sps->mb_aff ? "MB-AFF" : "PIC-AFF"), sps->direct_8x8_inference_flag ? "8B8" : "", sps->crop_left, sps->crop_right, sps->crop_top, sps->crop_bottom, sps->vui_parameters_present_flag ? "VUI" : "", csp[sps->chroma_format_idc], sps->timing_info_present_flag ? sps->num_units_in_tick : 0, sps->timing_info_present_flag ? sps->time_scale : 0, sps->bit_depth_luma, sps->bitstream_restriction_flag ? sps->num_reorder_frames : -1 ); } /* check if this is a repeat of an already parsed SPS, then keep the * original one. * otherwise drop all PPSes that depend on it */ if (ps->sps_list[sps_id] && !memcmp(ps->sps_list[sps_id]->data, sps_buf->data, sps_buf->size)) { av_buffer_unref(&sps_buf); } else { remove_sps(ps, sps_id); ps->sps_list[sps_id] = sps_buf; } return 0; fail: av_buffer_unref(&sps_buf); return AVERROR_INVALIDDATA; }
15,442
0
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, hwaddr length) { if (cpu_physical_memory_range_includes_clean(addr, length)) { uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr); if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { tb_invalidate_phys_range(addr, addr + length); dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); } cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); } else { xen_modified_memory(addr, length); } }
15,443
0
void ich9_lpc_pm_init(PCIDevice *lpc_pci) { ICH9LPCState *lpc = ICH9_LPC_DEVICE(lpc_pci); ich9_pm_init(lpc_pci, &lpc->pm, qemu_allocate_irq(ich9_set_sci, lpc, 0)); ich9_lpc_reset(&lpc->d.qdev); }
15,444
0
static void term_show_prompt(void) { term_show_prompt2(); term_cmd_buf_index = 0; term_cmd_buf_size = 0; }
15,446
0
static int vfio_add_capabilities(VFIOPCIDevice *vdev) { PCIDevice *pdev = &vdev->pdev; int ret; if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) || !pdev->config[PCI_CAPABILITY_LIST]) { return 0; /* Nothing to add */ } ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST]); if (ret) { return ret; } /* on PCI bus, it doesn't make sense to expose extended capabilities. */ if (!pci_is_express(pdev) || !pci_bus_is_express(pdev->bus) || !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) { return 0; } return vfio_add_ext_cap(vdev); }
15,447
0
static void test_qemu_strtosz_units(void) { const char *none = "1"; const char *b = "1B"; const char *k = "1K"; const char *m = "1M"; const char *g = "1G"; const char *t = "1T"; const char *p = "1P"; const char *e = "1E"; char *endptr = NULL; int64_t res; /* default is M */ res = qemu_strtosz_MiB(none, &endptr); g_assert_cmpint(res, ==, M_BYTE); g_assert(endptr == none + 1); res = qemu_strtosz(b, &endptr); g_assert_cmpint(res, ==, 1); g_assert(endptr == b + 2); res = qemu_strtosz(k, &endptr); g_assert_cmpint(res, ==, K_BYTE); g_assert(endptr == k + 2); res = qemu_strtosz(m, &endptr); g_assert_cmpint(res, ==, M_BYTE); g_assert(endptr == m + 2); res = qemu_strtosz(g, &endptr); g_assert_cmpint(res, ==, G_BYTE); g_assert(endptr == g + 2); res = qemu_strtosz(t, &endptr); g_assert_cmpint(res, ==, T_BYTE); g_assert(endptr == t + 2); res = qemu_strtosz(p, &endptr); g_assert_cmpint(res, ==, P_BYTE); g_assert(endptr == p + 2); res = qemu_strtosz(e, &endptr); g_assert_cmpint(res, ==, E_BYTE); g_assert(endptr == e + 2); }
15,448
0
static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, int sign, int compute_ov) { int l1 = gen_new_label(); int l2 = gen_new_label(); tcg_gen_brcondi_i64(TCG_COND_EQ, arg2, 0, l1); if (sign) { int l3 = gen_new_label(); tcg_gen_brcondi_i64(TCG_COND_NE, arg2, -1, l3); tcg_gen_brcondi_i64(TCG_COND_EQ, arg1, INT64_MIN, l1); gen_set_label(l3); tcg_gen_div_i64(ret, arg1, arg2); } else { tcg_gen_divu_i64(ret, arg1, arg2); } if (compute_ov) { tcg_gen_movi_tl(cpu_ov, 0); } tcg_gen_br(l2); gen_set_label(l1); if (sign) { tcg_gen_sari_i64(ret, arg1, 63); } else { tcg_gen_movi_i64(ret, 0); } if (compute_ov) { tcg_gen_movi_tl(cpu_ov, 1); tcg_gen_movi_tl(cpu_so, 1); } gen_set_label(l2); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, ret); }
15,450
0
static int os_host_main_loop_wait(int timeout) { int ret, i; PollingEntry *pe; WaitObjects *w = &wait_objects; static struct timeval tv0; /* XXX: need to suppress polling by better using win32 events */ ret = 0; for (pe = first_polling_entry; pe != NULL; pe = pe->next) { ret |= pe->func(pe->opaque); } if (ret != 0) { return ret; } if (nfds >= 0) { ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0); if (ret != 0) { timeout = 0; } } for (i = 0; i < w->num; i++) { poll_fds[i].fd = (DWORD) w->events[i]; poll_fds[i].events = G_IO_IN; } qemu_mutex_unlock_iothread(); ret = g_poll(poll_fds, w->num, timeout); qemu_mutex_lock_iothread(); if (ret > 0) { for (i = 0; i < w->num; i++) { w->revents[i] = poll_fds[i].revents; } for (i = 0; i < w->num; i++) { if (w->revents[i] && w->func[i]) { w->func[i](w->opaque[i]); } } } /* If an edge-triggered socket event occurred, select will return a * positive result on the next iteration. We do not need to do anything * here. */ return ret; }
15,451
0
static int asf_write_header(AVFormatContext *s) { ASFContext *asf = s->priv_data; asf->packet_size = PACKET_SIZE; asf->nb_packets = 0; asf->last_indexed_pts = 0; asf->index_ptr = (ASFIndex*)av_malloc( sizeof(ASFIndex) * ASF_INDEX_BLOCK ); asf->nb_index_memory_alloc = ASF_INDEX_BLOCK; asf->nb_index_count = 0; asf->maximum_packet = 0; /* the data-chunk-size has to be 50, which is data_size - asf->data_offset * at the moment this function is done. It is needed to use asf as * streamable format. */ if (asf_write_header1(s, 0, 50) < 0) { //av_free(asf); return -1; } put_flush_packet(s->pb); asf->packet_nb_payloads = 0; asf->packet_timestamp_start = -1; asf->packet_timestamp_end = -1; init_put_byte(&asf->pb, asf->packet_buf, asf->packet_size, 1, NULL, NULL, NULL, NULL); return 0; }
15,452
0
static int do_syscall(CPUState *env, struct kqemu_cpu_state *kenv) { int selector; selector = (env->star >> 32) & 0xffff; #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { int code64; env->regs[R_ECX] = kenv->next_eip; env->regs[11] = env->eflags; code64 = env->hflags & HF_CS64_MASK; cpu_x86_set_cpl(env, 0); cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, 0, 0xffffffff, DESC_G_MASK | DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK); cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); env->eflags &= ~env->fmask; if (code64) env->eip = env->lstar; else env->eip = env->cstar; } else #endif { env->regs[R_ECX] = (uint32_t)kenv->next_eip; cpu_x86_set_cpl(env, 0); cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK); env->eip = (uint32_t)env->star; } return 2; }
15,453
0
gdb_handlesig(CPUState *cpu, int sig) { GDBState *s; char buf[256]; int n; s = gdbserver_state; if (gdbserver_fd < 0 || s->fd < 0) { return sig; } /* disable single step if it was enabled */ cpu_single_step(cpu, 0); tb_flush(cpu); if (sig != 0) { snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb(sig)); put_packet(s, buf); } /* put_packet() might have detected that the peer terminated the connection. */ if (s->fd < 0) { return sig; } sig = 0; s->state = RS_IDLE; s->running_state = 0; while (s->running_state == 0) { n = read(s->fd, buf, 256); if (n > 0) { int i; for (i = 0; i < n; i++) { gdb_read_byte(s, buf[i]); } } else if (n == 0 || errno != EAGAIN) { /* XXX: Connection closed. Should probably wait for another connection before continuing. */ return sig; } } sig = s->signal; s->signal = 0; return sig; }
15,454
0
void xen_map_cache_init(void) { unsigned long size; struct rlimit rlimit_as; mapcache = g_malloc0(sizeof (MapCache)); QTAILQ_INIT(&mapcache->locked_entries); mapcache->last_address_index = -1; if (geteuid() == 0) { rlimit_as.rlim_cur = RLIM_INFINITY; rlimit_as.rlim_max = RLIM_INFINITY; mapcache->max_mcache_size = MCACHE_MAX_SIZE; } else { getrlimit(RLIMIT_AS, &rlimit_as); rlimit_as.rlim_cur = rlimit_as.rlim_max; if (rlimit_as.rlim_max != RLIM_INFINITY) { fprintf(stderr, "Warning: QEMU's maximum size of virtual" " memory is not infinity.\n"); } if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) { mapcache->max_mcache_size = rlimit_as.rlim_max - NON_MCACHE_MEMORY_SIZE; } else { mapcache->max_mcache_size = MCACHE_MAX_SIZE; } } setrlimit(RLIMIT_AS, &rlimit_as); mapcache->nr_buckets = (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) + (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >> (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)); size = mapcache->nr_buckets * sizeof (MapCacheEntry); size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1); DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__, mapcache->nr_buckets, size); mapcache->entry = g_malloc0(size); }
15,455
0
static void nvdimm_build_nfit(AcpiNVDIMMState *state, GArray *table_offsets, GArray *table_data, BIOSLinker *linker) { NvdimmFitBuffer *fit_buf = &state->fit_buf; unsigned int header; /* NVDIMM device is not plugged? */ if (!fit_buf->fit->len) { return; } acpi_add_table(table_offsets, table_data); /* NFIT header. */ header = table_data->len; acpi_data_push(table_data, sizeof(NvdimmNfitHeader)); /* NVDIMM device structures. */ g_array_append_vals(table_data, fit_buf->fit->data, fit_buf->fit->len); build_header(linker, table_data, (void *)(table_data->data + header), "NFIT", sizeof(NvdimmNfitHeader) + fit_buf->fit->len, 1, NULL, NULL); }
15,456
0
static inline uint32_t efsctui(uint32_t val) { CPU_FloatU u; u.l = val; /* NaN are not treated the same way IEEE 754 does */ if (unlikely(float32_is_nan(u.f))) return 0; return float32_to_uint32(u.f, &env->vec_status); }
15,459
0
BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, unsigned long int req, void *buf, BlockCompletionFunc *cb, void *opaque) { BlockDriver *drv = bs->drv; if (drv && drv->bdrv_aio_ioctl) return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque); return NULL; }
15,460
0
static int lsi_scsi_init(PCIDevice *dev) { LSIState *s = DO_UPCAST(LSIState, dev, dev); uint8_t *pci_conf; pci_conf = s->dev.config; /* PCI Vendor ID (word) */ pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_LSI_LOGIC); /* PCI device ID (word) */ pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_LSI_53C895A); /* PCI base class code */ pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_SCSI); /* PCI subsystem ID */ pci_conf[PCI_SUBSYSTEM_ID] = 0x00; pci_conf[PCI_SUBSYSTEM_ID + 1] = 0x10; /* PCI latency timer = 255 */ pci_conf[PCI_LATENCY_TIMER] = 0xff; /* TODO: RST# value should be 0 */ /* Interrupt pin 1 */ pci_conf[PCI_INTERRUPT_PIN] = 0x01; s->mmio_io_addr = cpu_register_io_memory(lsi_mmio_readfn, lsi_mmio_writefn, s, DEVICE_NATIVE_ENDIAN); s->ram_io_addr = cpu_register_io_memory(lsi_ram_readfn, lsi_ram_writefn, s, DEVICE_NATIVE_ENDIAN); pci_register_bar(&s->dev, 0, 256, PCI_BASE_ADDRESS_SPACE_IO, lsi_io_mapfunc); pci_register_bar_simple(&s->dev, 1, 0x400, 0, s->mmio_io_addr); pci_register_bar(&s->dev, 2, 0x2000, PCI_BASE_ADDRESS_SPACE_MEMORY, lsi_ram_mapfunc); QTAILQ_INIT(&s->queue); scsi_bus_new(&s->bus, &dev->qdev, 1, LSI_MAX_DEVS, lsi_command_complete); if (!dev->qdev.hotplugged) { return scsi_bus_legacy_handle_cmdline(&s->bus); } return 0; }
15,461
0
void cpu_physical_memory_write_rom(hwaddr addr, const uint8_t *buf, int len) { hwaddr l; uint8_t *ptr; hwaddr addr1; MemoryRegion *mr; while (len > 0) { l = len; mr = address_space_translate(&address_space_memory, addr, &addr1, &l, true); if (!(memory_region_is_ram(mr) || memory_region_is_romd(mr))) { /* do nothing */ } else { addr1 += memory_region_get_ram_addr(mr); /* ROM/RAM case */ ptr = qemu_get_ram_ptr(addr1); memcpy(ptr, buf, l); invalidate_and_set_dirty(addr1, l); } len -= l; buf += l; addr += l; } }
15,462
0
static int decode_cabac_mb_type( H264Context *h ) { MpegEncContext * const s = &h->s; if( h->slice_type == I_TYPE ) { return decode_cabac_intra_mb_type(h, 3, 1); } else if( h->slice_type == P_TYPE ) { if( get_cabac( &h->cabac, &h->cabac_state[14] ) == 0 ) { /* P-type */ if( get_cabac( &h->cabac, &h->cabac_state[15] ) == 0 ) { /* P_L0_D16x16, P_8x8 */ return 3 * get_cabac( &h->cabac, &h->cabac_state[16] ); } else { /* P_L0_D8x16, P_L0_D16x8 */ return 2 - get_cabac( &h->cabac, &h->cabac_state[17] ); } } else { return decode_cabac_intra_mb_type(h, 17, 0) + 5; } } else if( h->slice_type == B_TYPE ) { const int mba_xy = h->left_mb_xy[0]; const int mbb_xy = h->top_mb_xy; int ctx = 0; int bits; if( h->slice_table[mba_xy] == h->slice_num && !IS_DIRECT( s->current_picture.mb_type[mba_xy] ) ) ctx++; if( h->slice_table[mbb_xy] == h->slice_num && !IS_DIRECT( s->current_picture.mb_type[mbb_xy] ) ) ctx++; if( !get_cabac( &h->cabac, &h->cabac_state[27+ctx] ) ) return 0; /* B_Direct_16x16 */ if( !get_cabac( &h->cabac, &h->cabac_state[27+3] ) ) { return 1 + get_cabac( &h->cabac, &h->cabac_state[27+5] ); /* B_L[01]_16x16 */ } bits = get_cabac( &h->cabac, &h->cabac_state[27+4] ) << 3; bits|= get_cabac( &h->cabac, &h->cabac_state[27+5] ) << 2; bits|= get_cabac( &h->cabac, &h->cabac_state[27+5] ) << 1; bits|= get_cabac( &h->cabac, &h->cabac_state[27+5] ); if( bits < 8 ) return bits + 3; /* B_Bi_16x16 through B_L1_L0_16x8 */ else if( bits == 13 ) { return decode_cabac_intra_mb_type(h, 32, 0) + 23; } else if( bits == 14 ) return 11; /* B_L1_L0_8x16 */ else if( bits == 15 ) return 22; /* B_8x8 */ bits= ( bits<<1 ) | get_cabac( &h->cabac, &h->cabac_state[27+5] ); return bits - 4; /* B_L0_Bi_* through B_Bi_Bi_* */ } else { /* TODO SI/SP frames? */ return -1; } }
15,464
0
static int ehci_state_fetchentry(EHCIState *ehci, int async) { int again = 0; uint32_t entry = ehci_get_fetch_addr(ehci, async); if (entry < 0x1000) { DPRINTF("fetchentry: entry invalid (0x%08x)\n", entry); ehci_set_state(ehci, async, EST_ACTIVE); goto out; } /* section 4.8, only QH in async schedule */ if (async && (NLPTR_TYPE_GET(entry) != NLPTR_TYPE_QH)) { fprintf(stderr, "non queue head request in async schedule\n"); return -1; } switch (NLPTR_TYPE_GET(entry)) { case NLPTR_TYPE_QH: ehci_set_state(ehci, async, EST_FETCHQH); again = 1; break; case NLPTR_TYPE_ITD: ehci_set_state(ehci, async, EST_FETCHITD); again = 1; break; case NLPTR_TYPE_STITD: ehci_set_state(ehci, async, EST_FETCHSITD); again = 1; break; default: /* TODO: handle FSTN type */ fprintf(stderr, "FETCHENTRY: entry at %X is of type %d " "which is not supported yet\n", entry, NLPTR_TYPE_GET(entry)); return -1; } out: return again; }
15,465
0
static int ssi_slave_init(DeviceState *dev, DeviceInfo *base_info) { SSISlaveInfo *info = container_of(base_info, SSISlaveInfo, qdev); SSISlave *s = SSI_SLAVE_FROM_QDEV(dev); SSIBus *bus; bus = FROM_QBUS(SSIBus, qdev_get_parent_bus(dev)); if (LIST_FIRST(&bus->qbus.children) != dev || LIST_NEXT(dev, sibling) != NULL) { hw_error("Too many devices on SSI bus"); } s->info = info; return info->init(s); }
15,466
0
struct pxa2xx_state_s *pxa255_init(unsigned int sdram_size, DisplayState *ds) { struct pxa2xx_state_s *s; struct pxa2xx_ssp_s *ssp; int iomemtype, i; s = (struct pxa2xx_state_s *) qemu_mallocz(sizeof(struct pxa2xx_state_s)); s->env = cpu_init(); cpu_arm_set_model(s->env, "pxa255"); register_savevm("cpu", 0, 0, cpu_save, cpu_load, s->env); /* SDRAM & Internal Memory Storage */ cpu_register_physical_memory(PXA2XX_SDRAM_BASE, sdram_size, qemu_ram_alloc(sdram_size) | IO_MEM_RAM); cpu_register_physical_memory(PXA2XX_INTERNAL_BASE, PXA2XX_INTERNAL_SIZE, qemu_ram_alloc(PXA2XX_INTERNAL_SIZE) | IO_MEM_RAM); s->pic = pxa2xx_pic_init(0x40d00000, s->env); s->dma = pxa255_dma_init(0x40000000, s->pic[PXA2XX_PIC_DMA]); pxa25x_timer_init(0x40a00000, &s->pic[PXA2XX_PIC_OST_0]); s->gpio = pxa2xx_gpio_init(0x40e00000, s->env, s->pic, 85); s->mmc = pxa2xx_mmci_init(0x41100000, s->pic[PXA2XX_PIC_MMC], s->dma); for (i = 0; pxa255_serial[i].io_base; i ++) if (serial_hds[i]) serial_mm_init(pxa255_serial[i].io_base, 2, s->pic[pxa255_serial[i].irqn], serial_hds[i], 1); else break; if (serial_hds[i]) s->fir = pxa2xx_fir_init(0x40800000, s->pic[PXA2XX_PIC_ICP], s->dma, serial_hds[i]); if (ds) s->lcd = pxa2xx_lcdc_init(0x44000000, s->pic[PXA2XX_PIC_LCD], ds); s->cm_base = 0x41300000; s->cm_regs[CCCR >> 4] = 0x02000210; /* 416.0 MHz */ s->clkcfg = 0x00000009; /* Turbo mode active */ iomemtype = cpu_register_io_memory(0, pxa2xx_cm_readfn, pxa2xx_cm_writefn, s); cpu_register_physical_memory(s->cm_base, 0xfff, iomemtype); register_savevm("pxa2xx_cm", 0, 0, pxa2xx_cm_save, pxa2xx_cm_load, s); cpu_arm_set_cp_io(s->env, 14, pxa2xx_cp14_read, pxa2xx_cp14_write, s); s->mm_base = 0x48000000; s->mm_regs[MDMRS >> 2] = 0x00020002; s->mm_regs[MDREFR >> 2] = 0x03ca4000; s->mm_regs[MECR >> 2] = 0x00000001; /* Two PC Card sockets */ iomemtype = cpu_register_io_memory(0, pxa2xx_mm_readfn, pxa2xx_mm_writefn, s); cpu_register_physical_memory(s->mm_base, 0xfff, iomemtype); register_savevm("pxa2xx_mm", 0, 0, pxa2xx_mm_save, pxa2xx_mm_load, s); for (i = 0; pxa255_ssp[i].io_base; i ++); s->ssp = (struct pxa2xx_ssp_s **) qemu_mallocz(sizeof(struct pxa2xx_ssp_s *) * i); ssp = (struct pxa2xx_ssp_s *) qemu_mallocz(sizeof(struct pxa2xx_ssp_s) * i); for (i = 0; pxa255_ssp[i].io_base; i ++) { s->ssp[i] = &ssp[i]; ssp[i].base = pxa255_ssp[i].io_base; ssp[i].irq = s->pic[pxa255_ssp[i].irqn]; iomemtype = cpu_register_io_memory(0, pxa2xx_ssp_readfn, pxa2xx_ssp_writefn, &ssp[i]); cpu_register_physical_memory(ssp[i].base, 0xfff, iomemtype); register_savevm("pxa2xx_ssp", i, 0, pxa2xx_ssp_save, pxa2xx_ssp_load, s); } if (usb_enabled) { usb_ohci_init_pxa(0x4c000000, 3, -1, s->pic[PXA2XX_PIC_USBH1]); } s->pcmcia[0] = pxa2xx_pcmcia_init(0x20000000); s->pcmcia[1] = pxa2xx_pcmcia_init(0x30000000); s->rtc_base = 0x40900000; iomemtype = cpu_register_io_memory(0, pxa2xx_rtc_readfn, pxa2xx_rtc_writefn, s); cpu_register_physical_memory(s->rtc_base, 0xfff, iomemtype); pxa2xx_rtc_init(s); register_savevm("pxa2xx_rtc", 0, 0, pxa2xx_rtc_save, pxa2xx_rtc_load, s); /* Note that PM registers are in the same page with PWRI2C registers. * As a workaround we don't map PWRI2C into memory and we expect * PM handlers to call PWRI2C handlers when appropriate. 
*/ s->i2c[0] = pxa2xx_i2c_init(0x40301600, s->pic[PXA2XX_PIC_I2C], 1); s->i2c[1] = pxa2xx_i2c_init(0x40f00100, s->pic[PXA2XX_PIC_PWRI2C], 0); s->pm_base = 0x40f00000; iomemtype = cpu_register_io_memory(0, pxa2xx_pm_readfn, pxa2xx_pm_writefn, s); cpu_register_physical_memory(s->pm_base, 0xfff, iomemtype); register_savevm("pxa2xx_pm", 0, 0, pxa2xx_pm_save, pxa2xx_pm_load, s); s->i2s = pxa2xx_i2s_init(0x40400000, s->pic[PXA2XX_PIC_I2S], s->dma); /* GPIO1 resets the processor */ /* The handler can be overriden by board-specific code */ pxa2xx_gpio_handler_set(s->gpio, 1, pxa2xx_reset, s); return s; }
15,467
0
void stq_le_phys(target_phys_addr_t addr, uint64_t val) { val = cpu_to_le64(val); cpu_physical_memory_write(addr, &val, 8); }
15,468
0
static int arm946_prbs_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { if (ri->crm >= 8) { return EXCP_UDEF; } env->cp15.c6_region[ri->crm] = value; return 0; }
15,469
0
static int64_t get_clock(void) { #if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \ || defined(__DragonFly__) if (use_rt_clock) { struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); return ts.tv_sec * 1000000000LL + ts.tv_nsec; } else #endif { /* XXX: using gettimeofday leads to problems if the date changes, so it should be avoided. */ struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000); } }
15,470
0
static void setup_frame(int sig, struct emulated_sigaction * ka, target_sigset_t *set, CPUState *regs) { struct sigframe *frame; abi_ulong frame_addr; int i; frame_addr = get_sigframe(ka, regs, sizeof(*frame)); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) goto give_sigsegv; install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); if(setup_sigcontext(regs, &frame->sf_sc)) goto give_sigsegv; for(i = 0; i < TARGET_NSIG_WORDS; i++) { if(__put_user(set->sig[i], &frame->sf_mask.sig[i])) goto give_sigsegv; } /* * Arguments to signal handler: * * a0 = signal number * a1 = 0 (should be cause) * a2 = pointer to struct sigcontext * * $25 and PC point to the signal handler, $29 points to the * struct sigframe. */ regs->gpr[ 4][regs->current_tc] = sig; regs->gpr[ 5][regs->current_tc] = 0; regs->gpr[ 6][regs->current_tc] = h2g(&frame->sf_sc); regs->gpr[29][regs->current_tc] = h2g(frame); regs->gpr[31][regs->current_tc] = h2g(frame->sf_code); /* The original kernel code sets CP0_EPC to the handler * since it returns to userland using eret * we cannot do this here, and we must set PC directly */ regs->PC[regs->current_tc] = regs->gpr[25][regs->current_tc] = ka->sa._sa_handler; unlock_user_struct(frame, frame_addr, 1); return; give_sigsegv: unlock_user_struct(frame, frame_addr, 1); force_sig(TARGET_SIGSEGV/*, current*/); return; }
15,471
0
static void vmsvga_text_update(void *opaque, console_ch_t *chardata) { struct vmsvga_state_s *s = opaque; if (s->vga.text_update) s->vga.text_update(&s->vga, chardata); }
15,473
0
static int send_response(GAState *s, QObject *payload) { const char *buf; QString *payload_qstr; GIOStatus status; g_assert(payload && s->channel); payload_qstr = qobject_to_json(payload); if (!payload_qstr) { return -EINVAL; } qstring_append_chr(payload_qstr, '\n'); buf = qstring_get_str(payload_qstr); status = ga_channel_write_all(s->channel, buf, strlen(buf)); QDECREF(payload_qstr); if (status != G_IO_STATUS_NORMAL) { return -EIO; } return 0; }
15,474
0
float64 HELPER(ucf64_addd)(float64 a, float64 b, CPUUniCore32State *env) { return float64_add(a, b, &env->ucf64.fp_status); }
15,475
0
static void term_exit(void) { tcsetattr (0, TCSANOW, &oldtty); }
15,477
0
void timer_mod_ns(QEMUTimer *ts, int64_t expire_time) { QEMUTimerList *timer_list = ts->timer_list; bool rearm; qemu_mutex_lock(&timer_list->active_timers_lock); timer_del_locked(timer_list, ts); rearm = timer_mod_ns_locked(timer_list, ts, expire_time); qemu_mutex_unlock(&timer_list->active_timers_lock); if (rearm) { timerlist_rearm(timer_list); } }
15,478
0
static int convert_do_copy(ImgConvertState *s) { uint8_t *buf = NULL; int64_t sector_num, allocated_done; int ret; int n; /* Check whether we have zero initialisation or can get it efficiently */ s->has_zero_init = s->min_sparse && !s->target_has_backing ? bdrv_has_zero_init(blk_bs(s->target)) : false; if (!s->has_zero_init && !s->target_has_backing && bdrv_can_write_zeroes_with_unmap(blk_bs(s->target))) { ret = bdrv_make_zero(blk_bs(s->target), BDRV_REQ_MAY_UNMAP); if (ret == 0) { s->has_zero_init = true; } } /* Allocate buffer for copied data. For compressed images, only one cluster * can be copied at a time. */ if (s->compressed) { if (s->cluster_sectors <= 0 || s->cluster_sectors > s->buf_sectors) { error_report("invalid cluster size"); ret = -EINVAL; goto fail; } s->buf_sectors = s->cluster_sectors; } buf = blk_blockalign(s->target, s->buf_sectors * BDRV_SECTOR_SIZE); /* Calculate allocated sectors for progress */ s->allocated_sectors = 0; sector_num = 0; while (sector_num < s->total_sectors) { n = convert_iteration_sectors(s, sector_num); if (n < 0) { ret = n; goto fail; } if (s->status == BLK_DATA) { s->allocated_sectors += n; } sector_num += n; } /* Do the copy */ s->src_cur = 0; s->src_cur_offset = 0; s->sector_next_status = 0; sector_num = 0; allocated_done = 0; while (sector_num < s->total_sectors) { n = convert_iteration_sectors(s, sector_num); if (n < 0) { ret = n; goto fail; } if (s->status == BLK_DATA) { allocated_done += n; qemu_progress_print(100.0 * allocated_done / s->allocated_sectors, 0); } ret = convert_read(s, sector_num, n, buf); if (ret < 0) { error_report("error while reading sector %" PRId64 ": %s", sector_num, strerror(-ret)); goto fail; } ret = convert_write(s, sector_num, n, buf); if (ret < 0) { error_report("error while writing sector %" PRId64 ": %s", sector_num, strerror(-ret)); goto fail; } sector_num += n; } if (s->compressed) { /* signal EOF to align */ ret = blk_write_compressed(s->target, 0, NULL, 0); if (ret < 0) { goto fail; } } ret = 0; fail: qemu_vfree(buf); return ret; }
15,479
0
static void test_visitor_in_native_list_uint16(TestInputVisitorData *data, const void *unused) { test_native_list_integer_helper(data, unused, USER_DEF_NATIVE_LIST_UNION_KIND_U16); }
15,480
0
static void validate_bootdevices(char *devices) { /* We just do some generic consistency checks */ const char *p; int bitmap = 0; for (p = devices; *p != '\0'; p++) { /* Allowed boot devices are: * a-b: floppy disk drives * c-f: IDE disk drives * g-m: machine implementation dependent drives * n-p: network devices * It's up to each machine implementation to check if the given boot * devices match the actual hardware implementation and firmware * features. */ if (*p < 'a' || *p > 'p') { fprintf(stderr, "Invalid boot device '%c'\n", *p); exit(1); } if (bitmap & (1 << (*p - 'a'))) { fprintf(stderr, "Boot device '%c' was given twice\n", *p); exit(1); } bitmap |= 1 << (*p - 'a'); } }
15,481
0
void s390_add_running_cpu(S390CPU *cpu) { CPUState *cs = CPU(cpu); if (cs->halted) { s390_running_cpus++; cs->halted = 0; cs->exception_index = -1; } }
15,483
0
static int usb_audio_handle_data(USBDevice *dev, USBPacket *p) { USBAudioState *s = (USBAudioState *) dev; int ret = 0; switch (p->pid) { case USB_TOKEN_OUT: switch (p->devep) { case 1: ret = usb_audio_handle_dataout(s, p); break; default: goto fail; } break; default: fail: ret = USB_RET_STALL; break; } if (ret == USB_RET_STALL && s->debug) { fprintf(stderr, "usb-audio: failed data transaction: " "pid 0x%x ep 0x%x len 0x%zx\n", p->pid, p->devep, p->iov.size); } return ret; }
15,484
0
int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, int cpuid, void *opaque) { struct arm_note note; CPUARMState *env = &ARM_CPU(cs)->env; DumpState *s = opaque; int ret, i; arm_note_init(&note, s, "CORE", 5, NT_PRSTATUS, sizeof(note.prstatus)); note.prstatus.pr_pid = cpu_to_dump32(s, cpuid); for (i = 0; i < 16; ++i) { note.prstatus.pr_reg.regs[i] = cpu_to_dump32(s, env->regs[i]); } note.prstatus.pr_reg.regs[16] = cpu_to_dump32(s, cpsr_read(env)); ret = f(&note, ARM_PRSTATUS_NOTE_SIZE, s); if (ret < 0) { return -1; } return 0; }
15,485
0
static int write_manifest(AVFormatContext *s, int final) { DASHContext *c = s->priv_data; AVIOContext *out; char temp_filename[1024]; int ret, i; const char *proto = avio_find_protocol_name(s->filename); int use_rename = proto && !strcmp(proto, "file"); static unsigned int warned_non_file = 0; AVDictionaryEntry *title = av_dict_get(s->metadata, "title", NULL, 0); if (!use_rename && !warned_non_file++) av_log(s, AV_LOG_ERROR, "Cannot use rename on non file protocol, this may lead to races and temporary partial files\n"); snprintf(temp_filename, sizeof(temp_filename), use_rename ? "%s.tmp" : "%s", s->filename); ret = s->io_open(s, &out, temp_filename, AVIO_FLAG_WRITE, NULL); if (ret < 0) { av_log(s, AV_LOG_ERROR, "Unable to open %s for writing\n", temp_filename); return ret; } avio_printf(out, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"); avio_printf(out, "<MPD xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n" "\txmlns=\"urn:mpeg:dash:schema:mpd:2011\"\n" "\txmlns:xlink=\"http://www.w3.org/1999/xlink\"\n" "\txsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011 http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-DASH_schema_files/DASH-MPD.xsd\"\n" "\tprofiles=\"urn:mpeg:dash:profile:isoff-live:2011\"\n" "\ttype=\"%s\"\n", final ? "static" : "dynamic"); if (final) { avio_printf(out, "\tmediaPresentationDuration=\""); write_time(out, c->total_duration); avio_printf(out, "\"\n"); } else { int64_t update_period = c->last_duration / AV_TIME_BASE; char now_str[100]; if (c->use_template && !c->use_timeline) update_period = 500; avio_printf(out, "\tminimumUpdatePeriod=\"PT%"PRId64"S\"\n", update_period); avio_printf(out, "\tsuggestedPresentationDelay=\"PT%"PRId64"S\"\n", c->last_duration / AV_TIME_BASE); if (!c->availability_start_time[0] && s->nb_streams > 0 && c->streams[0].nb_segments > 0) { format_date_now(c->availability_start_time, sizeof(c->availability_start_time)); } if (c->availability_start_time[0]) avio_printf(out, "\tavailabilityStartTime=\"%s\"\n", c->availability_start_time); format_date_now(now_str, sizeof(now_str)); if (now_str[0]) avio_printf(out, "\tpublishTime=\"%s\"\n", now_str); if (c->window_size && c->use_template) { avio_printf(out, "\ttimeShiftBufferDepth=\""); write_time(out, c->last_duration * c->window_size); avio_printf(out, "\"\n"); } } avio_printf(out, "\tminBufferTime=\""); write_time(out, c->last_duration * 2); avio_printf(out, "\">\n"); avio_printf(out, "\t<ProgramInformation>\n"); if (title) { char *escaped = xmlescape(title->value); avio_printf(out, "\t\t<Title>%s</Title>\n", escaped); av_free(escaped); } avio_printf(out, "\t</ProgramInformation>\n"); if (c->window_size && s->nb_streams > 0 && c->streams[0].nb_segments > 0 && !c->use_template) { OutputStream *os = &c->streams[0]; int start_index = FFMAX(os->nb_segments - c->window_size, 0); int64_t start_time = av_rescale_q(os->segments[start_index]->time, s->streams[0]->time_base, AV_TIME_BASE_Q); avio_printf(out, "\t<Period id=\"0\" start=\""); write_time(out, start_time); avio_printf(out, "\">\n"); } else { avio_printf(out, "\t<Period id=\"0\" start=\"PT0.0S\">\n"); } for (i = 0; i < c->nb_as; i++) { if ((ret = write_adaptation_set(s, out, i)) < 0) return ret; } avio_printf(out, "\t</Period>\n"); if (c->utc_timing_url) avio_printf(out, "\t<UTCTiming schemeIdUri=\"urn:mpeg:dash:utc:http-xsdate:2014\" value=\"%s\"/>\n", c->utc_timing_url); avio_printf(out, "</MPD>\n"); avio_flush(out); ff_format_io_close(s, &out); if (use_rename) return avpriv_io_move(temp_filename, s->filename); return 0; }
15,487
0
static void curl_close(BlockDriverState *bs) { BDRVCURLState *s = bs->opaque; int i; DPRINTF("CURL: Close\n"); for (i=0; i<CURL_NUM_STATES; i++) { if (s->states[i].in_use) curl_clean_state(&s->states[i]); if (s->states[i].curl) { curl_easy_cleanup(s->states[i].curl); s->states[i].curl = NULL; } if (s->states[i].orig_buf) { g_free(s->states[i].orig_buf); s->states[i].orig_buf = NULL; } } if (s->multi) curl_multi_cleanup(s->multi); timer_del(&s->timer); g_free(s->url); }
15,488
0
if_output(struct socket *so, struct mbuf *ifm) { struct mbuf *ifq; int on_fastq = 1; DEBUG_CALL("if_output"); DEBUG_ARG("so = %lx", (long)so); DEBUG_ARG("ifm = %lx", (long)ifm); /* * First remove the mbuf from m_usedlist, * since we're gonna use m_next and m_prev ourselves * XXX Shouldn't need this, gotta change dtom() etc. */ if (ifm->m_flags & M_USEDLIST) { remque(ifm); ifm->m_flags &= ~M_USEDLIST; } /* * See if there's already a batchq list for this session. * This can include an interactive session, which should go on fastq, * but gets too greedy... hence it'll be downgraded from fastq to batchq. * We mustn't put this packet back on the fastq (or we'll send it out of order) * XXX add cache here? */ for (ifq = if_batchq.ifq_prev; ifq != &if_batchq; ifq = ifq->ifq_prev) { if (so == ifq->ifq_so) { /* A match! */ ifm->ifq_so = so; ifs_insque(ifm, ifq->ifs_prev); goto diddit; } } /* No match, check which queue to put it on */ if (so && (so->so_iptos & IPTOS_LOWDELAY)) { ifq = if_fastq.ifq_prev; on_fastq = 1; /* * Check if this packet is a part of the last * packet's session */ if (ifq->ifq_so == so) { ifm->ifq_so = so; ifs_insque(ifm, ifq->ifs_prev); goto diddit; } } else ifq = if_batchq.ifq_prev; /* Create a new doubly linked list for this session */ ifm->ifq_so = so; ifs_init(ifm); insque(ifm, ifq); diddit: ++if_queued; if (so) { /* Update *_queued */ so->so_queued++; so->so_nqueued++; /* * Check if the interactive session should be downgraded to * the batchq. A session is downgraded if it has queued 6 * packets without pausing, and at least 3 of those packets * have been sent over the link * (XXX These are arbitrary numbers, probably not optimal..) */ if (on_fastq && ((so->so_nqueued >= 6) && (so->so_nqueued - so->so_queued) >= 3)) { /* Remove from current queue... */ remque(ifm->ifs_next); /* ...And insert in the new. That'll teach ya! */ insque(ifm->ifs_next, &if_batchq); } } #ifndef FULL_BOLT /* * This prevents us from malloc()ing too many mbufs */ if (link_up) { /* if_start will check towrite */ if_start(); } #endif }
15,490
0
static int ne2000_can_receive(void *opaque) { NE2000State *s = opaque; if (s->cmd & E8390_STOP) return 1; return !ne2000_buffer_full(s); }
15,491
0
static void *bochs_bios_init(void) { void *fw_cfg; uint8_t *smbios_table; size_t smbios_len; uint64_t *numa_fw_cfg; int i, j; register_ioport_write(0x400, 1, 2, bochs_bios_write, NULL); register_ioport_write(0x401, 1, 2, bochs_bios_write, NULL); register_ioport_write(0x402, 1, 1, bochs_bios_write, NULL); register_ioport_write(0x403, 1, 1, bochs_bios_write, NULL); register_ioport_write(0x8900, 1, 1, bochs_bios_write, NULL); register_ioport_write(0x501, 1, 1, bochs_bios_write, NULL); register_ioport_write(0x501, 1, 2, bochs_bios_write, NULL); register_ioport_write(0x502, 1, 2, bochs_bios_write, NULL); register_ioport_write(0x500, 1, 1, bochs_bios_write, NULL); register_ioport_write(0x503, 1, 1, bochs_bios_write, NULL); fw_cfg = fw_cfg_init(BIOS_CFG_IOPORT, BIOS_CFG_IOPORT + 1, 0, 0); fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_ACPI_TABLES, (uint8_t *)acpi_tables, acpi_tables_len); fw_cfg_add_bytes(fw_cfg, FW_CFG_IRQ0_OVERRIDE, &irq0override, 1); smbios_table = smbios_get_table(&smbios_len); if (smbios_table) fw_cfg_add_bytes(fw_cfg, FW_CFG_SMBIOS_ENTRIES, smbios_table, smbios_len); fw_cfg_add_bytes(fw_cfg, FW_CFG_E820_TABLE, (uint8_t *)&e820_table, sizeof(struct e820_table)); fw_cfg_add_bytes(fw_cfg, FW_CFG_HPET, (uint8_t *)&hpet_cfg, sizeof(struct hpet_fw_config)); /* allocate memory for the NUMA channel: one (64bit) word for the number * of nodes, one word for each VCPU->node and one word for each node to * hold the amount of memory. */ numa_fw_cfg = g_malloc0((1 + max_cpus + nb_numa_nodes) * 8); numa_fw_cfg[0] = cpu_to_le64(nb_numa_nodes); for (i = 0; i < max_cpus; i++) { for (j = 0; j < nb_numa_nodes; j++) { if (node_cpumask[j] & (1 << i)) { numa_fw_cfg[i + 1] = cpu_to_le64(j); break; } } } for (i = 0; i < nb_numa_nodes; i++) { numa_fw_cfg[max_cpus + 1 + i] = cpu_to_le64(node_mem[i]); } fw_cfg_add_bytes(fw_cfg, FW_CFG_NUMA, (uint8_t *)numa_fw_cfg, (1 + max_cpus + nb_numa_nodes) * 8); return fw_cfg; }
15,493
0
static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry, uint16_t *value, uint16_t valid_mask) { XenPTRegInfo *reg = cfg_entry->reg; uint16_t valid_emu_mask = 0; /* emulate word register */ valid_emu_mask = reg->emu_mask & valid_mask; *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask); return 0; }
15,494
0
static int img_rebase(int argc, char **argv) { BlockDriverState *bs, *bs_old_backing = NULL, *bs_new_backing = NULL; BlockDriver *old_backing_drv, *new_backing_drv; char *filename; const char *fmt, *cache, *out_basefmt, *out_baseimg; int c, flags, ret; int unsafe = 0; int progress = 0; /* Parse commandline parameters */ fmt = NULL; cache = BDRV_DEFAULT_CACHE; out_baseimg = NULL; out_basefmt = NULL; for(;;) { c = getopt(argc, argv, "uhf:F:b:pt:"); if (c == -1) { break; } switch(c) { case '?': case 'h': help(); return 0; case 'f': fmt = optarg; break; case 'F': out_basefmt = optarg; break; case 'b': out_baseimg = optarg; break; case 'u': unsafe = 1; break; case 'p': progress = 1; break; case 't': cache = optarg; break; } } if ((optind >= argc) || (!unsafe && !out_baseimg)) { help(); } filename = argv[optind++]; qemu_progress_init(progress, 2.0); qemu_progress_print(0, 100); flags = BDRV_O_RDWR | (unsafe ? BDRV_O_NO_BACKING : 0); ret = bdrv_parse_cache_flags(cache, &flags); if (ret < 0) { error_report("Invalid cache option: %s", cache); return -1; } /* * Open the images. * * Ignore the old backing file for unsafe rebase in case we want to correct * the reference to a renamed or moved backing file. */ bs = bdrv_new_open(filename, fmt, flags); if (!bs) { return 1; } /* Find the right drivers for the backing files */ old_backing_drv = NULL; new_backing_drv = NULL; if (!unsafe && bs->backing_format[0] != '\0') { old_backing_drv = bdrv_find_format(bs->backing_format); if (old_backing_drv == NULL) { error_report("Invalid format name: '%s'", bs->backing_format); ret = -1; goto out; } } if (out_basefmt != NULL) { new_backing_drv = bdrv_find_format(out_basefmt); if (new_backing_drv == NULL) { error_report("Invalid format name: '%s'", out_basefmt); ret = -1; goto out; } } /* For safe rebasing we need to compare old and new backing file */ if (unsafe) { /* Make the compiler happy */ bs_old_backing = NULL; bs_new_backing = NULL; } else { char backing_name[1024]; bs_old_backing = bdrv_new("old_backing"); bdrv_get_backing_filename(bs, backing_name, sizeof(backing_name)); ret = bdrv_open(bs_old_backing, backing_name, BDRV_O_FLAGS, old_backing_drv); if (ret) { error_report("Could not open old backing file '%s'", backing_name); goto out; } bs_new_backing = bdrv_new("new_backing"); ret = bdrv_open(bs_new_backing, out_baseimg, BDRV_O_FLAGS, new_backing_drv); if (ret) { error_report("Could not open new backing file '%s'", out_baseimg); goto out; } } /* * Check each unallocated cluster in the COW file. If it is unallocated, * accesses go to the backing file. We must therefore compare this cluster * in the old and new backing file, and if they differ we need to copy it * from the old backing file into the COW file. * * If qemu-img crashes during this step, no harm is done. The content of * the image is the same as the original one at any time. */ if (!unsafe) { uint64_t num_sectors; uint64_t old_backing_num_sectors; uint64_t new_backing_num_sectors; uint64_t sector; int n; uint8_t * buf_old; uint8_t * buf_new; float local_progress; buf_old = qemu_blockalign(bs, IO_BUF_SIZE); buf_new = qemu_blockalign(bs, IO_BUF_SIZE); bdrv_get_geometry(bs, &num_sectors); bdrv_get_geometry(bs_old_backing, &old_backing_num_sectors); bdrv_get_geometry(bs_new_backing, &new_backing_num_sectors); local_progress = (float)100 / (num_sectors / MIN(num_sectors, IO_BUF_SIZE / 512)); for (sector = 0; sector < num_sectors; sector += n) { /* How many sectors can we handle with the next read? 
*/ if (sector + (IO_BUF_SIZE / 512) <= num_sectors) { n = (IO_BUF_SIZE / 512); } else { n = num_sectors - sector; } /* If the cluster is allocated, we don't need to take action */ ret = bdrv_is_allocated(bs, sector, n, &n); if (ret) { continue; } /* * Read old and new backing file and take into consideration that * backing files may be smaller than the COW image. */ if (sector >= old_backing_num_sectors) { memset(buf_old, 0, n * BDRV_SECTOR_SIZE); } else { if (sector + n > old_backing_num_sectors) { n = old_backing_num_sectors - sector; } ret = bdrv_read(bs_old_backing, sector, buf_old, n); if (ret < 0) { error_report("error while reading from old backing file"); goto out; } } if (sector >= new_backing_num_sectors) { memset(buf_new, 0, n * BDRV_SECTOR_SIZE); } else { if (sector + n > new_backing_num_sectors) { n = new_backing_num_sectors - sector; } ret = bdrv_read(bs_new_backing, sector, buf_new, n); if (ret < 0) { error_report("error while reading from new backing file"); goto out; } } /* If they differ, we need to write to the COW file */ uint64_t written = 0; while (written < n) { int pnum; if (compare_sectors(buf_old + written * 512, buf_new + written * 512, n - written, &pnum)) { ret = bdrv_write(bs, sector + written, buf_old + written * 512, pnum); if (ret < 0) { error_report("Error while writing to COW image: %s", strerror(-ret)); goto out; } } written += pnum; } qemu_progress_print(local_progress, 100); } qemu_vfree(buf_old); qemu_vfree(buf_new); } /* * Change the backing file. All clusters that are different from the old * backing file are overwritten in the COW file now, so the visible content * doesn't change when we switch the backing file. */ ret = bdrv_change_backing_file(bs, out_baseimg, out_basefmt); if (ret == -ENOSPC) { error_report("Could not change the backing file to '%s': No " "space left in the file header", out_baseimg); } else if (ret < 0) { error_report("Could not change the backing file to '%s': %s", out_baseimg, strerror(-ret)); } qemu_progress_print(100, 0); /* * TODO At this point it is possible to check if any clusters that are * allocated in the COW file are the same in the backing file. If so, they * could be dropped from the COW file. Don't do this before switching the * backing file, in case of a crash this would lead to corruption. */ out: qemu_progress_end(); /* Cleanup */ if (!unsafe) { if (bs_old_backing != NULL) { bdrv_delete(bs_old_backing); } if (bs_new_backing != NULL) { bdrv_delete(bs_new_backing); } } bdrv_delete(bs); if (ret) { return 1; } return 0; }
15,496
0
int get_buffer(ByteIOContext *s, unsigned char *buf, int size) { int len, size1; size1 = size; while (size > 0) { len = s->buf_end - s->buf_ptr; if (len > size) len = size; if (len == 0) { fill_buffer(s); len = s->buf_end - s->buf_ptr; if (len == 0) break; } else { memcpy(buf, s->buf_ptr, len); buf += len; s->buf_ptr += len; size -= len; } } return size1 - size; }
15,497
0
AVFilterFormats *ff_make_format_list(const int *fmts) { AVFilterFormats *formats; int count; for (count = 0; fmts[count] != -1; count++) ; formats = av_mallocz(sizeof(*formats)); if (count) formats->formats = av_malloc(sizeof(*formats->formats) * count); formats->nb_formats = count; memcpy(formats->formats, fmts, sizeof(*formats->formats) * count); return formats; }
15,498
1
void qemu_cond_destroy(QemuCond *cond) { BOOL result; result = CloseHandle(cond->continue_event); if (!result) { error_exit(GetLastError(), __func__); } cond->continue_event = 0; result = CloseHandle(cond->sema); if (!result) { error_exit(GetLastError(), __func__); } cond->sema = 0; }
15,500
1
static inline void gen_intermediate_code_internal(CPUARMState *env, TranslationBlock *tb, int search_pc) { DisasContext dc1, *dc = &dc1; CPUBreakpoint *bp; uint16_t *gen_opc_end; int j, lj; target_ulong pc_start; uint32_t next_page_start; int num_insns; int max_insns; /* generate intermediate code */ pc_start = tb->pc; dc->tb = tb; gen_opc_end = gen_opc_buf + OPC_MAX_SIZE; dc->is_jmp = DISAS_NEXT; dc->pc = pc_start; dc->singlestep_enabled = env->singlestep_enabled; dc->condjmp = 0; dc->thumb = ARM_TBFLAG_THUMB(tb->flags); dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1; dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4; #if !defined(CONFIG_USER_ONLY) dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0); #endif dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags); dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags); dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags); cpu_F0s = tcg_temp_new_i32(); cpu_F1s = tcg_temp_new_i32(); cpu_F0d = tcg_temp_new_i64(); cpu_F1d = tcg_temp_new_i64(); cpu_V0 = cpu_F0d; cpu_V1 = cpu_F1d; /* FIXME: cpu_M0 can probably be the same as cpu_V0. */ cpu_M0 = tcg_temp_new_i64(); next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; lj = -1; num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) max_insns = CF_COUNT_MASK; gen_icount_start(); tcg_clear_temp_count(); /* A note on handling of the condexec (IT) bits: * * We want to avoid the overhead of having to write the updated condexec * bits back to the CPUARMState for every instruction in an IT block. So: * (1) if the condexec bits are not already zero then we write * zero back into the CPUARMState now. This avoids complications trying * to do it at the end of the block. (For example if we don't do this * it's hard to identify whether we can safely skip writing condexec * at the end of the TB, which we definitely want to do for the case * where a TB doesn't do anything with the IT state at all.) * (2) if we are going to leave the TB then we call gen_set_condexec() * which will write the correct value into CPUARMState if zero is wrong. * This is done both for leaving the TB at the end, and for leaving * it because of an exception we know will happen, which is done in * gen_exception_insn(). The latter is necessary because we need to * leave the TB with the PC/IT state just prior to execution of the * instruction which caused the exception. * (3) if we leave the TB unexpectedly (eg a data abort on a load) * then the CPUARMState will be wrong and we need to reset it. * This is handled in the same way as restoration of the * PC in these situations: we will be called again with search_pc=1 * and generate a mapping of the condexec bits for each PC in * gen_opc_condexec_bits[]. restore_state_to_opc() then uses * this to restore the condexec bits. * * Note that there are no instructions which can read the condexec * bits, and none which can write non-static values to them, so * we don't need to care about whether CPUARMState is correct in the * middle of a TB. */ /* Reset the conditional execution bits immediately. This avoids complications trying to do it at the end of the block. */ if (dc->condexec_mask || dc->condexec_cond) { TCGv tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); store_cpu_field(tmp, condexec_bits); } do { #ifdef CONFIG_USER_ONLY /* Intercept jump to the magic kernel page. */ if (dc->pc >= 0xffff0000) { /* We always get here via a jump, so know we are not in a conditional execution block. 
*/ gen_exception(EXCP_KERNEL_TRAP); dc->is_jmp = DISAS_UPDATE; break; } #else if (dc->pc >= 0xfffffff0 && IS_M(env)) { /* We always get here via a jump, so know we are not in a conditional execution block. */ gen_exception(EXCP_EXCEPTION_EXIT); dc->is_jmp = DISAS_UPDATE; break; } #endif if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) { QTAILQ_FOREACH(bp, &env->breakpoints, entry) { if (bp->pc == dc->pc) { gen_exception_insn(dc, 0, EXCP_DEBUG); /* Advance PC so that clearing the breakpoint will invalidate this TB. */ dc->pc += 2; goto done_generating; break; } } } if (search_pc) { j = gen_opc_ptr - gen_opc_buf; if (lj < j) { lj++; while (lj < j) gen_opc_instr_start[lj++] = 0; } gen_opc_pc[lj] = dc->pc; gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1); gen_opc_instr_start[lj] = 1; gen_opc_icount[lj] = num_insns; } if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) gen_io_start(); if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) { tcg_gen_debug_insn_start(dc->pc); } if (dc->thumb) { disas_thumb_insn(env, dc); if (dc->condexec_mask) { dc->condexec_cond = (dc->condexec_cond & 0xe) | ((dc->condexec_mask >> 4) & 1); dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f; if (dc->condexec_mask == 0) { dc->condexec_cond = 0; } } } else { disas_arm_insn(env, dc); } if (dc->condjmp && !dc->is_jmp) { gen_set_label(dc->condlabel); dc->condjmp = 0; } if (tcg_check_temp_count()) { fprintf(stderr, "TCG temporary leak before %08x\n", dc->pc); } /* Translation stops when a conditional branch is encountered. * Otherwise the subsequent code could get translated several times. * Also stop translation when a page boundary is reached. This * ensures prefetch aborts occur at the right place. */ num_insns ++; } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end && !env->singlestep_enabled && !singlestep && dc->pc < next_page_start && num_insns < max_insns); if (tb->cflags & CF_LAST_IO) { if (dc->condjmp) { /* FIXME: This can theoretically happen with self-modifying code. */ cpu_abort(env, "IO on conditional branch instruction"); } gen_io_end(); } /* At this stage dc->condjmp will only be set when the skipped instruction was a conditional branch or trap, and the PC has already been written. */ if (unlikely(env->singlestep_enabled)) { /* Make sure the pc is updated, and raise a debug exception. */ if (dc->condjmp) { gen_set_condexec(dc); if (dc->is_jmp == DISAS_SWI) { gen_exception(EXCP_SWI); } else { gen_exception(EXCP_DEBUG); } gen_set_label(dc->condlabel); } if (dc->condjmp || !dc->is_jmp) { gen_set_pc_im(dc->pc); dc->condjmp = 0; } gen_set_condexec(dc); if (dc->is_jmp == DISAS_SWI && !dc->condjmp) { gen_exception(EXCP_SWI); } else { /* FIXME: Single stepping a WFI insn will not halt the CPU. */ gen_exception(EXCP_DEBUG); } } else { /* While branches must always occur at the end of an IT block, there are a few other things that can cause us to terminate the TB in the middel of an IT block: - Exception generating instructions (bkpt, swi, undefined). - Page boundaries. - Hardware watchpoints. Hardware breakpoints have already been handled and skip this code. 
*/ gen_set_condexec(dc); switch(dc->is_jmp) { case DISAS_NEXT: gen_goto_tb(dc, 1, dc->pc); break; default: case DISAS_JUMP: case DISAS_UPDATE: /* indicate that the hash table must be used to find the next TB */ tcg_gen_exit_tb(0); break; case DISAS_TB_JUMP: /* nothing more to generate */ break; case DISAS_WFI: gen_helper_wfi(); break; case DISAS_SWI: gen_exception(EXCP_SWI); break; } if (dc->condjmp) { gen_set_label(dc->condlabel); gen_set_condexec(dc); gen_goto_tb(dc, 1, dc->pc); dc->condjmp = 0; } } done_generating: gen_icount_end(tb, num_insns); *gen_opc_ptr = INDEX_op_end; #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { qemu_log("----------------\n"); qemu_log("IN: %s\n", lookup_symbol(pc_start)); log_target_disas(pc_start, dc->pc - pc_start, dc->thumb); qemu_log("\n"); } #endif if (search_pc) { j = gen_opc_ptr - gen_opc_buf; lj++; while (lj <= j) gen_opc_instr_start[lj++] = 0; } else { tb->size = dc->pc - pc_start; tb->icount = num_insns; } }
15,502
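The Thumb IT-block advance buried in the decode loop above is easy to misread, so here is a minimal standalone sketch of the same update rule (the helper name and starting values are illustrative, not from QEMU):

#include <stdio.h>

/* Advance an IT-block state the way the translator does after each Thumb
 * instruction: shift one bit out of the 5-bit mask into the low bit of
 * the condition, and clear everything once the block is exhausted. */
static void it_advance(unsigned *cond, unsigned *mask)
{
    *cond = (*cond & 0xe) | ((*mask >> 4) & 1);
    *mask = (*mask << 1) & 0x1f;
    if (*mask == 0) {
        *cond = 0;
    }
}

int main(void)
{
    unsigned cond = 0x0, mask = 0x0c; /* illustrative starting state */
    while (mask) {
        it_advance(&cond, &mask);
        printf("cond=%x mask=%02x\n", cond, mask);
    }
    return 0;
}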
1
static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage) { int pages = -1; uint64_t bytes_xmit; ram_addr_t current_addr; uint8_t *p; int ret; bool send_async = true; RAMBlock *block = pss->block; ram_addr_t offset = pss->page << TARGET_PAGE_BITS; p = block->host + offset; trace_ram_save_page(block->idstr, (uint64_t)offset, p); /* When in doubt, send the page as a normal page */ bytes_xmit = 0; ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE, &bytes_xmit); if (bytes_xmit) { rs->bytes_transferred += bytes_xmit; pages = 1; } XBZRLE_cache_lock(); current_addr = block->offset + offset; if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { if (ret != RAM_SAVE_CONTROL_DELAYED) { if (bytes_xmit > 0) { rs->norm_pages++; } else if (bytes_xmit == 0) { rs->zero_pages++; } } } else { pages = save_zero_page(rs, block, offset, p); if (pages > 0) { /* Must let xbzrle know, otherwise a previous (now 0'd) cached * page would be stale */ xbzrle_cache_zero_page(rs, current_addr); ram_release_pages(block->idstr, offset, pages); } else if (!rs->ram_bulk_stage && !migration_in_postcopy() && migrate_use_xbzrle()) { pages = save_xbzrle_page(rs, &p, current_addr, block, offset, last_stage); if (!last_stage) { /* Can't send this cached data async, since the cache page * might get updated before it gets to the wire */ send_async = false; } } } /* XBZRLE overflow or normal page */ if (pages == -1) { rs->bytes_transferred += save_page_header(rs, block, offset | RAM_SAVE_FLAG_PAGE); if (send_async) { qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE, migrate_release_ram() && migration_in_postcopy()); } else { qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE); } rs->bytes_transferred += TARGET_PAGE_SIZE; pages = 1; rs->norm_pages++; } XBZRLE_cache_unlock(); return pages; }
15,503
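save_zero_page() in the entry above hinges on scanning the whole page for zeros; a naive stand-in for that test looks like the sketch below (QEMU's real helper, buffer_is_zero(), is vectorised, but the contract is the same):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Naive per-byte zero scan over one guest page. */
static bool page_is_zero(const uint8_t *p, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        if (p[i]) {
            return false;
        }
    }
    return true;
}

int main(void)
{
    uint8_t page[4096] = { 0 };
    return page_is_zero(page, sizeof(page)) ? 0 : 1;
}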
1
static inline int vmsvga_fifo_empty(struct vmsvga_state_s *s) { if (!s->config || !s->enable) return 1; return (s->cmd->next_cmd == s->cmd->stop); }
15,504
0
static int dnxhd_decode_dct_block_10_444(const DNXHDContext *ctx, RowContext *row, int n) { return dnxhd_decode_dct_block(ctx, row, n, 6, 32, 6); }
15,505
0
av_cold int swri_rematrix_init(SwrContext *s){ int i, j; int nb_in = av_get_channel_layout_nb_channels(s->in_ch_layout); int nb_out = av_get_channel_layout_nb_channels(s->out_ch_layout); s->mix_any_f = NULL; if (!s->rematrix_custom) { int r = auto_matrix(s); if (r) return r; } if (s->midbuf.fmt == AV_SAMPLE_FMT_S16P){ s->native_matrix = av_calloc(nb_in * nb_out, sizeof(int)); s->native_one = av_mallocz(sizeof(int)); for (i = 0; i < nb_out; i++) for (j = 0; j < nb_in; j++) ((int*)s->native_matrix)[i * nb_in + j] = lrintf(s->matrix[i][j] * 32768); *((int*)s->native_one) = 32768; s->mix_1_1_f = (mix_1_1_func_type*)copy_s16; s->mix_2_1_f = (mix_2_1_func_type*)sum2_s16; s->mix_any_f = (mix_any_func_type*)get_mix_any_func_s16(s); }else if(s->midbuf.fmt == AV_SAMPLE_FMT_FLTP){ s->native_matrix = av_calloc(nb_in * nb_out, sizeof(float)); s->native_one = av_mallocz(sizeof(float)); for (i = 0; i < nb_out; i++) for (j = 0; j < nb_in; j++) ((float*)s->native_matrix)[i * nb_in + j] = s->matrix[i][j]; *((float*)s->native_one) = 1.0; s->mix_1_1_f = (mix_1_1_func_type*)copy_float; s->mix_2_1_f = (mix_2_1_func_type*)sum2_float; s->mix_any_f = (mix_any_func_type*)get_mix_any_func_float(s); }else if(s->midbuf.fmt == AV_SAMPLE_FMT_DBLP){ s->native_matrix = av_calloc(nb_in * nb_out, sizeof(double)); s->native_one = av_mallocz(sizeof(double)); for (i = 0; i < nb_out; i++) for (j = 0; j < nb_in; j++) ((double*)s->native_matrix)[i * nb_in + j] = s->matrix[i][j]; *((double*)s->native_one) = 1.0; s->mix_1_1_f = (mix_1_1_func_type*)copy_double; s->mix_2_1_f = (mix_2_1_func_type*)sum2_double; s->mix_any_f = (mix_any_func_type*)get_mix_any_func_double(s); }else if(s->midbuf.fmt == AV_SAMPLE_FMT_S32P){ // Only for dithering currently // s->native_matrix = av_calloc(nb_in * nb_out, sizeof(double)); s->native_one = av_mallocz(sizeof(int)); // for (i = 0; i < nb_out; i++) // for (j = 0; j < nb_in; j++) // ((double*)s->native_matrix)[i * nb_in + j] = s->matrix[i][j]; *((int*)s->native_one) = 32768; s->mix_1_1_f = (mix_1_1_func_type*)copy_s32; s->mix_2_1_f = (mix_2_1_func_type*)sum2_s32; s->mix_any_f = (mix_any_func_type*)get_mix_any_func_s32(s); }else av_assert0(0); //FIXME quantize for integers for (i = 0; i < SWR_CH_MAX; i++) { int ch_in=0; for (j = 0; j < SWR_CH_MAX; j++) { s->matrix32[i][j]= lrintf(s->matrix[i][j] * 32768); if(s->matrix[i][j]) s->matrix_ch[i][++ch_in]= j; } s->matrix_ch[i][0]= ch_in; } if(HAVE_YASM && HAVE_MMX) swri_rematrix_init_x86(s); return 0; }
15,506
0
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb) { int i, j, k, sb_x, sb_y; int scheme; int current_macroblock; int current_fragment; int coding_mode; int custom_mode_alphabet[CODING_MODE_COUNT]; if (s->keyframe) { for (i = 0; i < s->fragment_count; i++) s->all_fragments[i].coding_method = MODE_INTRA; } else { /* fetch the mode coding scheme for this frame */ scheme = get_bits(gb, 3); /* is it a custom coding scheme? */ if (scheme == 0) { for (i = 0; i < 8; i++) custom_mode_alphabet[i] = MODE_INTER_NO_MV; for (i = 0; i < 8; i++) custom_mode_alphabet[get_bits(gb, 3)] = i; } /* iterate through all of the macroblocks that contain 1 or more * coded fragments */ for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) { for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) { for (j = 0; j < 4; j++) { int mb_x = 2*sb_x + (j>>1); int mb_y = 2*sb_y + (((j>>1)+j)&1); int frags_coded = 0; current_macroblock = mb_y * s->macroblock_width + mb_x; if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height) continue; #define BLOCK_X (2*mb_x + (k&1)) #define BLOCK_Y (2*mb_y + (k>>1)) /* coding modes are only stored if the macroblock has at least one * luma block coded, otherwise it must be INTER_NO_MV */ for (k = 0; k < 4; k++) { current_fragment = BLOCK_Y*s->fragment_width + BLOCK_X; if (s->all_fragments[current_fragment].coding_method != MODE_COPY) break; } if (k == 4) { s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV; continue; } /* mode 7 means get 3 bits for each coding mode */ if (scheme == 7) coding_mode = get_bits(gb, 3); else if(scheme == 0) coding_mode = custom_mode_alphabet [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)]; else coding_mode = ModeAlphabet[scheme-1] [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)]; s->macroblock_coding[current_macroblock] = coding_mode; for (k = 0; k < 4; k++) { current_fragment = BLOCK_Y*s->fragment_width + BLOCK_X; if (s->all_fragments[current_fragment].coding_method != MODE_COPY) s->all_fragments[current_fragment].coding_method = coding_mode; } for (k = 0; k < 2; k++) { current_fragment = s->fragment_start[k+1] + mb_y*(s->fragment_width>>1) + mb_x; if (s->all_fragments[current_fragment].coding_method != MODE_COPY) s->all_fragments[current_fragment].coding_method = coding_mode; } } } } } return 0; }
15,507
0
static int h264_probe(AVProbeData *p) { uint32_t code = -1; int sps = 0, pps = 0, idr = 0, res = 0, sli = 0; int i; for (i = 0; i < p->buf_size; i++) { code = (code << 8) + p->buf[i]; if ((code & 0xffffff00) == 0x100) { int ref_idc = (code >> 5) & 3; int type = code & 0x1F; static const int8_t ref_zero[] = { 2, 0, 0, 0, 0, -1, 1, -1, -1, 1, 1, 1, 1, -1, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }; if (code & 0x80) // forbidden_bit return 0; if (ref_zero[type] == 1 && ref_idc) return 0; if (ref_zero[type] == -1 && !ref_idc) return 0; if (ref_zero[type] == 2) { if (!(code == 0x100 && !p->buf[i + 1] && !p->buf[i + 2])) res++; } switch (type) { case 1: sli++; break; case 5: idr++; break; case 7: if (p->buf[i + 2] & 0x03) return 0; sps++; break; case 8: pps++; break; } } } ff_tlog(NULL, "sps:%d pps:%d idr:%d sli:%d res:%d\n", sps, pps, idr, sli, res); if (sps && pps && (idr || sli > 3) && res < (sps + pps + idr)) return AVPROBE_SCORE_EXTENSION + 1; // 1 more than .mpg return 0; }
15,508
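The rolling 32-bit matcher in h264_probe() deserves a worked example; the sketch below feeds it a single Annex B start code and recovers the NAL type and nal_ref_idc exactly as the probe does:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* An Annex B start code followed by an SPS NAL header byte (0x67). */
    static const uint8_t buf[] = { 0x00, 0x00, 0x01, 0x67 };
    uint32_t code = -1;

    for (unsigned i = 0; i < sizeof(buf); i++) {
        code = (code << 8) + buf[i];
        if ((code & 0xffffff00) == 0x100) {
            /* prints: type=7 (an SPS), nal_ref_idc=3 */
            printf("type=%u nal_ref_idc=%u\n",
                   (unsigned)(code & 0x1f), (unsigned)((code >> 5) & 3));
        }
    }
    return 0;
}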
0
static int mov_write_tapt_tag(AVIOContext *pb, MOVTrack *track) { int32_t width = av_rescale(track->enc->sample_aspect_ratio.num, track->enc->width, track->enc->sample_aspect_ratio.den); int64_t pos = avio_tell(pb); avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, "tapt"); avio_wb32(pb, 20); ffio_wfourcc(pb, "clef"); avio_wb32(pb, 0); avio_wb32(pb, width << 16); avio_wb32(pb, track->enc->height << 16); avio_wb32(pb, 20); ffio_wfourcc(pb, "enof"); avio_wb32(pb, 0); avio_wb32(pb, track->enc->width << 16); avio_wb32(pb, track->enc->height << 16); return updateSize(pb, pos); }
15,509
0
static inline void RENAME(rgb24ToUV_half)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused) { int i; assert(src1==src2); for (i=0; i<width; i++) { int r= src1[6*i + 0] + src1[6*i + 3]; int g= src1[6*i + 1] + src1[6*i + 4]; int b= src1[6*i + 2] + src1[6*i + 5]; dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1); dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1); } }
15,510
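The rounding constant in rgb24ToUV_half() packs two steps into one shift: r, g and b are two-pixel sums, so 257 is 2 * (128 + 0.5), the chroma bias plus a rounding half pre-doubled, and >> (RGB2YUV_SHIFT + 1) both rescales and averages the pair. A small check, using illustrative BT.601-style coefficients chosen to sum to zero (RGB2YUV_SHIFT = 15 is an assumption about the swscale build configuration):

#include <stdio.h>

#define RGB2YUV_SHIFT 15

int main(void)
{
    /* Illustrative chroma coefficients, scaled by 2^15; they sum to zero
     * so grey input should map exactly onto the 128 chroma bias. */
    const int RU = -4850, GU = -9536, BU = 14386;

    /* r, g, b are two-pixel sums, as in the _half function above. */
    int r = 2 * 128, g = 2 * 128, b = 2 * 128;

    int u = (RU * r + GU * g + BU * b + (257 << RGB2YUV_SHIFT))
            >> (RGB2YUV_SHIFT + 1);
    printf("U = %d\n", u); /* prints 128 */
    return 0;
}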
0
static inline int parse_nal_units(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t * const buf, int buf_size) { H264ParseContext *p = s->priv_data; H2645NAL nal = { NULL }; int buf_index, next_avc; unsigned int pps_id; unsigned int slice_type; int state = -1, got_reset = 0; int q264 = buf_size >=4 && !memcmp("Q264", buf, 4); int field_poc[2]; int ret; /* set some sane default values */ s->pict_type = AV_PICTURE_TYPE_I; s->key_frame = 0; s->picture_structure = AV_PICTURE_STRUCTURE_UNKNOWN; ff_h264_sei_uninit(&p->sei); p->sei.frame_packing.frame_packing_arrangement_cancel_flag = -1; if (!buf_size) return 0; buf_index = 0; next_avc = p->is_avc ? 0 : buf_size; for (;;) { const SPS *sps; int src_length, consumed, nalsize = 0; if (buf_index >= next_avc) { nalsize = get_nalsize(p->nal_length_size, buf, buf_size, &buf_index, avctx); if (nalsize < 0) break; next_avc = buf_index + nalsize; } else { buf_index = find_start_code(buf, buf_size, buf_index, next_avc); if (buf_index >= buf_size) break; if (buf_index >= next_avc) continue; } src_length = next_avc - buf_index; state = buf[buf_index]; switch (state & 0x1f) { case H264_NAL_SLICE: case H264_NAL_IDR_SLICE: // Do not walk the whole buffer just to decode slice header if ((state & 0x1f) == H264_NAL_IDR_SLICE || ((state >> 5) & 0x3) == 0) { /* IDR or disposable slice * No need to decode many bytes because MMCOs shall not be present. */ if (src_length > 60) src_length = 60; } else { /* To decode up to MMCOs */ if (src_length > 1000) src_length = 1000; } break; } consumed = ff_h2645_extract_rbsp(buf + buf_index, src_length, &nal, 1); if (consumed < 0) break; buf_index += consumed; ret = init_get_bits8(&nal.gb, nal.data, nal.size); if (ret < 0) goto fail; get_bits1(&nal.gb); nal.ref_idc = get_bits(&nal.gb, 2); nal.type = get_bits(&nal.gb, 5); switch (nal.type) { case H264_NAL_SPS: ff_h264_decode_seq_parameter_set(&nal.gb, avctx, &p->ps, 0); break; case H264_NAL_PPS: ff_h264_decode_picture_parameter_set(&nal.gb, avctx, &p->ps, nal.size_bits); break; case H264_NAL_SEI: ff_h264_sei_decode(&p->sei, &nal.gb, &p->ps, avctx); break; case H264_NAL_IDR_SLICE: s->key_frame = 1; p->poc.prev_frame_num = 0; p->poc.prev_frame_num_offset = 0; p->poc.prev_poc_msb = p->poc.prev_poc_lsb = 0; /* fall through */ case H264_NAL_SLICE: get_ue_golomb_long(&nal.gb); // skip first_mb_in_slice slice_type = get_ue_golomb_31(&nal.gb); s->pict_type = ff_h264_golomb_to_pict_type[slice_type % 5]; if (p->sei.recovery_point.recovery_frame_cnt >= 0) { /* key frame, since recovery_frame_cnt is set */ s->key_frame = 1; } pps_id = get_ue_golomb(&nal.gb); if (pps_id >= MAX_PPS_COUNT) { av_log(avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id); goto fail; } if (!p->ps.pps_list[pps_id]) { av_log(avctx, AV_LOG_ERROR, "non-existing PPS %u referenced\n", pps_id); goto fail; } av_buffer_unref(&p->ps.pps_ref); av_buffer_unref(&p->ps.sps_ref); p->ps.pps = NULL; p->ps.sps = NULL; p->ps.pps_ref = av_buffer_ref(p->ps.pps_list[pps_id]); if (!p->ps.pps_ref) goto fail; p->ps.pps = (const PPS*)p->ps.pps_ref->data; if (!p->ps.sps_list[p->ps.pps->sps_id]) { av_log(avctx, AV_LOG_ERROR, "non-existing SPS %u referenced\n", p->ps.pps->sps_id); goto fail; } p->ps.sps_ref = av_buffer_ref(p->ps.sps_list[p->ps.pps->sps_id]); if (!p->ps.sps_ref) goto fail; p->ps.sps = (const SPS*)p->ps.sps_ref->data; sps = p->ps.sps; // heuristic to detect non marked keyframes if (p->ps.sps->ref_frame_count <= 1 && p->ps.pps->ref_count[0] <= 1 && s->pict_type == AV_PICTURE_TYPE_I) s->key_frame = 1; 
p->poc.frame_num = get_bits(&nal.gb, sps->log2_max_frame_num); s->coded_width = 16 * sps->mb_width; s->coded_height = 16 * sps->mb_height; s->width = s->coded_width - (sps->crop_right + sps->crop_left); s->height = s->coded_height - (sps->crop_top + sps->crop_bottom); if (s->width <= 0 || s->height <= 0) { s->width = s->coded_width; s->height = s->coded_height; } switch (sps->bit_depth_luma) { case 9: if (sps->chroma_format_idc == 3) s->format = AV_PIX_FMT_YUV444P9; else if (sps->chroma_format_idc == 2) s->format = AV_PIX_FMT_YUV422P9; else s->format = AV_PIX_FMT_YUV420P9; break; case 10: if (sps->chroma_format_idc == 3) s->format = AV_PIX_FMT_YUV444P10; else if (sps->chroma_format_idc == 2) s->format = AV_PIX_FMT_YUV422P10; else s->format = AV_PIX_FMT_YUV420P10; break; case 8: if (sps->chroma_format_idc == 3) s->format = AV_PIX_FMT_YUV444P; else if (sps->chroma_format_idc == 2) s->format = AV_PIX_FMT_YUV422P; else s->format = AV_PIX_FMT_YUV420P; break; default: s->format = AV_PIX_FMT_NONE; } avctx->profile = ff_h264_get_profile(sps); avctx->level = sps->level_idc; if (sps->frame_mbs_only_flag) { p->picture_structure = PICT_FRAME; } else { if (get_bits1(&nal.gb)) { // field_pic_flag p->picture_structure = PICT_TOP_FIELD + get_bits1(&nal.gb); // bottom_field_flag } else { p->picture_structure = PICT_FRAME; } } if (nal.type == H264_NAL_IDR_SLICE) get_ue_golomb_long(&nal.gb); /* idr_pic_id */ if (sps->poc_type == 0) { p->poc.poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb); if (p->ps.pps->pic_order_present == 1 && p->picture_structure == PICT_FRAME) p->poc.delta_poc_bottom = get_se_golomb(&nal.gb); } if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) { p->poc.delta_poc[0] = get_se_golomb(&nal.gb); if (p->ps.pps->pic_order_present == 1 && p->picture_structure == PICT_FRAME) p->poc.delta_poc[1] = get_se_golomb(&nal.gb); } /* Decode POC of this picture. * The prev_ values needed for decoding POC of the next picture are not set here. */ field_poc[0] = field_poc[1] = INT_MAX; ff_h264_init_poc(field_poc, &s->output_picture_number, sps, &p->poc, p->picture_structure, nal.ref_idc); /* Continue parsing to check if MMCO_RESET is present. * FIXME: MMCO_RESET could appear in non-first slice. * Maybe, we should parse all undisposable non-IDR slice of this * picture until encountering MMCO_RESET in a slice of it. */ if (nal.ref_idc && nal.type != H264_NAL_IDR_SLICE) { got_reset = scan_mmco_reset(s, &nal.gb, avctx); if (got_reset < 0) goto fail; } /* Set up the prev_ values for decoding POC of the next picture. */ p->poc.prev_frame_num = got_reset ? 0 : p->poc.frame_num; p->poc.prev_frame_num_offset = got_reset ? 0 : p->poc.frame_num_offset; if (nal.ref_idc != 0) { if (!got_reset) { p->poc.prev_poc_msb = p->poc.poc_msb; p->poc.prev_poc_lsb = p->poc.poc_lsb; } else { p->poc.prev_poc_msb = 0; p->poc.prev_poc_lsb = p->picture_structure == PICT_BOTTOM_FIELD ? 0 : field_poc[0]; } } if (sps->pic_struct_present_flag) { switch (p->sei.picture_timing.pic_struct) { case SEI_PIC_STRUCT_TOP_FIELD: case SEI_PIC_STRUCT_BOTTOM_FIELD: s->repeat_pict = 0; break; case SEI_PIC_STRUCT_FRAME: case SEI_PIC_STRUCT_TOP_BOTTOM: case SEI_PIC_STRUCT_BOTTOM_TOP: s->repeat_pict = 1; break; case SEI_PIC_STRUCT_TOP_BOTTOM_TOP: case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM: s->repeat_pict = 2; break; case SEI_PIC_STRUCT_FRAME_DOUBLING: s->repeat_pict = 3; break; case SEI_PIC_STRUCT_FRAME_TRIPLING: s->repeat_pict = 5; break; default: s->repeat_pict = p->picture_structure == PICT_FRAME ? 
1 : 0; break; } } else { s->repeat_pict = p->picture_structure == PICT_FRAME ? 1 : 0; } if (p->picture_structure == PICT_FRAME) { s->picture_structure = AV_PICTURE_STRUCTURE_FRAME; if (sps->pic_struct_present_flag) { switch (p->sei.picture_timing.pic_struct) { case SEI_PIC_STRUCT_TOP_BOTTOM: case SEI_PIC_STRUCT_TOP_BOTTOM_TOP: s->field_order = AV_FIELD_TT; break; case SEI_PIC_STRUCT_BOTTOM_TOP: case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM: s->field_order = AV_FIELD_BB; break; default: s->field_order = AV_FIELD_PROGRESSIVE; break; } } else { if (field_poc[0] < field_poc[1]) s->field_order = AV_FIELD_TT; else if (field_poc[0] > field_poc[1]) s->field_order = AV_FIELD_BB; else s->field_order = AV_FIELD_PROGRESSIVE; } } else { if (p->picture_structure == PICT_TOP_FIELD) s->picture_structure = AV_PICTURE_STRUCTURE_TOP_FIELD; else s->picture_structure = AV_PICTURE_STRUCTURE_BOTTOM_FIELD; if (p->poc.frame_num == p->last_frame_num && p->last_picture_structure != AV_PICTURE_STRUCTURE_UNKNOWN && p->last_picture_structure != AV_PICTURE_STRUCTURE_FRAME && p->last_picture_structure != s->picture_structure) { if (p->last_picture_structure == AV_PICTURE_STRUCTURE_TOP_FIELD) s->field_order = AV_FIELD_TT; else s->field_order = AV_FIELD_BB; } else { s->field_order = AV_FIELD_UNKNOWN; } p->last_picture_structure = s->picture_structure; p->last_frame_num = p->poc.frame_num; } av_freep(&nal.rbsp_buffer); return 0; /* no need to evaluate the rest */ } } if (q264) { av_freep(&nal.rbsp_buffer); return 0; } /* didn't find a picture! */ av_log(avctx, AV_LOG_ERROR, "missing picture in access unit with size %d\n", buf_size); fail: av_freep(&nal.rbsp_buffer); return -1; }
15,511
1
static void balloon_stats_change_timer(VirtIOBalloon *s, int secs) { timer_mod(s->stats_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + secs * 1000); }
15,512
1
static int tta_decode_init(AVCodecContext * avctx) { TTAContext *s = avctx->priv_data; int i; s->avctx = avctx; // 30bytes includes a seektable with one frame if (avctx->extradata_size < 30) return -1; init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size); if (show_bits_long(&s->gb, 32) == bswap_32(ff_get_fourcc("TTA1"))) { /* signature */ skip_bits(&s->gb, 32); // if (get_bits_long(&s->gb, 32) != bswap_32(ff_get_fourcc("TTA1"))) { // av_log(s->avctx, AV_LOG_ERROR, "Missing magic\n"); // return -1; // } s->flags = get_le16(&s->gb); if (s->flags != 1 && s->flags != 3) { av_log(s->avctx, AV_LOG_ERROR, "Invalid flags\n"); return -1; } s->is_float = (s->flags == FORMAT_FLOAT); avctx->channels = s->channels = get_le16(&s->gb); avctx->bits_per_sample = get_le16(&s->gb); s->bps = (avctx->bits_per_sample + 7) / 8; avctx->sample_rate = get_le32(&s->gb); if(avctx->sample_rate > 1000000){ //prevent FRAME_TIME * avctx->sample_rate from overflowing and sanity check av_log(avctx, AV_LOG_ERROR, "sample_rate too large\n"); return -1; } s->data_length = get_le32(&s->gb); skip_bits(&s->gb, 32); // CRC32 of header if (s->is_float) { avctx->sample_fmt = SAMPLE_FMT_FLT; av_log(s->avctx, AV_LOG_ERROR, "Unsupported sample format. Please contact the developers.\n"); return -1; } else switch(s->bps) { // case 1: avctx->sample_fmt = SAMPLE_FMT_U8; break; case 2: avctx->sample_fmt = SAMPLE_FMT_S16; break; // case 3: avctx->sample_fmt = SAMPLE_FMT_S24; break; case 4: avctx->sample_fmt = SAMPLE_FMT_S32; break; default: av_log(s->avctx, AV_LOG_ERROR, "Invalid/unsupported sample format. Please contact the developers.\n"); return -1; } // FIXME: horribly broken, but directly from reference source #define FRAME_TIME 1.04489795918367346939 s->frame_length = (int)(FRAME_TIME * avctx->sample_rate); s->last_frame_length = s->data_length % s->frame_length; s->total_frames = s->data_length / s->frame_length + (s->last_frame_length ? 1 : 0); av_log(s->avctx, AV_LOG_DEBUG, "flags: %x chans: %d bps: %d rate: %d block: %d\n", s->flags, avctx->channels, avctx->bits_per_sample, avctx->sample_rate, avctx->block_align); av_log(s->avctx, AV_LOG_DEBUG, "data_length: %d frame_length: %d last: %d total: %d\n", s->data_length, s->frame_length, s->last_frame_length, s->total_frames); // FIXME: seek table for (i = 0; i < s->total_frames; i++) skip_bits(&s->gb, 32); skip_bits(&s->gb, 32); // CRC32 of seektable s->decode_buffer = av_mallocz(sizeof(int32_t)*s->frame_length*s->channels); } else { av_log(avctx, AV_LOG_ERROR, "Wrong extradata present\n"); return -1; } return 0; }
15,513
1
void show_licence(void) { printf( "ffmpeg version " FFMPEG_VERSION "\n" "Copyright (c) 2000, 2001, 2002 Gerard Lantau\n" "This program is free software; you can redistribute it and/or modify\n" "it under the terms of the GNU General Public License as published by\n" "the Free Software Foundation; either version 2 of the License, or\n" "(at your option) any later version.\n" "\n" "This program is distributed in the hope that it will be useful,\n" "but WITHOUT ANY WARRANTY; without even the implied warranty of\n" "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n" "GNU General Public License for more details.\n" "\n" "You should have received a copy of the GNU General Public License\n" "along with this program; if not, write to the Free Software\n" "Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n" ); exit(1); }
15,516
1
static int parse_hex64(DeviceState *dev, Property *prop, const char *str) { uint64_t *ptr = qdev_get_prop_ptr(dev, prop); if (sscanf(str, "%" PRIx64, ptr) != 1) return -EINVAL; return 0; }
15,518
1
const TPMDriverOps *tpm_get_backend_driver(const char *type) { int i; for (i = 0; i < TPM_MAX_DRIVERS && be_drivers[i] != NULL; i++) { if (!strcmp(TpmType_lookup[be_drivers[i]->type], type)) { return be_drivers[i]; } } return NULL; }
15,519
0
static void glib_pollfds_fill(int64_t *cur_timeout) { GMainContext *context = g_main_context_default(); int timeout = 0; int64_t timeout_ns; int n; g_main_context_prepare(context, &max_priority); glib_pollfds_idx = gpollfds->len; n = glib_n_poll_fds; do { GPollFD *pfds; glib_n_poll_fds = n; g_array_set_size(gpollfds, glib_pollfds_idx + glib_n_poll_fds); pfds = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx); n = g_main_context_query(context, max_priority, &timeout, pfds, glib_n_poll_fds); } while (n != glib_n_poll_fds); if (timeout < 0) { timeout_ns = -1; } else { timeout_ns = (int64_t)timeout * (int64_t)SCALE_MS; } *cur_timeout = qemu_soonest_timeout(timeout_ns, *cur_timeout); }
15,522
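glib_pollfds_fill() above follows GLib's documented resize-and-retry contract for g_main_context_query(), which returns how many GPollFDs it wants rather than failing when the caller's array is too small. A minimal sketch of the same pattern outside QEMU (function and variable names are illustrative; fds must have been created with g_array_new(FALSE, FALSE, sizeof(GPollFD))):

#include <glib.h>

/* Grow the GPollFD array until g_main_context_query() reports that
 * everything fits, then trim it to the exact count. */
void query_all_fds(GMainContext *ctx, GArray *fds,
                   gint max_priority, gint *timeout)
{
    gint n = fds->len ? (gint)fds->len : 8;

    for (;;) {
        g_array_set_size(fds, n);
        gint needed = g_main_context_query(ctx, max_priority, timeout,
                                           (GPollFD *)fds->data, n);
        if (needed <= n) {
            g_array_set_size(fds, needed);
            return;
        }
        n = needed;
    }
}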
0
static uint64_t musicpal_misc_read(void *opaque, target_phys_addr_t offset, unsigned size) { switch (offset) { case MP_MISC_BOARD_REVISION: return MP_BOARD_REVISION; default: return 0; } }
15,525
0
static void dmg_refresh_limits(BlockDriverState *bs, Error **errp) { bs->request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O supported */ }
15,527
0
static void sigp_store_adtl_status(void *arg) { SigpInfo *si = arg; if (!kvm_check_extension(kvm_state, KVM_CAP_S390_VECTOR_REGISTERS)) { set_sigp_status(si, SIGP_STAT_INVALID_ORDER); return; } /* cpu has to be stopped */ if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) { set_sigp_status(si, SIGP_STAT_INCORRECT_STATE); return; } /* parameter must be aligned to 1024-byte boundary */ if (si->param & 0x3ff) { set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); return; } cpu_synchronize_state(CPU(si->cpu)); if (kvm_s390_store_adtl_status(si->cpu, si->param)) { set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); return; } si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; }
15,528
0
static void rom_reset(void *unused) { Rom *rom; QTAILQ_FOREACH(rom, &roms, next) { if (rom->fw_file) { continue; } if (rom->data == NULL) continue; cpu_physical_memory_write_rom(rom->addr, rom->data, rom->romsize); if (rom->isrom) { /* rom needs to be written only once */ qemu_free(rom->data); rom->data = NULL; } } }
15,529
0
static void mp3_parse_info_tag(AVFormatContext *s, AVStream *st, MPADecodeHeader *c, uint32_t spf) { #define LAST_BITS(k, n) ((k) & ((1 << (n)) - 1)) #define MIDDLE_BITS(k, m, n) LAST_BITS((k) >> (m), ((n) - (m))) uint16_t crc; uint32_t v; char version[10]; uint32_t peak = 0; int32_t r_gain = INT32_MIN, a_gain = INT32_MIN; MP3DecContext *mp3 = s->priv_data; static const int64_t xing_offtbl[2][2] = {{32, 17}, {17,9}}; uint64_t fsize = avio_size(s->pb); /* Check for Xing / Info tag */ avio_skip(s->pb, xing_offtbl[c->lsf == 1][c->nb_channels == 1]); v = avio_rb32(s->pb); mp3->is_cbr = v == MKBETAG('I', 'n', 'f', 'o'); if (v != MKBETAG('X', 'i', 'n', 'g') && !mp3->is_cbr) return; v = avio_rb32(s->pb); if (v & XING_FLAG_FRAMES) mp3->frames = avio_rb32(s->pb); if (v & XING_FLAG_SIZE) mp3->header_filesize = avio_rb32(s->pb); if (fsize && mp3->header_filesize) { uint64_t min, delta; min = FFMIN(fsize, mp3->header_filesize); delta = FFMAX(fsize, mp3->header_filesize) - min; if (fsize > mp3->header_filesize && delta > min >> 4) { mp3->frames = 0; } else if (delta > min >> 4) { av_log(s, AV_LOG_WARNING, "filesize and duration do not match (growing file?)\n"); } } if (v & XING_FLAG_TOC) read_xing_toc(s, mp3->header_filesize, av_rescale_q(mp3->frames, (AVRational){spf, c->sample_rate}, st->time_base)); /* VBR quality */ if(v & 8) avio_skip(s->pb, 4); /* Encoder short version string */ memset(version, 0, sizeof(version)); avio_read(s->pb, version, 9); /* Info Tag revision + VBR method */ avio_r8(s->pb); /* Lowpass filter value */ avio_r8(s->pb); /* ReplayGain peak */ v = avio_rb32(s->pb); peak = av_rescale(v, 100000, 1 << 23); /* Radio ReplayGain */ v = avio_rb16(s->pb); if (MIDDLE_BITS(v, 13, 15) == 1) { r_gain = MIDDLE_BITS(v, 0, 8) * 10000; if (v & (1 << 9)) r_gain *= -1; } /* Audiophile ReplayGain */ v = avio_rb16(s->pb); if (MIDDLE_BITS(v, 13, 15) == 2) { a_gain = MIDDLE_BITS(v, 0, 8) * 10000; if (v & (1 << 9)) a_gain *= -1; } /* Encoding flags + ATH Type */ avio_r8(s->pb); /* if ABR {specified bitrate} else {minimal bitrate} */ avio_r8(s->pb); /* Encoder delays */ v= avio_rb24(s->pb); if(AV_RB32(version) == MKBETAG('L', 'A', 'M', 'E') || AV_RB32(version) == MKBETAG('L', 'a', 'v', 'f')) { mp3->start_pad = v>>12; mp3-> end_pad = v&4095; st->skip_samples = mp3->start_pad + 528 + 1; if (mp3->frames) st->end_discard_sample = -mp3->end_pad + 528 + 1 + mp3->frames * (int64_t)spf; if (!st->start_time) st->start_time = av_rescale_q(st->skip_samples, (AVRational){1, c->sample_rate}, st->time_base); av_log(s, AV_LOG_DEBUG, "pad %d %d\n", mp3->start_pad, mp3-> end_pad); } /* Misc */ avio_r8(s->pb); /* MP3 gain */ avio_r8(s->pb); /* Preset and surround info */ avio_rb16(s->pb); /* Music length */ avio_rb32(s->pb); /* Music CRC */ avio_rb16(s->pb); /* Info Tag CRC */ crc = ffio_get_checksum(s->pb); v = avio_rb16(s->pb); if (v == crc) { ff_replaygain_export_raw(st, r_gain, peak, a_gain, 0); av_dict_set(&st->metadata, "encoder", version, 0); } }
15,531
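The LAST_BITS()/MIDDLE_BITS() extraction in mp3_parse_info_tag() is terse; here is a worked example decoding one hypothetical LAME radio-gain field the same way (the field value is made up; the 1/100000 dB unit matches what ff_replaygain_export_raw() expects):

#include <stdint.h>
#include <stdio.h>

#define LAST_BITS(k, n)      ((k) & ((1 << (n)) - 1))
#define MIDDLE_BITS(k, m, n) LAST_BITS((k) >> (m), ((n) - (m)))

int main(void)
{
    /* Hypothetical radio-gain field: name code 1 in bits 13..15,
     * sign in bit 9, magnitude 47 (tenths of a dB) in the low bits. */
    uint16_t v = (1 << 13) | (1 << 9) | 47;

    if (MIDDLE_BITS(v, 13, 15) == 1) {
        int32_t r_gain = MIDDLE_BITS(v, 0, 8) * 10000;
        if (v & (1 << 9))
            r_gain *= -1;
        /* prints -470000, i.e. -4.7 dB in units of 1/100000 dB */
        printf("radio gain = %d\n", (int)r_gain);
    }
    return 0;
}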
0
int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align) { uint8_t *buf; int size = av_samples_get_buffer_size(NULL, nb_channels, nb_samples, sample_fmt, align); if (size < 0) return size; buf = av_mallocz(size); if (!buf) return AVERROR(ENOMEM); size = av_samples_fill_arrays(audio_data, linesize, buf, nb_channels, nb_samples, sample_fmt, align); if (size < 0) { av_free(buf); return size; } return 0; }
15,532
0
static void do_interrupt_protected(int intno, int is_int, int error_code, unsigned int next_eip, int is_hw) { SegmentCache *dt; uint8_t *ptr, *ssp; int type, dpl, selector, ss_dpl, cpl, sp_mask; int has_error_code, new_stack, shift; uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2; uint32_t old_eip; has_error_code = 0; if (!is_int && !is_hw) { switch(intno) { case 8: case 10: case 11: case 12: case 13: case 14: case 17: has_error_code = 1; break; } } dt = &env->idt; if (intno * 8 + 7 > dt->limit) raise_exception_err(EXCP0D_GPF, intno * 8 + 2); ptr = dt->base + intno * 8; e1 = ldl_kernel(ptr); e2 = ldl_kernel(ptr + 4); /* check gate type */ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f; switch(type) { case 5: /* task gate */ /* must do that check here to return the correct error code */ if (!(e2 & DESC_P_MASK)) raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2); switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL); if (has_error_code) { int mask; /* push the error code */ shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1; if (env->segs[R_SS].flags & DESC_B_MASK) mask = 0xffffffff; else mask = 0xffff; esp = (env->regs[R_ESP] - (2 << shift)) & mask; ssp = env->segs[R_SS].base + esp; if (shift) stl_kernel(ssp, error_code); else stw_kernel(ssp, error_code); env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask); } return; case 6: /* 286 interrupt gate */ case 7: /* 286 trap gate */ case 14: /* 386 interrupt gate */ case 15: /* 386 trap gate */ break; default: raise_exception_err(EXCP0D_GPF, intno * 8 + 2); break; } dpl = (e2 >> DESC_DPL_SHIFT) & 3; cpl = env->hflags & HF_CPL_MASK; /* check privilege if software int */ if (is_int && dpl < cpl) raise_exception_err(EXCP0D_GPF, intno * 8 + 2); /* check valid bit */ if (!(e2 & DESC_P_MASK)) raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2); selector = e1 >> 16; offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff); if ((selector & 0xfffc) == 0) raise_exception_err(EXCP0D_GPF, 0); if (load_segment(&e1, &e2, selector) != 0) raise_exception_err(EXCP0D_GPF, selector & 0xfffc); if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) raise_exception_err(EXCP0D_GPF, selector & 0xfffc); dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (dpl > cpl) raise_exception_err(EXCP0D_GPF, selector & 0xfffc); if (!(e2 & DESC_P_MASK)) raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc); if (!(e2 & DESC_C_MASK) && dpl < cpl) { /* to inner privilege */ get_ss_esp_from_tss(&ss, &esp, dpl); if ((ss & 0xfffc) == 0) raise_exception_err(EXCP0A_TSS, ss & 0xfffc); if ((ss & 3) != dpl) raise_exception_err(EXCP0A_TSS, ss & 0xfffc); if (load_segment(&ss_e1, &ss_e2, ss) != 0) raise_exception_err(EXCP0A_TSS, ss & 0xfffc); ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; if (ss_dpl != dpl) raise_exception_err(EXCP0A_TSS, ss & 0xfffc); if (!(ss_e2 & DESC_S_MASK) || (ss_e2 & DESC_CS_MASK) || !(ss_e2 & DESC_W_MASK)) raise_exception_err(EXCP0A_TSS, ss & 0xfffc); if (!(ss_e2 & DESC_P_MASK)) raise_exception_err(EXCP0A_TSS, ss & 0xfffc); new_stack = 1; sp_mask = get_sp_mask(ss_e2); ssp = get_seg_base(ss_e1, ss_e2); } else if ((e2 & DESC_C_MASK) || dpl == cpl) { /* to same privilege */ new_stack = 0; sp_mask = get_sp_mask(env->segs[R_SS].flags); ssp = env->segs[R_SS].base; esp = ESP; } else { raise_exception_err(EXCP0D_GPF, selector & 0xfffc); new_stack = 0; /* avoid warning */ sp_mask = 0; /* avoid warning */ ssp = NULL; /* avoid warning */ esp = 0; /* avoid warning */ } shift = type >> 3; #if 0 /* XXX: check that enough room is available */ push_size = 6 + (new_stack << 2) + (has_error_code << 1); if (env->eflags & VM_MASK) 
push_size += 8; push_size <<= shift; #endif if (is_int) old_eip = next_eip; else old_eip = env->eip; if (shift == 1) { if (env->eflags & VM_MASK) { PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector); PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector); PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector); PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector); } if (new_stack) { PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector); PUSHL(ssp, esp, sp_mask, ESP); } PUSHL(ssp, esp, sp_mask, compute_eflags()); PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector); PUSHL(ssp, esp, sp_mask, old_eip); if (has_error_code) { PUSHL(ssp, esp, sp_mask, error_code); } } else { if (new_stack) { PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector); PUSHW(ssp, esp, sp_mask, ESP); } PUSHW(ssp, esp, sp_mask, compute_eflags()); PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector); PUSHW(ssp, esp, sp_mask, old_eip); if (has_error_code) { PUSHW(ssp, esp, sp_mask, error_code); } } if (new_stack) { ss = (ss & ~3) | dpl; cpu_x86_load_seg_cache(env, R_SS, ss, ssp, get_seg_limit(ss_e1, ss_e2), ss_e2); } ESP = (ESP & ~sp_mask) | (esp & sp_mask); selector = (selector & ~3) | dpl; cpu_x86_load_seg_cache(env, R_CS, selector, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); cpu_x86_set_cpl(env, dpl); env->eip = offset; /* interrupt gate clear IF mask */ if ((type & 1) == 0) { env->eflags &= ~IF_MASK; } env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK); }
15,533
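The e1/e2 word unpacking in do_interrupt_protected() follows the standard x86 gate-descriptor layout; below is a small standalone check with the usual QEMU bit positions inlined as constants (the descriptor values themselves are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* A 386 interrupt gate: selector 0x08, handler offset 0x00c01234,
     * present (bit 15 of e2), DPL 0, type 14. */
    uint32_t e1 = (0x0008u << 16) | 0x1234u;
    uint32_t e2 = 0x00c00000u | 0x8e00u;

    uint32_t selector = e1 >> 16;
    uint32_t offset   = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    int type          = (e2 >> 8) & 0x1f;  /* DESC_TYPE_SHIFT == 8 */
    int present       = (e2 >> 15) & 1;    /* DESC_P_MASK == 1 << 15 */

    /* prints: sel=0x8 off=0xc01234 type=14 present=1 */
    printf("sel=%#x off=%#x type=%d present=%d\n",
           (unsigned)selector, (unsigned)offset, type, present);
    return 0;
}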
0
static int v9fs_synth_remove(FsContext *ctx, const char *path) { errno = EPERM; return -1; }
15,536
0
static void dec_b(DisasContext *dc) { if (dc->r0 == R_RA) { LOG_DIS("ret\n"); } else if (dc->r0 == R_EA) { LOG_DIS("eret\n"); } else if (dc->r0 == R_BA) { LOG_DIS("bret\n"); } else { LOG_DIS("b r%d\n", dc->r0); } /* restore IE.IE in case of an eret */ if (dc->r0 == R_EA) { TCGv t0 = tcg_temp_new(); int l1 = gen_new_label(); tcg_gen_andi_tl(t0, cpu_ie, IE_EIE); tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE); tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_EIE, l1); tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE); gen_set_label(l1); tcg_temp_free(t0); } else if (dc->r0 == R_BA) { TCGv t0 = tcg_temp_new(); int l1 = gen_new_label(); tcg_gen_andi_tl(t0, cpu_ie, IE_BIE); tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE); tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_BIE, l1); tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE); gen_set_label(l1); tcg_temp_free(t0); } tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]); dc->is_jmp = DISAS_JUMP; }
15,538
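The branchy TCG sequence that dec_b() emits for eret reduces to copying IE.EIE into IE.IE (and the bret path is the same with IE.BIE); a plain-C sketch of that net effect, with bit values matching the lm32 IE layout used above:

#include <stdint.h>
#include <stdio.h>

#define IE_IE  (1 << 0)
#define IE_EIE (1 << 1)

/* What the generated code computes: restore IE from the saved EIE bit. */
static uint32_t eret_restore_ie(uint32_t ie)
{
    if (ie & IE_EIE)
        ie |= IE_IE;
    else
        ie &= ~IE_IE;
    return ie;
}

int main(void)
{
    printf("%x %x\n", eret_restore_ie(IE_EIE), eret_restore_ie(0));
    return 0;
}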
0
static void query_facilities(void) { unsigned long hwcap = qemu_getauxval(AT_HWCAP); /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this is present on all 64-bit systems, but let's check for it anyway. */ if (hwcap & HWCAP_S390_STFLE) { register int r0 __asm__("0"); register void *r1 __asm__("1"); /* stfle 0(%r1) */ r1 = &facilities; asm volatile(".word 0xb2b0,0x1000" : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc"); } }
15,539
0
static void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUOpenRISCState *env) { int err = 0; abi_ulong frame_addr; unsigned long return_ip; struct target_rt_sigframe *frame; abi_ulong info_addr, uc_addr; frame_addr = get_sigframe(ka, env, sizeof(*frame)); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { goto give_sigsegv; } info_addr = frame_addr + offsetof(struct target_rt_sigframe, info); __put_user(info_addr, &frame->pinfo); uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc); __put_user(uc_addr, &frame->puc); if (ka->sa_flags & SA_SIGINFO) { copy_siginfo_to_user(&frame->info, info); } /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/ __put_user(0, &frame->uc.tuc_flags); __put_user(0, &frame->uc.tuc_link); __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags); __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); err |= setup_sigcontext(&frame->sc, env, set->sig[0]); /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/ if (err) { goto give_sigsegv; } /* trampoline - the desired return ip is the retcode itself */ return_ip = (unsigned long)&frame->retcode; /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */ __put_user(0xa960, (short *)(frame->retcode + 0)); __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2)); __put_user(0x20000001, (unsigned long *)(frame->retcode + 4)); __put_user(0x15000000, (unsigned long *)(frame->retcode + 8)); if (err) { goto give_sigsegv; } /* TODO what is the current->exec_domain stuff and invmap ? */ /* Set up registers for signal handler */ env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */ env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */ env->gpr[3] = (unsigned long)sig; /* arg 1: signo */ env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */ env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */ /* actually move the usp to reflect the stacked frame */ env->gpr[1] = (unsigned long)frame; return; give_sigsegv: unlock_user_struct(frame, frame_addr, 1); if (sig == TARGET_SIGSEGV) { ka->_sa_handler = TARGET_SIG_DFL; } force_sig(TARGET_SIGSEGV); }
15,540
0
static void imx_enet_do_tx(IMXFECState *s) { int frame_size = 0; uint8_t frame[ENET_MAX_FRAME_SIZE]; uint8_t *ptr = frame; uint32_t addr = s->tx_descriptor; while (1) { IMXENETBufDesc bd; int len; imx_enet_read_bd(&bd, addr); FEC_PRINTF("tx_bd %x flags %04x len %d data %08x option %04x " "status %04x\n", addr, bd.flags, bd.length, bd.data, bd.option, bd.status); if ((bd.flags & ENET_BD_R) == 0) { /* Run out of descriptors to transmit. */ break; } len = bd.length; if (frame_size + len > ENET_MAX_FRAME_SIZE) { len = ENET_MAX_FRAME_SIZE - frame_size; s->regs[ENET_EIR] |= ENET_INT_BABT; } dma_memory_read(&address_space_memory, bd.data, ptr, len); ptr += len; frame_size += len; if (bd.flags & ENET_BD_L) { if (bd.option & ENET_BD_PINS) { struct ip_header *ip_hd = PKT_GET_IP_HDR(frame); if (IP_HEADER_VERSION(ip_hd) == 4) { net_checksum_calculate(frame, frame_size); } } if (bd.option & ENET_BD_IINS) { struct ip_header *ip_hd = PKT_GET_IP_HDR(frame); /* We compute checksum only for IPv4 frames */ if (IP_HEADER_VERSION(ip_hd) == 4) { uint16_t csum; ip_hd->ip_sum = 0; csum = net_raw_checksum((uint8_t *)ip_hd, sizeof(*ip_hd)); ip_hd->ip_sum = cpu_to_be16(csum); } } /* Last buffer in frame: send the accumulated frame, not just the * final buffer's length. */ qemu_send_packet(qemu_get_queue(s->nic), frame, frame_size); ptr = frame; frame_size = 0; if (bd.option & ENET_BD_TX_INT) { s->regs[ENET_EIR] |= ENET_INT_TXF; } } if (bd.option & ENET_BD_TX_INT) { s->regs[ENET_EIR] |= ENET_INT_TXB; } bd.flags &= ~ENET_BD_R; /* Write back the modified descriptor. */ imx_enet_write_bd(&bd, addr); /* Advance to the next descriptor. */ if ((bd.flags & ENET_BD_W) != 0) { addr = s->regs[ENET_TDSR]; } else { addr += sizeof(bd); } } s->tx_descriptor = addr; imx_eth_update(s); }
15,541
0
static uint64_t ne2000_read(void *opaque, target_phys_addr_t addr, unsigned size) { NE2000State *s = opaque; if (addr < 0x10 && size == 1) { return ne2000_ioport_read(s, addr); } else if (addr == 0x10) { if (size <= 2) { return ne2000_asic_ioport_read(s, addr); } else { return ne2000_asic_ioport_readl(s, addr); } } else if (addr == 0x1f && size == 1) { return ne2000_reset_ioport_read(s, addr); } return ((uint64_t)1 << (size * 8)) - 1; }
15,542
0
static const OptionDef *find_option(const OptionDef *po, const char *name) { const char *p = strchr(name, ':'); int len = p ? p - name : strlen(name); while (po->name != NULL) { if (!strncmp(name, po->name, len) && strlen(po->name) == len) break; po++; } return po; }
15,543
0
uint32 float64_to_uint32( float64 a STATUS_PARAM ) { int64_t v; uint32 res; v = float64_to_int64(a STATUS_VAR); if (v < 0) { res = 0; float_raise( float_flag_invalid STATUS_VAR); } else if (v > 0xffffffff) { res = 0xffffffff; float_raise( float_flag_invalid STATUS_VAR); } else { res = v; } return res; }
15,544
0
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq) { VirtIOGPU *g = VIRTIO_GPU(vdev); VirtQueueElement elem; size_t s; struct virtio_gpu_update_cursor cursor_info; if (!virtio_queue_ready(vq)) { return; } while (virtqueue_pop(vq, &elem)) { s = iov_to_buf(elem.out_sg, elem.out_num, 0, &cursor_info, sizeof(cursor_info)); if (s != sizeof(cursor_info)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: cursor size incorrect %zu vs %zu\n", __func__, s, sizeof(cursor_info)); } else { update_cursor(g, &cursor_info); } virtqueue_push(vq, &elem, 0); virtio_notify(vdev, vq); } }
15,545
0
static int iscsi_truncate(BlockDriverState *bs, int64_t offset) { IscsiLun *iscsilun = bs->opaque; Error *local_err = NULL; if (iscsilun->type != TYPE_DISK) { return -ENOTSUP; } iscsi_readcapacity_sync(iscsilun, &local_err); if (local_err != NULL) { error_free(local_err); return -EIO; } if (offset > iscsi_getlength(bs)) { return -EINVAL; } if (iscsilun->allocationmap != NULL) { g_free(iscsilun->allocationmap); iscsilun->allocationmap = iscsi_allocationmap_init(iscsilun); } return 0; }
15,546
0
static void save_native_fp_fxsave(CPUState *env) { struct fpxstate *fp = &fpx1; int fptag, i, j; uint16_t fpuc; asm volatile ("fxsave %0" : : "m" (*fp)); env->fpuc = fp->fpuc; env->fpstt = (fp->fpus >> 11) & 7; env->fpus = fp->fpus & ~0x3800; fptag = fp->fptag ^ 0xff; for(i = 0;i < 8; i++) { env->fptags[i] = (fptag >> i) & 1; } j = env->fpstt; for(i = 0;i < 8; i++) { memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10); j = (j + 1) & 7; } if (env->cpuid_features & CPUID_SSE) { env->mxcsr = fp->mxcsr; memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16); } /* we must restore the default rounding state */ asm volatile ("fninit"); fpuc = 0x037f | (env->fpuc & (3 << 10)); asm volatile("fldcw %0" : : "m" (fpuc)); }
15,547
0
static int vmdk_create(const char *filename, QemuOpts *opts, Error **errp) { int idx = 0; BlockDriverState *new_bs = NULL; Error *local_err = NULL; char *desc = NULL; int64_t total_size = 0, filesize; char *adapter_type = NULL; char *backing_file = NULL; char *fmt = NULL; int flags = 0; int ret = 0; bool flat, split, compress; GString *ext_desc_lines; char path[PATH_MAX], prefix[PATH_MAX], postfix[PATH_MAX]; const int64_t split_size = 0x80000000; /* VMDK has constant split size */ const char *desc_extent_line; char parent_desc_line[BUF_SIZE] = ""; uint32_t parent_cid = 0xffffffff; uint32_t number_heads = 16; bool zeroed_grain = false; uint32_t desc_offset = 0, desc_len; const char desc_template[] = "# Disk DescriptorFile\n" "version=1\n" "CID=%" PRIx32 "\n" "parentCID=%" PRIx32 "\n" "createType=\"%s\"\n" "%s" "\n" "# Extent description\n" "%s" "\n" "# The Disk Data Base\n" "#DDB\n" "\n" "ddb.virtualHWVersion = \"%d\"\n" "ddb.geometry.cylinders = \"%" PRId64 "\"\n" "ddb.geometry.heads = \"%" PRIu32 "\"\n" "ddb.geometry.sectors = \"63\"\n" "ddb.adapterType = \"%s\"\n"; ext_desc_lines = g_string_new(NULL); if (filename_decompose(filename, path, prefix, postfix, PATH_MAX, errp)) { ret = -EINVAL; goto exit; } /* Read out options */ total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), BDRV_SECTOR_SIZE); adapter_type = qemu_opt_get_del(opts, BLOCK_OPT_ADAPTER_TYPE); backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); if (qemu_opt_get_bool_del(opts, BLOCK_OPT_COMPAT6, false)) { flags |= BLOCK_FLAG_COMPAT6; } fmt = qemu_opt_get_del(opts, BLOCK_OPT_SUBFMT); if (qemu_opt_get_bool_del(opts, BLOCK_OPT_ZEROED_GRAIN, false)) { zeroed_grain = true; } if (!adapter_type) { adapter_type = g_strdup("ide"); } else if (strcmp(adapter_type, "ide") && strcmp(adapter_type, "buslogic") && strcmp(adapter_type, "lsilogic") && strcmp(adapter_type, "legacyESX")) { error_setg(errp, "Unknown adapter type: '%s'", adapter_type); ret = -EINVAL; goto exit; } if (strcmp(adapter_type, "ide") != 0) { /* that's the number of heads with which vmware operates when creating, exporting, etc. 
vmdk files with a non-ide adapter type */ number_heads = 255; } if (!fmt) { /* Default format to monolithicSparse */ fmt = g_strdup("monolithicSparse"); } else if (strcmp(fmt, "monolithicFlat") && strcmp(fmt, "monolithicSparse") && strcmp(fmt, "twoGbMaxExtentSparse") && strcmp(fmt, "twoGbMaxExtentFlat") && strcmp(fmt, "streamOptimized")) { error_setg(errp, "Unknown subformat: '%s'", fmt); ret = -EINVAL; goto exit; } split = !(strcmp(fmt, "twoGbMaxExtentFlat") && strcmp(fmt, "twoGbMaxExtentSparse")); flat = !(strcmp(fmt, "monolithicFlat") && strcmp(fmt, "twoGbMaxExtentFlat")); compress = !strcmp(fmt, "streamOptimized"); if (flat) { desc_extent_line = "RW %" PRId64 " FLAT \"%s\" 0\n"; } else { desc_extent_line = "RW %" PRId64 " SPARSE \"%s\"\n"; } if (flat && backing_file) { error_setg(errp, "Flat image can't have backing file"); ret = -ENOTSUP; goto exit; } if (flat && zeroed_grain) { error_setg(errp, "Flat image can't enable zeroed grain"); ret = -ENOTSUP; goto exit; } if (backing_file) { BlockDriverState *bs = NULL; ret = bdrv_open(&bs, backing_file, NULL, NULL, BDRV_O_NO_BACKING, NULL, errp); if (ret != 0) { goto exit; } if (strcmp(bs->drv->format_name, "vmdk")) { bdrv_unref(bs); ret = -EINVAL; goto exit; } parent_cid = vmdk_read_cid(bs, 0); bdrv_unref(bs); snprintf(parent_desc_line, sizeof(parent_desc_line), "parentFileNameHint=\"%s\"", backing_file); } /* Create extents */ filesize = total_size; while (filesize > 0) { char desc_line[BUF_SIZE]; char ext_filename[PATH_MAX]; char desc_filename[PATH_MAX]; int64_t size = filesize; if (split && size > split_size) { size = split_size; } if (split) { snprintf(desc_filename, sizeof(desc_filename), "%s-%c%03d%s", prefix, flat ? 'f' : 's', ++idx, postfix); } else if (flat) { snprintf(desc_filename, sizeof(desc_filename), "%s-flat%s", prefix, postfix); } else { snprintf(desc_filename, sizeof(desc_filename), "%s%s", prefix, postfix); } snprintf(ext_filename, sizeof(ext_filename), "%s%s", path, desc_filename); if (vmdk_create_extent(ext_filename, size, flat, compress, zeroed_grain, opts, errp)) { ret = -EINVAL; goto exit; } filesize -= size; /* Format description line */ snprintf(desc_line, sizeof(desc_line), desc_extent_line, size / BDRV_SECTOR_SIZE, desc_filename); g_string_append(ext_desc_lines, desc_line); } /* generate descriptor file */ desc = g_strdup_printf(desc_template, g_random_int(), parent_cid, fmt, parent_desc_line, ext_desc_lines->str, (flags & BLOCK_FLAG_COMPAT6 ? 6 : 4), total_size / (int64_t)(63 * number_heads * BDRV_SECTOR_SIZE), number_heads, adapter_type); desc_len = strlen(desc); /* the descriptor offset = 0x200 */ if (!split && !flat) { desc_offset = 0x200; } else { ret = bdrv_create_file(filename, opts, &local_err); if (ret < 0) { error_propagate(errp, local_err); goto exit; } } assert(new_bs == NULL); ret = bdrv_open(&new_bs, filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, NULL, &local_err); if (ret < 0) { error_propagate(errp, local_err); goto exit; } ret = bdrv_pwrite(new_bs, desc_offset, desc, desc_len); if (ret < 0) { error_setg_errno(errp, -ret, "Could not write description"); goto exit; } /* bdrv_pwrite write padding zeros to align to sector, we don't need that * for description file */ if (desc_offset == 0) { ret = bdrv_truncate(new_bs, desc_len); if (ret < 0) { error_setg_errno(errp, -ret, "Could not truncate file"); } } exit: if (new_bs) { bdrv_unref(new_bs); } g_free(adapter_type); g_free(backing_file); g_free(fmt); g_free(desc); g_string_free(ext_desc_lines, true); return ret; }
15,548
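For reference, this is roughly what desc_template in vmdk_create() expands to for a hypothetical 1 GiB monolithicSparse image called test.vmdk (the CID is random at creation time; cylinders = total_size / (63 * 16 * 512), which truncates to 2080 here):

# Disk DescriptorFile
version=1
CID=7e49fc61
parentCID=ffffffff
createType="monolithicSparse"

# Extent description
RW 2097152 SPARSE "test.vmdk"

# The Disk Data Base
#DDB

ddb.virtualHWVersion = "4"
ddb.geometry.cylinders = "2080"
ddb.geometry.heads = "16"
ddb.geometry.sectors = "63"
ddb.adapterType = "ide"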
0
void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap, int64_t cur_sector, int64_t nr_sectors) { assert(bdrv_dirty_bitmap_enabled(bitmap)); hbitmap_reset(bitmap->bitmap, cur_sector, nr_sectors); }
15,551
0
static int set_string(void *obj, const AVOption *o, const char *val, uint8_t **dst) { av_freep(dst); *dst = av_strdup(val); return 0; }
15,553
0
static int mkv_write_native_codecprivate(AVFormatContext *s, AVCodecParameters *par, AVIOContext *dyn_cp) { switch (par->codec_id) { case AV_CODEC_ID_VORBIS: case AV_CODEC_ID_THEORA: return put_xiph_codecpriv(s, dyn_cp, par); case AV_CODEC_ID_FLAC: return put_flac_codecpriv(s, dyn_cp, par); case AV_CODEC_ID_WAVPACK: return put_wv_codecpriv(dyn_cp, par); case AV_CODEC_ID_H264: return ff_isom_write_avcc(dyn_cp, par->extradata, par->extradata_size); case AV_CODEC_ID_HEVC: ff_isom_write_hvcc(dyn_cp, par->extradata, par->extradata_size, 0); return 0; case AV_CODEC_ID_ALAC: if (par->extradata_size < 36) { av_log(s, AV_LOG_ERROR, "Invalid extradata found, ALAC expects a 36-byte " "QuickTime atom."); return AVERROR_INVALIDDATA; } else avio_write(dyn_cp, par->extradata + 12, par->extradata_size - 12); break; default: if (par->codec_id == AV_CODEC_ID_PRORES && ff_codec_get_id(ff_codec_movvideo_tags, par->codec_tag) == AV_CODEC_ID_PRORES) { avio_wl32(dyn_cp, par->codec_tag); } else if (par->extradata_size && par->codec_id != AV_CODEC_ID_TTA) avio_write(dyn_cp, par->extradata, par->extradata_size); } return 0; }
15,554
1
static void set_cfg_value(bool is_max, int index, int value) { if (is_max) { cfg.buckets[index].max = value; } else { cfg.buckets[index].avg = value; } }
15,556
1
static int coroutine_fn mirror_iteration(MirrorBlockJob *s) { BlockDriverState *source = s->common.bs; BlockDriverState *target = s->target; QEMUIOVector qiov; int ret, nb_sectors; int64_t end; struct iovec iov; end = s->common.len >> BDRV_SECTOR_BITS; s->sector_num = bdrv_get_next_dirty(source, s->sector_num); nb_sectors = MIN(BDRV_SECTORS_PER_DIRTY_CHUNK, end - s->sector_num); bdrv_reset_dirty(source, s->sector_num, nb_sectors); /* Copy the dirty cluster. */ iov.iov_base = s->buf; iov.iov_len = nb_sectors * 512; qemu_iovec_init_external(&qiov, &iov, 1); trace_mirror_one_iteration(s, s->sector_num, nb_sectors); ret = bdrv_co_readv(source, s->sector_num, nb_sectors, &qiov); if (ret < 0) { return ret; } return bdrv_co_writev(target, s->sector_num, nb_sectors, &qiov); }
15,557
1
static void decode_rr_divide(CPUTriCoreState *env, DisasContext *ctx) { uint32_t op2; int r1, r2, r3; TCGv temp, temp2; op2 = MASK_OP_RR_OP2(ctx->opcode); r3 = MASK_OP_RR_D(ctx->opcode); r2 = MASK_OP_RR_S2(ctx->opcode); r1 = MASK_OP_RR_S1(ctx->opcode); switch (op2) { case OPC2_32_RR_BMERGE: gen_helper_bmerge(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_BSPLIT: gen_bsplit(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]); break; case OPC2_32_RR_DVINIT_B: gen_dvinit_b(env, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_DVINIT_BU: temp = tcg_temp_new(); temp2 = tcg_temp_new(); /* reset av */ tcg_gen_movi_tl(cpu_PSW_AV, 0); if (!tricore_feature(env, TRICORE_FEATURE_131)) { /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */ tcg_gen_neg_tl(temp, cpu_gpr_d[r3+1]); /* use cpu_PSW_AV to compare against 0 */ tcg_gen_movcond_tl(TCG_COND_LT, temp, cpu_gpr_d[r3+1], cpu_PSW_AV, temp, cpu_gpr_d[r3+1]); tcg_gen_neg_tl(temp2, cpu_gpr_d[r2]); tcg_gen_movcond_tl(TCG_COND_LT, temp2, cpu_gpr_d[r2], cpu_PSW_AV, temp2, cpu_gpr_d[r2]); tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2); } else { /* overflow = (D[b] == 0) */ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0); } tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); /* sv */ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* write result */ tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 8); tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 24); tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp); tcg_temp_free(temp); tcg_temp_free(temp2); break; case OPC2_32_RR_DVINIT_H: gen_dvinit_h(env, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_DVINIT_HU: temp = tcg_temp_new(); temp2 = tcg_temp_new(); /* reset av */ tcg_gen_movi_tl(cpu_PSW_AV, 0); if (!tricore_feature(env, TRICORE_FEATURE_131)) { /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */ tcg_gen_neg_tl(temp, cpu_gpr_d[r3+1]); /* use cpu_PSW_AV to compare against 0 */ tcg_gen_movcond_tl(TCG_COND_LT, temp, cpu_gpr_d[r3+1], cpu_PSW_AV, temp, cpu_gpr_d[r3+1]); tcg_gen_neg_tl(temp2, cpu_gpr_d[r2]); tcg_gen_movcond_tl(TCG_COND_LT, temp2, cpu_gpr_d[r2], cpu_PSW_AV, temp2, cpu_gpr_d[r2]); tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2); } else { /* overflow = (D[b] == 0) */ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0); } tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); /* sv */ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* write result */ tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); tcg_gen_shri_tl(cpu_gpr_d[r3+1], temp, 16); tcg_gen_shli_tl(cpu_gpr_d[r3], temp, 16); tcg_temp_free(temp); tcg_temp_free(temp2); break; case OPC2_32_RR_DVINIT: temp = tcg_temp_new(); temp2 = tcg_temp_new(); /* overflow = ((D[b] == 0) || ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */ tcg_gen_setcondi_tl(TCG_COND_EQ, temp, cpu_gpr_d[r2], 0xffffffff); tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r1], 0x80000000); tcg_gen_and_tl(temp, temp, temp2); tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r2], 0); tcg_gen_or_tl(cpu_PSW_V, temp, temp2); tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); /* sv */ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* reset av */ tcg_gen_movi_tl(cpu_PSW_AV, 0); /* write result */ tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); /* sign extend to high reg */ tcg_gen_sari_tl(cpu_gpr_d[r3+1], cpu_gpr_d[r1], 31); tcg_temp_free(temp); tcg_temp_free(temp2); break; case OPC2_32_RR_DVINIT_U: /* overflow = (D[b] == 0) */ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0); tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 
31); /* sv */ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* reset av */ tcg_gen_movi_tl(cpu_PSW_AV, 0); /* write result */ tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); /* zero extend to high reg*/ tcg_gen_movi_tl(cpu_gpr_d[r3+1], 0); break; case OPC2_32_RR_PARITY: gen_helper_parity(cpu_gpr_d[r3], cpu_gpr_d[r1]); break; case OPC2_32_RR_UNPACK: gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]); break; } }
15,558
1
static int local_unlinkat(FsContext *ctx, V9fsPath *dir, const char *name, int flags) { int ret; V9fsString fullname; char *buffer; v9fs_string_init(&fullname); v9fs_string_sprintf(&fullname, "%s/%s", dir->data, name); if (ctx->export_flags & V9FS_SM_MAPPED_FILE) { if (flags == AT_REMOVEDIR) { /* * If we are removing a directory, also remove the * .virtfs_metadata directory contained in it */ buffer = g_strdup_printf("%s/%s/%s", ctx->fs_root, fullname.data, VIRTFS_META_DIR); ret = remove(buffer); g_free(buffer); if (ret < 0 && errno != ENOENT) { /* * We didn't have the .virtfs_metadata file. Maybe the file * was created in non-mapped mode? Ignore ENOENT. */ goto err_out; } } /* * Now remove the name from the parent directory's * .virtfs_metadata directory. */ buffer = local_mapped_attr_path(ctx, fullname.data); ret = remove(buffer); g_free(buffer); if (ret < 0 && errno != ENOENT) { /* * We didn't have the .virtfs_metadata file. Maybe the file * was created in non-mapped mode? Ignore ENOENT. */ goto err_out; } } /* Remove the name finally */ buffer = rpath(ctx, fullname.data); ret = remove(buffer); g_free(buffer); err_out: v9fs_string_free(&fullname); return ret; }
15,559
1
static uint64_t pl011_read(void *opaque, hwaddr offset, unsigned size) { PL011State *s = (PL011State *)opaque; uint32_t c; if (offset >= 0xfe0 && offset < 0x1000) { return s->id[(offset - 0xfe0) >> 2]; } switch (offset >> 2) { case 0: /* UARTDR */ s->flags &= ~PL011_FLAG_RXFF; c = s->read_fifo[s->read_pos]; if (s->read_count > 0) { s->read_count--; if (++s->read_pos == 16) s->read_pos = 0; } if (s->read_count == 0) { s->flags |= PL011_FLAG_RXFE; } if (s->read_count == s->read_trigger - 1) s->int_level &= ~ PL011_INT_RX; pl011_update(s); if (s->chr) { qemu_chr_accept_input(s->chr); } return c; case 1: /* UARTRSR */ return 0; case 6: /* UARTFR */ return s->flags; case 8: /* UARTILPR */ return s->ilpr; case 9: /* UARTIBRD */ return s->ibrd; case 10: /* UARTFBRD */ return s->fbrd; case 11: /* UARTLCR_H */ return s->lcr; case 12: /* UARTCR */ return s->cr; case 13: /* UARTIFLS */ return s->ifl; case 14: /* UARTIMSC */ return s->int_enabled; case 15: /* UARTRIS */ return s->int_level; case 16: /* UARTMIS */ return s->int_level & s->int_enabled; case 18: /* UARTDMACR */ return s->dmacr; default: qemu_log_mask(LOG_GUEST_ERROR, "pl011_read: Bad offset %x\n", (int)offset); return 0; } }
15,560
1
static BlockAIOCB *inject_error(BlockDriverState *bs, BlockCompletionFunc *cb, void *opaque, BlkdebugRule *rule) { BDRVBlkdebugState *s = bs->opaque; int error = rule->options.inject.error; struct BlkdebugAIOCB *acb; QEMUBH *bh; if (rule->options.inject.once) { QSIMPLEQ_INIT(&s->active_rules); } if (rule->options.inject.immediately) { return NULL; } acb = qemu_aio_get(&blkdebug_aiocb_info, bs, cb, opaque); acb->ret = -error; bh = aio_bh_new(bdrv_get_aio_context(bs), error_callback_bh, acb); acb->bh = bh; qemu_bh_schedule(bh); return &acb->common; }
15,561
1
static void colo_old_packet_check_one_conn(void *opaque, void *user_data) { Connection *conn = opaque; GList *result = NULL; int64_t check_time = REGULAR_PACKET_CHECK_MS; result = g_queue_find_custom(&conn->primary_list, &check_time, (GCompareFunc)colo_old_packet_check_one); if (result) { /* do checkpoint will flush old packet */ /* TODO: colo_notify_checkpoint();*/ } }
15,562
1
int bdrv_open(BlockDriverState *bs, const char *filename, int snapshot) { int fd; int64_t size; struct cow_header_v2 cow_header; #ifndef _WIN32 char template[] = "/tmp/vl.XXXXXX"; int cow_fd; struct stat st; #endif bs->read_only = 0; bs->fd = -1; bs->cow_fd = -1; bs->cow_bitmap = NULL; strcpy(bs->filename, filename); /* open standard HD image */ #ifdef _WIN32 fd = open(filename, O_RDWR | O_BINARY); #else fd = open(filename, O_RDWR | O_LARGEFILE); #endif if (fd < 0) { /* read only image on disk */ #ifdef _WIN32 fd = open(filename, O_RDONLY | O_BINARY); #else fd = open(filename, O_RDONLY | O_LARGEFILE); #endif if (fd < 0) { perror(filename); goto fail; } if (!snapshot) bs->read_only = 1; } bs->fd = fd; /* see if it is a cow image */ if (read(fd, &cow_header, sizeof(cow_header)) != sizeof(cow_header)) { fprintf(stderr, "%s: could not read header\n", filename); goto fail; } #ifndef _WIN32 if (be32_to_cpu(cow_header.magic) == COW_MAGIC && be32_to_cpu(cow_header.version) == COW_VERSION) { /* cow image found */ size = cow_header.size; #ifndef WORDS_BIGENDIAN size = bswap64(size); #endif bs->total_sectors = size / 512; bs->cow_fd = fd; bs->fd = -1; if (cow_header.backing_file[0] != '\0') { if (stat(cow_header.backing_file, &st) != 0) { fprintf(stderr, "%s: could not find original disk image '%s'\n", filename, cow_header.backing_file); goto fail; } if (st.st_mtime != be32_to_cpu(cow_header.mtime)) { fprintf(stderr, "%s: original raw disk image '%s' does not match saved timestamp\n", filename, cow_header.backing_file); goto fail; } fd = open(cow_header.backing_file, O_RDONLY | O_LARGEFILE); if (fd < 0) goto fail; bs->fd = fd; } /* mmap the bitmap */ bs->cow_bitmap_size = ((bs->total_sectors + 7) >> 3) + sizeof(cow_header); bs->cow_bitmap_addr = mmap(get_mmap_addr(bs->cow_bitmap_size), bs->cow_bitmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, bs->cow_fd, 0); if (bs->cow_bitmap_addr == MAP_FAILED) goto fail; bs->cow_bitmap = bs->cow_bitmap_addr + sizeof(cow_header); bs->cow_sectors_offset = (bs->cow_bitmap_size + 511) & ~511; snapshot = 0; } else #endif { /* standard raw image */ size = lseek64(fd, 0, SEEK_END); bs->total_sectors = size / 512; bs->fd = fd; } #ifndef _WIN32 if (snapshot) { /* create a temporary COW file */ cow_fd = mkstemp64(template); if (cow_fd < 0) goto fail; bs->cow_fd = cow_fd; unlink(template); /* just need to allocate bitmap */ bs->cow_bitmap_size = (bs->total_sectors + 7) >> 3; bs->cow_bitmap_addr = mmap(get_mmap_addr(bs->cow_bitmap_size), bs->cow_bitmap_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (bs->cow_bitmap_addr == MAP_FAILED) goto fail; bs->cow_bitmap = bs->cow_bitmap_addr; bs->cow_sectors_offset = 0; } #endif bs->inserted = 1; /* call the change callback */ if (bs->change_cb) bs->change_cb(bs->change_opaque); return 0; fail: bdrv_close(bs); return -1; }
15,563
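The COW bookkeeping in this early bdrv_open() is simple arithmetic: one bitmap bit per 512-byte sector, rounded up to whole bytes, with the data area starting at the next 512-byte boundary. A worked example (the 72-byte header size is illustrative, not the real sizeof(struct cow_header_v2)):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    int64_t total_sectors = 1000;
    int64_t bitmap_size   = ((total_sectors + 7) >> 3) + 72;  /* 125 + 72 = 197 */
    int64_t data_offset   = (bitmap_size + 511) & ~511;       /* rounds up to 512 */

    printf("cow_bitmap_size=%" PRId64 " cow_sectors_offset=%" PRId64 "\n",
           bitmap_size, data_offset);
    return 0;
}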
1
static void ehci_detach(USBPort *port) { EHCIState *s = port->opaque; uint32_t *portsc = &s->portsc[port->index]; const char *owner = (*portsc & PORTSC_POWNER) ? "comp" : "ehci"; trace_usb_ehci_port_detach(port->index, owner); if (*portsc & PORTSC_POWNER) { USBPort *companion = s->companion_ports[port->index]; companion->ops->detach(companion); companion->dev = NULL; /* * EHCI spec 4.2.2: "When a disconnect occurs... On the event, * the port ownership is returned immediately to the EHCI controller." */ *portsc &= ~PORTSC_POWNER; return; } ehci_queues_rip_device(s, port->dev, 0); ehci_queues_rip_device(s, port->dev, 1); *portsc &= ~(PORTSC_CONNECT|PORTSC_PED); *portsc |= PORTSC_CSC; ehci_raise_irq(s, USBSTS_PCD); }
15,564
1
static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_size) { const uint8_t *s = src; const uint8_t *end; #ifdef HAVE_MMX const uint8_t *mm_end; #endif uint16_t *d = (uint16_t *)dst; end = s + src_size; #ifdef HAVE_MMX mm_end = end - 15; #if 1 //is faster only if multiplies are reasonable fast (FIXME figure out on which cpus this is faster, on Athlon its slightly faster) asm volatile( "movq %3, %%mm5 \n\t" "movq %4, %%mm6 \n\t" "movq %5, %%mm7 \n\t" "jmp 2f \n\t" ASMALIGN(4) "1: \n\t" PREFETCH" 32(%1) \n\t" "movd (%1), %%mm0 \n\t" "movd 4(%1), %%mm3 \n\t" "punpckldq 8(%1), %%mm0 \n\t" "punpckldq 12(%1), %%mm3 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm3, %%mm4 \n\t" "pand %%mm6, %%mm0 \n\t" "pand %%mm6, %%mm3 \n\t" "pmaddwd %%mm7, %%mm0 \n\t" "pmaddwd %%mm7, %%mm3 \n\t" "pand %%mm5, %%mm1 \n\t" "pand %%mm5, %%mm4 \n\t" "por %%mm1, %%mm0 \n\t" "por %%mm4, %%mm3 \n\t" "psrld $6, %%mm0 \n\t" "pslld $10, %%mm3 \n\t" "por %%mm3, %%mm0 \n\t" MOVNTQ" %%mm0, (%0) \n\t" "add $16, %1 \n\t" "add $8, %0 \n\t" "2: \n\t" "cmp %2, %1 \n\t" " jb 1b \n\t" : "+r" (d), "+r"(s) : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215) ); #else __asm __volatile(PREFETCH" %0"::"m"(*src):"memory"); __asm __volatile( "movq %0, %%mm7\n\t" "movq %1, %%mm6\n\t" ::"m"(red_15mask),"m"(green_15mask)); while(s < mm_end) { __asm __volatile( PREFETCH" 32%1\n\t" "movd %1, %%mm0\n\t" "movd 4%1, %%mm3\n\t" "punpckldq 8%1, %%mm0\n\t" "punpckldq 12%1, %%mm3\n\t" "movq %%mm0, %%mm1\n\t" "movq %%mm0, %%mm2\n\t" "movq %%mm3, %%mm4\n\t" "movq %%mm3, %%mm5\n\t" "psrlq $3, %%mm0\n\t" "psrlq $3, %%mm3\n\t" "pand %2, %%mm0\n\t" "pand %2, %%mm3\n\t" "psrlq $6, %%mm1\n\t" "psrlq $6, %%mm4\n\t" "pand %%mm6, %%mm1\n\t" "pand %%mm6, %%mm4\n\t" "psrlq $9, %%mm2\n\t" "psrlq $9, %%mm5\n\t" "pand %%mm7, %%mm2\n\t" "pand %%mm7, %%mm5\n\t" "por %%mm1, %%mm0\n\t" "por %%mm4, %%mm3\n\t" "por %%mm2, %%mm0\n\t" "por %%mm5, %%mm3\n\t" "psllq $16, %%mm3\n\t" "por %%mm3, %%mm0\n\t" MOVNTQ" %%mm0, %0\n\t" :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory"); d += 4; s += 16; } #endif __asm __volatile(SFENCE:::"memory"); __asm __volatile(EMMS:::"memory"); #endif while(s < end) { register int rgb = *(uint32_t*)s; s += 4; *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9); } }
15,565
1
PPC_OP(tlbie) { do_tlbie(); RETURN(); }
15,566
0
static int avs_probe(AVProbeData * p) { const uint8_t *d; if (p->buf_size < 4) return 0; d = p->buf; if (d[0] == 'w' && d[1] == 'W' && d[2] == 0x10 && d[3] == 0) return 50; return 0; }
15,568