Columns:
label: int64, range 0 to 1
func1: string, lengths 23 to 97k
id: int64, range 0 to 27.3k
Rows follow as (label, func1, id) triples.
1
static int decorrelate(TAKDecContext *s, int c1, int c2, int length) { GetBitContext *gb = &s->gb; int32_t *p1 = s->decoded[c1] + 1; int32_t *p2 = s->decoded[c2] + 1; int i; int dshift, dfactor; switch (s->dmode) { case 1: /* left/side */ for (i = 0; i < length; i++) { int32_t a = p1[i]; int32_t b = p2[i]; p2[i] = a + b; } break; case 2: /* side/right */ for (i = 0; i < length; i++) { int32_t a = p1[i]; int32_t b = p2[i]; p1[i] = b - a; } break; case 3: /* side/mid */ for (i = 0; i < length; i++) { int32_t a = p1[i]; int32_t b = p2[i]; a -= b >> 1; p1[i] = a; p2[i] = a + b; } break; case 4: /* side/left with scale factor */ FFSWAP(int32_t*, p1, p2); case 5: /* side/right with scale factor */ dshift = get_bits_esc4(gb); dfactor = get_sbits(gb, 10); for (i = 0; i < length; i++) { int32_t a = p1[i]; int32_t b = p2[i]; b = dfactor * (b >> dshift) + 128 >> 8 << dshift; p1[i] = b - a; } break; case 6: FFSWAP(int32_t*, p1, p2); case 7: { int length2, order_half, filter_order, dval1, dval2; int tmp, x, code_size; if (length < 256) return AVERROR_INVALIDDATA; dshift = get_bits_esc4(gb); filter_order = 8 << get_bits1(gb); dval1 = get_bits1(gb); dval2 = get_bits1(gb); AV_ZERO128(s->filter + 8); for (i = 0; i < filter_order; i++) { if (!(i & 3)) code_size = 14 - get_bits(gb, 3); s->filter[i] = get_sbits(gb, code_size); } order_half = filter_order / 2; length2 = length - (filter_order - 1); /* decorrelate beginning samples */ if (dval1) { for (i = 0; i < order_half; i++) { int32_t a = p1[i]; int32_t b = p2[i]; p1[i] = a + b; } } /* decorrelate ending samples */ if (dval2) { for (i = length2 + order_half; i < length; i++) { int32_t a = p1[i]; int32_t b = p2[i]; p1[i] = a + b; } } for (i = 0; i < filter_order; i++) s->residues[i] = *p2++ >> dshift; p1 += order_half; x = FF_ARRAY_ELEMS(s->residues) - filter_order; for (; length2 > 0; length2 -= tmp) { tmp = FFMIN(length2, x); for (i = 0; i < tmp; i++) s->residues[filter_order + i] = *p2++ >> dshift; for (i = 0; i < tmp; i++) { int v = 1 << 9; v += s->adsp.scalarproduct_int16(&s->residues[i], s->filter, 16); v = (av_clip_intp2(v >> 10, 13) << dshift) - *p1; *p1++ = v; } memcpy(s->residues, &s->residues[tmp], 2 * filter_order); } emms_c(); break; } } return 0; }
22,979
1
static void *colo_compare_thread(void *opaque) { GMainContext *worker_context; GMainLoop *compare_loop; CompareState *s = opaque; GSource *timeout_source; worker_context = g_main_context_new(); qemu_chr_fe_set_handlers(&s->chr_pri_in, compare_chr_can_read, compare_pri_chr_in, NULL, s, worker_context, true); qemu_chr_fe_set_handlers(&s->chr_sec_in, compare_chr_can_read, compare_sec_chr_in, NULL, s, worker_context, true); compare_loop = g_main_loop_new(worker_context, FALSE); /* To kick any packets that the secondary doesn't match */ timeout_source = g_timeout_source_new(REGULAR_PACKET_CHECK_MS); g_source_set_callback(timeout_source, (GSourceFunc)check_old_packet_regular, s, NULL); g_source_attach(timeout_source, worker_context); g_main_loop_run(compare_loop); g_source_unref(timeout_source); g_main_loop_unref(compare_loop); g_main_context_unref(worker_context); return NULL; }
22,981
1
static void show_format(WriterContext *w, AVFormatContext *fmt_ctx) { char val_str[128]; int64_t size = fmt_ctx->pb ? avio_size(fmt_ctx->pb) : -1; print_section_header("format"); print_str("filename", fmt_ctx->filename); print_int("nb_streams", fmt_ctx->nb_streams); print_str("format_name", fmt_ctx->iformat->name); print_str("format_long_name", fmt_ctx->iformat->long_name); print_time("start_time", fmt_ctx->start_time, &AV_TIME_BASE_Q); print_time("duration", fmt_ctx->duration, &AV_TIME_BASE_Q); if (size >= 0) print_val ("size", size, unit_byte_str); else print_str_opt("size", "N/A"); if (fmt_ctx->bit_rate > 0) print_val ("bit_rate", fmt_ctx->bit_rate, unit_bit_per_second_str); else print_str_opt("bit_rate", "N/A"); show_tags(fmt_ctx->metadata); print_section_footer("format"); fflush(stdout); }
22,982
1
static SchroFrame *libschroedinger_frame_from_data(AVCodecContext *avctx, const AVFrame *frame) { SchroEncoderParams *p_schro_params = avctx->priv_data; SchroFrame *in_frame = ff_create_schro_frame(avctx, p_schro_params->frame_format); if (in_frame) { /* Copy input data to SchroFrame buffers (they match the ones * referenced by the AVFrame stored in priv) */ if (av_frame_copy(in_frame->priv, frame) < 0) { av_log(avctx, AV_LOG_ERROR, "Failed to copy input data\n"); return NULL; } } return in_frame; }
22,983
1
static void x86_cpu_reset(CPUState *s) { X86CPU *cpu = X86_CPU(s); X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); CPUX86State *env = &cpu->env; int i; xcc->parent_reset(s); memset(env, 0, offsetof(CPUX86State, breakpoints)); tlb_flush(env, 1); env->old_exception = -1; /* init to reset state */ #ifdef CONFIG_SOFTMMU env->hflags |= HF_SOFTMMU_MASK; #endif env->hflags2 |= HF2_GIF_MASK; cpu_x86_update_cr0(env, 0x60000010); env->a20_mask = ~0x0; env->smbase = 0x30000; env->idt.limit = 0xffff; env->gdt.limit = 0xffff; env->ldt.limit = 0xffff; env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); env->tr.limit = 0xffff; env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK); env->eip = 0xfff0; env->regs[R_EDX] = env->cpuid_version; env->eflags = 0x2; /* FPU init */ for (i = 0; i < 8; i++) { env->fptags[i] = 1; } env->fpuc = 0x37f; env->mxcsr = 0x1f80; env->xstate_bv = XSTATE_FP | XSTATE_SSE; env->pat = 0x0007040600070406ULL; env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; memset(env->dr, 0, sizeof(env->dr)); env->dr[6] = DR6_FIXED_1; env->dr[7] = DR7_FIXED_1; cpu_breakpoint_remove_all(env, BP_CPU); cpu_watchpoint_remove_all(env, BP_CPU); #if !defined(CONFIG_USER_ONLY) /* We hard-wire the BSP to the first CPU. */ if (s->cpu_index == 0) { apic_designate_bsp(env->apic_state); } s->halted = !cpu_is_bsp(cpu); #endif }
22,984
1
DISAS_INSN(frestore) { /* TODO: Implement frestore. */ qemu_assert(0, "FRESTORE not implemented"); }
22,985
1
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) { H264Context *h = dst->priv_data, *h1 = src->priv_data; int inited = h->context_initialized, err = 0; int context_reinitialized = 0; int i, ret; if (dst == src) return 0; if (inited && (h->width != h1->width || h->height != h1->height || h->mb_width != h1->mb_width || h->mb_height != h1->mb_height || h->sps.bit_depth_luma != h1->sps.bit_depth_luma || h->sps.chroma_format_idc != h1->sps.chroma_format_idc || h->sps.colorspace != h1->sps.colorspace)) { /* set bits_per_raw_sample to the previous value. the check for changed * bit depth in h264_set_parameter_from_sps() uses it and sets it to * the current value */ h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma; h->width = h1->width; h->height = h1->height; h->mb_height = h1->mb_height; h->mb_width = h1->mb_width; h->mb_num = h1->mb_num; h->mb_stride = h1->mb_stride; h->b_stride = h1->b_stride; // SPS/PPS if ((ret = copy_parameter_set((void **)h->sps_buffers, (void **)h1->sps_buffers, MAX_SPS_COUNT, sizeof(SPS))) < 0) return ret; h->sps = h1->sps; if ((ret = copy_parameter_set((void **)h->pps_buffers, (void **)h1->pps_buffers, MAX_PPS_COUNT, sizeof(PPS))) < 0) return ret; h->pps = h1->pps; if ((err = h264_slice_header_init(h, 1)) < 0) { av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed\n"); return err; } context_reinitialized = 1; #if 0 h264_set_parameter_from_sps(h); //Note we set context_reinitialized which will cause h264_set_parameter_from_sps to be reexecuted h->cur_chroma_format_idc = h1->cur_chroma_format_idc; #endif } /* copy block_offset since frame_start may not be called */ memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset)); if (!inited) { H264SliceContext *orig_slice_ctx = h->slice_ctx; for (i = 0; i < MAX_SPS_COUNT; i++) av_freep(h->sps_buffers + i); for (i = 0; i < MAX_PPS_COUNT; i++) av_freep(h->pps_buffers + i); ff_h264_unref_picture(h, &h->last_pic_for_ec); memcpy(h, h1, sizeof(H264Context)); memset(h->sps_buffers, 0, sizeof(h->sps_buffers)); memset(h->pps_buffers, 0, sizeof(h->pps_buffers)); memset(&h->cur_pic, 0, sizeof(h->cur_pic)); memset(&h->last_pic_for_ec, 0, sizeof(h->last_pic_for_ec)); h->slice_ctx = orig_slice_ctx; memset(&h->slice_ctx[0].er, 0, sizeof(h->slice_ctx[0].er)); memset(&h->slice_ctx[0].mb, 0, sizeof(h->slice_ctx[0].mb)); memset(&h->slice_ctx[0].mb_luma_dc, 0, sizeof(h->slice_ctx[0].mb_luma_dc)); memset(&h->slice_ctx[0].mb_padding, 0, sizeof(h->slice_ctx[0].mb_padding)); h->avctx = dst; h->DPB = NULL; h->qscale_table_pool = NULL; h->mb_type_pool = NULL; h->ref_index_pool = NULL; h->motion_val_pool = NULL; h->intra4x4_pred_mode= NULL; h->non_zero_count = NULL; h->slice_table_base = NULL; h->slice_table = NULL; h->cbp_table = NULL; h->chroma_pred_mode_table = NULL; memset(h->mvd_table, 0, sizeof(h->mvd_table)); h->direct_table = NULL; h->list_counts = NULL; h->mb2b_xy = NULL; h->mb2br_xy = NULL; if (h1->context_initialized) { h->context_initialized = 0; memset(&h->cur_pic, 0, sizeof(h->cur_pic)); av_frame_unref(&h->cur_pic.f); h->cur_pic.tf.f = &h->cur_pic.f; ret = ff_h264_alloc_tables(h); if (ret < 0) { av_log(dst, AV_LOG_ERROR, "Could not allocate memory\n"); return ret; } ret = ff_h264_slice_context_init(h, &h->slice_ctx[0]); if (ret < 0) { av_log(dst, AV_LOG_ERROR, "context_init() failed.\n"); return ret; } } h->context_initialized = h1->context_initialized; } h->avctx->coded_height = h1->avctx->coded_height; h->avctx->coded_width = h1->avctx->coded_width; h->avctx->width = h1->avctx->width; h->avctx->height = h1->avctx->height; h->coded_picture_number = h1->coded_picture_number; h->first_field = h1->first_field; h->picture_structure = h1->picture_structure; h->droppable = h1->droppable; h->low_delay = h1->low_delay; for (i = 0; h->DPB && i < H264_MAX_PICTURE_COUNT; i++) { ff_h264_unref_picture(h, &h->DPB[i]); if (h1->DPB && h1->DPB[i].f.buf[0] && (ret = ff_h264_ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0) return ret; } h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1); ff_h264_unref_picture(h, &h->cur_pic); if (h1->cur_pic.f.buf[0]) { ret = ff_h264_ref_picture(h, &h->cur_pic, &h1->cur_pic); if (ret < 0) return ret; } h->workaround_bugs = h1->workaround_bugs; h->low_delay = h1->low_delay; h->droppable = h1->droppable; // extradata/NAL handling h->is_avc = h1->is_avc; // SPS/PPS if ((ret = copy_parameter_set((void **)h->sps_buffers, (void **)h1->sps_buffers, MAX_SPS_COUNT, sizeof(SPS))) < 0) return ret; h->sps = h1->sps; if ((ret = copy_parameter_set((void **)h->pps_buffers, (void **)h1->pps_buffers, MAX_PPS_COUNT, sizeof(PPS))) < 0) return ret; h->pps = h1->pps; // Dequantization matrices // FIXME these are big - can they be only copied when PPS changes? copy_fields(h, h1, dequant4_buffer, dequant4_coeff); for (i = 0; i < 6; i++) h->dequant4_coeff[i] = h->dequant4_buffer[0] + (h1->dequant4_coeff[i] - h1->dequant4_buffer[0]); for (i = 0; i < 6; i++) h->dequant8_coeff[i] = h->dequant8_buffer[0] + (h1->dequant8_coeff[i] - h1->dequant8_buffer[0]); h->dequant_coeff_pps = h1->dequant_coeff_pps; // POC timing copy_fields(h, h1, poc_lsb, default_ref_list); // reference lists copy_fields(h, h1, short_ref, current_slice); copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1); copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1); copy_picture_range(h->delayed_pic, h1->delayed_pic, MAX_DELAYED_PIC_COUNT + 2, h, h1); h->frame_recovered = h1->frame_recovered; if (context_reinitialized) ff_h264_set_parameter_from_sps(h); if (!h->cur_pic_ptr) return 0; if (!h->droppable) { err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); h->prev_poc_msb = h->poc_msb; h->prev_poc_lsb = h->poc_lsb; } h->prev_frame_num_offset = h->frame_num_offset; h->prev_frame_num = h->frame_num; h->outputed_poc = h->next_outputed_poc; h->recovery_frame = h1->recovery_frame; return err; }
22,986
1
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array, abi_ulong target_addr) { int nsems; unsigned short *array; union semun semun; struct semid_ds semid_ds; int i, ret; semun.buf = &semid_ds; ret = semctl(semid, 0, IPC_STAT, semun); if (ret == -1) return get_errno(ret); nsems = semid_ds.sem_nsems; *host_array = malloc(nsems*sizeof(unsigned short)); array = lock_user(VERIFY_READ, target_addr, nsems*sizeof(unsigned short), 1); if (!array) return -TARGET_EFAULT; for(i=0; i<nsems; i++) { __get_user((*host_array)[i], &array[i]); } unlock_user(array, target_addr, 0); return 0; }
22,987
1
static void vga_draw_text(VGAState *s, int full_update) { int cx, cy, cheight, cw, ch, cattr, height, width, ch_attr; int cx_min, cx_max, linesize, x_incr; uint32_t offset, fgcol, bgcol, v, cursor_offset; uint8_t *d1, *d, *src, *s1, *dest, *cursor_ptr; const uint8_t *font_ptr, *font_base[2]; int dup9, line_offset, depth_index; uint32_t *palette; uint32_t *ch_attr_ptr; vga_draw_glyph8_func *vga_draw_glyph8; vga_draw_glyph9_func *vga_draw_glyph9; vga_dirty_log_stop(s); /* compute font data address (in plane 2) */ v = s->sr[3]; offset = (((v >> 4) & 1) | ((v << 1) & 6)) * 8192 * 4 + 2; if (offset != s->font_offsets[0]) { s->font_offsets[0] = offset; full_update = 1; } font_base[0] = s->vram_ptr + offset; offset = (((v >> 5) & 1) | ((v >> 1) & 6)) * 8192 * 4 + 2; font_base[1] = s->vram_ptr + offset; if (offset != s->font_offsets[1]) { s->font_offsets[1] = offset; full_update = 1; } if (s->plane_updated & (1 << 2)) { /* if the plane 2 was modified since the last display, it indicates the font may have been modified */ s->plane_updated = 0; full_update = 1; } full_update |= update_basic_params(s); line_offset = s->line_offset; s1 = s->vram_ptr + (s->start_addr * 4); vga_get_text_resolution(s, &width, &height, &cw, &cheight); x_incr = cw * ((ds_get_bits_per_pixel(s->ds) + 7) >> 3); if ((height * width) > CH_ATTR_SIZE) { /* better than nothing: exit if transient size is too big */ return; } if (width != s->last_width || height != s->last_height || cw != s->last_cw || cheight != s->last_ch || s->last_depth) { s->last_scr_width = width * cw; s->last_scr_height = height * cheight; qemu_console_resize(s->ds, s->last_scr_width, s->last_scr_height); s->last_depth = 0; s->last_width = width; s->last_height = height; s->last_ch = cheight; s->last_cw = cw; full_update = 1; } s->rgb_to_pixel = rgb_to_pixel_dup_table[get_depth_index(s->ds)]; full_update |= update_palette16(s); palette = s->last_palette; x_incr = cw * ((ds_get_bits_per_pixel(s->ds) + 7) >> 3); cursor_offset = ((s->cr[0x0e] << 8) | s->cr[0x0f]) - s->start_addr; if (cursor_offset != s->cursor_offset || s->cr[0xa] != s->cursor_start || s->cr[0xb] != s->cursor_end) { /* if the cursor position changed, we update the old and new chars */ if (s->cursor_offset < CH_ATTR_SIZE) s->last_ch_attr[s->cursor_offset] = -1; if (cursor_offset < CH_ATTR_SIZE) s->last_ch_attr[cursor_offset] = -1; s->cursor_offset = cursor_offset; s->cursor_start = s->cr[0xa]; s->cursor_end = s->cr[0xb]; } cursor_ptr = s->vram_ptr + (s->start_addr + cursor_offset) * 4; depth_index = get_depth_index(s->ds); if (cw == 16) vga_draw_glyph8 = vga_draw_glyph16_table[depth_index]; else vga_draw_glyph8 = vga_draw_glyph8_table[depth_index]; vga_draw_glyph9 = vga_draw_glyph9_table[depth_index]; dest = ds_get_data(s->ds); linesize = ds_get_linesize(s->ds); ch_attr_ptr = s->last_ch_attr; for(cy = 0; cy < height; cy++) { d1 = dest; src = s1; cx_min = width; cx_max = -1; for(cx = 0; cx < width; cx++) { ch_attr = *(uint16_t *)src; if (full_update || ch_attr != *ch_attr_ptr) { if (cx < cx_min) cx_min = cx; if (cx > cx_max) cx_max = cx; *ch_attr_ptr = ch_attr; #ifdef WORDS_BIGENDIAN ch = ch_attr >> 8; cattr = ch_attr & 0xff; #else ch = ch_attr & 0xff; cattr = ch_attr >> 8; #endif font_ptr = font_base[(cattr >> 3) & 1]; font_ptr += 32 * 4 * ch; bgcol = palette[cattr >> 4]; fgcol = palette[cattr & 0x0f]; if (cw != 9) { vga_draw_glyph8(d1, linesize, font_ptr, cheight, fgcol, bgcol); } else { dup9 = 0; if (ch >= 0xb0 && ch <= 0xdf && (s->ar[0x10] & 0x04)) dup9 = 1; vga_draw_glyph9(d1, linesize, font_ptr, cheight, fgcol, bgcol, dup9); } if (src == cursor_ptr && !(s->cr[0x0a] & 0x20)) { int line_start, line_last, h; /* draw the cursor */ line_start = s->cr[0x0a] & 0x1f; line_last = s->cr[0x0b] & 0x1f; /* XXX: check that */ if (line_last > cheight - 1) line_last = cheight - 1; if (line_last >= line_start && line_start < cheight) { h = line_last - line_start + 1; d = d1 + linesize * line_start; if (cw != 9) { vga_draw_glyph8(d, linesize, cursor_glyph, h, fgcol, bgcol); } else { vga_draw_glyph9(d, linesize, cursor_glyph, h, fgcol, bgcol, 1); } } } } d1 += x_incr; src += 4; ch_attr_ptr++; } if (cx_max != -1) { dpy_update(s->ds, cx_min * cw, cy * cheight, (cx_max - cx_min + 1) * cw, cheight); } dest += linesize * cheight; s1 += line_offset; } }
22,988
1
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash, uint32_t slb_pshift, bool secondary, target_ulong ptem, ppc_hash_pte64_t *pte) { CPUPPCState *env = &cpu->env; int i; uint64_t token; target_ulong pte0, pte1; target_ulong pte_index; pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP; token = ppc_hash64_start_access(cpu, pte_index); if (!token) { return -1; } for (i = 0; i < HPTES_PER_GROUP; i++) { pte0 = ppc_hash64_load_hpte0(cpu, token, i); pte1 = ppc_hash64_load_hpte1(cpu, token, i); if ((pte0 & HPTE64_V_VALID) && (secondary == !!(pte0 & HPTE64_V_SECONDARY)) && HPTE64_V_COMPARE(pte0, ptem)) { uint32_t pshift = ppc_hash64_pte_size_decode(pte1, slb_pshift); if (pshift == 0) { continue; } /* We don't do anything with pshift yet as qemu TLB only deals * with 4K pages anyway */ pte->pte0 = pte0; pte->pte1 = pte1; ppc_hash64_stop_access(cpu, token); return (pte_index + i) * HASH_PTE_SIZE_64; } } ppc_hash64_stop_access(cpu, token); /* * We didn't find a valid entry. */ return -1; }
22,989
1
int bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue, Error **errp) { int ret = -1; Error *local_err = NULL; BlockDriver *drv; QemuOpts *opts; const char *value; bool read_only; assert(reopen_state != NULL); assert(reopen_state->bs->drv != NULL); drv = reopen_state->bs->drv; /* Process generic block layer options */ opts = qemu_opts_create(&bdrv_runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, reopen_state->options, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto error; } update_flags_from_options(&reopen_state->flags, opts); /* node-name and driver must be unchanged. Put them back into the QDict, so * that they are checked at the end of this function. */ value = qemu_opt_get(opts, "node-name"); if (value) { qdict_put_str(reopen_state->options, "node-name", value); } value = qemu_opt_get(opts, "driver"); if (value) { qdict_put_str(reopen_state->options, "driver", value); } /* If we are to stay read-only, do not allow permission change * to r/w. Attempting to set to r/w may fail if either BDRV_O_ALLOW_RDWR is * not set, or if the BDS still has copy_on_read enabled */ read_only = !(reopen_state->flags & BDRV_O_RDWR); ret = bdrv_can_set_read_only(reopen_state->bs, read_only, true, &local_err); if (local_err) { error_propagate(errp, local_err); goto error; } /* Calculate required permissions after reopening */ bdrv_reopen_perm(queue, reopen_state->bs, &reopen_state->perm, &reopen_state->shared_perm); ret = bdrv_flush(reopen_state->bs); if (ret) { error_setg_errno(errp, -ret, "Error flushing drive"); goto error; } if (drv->bdrv_reopen_prepare) { ret = drv->bdrv_reopen_prepare(reopen_state, queue, &local_err); if (ret) { if (local_err != NULL) { error_propagate(errp, local_err); } else { error_setg(errp, "failed while preparing to reopen image '%s'", reopen_state->bs->filename); } goto error; } } else { /* It is currently mandatory to have a bdrv_reopen_prepare() * handler for each supported drv. */ error_setg(errp, "Block format '%s' used by node '%s' " "does not support reopening files", drv->format_name, bdrv_get_device_or_node_name(reopen_state->bs)); ret = -1; goto error; } /* Options that are not handled are only okay if they are unchanged * compared to the old state. It is expected that some options are only * used for the initial open, but not reopen (e.g. filename) */ if (qdict_size(reopen_state->options)) { const QDictEntry *entry = qdict_first(reopen_state->options); do { QString *new_obj = qobject_to_qstring(entry->value); const char *new = qstring_get_str(new_obj); /* * Caution: while qdict_get_try_str() is fine, getting * non-string types would require more care. When * bs->options come from -blockdev or blockdev_add, its * members are typed according to the QAPI schema, but * when they come from -drive, they're all QString. */ const char *old = qdict_get_try_str(reopen_state->bs->options, entry->key); if (!old || strcmp(new, old)) { error_setg(errp, "Cannot change the option '%s'", entry->key); ret = -EINVAL; goto error; } } while ((entry = qdict_next(reopen_state->options, entry))); } ret = bdrv_check_perm(reopen_state->bs, queue, reopen_state->perm, reopen_state->shared_perm, NULL, errp); if (ret < 0) { goto error; } ret = 0; error: qemu_opts_del(opts); return ret; }
22,990
0
static av_cold int g722_encode_close(AVCodecContext *avctx) { G722Context *c = avctx->priv_data; int i; for (i = 0; i < 2; i++) { av_freep(&c->paths[i]); av_freep(&c->node_buf[i]); av_freep(&c->nodep_buf[i]); } return 0; }
22,991
0
static int asf_get_packet(AVFormatContext *s) { ASFContext *asf = s->priv_data; ByteIOContext *pb = &s->pb; uint32_t packet_length, padsize; int rsize = 9; int c; c = get_byte(pb); if (c != 0x82) { if (!url_feof(pb)) av_log(s, AV_LOG_ERROR, "ff asf bad header %x at:%"PRId64"\n", c, url_ftell(pb)); } if ((c & 0x0f) == 2) { // always true for now if (get_le16(pb) != 0) { if (!url_feof(pb)) av_log(s, AV_LOG_ERROR, "ff asf bad non zero\n"); return AVERROR_IO; } rsize+=2; /* }else{ if (!url_feof(pb)) printf("ff asf bad header %x at:%"PRId64"\n", c, url_ftell(pb)); return AVERROR_IO;*/ } asf->packet_flags = get_byte(pb); asf->packet_property = get_byte(pb); DO_2BITS(asf->packet_flags >> 5, packet_length, asf->packet_size); DO_2BITS(asf->packet_flags >> 1, padsize, 0); // sequence ignored DO_2BITS(asf->packet_flags >> 3, padsize, 0); // padding length //the following checks prevent overflows and infinite loops if(packet_length >= (1U<<29)){ av_log(s, AV_LOG_ERROR, "invalid packet_length %d at:%"PRId64"\n", packet_length, url_ftell(pb)); return -1; } if(padsize >= (1U<<29)){ av_log(s, AV_LOG_ERROR, "invalid padsize %d at:%"PRId64"\n", padsize, url_ftell(pb)); return -1; } asf->packet_timestamp = get_le32(pb); get_le16(pb); /* duration */ // rsize has at least 11 bytes which have to be present if (asf->packet_flags & 0x01) { asf->packet_segsizetype = get_byte(pb); rsize++; asf->packet_segments = asf->packet_segsizetype & 0x3f; } else { asf->packet_segments = 1; asf->packet_segsizetype = 0x80; } asf->packet_size_left = packet_length - padsize - rsize; if (packet_length < asf->hdr.min_pktsize) padsize += asf->hdr.min_pktsize - packet_length; asf->packet_padsize = padsize; #ifdef DEBUG printf("packet: size=%d padsize=%d left=%d\n", asf->packet_size, asf->packet_padsize, asf->packet_size_left); #endif return 0; }
22,992
0
static void new_video_stream(AVFormatContext *oc, int file_idx) { AVStream *st; OutputStream *ost; AVCodecContext *video_enc; enum CodecID codec_id = CODEC_ID_NONE; AVCodec *codec= NULL; if(!video_stream_copy){ if (video_codec_name) { codec_id = find_codec_or_die(video_codec_name, AVMEDIA_TYPE_VIDEO, 1, avcodec_opts[AVMEDIA_TYPE_VIDEO]->strict_std_compliance); codec = avcodec_find_encoder_by_name(video_codec_name); } else { codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_VIDEO); codec = avcodec_find_encoder(codec_id); } } ost = new_output_stream(oc, file_idx, codec); st = ost->st; if (!video_stream_copy) { ost->frame_aspect_ratio = frame_aspect_ratio; frame_aspect_ratio = 0; #if CONFIG_AVFILTER ost->avfilter= vfilters; vfilters = NULL; #endif } ost->bitstream_filters = video_bitstream_filters; video_bitstream_filters= NULL; st->codec->thread_count= thread_count; video_enc = st->codec; if(video_codec_tag) video_enc->codec_tag= video_codec_tag; if(oc->oformat->flags & AVFMT_GLOBALHEADER) { video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER; } if (video_stream_copy) { st->stream_copy = 1; video_enc->codec_type = AVMEDIA_TYPE_VIDEO; video_enc->sample_aspect_ratio = st->sample_aspect_ratio = av_d2q(frame_aspect_ratio*frame_height/frame_width, 255); } else { const char *p; int i; if (frame_rate.num) ost->frame_rate = frame_rate; video_enc->codec_id = codec_id; set_context_opts(video_enc, avcodec_opts[AVMEDIA_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, codec); video_enc->width = frame_width; video_enc->height = frame_height; video_enc->pix_fmt = frame_pix_fmt; st->sample_aspect_ratio = video_enc->sample_aspect_ratio; if (intra_only) video_enc->gop_size = 0; if (video_qscale || same_quality) { video_enc->flags |= CODEC_FLAG_QSCALE; video_enc->global_quality = FF_QP2LAMBDA * video_qscale; } if(intra_matrix) video_enc->intra_matrix = intra_matrix; if(inter_matrix) video_enc->inter_matrix = inter_matrix; p= video_rc_override_string; for(i=0; p; i++){ int start, end, q; int e=sscanf(p, "%d,%d,%d", &start, &end, &q); if(e!=3){ fprintf(stderr, "error parsing rc_override\n"); ffmpeg_exit(1); } video_enc->rc_override= av_realloc(video_enc->rc_override, sizeof(RcOverride)*(i+1)); video_enc->rc_override[i].start_frame= start; video_enc->rc_override[i].end_frame = end; if(q>0){ video_enc->rc_override[i].qscale= q; video_enc->rc_override[i].quality_factor= 1.0; } else{ video_enc->rc_override[i].qscale= 0; video_enc->rc_override[i].quality_factor= -q/100.0; } p= strchr(p, '/'); if(p) p++; } video_enc->rc_override_count=i; if (!video_enc->rc_initial_buffer_occupancy) video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size*3/4; video_enc->me_threshold= me_threshold; video_enc->intra_dc_precision= intra_dc_precision - 8; if (do_psnr) video_enc->flags|= CODEC_FLAG_PSNR; /* two pass mode */ if (do_pass) { if (do_pass == 1) { video_enc->flags |= CODEC_FLAG_PASS1; } else { video_enc->flags |= CODEC_FLAG_PASS2; } } if (forced_key_frames) parse_forced_key_frames(forced_key_frames, ost, video_enc); } if (video_language) { av_dict_set(&st->metadata, "language", video_language, 0); av_freep(&video_language); } /* reset some key parameters */ video_disable = 0; av_freep(&video_codec_name); av_freep(&forced_key_frames); video_stream_copy = 0; frame_pix_fmt = PIX_FMT_NONE; }
22,993
0
static GSList *gd_vc_vte_init(GtkDisplayState *s, VirtualConsole *vc, CharDriverState *chr, int idx, GSList *group, GtkWidget *view_menu) { char buffer[32]; GtkWidget *box; GtkWidget *scrollbar; GtkAdjustment *vadjustment; VirtualConsole *tmp_vc = chr->opaque; vc->s = s; vc->vte.echo = tmp_vc->vte.echo; vc->vte.chr = chr; chr->opaque = vc; g_free(tmp_vc); snprintf(buffer, sizeof(buffer), "vc%d", idx); vc->label = g_strdup_printf("%s", vc->vte.chr->label ? vc->vte.chr->label : buffer); group = gd_vc_menu_init(s, vc, idx, group, view_menu); vc->vte.terminal = vte_terminal_new(); g_signal_connect(vc->vte.terminal, "commit", G_CALLBACK(gd_vc_in), vc); /* The documentation says that the default is UTF-8, but actually it is * 7-bit ASCII at least in VTE 0.38. */ #if VTE_CHECK_VERSION(0, 40, 0) vte_terminal_set_encoding(VTE_TERMINAL(vc->vte.terminal), "UTF-8", NULL); #else vte_terminal_set_encoding(VTE_TERMINAL(vc->vte.terminal), "UTF-8"); #endif vte_terminal_set_scrollback_lines(VTE_TERMINAL(vc->vte.terminal), -1); vte_terminal_set_size(VTE_TERMINAL(vc->vte.terminal), VC_TERM_X_MIN, VC_TERM_Y_MIN); #if VTE_CHECK_VERSION(0, 28, 0) && GTK_CHECK_VERSION(3, 0, 0) vadjustment = gtk_scrollable_get_vadjustment (GTK_SCROLLABLE(vc->vte.terminal)); #else vadjustment = vte_terminal_get_adjustment(VTE_TERMINAL(vc->vte.terminal)); #endif #if GTK_CHECK_VERSION(3, 0, 0) box = gtk_box_new(GTK_ORIENTATION_HORIZONTAL, 2); scrollbar = gtk_scrollbar_new(GTK_ORIENTATION_VERTICAL, vadjustment); #else box = gtk_hbox_new(false, 2); scrollbar = gtk_vscrollbar_new(vadjustment); #endif gtk_box_pack_start(GTK_BOX(box), vc->vte.terminal, TRUE, TRUE, 0); gtk_box_pack_start(GTK_BOX(box), scrollbar, FALSE, FALSE, 0); vc->vte.box = box; vc->vte.scrollbar = scrollbar; g_signal_connect(vadjustment, "changed", G_CALLBACK(gd_vc_adjustment_changed), vc); vc->type = GD_VC_VTE; vc->tab_item = box; vc->focus = vc->vte.terminal; gtk_notebook_append_page(GTK_NOTEBOOK(s->notebook), vc->tab_item, gtk_label_new(vc->label)); qemu_chr_be_generic_open(vc->vte.chr); if (vc->vte.chr->init) { vc->vte.chr->init(vc->vte.chr); } return group; }
22,995
0
int qemu_opt_foreach(QemuOpts *opts, qemu_opt_loopfunc func, void *opaque, int abort_on_failure) { QemuOpt *opt; int rc = 0; TAILQ_FOREACH(opt, &opts->head, next) { rc = func(opt->name, opt->str, opaque); if (abort_on_failure && rc != 0) break; } return rc; }
22,996
0
static void build_fadt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms, unsigned dsdt_tbl_offset) { AcpiFadtDescriptorRev5_1 *fadt = acpi_data_push(table_data, sizeof(*fadt)); unsigned dsdt_entry_offset = (char *)&fadt->dsdt - table_data->data; uint16_t bootflags; switch (vms->psci_conduit) { case QEMU_PSCI_CONDUIT_DISABLED: bootflags = 0; break; case QEMU_PSCI_CONDUIT_HVC: bootflags = ACPI_FADT_ARM_PSCI_COMPLIANT | ACPI_FADT_ARM_PSCI_USE_HVC; break; case QEMU_PSCI_CONDUIT_SMC: bootflags = ACPI_FADT_ARM_PSCI_COMPLIANT; break; default: g_assert_not_reached(); } /* Hardware Reduced = 1 and use PSCI 0.2+ */ fadt->flags = cpu_to_le32(1 << ACPI_FADT_F_HW_REDUCED_ACPI); fadt->arm_boot_flags = cpu_to_le16(bootflags); /* ACPI v5.1 (fadt->revision.fadt->minor_revision) */ fadt->minor_revision = 0x1; /* DSDT address to be filled by Guest linker */ bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, dsdt_entry_offset, sizeof(fadt->dsdt), ACPI_BUILD_TABLE_FILE, dsdt_tbl_offset); build_header(linker, table_data, (void *)fadt, "FACP", sizeof(*fadt), 5, NULL, NULL); }
22,997
0
static void vfio_pci_size_rom(VFIOPCIDevice *vdev) { uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK); off_t offset = vdev->config_offset + PCI_ROM_ADDRESS; DeviceState *dev = DEVICE(vdev); char name[32]; int fd = vdev->vbasedev.fd; if (vdev->pdev.romfile || !vdev->pdev.rom_bar) { /* Since pci handles romfile, just print a message and return */ if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) { error_printf("Warning : Device at %04x:%02x:%02x.%x " "is known to cause system instability issues during " "option rom execution. " "Proceeding anyway since user specified romfile\n", vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function); } return; } /* * Use the same size ROM BAR as the physical device. The contents * will get filled in later when the guest tries to read it. */ if (pread(fd, &orig, 4, offset) != 4 || pwrite(fd, &size, 4, offset) != 4 || pread(fd, &size, 4, offset) != 4 || pwrite(fd, &orig, 4, offset) != 4) { error_report("%s(%04x:%02x:%02x.%x) failed: %m", __func__, vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function); return; } size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1; if (!size) { return; } if (vfio_blacklist_opt_rom(vdev)) { if (dev->opts && qemu_opt_get(dev->opts, "rombar")) { error_printf("Warning : Device at %04x:%02x:%02x.%x " "is known to cause system instability issues during " "option rom execution. " "Proceeding anyway since user specified non zero value for " "rombar\n", vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function); } else { error_printf("Warning : Rom loading for device at " "%04x:%02x:%02x.%x has been disabled due to " "system instability issues. " "Specify rombar=1 or romfile to force\n", vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function); return; } } trace_vfio_pci_size_rom(vdev->vbasedev.name, size); snprintf(name, sizeof(name), "vfio[%04x:%02x:%02x.%x].rom", vdev->host.domain, vdev->host.bus, vdev->host.slot, vdev->host.function); memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev), &vfio_rom_ops, vdev, name, size); pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom); vdev->pdev.has_rom = true; vdev->rom_read_failed = false; }
22,998
0
void ff_check_pixfmt_descriptors(void){ int i, j; for (i=0; i<FF_ARRAY_ELEMS(av_pix_fmt_descriptors); i++) { const AVPixFmtDescriptor *d = &av_pix_fmt_descriptors[i]; if (!d->name && !d->nb_components && !d->log2_chroma_w && !d->log2_chroma_h && !d->flags) continue; // av_log(NULL, AV_LOG_DEBUG, "Checking: %s\n", d->name); av_assert0(d->log2_chroma_w <= 3); av_assert0(d->log2_chroma_h <= 3); av_assert0(d->nb_components <= 4); av_assert0(d->name && d->name[0]); av_assert0((d->nb_components==4 || d->nb_components==2) == !!(d->flags & PIX_FMT_ALPHA)); av_assert2(av_get_pix_fmt(d->name) == i); for (j=0; j<FF_ARRAY_ELEMS(d->comp); j++) { const AVComponentDescriptor *c = &d->comp[j]; if(j>=d->nb_components) av_assert0(!c->plane && !c->step_minus1 && !c->offset_plus1 && !c->shift && !c->depth_minus1); } } }
22,999
0
static int vt82c686b_initfn(PCIDevice *d) { uint8_t *pci_conf; uint8_t *wmask; int i; isa_bus_new(&d->qdev); pci_conf = d->config; pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_VIA); pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_VIA_ISA_BRIDGE); pci_config_set_class(pci_conf, PCI_CLASS_BRIDGE_ISA); pci_config_set_prog_interface(pci_conf, 0x0); pci_config_set_revision(pci_conf,0x40); /* Revision 4.0 */ wmask = d->wmask; for (i = 0x00; i < 0xff; i++) { if (i<=0x03 || (i>=0x08 && i<=0x3f)) { wmask[i] = 0x00; } } qemu_register_reset(vt82c686b_reset, d); return 0; }
23,000
0
static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb) { int r = 0; uint16_t func_code; /* * For any diagnose call we support, bits 48-63 of the resulting * address specify the function code; the remainder is ignored. */ func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK; switch (func_code) { case DIAG_IPL: kvm_handle_diag_308(cpu, run); break; case DIAG_KVM_HYPERCALL: r = handle_hypercall(cpu, run); break; case DIAG_KVM_BREAKPOINT: r = handle_sw_breakpoint(cpu, run); break; default: DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code); r = -1; break; } return r; }
23,001
0
static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq) { uint16_t old, new; bool v; /* We need to expose used array entries before checking used event. */ smp_mb(); /* Always notify when queue is empty (when feature acknowledge) */ if (virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) && !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx) { return true; } if (!virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT); } v = vq->signalled_used_valid; vq->signalled_used_valid = true; old = vq->signalled_used; new = vq->signalled_used = vring_used_idx(vq); return !v || vring_need_event(vring_get_used_event(vq), new, old); }
23,002
0
static void kvm_do_inject_x86_mce(void *_data) { struct kvm_x86_mce_data *data = _data; int r; /* If there is an MCE exception being processed, ignore this SRAO MCE */ if ((data->env->mcg_cap & MCG_SER_P) && !(data->mce->status & MCI_STATUS_AR)) { r = kvm_mce_in_exception(data->env); if (r == -1) { fprintf(stderr, "Failed to get MCE status\n"); } else if (r) { return; } } r = kvm_set_mce(data->env, data->mce); if (r < 0) { perror("kvm_set_mce FAILED"); if (data->abort_on_error) { abort(); } } }
23,003
0
void bareetraxfs_init (ram_addr_t ram_size, int vga_ram_size, const char *boot_device, DisplayState *ds, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { CPUState *env; struct etraxfs_pic *pic; void *etraxfs_dmac; struct etraxfs_dma_client *eth[2] = {NULL, NULL}; int kernel_size; int i; ram_addr_t phys_ram; ram_addr_t phys_flash; ram_addr_t phys_intmem; /* init CPUs */ if (cpu_model == NULL) { cpu_model = "crisv32"; } env = cpu_init(cpu_model); qemu_register_reset(main_cpu_reset, env); /* allocate RAM */ phys_ram = qemu_ram_alloc(ram_size); cpu_register_physical_memory(0x40000000, ram_size, phys_ram | IO_MEM_RAM); /* The ETRAX-FS has 128Kb on chip ram, the docs refer to it as the internal memory. */ phys_intmem = qemu_ram_alloc(INTMEM_SIZE); cpu_register_physical_memory(0x38000000, INTMEM_SIZE, phys_intmem | IO_MEM_RAM); phys_flash = qemu_ram_alloc(FLASH_SIZE); i = drive_get_index(IF_PFLASH, 0, 0); pflash_cfi02_register(0x0, phys_flash, i != -1 ? drives_table[i].bdrv : NULL, (64 * 1024), FLASH_SIZE >> 16, 1, 2, 0x0000, 0x0000, 0x0000, 0x0000, 0x555, 0x2aa); pic = etraxfs_pic_init(env, 0x3001c000); etraxfs_dmac = etraxfs_dmac_init(env, 0x30000000, 10); for (i = 0; i < 10; i++) { /* On ETRAX, odd numbered channels are inputs. */ etraxfs_dmac_connect(etraxfs_dmac, i, pic->irq + 7 + i, i & 1); } /* Add the two ethernet blocks. */ eth[0] = etraxfs_eth_init(&nd_table[0], env, pic->irq + 25, 0x30034000); if (nb_nics > 1) eth[1] = etraxfs_eth_init(&nd_table[1], env, pic->irq + 26, 0x30036000); /* The DMA Connector block is missing, hardwire things for now. */ etraxfs_dmac_connect_client(etraxfs_dmac, 0, eth[0]); etraxfs_dmac_connect_client(etraxfs_dmac, 1, eth[0] + 1); if (eth[1]) { etraxfs_dmac_connect_client(etraxfs_dmac, 6, eth[1]); etraxfs_dmac_connect_client(etraxfs_dmac, 7, eth[1] + 1); } /* 2 timers. */ etraxfs_timer_init(env, pic->irq + 0x1b, pic->nmi + 1, 0x3001e000); etraxfs_timer_init(env, pic->irq + 0x1b, pic->nmi + 1, 0x3005e000); for (i = 0; i < 4; i++) { if (serial_hds[i]) { etraxfs_ser_init(env, pic->irq + 0x14 + i, serial_hds[i], 0x30026000 + i * 0x2000); } } if (kernel_filename) { uint64_t entry, high; int kcmdline_len; /* Boots a kernel elf binary, os/linux-2.6/vmlinux from the axis devboard SDK. */ kernel_size = load_elf(kernel_filename, -0x80000000LL, &entry, NULL, &high); bootstrap_pc = entry; if (kernel_size < 0) { /* Takes a kimage from the axis devboard SDK. */ kernel_size = load_image(kernel_filename, phys_ram_base + 0x4000); bootstrap_pc = 0x40004000; env->regs[9] = 0x40004000 + kernel_size; } env->regs[8] = 0x56902387; /* RAM init magic. */ if (kernel_cmdline && (kcmdline_len = strlen(kernel_cmdline))) { if (kcmdline_len > 256) { fprintf(stderr, "Too long CRIS kernel cmdline (max 256)\n"); exit(1); } pstrcpy_targphys(high, 256, kernel_cmdline); /* Let the kernel know we are modifying the cmdline. */ env->regs[10] = 0x87109563; env->regs[11] = high; } } env->pc = bootstrap_pc; printf ("pc =%x\n", env->pc); printf ("ram size =%ld\n", ram_size); }
23,004
0
static void tcg_out_bpcc(TCGContext *s, int scond, int flags, int label) { TCGLabel *l = &s->labels[label]; int off19; if (l->has_value) { off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr)); } else { /* Make sure to preserve destinations during retranslation. */ off19 = *s->code_ptr & INSN_OFF19(-1); tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label, 0); } tcg_out_bpcc0(s, scond, flags, off19); }
23,005
0
void load_kernel (CPUState *env, int ram_size, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename) { int64_t entry = 0; long kernel_size, initrd_size; kernel_size = load_elf(kernel_filename, VIRT_TO_PHYS_ADDEND, &entry); if (kernel_size >= 0) { if ((entry & ~0x7fffffffULL) == 0x80000000) entry = (int32_t)entry; env->PC = entry; } else { kernel_size = load_image(kernel_filename, phys_ram_base + KERNEL_LOAD_ADDR + VIRT_TO_PHYS_ADDEND); if (kernel_size < 0) { fprintf(stderr, "qemu: could not load kernel '%s'\n", kernel_filename); exit(1); } env->PC = KERNEL_LOAD_ADDR; } /* load initrd */ initrd_size = 0; if (initrd_filename) { initrd_size = load_image(initrd_filename, phys_ram_base + INITRD_LOAD_ADDR + VIRT_TO_PHYS_ADDEND); if (initrd_size == (target_ulong) -1) { fprintf(stderr, "qemu: could not load initial ram disk '%s'\n", initrd_filename); exit(1); } } /* Store command line. */ if (initrd_size > 0) { int ret; ret = sprintf(phys_ram_base + (16 << 20) - 256, "rd_start=0x" TLSZ " rd_size=%li ", INITRD_LOAD_ADDR, initrd_size); strcpy (phys_ram_base + (16 << 20) - 256 + ret, kernel_cmdline); } else { strcpy (phys_ram_base + (16 << 20) - 256, kernel_cmdline); } *(int *)(phys_ram_base + (16 << 20) - 260) = tswap32 (0x12345678); *(int *)(phys_ram_base + (16 << 20) - 264) = tswap32 (ram_size); }
23,006
0
static int posix_aio_init(void) { struct sigaction act; PosixAioState *s; int fds[2]; struct qemu_paioinit ai; if (posix_aio_state) return 0; s = qemu_malloc(sizeof(PosixAioState)); sigfillset(&act.sa_mask); act.sa_flags = 0; /* do not restart syscalls to interrupt select() */ act.sa_handler = aio_signal_handler; sigaction(SIGUSR2, &act, NULL); s->first_aio = NULL; if (pipe(fds) == -1) { fprintf(stderr, "failed to create pipe\n"); return -errno; } s->rfd = fds[0]; s->wfd = fds[1]; fcntl(s->rfd, F_SETFL, O_NONBLOCK); fcntl(s->wfd, F_SETFL, O_NONBLOCK); qemu_aio_set_fd_handler(s->rfd, posix_aio_read, NULL, posix_aio_flush, s); memset(&ai, 0, sizeof(ai)); ai.aio_threads = 64; ai.aio_num = 64; qemu_paio_init(&ai); posix_aio_state = s; return 0; }
23,007
0
e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size) { E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque; struct e1000_rx_desc desc; dma_addr_t base; unsigned int n, rdt; uint32_t rdh_start; uint16_t vlan_special = 0; uint8_t vlan_status = 0, vlan_offset = 0; uint8_t min_buf[MIN_BUF_SIZE]; size_t desc_offset; size_t desc_size; size_t total_size; if (!(s->mac_reg[RCTL] & E1000_RCTL_EN)) return -1; /* Pad to minimum Ethernet frame length */ if (size < sizeof(min_buf)) { memcpy(min_buf, buf, size); memset(&min_buf[size], 0, sizeof(min_buf) - size); buf = min_buf; size = sizeof(min_buf); } if (!receive_filter(s, buf, size)) return size; if (vlan_enabled(s) && is_vlan_packet(s, buf)) { vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14))); memmove((uint8_t *)buf + 4, buf, 12); vlan_status = E1000_RXD_STAT_VP; vlan_offset = 4; size -= 4; } rdh_start = s->mac_reg[RDH]; desc_offset = 0; total_size = size + fcs_len(s); if (!e1000_has_rxbufs(s, total_size)) { set_ics(s, 0, E1000_ICS_RXO); return -1; } do { desc_size = total_size - desc_offset; if (desc_size > s->rxbuf_size) { desc_size = s->rxbuf_size; } base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH]; pci_dma_read(&s->dev, base, &desc, sizeof(desc)); desc.special = vlan_special; desc.status |= (vlan_status | E1000_RXD_STAT_DD); if (desc.buffer_addr) { if (desc_offset < size) { size_t copy_size = size - desc_offset; if (copy_size > s->rxbuf_size) { copy_size = s->rxbuf_size; } pci_dma_write(&s->dev, le64_to_cpu(desc.buffer_addr), buf + desc_offset + vlan_offset, copy_size); } desc_offset += desc_size; desc.length = cpu_to_le16(desc_size); if (desc_offset >= total_size) { desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM; } else { /* Guest zeroing out status is not a hardware requirement. Clear EOP in case guest didn't do it. */ desc.status &= ~E1000_RXD_STAT_EOP; } } else { // as per intel docs; skip descriptors with null buf addr DBGOUT(RX, "Null RX descriptor!!\n"); } pci_dma_write(&s->dev, base, &desc, sizeof(desc)); if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN]) s->mac_reg[RDH] = 0; s->check_rxov = 1; /* see comment in start_xmit; same here */ if (s->mac_reg[RDH] == rdh_start) { DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n", rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]); set_ics(s, 0, E1000_ICS_RXO); return -1; } } while (desc_offset < total_size); s->mac_reg[GPRC]++; s->mac_reg[TPR]++; /* TOR - Total Octets Received: * This register includes bytes received in a packet from the <Destination * Address> field through the <CRC> field, inclusively. */ n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4; if (n < s->mac_reg[TORL]) s->mac_reg[TORH]++; s->mac_reg[TORL] = n; n = E1000_ICS_RXT0; if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH]) rdt += s->mac_reg[RDLEN] / sizeof(desc); if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >> s->rxbuf_min_shift) n |= E1000_ICS_RXDMT0; set_ics(s, 0, n); return size; }
23,008
0
void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, unsigned int *out_bytes, unsigned max_in_bytes, unsigned max_out_bytes) { unsigned int idx; unsigned int total_bufs, in_total, out_total; idx = vq->last_avail_idx; total_bufs = in_total = out_total = 0; while (virtqueue_num_heads(vq, idx)) { unsigned int max, num_bufs, indirect = 0; hwaddr desc_pa; int i; max = vq->vring.num; num_bufs = total_bufs; i = virtqueue_get_head(vq, idx++); desc_pa = vq->vring.desc; if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) { if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) { error_report("Invalid size for indirect buffer table"); exit(1); } /* If we've got too many, that implies a descriptor loop. */ if (num_bufs >= max) { error_report("Looped descriptor"); exit(1); } /* loop over the indirect descriptor table */ indirect = 1; max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc); num_bufs = i = 0; desc_pa = vring_desc_addr(desc_pa, i); } do { /* If we've got too many, that implies a descriptor loop. */ if (++num_bufs > max) { error_report("Looped descriptor"); exit(1); } if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) { in_total += vring_desc_len(desc_pa, i); } else { out_total += vring_desc_len(desc_pa, i); } if (in_total >= max_in_bytes && out_total >= max_out_bytes) { goto done; } } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max); if (!indirect) total_bufs = num_bufs; else total_bufs++; } done: if (in_bytes) { *in_bytes = in_total; } if (out_bytes) { *out_bytes = out_total; } }
23,009
0
static void mxf_write_generic_desc(ByteIOContext *pb, const MXFDescriptorWriteTableEntry *desc_tbl, AVStream *st) { const MXFCodecUL *codec_ul; put_buffer(pb, desc_tbl->key, 16); klv_encode_ber_length(pb, 108); mxf_write_local_tag(pb, 16, 0x3C0A); mxf_write_uuid(pb, SubDescriptor, st->index); mxf_write_local_tag(pb, 4, 0x3006); put_be32(pb, st->index); mxf_write_local_tag(pb, 8, 0x3001); put_be32(pb, st->time_base.den); put_be32(pb, st->time_base.num); codec_ul = mxf_get_essence_container_ul(st->codec->codec_id); mxf_write_local_tag(pb, 16, 0x3004); put_buffer(pb, codec_ul->uid, 16); }
23,010
0
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue, int ctrl) { VirtIODevice *vdev = VIRTIO_DEVICE(n); int i, max = multiqueue ? n->max_queues : 1; n->multiqueue = multiqueue; for (i = 2; i <= n->max_queues * 2 + 1; i++) { virtio_del_queue(vdev, i); } for (i = 1; i < max; i++) { n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx); if (n->vqs[i].tx_timer) { n->vqs[i].tx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer); n->vqs[i].tx_timer = qemu_new_timer_ns(vm_clock, virtio_net_tx_timer, &n->vqs[i]); } else { n->vqs[i].tx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh); n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]); } n->vqs[i].tx_waiting = 0; n->vqs[i].n = n; } if (ctrl) { n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl); } virtio_net_set_queues(n); }
23,012
0
int register_savevm_live(const char *idstr, int instance_id, int version_id, SaveLiveStateHandler *save_live_state, SaveStateHandler *save_state, LoadStateHandler *load_state, void *opaque) { SaveStateEntry *se; se = qemu_malloc(sizeof(SaveStateEntry)); pstrcpy(se->idstr, sizeof(se->idstr), idstr); se->version_id = version_id; se->section_id = global_section_id++; se->save_live_state = save_live_state; se->save_state = save_state; se->load_state = load_state; se->opaque = opaque; se->vmsd = NULL; if (instance_id == -1) { se->instance_id = calculate_new_instance_id(idstr); } else { se->instance_id = instance_id; } /* add at the end of list */ TAILQ_INSERT_TAIL(&savevm_handlers, se, entry); return 0; }
23,013
0
static void sun4m_hw_init(const struct hwdef *hwdef, int ram_size, DisplayState *ds, const char *cpu_model) { CPUState *env, *envs[MAX_CPUS]; unsigned int i; void *iommu, *espdma, *ledma, *main_esp; const sparc_def_t *def; qemu_irq *cpu_irqs[MAX_CPUS], *slavio_irq, *slavio_cpu_irq, *espdma_irq, *ledma_irq; /* init CPUs */ sparc_find_by_name(cpu_model, &def); if (def == NULL) { fprintf(stderr, "Unable to find Sparc CPU definition\n"); exit(1); } for(i = 0; i < smp_cpus; i++) { env = cpu_init(); cpu_sparc_register(env, def); envs[i] = env; if (i == 0) { qemu_register_reset(main_cpu_reset, env); } else { qemu_register_reset(secondary_cpu_reset, env); env->halted = 1; } register_savevm("cpu", i, 3, cpu_save, cpu_load, env); cpu_irqs[i] = qemu_allocate_irqs(cpu_set_irq, envs[i], MAX_PILS); } for (i = smp_cpus; i < MAX_CPUS; i++) cpu_irqs[i] = qemu_allocate_irqs(dummy_cpu_set_irq, NULL, MAX_PILS); /* allocate RAM */ cpu_register_physical_memory(0, ram_size, 0); iommu = iommu_init(hwdef->iommu_base); slavio_intctl = slavio_intctl_init(hwdef->intctl_base, hwdef->intctl_base + 0x10000ULL, &hwdef->intbit_to_level[0], &slavio_irq, &slavio_cpu_irq, cpu_irqs, hwdef->clock_irq); espdma = sparc32_dma_init(hwdef->dma_base, slavio_irq[hwdef->esp_irq], iommu, &espdma_irq); ledma = sparc32_dma_init(hwdef->dma_base + 16ULL, slavio_irq[hwdef->le_irq], iommu, &ledma_irq); if (graphic_depth != 8 && graphic_depth != 24) { fprintf(stderr, "qemu: Unsupported depth: %d\n", graphic_depth); exit (1); } tcx_init(ds, hwdef->tcx_base, phys_ram_base + ram_size, ram_size, hwdef->vram_size, graphic_width, graphic_height, graphic_depth); if (nd_table[0].model == NULL || strcmp(nd_table[0].model, "lance") == 0) { lance_init(&nd_table[0], hwdef->le_base, ledma, *ledma_irq); } else if (strcmp(nd_table[0].model, "?") == 0) { fprintf(stderr, "qemu: Supported NICs: lance\n"); exit (1); } else { fprintf(stderr, "qemu: Unsupported NIC: %s\n", nd_table[0].model); exit (1); } nvram = m48t59_init(slavio_irq[0], hwdef->nvram_base, 0, hwdef->nvram_size, 8); for (i = 0; i < MAX_CPUS; i++) { slavio_timer_init(hwdef->counter_base + (target_phys_addr_t)(i * TARGET_PAGE_SIZE), slavio_cpu_irq[i], 0); } slavio_timer_init(hwdef->counter_base + 0x10000ULL, slavio_irq[hwdef->clock1_irq], 2); slavio_serial_ms_kbd_init(hwdef->ms_kb_base, slavio_irq[hwdef->ms_kb_irq]); // Slavio TTYA (base+4, Linux ttyS0) is the first Qemu serial device // Slavio TTYB (base+0, Linux ttyS1) is the second Qemu serial device slavio_serial_init(hwdef->serial_base, slavio_irq[hwdef->ser_irq], serial_hds[1], serial_hds[0]); fdctrl_init(slavio_irq[hwdef->fd_irq], 0, 1, hwdef->fd_base, fd_table); main_esp = esp_init(bs_table, hwdef->esp_base, espdma, *espdma_irq); for (i = 0; i < MAX_DISKS; i++) { if (bs_table[i]) { esp_scsi_attach(main_esp, bs_table[i], i); } } slavio_misc = slavio_misc_init(hwdef->slavio_base, hwdef->power_base, slavio_irq[hwdef->me_irq]); if (hwdef->cs_base != (target_phys_addr_t)-1) cs_init(hwdef->cs_base, hwdef->cs_irq, slavio_intctl); }
23,014
0
static void init_demo(const char *filename) { int i, j; int h; int radian; char line[3 * W]; FILE *fichier; fichier = fopen(filename, "rb"); if (!fichier) { perror(filename); exit(1); } fread(line, 1, 15, fichier); for (i = 0; i < H; i++) { fread(line, 1, 3 * W, fichier); for (j = 0; j < W; j++) { tab_r[W * i + j] = line[3 * j ]; tab_g[W * i + j] = line[3 * j + 1]; tab_b[W * i + j] = line[3 * j + 2]; } } fclose(fichier); /* tables sin/cos */ for (i = 0; i < 360; i++) { radian = 2 * i * MY_PI / 360; h = 2 * FIXP + int_sin (radian); h_cos[i] = h * int_sin(radian + MY_PI / 2) / 2 / FIXP; h_sin[i] = h * int_sin(radian) / 2 / FIXP; } }
23,018
0
static int qsv_get_buffer(AVCodecContext *s, AVFrame *frame, int flags) { InputStream *ist = s->opaque; QSVContext *qsv = ist->hwaccel_ctx; int i; for (i = 0; i < qsv->nb_surfaces; i++) { if (qsv->surface_used[i]) continue; frame->buf[0] = av_buffer_create((uint8_t*)qsv->surface_ptrs[i], sizeof(*qsv->surface_ptrs[i]), buffer_release, &qsv->surface_used[i], 0); if (!frame->buf[0]) return AVERROR(ENOMEM); frame->data[3] = (uint8_t*)qsv->surface_ptrs[i]; qsv->surface_used[i] = 1; return 0; } return AVERROR(ENOMEM); }
23,020
0
static int vid_read_packet(AVFormatContext *s, AVPacket *pkt) { BVID_DemuxContext *vid = s->priv_data; AVIOContext *pb = s->pb; unsigned char block_type; int audio_length; int ret_value; if(vid->is_finished || pb->eof_reached) return AVERROR(EIO); block_type = avio_r8(pb); switch(block_type){ case PALETTE_BLOCK: avio_seek(pb, -1, SEEK_CUR); // include block type ret_value = av_get_packet(pb, pkt, 3 * 256 + 1); if(ret_value != 3 * 256 + 1){ av_free_packet(pkt); return AVERROR(EIO); } pkt->stream_index = 0; return ret_value; case FIRST_AUDIO_BLOCK: avio_rl16(pb); // soundblaster DAC used for sample rate, as on specification page (link above) s->streams[1]->codec->sample_rate = 1000000 / (256 - avio_r8(pb)); s->streams[1]->codec->bit_rate = s->streams[1]->codec->channels * s->streams[1]->codec->sample_rate * s->streams[1]->codec->bits_per_coded_sample; case AUDIO_BLOCK: audio_length = avio_rl16(pb); ret_value = av_get_packet(pb, pkt, audio_length); pkt->stream_index = 1; return ret_value != audio_length ? AVERROR(EIO) : ret_value; case VIDEO_P_FRAME: case VIDEO_YOFF_P_FRAME: case VIDEO_I_FRAME: return read_frame(vid, pb, pkt, block_type, s, s->streams[0]->codec->width * s->streams[0]->codec->height); case EOF_BLOCK: if(vid->nframes != 0) av_log(s, AV_LOG_VERBOSE, "reached terminating character but not all frames read.\n"); vid->is_finished = 1; return AVERROR(EIO); default: av_log(s, AV_LOG_ERROR, "unknown block (character = %c, decimal = %d, hex = %x)!!!\n", block_type, block_type, block_type); return -1; } }
23,021
0
void avpriv_solve_lls(LLSModel *m, double threshold, unsigned short min_order) { int i, j, k; double (*factor)[MAX_VARS_ALIGN] = (void *) &m->covariance[1][0]; double (*covar) [MAX_VARS_ALIGN] = (void *) &m->covariance[1][1]; double *covar_y = m->covariance[0]; int count = m->indep_count; for (i = 0; i < count; i++) { for (j = i; j < count; j++) { double sum = covar[i][j]; for (k = i - 1; k >= 0; k--) sum -= factor[i][k] * factor[j][k]; if (i == j) { if (sum < threshold) sum = 1.0; factor[i][i] = sqrt(sum); } else { factor[j][i] = sum / factor[i][i]; } } } for (i = 0; i < count; i++) { double sum = covar_y[i + 1]; for (k = i - 1; k >= 0; k--) sum -= factor[i][k] * m->coeff[0][k]; m->coeff[0][i] = sum / factor[i][i]; } for (j = count - 1; j >= min_order; j--) { for (i = j; i >= 0; i--) { double sum = m->coeff[0][i]; for (k = i + 1; k <= j; k++) sum -= factor[k][i] * m->coeff[j][k]; m->coeff[j][i] = sum / factor[i][i]; } m->variance[j] = covar_y[0]; for (i = 0; i <= j; i++) { double sum = m->coeff[j][i] * covar[i][i] - 2 * covar_y[i + 1]; for (k = 0; k < i; k++) sum += 2 * m->coeff[j][k] * covar[k][i]; m->variance[j] += m->coeff[j][i] * sum; } } }
23,023
0
static int hap_encode(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet) { HapContext *ctx = avctx->priv_data; int header_length = hap_header_length(ctx); int final_data_size, ret; int pktsize = FFMAX(ctx->tex_size, ctx->max_snappy * ctx->chunk_count) + header_length; /* Allocate maximum size packet, shrink later. */ ret = ff_alloc_packet2(avctx, pkt, pktsize, header_length); if (ret < 0) return ret; /* DXTC compression. */ ret = compress_texture(avctx, ctx->tex_buf, ctx->tex_size, frame); if (ret < 0) return ret; /* Compress (using Snappy) the frame */ final_data_size = hap_compress_frame(avctx, pkt->data + header_length); if (final_data_size < 0) return final_data_size; /* Write header at the start. */ hap_write_frame_header(ctx, pkt->data, final_data_size + header_length); av_shrink_packet(pkt, final_data_size + header_length); pkt->flags |= AV_PKT_FLAG_KEY; *got_packet = 1; return 0; }
23,024
0
static void add_pid_to_pmt(MpegTSContext *ts, unsigned int programid, unsigned int pid) { int i; struct Program *p = NULL; for(i=0; i<ts->nb_prg; i++) { if(ts->prg[i].id == programid) { p = &ts->prg[i]; break; } } if(!p) return; if(p->nb_pids >= MAX_PIDS_PER_PROGRAM) return; p->pids[p->nb_pids++] = pid; }
23,025
0
void ff_h261_loop_filter(H261Context * h){ MpegEncContext * const s = &h->s; int i; const int linesize = s->linesize; const int uvlinesize= s->uvlinesize; uint8_t *dest_y = s->dest[0]; uint8_t *dest_cb= s->dest[1]; uint8_t *dest_cr= s->dest[2]; uint8_t *src; CHECKED_ALLOCZ((src),sizeof(uint8_t) * 64 ); for(i=0; i<8;i++) memcpy(src+i*8,dest_y+i*linesize,sizeof(uint8_t) * 8 ); s->dsp.h261_v_loop_filter(dest_y, src, linesize); s->dsp.h261_h_loop_filter(dest_y, src, linesize); for(i=0; i<8;i++) memcpy(src+i*8,dest_y+i*linesize + 8,sizeof(uint8_t) * 8 ); s->dsp.h261_v_loop_filter(dest_y + 8, src, linesize); s->dsp.h261_h_loop_filter(dest_y + 8, src, linesize); for(i=0; i<8;i++) memcpy(src+i*8,dest_y+(i+8)*linesize,sizeof(uint8_t) * 8 ); s->dsp.h261_v_loop_filter(dest_y + 8 * linesize, src, linesize); s->dsp.h261_h_loop_filter(dest_y + 8 * linesize, src, linesize); for(i=0; i<8;i++) memcpy(src+i*8,dest_y+(i+8)*linesize + 8,sizeof(uint8_t) * 8 ); s->dsp.h261_v_loop_filter(dest_y + 8 * linesize + 8, src, linesize); s->dsp.h261_h_loop_filter(dest_y + 8 * linesize + 8, src, linesize); for(i=0; i<8;i++) memcpy(src+i*8,dest_cb+i*uvlinesize,sizeof(uint8_t) * 8 ); s->dsp.h261_v_loop_filter(dest_cb, src, uvlinesize); s->dsp.h261_h_loop_filter(dest_cb, src, uvlinesize); for(i=0; i<8;i++) memcpy(src+i*8,dest_cr+i*uvlinesize,sizeof(uint8_t) * 8 ); s->dsp.h261_v_loop_filter(dest_cr, src, uvlinesize); s->dsp.h261_h_loop_filter(dest_cr, src, uvlinesize); fail: av_free(src); return; }
23,026
0
void ff_put_h264_qpel8_mc32_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride) { avc_luma_midh_qrt_8w_msa(src - (2 * stride) - 2, stride, dst, stride, 8, 1); }
23,028
0
x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) { struct x11_grab *x11grab = s1->priv_data; Display *dpy; AVStream *st = NULL; enum PixelFormat input_pixfmt; XImage *image; int x_off = 0; int y_off = 0; int use_shm; char *param, *offset; int ret = 0; AVRational framerate; param = av_strdup(s1->filename); offset = strchr(param, '+'); if (offset) { sscanf(offset, "%d,%d", &x_off, &y_off); x11grab->nomouse= strstr(offset, "nomouse"); *offset= 0; } if ((ret = av_parse_video_size(&x11grab->width, &x11grab->height, x11grab->video_size)) < 0) { av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n"); goto out; } if ((ret = av_parse_video_rate(&framerate, x11grab->framerate)) < 0) { av_log(s1, AV_LOG_ERROR, "Could not parse framerate: %s.\n", x11grab->framerate); goto out; } #if FF_API_FORMAT_PARAMETERS if (ap->width > 0) x11grab->width = ap->width; if (ap->height > 0) x11grab->height = ap->height; if (ap->time_base.num) framerate = (AVRational){ap->time_base.den, ap->time_base.num}; #endif av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n", s1->filename, param, x_off, y_off, x11grab->width, x11grab->height); dpy = XOpenDisplay(param); if(!dpy) { av_log(s1, AV_LOG_ERROR, "Could not open X display.\n"); ret = AVERROR(EIO); goto out; } st = av_new_stream(s1, 0); if (!st) { ret = AVERROR(ENOMEM); goto out; } av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */ use_shm = XShmQueryExtension(dpy); av_log(s1, AV_LOG_INFO, "shared memory extension %s found\n", use_shm ? "" : "not"); if(use_shm) { int scr = XDefaultScreen(dpy); image = XShmCreateImage(dpy, DefaultVisual(dpy, scr), DefaultDepth(dpy, scr), ZPixmap, NULL, &x11grab->shminfo, x11grab->width, x11grab->height); x11grab->shminfo.shmid = shmget(IPC_PRIVATE, image->bytes_per_line * image->height, IPC_CREAT|0777); if (x11grab->shminfo.shmid == -1) { av_log(s1, AV_LOG_ERROR, "Fatal: Can't get shared memory!\n"); ret = AVERROR(ENOMEM); goto out; } x11grab->shminfo.shmaddr = image->data = shmat(x11grab->shminfo.shmid, 0, 0); x11grab->shminfo.readOnly = False; if (!XShmAttach(dpy, &x11grab->shminfo)) { av_log(s1, AV_LOG_ERROR, "Fatal: Failed to attach shared memory!\n"); /* needs some better error subroutine :) */ ret = AVERROR(EIO); goto out; } } else { image = XGetImage(dpy, RootWindow(dpy, DefaultScreen(dpy)), x_off,y_off, x11grab->width, x11grab->height, AllPlanes, ZPixmap); } switch (image->bits_per_pixel) { case 8: av_log (s1, AV_LOG_DEBUG, "8 bit palette\n"); input_pixfmt = PIX_FMT_PAL8; break; case 16: if ( image->red_mask == 0xf800 && image->green_mask == 0x07e0 && image->blue_mask == 0x001f ) { av_log (s1, AV_LOG_DEBUG, "16 bit RGB565\n"); input_pixfmt = PIX_FMT_RGB565; } else if (image->red_mask == 0x7c00 && image->green_mask == 0x03e0 && image->blue_mask == 0x001f ) { av_log(s1, AV_LOG_DEBUG, "16 bit RGB555\n"); input_pixfmt = PIX_FMT_RGB555; } else { av_log(s1, AV_LOG_ERROR, "RGB ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel); av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask); ret = AVERROR(EIO); goto out; } break; case 24: if ( image->red_mask == 0xff0000 && image->green_mask == 0x00ff00 && image->blue_mask == 0x0000ff ) { input_pixfmt = PIX_FMT_BGR24; } else if ( image->red_mask == 0x0000ff && image->green_mask == 0x00ff00 && image->blue_mask == 0xff0000 ) { input_pixfmt = PIX_FMT_RGB24; } else { av_log(s1, AV_LOG_ERROR,"rgb ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel); av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask); ret = AVERROR(EIO); goto out; } break; case 32: input_pixfmt = PIX_FMT_RGB32; break; default: av_log(s1, AV_LOG_ERROR, "image depth %i not supported ... aborting\n", image->bits_per_pixel); ret = AVERROR(EINVAL); goto out; } x11grab->frame_size = x11grab->width * x11grab->height * image->bits_per_pixel/8; x11grab->dpy = dpy; x11grab->time_base = (AVRational){framerate.den, framerate.num}; x11grab->time_frame = av_gettime() / av_q2d(x11grab->time_base); x11grab->x_off = x_off; x11grab->y_off = y_off; x11grab->image = image; x11grab->use_shm = use_shm; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_RAWVIDEO; st->codec->width = x11grab->width; st->codec->height = x11grab->height; st->codec->pix_fmt = input_pixfmt; st->codec->time_base = x11grab->time_base; st->codec->bit_rate = x11grab->frame_size * 1/av_q2d(x11grab->time_base) * 8; out: return ret; }
23,029
1
static void simple_list(void) { int i; struct { const char *encoded; LiteralQObject decoded; } test_cases[] = { { .encoded = "[43,42]", .decoded = QLIT_QLIST(((LiteralQObject[]){ QLIT_QINT(43), QLIT_QINT(42), { } })), }, { .encoded = "[43]", .decoded = QLIT_QLIST(((LiteralQObject[]){ QLIT_QINT(43), { } })), }, { .encoded = "[]", .decoded = QLIT_QLIST(((LiteralQObject[]){ { } })), }, { .encoded = "[{}]", .decoded = QLIT_QLIST(((LiteralQObject[]){ QLIT_QDICT(((LiteralQDictEntry[]){ {}, })), {}, })), }, { } }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QString *str; obj = qobject_from_json(test_cases[i].encoded, NULL); g_assert(compare_litqobj_to_qobj(&test_cases[i].decoded, obj) == 1); str = qobject_to_json(obj); qobject_decref(obj); obj = qobject_from_json(qstring_get_str(str), NULL); g_assert(compare_litqobj_to_qobj(&test_cases[i].decoded, obj) == 1); qobject_decref(obj); QDECREF(str); } }
23,030
1
static inline void RENAME(yuv2yuyv422_1)(SwsContext *c, const uint16_t *buf0, const uint16_t *ubuf0, const uint16_t *ubuf1, const uint16_t *vbuf0, const uint16_t *vbuf1, const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y) { x86_reg uv_off = c->uv_off << 1; const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1 if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" YSCALEYUV2PACKED1(%%REGBP, %5, %6) WRITEYUY2(%%REGb, 8280(%5), %%REGBP) "pop %%"REG_BP" \n\t" "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither), "m"(uv_off) ); } else { __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" YSCALEYUV2PACKED1b(%%REGBP, %5, %6) WRITEYUY2(%%REGb, 8280(%5), %%REGBP) "pop %%"REG_BP" \n\t" "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither), "m"(uv_off) ); } }
23,031
1
static void tcp_wait_for_connect(void *opaque) { MigrationState *s = opaque; int val, ret; socklen_t valsize = sizeof(val); DPRINTF("connect completed\n"); do { ret = getsockopt(s->fd, SOL_SOCKET, SO_ERROR, (void *) &val, &valsize); } while (ret == -1 && (socket_error()) == EINTR); if (ret < 0) { migrate_fd_error(s); return; } qemu_set_fd_handler2(s->fd, NULL, NULL, NULL, NULL); if (val == 0) migrate_fd_connect(s); else { DPRINTF("error connecting %d\n", val); migrate_fd_error(s); } }
23,032
0
static int ipvideo_decode_block_opcode_0x4(IpvideoContext *s) { int x, y; unsigned char B, BL, BH; /* copy a block from the previous frame; need 1 more byte */ CHECK_STREAM_PTR(1); B = *s->stream_ptr++; BL = B & 0x0F; BH = (B >> 4) & 0x0F; x = -8 + BL; y = -8 + BH; debug_interplay (" motion byte = %d, (x, y) = (%d, %d)\n", B, x, y); return copy_from(s, &s->last_frame, x, y); }
23,035
0
static int open_input_stream(HTTPContext *c, const char *info) { char buf[128]; char input_filename[1024]; AVFormatContext *s; int buf_size, i, ret; int64_t stream_pos; /* find file name */ if (c->stream->feed) { strcpy(input_filename, c->stream->feed->feed_filename); buf_size = FFM_PACKET_SIZE; /* compute position (absolute time) */ if (find_info_tag(buf, sizeof(buf), "date", info)) { stream_pos = parse_date(buf, 0); if (stream_pos == INT64_MIN) return -1; } else if (find_info_tag(buf, sizeof(buf), "buffer", info)) { int prebuffer = strtol(buf, 0, 10); stream_pos = av_gettime() - prebuffer * (int64_t)1000000; } else stream_pos = av_gettime() - c->stream->prebuffer * (int64_t)1000; } else { strcpy(input_filename, c->stream->feed_filename); buf_size = 0; /* compute position (relative time) */ if (find_info_tag(buf, sizeof(buf), "date", info)) { stream_pos = parse_date(buf, 1); if (stream_pos == INT64_MIN) return -1; } else stream_pos = 0; } if (input_filename[0] == '\0') return -1; #if 0 { time_t when = stream_pos / 1000000; http_log("Stream pos = %"PRId64", time=%s", stream_pos, ctime(&when)); } #endif /* open stream */ if ((ret = av_open_input_file(&s, input_filename, c->stream->ifmt, buf_size, c->stream->ap_in)) < 0) { http_log("could not open %s: %d\n", input_filename, ret); return -1; } s->flags |= AVFMT_FLAG_GENPTS; c->fmt_in = s; av_find_stream_info(c->fmt_in); /* open each parser */ for(i=0;i<s->nb_streams;i++) open_parser(s, i); /* choose stream as clock source (we favorize video stream if present) for packet sending */ c->pts_stream_index = 0; for(i=0;i<c->stream->nb_streams;i++) { if (c->pts_stream_index == 0 && c->stream->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) { c->pts_stream_index = i; } } #if 1 if (c->fmt_in->iformat->read_seek) av_seek_frame(c->fmt_in, -1, stream_pos, 0); #endif /* set the start time (needed for maxtime and RTP packet timing) */ c->start_time = cur_time; c->first_pts = AV_NOPTS_VALUE; return 0; }
23,036
0
static int rtcp_parse_packet(RTPDemuxContext *s, const unsigned char *buf, int len) { int payload_len; while (len >= 4) { payload_len = FFMIN(len, (AV_RB16(buf + 2) + 1) * 4); switch (buf[1]) { case RTCP_SR: if (payload_len < 20) { av_log(NULL, AV_LOG_ERROR, "Invalid length for RTCP SR packet\n"); return AVERROR_INVALIDDATA; } s->last_rtcp_reception_time = av_gettime_relative(); s->last_rtcp_ntp_time = AV_RB64(buf + 8); s->last_rtcp_timestamp = AV_RB32(buf + 16); if (s->first_rtcp_ntp_time == AV_NOPTS_VALUE) { s->first_rtcp_ntp_time = s->last_rtcp_ntp_time; if (!s->base_timestamp) s->base_timestamp = s->last_rtcp_timestamp; s->rtcp_ts_offset = s->last_rtcp_timestamp - s->base_timestamp; } break; case RTCP_BYE: return -RTCP_BYE; } buf += payload_len; len -= payload_len; } return -1; }
23,037
0
void idct_add_altivec(uint8_t* dest, int stride, vector_s16_t* block) { POWERPC_TBL_DECLARE(altivec_idct_add_num, 1); #ifdef ALTIVEC_USE_REFERENCE_C_CODE POWERPC_TBL_START_COUNT(altivec_idct_add_num, 1); void simple_idct_add(uint8_t *dest, int line_size, int16_t *block); simple_idct_add(dest, stride, (int16_t*)block); POWERPC_TBL_STOP_COUNT(altivec_idct_add_num, 1); #else /* ALTIVEC_USE_REFERENCE_C_CODE */ vector_u8_t tmp; vector_s16_t tmp2, tmp3; vector_u8_t perm0; vector_u8_t perm1; vector_u8_t p0, p1, p; POWERPC_TBL_START_COUNT(altivec_idct_add_num, 1); IDCT p0 = vec_lvsl (0, dest); p1 = vec_lvsl (stride, dest); p = vec_splat_u8 (-1); perm0 = vec_mergeh (p, p0); perm1 = vec_mergeh (p, p1); #define ADD(dest,src,perm) \ /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \ tmp = vec_ld (0, dest); \ tmp2 = (vector_s16_t)vec_perm (tmp, (vector_u8_t)zero, perm); \ tmp3 = vec_adds (tmp2, src); \ tmp = vec_packsu (tmp3, tmp3); \ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest); ADD (dest, vx0, perm0) dest += stride; ADD (dest, vx1, perm1) dest += stride; ADD (dest, vx2, perm0) dest += stride; ADD (dest, vx3, perm1) dest += stride; ADD (dest, vx4, perm0) dest += stride; ADD (dest, vx5, perm1) dest += stride; ADD (dest, vx6, perm0) dest += stride; ADD (dest, vx7, perm1) POWERPC_TBL_STOP_COUNT(altivec_idct_add_num, 1); #endif /* ALTIVEC_USE_REFERENCE_C_CODE */ }
23,038
0
AVBufferRef *av_buffer_alloc(int size) { AVBufferRef *ret = NULL; uint8_t *data = NULL; data = av_malloc(size); if (!data) return NULL; if(CONFIG_MEMORY_POISONING) memset(data, 0x2a, size); ret = av_buffer_create(data, size, av_buffer_default_free, NULL, 0); if (!ret) av_freep(&data); return ret; }
23,039
0
void msix_notify(PCIDevice *dev, unsigned vector) { MSIMessage msg; if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) return; if (msix_is_masked(dev, vector)) { msix_set_pending(dev, vector); return; } msg = msix_get_message(dev, vector); stl_le_phys(&address_space_memory, msg.address, msg.data); }
23,040
0
void msi_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len) { uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev)); bool msi64bit = flags & PCI_MSI_FLAGS_64BIT; bool msi_per_vector_mask = flags & PCI_MSI_FLAGS_MASKBIT; unsigned int nr_vectors; uint8_t log_num_vecs; uint8_t log_max_vecs; unsigned int vector; uint32_t pending; if (!ranges_overlap(addr, len, dev->msi_cap, msi_cap_sizeof(flags))) { return; } #ifdef MSI_DEBUG MSI_DEV_PRINTF(dev, "addr 0x%"PRIx32" val 0x%"PRIx32" len %d\n", addr, val, len); MSI_DEV_PRINTF(dev, "ctrl: 0x%"PRIx16" address: 0x%"PRIx32, flags, pci_get_long(dev->config + msi_address_lo_off(dev))); if (msi64bit) { fprintf(stderr, " address-hi: 0x%"PRIx32, pci_get_long(dev->config + msi_address_hi_off(dev))); } fprintf(stderr, " data: 0x%"PRIx16, pci_get_word(dev->config + msi_data_off(dev, msi64bit))); if (flags & PCI_MSI_FLAGS_MASKBIT) { fprintf(stderr, " mask 0x%"PRIx32" pending 0x%"PRIx32, pci_get_long(dev->config + msi_mask_off(dev, msi64bit)), pci_get_long(dev->config + msi_pending_off(dev, msi64bit))); } fprintf(stderr, "\n"); #endif if (!(flags & PCI_MSI_FLAGS_ENABLE)) { return; } /* * Now MSI is enabled, clear INTx# interrupts. * the driver is prohibited from writing enable bit to mask * a service request. But the guest OS could do this. * So we just discard the interrupts as moderate fallback. * * 6.8.3.3. Enabling Operation * While enabled for MSI or MSI-X operation, a function is prohibited * from using its INTx# pin (if implemented) to request * service (MSI, MSI-X, and INTx# are mutually exclusive). */ pci_device_deassert_intx(dev); /* * nr_vectors might be set bigger than capable. So clamp it. * This is not legal by spec, so we can do anything we like, * just don't crash the host */ log_num_vecs = (flags & PCI_MSI_FLAGS_QSIZE) >> (ffs(PCI_MSI_FLAGS_QSIZE) - 1); log_max_vecs = (flags & PCI_MSI_FLAGS_QMASK) >> (ffs(PCI_MSI_FLAGS_QMASK) - 1); if (log_num_vecs > log_max_vecs) { flags &= ~PCI_MSI_FLAGS_QSIZE; flags |= log_max_vecs << (ffs(PCI_MSI_FLAGS_QSIZE) - 1); pci_set_word(dev->config + msi_flags_off(dev), flags); } if (!msi_per_vector_mask) { /* if per vector masking isn't supported, there is no pending interrupt. */ return; } nr_vectors = msi_nr_vectors(flags); /* This will discard pending interrupts, if any. */ pending = pci_get_long(dev->config + msi_pending_off(dev, msi64bit)); pending &= 0xffffffff >> (PCI_MSI_VECTORS_MAX - nr_vectors); pci_set_long(dev->config + msi_pending_off(dev, msi64bit), pending); /* deliver pending interrupts which are unmasked */ for (vector = 0; vector < nr_vectors; ++vector) { if (msi_is_masked(dev, vector) || !(pending & (1U << vector))) { continue; } pci_long_test_and_clear_mask( dev->config + msi_pending_off(dev, msi64bit), 1U << vector); msi_notify(dev, vector); } }
23,041
0
static bool acpi_has_nvdimm(void) { PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); return pcms->nvdimm; }
23,042
0
void ide_flush_cache(IDEState *s) { if (s->bs == NULL) { ide_flush_cb(s, 0); return; } s->status |= BUSY_STAT; block_acct_start(bdrv_get_stats(s->bs), &s->acct, 0, BLOCK_ACCT_FLUSH); s->pio_aiocb = bdrv_aio_flush(s->bs, ide_flush_cb, s); }
23,043
0
have_autoneg(E1000State *s) { return (s->compat_flags & E1000_FLAG_AUTONEG) && (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN); }
23,044
0
qemu_irq *mpic_init (MemoryRegion *address_space, hwaddr base, int nb_cpus, qemu_irq **irqs, qemu_irq irq_out) { OpenPICState *mpp; int i; struct { const char *name; MemoryRegionOps const *ops; hwaddr start_addr; ram_addr_t size; } const list[] = { {"glb", &openpic_glb_ops_be, MPIC_GLB_REG_START, MPIC_GLB_REG_SIZE}, {"tmr", &openpic_tmr_ops_be, MPIC_TMR_REG_START, MPIC_TMR_REG_SIZE}, {"src", &openpic_src_ops_be, MPIC_SRC_REG_START, MPIC_SRC_REG_SIZE}, {"cpu", &openpic_cpu_ops_be, MPIC_CPU_REG_START, MPIC_CPU_REG_SIZE}, }; mpp = g_malloc0(sizeof(OpenPICState)); memory_region_init(&mpp->mem, "mpic", 0x40000); memory_region_add_subregion(address_space, base, &mpp->mem); for (i = 0; i < sizeof(list)/sizeof(list[0]); i++) { memory_region_init_io(&mpp->sub_io_mem[i], list[i].ops, mpp, list[i].name, list[i].size); memory_region_add_subregion(&mpp->mem, list[i].start_addr, &mpp->sub_io_mem[i]); } mpp->nb_cpus = nb_cpus; /* 12 external sources, 48 internal sources , 4 timer sources, 4 IPI sources, 4 messaging sources, and 8 Shared MSI sources */ mpp->nb_irqs = 80; mpp->vid = VID_REVISION_1_2; mpp->veni = VENI_GENERIC; mpp->spve_mask = 0xFFFF; mpp->tifr_reset = 0x00000000; mpp->ipvp_reset = 0x80000000; mpp->ide_reset = 0x00000001; mpp->max_irq = MPIC_MAX_IRQ; mpp->irq_ipi0 = MPIC_IPI_IRQ; mpp->irq_tim0 = MPIC_TMR_IRQ; for (i = 0; i < nb_cpus; i++) mpp->dst[i].irqs = irqs[i]; mpp->irq_out = irq_out; /* Enable critical interrupt support */ mpp->flags |= OPENPIC_FLAG_IDE_CRIT; register_savevm(NULL, "mpic", 0, 2, openpic_save, openpic_load, mpp); qemu_register_reset(openpic_reset, mpp); return qemu_allocate_irqs(openpic_set_irq, mpp, mpp->max_irq); }
23,045
0
static void qmp_output_end_list(Visitor *v, void **obj) { QmpOutputVisitor *qov = to_qov(v); QObject *value = qmp_output_pop(qov, obj); assert(qobject_type(value) == QTYPE_QLIST); }
23,046
0
void blkconf_geometry(BlockConf *conf, int *ptrans, unsigned cyls_max, unsigned heads_max, unsigned secs_max, Error **errp) { DriveInfo *dinfo; if (!conf->cyls && !conf->heads && !conf->secs) { /* try to fall back to value set with legacy -drive cyls=... */ dinfo = drive_get_by_blockdev(conf->bs); conf->cyls = dinfo->cyls; conf->heads = dinfo->heads; conf->secs = dinfo->secs; if (ptrans) { *ptrans = dinfo->trans; } } if (!conf->cyls && !conf->heads && !conf->secs) { hd_geometry_guess(conf->bs, &conf->cyls, &conf->heads, &conf->secs, ptrans); } else if (ptrans && *ptrans == BIOS_ATA_TRANSLATION_AUTO) { *ptrans = hd_bios_chs_auto_trans(conf->cyls, conf->heads, conf->secs); } if (conf->cyls || conf->heads || conf->secs) { if (conf->cyls < 1 || conf->cyls > cyls_max) { error_setg(errp, "cyls must be between 1 and %u", cyls_max); return; } if (conf->heads < 1 || conf->heads > heads_max) { error_setg(errp, "heads must be between 1 and %u", heads_max); return; } if (conf->secs < 1 || conf->secs > secs_max) { error_setg(errp, "secs must be between 1 and %u", secs_max); return; } } }
23,047
0
static void strongarm_ppc_handler_update(StrongARMPPCInfo *s) { uint32_t level, diff; int bit; level = s->olevel & s->dir; for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) { bit = ffs(diff) - 1; qemu_set_irq(s->handler[bit], (level >> bit) & 1); } s->prev_level = level; }
23,048
0
sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn, size_t window_size) { sPAPRTCETable *tcet; if (spapr_tce_find_by_liobn(liobn)) { fprintf(stderr, "Attempted to create TCE table with duplicate" " LIOBN 0x%x\n", liobn); return NULL; } if (!window_size) { return NULL; } tcet = g_malloc0(sizeof(*tcet)); tcet->liobn = liobn; tcet->window_size = window_size; if (kvm_enabled()) { tcet->table = kvmppc_create_spapr_tce(liobn, window_size, &tcet->fd); } if (!tcet->table) { size_t table_size = (window_size >> SPAPR_TCE_PAGE_SHIFT) * sizeof(sPAPRTCE); tcet->table = g_malloc0(table_size); } #ifdef DEBUG_TCE fprintf(stderr, "spapr_iommu: New TCE table @ %p, liobn=0x%x, " "table @ %p, fd=%d\n", tcet, liobn, tcet->table, tcet->fd); #endif memory_region_init_iommu(&tcet->iommu, OBJECT(owner), &spapr_iommu_ops, "iommu-spapr", UINT64_MAX); QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list); return tcet; }
23,051
0
static void vhost_set_memory(MemoryListener *listener, MemoryRegionSection *section, bool add) { struct vhost_dev *dev = container_of(listener, struct vhost_dev, memory_listener); hwaddr start_addr = section->offset_within_address_space; ram_addr_t size = int128_get64(section->size); bool log_dirty = memory_region_is_logging(section->mr); int s = offsetof(struct vhost_memory, regions) + (dev->mem->nregions + 1) * sizeof dev->mem->regions[0]; void *ram; dev->mem = g_realloc(dev->mem, s); if (log_dirty) { add = false; } assert(size); /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */ ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region; if (add) { if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) { /* Region exists with same address. Nothing to do. */ return; } } else { if (!vhost_dev_find_reg(dev, start_addr, size)) { /* Removing region that we don't access. Nothing to do. */ return; } } vhost_dev_unassign_memory(dev, start_addr, size); if (add) { /* Add given mapping, merging adjacent regions if any */ vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram); } else { /* Remove old mapping for this memory, if any. */ vhost_dev_unassign_memory(dev, start_addr, size); } dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr); dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1); dev->memory_changed = true; }
23,053
0
static void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUS390XState *env) { int i; rt_sigframe *frame; abi_ulong frame_addr; frame_addr = get_sigframe(ka, env, sizeof *frame); qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__, (unsigned long long)frame_addr); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { goto give_sigsegv; } qemu_log("%s: 1\n", __FUNCTION__); if (copy_siginfo_to_user(&frame->info, info)) { goto give_sigsegv; } /* Create the ucontext. */ __put_user(0, &frame->uc.tuc_flags); __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link); __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp); __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &frame->uc.tuc_stack.ss_flags); __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size); save_sigregs(env, &frame->uc.tuc_mcontext); for (i = 0; i < TARGET_NSIG_WORDS; i++) { __put_user((abi_ulong)set->sig[i], (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]); } /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa_flags & TARGET_SA_RESTORER) { env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE; } else { env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE; if (__put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, (uint16_t *)(frame->retcode))) { goto give_sigsegv; } } /* Set up backchain. */ if (__put_user(env->regs[15], (abi_ulong *) frame)) { goto give_sigsegv; } /* Set up registers for signal handler */ env->regs[15] = frame_addr; env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE; env->regs[2] = sig; //map_signal(sig); env->regs[3] = frame_addr + offsetof(typeof(*frame), info); env->regs[4] = frame_addr + offsetof(typeof(*frame), uc); return; give_sigsegv: qemu_log("%s: give_sigsegv\n", __FUNCTION__); unlock_user_struct(frame, frame_addr, 1); force_sig(TARGET_SIGSEGV); }
23,054
0
static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size) { BDRVQcowState *s = bs->opaque; int i, nb_clusters; nb_clusters = size_to_clusters(s, size); retry: for(i = 0; i < nb_clusters; i++) { int64_t i = s->free_cluster_index++; if (get_refcount(bs, i) != 0) goto retry; } #ifdef DEBUG_ALLOC2 printf("alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n", size, (s->free_cluster_index - nb_clusters) << s->cluster_bits); #endif return (s->free_cluster_index - nb_clusters) << s->cluster_bits; }
23,055
0
static inline int get_dwords(uint32_t addr, uint32_t *buf, int num) { int i; for(i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { cpu_physical_memory_rw(addr,(uint8_t *)buf, sizeof(*buf), 0); *buf = le32_to_cpu(*buf); } return 1; }
23,056
0
static long kvm_hypercall(unsigned long nr, unsigned long param1, unsigned long param2) { register ulong r_nr asm("1") = nr; register ulong r_param1 asm("2") = param1; register ulong r_param2 asm("3") = param2; register long retval asm("2"); asm volatile ("diag 2,4,0x500" : "=d" (retval) : "d" (r_nr), "0" (r_param1), "r"(r_param2) : "memory", "cc"); return retval; }
23,057
0
static void io_mem_init(void) { int i; cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL); cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL); cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL); for (i=0; i<5; i++) io_mem_used[i] = 1; io_mem_watch = cpu_register_io_memory(watch_mem_read, watch_mem_write, NULL); #ifdef CONFIG_KQEMU if (kqemu_phys_ram_base) { /* alloc dirty bits array */ phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS); memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS); } #endif }
23,058
0
static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque, ram_addr_t block_offset, ram_addr_t offset, size_t size, int *bytes_sent) { QEMUFileRDMA *rfile = opaque; RDMAContext *rdma = rfile->rdma; int ret; CHECK_ERROR_STATE(); qemu_fflush(f); if (size > 0) { /* * Add this page to the current 'chunk'. If the chunk * is full, or the page doen't belong to the current chunk, * an actual RDMA write will occur and a new chunk will be formed. */ ret = qemu_rdma_write(f, rdma, block_offset, offset, size); if (ret < 0) { fprintf(stderr, "rdma migration: write error! %d\n", ret); goto err; } /* * We always return 1 bytes because the RDMA * protocol is completely asynchronous. We do not yet know * whether an identified chunk is zero or not because we're * waiting for other pages to potentially be merged with * the current chunk. So, we have to call qemu_update_position() * later on when the actual write occurs. */ if (bytes_sent) { *bytes_sent = 1; } } else { uint64_t index, chunk; /* TODO: Change QEMUFileOps prototype to be signed: size_t => long if (size < 0) { ret = qemu_rdma_drain_cq(f, rdma); if (ret < 0) { fprintf(stderr, "rdma: failed to synchronously drain" " completion queue before unregistration.\n"); goto err; } } */ ret = qemu_rdma_search_ram_block(rdma, block_offset, offset, size, &index, &chunk); if (ret) { fprintf(stderr, "ram block search failed\n"); goto err; } qemu_rdma_signal_unregister(rdma, index, chunk, 0); /* * TODO: Synchronous, guaranteed unregistration (should not occur during * fast-path). Otherwise, unregisters will process on the next call to * qemu_rdma_drain_cq() if (size < 0) { qemu_rdma_unregister_waiting(rdma); } */ } /* * Drain the Completion Queue if possible, but do not block, * just poll. * * If nothing to poll, the end of the iteration will do this * again to make sure we don't overflow the request queue. */ while (1) { uint64_t wr_id, wr_id_in; int ret = qemu_rdma_poll(rdma, &wr_id_in); if (ret < 0) { fprintf(stderr, "rdma migration: polling error! %d\n", ret); goto err; } wr_id = wr_id_in & RDMA_WRID_TYPE_MASK; if (wr_id == RDMA_WRID_NONE) { break; } } return RAM_SAVE_CONTROL_DELAYED; err: rdma->error_state = ret; return ret; }
23,059
0
static unsigned tget(const uint8_t **p, int type, int le) { switch (type) { case TIFF_BYTE : return *(*p)++; case TIFF_SHORT: return tget_short(p, le); case TIFF_LONG : return tget_long(p, le); default : return UINT_MAX; } }
23,060
0
static void tcg_temp_free_internal(int idx) { TCGContext *s = &tcg_ctx; TCGTemp *ts; int k; #if defined(CONFIG_DEBUG_TCG) s->temps_in_use--; if (s->temps_in_use < 0) { fprintf(stderr, "More temporaries freed than allocated!\n"); } #endif assert(idx >= s->nb_globals && idx < s->nb_temps); ts = &s->temps[idx]; assert(ts->temp_allocated != 0); ts->temp_allocated = 0; k = ts->base_type + (ts->temp_local ? TCG_TYPE_COUNT : 0); set_bit(idx, s->free_temps[k].l); }
23,061
0
static bool pte64_match(target_ulong pte0, target_ulong pte1, bool secondary, target_ulong ptem) { return (pte0 & HPTE64_V_VALID) && (secondary == !!(pte0 & HPTE64_V_SECONDARY)) && HPTE64_V_COMPARE(pte0, ptem); }
23,062
0
PCIBus *ppc4xx_pci_init(CPUState *env, qemu_irq pci_irqs[4], target_phys_addr_t config_space, target_phys_addr_t int_ack, target_phys_addr_t special_cycle, target_phys_addr_t registers) { PPC4xxPCIState *controller; int index; static int ppc4xx_pci_id; uint8_t *pci_conf; controller = qemu_mallocz(sizeof(PPC4xxPCIState)); controller->pci_state.bus = pci_register_bus(NULL, "pci", ppc4xx_pci_set_irq, ppc4xx_pci_map_irq, pci_irqs, 0, 4); controller->pci_dev = pci_register_device(controller->pci_state.bus, "host bridge", sizeof(PCIDevice), 0, NULL, NULL); pci_conf = controller->pci_dev->config; pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_IBM); pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_IBM_440GX); pci_config_set_class(pci_conf, PCI_CLASS_BRIDGE_OTHER); /* CFGADDR */ index = cpu_register_io_memory(pci4xx_cfgaddr_read, pci4xx_cfgaddr_write, controller); if (index < 0) goto free; cpu_register_physical_memory(config_space + PCIC0_CFGADDR, 4, index); /* CFGDATA */ index = cpu_register_io_memory(pci4xx_cfgdata_read, pci4xx_cfgdata_write, &controller->pci_state); if (index < 0) goto free; cpu_register_physical_memory(config_space + PCIC0_CFGDATA, 4, index); /* Internal registers */ index = cpu_register_io_memory(pci_reg_read, pci_reg_write, controller); if (index < 0) goto free; cpu_register_physical_memory(registers, PCI_REG_SIZE, index); qemu_register_reset(ppc4xx_pci_reset, controller); /* XXX load/save code not tested. */ register_savevm("ppc4xx_pci", ppc4xx_pci_id++, 1, ppc4xx_pci_save, ppc4xx_pci_load, controller); return controller->pci_state.bus; free: printf("%s error\n", __func__); qemu_free(controller); return NULL; }
23,063
0
static void watch_mem_write(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned size) { check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE); switch (size) { case 1: stb_phys(addr, val); break; case 2: stw_phys(addr, val); break; case 4: stl_phys(addr, val); break; default: abort(); } }
23,064
0
static int io_channel_send_full(QIOChannel *ioc, const void *buf, size_t len, int *fds, size_t nfds) { size_t offset = 0; while (offset < len) { ssize_t ret = 0; struct iovec iov = { .iov_base = (char *)buf + offset, .iov_len = len - offset }; ret = qio_channel_writev_full( ioc, &iov, 1, fds, nfds, NULL); if (ret == QIO_CHANNEL_ERR_BLOCK) { errno = EAGAIN; return -1; } else if (ret < 0) { if (offset) { return offset; } errno = EINVAL; return -1; } offset += ret; } return offset; }
23,065
0
qemu_inject_x86_mce(Monitor *mon, CPUState *cenv, int bank, uint64_t status, uint64_t mcg_status, uint64_t addr, uint64_t misc, int flags) { uint64_t mcg_cap = cenv->mcg_cap; uint64_t *banks = cenv->mce_banks + 4 * bank; /* * If there is an MCE exception being processed, ignore this SRAO MCE * unless unconditional injection was requested. */ if (!(flags & MCE_INJECT_UNCOND_AO) && !(status & MCI_STATUS_AR) && (cenv->mcg_status & MCG_STATUS_MCIP)) { return; } if (status & MCI_STATUS_UC) { /* * if MSR_MCG_CTL is not all 1s, the uncorrected error * reporting is disabled */ if ((mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) { monitor_printf(mon, "CPU %d: Uncorrected error reporting disabled\n", cenv->cpu_index); return; } /* * if MSR_MCi_CTL is not all 1s, the uncorrected error * reporting is disabled for the bank */ if (banks[0] != ~(uint64_t)0) { monitor_printf(mon, "CPU %d: Uncorrected error reporting disabled " "for bank %d\n", cenv->cpu_index, bank); return; } if ((cenv->mcg_status & MCG_STATUS_MCIP) || !(cenv->cr[4] & CR4_MCE_MASK)) { monitor_printf(mon, "CPU %d: Previous MCE still in progress, " "raising triple fault\n", cenv->cpu_index); qemu_log_mask(CPU_LOG_RESET, "Triple fault\n"); qemu_system_reset_request(); return; } if (banks[1] & MCI_STATUS_VAL) { status |= MCI_STATUS_OVER; } banks[2] = addr; banks[3] = misc; cenv->mcg_status = mcg_status; banks[1] = status; cpu_interrupt(cenv, CPU_INTERRUPT_MCE); } else if (!(banks[1] & MCI_STATUS_VAL) || !(banks[1] & MCI_STATUS_UC)) { if (banks[1] & MCI_STATUS_VAL) { status |= MCI_STATUS_OVER; } banks[2] = addr; banks[3] = misc; banks[1] = status; } else { banks[1] |= MCI_STATUS_OVER; } }
23,067
0
static void test_pxe_e1000(void) { test_pxe_one("-device e1000,netdev=" NETNAME, false); }
23,068
0
static int mpeg4_decode_partitioned_mb(MpegEncContext *s, DCTELEM block[6][64]) { int cbp, mb_type; const int xy= s->mb_x + s->mb_y*s->mb_width; mb_type= s->mb_type[xy]; cbp = s->cbp_table[xy]; if(s->current_picture.qscale_table[xy] != s->qscale){ s->qscale= s->current_picture.qscale_table[xy]; s->y_dc_scale= s->y_dc_scale_table[ s->qscale ]; s->c_dc_scale= s->c_dc_scale_table[ s->qscale ]; } if (s->pict_type == P_TYPE || s->pict_type==S_TYPE) { int i; for(i=0; i<4; i++){ s->mv[0][i][0] = s->motion_val[ s->block_index[i] ][0]; s->mv[0][i][1] = s->motion_val[ s->block_index[i] ][1]; } s->mb_intra = mb_type&MB_TYPE_INTRA; if (mb_type&MB_TYPE_SKIPED) { /* skip mb */ for(i=0;i<6;i++) s->block_last_index[i] = -1; s->mv_dir = MV_DIR_FORWARD; s->mv_type = MV_TYPE_16X16; if(s->pict_type==S_TYPE && s->vol_sprite_usage==GMC_SPRITE){ s->mcsel=1; s->mb_skiped = 0; }else{ s->mcsel=0; s->mb_skiped = 1; } }else if(s->mb_intra){ s->ac_pred = s->pred_dir_table[xy]>>7; /* decode each block */ for (i = 0; i < 6; i++) { if(mpeg4_decode_block(s, block[i], i, cbp&32, 1) < 0){ fprintf(stderr, "texture corrupted at %d %d\n", s->mb_x, s->mb_y); return -1; } cbp+=cbp; } }else if(!s->mb_intra){ // s->mcsel= 0; //FIXME do we need to init that s->mv_dir = MV_DIR_FORWARD; if (mb_type&MB_TYPE_INTER4V) { s->mv_type = MV_TYPE_8X8; } else { s->mv_type = MV_TYPE_16X16; } /* decode each block */ for (i = 0; i < 6; i++) { if(mpeg4_decode_block(s, block[i], i, cbp&32, 0) < 0){ fprintf(stderr, "texture corrupted at %d %d (trying to continue with mc/dc only)\n", s->mb_x, s->mb_y); return -1; } cbp+=cbp; } } } else { /* I-Frame */ int i; s->mb_intra = 1; s->ac_pred = s->pred_dir_table[xy]>>7; /* decode each block */ for (i = 0; i < 6; i++) { if(mpeg4_decode_block(s, block[i], i, cbp&32, 1) < 0){ fprintf(stderr, "texture corrupted at %d %d (trying to continue with dc only)\n", s->mb_x, s->mb_y); return -1; } cbp+=cbp; } } s->error_status_table[xy]&= ~AC_ERROR; /* per-MB end of slice check */ if(--s->mb_num_left <= 0){ //printf("%06X %d\n", show_bits(&s->gb, 24), s->gb.size*8 - get_bits_count(&s->gb)); if(mpeg4_is_resync(s)) return SLICE_END; else return SLICE_NOEND; }else{ if(s->cbp_table[xy+1] && mpeg4_is_resync(s)) return SLICE_END; else return SLICE_OK; } }
23,069
0
void visit_type_uint8(Visitor *v, uint8_t *obj, const char *name, Error **errp) { int64_t value; if (v->type_uint8) { v->type_uint8(v, obj, name, errp); } else { value = *obj; v->type_int64(v, &value, name, errp); if (value < 0 || value > UINT8_MAX) { /* FIXME questionable reuse of errp if callback changed value on error */ error_setg(errp, QERR_INVALID_PARAMETER_VALUE, name ? name : "null", "uint8_t"); return; } *obj = value; } }
23,072
0
void xen_cmos_set_s3_resume(void *opaque, int irq, int level) { pc_cmos_set_s3_resume(opaque, irq, level); if (level) { xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3); } }
23,073
0
size_t v9fs_marshal(struct iovec *in_sg, int in_num, size_t offset, int bswap, const char *fmt, ...) { int i; va_list ap; size_t old_offset = offset; va_start(ap, fmt); for (i = 0; fmt[i]; i++) { switch (fmt[i]) { case 'b': { uint8_t val = va_arg(ap, int); offset += v9fs_pack(in_sg, in_num, offset, &val, sizeof(val)); break; } case 'w': { uint16_t val; if (bswap) { cpu_to_le16w(&val, va_arg(ap, int)); } else { val = va_arg(ap, int); } offset += v9fs_pack(in_sg, in_num, offset, &val, sizeof(val)); break; } case 'd': { uint32_t val; if (bswap) { cpu_to_le32w(&val, va_arg(ap, uint32_t)); } else { val = va_arg(ap, uint32_t); } offset += v9fs_pack(in_sg, in_num, offset, &val, sizeof(val)); break; } case 'q': { uint64_t val; if (bswap) { cpu_to_le64w(&val, va_arg(ap, uint64_t)); } else { val = va_arg(ap, uint64_t); } offset += v9fs_pack(in_sg, in_num, offset, &val, sizeof(val)); break; } case 's': { V9fsString *str = va_arg(ap, V9fsString *); offset += v9fs_marshal(in_sg, in_num, offset, bswap, "w", str->size); offset += v9fs_pack(in_sg, in_num, offset, str->data, str->size); break; } case 'Q': { V9fsQID *qidp = va_arg(ap, V9fsQID *); offset += v9fs_marshal(in_sg, in_num, offset, bswap, "bdq", qidp->type, qidp->version, qidp->path); break; } case 'S': { V9fsStat *statp = va_arg(ap, V9fsStat *); offset += v9fs_marshal(in_sg, in_num, offset, bswap, "wwdQdddqsssssddd", statp->size, statp->type, statp->dev, &statp->qid, statp->mode, statp->atime, statp->mtime, statp->length, &statp->name, &statp->uid, &statp->gid, &statp->muid, &statp->extension, statp->n_uid, statp->n_gid, statp->n_muid); break; } case 'A': { V9fsStatDotl *statp = va_arg(ap, V9fsStatDotl *); offset += v9fs_marshal(in_sg, in_num, offset, bswap, "qQdddqqqqqqqqqqqqqqq", statp->st_result_mask, &statp->qid, statp->st_mode, statp->st_uid, statp->st_gid, statp->st_nlink, statp->st_rdev, statp->st_size, statp->st_blksize, statp->st_blocks, statp->st_atime_sec, statp->st_atime_nsec, statp->st_mtime_sec, statp->st_mtime_nsec, statp->st_ctime_sec, statp->st_ctime_nsec, statp->st_btime_sec, statp->st_btime_nsec, statp->st_gen, statp->st_data_version); break; } default: break; } } va_end(ap); return offset - old_offset; }
23,077
0
void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, hwaddr granularity, bool is_write) { hwaddr addr; IOMMUTLBEntry iotlb; for (addr = 0; addr < memory_region_size(mr); addr += granularity) { iotlb = mr->iommu_ops->translate(mr, addr, is_write); if (iotlb.perm != IOMMU_NONE) { n->notify(n, &iotlb); } /* if (2^64 - MR size) < granularity, it's possible to get an * infinite loop here. This should catch such a wraparound */ if ((addr + granularity) < addr) { break; } } }
23,078
0
static void s390_init_cpus(MachineState *machine) { MachineClass *mc = MACHINE_GET_CLASS(machine); int i; if (tcg_enabled() && max_cpus > 1) { error_report("Number of SMP CPUs requested (%d) exceeds max CPUs " "supported by TCG (1) on s390x", max_cpus); exit(1); } /* initialize possible_cpus */ mc->possible_cpu_arch_ids(machine); for (i = 0; i < smp_cpus; i++) { s390x_new_cpu(machine->cpu_type, i, &error_fatal); } }
23,079
0
static int roq_decode_init(AVCodecContext *avctx) { RoqContext *s = avctx->priv_data; s->avctx = avctx; s->width = avctx->width; s->height = avctx->height; s->last_frame = &s->frames[0]; s->current_frame = &s->frames[1]; avctx->pix_fmt = PIX_FMT_YUV444P; dsputil_init(&s->dsp, avctx); return 0; }
23,080
0
static inline void helper_ret_protected(CPUX86State *env, int shift, int is_iret, int addend) { uint32_t new_cs, new_eflags, new_ss; uint32_t new_es, new_ds, new_fs, new_gs; uint32_t e1, e2, ss_e1, ss_e2; int cpl, dpl, rpl, eflags_mask, iopl; target_ulong ssp, sp, new_eip, new_esp, sp_mask; #ifdef TARGET_X86_64 if (shift == 2) { sp_mask = -1; } else #endif { sp_mask = get_sp_mask(env->segs[R_SS].flags); } sp = env->regs[R_ESP]; ssp = env->segs[R_SS].base; new_eflags = 0; /* avoid warning */ #ifdef TARGET_X86_64 if (shift == 2) { POPQ(sp, new_eip); POPQ(sp, new_cs); new_cs &= 0xffff; if (is_iret) { POPQ(sp, new_eflags); } } else #endif { if (shift == 1) { /* 32 bits */ POPL(ssp, sp, sp_mask, new_eip); POPL(ssp, sp, sp_mask, new_cs); new_cs &= 0xffff; if (is_iret) { POPL(ssp, sp, sp_mask, new_eflags); if (new_eflags & VM_MASK) { goto return_to_vm86; } } } else { /* 16 bits */ POPW(ssp, sp, sp_mask, new_eip); POPW(ssp, sp, sp_mask, new_cs); if (is_iret) { POPW(ssp, sp, sp_mask, new_eflags); } } } LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", new_cs, new_eip, shift, addend); LOG_PCALL_STATE(CPU(x86_env_get_cpu(env))); if ((new_cs & 0xfffc) == 0) { raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); } if (load_segment(env, &e1, &e2, new_cs) != 0) { raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); } if (!(e2 & DESC_S_MASK) || !(e2 & DESC_CS_MASK)) { raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); } cpl = env->hflags & HF_CPL_MASK; rpl = new_cs & 3; if (rpl < cpl) { raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); } dpl = (e2 >> DESC_DPL_SHIFT) & 3; if (e2 & DESC_C_MASK) { if (dpl > rpl) { raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); } } else { if (dpl != rpl) { raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc); } } if (!(e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc); } sp += addend; if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) || ((env->hflags & HF_CS64_MASK) && !is_iret))) { /* return to same privilege level */ cpu_x86_load_seg_cache(env, R_CS, new_cs, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); } else { /* return to different privilege level */ #ifdef TARGET_X86_64 if (shift == 2) { POPQ(sp, new_esp); POPQ(sp, new_ss); new_ss &= 0xffff; } else #endif { if (shift == 1) { /* 32 bits */ POPL(ssp, sp, sp_mask, new_esp); POPL(ssp, sp, sp_mask, new_ss); new_ss &= 0xffff; } else { /* 16 bits */ POPW(ssp, sp, sp_mask, new_esp); POPW(ssp, sp, sp_mask, new_ss); } } LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n", new_ss, new_esp); if ((new_ss & 0xfffc) == 0) { #ifdef TARGET_X86_64 /* NULL ss is allowed in long mode if cpl != 3 */ /* XXX: test CS64? */ if ((env->hflags & HF_LMA_MASK) && rpl != 3) { cpu_x86_load_seg_cache(env, R_SS, new_ss, 0, 0xffffffff, DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK | (rpl << DESC_DPL_SHIFT) | DESC_W_MASK | DESC_A_MASK); ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */ } else #endif { raise_exception_err(env, EXCP0D_GPF, 0); } } else { if ((new_ss & 3) != rpl) { raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); } if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) { raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); } if (!(ss_e2 & DESC_S_MASK) || (ss_e2 & DESC_CS_MASK) || !(ss_e2 & DESC_W_MASK)) { raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); } dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3; if (dpl != rpl) { raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc); } if (!(ss_e2 & DESC_P_MASK)) { raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc); } cpu_x86_load_seg_cache(env, R_SS, new_ss, get_seg_base(ss_e1, ss_e2), get_seg_limit(ss_e1, ss_e2), ss_e2); } cpu_x86_load_seg_cache(env, R_CS, new_cs, get_seg_base(e1, e2), get_seg_limit(e1, e2), e2); cpu_x86_set_cpl(env, rpl); sp = new_esp; #ifdef TARGET_X86_64 if (env->hflags & HF_CS64_MASK) { sp_mask = -1; } else #endif { sp_mask = get_sp_mask(ss_e2); } /* validate data segments */ validate_seg(env, R_ES, rpl); validate_seg(env, R_DS, rpl); validate_seg(env, R_FS, rpl); validate_seg(env, R_GS, rpl); sp += addend; } SET_ESP(sp, sp_mask); env->eip = new_eip; if (is_iret) { /* NOTE: 'cpl' is the _old_ CPL */ eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK; if (cpl == 0) { eflags_mask |= IOPL_MASK; } iopl = (env->eflags >> IOPL_SHIFT) & 3; if (cpl <= iopl) { eflags_mask |= IF_MASK; } if (shift == 0) { eflags_mask &= 0xffff; } cpu_load_eflags(env, new_eflags, eflags_mask); } return; return_to_vm86: POPL(ssp, sp, sp_mask, new_esp); POPL(ssp, sp, sp_mask, new_ss); POPL(ssp, sp, sp_mask, new_es); POPL(ssp, sp, sp_mask, new_ds); POPL(ssp, sp, sp_mask, new_fs); POPL(ssp, sp, sp_mask, new_gs); /* modify processor state */ cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK); load_seg_vm(env, R_CS, new_cs & 0xffff); cpu_x86_set_cpl(env, 3); load_seg_vm(env, R_SS, new_ss & 0xffff); load_seg_vm(env, R_ES, new_es & 0xffff); load_seg_vm(env, R_DS, new_ds & 0xffff); load_seg_vm(env, R_FS, new_fs & 0xffff); load_seg_vm(env, R_GS, new_gs & 0xffff); env->eip = new_eip & 0xffff; env->regs[R_ESP] = new_esp; }
23,081
0
static int qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) { int i; for (i = 0; i < qiov->niov; i++) { if ((uintptr_t) qiov->iov[i].iov_base % bs->buffer_alignment) { return 0; } } return 1; }
23,082
0
void *qemu_anon_ram_alloc(size_t size) { void *ptr; /* FIXME: this is not exactly optimal solution since VirtualAlloc has 64Kb granularity, but at least it guarantees us that the memory is page aligned. */ ptr = VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE); trace_qemu_anon_ram_alloc(size, ptr); return ptr; }
23,083
0
static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs, uint64_t src_cluster_offset, unsigned offset_in_cluster, uint8_t *buffer, unsigned bytes) { if (bytes && bs->encrypted) { BDRVQcow2State *s = bs->opaque; int64_t sector = (src_cluster_offset + offset_in_cluster) >> BDRV_SECTOR_BITS; assert(s->cipher); assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0); assert((bytes & ~BDRV_SECTOR_MASK) == 0); if (qcow2_encrypt_sectors(s, sector, buffer, bytes >> BDRV_SECTOR_BITS, true, NULL) < 0) { return false; } } return true; }
23,084
0
static uint32_t nam_readl (void *opaque, uint32_t addr) { PCIAC97LinkState *d = opaque; AC97LinkState *s = &d->ac97; dolog ("U nam readl %#x\n", addr); s->cas = 0; return ~0U; }
23,085
0
static int vpc_open(BlockDriverState *bs, int flags) { BDRVVPCState *s = bs->opaque; int i; struct vhd_footer* footer; struct vhd_dyndisk_header* dyndisk_header; uint8_t buf[HEADER_SIZE]; uint32_t checksum; int err = -1; int disk_type = VHD_DYNAMIC; if (bdrv_pread(bs->file, 0, s->footer_buf, HEADER_SIZE) != HEADER_SIZE) goto fail; footer = (struct vhd_footer*) s->footer_buf; if (strncmp(footer->creator, "conectix", 8)) { int64_t offset = bdrv_getlength(bs->file); if (offset < HEADER_SIZE) { goto fail; } /* If a fixed disk, the footer is found only at the end of the file */ if (bdrv_pread(bs->file, offset-HEADER_SIZE, s->footer_buf, HEADER_SIZE) != HEADER_SIZE) { goto fail; } if (strncmp(footer->creator, "conectix", 8)) { goto fail; } disk_type = VHD_FIXED; } checksum = be32_to_cpu(footer->checksum); footer->checksum = 0; if (vpc_checksum(s->footer_buf, HEADER_SIZE) != checksum) fprintf(stderr, "block-vpc: The header checksum of '%s' is " "incorrect.\n", bs->filename); /* Write 'checksum' back to footer, or else will leave it with zero. */ footer->checksum = be32_to_cpu(checksum); // The visible size of a image in Virtual PC depends on the geometry // rather than on the size stored in the footer (the size in the footer // is too large usually) bs->total_sectors = (int64_t) be16_to_cpu(footer->cyls) * footer->heads * footer->secs_per_cyl; if (bs->total_sectors >= 65535 * 16 * 255) { err = -EFBIG; goto fail; } if (disk_type == VHD_DYNAMIC) { if (bdrv_pread(bs->file, be64_to_cpu(footer->data_offset), buf, HEADER_SIZE) != HEADER_SIZE) { goto fail; } dyndisk_header = (struct vhd_dyndisk_header *) buf; if (strncmp(dyndisk_header->magic, "cxsparse", 8)) { goto fail; } s->block_size = be32_to_cpu(dyndisk_header->block_size); s->bitmap_size = ((s->block_size / (8 * 512)) + 511) & ~511; s->max_table_entries = be32_to_cpu(dyndisk_header->max_table_entries); s->pagetable = g_malloc(s->max_table_entries * 4); s->bat_offset = be64_to_cpu(dyndisk_header->table_offset); if (bdrv_pread(bs->file, s->bat_offset, s->pagetable, s->max_table_entries * 4) != s->max_table_entries * 4) { goto fail; } s->free_data_block_offset = (s->bat_offset + (s->max_table_entries * 4) + 511) & ~511; for (i = 0; i < s->max_table_entries; i++) { be32_to_cpus(&s->pagetable[i]); if (s->pagetable[i] != 0xFFFFFFFF) { int64_t next = (512 * (int64_t) s->pagetable[i]) + s->bitmap_size + s->block_size; if (next > s->free_data_block_offset) { s->free_data_block_offset = next; } } } s->last_bitmap_offset = (int64_t) -1; #ifdef CACHE s->pageentry_u8 = g_malloc(512); s->pageentry_u32 = s->pageentry_u8; s->pageentry_u16 = s->pageentry_u8; s->last_pagetable = -1; #endif } qemu_co_mutex_init(&s->lock); /* Disable migration when VHD images are used */ error_set(&s->migration_blocker, QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED, "vpc", bs->device_name, "live migration"); migrate_add_blocker(s->migration_blocker); return 0; fail: return err; }
23,086
0
static void test_identify(void) { AHCIQState *ahci; ahci = ahci_boot_and_enable(); ahci_test_identify(ahci); ahci_shutdown(ahci); }
23,087
0
static void apic_send_msi(target_phys_addr_t addr, uint32_t data) { uint8_t dest = (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT; uint8_t vector = (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT; uint8_t dest_mode = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1; uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7; /* XXX: Ignore redirection hint. */ apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode); }
23,089
0
PCIHostState *spapr_create_phb(sPAPREnvironment *spapr, int index, const char *busname) { DeviceState *dev; dev = qdev_create(NULL, TYPE_SPAPR_PCI_HOST_BRIDGE); qdev_prop_set_uint32(dev, "index", index); qdev_prop_set_string(dev, "busname", busname); qdev_init_nofail(dev); return PCI_HOST_BRIDGE(dev); }
23,090
0
static void opt_format(const char *arg) { /* compatibility stuff for pgmyuv */ if (!strcmp(arg, "pgmyuv")) { opt_image_format(arg); arg = "image"; } file_iformat = av_find_input_format(arg); file_oformat = guess_format(arg, NULL, NULL); if (!file_iformat && !file_oformat) { fprintf(stderr, "Unknown input or output format: %s\n", arg); exit(1); } }
23,091
1
void memory_region_add_eventfd(MemoryRegion *mr, hwaddr addr, unsigned size, bool match_data, uint64_t data, EventNotifier *e) { MemoryRegionIoeventfd mrfd = { .addr.start = int128_make64(addr), .addr.size = int128_make64(size), .match_data = match_data, .data = data, .e = e, }; unsigned i; if (size) { adjust_endianness(mr, &mrfd.data, size); } memory_region_transaction_begin(); for (i = 0; i < mr->ioeventfd_nb; ++i) { if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) { break; } } ++mr->ioeventfd_nb; mr->ioeventfds = g_realloc(mr->ioeventfds, sizeof(*mr->ioeventfds) * mr->ioeventfd_nb); memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i], sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i)); mr->ioeventfds[i] = mrfd; ioeventfd_update_pending |= mr->enabled; memory_region_transaction_commit(); }
23,092
1
static int pdu_copy_sg(V9fsPDU *pdu, size_t offset, int rx, struct iovec *sg) { size_t pos = 0; int i, j; struct iovec *src_sg; unsigned int num; if (rx) { src_sg = pdu->elem.in_sg; num = pdu->elem.in_num; } else { src_sg = pdu->elem.out_sg; num = pdu->elem.out_num; } j = 0; for (i = 0; i < num; i++) { if (offset <= pos) { sg[j].iov_base = src_sg[i].iov_base; sg[j].iov_len = src_sg[i].iov_len; j++; } else if (offset < (src_sg[i].iov_len + pos)) { sg[j].iov_base = src_sg[i].iov_base; sg[j].iov_len = src_sg[i].iov_len; sg[j].iov_base += (offset - pos); sg[j].iov_len -= (offset - pos); j++; } pos += src_sg[i].iov_len; } return j; }
23,094
1
void init_vlc_rl(RLTable *rl) { int i, q; init_vlc(&rl->vlc, 9, rl->n + 1, &rl->table_vlc[0][1], 4, 2, &rl->table_vlc[0][0], 4, 2); for(q=0; q<32; q++){ int qmul= q*2; int qadd= (q-1)|1; if(q==0){ qmul=1; qadd=0; } rl->rl_vlc[q]= av_malloc(rl->vlc.table_size*sizeof(RL_VLC_ELEM)); for(i=0; i<rl->vlc.table_size; i++){ int code= rl->vlc.table[i][0]; int len = rl->vlc.table[i][1]; int level, run; if(len==0){ // illegal code run= 66; level= MAX_LEVEL; }else if(len<0){ //more bits needed run= 0; level= code; }else{ if(code==rl->n){ //esc run= 66; level= 0; }else{ run= rl->table_run [code] + 1; level= rl->table_level[code] * qmul + qadd; if(code >= rl->last) run+=192; } } rl->rl_vlc[q][i].len= len; rl->rl_vlc[q][i].level= level; rl->rl_vlc[q][i].run= run; } } }
23,095
1
static void dec_sr(DisasContext *dc) { if (dc->format == OP_FMT_RI) { LOG_DIS("sri r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5); } else { LOG_DIS("sr r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); } if (!(dc->env->features & LM32_FEATURE_SHIFT)) { if (dc->format == OP_FMT_RI) { /* TODO: check r1 == 1 during runtime */ } else { if (dc->imm5 != 1) { cpu_abort(dc->env, "hardware shifter is not available\n"); } } } if (dc->format == OP_FMT_RI) { tcg_gen_sari_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5); } else { TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f); tcg_gen_sar_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0); tcg_temp_free(t0); } }
23,096
1
int kvm_arch_on_sigbus(int code, void *addr) { #ifdef KVM_CAP_MCE if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) { ram_addr_t ram_addr; target_phys_addr_t paddr; /* Hope we are lucky for AO MCE */ if (qemu_ram_addr_from_host(addr, &ram_addr) || !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr, &paddr)) { fprintf(stderr, "Hardware memory error for memory used by " "QEMU itself instead of guest system!: %p\n", addr); return 0; } kvm_mce_inject(first_cpu, paddr, code); } else #endif /* KVM_CAP_MCE */ { if (code == BUS_MCEERR_AO) { return 0; } else if (code == BUS_MCEERR_AR) { hardware_memory_error(); } else { return 1; } } return 0; }
23,097
1
static int mxf_read_material_package(MXFPackage *package, ByteIOContext *pb, int tag) { switch(tag) { case 0x4403: package->tracks_count = get_be32(pb); if (package->tracks_count >= UINT_MAX / sizeof(UID)) return -1; package->tracks_refs = av_malloc(package->tracks_count * sizeof(UID)); if (!package->tracks_refs) return -1; url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */ get_buffer(pb, (uint8_t *)package->tracks_refs, package->tracks_count * sizeof(UID)); break; } return 0; }
23,098
0
static void qemu_aio_complete(void *opaque, int ret) { struct ioreq *ioreq = opaque; if (ret != 0) { xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n", ioreq->req.operation == BLKIF_OP_READ ? "read" : "write"); ioreq->aio_errors++; } ioreq->aio_inflight--; if (ioreq->presync) { ioreq->presync = 0; ioreq_runio_qemu_aio(ioreq); return; } if (ioreq->aio_inflight > 0) { return; } if (ioreq->postsync) { ioreq->postsync = 0; ioreq->aio_inflight++; bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq); return; } ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY; ioreq_unmap(ioreq); ioreq_finish(ioreq); switch (ioreq->req.operation) { case BLKIF_OP_WRITE: case BLKIF_OP_FLUSH_DISKCACHE: if (!ioreq->req.nr_segments) { break; } case BLKIF_OP_READ: block_acct_done(bdrv_get_stats(ioreq->blkdev->bs), &ioreq->acct); break; case BLKIF_OP_DISCARD: default: break; } qemu_bh_schedule(ioreq->blkdev->bh); }
23,099
0
static int select_rc_mode(AVCodecContext *avctx, QSVEncContext *q) { const char *rc_desc; mfxU16 rc_mode; int want_la = q->la_depth >= 0; int want_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE); int want_vcm = q->vcm; if (want_la && !QSV_HAVE_LA) { av_log(avctx, AV_LOG_ERROR, "Lookahead ratecontrol mode requested, but is not supported by this SDK version\n"); return AVERROR(ENOSYS); } if (want_vcm && !QSV_HAVE_VCM) { av_log(avctx, AV_LOG_ERROR, "VCM ratecontrol mode requested, but is not supported by this SDK version\n"); return AVERROR(ENOSYS); } if (want_la + want_qscale + want_vcm > 1) { av_log(avctx, AV_LOG_ERROR, "More than one of: { constant qscale, lookahead, VCM } requested, " "only one of them can be used at a time.\n"); return AVERROR(EINVAL); } if (want_qscale) { rc_mode = MFX_RATECONTROL_CQP; rc_desc = "constant quantization parameter (CQP)"; } #if QSV_HAVE_VCM else if (want_vcm) { rc_mode = MFX_RATECONTROL_VCM; rc_desc = "video conferencing mode (VCM)"; } #endif #if QSV_HAVE_LA else if (want_la) { rc_mode = MFX_RATECONTROL_LA; rc_desc = "VBR with lookahead (LA)"; #if QSV_HAVE_ICQ if (avctx->global_quality > 0) { rc_mode = MFX_RATECONTROL_LA_ICQ; rc_desc = "intelligent constant quality with lookahead (LA_ICQ)"; } #endif } #endif #if QSV_HAVE_ICQ else if (avctx->global_quality > 0) { rc_mode = MFX_RATECONTROL_ICQ; rc_desc = "intelligent constant quality (ICQ)"; } #endif else if (avctx->rc_max_rate == avctx->bit_rate) { rc_mode = MFX_RATECONTROL_CBR; rc_desc = "constant bitrate (CBR)"; } else if (!avctx->rc_max_rate) { rc_mode = MFX_RATECONTROL_AVBR; rc_desc = "average variable bitrate (AVBR)"; } else { rc_mode = MFX_RATECONTROL_VBR; rc_desc = "variable bitrate (VBR)"; } q->param.mfx.RateControlMethod = rc_mode; av_log(avctx, AV_LOG_VERBOSE, "Using the %s ratecontrol method\n", rc_desc); return 0; }
23,100