column      dtype            range / values
project     stringclasses    2 values
commit_id   stringlengths    40 to 40
target      int64            0 to 1
func        stringlengths    26 to 142k
idx         int64            0 to 27.3k
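Each record below pairs a project name and a full 40-character commit hash with a binary target label, the flattened source of one C function, and a running index. As a minimal sketch of how records with this schema might be consumed, assuming the rows are exported as JSON Lines with exactly these field names (the file name samples.jsonl is a placeholder, not part of the dataset):

    import json

    # Tally samples per (project, target) pair, assuming one JSON object per line
    # with the fields listed above: project, commit_id, target, func, idx.
    counts = {}
    with open("samples.jsonl", "r", encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            assert len(row["commit_id"]) == 40   # commit hashes are full-length strings
            key = (row["project"], int(row["target"]))
            counts[key] = counts.get(key, 0) + 1

    for (project, target), n in sorted(counts.items()):
        print(f"{project} target={target}: {n} functions")

Grouping by (project, target) this way makes class balance visible per project before any model training; the same loop could instead collect row["func"] strings for tokenization.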
FFmpeg
9b8c8a9395c849639aea0f6b5300e991e93c3a73
0
static int svq1_decode_block_intra(GetBitContext *bitbuf, uint8_t *pixels, int pitch) { uint32_t bit_cache; uint8_t *list[63]; uint32_t *dst; const uint32_t *codebook; int entries[6]; int i, j, m, n; int mean, stages; unsigned x, y, width, height, level; uint32_t n1, n2, n3, n4; /* initialize list for breadth first processing of vectors */ list[0] = pixels; /* recursively process vector */ for (i = 0, m = 1, n = 1, level = 5; i < n; i++) { SVQ1_PROCESS_VECTOR(); /* destination address and vector size */ dst = (uint32_t *)list[i]; width = 1 << ((4 + level) / 2); height = 1 << ((3 + level) / 2); /* get number of stages (-1 skips vector, 0 for mean only) */ stages = get_vlc2(bitbuf, svq1_intra_multistage[level].table, 3, 3) - 1; if (stages == -1) { for (y = 0; y < height; y++) memset(&dst[y * (pitch / 4)], 0, width); continue; /* skip vector */ } if (stages > 0 && level >= 4) { av_dlog(NULL, "Error (svq1_decode_block_intra): invalid vector: stages=%i level=%i\n", stages, level); return AVERROR_INVALIDDATA; /* invalid vector */ } mean = get_vlc2(bitbuf, svq1_intra_mean.table, 8, 3); if (stages == 0) { for (y = 0; y < height; y++) memset(&dst[y * (pitch / 4)], mean, width); } else { SVQ1_CALC_CODEBOOK_ENTRIES(ff_svq1_intra_codebooks); for (y = 0; y < height; y++) { for (x = 0; x < width / 4; x++, codebook++) { n1 = n4; n2 = n4; SVQ1_ADD_CODEBOOK() /* store result */ dst[x] = n1 << 8 | n2; } dst += pitch / 4; } } } return 0; }
17,205
FFmpeg
c988f97566cdf536ba0dcbc0d77d885456852060
0
int ff_h264_decode_mb_cavlc(H264Context *h){ MpegEncContext * const s = &h->s; int mb_xy; int partition_count; unsigned int mb_type, cbp; int dct8x8_allowed= h->pps.transform_8x8_mode; mb_xy = h->mb_xy = s->mb_x + s->mb_y*s->mb_stride; tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y); cbp = 0; /* avoid warning. FIXME: find a solution without slowing down the code */ if(h->slice_type_nos != FF_I_TYPE){ if(s->mb_skip_run==-1) s->mb_skip_run= get_ue_golomb(&s->gb); if (s->mb_skip_run--) { if(FRAME_MBAFF && (s->mb_y&1) == 0){ if(s->mb_skip_run==0) h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&s->gb); else predict_field_decoding_flag(h); } decode_mb_skip(h); return 0; } } if(FRAME_MBAFF){ if( (s->mb_y&1) == 0 ) h->mb_mbaff = h->mb_field_decoding_flag = get_bits1(&s->gb); } h->prev_mb_skipped= 0; mb_type= get_ue_golomb(&s->gb); if(h->slice_type_nos == FF_B_TYPE){ if(mb_type < 23){ partition_count= b_mb_type_info[mb_type].partition_count; mb_type= b_mb_type_info[mb_type].type; }else{ mb_type -= 23; goto decode_intra_mb; } }else if(h->slice_type_nos == FF_P_TYPE){ if(mb_type < 5){ partition_count= p_mb_type_info[mb_type].partition_count; mb_type= p_mb_type_info[mb_type].type; }else{ mb_type -= 5; goto decode_intra_mb; } }else{ assert(h->slice_type_nos == FF_I_TYPE); if(h->slice_type == FF_SI_TYPE && mb_type) mb_type--; decode_intra_mb: if(mb_type > 25){ av_log(h->s.avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_pict_type_char(h->slice_type), s->mb_x, s->mb_y); return -1; } partition_count=0; cbp= i_mb_type_info[mb_type].cbp; h->intra16x16_pred_mode= i_mb_type_info[mb_type].pred_mode; mb_type= i_mb_type_info[mb_type].type; } if(MB_FIELD) mb_type |= MB_TYPE_INTERLACED; h->slice_table[ mb_xy ]= h->slice_num; if(IS_INTRA_PCM(mb_type)){ unsigned int x; // We assume these blocks are very rare so we do not optimize it. align_get_bits(&s->gb); // The pixels are stored in the same order as levels in h->mb array. for(x=0; x < (CHROMA ? 
384 : 256); x++){ ((uint8_t*)h->mb)[x]= get_bits(&s->gb, 8); } // In deblocking, the quantizer is 0 s->current_picture.qscale_table[mb_xy]= 0; // All coeffs are present memset(h->non_zero_count[mb_xy], 16, 16); s->current_picture.mb_type[mb_xy]= mb_type; return 0; } if(MB_MBAFF){ h->ref_count[0] <<= 1; h->ref_count[1] <<= 1; } fill_caches(h, mb_type, 0); //mb_pred if(IS_INTRA(mb_type)){ int pred_mode; // init_top_left_availability(h); if(IS_INTRA4x4(mb_type)){ int i; int di = 1; if(dct8x8_allowed && get_bits1(&s->gb)){ mb_type |= MB_TYPE_8x8DCT; di = 4; } // fill_intra4x4_pred_table(h); for(i=0; i<16; i+=di){ int mode= pred_intra_mode(h, i); if(!get_bits1(&s->gb)){ const int rem_mode= get_bits(&s->gb, 3); mode = rem_mode + (rem_mode >= mode); } if(di==4) fill_rectangle( &h->intra4x4_pred_mode_cache[ scan8[i] ], 2, 2, 8, mode, 1 ); else h->intra4x4_pred_mode_cache[ scan8[i] ] = mode; } ff_h264_write_back_intra_pred_mode(h); if( ff_h264_check_intra4x4_pred_mode(h) < 0) return -1; }else{ h->intra16x16_pred_mode= ff_h264_check_intra_pred_mode(h, h->intra16x16_pred_mode); if(h->intra16x16_pred_mode < 0) return -1; } if(CHROMA){ pred_mode= ff_h264_check_intra_pred_mode(h, get_ue_golomb_31(&s->gb)); if(pred_mode < 0) return -1; h->chroma_pred_mode= pred_mode; } }else if(partition_count==4){ int i, j, sub_partition_count[4], list, ref[2][4]; if(h->slice_type_nos == FF_B_TYPE){ for(i=0; i<4; i++){ h->sub_mb_type[i]= get_ue_golomb_31(&s->gb); if(h->sub_mb_type[i] >=13){ av_log(h->s.avctx, AV_LOG_ERROR, "B sub_mb_type %u out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y); return -1; } sub_partition_count[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count; h->sub_mb_type[i]= b_sub_mb_type_info[ h->sub_mb_type[i] ].type; } if( IS_DIRECT(h->sub_mb_type[0]) || IS_DIRECT(h->sub_mb_type[1]) || IS_DIRECT(h->sub_mb_type[2]) || IS_DIRECT(h->sub_mb_type[3])) { ff_h264_pred_direct_motion(h, &mb_type); h->ref_cache[0][scan8[4]] = h->ref_cache[1][scan8[4]] = h->ref_cache[0][scan8[12]] = h->ref_cache[1][scan8[12]] = PART_NOT_AVAILABLE; } }else{ assert(h->slice_type_nos == FF_P_TYPE); //FIXME SP correct ? for(i=0; i<4; i++){ h->sub_mb_type[i]= get_ue_golomb_31(&s->gb); if(h->sub_mb_type[i] >=4){ av_log(h->s.avctx, AV_LOG_ERROR, "P sub_mb_type %u out of range at %d %d\n", h->sub_mb_type[i], s->mb_x, s->mb_y); return -1; } sub_partition_count[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].partition_count; h->sub_mb_type[i]= p_sub_mb_type_info[ h->sub_mb_type[i] ].type; } } for(list=0; list<h->list_count; list++){ int ref_count= IS_REF0(mb_type) ? 1 : h->ref_count[list]; for(i=0; i<4; i++){ if(IS_DIRECT(h->sub_mb_type[i])) continue; if(IS_DIR(h->sub_mb_type[i], 0, list)){ unsigned int tmp; if(ref_count == 1){ tmp= 0; }else if(ref_count == 2){ tmp= get_bits1(&s->gb)^1; }else{ tmp= get_ue_golomb_31(&s->gb); if(tmp>=ref_count){ av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", tmp); return -1; } } ref[list][i]= tmp; }else{ //FIXME ref[list][i] = -1; } } } if(dct8x8_allowed) dct8x8_allowed = get_dct8x8_allowed(h); for(list=0; list<h->list_count; list++){ for(i=0; i<4; i++){ if(IS_DIRECT(h->sub_mb_type[i])) { h->ref_cache[list][ scan8[4*i] ] = h->ref_cache[list][ scan8[4*i]+1 ]; continue; } h->ref_cache[list][ scan8[4*i] ]=h->ref_cache[list][ scan8[4*i]+1 ]= h->ref_cache[list][ scan8[4*i]+8 ]=h->ref_cache[list][ scan8[4*i]+9 ]= ref[list][i]; if(IS_DIR(h->sub_mb_type[i], 0, list)){ const int sub_mb_type= h->sub_mb_type[i]; const int block_width= (sub_mb_type & (MB_TYPE_16x16|MB_TYPE_16x8)) ? 
2 : 1; for(j=0; j<sub_partition_count[i]; j++){ int mx, my; const int index= 4*i + block_width*j; int16_t (* mv_cache)[2]= &h->mv_cache[list][ scan8[index] ]; pred_motion(h, index, block_width, list, h->ref_cache[list][ scan8[index] ], &mx, &my); mx += get_se_golomb(&s->gb); my += get_se_golomb(&s->gb); tprintf(s->avctx, "final mv:%d %d\n", mx, my); if(IS_SUB_8X8(sub_mb_type)){ mv_cache[ 1 ][0]= mv_cache[ 8 ][0]= mv_cache[ 9 ][0]= mx; mv_cache[ 1 ][1]= mv_cache[ 8 ][1]= mv_cache[ 9 ][1]= my; }else if(IS_SUB_8X4(sub_mb_type)){ mv_cache[ 1 ][0]= mx; mv_cache[ 1 ][1]= my; }else if(IS_SUB_4X8(sub_mb_type)){ mv_cache[ 8 ][0]= mx; mv_cache[ 8 ][1]= my; } mv_cache[ 0 ][0]= mx; mv_cache[ 0 ][1]= my; } }else{ uint32_t *p= (uint32_t *)&h->mv_cache[list][ scan8[4*i] ][0]; p[0] = p[1]= p[8] = p[9]= 0; } } } }else if(IS_DIRECT(mb_type)){ ff_h264_pred_direct_motion(h, &mb_type); dct8x8_allowed &= h->sps.direct_8x8_inference_flag; }else{ int list, mx, my, i; //FIXME we should set ref_idx_l? to 0 if we use that later ... if(IS_16X16(mb_type)){ for(list=0; list<h->list_count; list++){ unsigned int val; if(IS_DIR(mb_type, 0, list)){ if(h->ref_count[list]==1){ val= 0; }else if(h->ref_count[list]==2){ val= get_bits1(&s->gb)^1; }else{ val= get_ue_golomb_31(&s->gb); if(val >= h->ref_count[list]){ av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val); return -1; } } }else val= LIST_NOT_USED&0xFF; fill_rectangle(&h->ref_cache[list][ scan8[0] ], 4, 4, 8, val, 1); } for(list=0; list<h->list_count; list++){ unsigned int val; if(IS_DIR(mb_type, 0, list)){ pred_motion(h, 0, 4, list, h->ref_cache[list][ scan8[0] ], &mx, &my); mx += get_se_golomb(&s->gb); my += get_se_golomb(&s->gb); tprintf(s->avctx, "final mv:%d %d\n", mx, my); val= pack16to32(mx,my); }else val=0; fill_rectangle(h->mv_cache[list][ scan8[0] ], 4, 4, 8, val, 4); } } else if(IS_16X8(mb_type)){ for(list=0; list<h->list_count; list++){ for(i=0; i<2; i++){ unsigned int val; if(IS_DIR(mb_type, i, list)){ if(h->ref_count[list] == 1){ val= 0; }else if(h->ref_count[list] == 2){ val= get_bits1(&s->gb)^1; }else{ val= get_ue_golomb_31(&s->gb); if(val >= h->ref_count[list]){ av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val); return -1; } } }else val= LIST_NOT_USED&0xFF; fill_rectangle(&h->ref_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 1); } } for(list=0; list<h->list_count; list++){ for(i=0; i<2; i++){ unsigned int val; if(IS_DIR(mb_type, i, list)){ pred_16x8_motion(h, 8*i, list, h->ref_cache[list][scan8[0] + 16*i], &mx, &my); mx += get_se_golomb(&s->gb); my += get_se_golomb(&s->gb); tprintf(s->avctx, "final mv:%d %d\n", mx, my); val= pack16to32(mx,my); }else val=0; fill_rectangle(h->mv_cache[list][ scan8[0] + 16*i ], 4, 2, 8, val, 4); } } }else{ assert(IS_8X16(mb_type)); for(list=0; list<h->list_count; list++){ for(i=0; i<2; i++){ unsigned int val; if(IS_DIR(mb_type, i, list)){ //FIXME optimize if(h->ref_count[list]==1){ val= 0; }else if(h->ref_count[list]==2){ val= get_bits1(&s->gb)^1; }else{ val= get_ue_golomb_31(&s->gb); if(val >= h->ref_count[list]){ av_log(h->s.avctx, AV_LOG_ERROR, "ref %u overflow\n", val); return -1; } } }else val= LIST_NOT_USED&0xFF; fill_rectangle(&h->ref_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 1); } } for(list=0; list<h->list_count; list++){ for(i=0; i<2; i++){ unsigned int val; if(IS_DIR(mb_type, i, list)){ pred_8x16_motion(h, i*4, list, h->ref_cache[list][ scan8[0] + 2*i ], &mx, &my); mx += get_se_golomb(&s->gb); my += get_se_golomb(&s->gb); tprintf(s->avctx, "final mv:%d %d\n", mx, my); val= pack16to32(mx,my); 
}else val=0; fill_rectangle(h->mv_cache[list][ scan8[0] + 2*i ], 2, 4, 8, val, 4); } } } } if(IS_INTER(mb_type)) write_back_motion(h, mb_type); if(!IS_INTRA16x16(mb_type)){ cbp= get_ue_golomb(&s->gb); if(cbp > 47){ av_log(h->s.avctx, AV_LOG_ERROR, "cbp too large (%u) at %d %d\n", cbp, s->mb_x, s->mb_y); return -1; } if(CHROMA){ if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp[cbp]; else cbp= golomb_to_inter_cbp [cbp]; }else{ if(IS_INTRA4x4(mb_type)) cbp= golomb_to_intra4x4_cbp_gray[cbp]; else cbp= golomb_to_inter_cbp_gray[cbp]; } } h->cbp = cbp; if(dct8x8_allowed && (cbp&15) && !IS_INTRA(mb_type)){ if(get_bits1(&s->gb)){ mb_type |= MB_TYPE_8x8DCT; h->cbp_table[mb_xy]= cbp; } } s->current_picture.mb_type[mb_xy]= mb_type; if(cbp || IS_INTRA16x16(mb_type)){ int i8x8, i4x4, chroma_idx; int dquant; GetBitContext *gb= IS_INTRA(mb_type) ? h->intra_gb_ptr : h->inter_gb_ptr; const uint8_t *scan, *scan8x8, *dc_scan; // fill_non_zero_count_cache(h); if(IS_INTERLACED(mb_type)){ scan8x8= s->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0; scan= s->qscale ? h->field_scan : h->field_scan_q0; dc_scan= luma_dc_field_scan; }else{ scan8x8= s->qscale ? h->zigzag_scan8x8_cavlc : h->zigzag_scan8x8_cavlc_q0; scan= s->qscale ? h->zigzag_scan : h->zigzag_scan_q0; dc_scan= luma_dc_zigzag_scan; } dquant= get_se_golomb(&s->gb); if( dquant > 25 || dquant < -26 ){ av_log(h->s.avctx, AV_LOG_ERROR, "dquant out of range (%d) at %d %d\n", dquant, s->mb_x, s->mb_y); return -1; } s->qscale += dquant; if(((unsigned)s->qscale) > 51){ if(s->qscale<0) s->qscale+= 52; else s->qscale-= 52; } h->chroma_qp[0]= get_chroma_qp(h, 0, s->qscale); h->chroma_qp[1]= get_chroma_qp(h, 1, s->qscale); if(IS_INTRA16x16(mb_type)){ if( decode_residual(h, h->intra_gb_ptr, h->mb, LUMA_DC_BLOCK_INDEX, dc_scan, h->dequant4_coeff[0][s->qscale], 16) < 0){ return -1; //FIXME continue if partitioned and other return -1 too } assert((cbp&15) == 0 || (cbp&15) == 15); if(cbp&15){ for(i8x8=0; i8x8<4; i8x8++){ for(i4x4=0; i4x4<4; i4x4++){ const int index= i4x4 + 4*i8x8; if( decode_residual(h, h->intra_gb_ptr, h->mb + 16*index, index, scan + 1, h->dequant4_coeff[0][s->qscale], 15) < 0 ){ return -1; } } } }else{ fill_rectangle(&h->non_zero_count_cache[scan8[0]], 4, 4, 8, 0, 1); } }else{ for(i8x8=0; i8x8<4; i8x8++){ if(cbp & (1<<i8x8)){ if(IS_8x8DCT(mb_type)){ DCTELEM *buf = &h->mb[64*i8x8]; uint8_t *nnz; for(i4x4=0; i4x4<4; i4x4++){ if( decode_residual(h, gb, buf, i4x4+4*i8x8, scan8x8+16*i4x4, h->dequant8_coeff[IS_INTRA( mb_type ) ? 0:1][s->qscale], 16) <0 ) return -1; } nnz= &h->non_zero_count_cache[ scan8[4*i8x8] ]; nnz[0] += nnz[1] + nnz[8] + nnz[9]; }else{ for(i4x4=0; i4x4<4; i4x4++){ const int index= i4x4 + 4*i8x8; if( decode_residual(h, gb, h->mb + 16*index, index, scan, h->dequant4_coeff[IS_INTRA( mb_type ) ? 0:3][s->qscale], 16) <0 ){ return -1; } } } }else{ uint8_t * const nnz= &h->non_zero_count_cache[ scan8[4*i8x8] ]; nnz[0] = nnz[1] = nnz[8] = nnz[9] = 0; } } } if(cbp&0x30){ for(chroma_idx=0; chroma_idx<2; chroma_idx++) if( decode_residual(h, gb, h->mb + 256 + 16*4*chroma_idx, CHROMA_DC_BLOCK_INDEX, chroma_dc_scan, NULL, 4) < 0){ return -1; } } if(cbp&0x20){ for(chroma_idx=0; chroma_idx<2; chroma_idx++){ const uint32_t *qmul = h->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 
0:3)][h->chroma_qp[chroma_idx]]; for(i4x4=0; i4x4<4; i4x4++){ const int index= 16 + 4*chroma_idx + i4x4; if( decode_residual(h, gb, h->mb + 16*index, index, scan + 1, qmul, 15) < 0){ return -1; } } } }else{ uint8_t * const nnz= &h->non_zero_count_cache[0]; nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] = nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0; } }else{ uint8_t * const nnz= &h->non_zero_count_cache[0]; fill_rectangle(&nnz[scan8[0]], 4, 4, 8, 0, 1); nnz[ scan8[16]+0 ] = nnz[ scan8[16]+1 ] =nnz[ scan8[16]+8 ] =nnz[ scan8[16]+9 ] = nnz[ scan8[20]+0 ] = nnz[ scan8[20]+1 ] =nnz[ scan8[20]+8 ] =nnz[ scan8[20]+9 ] = 0; } s->current_picture.qscale_table[mb_xy]= s->qscale; write_back_non_zero_count(h); if(MB_MBAFF){ h->ref_count[0] >>= 1; h->ref_count[1] >>= 1; } return 0; }
17,207
FFmpeg
37bb6004059c15f6a17f38b4ab5c5f5d2f61c087
0
int av_opt_set(void *obj, const char *name, const char *val, int search_flags) { int ret = 0; void *dst, *target_obj; const AVOption *o = av_opt_find2(obj, name, NULL, 0, search_flags, &target_obj); if (!o || !target_obj) return AVERROR_OPTION_NOT_FOUND; if (!val && (o->type != AV_OPT_TYPE_STRING && o->type != AV_OPT_TYPE_PIXEL_FMT && o->type != AV_OPT_TYPE_SAMPLE_FMT && o->type != AV_OPT_TYPE_IMAGE_SIZE && o->type != AV_OPT_TYPE_VIDEO_RATE && o->type != AV_OPT_TYPE_DURATION && o->type != AV_OPT_TYPE_COLOR && o->type != AV_OPT_TYPE_CHANNEL_LAYOUT && o->type != AV_OPT_TYPE_BOOL)) return AVERROR(EINVAL); if (o->flags & AV_OPT_FLAG_READONLY) return AVERROR(EINVAL); dst = ((uint8_t *)target_obj) + o->offset; switch (o->type) { case AV_OPT_TYPE_BOOL: return set_string_bool(obj, o, val, dst); case AV_OPT_TYPE_STRING: return set_string(obj, o, val, dst); case AV_OPT_TYPE_BINARY: return set_string_binary(obj, o, val, dst); case AV_OPT_TYPE_FLAGS: case AV_OPT_TYPE_INT: case AV_OPT_TYPE_INT64: case AV_OPT_TYPE_FLOAT: case AV_OPT_TYPE_DOUBLE: case AV_OPT_TYPE_RATIONAL: return set_string_number(obj, target_obj, o, val, dst); case AV_OPT_TYPE_IMAGE_SIZE: return set_string_image_size(obj, o, val, dst); case AV_OPT_TYPE_VIDEO_RATE: return set_string_video_rate(obj, o, val, dst); case AV_OPT_TYPE_PIXEL_FMT: return set_string_pixel_fmt(obj, o, val, dst); case AV_OPT_TYPE_SAMPLE_FMT: return set_string_sample_fmt(obj, o, val, dst); case AV_OPT_TYPE_DURATION: if (!val) { *(int64_t *)dst = 0; return 0; } else { if ((ret = av_parse_time(dst, val, 1)) < 0) av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as duration\n", val); return ret; } break; case AV_OPT_TYPE_COLOR: return set_string_color(obj, o, val, dst); case AV_OPT_TYPE_CHANNEL_LAYOUT: if (!val || !strcmp(val, "none")) { *(int64_t *)dst = 0; } else { int64_t cl = av_get_channel_layout(val); if (!cl) { av_log(obj, AV_LOG_ERROR, "Unable to parse option value \"%s\" as channel layout\n", val); ret = AVERROR(EINVAL); } *(int64_t *)dst = cl; return ret; } break; } av_log(obj, AV_LOG_ERROR, "Invalid option type.\n"); return AVERROR(EINVAL); }
17,208
qemu
5cb9b56acfc0b50acf7ccd2d044ab4991c47fdde
1
static int parse_uint64(DeviceState *dev, Property *prop, const char *str) { uint64_t *ptr = qdev_get_prop_ptr(dev, prop); char *end; /* accept both hex and decimal */ *ptr = strtoull(str, &end, 0); if ((*end != '\0') || (end == str)) { return -EINVAL; } return 0; }
17,209
qemu
9b2fadda3e0196ffd485adde4fe9cdd6fae35300
1
static void gen_mtsr_64b(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); #else TCGv t0; if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); return; } t0 = tcg_const_tl(SR(ctx->opcode)); gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]); tcg_temp_free(t0); #endif }
17,210
FFmpeg
2139e584391b6db7ad315cf4f6443f87f7813d51
1
static int64_t dv_frame_offset(AVFormatContext *s, DVDemuxContext *c, int64_t timestamp, int flags) { // FIXME: sys may be wrong if last dv_read_packet() failed (buffer is junk) const AVDVProfile *sys = av_dv_codec_profile2(c->vst->codec->width, c->vst->codec->height, c->vst->codec->pix_fmt, c->vst->codec->time_base); int64_t offset; int64_t size = avio_size(s->pb) - s->internal->data_offset; int64_t max_offset = ((size - 1) / sys->frame_size) * sys->frame_size; offset = sys->frame_size * timestamp; if (size >= 0 && offset > max_offset) offset = max_offset; else if (offset < 0) offset = 0; return offset + s->internal->data_offset; }
17,211
qemu
ae392c416c69a020226c768d9c3af08b29dd6d96
1
void msix_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len) { unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET; int vector; bool was_masked; if (!range_covers_byte(addr, len, enable_pos)) { return; } was_masked = dev->msix_function_masked; msix_update_function_masked(dev); if (!msix_enabled(dev)) { return; } pci_device_deassert_intx(dev); if (dev->msix_function_masked == was_masked) { return; } for (vector = 0; vector < dev->msix_entries_nr; ++vector) { msix_handle_mask_update(dev, vector); } }
17,212
qemu
d2164ad35c411d97abd2aa5c6f160283d215e214
1
static int get_uint32_equal(QEMUFile *f, void *pv, size_t size, VMStateField *field) { uint32_t *v = pv; uint32_t v2; qemu_get_be32s(f, &v2); if (*v == v2) { return 0; } error_report("%" PRIx32 " != %" PRIx32, *v, v2); return -EINVAL; }
17,213
qemu
60fe637bf0e4d7989e21e50f52526444765c63b4
1
static int qemu_rdma_accept(RDMAContext *rdma) { RDMACapabilities cap; struct rdma_conn_param conn_param = { .responder_resources = 2, .private_data = &cap, .private_data_len = sizeof(cap), }; struct rdma_cm_event *cm_event; struct ibv_context *verbs; int ret = -EINVAL; int idx; ret = rdma_get_cm_event(rdma->channel, &cm_event); if (ret) { goto err_rdma_dest_wait; } if (cm_event->event != RDMA_CM_EVENT_CONNECT_REQUEST) { rdma_ack_cm_event(cm_event); goto err_rdma_dest_wait; } memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap)); network_to_caps(&cap); if (cap.version < 1 || cap.version > RDMA_CONTROL_VERSION_CURRENT) { fprintf(stderr, "Unknown source RDMA version: %d, bailing...\n", cap.version); rdma_ack_cm_event(cm_event); goto err_rdma_dest_wait; } /* * Respond with only the capabilities this version of QEMU knows about. */ cap.flags &= known_capabilities; /* * Enable the ones that we do know about. * Add other checks here as new ones are introduced. */ if (cap.flags & RDMA_CAPABILITY_PIN_ALL) { rdma->pin_all = true; } rdma->cm_id = cm_event->id; verbs = cm_event->id->verbs; rdma_ack_cm_event(cm_event); DPRINTF("Memory pin all: %s\n", rdma->pin_all ? "enabled" : "disabled"); caps_to_network(&cap); DPRINTF("verbs context after listen: %p\n", verbs); if (!rdma->verbs) { rdma->verbs = verbs; } else if (rdma->verbs != verbs) { fprintf(stderr, "ibv context not matching %p, %p!\n", rdma->verbs, verbs); goto err_rdma_dest_wait; } qemu_rdma_dump_id("dest_init", verbs); ret = qemu_rdma_alloc_pd_cq(rdma); if (ret) { fprintf(stderr, "rdma migration: error allocating pd and cq!\n"); goto err_rdma_dest_wait; } ret = qemu_rdma_alloc_qp(rdma); if (ret) { fprintf(stderr, "rdma migration: error allocating qp!\n"); goto err_rdma_dest_wait; } ret = qemu_rdma_init_ram_blocks(rdma); if (ret) { fprintf(stderr, "rdma migration: error initializing ram blocks!\n"); goto err_rdma_dest_wait; } for (idx = 0; idx < RDMA_WRID_MAX; idx++) { ret = qemu_rdma_reg_control(rdma, idx); if (ret) { fprintf(stderr, "rdma: error registering %d control!\n", idx); goto err_rdma_dest_wait; } } qemu_set_fd_handler2(rdma->channel->fd, NULL, NULL, NULL, NULL); ret = rdma_accept(rdma->cm_id, &conn_param); if (ret) { fprintf(stderr, "rdma_accept returns %d!\n", ret); goto err_rdma_dest_wait; } ret = rdma_get_cm_event(rdma->channel, &cm_event); if (ret) { fprintf(stderr, "rdma_accept get_cm_event failed %d!\n", ret); goto err_rdma_dest_wait; } if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) { fprintf(stderr, "rdma_accept not event established!\n"); rdma_ack_cm_event(cm_event); goto err_rdma_dest_wait; } rdma_ack_cm_event(cm_event); rdma->connected = true; ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY); if (ret) { fprintf(stderr, "rdma migration: error posting second control recv!\n"); goto err_rdma_dest_wait; } qemu_rdma_dump_gid("dest_connect", rdma->cm_id); return 0; err_rdma_dest_wait: rdma->error_state = ret; qemu_rdma_cleanup(rdma); return ret; }
17,214
qemu
8be7e7e4c72c048b90e3482557954a24bba43ba7
1
int inet_listen(const char *str, char *ostr, int olen, int socktype, int port_offset, Error **errp) { QemuOpts *opts; char *optstr; int sock = -1; opts = qemu_opts_create(&dummy_opts, NULL, 0); if (inet_parse(opts, str) == 0) { sock = inet_listen_opts(opts, port_offset, errp); if (sock != -1 && ostr) { optstr = strchr(str, ','); if (qemu_opt_get_bool(opts, "ipv6", 0)) { snprintf(ostr, olen, "[%s]:%s%s", qemu_opt_get(opts, "host"), qemu_opt_get(opts, "port"), optstr ? optstr : ""); } else { snprintf(ostr, olen, "%s:%s%s", qemu_opt_get(opts, "host"), qemu_opt_get(opts, "port"), optstr ? optstr : ""); } } } else { error_set(errp, QERR_SOCKET_CREATE_FAILED); } qemu_opts_del(opts); return sock; }
17,215
FFmpeg
9b8152bf047bbebe4495b993258591687bcdd36d
1
static int ogg_read_page(AVFormatContext *s, int *sid) { AVIOContext *bc = s->pb; struct ogg *ogg = s->priv_data; struct ogg_stream *os; int ret, i = 0; int flags, nsegs; uint64_t gp; uint32_t serial; int size, idx; uint8_t sync[4]; int sp = 0; ret = avio_read(bc, sync, 4); if (ret < 4) return ret < 0 ? ret : AVERROR_EOF; do { int c; if (sync[sp & 3] == 'O' && sync[(sp + 1) & 3] == 'g' && sync[(sp + 2) & 3] == 'g' && sync[(sp + 3) & 3] == 'S') break; if(!i && bc->seekable && ogg->page_pos > 0) { memset(sync, 0, 4); avio_seek(bc, ogg->page_pos+4, SEEK_SET); ogg->page_pos = -1; } c = avio_r8(bc); if (avio_feof(bc)) return AVERROR_EOF; sync[sp++ & 3] = c; } while (i++ < MAX_PAGE_SIZE); if (i >= MAX_PAGE_SIZE) { av_log(s, AV_LOG_INFO, "cannot find sync word\n"); return AVERROR_INVALIDDATA; } if (avio_r8(bc) != 0) { /* version */ av_log (s, AV_LOG_ERROR, "ogg page, unsupported version\n"); return AVERROR_INVALIDDATA; } flags = avio_r8(bc); gp = avio_rl64(bc); serial = avio_rl32(bc); avio_skip(bc, 8); /* seq, crc */ nsegs = avio_r8(bc); idx = ogg_find_stream(ogg, serial); if (idx < 0) { if (data_packets_seen(ogg)) idx = ogg_replace_stream(s, serial, nsegs); else idx = ogg_new_stream(s, serial); if (idx < 0) { av_log(s, AV_LOG_ERROR, "failed to create or replace stream\n"); return idx; } } os = ogg->streams + idx; ogg->page_pos = os->page_pos = avio_tell(bc) - 27; if (os->psize > 0) ogg_new_buf(ogg, idx); ret = avio_read(bc, os->segments, nsegs); if (ret < nsegs) return ret < 0 ? ret : AVERROR_EOF; os->nsegs = nsegs; os->segp = 0; size = 0; for (i = 0; i < nsegs; i++) size += os->segments[i]; if (!(flags & OGG_FLAG_BOS)) os->got_data = 1; if (flags & OGG_FLAG_CONT || os->incomplete) { if (!os->psize) { // If this is the very first segment we started // playback in the middle of a continuation packet. // Discard it since we missed the start of it. while (os->segp < os->nsegs) { int seg = os->segments[os->segp++]; os->pstart += seg; if (seg < 255) break; } os->sync_pos = os->page_pos; } } else { os->psize = 0; os->sync_pos = os->page_pos; } if (os->bufsize - os->bufpos < size) { uint8_t *nb = av_malloc((os->bufsize *= 2) + FF_INPUT_BUFFER_PADDING_SIZE); if (!nb) return AVERROR(ENOMEM); memcpy(nb, os->buf, os->bufpos); av_free(os->buf); os->buf = nb; } ret = avio_read(bc, os->buf + os->bufpos, size); if (ret < size) return ret < 0 ? ret : AVERROR_EOF; os->bufpos += size; os->granule = gp; os->flags = flags; memset(os->buf + os->bufpos, 0, FF_INPUT_BUFFER_PADDING_SIZE); if (sid) *sid = idx; return 0; }
17,216
FFmpeg
2da0d70d5eebe42f9fcd27ee554419ebe2a5da06
1
static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width) { #ifdef HAVE_MMX asm volatile( "movq "MANGLE(bm01010101)", %%mm4\n\t" "mov %0, %%"REG_a" \n\t" "1: \n\t" "movq (%1, %%"REG_a",4), %%mm0 \n\t" "movq 8(%1, %%"REG_a",4), %%mm1 \n\t" "pand %%mm4, %%mm0 \n\t" "pand %%mm4, %%mm1 \n\t" "packuswb %%mm1, %%mm0 \n\t" "movq %%mm0, %%mm1 \n\t" "psrlw $8, %%mm0 \n\t" "pand %%mm4, %%mm1 \n\t" "packuswb %%mm0, %%mm0 \n\t" "packuswb %%mm1, %%mm1 \n\t" "movd %%mm0, (%3, %%"REG_a") \n\t" "movd %%mm1, (%2, %%"REG_a") \n\t" "add $4, %%"REG_a" \n\t" " js 1b \n\t" : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width) : "%"REG_a ); #else int i; for(i=0; i<width; i++) { dstU[i]= src1[4*i + 0]; dstV[i]= src1[4*i + 2]; } #endif assert(src1 == src2); }
17,217
qemu
f8ed85ac992c48814d916d5df4d44f9a971c5de4
1
static void r2d_init(MachineState *machine) { const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; SuperHCPU *cpu; CPUSH4State *env; ResetData *reset_info; struct SH7750State *s; MemoryRegion *sdram = g_new(MemoryRegion, 1); qemu_irq *irq; DriveInfo *dinfo; int i; DeviceState *dev; SysBusDevice *busdev; MemoryRegion *address_space_mem = get_system_memory(); PCIBus *pci_bus; if (cpu_model == NULL) { cpu_model = "SH7751R"; } cpu = cpu_sh4_init(cpu_model); if (cpu == NULL) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } env = &cpu->env; reset_info = g_malloc0(sizeof(ResetData)); reset_info->cpu = cpu; reset_info->vector = env->pc; qemu_register_reset(main_cpu_reset, reset_info); /* Allocate memory space */ memory_region_init_ram(sdram, NULL, "r2d.sdram", SDRAM_SIZE, &error_abort); vmstate_register_ram_global(sdram); memory_region_add_subregion(address_space_mem, SDRAM_BASE, sdram); /* Register peripherals */ s = sh7750_init(cpu, address_space_mem); irq = r2d_fpga_init(address_space_mem, 0x04000000, sh7750_irl(s)); dev = qdev_create(NULL, "sh_pci"); busdev = SYS_BUS_DEVICE(dev); qdev_init_nofail(dev); pci_bus = PCI_BUS(qdev_get_child_bus(dev, "pci")); sysbus_mmio_map(busdev, 0, P4ADDR(0x1e200000)); sysbus_mmio_map(busdev, 1, A7ADDR(0x1e200000)); sysbus_connect_irq(busdev, 0, irq[PCI_INTA]); sysbus_connect_irq(busdev, 1, irq[PCI_INTB]); sysbus_connect_irq(busdev, 2, irq[PCI_INTC]); sysbus_connect_irq(busdev, 3, irq[PCI_INTD]); sm501_init(address_space_mem, 0x10000000, SM501_VRAM_SIZE, irq[SM501], serial_hds[2]); /* onboard CF (True IDE mode, Master only). */ dinfo = drive_get(IF_IDE, 0, 0); dev = qdev_create(NULL, "mmio-ide"); busdev = SYS_BUS_DEVICE(dev); sysbus_connect_irq(busdev, 0, irq[CF_IDE]); qdev_prop_set_uint32(dev, "shift", 1); qdev_init_nofail(dev); sysbus_mmio_map(busdev, 0, 0x14001000); sysbus_mmio_map(busdev, 1, 0x1400080c); mmio_ide_init_drives(dev, dinfo, NULL); /* onboard flash memory */ dinfo = drive_get(IF_PFLASH, 0, 0); pflash_cfi02_register(0x0, NULL, "r2d.flash", FLASH_SIZE, dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, (16 * 1024), FLASH_SIZE >> 16, 1, 4, 0x0000, 0x0000, 0x0000, 0x0000, 0x555, 0x2aa, 0); /* NIC: rtl8139 on-board, and 2 slots. */ for (i = 0; i < nb_nics; i++) pci_nic_init_nofail(&nd_table[i], pci_bus, "rtl8139", i==0 ? 
"2" : NULL); /* USB keyboard */ usb_create_simple(usb_bus_find(-1), "usb-kbd"); /* Todo: register on board registers */ memset(&boot_params, 0, sizeof(boot_params)); if (kernel_filename) { int kernel_size; kernel_size = load_image_targphys(kernel_filename, SDRAM_BASE + LINUX_LOAD_OFFSET, INITRD_LOAD_OFFSET - LINUX_LOAD_OFFSET); if (kernel_size < 0) { fprintf(stderr, "qemu: could not load kernel '%s'\n", kernel_filename); exit(1); } /* initialization which should be done by firmware */ address_space_stl(&address_space_memory, SH7750_BCR1, 1 << 3, MEMTXATTRS_UNSPECIFIED, NULL); /* cs3 SDRAM */ address_space_stw(&address_space_memory, SH7750_BCR2, 3 << (3 * 2), MEMTXATTRS_UNSPECIFIED, NULL); /* cs3 32bit */ reset_info->vector = (SDRAM_BASE + LINUX_LOAD_OFFSET) | 0xa0000000; /* Start from P2 area */ } if (initrd_filename) { int initrd_size; initrd_size = load_image_targphys(initrd_filename, SDRAM_BASE + INITRD_LOAD_OFFSET, SDRAM_SIZE - INITRD_LOAD_OFFSET); if (initrd_size < 0) { fprintf(stderr, "qemu: could not load initrd '%s'\n", initrd_filename); exit(1); } /* initialization which should be done by firmware */ boot_params.loader_type = tswap32(1); boot_params.initrd_start = tswap32(INITRD_LOAD_OFFSET); boot_params.initrd_size = tswap32(initrd_size); } if (kernel_cmdline) { /* I see no evidence that this .kernel_cmdline buffer requires NUL-termination, so using strncpy should be ok. */ strncpy(boot_params.kernel_cmdline, kernel_cmdline, sizeof(boot_params.kernel_cmdline)); } rom_add_blob_fixed("boot_params", &boot_params, sizeof(boot_params), SDRAM_BASE + BOOT_PARAMS_OFFSET); }
17,218
qemu
b0f74c87a1dbd6b0c5e4de7f1c5cb40197e3fbe9
1
static void menelaus_rtc_hz(void *opaque) { struct menelaus_s *s = (struct menelaus_s *) opaque; s->rtc.next_comp --; s->rtc.alm_sec --; s->rtc.next += 1000; qemu_mod_timer(s->rtc.hz, s->rtc.next); if ((s->rtc.ctrl >> 3) & 3) { /* EVERY */ menelaus_rtc_update(s); if (((s->rtc.ctrl >> 3) & 3) == 1 && !s->rtc.tm.tm_sec) s->status |= 1 << 8; /* RTCTMR */ else if (((s->rtc.ctrl >> 3) & 3) == 2 && !s->rtc.tm.tm_min) s->status |= 1 << 8; /* RTCTMR */ else if (!s->rtc.tm.tm_hour) s->status |= 1 << 8; /* RTCTMR */ } else s->status |= 1 << 8; /* RTCTMR */ if ((s->rtc.ctrl >> 1) & 1) { /* RTC_AL_EN */ if (s->rtc.alm_sec == 0) s->status |= 1 << 9; /* RTCALM */ /* TODO: wake-up */ } if (s->rtc.next_comp <= 0) { s->rtc.next -= muldiv64((int16_t) s->rtc.comp, 1000, 0x8000); s->rtc.next_comp = 3600; } menelaus_update(s); }
17,219
qemu
67f3280c062d622dc077246b483702096d11dcc0
1
static void net_slirp_cleanup(NetClientState *nc) { SlirpState *s = DO_UPCAST(SlirpState, nc, nc); slirp_cleanup(s->slirp); qemu_remove_exit_notifier(&s->exit_notifier); slirp_smb_cleanup(s); QTAILQ_REMOVE(&slirp_stacks, s, entry); }
17,220
FFmpeg
09ef98f1ae3c8a4e08b66f41c3bd97dd7b07405f
1
static void FUNC(intra_pred)(HEVCContext *s, int x0, int y0, int log2_size, int c_idx) { #define PU(x) \ ((x) >> s->sps->log2_min_pu_size) #define MVF(x, y) \ (s->ref->tab_mvf[(x) + (y) * pic_width_in_min_pu]) #define MVF_PU(x, y) \ MVF(PU(x0 + ((x) << hshift)), PU(y0 + ((y) << vshift))) #define IS_INTRA(x, y) \ MVF_PU(x, y).is_intra #define MIN_TB_ADDR_ZS(x, y) \ s->pps->min_tb_addr_zs[(y) * s->sps->min_tb_width + (x)] #define EXTEND_LEFT(ptr, start, length) \ for (i = (start); i > (start) - (length); i--) \ ptr[i - 1] = ptr[i] #define EXTEND_RIGHT(ptr, start, length) \ for (i = (start); i < (start) + (length); i++) \ ptr[i] = ptr[i - 1] #define EXTEND_UP(ptr, start, length) EXTEND_LEFT(ptr, start, length) #define EXTEND_DOWN(ptr, start, length) EXTEND_RIGHT(ptr, start, length) #define EXTEND_LEFT_CIP(ptr, start, length) \ for (i = (start); i > (start) - (length); i--) \ if (!IS_INTRA(i - 1, -1)) \ ptr[i - 1] = ptr[i] #define EXTEND_RIGHT_CIP(ptr, start, length) \ for (i = (start); i < (start) + (length); i++) \ if (!IS_INTRA(i, -1)) \ ptr[i] = ptr[i - 1] #define EXTEND_UP_CIP(ptr, start, length) \ for (i = (start); i > (start) - (length); i--) \ if (!IS_INTRA(-1, i - 1)) \ ptr[i - 1] = ptr[i] #define EXTEND_UP_CIP_0(ptr, start, length) \ for (i = (start); i > (start) - (length); i--) \ ptr[i - 1] = ptr[i] #define EXTEND_DOWN_CIP(ptr, start, length) \ for (i = (start); i < (start) + (length); i++) \ if (!IS_INTRA(-1, i)) \ ptr[i] = ptr[i - 1] HEVCLocalContext *lc = s->HEVClc; int i; int hshift = s->sps->hshift[c_idx]; int vshift = s->sps->vshift[c_idx]; int size = (1 << log2_size); int size_in_luma = size << hshift; int size_in_tbs = size_in_luma >> s->sps->log2_min_transform_block_size; int x = x0 >> hshift; int y = y0 >> vshift; int x_tb = x0 >> s->sps->log2_min_transform_block_size; int y_tb = y0 >> s->sps->log2_min_transform_block_size; int cur_tb_addr = MIN_TB_ADDR_ZS(x_tb, y_tb); ptrdiff_t stride = s->frame->linesize[c_idx] / sizeof(pixel); pixel *src = (pixel*)s->frame->data[c_idx] + x + y * stride; int pic_width_in_min_pu = PU(s->sps->width); enum IntraPredMode mode = c_idx ? 
lc->pu.intra_pred_mode_c : lc->tu.cur_intra_pred_mode; pixel left_array[2 * MAX_TB_SIZE + 1]; pixel filtered_left_array[2 * MAX_TB_SIZE + 1]; pixel top_array[2 * MAX_TB_SIZE + 1]; pixel filtered_top_array[2 * MAX_TB_SIZE + 1]; pixel *left = left_array + 1; pixel *top = top_array + 1; pixel *filtered_left = filtered_left_array + 1; pixel *filtered_top = filtered_top_array + 1; int cand_bottom_left = lc->na.cand_bottom_left && cur_tb_addr > MIN_TB_ADDR_ZS(x_tb - 1, y_tb + size_in_tbs); int cand_left = lc->na.cand_left; int cand_up_left = lc->na.cand_up_left; int cand_up = lc->na.cand_up; int cand_up_right = lc->na.cand_up_right && cur_tb_addr > MIN_TB_ADDR_ZS(x_tb + size_in_tbs, y_tb - 1); int bottom_left_size = (FFMIN(y0 + 2 * size_in_luma, s->sps->height) - (y0 + size_in_luma)) >> vshift; int top_right_size = (FFMIN(x0 + 2 * size_in_luma, s->sps->width) - (x0 + size_in_luma)) >> hshift; if (s->pps->constrained_intra_pred_flag == 1) { int size_in_luma_pu = PU(size_in_luma); int on_pu_edge_x = !(x0 & ((1 << s->sps->log2_min_pu_size) - 1)); int on_pu_edge_y = !(y0 & ((1 << s->sps->log2_min_pu_size) - 1)); if(!size_in_luma_pu) size_in_luma_pu++; if (cand_bottom_left == 1 && on_pu_edge_x) { int x_left_pu = PU(x0 - 1); int y_bottom_pu = PU(y0 + size_in_luma); cand_bottom_left = 0; for(i = 0; i < size_in_luma_pu; i++) cand_bottom_left |= MVF(x_left_pu, y_bottom_pu + i).is_intra; } if (cand_left == 1 && on_pu_edge_x) { int x_left_pu = PU(x0 - 1); int y_left_pu = PU(y0); cand_left = 0; for(i = 0; i < size_in_luma_pu; i++) cand_left |= MVF(x_left_pu, y_left_pu + i).is_intra; } if (cand_up_left == 1) { int x_left_pu = PU(x0 - 1); int y_top_pu = PU(y0 - 1); cand_up_left = MVF(x_left_pu, y_top_pu).is_intra; } if (cand_up == 1 && on_pu_edge_y) { int x_top_pu = PU(x0); int y_top_pu = PU(y0 - 1); cand_up = 0; for(i = 0; i < size_in_luma_pu; i++) cand_up |= MVF(x_top_pu + i, y_top_pu).is_intra; } if (cand_up_right == 1 && on_pu_edge_y) { int y_top_pu = PU(y0 - 1); int x_right_pu = PU(x0 + size_in_luma); cand_up_right = 0; for(i = 0; i < size_in_luma_pu; i++) cand_up_right |= MVF(x_right_pu + i, y_top_pu).is_intra; } for (i = 0; i < 2 * MAX_TB_SIZE; i++) { left[i] = 128; top[i] = 128; } } if (cand_bottom_left) { for (i = size + bottom_left_size; i < (size << 1); i++) if (IS_INTRA(-1, size + bottom_left_size - 1) || !s->pps->constrained_intra_pred_flag) left[i] = POS(-1, size + bottom_left_size - 1); for (i = size + bottom_left_size - 1; i >= size; i--) if (IS_INTRA(-1, i) || !s->pps->constrained_intra_pred_flag) left[i] = POS(-1, i); } if (cand_left) for (i = size - 1; i >= 0; i--) if (IS_INTRA(-1, i) || !s->pps->constrained_intra_pred_flag) left[i] = POS(-1, i); if (cand_up_left) if (IS_INTRA(-1, -1) || !s->pps->constrained_intra_pred_flag) { left[-1] = POS(-1, -1); top[-1] = left[-1]; } if (cand_up) for (i = size - 1; i >= 0; i--) if (IS_INTRA(i, -1) || !s->pps->constrained_intra_pred_flag) top[i] = POS(i, -1); if (cand_up_right) { for (i = size + top_right_size; i < (size << 1); i++) if (IS_INTRA(size + top_right_size - 1, -1) || !s->pps->constrained_intra_pred_flag) top[i] = POS(size + top_right_size - 1, -1); for (i = size + top_right_size - 1; i >= size; i--) if (IS_INTRA(i, -1) || !s->pps->constrained_intra_pred_flag) top[i] = POS(i, -1); } if (s->pps->constrained_intra_pred_flag == 1) { if (cand_bottom_left || cand_left || cand_up_left || cand_up || cand_up_right) { int size_max_x = x0 + ((2 * size) << hshift) < s->sps->width ? 
2 * size : (s->sps->width - x0) >> hshift; int size_max_y = y0 + ((2 * size) << vshift) < s->sps->height ? 2 * size : (s->sps->height - y0) >> vshift; int j = size + (cand_bottom_left? bottom_left_size: 0) -1; if (!cand_up_right) { size_max_x = x0 + ((size) << hshift) < s->sps->width ? size : (s->sps->width - x0) >> hshift; } if (!cand_bottom_left) { size_max_y = y0 + (( size) << vshift) < s->sps->height ? size : (s->sps->height - y0) >> vshift; } if (cand_bottom_left || cand_left || cand_up_left) { while (j>-1 && !IS_INTRA(-1, j)) j--; if (!IS_INTRA(-1, j)) { j = 0; while(j < size_max_x && !IS_INTRA(j, -1)) j++; EXTEND_LEFT_CIP(top, j, j+1); left[-1] = top[-1]; j = 0; } } else { j = 0; while (j < size_max_x && !IS_INTRA(j, -1)) j++; if (j > 0) if (x0 > 0) { EXTEND_LEFT_CIP(top, j, j+1); } else { EXTEND_LEFT_CIP(top, j, j); top[-1] = top[0]; } left[-1] = top[-1]; j = 0; } if (cand_bottom_left || cand_left) { EXTEND_DOWN_CIP(left, j, size_max_y-j); } if (!cand_left) { EXTEND_DOWN(left, 0, size); } if (!cand_bottom_left) { EXTEND_DOWN(left, size, size); } if (x0 != 0 && y0 != 0) { EXTEND_UP_CIP(left, size_max_y - 1, size_max_y); } else if( x0 == 0) { EXTEND_UP_CIP_0(left, size_max_y - 1, size_max_y); } else{ EXTEND_UP_CIP(left, size_max_y - 1, size_max_y-1); } top[-1] = left[-1]; if (y0 != 0) { EXTEND_RIGHT_CIP(top, 0, size_max_x); } } } // Infer the unavailable samples if (!cand_bottom_left) { if (cand_left) { EXTEND_DOWN(left, size, size); } else if (cand_up_left) { EXTEND_DOWN(left, 0, 2 * size); cand_left = 1; } else if (cand_up) { left[-1] = top[0]; EXTEND_DOWN(left, 0, 2 * size); cand_up_left = 1; cand_left = 1; } else if (cand_up_right) { EXTEND_LEFT(top, size, size); left[-1] = top[0]; EXTEND_DOWN(left ,0 , 2 * size); cand_up = 1; cand_up_left = 1; cand_left = 1; } else { // No samples available top[0] = left[-1] = (1 << (BIT_DEPTH - 1)); EXTEND_RIGHT(top, 1, 2 * size - 1); EXTEND_DOWN(left, 0, 2 * size); } } if (!cand_left) { EXTEND_UP(left, size, size); } if (!cand_up_left) { left[-1] = left[0]; } if (!cand_up) { top[0] = left[-1]; EXTEND_RIGHT(top, 1, size-1); } if (!cand_up_right) { EXTEND_RIGHT(top, size, size); } top[-1] = left[-1]; #undef EXTEND_LEFT_CIP #undef EXTEND_RIGHT_CIP #undef EXTEND_UP_CIP #undef EXTEND_DOWN_CIP #undef IS_INTRA #undef MVF_PU #undef MVF #undef PU #undef EXTEND_LEFT #undef EXTEND_RIGHT #undef EXTEND_UP #undef EXTEND_DOWN #undef MIN_TB_ADDR_ZS // Filtering process if (c_idx == 0 && mode != INTRA_DC && size != 4) { int intra_hor_ver_dist_thresh[] = { 7, 1, 0 }; int min_dist_vert_hor = FFMIN(FFABS((int)mode - 26), FFABS((int)mode - 10)); if (min_dist_vert_hor > intra_hor_ver_dist_thresh[log2_size - 3]) { int threshold = 1 << (BIT_DEPTH - 5); if (s->sps->sps_strong_intra_smoothing_enable_flag && log2_size == 5 && FFABS(top[-1] + top[63] - 2 * top[31]) < threshold && FFABS(left[-1] + left[63] - 2 * left[31]) < threshold) { // We can't just overwrite values in top because it could be // a pointer into src filtered_top[-1] = top[-1]; filtered_top[63] = top[63]; for (i = 0; i < 63; i++) filtered_top[i] = ((64 - (i + 1)) * top[-1] + (i + 1) * top[63] + 32) >> 6; for (i = 0; i < 63; i++) left[i] = ((64 - (i + 1)) * left[-1] + (i + 1) * left[63] + 32) >> 6; top = filtered_top; } else { filtered_left[2 * size - 1] = left[2 * size - 1]; filtered_top[2 * size - 1] = top[2 * size - 1]; for (i = 2 * size - 2; i >= 0; i--) filtered_left[i] = (left[i + 1] + 2 * left[i] + left[i - 1] + 2) >> 2; filtered_top[-1] = filtered_left[-1] = (left[0] + 2 * left[-1] + top[0] + 2) 
>> 2; for (i = 2 * size - 2; i >= 0; i--) filtered_top[i] = (top[i + 1] + 2 * top[i] + top[i - 1] + 2) >> 2; left = filtered_left; top = filtered_top; } } } switch (mode) { case INTRA_PLANAR: s->hpc.pred_planar[log2_size - 2]((uint8_t*)src, (uint8_t*)top, (uint8_t*)left, stride); break; case INTRA_DC: s->hpc.pred_dc((uint8_t*)src, (uint8_t*)top, (uint8_t*)left, stride, log2_size, c_idx); break; default: s->hpc.pred_angular[log2_size - 2]((uint8_t*)src, (uint8_t*)top, (uint8_t*)left, stride, c_idx, mode); break; } }
17,221
qemu
b125f9dc7bd68cd4c57189db4da83b0620b28a72
1
TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc, target_ulong cs_base, int flags, int cflags) { CPUArchState *env = cpu->env_ptr; TranslationBlock *tb; tb_page_addr_t phys_pc, phys_page2; target_ulong virt_page2; tcg_insn_unit *gen_code_buf; int gen_code_size, search_size; #ifdef CONFIG_PROFILER int64_t ti; #endif phys_pc = get_page_addr_code(env, pc); if (use_icount) { cflags |= CF_USE_ICOUNT; } tb = tb_alloc(pc); if (!tb) { /* flush must be done */ tb_flush(cpu); /* cannot fail at this point */ tb = tb_alloc(pc); /* Don't forget to invalidate previous TB info. */ tcg_ctx.tb_ctx.tb_invalidated_flag = 1; } gen_code_buf = tcg_ctx.code_gen_ptr; tb->tc_ptr = gen_code_buf; tb->cs_base = cs_base; tb->flags = flags; tb->cflags = cflags; #ifdef CONFIG_PROFILER tcg_ctx.tb_count1++; /* includes aborted translations because of exceptions */ ti = profile_getclock(); #endif tcg_func_start(&tcg_ctx); gen_intermediate_code(env, tb); trace_translate_block(tb, tb->pc, tb->tc_ptr); /* generate machine code */ tb->tb_next_offset[0] = 0xffff; tb->tb_next_offset[1] = 0xffff; tcg_ctx.tb_next_offset = tb->tb_next_offset; #ifdef USE_DIRECT_JUMP tcg_ctx.tb_jmp_offset = tb->tb_jmp_offset; tcg_ctx.tb_next = NULL; #else tcg_ctx.tb_jmp_offset = NULL; tcg_ctx.tb_next = tb->tb_next; #endif #ifdef CONFIG_PROFILER tcg_ctx.tb_count++; tcg_ctx.interm_time += profile_getclock() - ti; tcg_ctx.code_time -= profile_getclock(); #endif gen_code_size = tcg_gen_code(&tcg_ctx, gen_code_buf); search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size); #ifdef CONFIG_PROFILER tcg_ctx.code_time += profile_getclock(); tcg_ctx.code_in_len += tb->size; tcg_ctx.code_out_len += gen_code_size; tcg_ctx.search_out_len += search_size; #endif #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) { qemu_log("OUT: [size=%d]\n", gen_code_size); log_disas(tb->tc_ptr, gen_code_size); qemu_log("\n"); qemu_log_flush(); } #endif tcg_ctx.code_gen_ptr = (void *) ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size, CODE_GEN_ALIGN); /* check next page if needed */ virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; phys_page2 = -1; if ((pc & TARGET_PAGE_MASK) != virt_page2) { phys_page2 = get_page_addr_code(env, virt_page2); } tb_link_page(tb, phys_pc, phys_page2); return tb; }
17,222
FFmpeg
f4b288a639bbda3ca244072e67b689aa4f40f2c6
1
static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype) { MpegEncContext *s = &v->s; int xy, wrap, off = 0; int16_t *A, *B, *C; int px, py; int sum; int r_x, r_y; const uint8_t *is_intra = v->mb_type[0]; r_x = v->range_x; r_y = v->range_y; /* scale MV difference to be quad-pel */ dmv_x[0] <<= 1 - s->quarter_sample; dmv_y[0] <<= 1 - s->quarter_sample; dmv_x[1] <<= 1 - s->quarter_sample; dmv_y[1] <<= 1 - s->quarter_sample; wrap = s->b8_stride; xy = s->block_index[0]; if (s->mb_intra) { s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0; return; } if (!v->field_mode) { s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample); s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample); s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample); s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample); /* Pullback predicted motion vectors as specified in 8.4.5.4 */ s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6)); s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6)); s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6)); s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6)); } if (direct) { s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0]; s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1]; s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0]; s->current_picture.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1]; return; } if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) { C = s->current_picture.motion_val[0][xy - 2]; A = s->current_picture.motion_val[0][xy - wrap * 2]; off = (s->mb_x == (s->mb_width - 1)) ? 
-2 : 2; B = s->current_picture.motion_val[0][xy - wrap * 2 + off]; if (!s->mb_x) C[0] = C[1] = 0; if (!s->first_slice_line) { // predictor A is not out of bounds if (s->mb_width == 1) { px = A[0]; py = A[1]; } else { px = mid_pred(A[0], B[0], C[0]); py = mid_pred(A[1], B[1], C[1]); } } else if (s->mb_x) { // predictor C is not out of bounds px = C[0]; py = C[1]; } else { px = py = 0; } /* Pullback MV as specified in 8.3.5.3.4 */ { int qx, qy, X, Y; if (v->profile < PROFILE_ADVANCED) { qx = (s->mb_x << 5); qy = (s->mb_y << 5); X = (s->mb_width << 5) - 4; Y = (s->mb_height << 5) - 4; if (qx + px < -28) px = -28 - qx; if (qy + py < -28) py = -28 - qy; if (qx + px > X) px = X - qx; if (qy + py > Y) py = Y - qy; } else { qx = (s->mb_x << 6); qy = (s->mb_y << 6); X = (s->mb_width << 6) - 4; Y = (s->mb_height << 6) - 4; if (qx + px < -60) px = -60 - qx; if (qy + py < -60) py = -60 - qy; if (qx + px > X) px = X - qx; if (qy + py > Y) py = Y - qy; } } /* Calculate hybrid prediction as specified in 8.3.5.3.5 */ if (0 && !s->first_slice_line && s->mb_x) { if (is_intra[xy - wrap]) sum = FFABS(px) + FFABS(py); else sum = FFABS(px - A[0]) + FFABS(py - A[1]); if (sum > 32) { if (get_bits1(&s->gb)) { px = A[0]; py = A[1]; } else { px = C[0]; py = C[1]; } } else { if (is_intra[xy - 2]) sum = FFABS(px) + FFABS(py); else sum = FFABS(px - C[0]) + FFABS(py - C[1]); if (sum > 32) { if (get_bits1(&s->gb)) { px = A[0]; py = A[1]; } else { px = C[0]; py = C[1]; } } } } /* store MV using signed modulus of MV range defined in 4.11 */ s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x; s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y; } if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) { C = s->current_picture.motion_val[1][xy - 2]; A = s->current_picture.motion_val[1][xy - wrap * 2]; off = (s->mb_x == (s->mb_width - 1)) ? 
-2 : 2; B = s->current_picture.motion_val[1][xy - wrap * 2 + off]; if (!s->mb_x) C[0] = C[1] = 0; if (!s->first_slice_line) { // predictor A is not out of bounds if (s->mb_width == 1) { px = A[0]; py = A[1]; } else { px = mid_pred(A[0], B[0], C[0]); py = mid_pred(A[1], B[1], C[1]); } } else if (s->mb_x) { // predictor C is not out of bounds px = C[0]; py = C[1]; } else { px = py = 0; } /* Pullback MV as specified in 8.3.5.3.4 */ { int qx, qy, X, Y; if (v->profile < PROFILE_ADVANCED) { qx = (s->mb_x << 5); qy = (s->mb_y << 5); X = (s->mb_width << 5) - 4; Y = (s->mb_height << 5) - 4; if (qx + px < -28) px = -28 - qx; if (qy + py < -28) py = -28 - qy; if (qx + px > X) px = X - qx; if (qy + py > Y) py = Y - qy; } else { qx = (s->mb_x << 6); qy = (s->mb_y << 6); X = (s->mb_width << 6) - 4; Y = (s->mb_height << 6) - 4; if (qx + px < -60) px = -60 - qx; if (qy + py < -60) py = -60 - qy; if (qx + px > X) px = X - qx; if (qy + py > Y) py = Y - qy; } } /* Calculate hybrid prediction as specified in 8.3.5.3.5 */ if (0 && !s->first_slice_line && s->mb_x) { if (is_intra[xy - wrap]) sum = FFABS(px) + FFABS(py); else sum = FFABS(px - A[0]) + FFABS(py - A[1]); if (sum > 32) { if (get_bits1(&s->gb)) { px = A[0]; py = A[1]; } else { px = C[0]; py = C[1]; } } else { if (is_intra[xy - 2]) sum = FFABS(px) + FFABS(py); else sum = FFABS(px - C[0]) + FFABS(py - C[1]); if (sum > 32) { if (get_bits1(&s->gb)) { px = A[0]; py = A[1]; } else { px = C[0]; py = C[1]; } } } } /* store MV using signed modulus of MV range defined in 4.11 */ s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x; s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y; } s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0]; s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1]; s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0]; s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1]; }
17,223
qemu
2c8f86961b6eaac705be21bc98299f5517eb0b6b
1
static void test_ide_drive_user(const char *dev, bool trans) { char *argv[256], *opts; int argc; int secs = img_secs[backend_small]; const CHST expected_chst = { secs / (4 * 32) , 4, 32, trans }; argc = setup_common(argv, ARRAY_SIZE(argv)); opts = g_strdup_printf("%s,%s%scyls=%d,heads=%d,secs=%d", dev ?: "", trans && dev ? "bios-chs-" : "", trans ? "trans=lba," : "", expected_chst.cyls, expected_chst.heads, expected_chst.secs); cur_ide[0] = &expected_chst; argc = setup_ide(argc, argv, ARRAY_SIZE(argv), 0, dev ? opts : NULL, backend_small, mbr_chs, dev ? "" : opts); g_free(opts); qtest_start(g_strjoinv(" ", argv)); test_cmos(); qtest_end(); }
17,225
FFmpeg
44f110f509d0ab4fc73b9f2363a97c6577d3850f
1
static int read_rle_sgi(const SGIInfo *sgi_info, AVPicture *pict, ByteIOContext *f) { uint8_t *dest_row, *rle_data = NULL; unsigned long *start_table, *length_table; int y, z, xsize, ysize, zsize, tablen; long start_offset, run_length; int ret = 0; xsize = sgi_info->xsize; ysize = sgi_info->ysize; zsize = sgi_info->zsize; rle_data = av_malloc(xsize); /* skip header */ url_fseek(f, SGI_HEADER_SIZE, SEEK_SET); /* size of rle offset and length tables */ tablen = ysize * zsize * sizeof(long); start_table = (unsigned long *)av_malloc(tablen); length_table = (unsigned long *)av_malloc(tablen); if (!get_buffer(f, (uint8_t *)start_table, tablen)) { ret = -1; goto fail; } if (!get_buffer(f, (uint8_t *)length_table, tablen)) { ret = -1; goto fail; } for (z = 0; z < zsize; z++) { for (y = 0; y < ysize; y++) { dest_row = pict->data[0] + (ysize - 1 - y) * (xsize * zsize); start_offset = BE_32(&start_table[y + z * ysize]); run_length = BE_32(&length_table[y + z * ysize]); /* don't seek if already in the correct spot */ if (url_ftell(f) != start_offset) { url_fseek(f, start_offset, SEEK_SET); } get_buffer(f, rle_data, run_length); expand_rle_row(dest_row, rle_data, z, zsize); } } fail: av_free(start_table); av_free(length_table); av_free(rle_data); return ret; }
17,226
qemu
4656e1f01289cc3aa20986deb6a407165826abe5
1
static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, int rw) { int in_plb, ret; ctx->raddr = eaddr; ctx->prot = PAGE_READ | PAGE_EXEC; ret = 0; switch (env->mmu_model) { case POWERPC_MMU_32B: case POWERPC_MMU_601: case POWERPC_MMU_SOFT_6xx: case POWERPC_MMU_SOFT_74xx: case POWERPC_MMU_SOFT_4xx: case POWERPC_MMU_REAL: case POWERPC_MMU_BOOKE: ctx->prot |= PAGE_WRITE; break; #if defined(TARGET_PPC64) case POWERPC_MMU_620: case POWERPC_MMU_64B: case POWERPC_MMU_2_06: /* Real address are 60 bits long */ ctx->raddr &= 0x0FFFFFFFFFFFFFFFULL; ctx->prot |= PAGE_WRITE; break; #endif case POWERPC_MMU_SOFT_4xx_Z: if (unlikely(msr_pe != 0)) { /* 403 family add some particular protections, * using PBL/PBU registers for accesses with no translation. */ in_plb = /* Check PLB validity */ (env->pb[0] < env->pb[1] && /* and address in plb area */ eaddr >= env->pb[0] && eaddr < env->pb[1]) || (env->pb[2] < env->pb[3] && eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0; if (in_plb ^ msr_px) { /* Access in protected area */ if (rw == 1) { /* Access is not allowed */ ret = -2; } } else { /* Read-write access is allowed */ ctx->prot |= PAGE_WRITE; } } break; case POWERPC_MMU_MPC8xx: /* XXX: TODO */ cpu_abort(env, "MPC8xx MMU model is not implemented\n"); break; case POWERPC_MMU_BOOKE206: cpu_abort(env, "BookE 2.06 MMU doesn't have physical real mode\n"); break; default: cpu_abort(env, "Unknown or invalid MMU model\n"); return -1; } return ret; }
17,228
FFmpeg
2cd04cf919ce2f22da391bd80b7664a25348b943
1
static int pcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { PCMDecode *s = avctx->priv_data; int sample_size, c, n; short *samples; const uint8_t *src, *src2[MAX_CHANNELS]; uint8_t *dstu8; int16_t *dst_int16_t; int32_t *dst_int32_t; int64_t *dst_int64_t; uint16_t *dst_uint16_t; uint32_t *dst_uint32_t; samples = data; src = buf; if (avctx->sample_fmt!=avctx->codec->sample_fmts[0]) { av_log(avctx, AV_LOG_ERROR, "invalid sample_fmt\n"); return -1; } if(avctx->channels <= 0 || avctx->channels > MAX_CHANNELS){ av_log(avctx, AV_LOG_ERROR, "PCM channels out of bounds\n"); return -1; } sample_size = av_get_bits_per_sample(avctx->codec_id)/8; n = avctx->channels * sample_size; /* av_get_bits_per_sample returns 0 for CODEC_ID_PCM_DVD */ if (CODEC_ID_PCM_DVD == avctx->codec_id) /* 2 samples are interleaved per block in PCM_DVD */ n = 2 * avctx->channels * avctx->bits_per_sample/8; if(n && buf_size % n){ av_log(avctx, AV_LOG_ERROR, "invalid PCM packet\n"); return -1; } buf_size= FFMIN(buf_size, *data_size/2); *data_size=0; n = buf_size/sample_size; switch(avctx->codec->id) { case CODEC_ID_PCM_U32LE: DECODE(uint32_t, le32, src, samples, n, 0, 0x80000000) break; case CODEC_ID_PCM_U32BE: DECODE(uint32_t, be32, src, samples, n, 0, 0x80000000) break; case CODEC_ID_PCM_S24LE: DECODE(int32_t, le24, src, samples, n, 8, 0) break; case CODEC_ID_PCM_S24BE: DECODE(int32_t, be24, src, samples, n, 8, 0) break; case CODEC_ID_PCM_U24LE: DECODE(uint32_t, le24, src, samples, n, 8, 0x800000) break; case CODEC_ID_PCM_U24BE: DECODE(uint32_t, be24, src, samples, n, 8, 0x800000) break; case CODEC_ID_PCM_S24DAUD: for(;n>0;n--) { uint32_t v = bytestream_get_be24(&src); v >>= 4; // sync flags are here *samples++ = ff_reverse[(v >> 8) & 0xff] + (ff_reverse[v & 0xff] << 8); } break; case CODEC_ID_PCM_S16LE_PLANAR: n /= avctx->channels; for(c=0;c<avctx->channels;c++) src2[c] = &src[c*n*2]; for(;n>0;n--) for(c=0;c<avctx->channels;c++) *samples++ = bytestream_get_le16(&src2[c]); src = src2[avctx->channels-1]; break; case CODEC_ID_PCM_U16LE: DECODE(uint16_t, le16, src, samples, n, 0, 0x8000) break; case CODEC_ID_PCM_U16BE: DECODE(uint16_t, be16, src, samples, n, 0, 0x8000) break; case CODEC_ID_PCM_S8: dstu8= (uint8_t*)samples; for(;n>0;n--) { *dstu8++ = *src++ + 128; } samples= (short*)dstu8; break; #if WORDS_BIGENDIAN case CODEC_ID_PCM_F64LE: DECODE(int64_t, le64, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S32LE: case CODEC_ID_PCM_F32LE: DECODE(int32_t, le32, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S16LE: DECODE(int16_t, le16, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F64BE: case CODEC_ID_PCM_F32BE: case CODEC_ID_PCM_S32BE: case CODEC_ID_PCM_S16BE: #else case CODEC_ID_PCM_F64BE: DECODE(int64_t, be64, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F32BE: case CODEC_ID_PCM_S32BE: DECODE(int32_t, be32, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S16BE: DECODE(int16_t, be16, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F64LE: case CODEC_ID_PCM_F32LE: case CODEC_ID_PCM_S32LE: case CODEC_ID_PCM_S16LE: #endif /* WORDS_BIGENDIAN */ case CODEC_ID_PCM_U8: memcpy(samples, src, n*sample_size); src += n*sample_size; samples = (short*)((uint8_t*)data + n*sample_size); break; case CODEC_ID_PCM_ZORK: for(;n>0;n--) { int x= *src++; if(x&128) x-= 128; else x = -x; *samples++ = x << 8; } break; case CODEC_ID_PCM_ALAW: case CODEC_ID_PCM_MULAW: for(;n>0;n--) { *samples++ = s->table[*src++]; } break; case CODEC_ID_PCM_DVD: if(avctx->bits_per_sample != 20 && 
avctx->bits_per_sample != 24) { av_log(avctx, AV_LOG_ERROR, "PCM DVD unsupported sample depth\n"); return -1; } else { int jump = avctx->channels * (avctx->bits_per_sample-16) / 4; n = buf_size / (avctx->channels * 2 * avctx->bits_per_sample / 8); while (n--) { for (c=0; c < 2*avctx->channels; c++) *samples++ = bytestream_get_be16(&src); src += jump; } } break; default: return -1; } *data_size = (uint8_t *)samples - (uint8_t *)data; return src - buf; }
17,229
qemu
5839e53bbc0fec56021d758aab7610df421ed8c8
1
static int vmdk_add_extent(BlockDriverState *bs, BlockDriverState *file, bool flat, int64_t sectors, int64_t l1_offset, int64_t l1_backup_offset, uint32_t l1_size, int l2_size, uint64_t cluster_sectors, VmdkExtent **new_extent, Error **errp) { VmdkExtent *extent; BDRVVmdkState *s = bs->opaque; int64_t length; if (cluster_sectors > 0x200000) { /* 0x200000 * 512Bytes = 1GB for one cluster is unrealistic */ error_setg(errp, "Invalid granularity, image may be corrupt"); return -EFBIG; } if (l1_size > 512 * 1024 * 1024) { /* Although with big capacity and small l1_entry_sectors, we can get a * big l1_size, we don't want unbounded value to allocate the table. * Limit it to 512M, which is 16PB for default cluster and L2 table * size */ error_setg(errp, "L1 size too big"); return -EFBIG; } length = bdrv_getlength(file); if (length < 0) { return length; } s->extents = g_realloc(s->extents, (s->num_extents + 1) * sizeof(VmdkExtent)); extent = &s->extents[s->num_extents]; s->num_extents++; memset(extent, 0, sizeof(VmdkExtent)); extent->file = file; extent->flat = flat; extent->sectors = sectors; extent->l1_table_offset = l1_offset; extent->l1_backup_table_offset = l1_backup_offset; extent->l1_size = l1_size; extent->l1_entry_sectors = l2_size * cluster_sectors; extent->l2_size = l2_size; extent->cluster_sectors = flat ? sectors : cluster_sectors; extent->next_cluster_sector = ROUND_UP(DIV_ROUND_UP(length, BDRV_SECTOR_SIZE), cluster_sectors); if (s->num_extents > 1) { extent->end_sector = (*(extent - 1)).end_sector + extent->sectors; } else { extent->end_sector = extent->sectors; } bs->total_sectors = extent->end_sector; if (new_extent) { *new_extent = extent; } return 0; }
17,230
qemu
e6afc87f804abee7d0479be5e8e31c56d885fafb
1
static float64 addFloat64Sigs( float64 a, float64 b, flag zSign STATUS_PARAM ) { int16 aExp, bExp, zExp; uint64_t aSig, bSig, zSig; int16 expDiff; aSig = extractFloat64Frac( a ); aExp = extractFloat64Exp( a ); bSig = extractFloat64Frac( b ); bExp = extractFloat64Exp( b ); expDiff = aExp - bExp; aSig <<= 9; bSig <<= 9; if ( 0 < expDiff ) { if ( aExp == 0x7FF ) { if ( aSig ) return propagateFloat64NaN( a, b STATUS_VAR ); return a; } if ( bExp == 0 ) { --expDiff; } else { bSig |= LIT64( 0x2000000000000000 ); } shift64RightJamming( bSig, expDiff, &bSig ); zExp = aExp; } else if ( expDiff < 0 ) { if ( bExp == 0x7FF ) { if ( bSig ) return propagateFloat64NaN( a, b STATUS_VAR ); return packFloat64( zSign, 0x7FF, 0 ); } if ( aExp == 0 ) { ++expDiff; } else { aSig |= LIT64( 0x2000000000000000 ); } shift64RightJamming( aSig, - expDiff, &aSig ); zExp = bExp; } else { if ( aExp == 0x7FF ) { if ( aSig | bSig ) return propagateFloat64NaN( a, b STATUS_VAR ); return a; } if ( aExp == 0 ) { if ( STATUS(flush_to_zero) ) return packFloat64( zSign, 0, 0 ); return packFloat64( zSign, 0, ( aSig + bSig )>>9 ); } zSig = LIT64( 0x4000000000000000 ) + aSig + bSig; zExp = aExp; goto roundAndPack; } aSig |= LIT64( 0x2000000000000000 ); zSig = ( aSig + bSig )<<1; --zExp; if ( (int64_t) zSig < 0 ) { zSig = aSig + bSig; ++zExp; } roundAndPack: return roundAndPackFloat64( zSign, zExp, zSig STATUS_VAR ); }
17,231
qemu
bbc5842211cdd90103cfe52f2ca24afac880694f
1
static void mpic_reset (void *opaque) { openpic_t *mpp = (openpic_t *)opaque; int i; mpp->glbc = 0x80000000; /* Initialise controller registers */ mpp->frep = 0x004f0002; mpp->veni = VENI; mpp->pint = 0x00000000; mpp->spve = 0x0000FFFF; /* Initialise IRQ sources */ for (i = 0; i < mpp->max_irq; i++) { mpp->src[i].ipvp = 0x80800000; mpp->src[i].ide = 0x00000001; } /* Set IDE for IPIs to 0 so we don't get spurious interrupts */ for (i = mpp->irq_ipi0; i < (mpp->irq_ipi0 + MAX_IPI); i++) { mpp->src[i].ide = 0; } /* Initialise IRQ destinations */ for (i = 0; i < MAX_CPU; i++) { mpp->dst[i].pctp = 0x0000000F; mpp->dst[i].tfrr = 0x00000000; memset(&mpp->dst[i].raised, 0, sizeof(IRQ_queue_t)); mpp->dst[i].raised.next = -1; memset(&mpp->dst[i].servicing, 0, sizeof(IRQ_queue_t)); mpp->dst[i].servicing.next = -1; } /* Initialise timers */ for (i = 0; i < MAX_TMR; i++) { mpp->timers[i].ticc = 0x00000000; mpp->timers[i].tibc = 0x80000000; } /* Go out of RESET state */ mpp->glbc = 0x00000000; }
17,232
qemu
632314c49ce20ee9c974f07544d9125fbbbfbe1b
1
gen_intermediate_code_internal(CPUMBState *env, TranslationBlock *tb, int search_pc) { uint16_t *gen_opc_end; uint32_t pc_start; int j, lj; struct DisasContext ctx; struct DisasContext *dc = &ctx; uint32_t next_page_start, org_flags; target_ulong npc; int num_insns; int max_insns; qemu_log_try_set_file(stderr); pc_start = tb->pc; dc->env = env; dc->tb = tb; org_flags = dc->synced_flags = dc->tb_flags = tb->flags; gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE; dc->is_jmp = DISAS_NEXT; dc->jmp = 0; dc->delayed_branch = !!(dc->tb_flags & D_FLAG); if (dc->delayed_branch) { dc->jmp = JMP_INDIRECT; } dc->pc = pc_start; dc->singlestep_enabled = env->singlestep_enabled; dc->cpustate_changed = 0; dc->abort_at_next_insn = 0; dc->nr_nops = 0; if (pc_start & 3) cpu_abort(env, "Microblaze: unaligned PC=%x\n", pc_start); if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { #if !SIM_COMPAT qemu_log("--------------\n"); log_cpu_state(env, 0); #endif } next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE; lj = -1; num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) max_insns = CF_COUNT_MASK; gen_icount_start(); do { #if SIM_COMPAT if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc); gen_helper_debug(); } #endif check_breakpoint(env, dc); if (search_pc) { j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; if (lj < j) { lj++; while (lj < j) tcg_ctx.gen_opc_instr_start[lj++] = 0; } tcg_ctx.gen_opc_pc[lj] = dc->pc; tcg_ctx.gen_opc_instr_start[lj] = 1; tcg_ctx.gen_opc_icount[lj] = num_insns; } /* Pretty disas. */ LOG_DIS("%8.8x:\t", dc->pc); if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) gen_io_start(); dc->clear_imm = 1; decode(dc, cpu_ldl_code(env, dc->pc)); if (dc->clear_imm) dc->tb_flags &= ~IMM_FLAG; dc->pc += 4; num_insns++; if (dc->delayed_branch) { dc->delayed_branch--; if (!dc->delayed_branch) { if (dc->tb_flags & DRTI_FLAG) do_rti(dc); if (dc->tb_flags & DRTB_FLAG) do_rtb(dc); if (dc->tb_flags & DRTE_FLAG) do_rte(dc); /* Clear the delay slot flag. */ dc->tb_flags &= ~D_FLAG; /* If it is a direct jump, try direct chaining. */ if (dc->jmp == JMP_INDIRECT) { eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc)); dc->is_jmp = DISAS_JUMP; } else if (dc->jmp == JMP_DIRECT) { t_sync_flags(dc); gen_goto_tb(dc, 0, dc->jmp_pc); dc->is_jmp = DISAS_TB_JUMP; } else if (dc->jmp == JMP_DIRECT_CC) { int l1; t_sync_flags(dc); l1 = gen_new_label(); /* Conditional jmp. */ tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1); gen_goto_tb(dc, 1, dc->pc); gen_set_label(l1); gen_goto_tb(dc, 0, dc->jmp_pc); dc->is_jmp = DISAS_TB_JUMP; } break; } } if (env->singlestep_enabled) break; } while (!dc->is_jmp && !dc->cpustate_changed && tcg_ctx.gen_opc_ptr < gen_opc_end && !singlestep && (dc->pc < next_page_start) && num_insns < max_insns); npc = dc->pc; if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) { if (dc->tb_flags & D_FLAG) { dc->is_jmp = DISAS_UPDATE; tcg_gen_movi_tl(cpu_SR[SR_PC], npc); sync_jmpstate(dc); } else npc = dc->jmp_pc; } if (tb->cflags & CF_LAST_IO) gen_io_end(); /* Force an update if the per-tb cpu state has changed. 
*/ if (dc->is_jmp == DISAS_NEXT && (dc->cpustate_changed || org_flags != dc->tb_flags)) { dc->is_jmp = DISAS_UPDATE; tcg_gen_movi_tl(cpu_SR[SR_PC], npc); } t_sync_flags(dc); if (unlikely(env->singlestep_enabled)) { TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG); if (dc->is_jmp != DISAS_JUMP) { tcg_gen_movi_tl(cpu_SR[SR_PC], npc); } gen_helper_raise_exception(cpu_env, tmp); tcg_temp_free_i32(tmp); } else { switch(dc->is_jmp) { case DISAS_NEXT: gen_goto_tb(dc, 1, npc); break; default: case DISAS_JUMP: case DISAS_UPDATE: /* indicate that the hash table must be used to find the next TB */ tcg_gen_exit_tb(0); break; case DISAS_TB_JUMP: /* nothing more to generate */ break; } } gen_icount_end(tb, num_insns); *tcg_ctx.gen_opc_ptr = INDEX_op_end; if (search_pc) { j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; lj++; while (lj <= j) tcg_ctx.gen_opc_instr_start[lj++] = 0; } else { tb->size = dc->pc - pc_start; tb->icount = num_insns; } #ifdef DEBUG_DISAS #if !SIM_COMPAT if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { qemu_log("\n"); #if DISAS_GNU log_target_disas(env, pc_start, dc->pc - pc_start, 0); #endif qemu_log("\nisize=%d osize=%td\n", dc->pc - pc_start, tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf); } #endif #endif assert(!dc->abort_at_next_insn); }
17,233
FFmpeg
513a3494396d0a20233273b3cadcb5ee86485d5c
1
static int vp5_parse_coeff(VP56Context *s) { VP56RangeCoder *c = &s->c; VP56Model *model = s->modelp; uint8_t *permute = s->idct_scantable; uint8_t *model1, *model2; int coeff, sign, coeff_idx; int b, i, cg, idx, ctx, ctx_last; int pt = 0; /* plane type (0 for Y, 1 for U or V) */ if (c->end >= c->buffer && c->bits >= 0) { av_log(s->avctx, AV_LOG_ERROR, "End of AC stream reached in vp5_parse_coeff\n"); return AVERROR_INVALIDDATA; } for (b=0; b<6; b++) { int ct = 1; /* code type */ if (b > 3) pt = 1; ctx = 6*s->coeff_ctx[ff_vp56_b6to4[b]][0] + s->above_blocks[s->above_block_idx[b]].not_null_dc; model1 = model->coeff_dccv[pt]; model2 = model->coeff_dcct[pt][ctx]; coeff_idx = 0; for (;;) { if (vp56_rac_get_prob_branchy(c, model2[0])) { if (vp56_rac_get_prob_branchy(c, model2[2])) { if (vp56_rac_get_prob_branchy(c, model2[3])) { s->coeff_ctx[ff_vp56_b6to4[b]][coeff_idx] = 4; idx = vp56_rac_get_tree(c, ff_vp56_pc_tree, model1); sign = vp56_rac_get(c); coeff = ff_vp56_coeff_bias[idx+5]; for (i=ff_vp56_coeff_bit_length[idx]; i>=0; i--) coeff += vp56_rac_get_prob(c, ff_vp56_coeff_parse_table[idx][i]) << i; } else { if (vp56_rac_get_prob_branchy(c, model2[4])) { coeff = 3 + vp56_rac_get_prob(c, model1[5]); s->coeff_ctx[ff_vp56_b6to4[b]][coeff_idx] = 3; } else { coeff = 2; s->coeff_ctx[ff_vp56_b6to4[b]][coeff_idx] = 2; } sign = vp56_rac_get(c); } ct = 2; } else { ct = 1; s->coeff_ctx[ff_vp56_b6to4[b]][coeff_idx] = 1; sign = vp56_rac_get(c); coeff = 1; } coeff = (coeff ^ -sign) + sign; if (coeff_idx) coeff *= s->dequant_ac; s->block_coeff[b][permute[coeff_idx]] = coeff; } else { if (ct && !vp56_rac_get_prob_branchy(c, model2[1])) break; ct = 0; s->coeff_ctx[ff_vp56_b6to4[b]][coeff_idx] = 0; } coeff_idx++; if (coeff_idx >= 64) break; cg = vp5_coeff_groups[coeff_idx]; ctx = s->coeff_ctx[ff_vp56_b6to4[b]][coeff_idx]; model1 = model->coeff_ract[pt][ct][cg]; model2 = cg > 2 ? model1 : model->coeff_acct[pt][ct][cg][ctx]; } ctx_last = FFMIN(s->coeff_ctx_last[ff_vp56_b6to4[b]], 24); s->coeff_ctx_last[ff_vp56_b6to4[b]] = coeff_idx; if (coeff_idx < ctx_last) for (i=coeff_idx; i<=ctx_last; i++) s->coeff_ctx[ff_vp56_b6to4[b]][i] = 5; s->above_blocks[s->above_block_idx[b]].not_null_dc = s->coeff_ctx[ff_vp56_b6to4[b]][0]; } return 0; }
17,234
qemu
4efb1f7c612ff35badc8f8cbda78ac891fabf20a
1
static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset, void **table, bool read_from_disk) { BDRVQcow2State *s = bs->opaque; int i; int ret; int lookup_index; uint64_t min_lru_counter = UINT64_MAX; int min_lru_index = -1; trace_qcow2_cache_get(qemu_coroutine_self(), c == s->l2_table_cache, offset, read_from_disk); /* Check if the table is already cached */ i = lookup_index = (offset / s->cluster_size * 4) % c->size; do { const Qcow2CachedTable *t = &c->entries[i]; if (t->offset == offset) { goto found; } if (t->ref == 0 && t->lru_counter < min_lru_counter) { min_lru_counter = t->lru_counter; min_lru_index = i; } if (++i == c->size) { i = 0; } } while (i != lookup_index); if (min_lru_index == -1) { /* This can't happen in current synchronous code, but leave the check * here as a reminder for whoever starts using AIO with the cache */ abort(); } /* Cache miss: write a table back and replace it */ i = min_lru_index; trace_qcow2_cache_get_replace_entry(qemu_coroutine_self(), c == s->l2_table_cache, i); ret = qcow2_cache_entry_flush(bs, c, i); if (ret < 0) { return ret; } trace_qcow2_cache_get_read(qemu_coroutine_self(), c == s->l2_table_cache, i); c->entries[i].offset = 0; if (read_from_disk) { if (c == s->l2_table_cache) { BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD); } ret = bdrv_pread(bs->file, offset, qcow2_cache_get_table_addr(bs, c, i), s->cluster_size); if (ret < 0) { return ret; } } c->entries[i].offset = offset; /* And return the right table */ found: c->entries[i].ref++; *table = qcow2_cache_get_table_addr(bs, c, i); trace_qcow2_cache_get_done(qemu_coroutine_self(), c == s->l2_table_cache, i); return 0; }
17,235
qemu
787d134fb164e0395685744ef75829c15f5aee8d
1
int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src) { int len = 0; if (f_src->buf_index > 0) { len = f_src->buf_index; qemu_put_buffer(f_des, f_src->buf, f_src->buf_index); f_src->buf_index = 0; } return len; }
17,236
FFmpeg
8f19483d0652b43c7c2ff6b973843e4d0b769a5f
1
static int init_filters(const char *filters_descr) { char args[512]; int ret; AVFilter *abuffersrc = avfilter_get_by_name("abuffer"); AVFilter *abuffersink = avfilter_get_by_name("abuffersink"); AVFilterInOut *outputs = avfilter_inout_alloc(); AVFilterInOut *inputs = avfilter_inout_alloc(); const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 }; const int packing_fmts[] = { AVFILTER_PACKED, -1 }; const int64_t *chlayouts = avfilter_all_channel_layouts; AVABufferSinkParams *abuffersink_params; const AVFilterLink *outlink; filter_graph = avfilter_graph_alloc(); /* buffer audio source: the decoded frames from the decoder will be inserted here. */ if (!dec_ctx->channel_layout) dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels); snprintf(args, sizeof(args), "%d:%d:0x%"PRIx64":packed", dec_ctx->sample_rate, dec_ctx->sample_fmt, dec_ctx->channel_layout); ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in", args, NULL, filter_graph); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n"); return ret; } /* buffer audio sink: to terminate the filter chain. */ abuffersink_params = av_abuffersink_params_alloc(); abuffersink_params->sample_fmts = sample_fmts; abuffersink_params->channel_layouts = chlayouts; abuffersink_params->packing_fmts = packing_fmts; ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out", NULL, abuffersink_params, filter_graph); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n"); return ret; } /* Endpoints for the filter graph. */ outputs->name = av_strdup("in"); outputs->filter_ctx = buffersrc_ctx; outputs->pad_idx = 0; outputs->next = NULL; inputs->name = av_strdup("out"); inputs->filter_ctx = buffersink_ctx; inputs->pad_idx = 0; inputs->next = NULL; if ((ret = avfilter_graph_parse(filter_graph, filter_descr, &inputs, &outputs, NULL)) < 0) return ret; if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) return ret; /* Print summary of the sink buffer * Note: args buffer is reused to store channel layout string */ outlink = buffersink_ctx->inputs[0]; av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout); av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n", (int)outlink->sample_rate, (char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"), args); return 0; }
17,237
FFmpeg
b829da363985cb2f80130bba304cc29a632f6446
1
static int smacker_decode_tree(GetBitContext *gb, HuffContext *hc, uint32_t prefix, int length) { if(!get_bits1(gb)){ //Leaf if(hc->current >= 256){ av_log(NULL, AV_LOG_ERROR, "Tree size exceeded!\n"); if(length){ hc->bits[hc->current] = prefix; hc->lengths[hc->current] = length; } else { hc->bits[hc->current] = 0; hc->lengths[hc->current] = 0; hc->values[hc->current] = get_bits(gb, 8); hc->current++; if(hc->maxlength < length) hc->maxlength = length; return 0; } else { //Node int r; length++; r = smacker_decode_tree(gb, hc, prefix, length); if(r) return r; return smacker_decode_tree(gb, hc, prefix | (1 << (length - 1)), length);
17,238
FFmpeg
073c2593c9f0aa4445a6fc1b9b24e6e52a8cc2c1
1
static void init_2d_vlc_rl(RLTable *rl) { int i; init_vlc(&rl->vlc, TEX_VLC_BITS, rl->n + 2, &rl->table_vlc[0][1], 4, 2, &rl->table_vlc[0][0], 4, 2); rl->rl_vlc[0]= av_malloc(rl->vlc.table_size*sizeof(RL_VLC_ELEM)); for(i=0; i<rl->vlc.table_size; i++){ int code= rl->vlc.table[i][0]; int len = rl->vlc.table[i][1]; int level, run; if(len==0){ // illegal code run= 65; level= MAX_LEVEL; }else if(len<0){ //more bits needed run= 0; level= code; }else{ if(code==rl->n){ //esc run= 65; level= 0; }else if(code==rl->n+1){ //eob run= 0; level= 127; }else{ run= rl->table_run [code] + 1; level= rl->table_level[code]; } } rl->rl_vlc[0][i].len= len; rl->rl_vlc[0][i].level= level; rl->rl_vlc[0][i].run= run; } }
17,239
qemu
2a5ff735dc1074171a0cbb1dc228d6d6e907f571
0
static int ehci_state_advqueue(EHCIQueue *q, int async) { #if 0 /* TO-DO: 4.10.2 - paragraph 2 * if I-bit is set to 1 and QH is not active * go to horizontal QH */ if (I-bit set) { ehci_set_state(ehci, async, EST_HORIZONTALQH); goto out; } #endif /* * want data and alt-next qTD is valid */ if (((q->qh.token & QTD_TOKEN_TBYTES_MASK) != 0) && (q->qh.altnext_qtd > 0x1000) && (NLPTR_TBIT(q->qh.altnext_qtd) == 0)) { q->qtdaddr = q->qh.altnext_qtd; ehci_set_state(q->ehci, async, EST_FETCHQTD); /* * next qTD is valid */ } else if ((q->qh.next_qtd > 0x1000) && (NLPTR_TBIT(q->qh.next_qtd) == 0)) { q->qtdaddr = q->qh.next_qtd; ehci_set_state(q->ehci, async, EST_FETCHQTD); /* * no valid qTD, try next QH */ } else { ehci_set_state(q->ehci, async, EST_HORIZONTALQH); } return 1; }
17,241
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static uint32_t pmac_ide_readl (void *opaque,target_phys_addr_t addr) { uint32_t retval; MACIOIDEState *d = opaque; addr = (addr & 0xFFF) >> 4; if (addr == 0) { retval = ide_data_readl(&d->bus, 0); } else { retval = 0xFFFFFFFF; } retval = bswap32(retval); return retval; }
17,243
qemu
b2c98d9d392c87c9b9e975d30f79924719d9cbbe
0
static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { if (facilities & FACILITY_EXT_IMM) { tcg_out_insn(s, RRE, LGHR, dest, src); return; } if (type == TCG_TYPE_I32) { if (dest == src) { tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16); } else { tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16); } tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16); } else { tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48); tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48); } }
17,244
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
0
static int fill_note_info(struct elf_note_info *info, long signr, const CPUState *env) { #define NUMNOTES 3 CPUState *cpu = NULL; TaskState *ts = (TaskState *)env->opaque; int i; (void) memset(info, 0, sizeof (*info)); TAILQ_INIT(&info->thread_list); info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote)); if (info->notes == NULL) return (-ENOMEM); info->prstatus = qemu_mallocz(sizeof (*info->prstatus)); if (info->prstatus == NULL) return (-ENOMEM); info->psinfo = qemu_mallocz(sizeof (*info->psinfo)); if (info->prstatus == NULL) return (-ENOMEM); /* * First fill in status (and registers) of current thread * including process info & aux vector. */ fill_prstatus(info->prstatus, ts, signr); elf_core_copy_regs(&info->prstatus->pr_reg, env); fill_note(&info->notes[0], "CORE", NT_PRSTATUS, sizeof (*info->prstatus), info->prstatus); fill_psinfo(info->psinfo, ts); fill_note(&info->notes[1], "CORE", NT_PRPSINFO, sizeof (*info->psinfo), info->psinfo); fill_auxv_note(&info->notes[2], ts); info->numnote = 3; info->notes_size = 0; for (i = 0; i < info->numnote; i++) info->notes_size += note_size(&info->notes[i]); /* read and fill status of all threads */ cpu_list_lock(); for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) { if (cpu == thread_env) continue; fill_thread_info(info, cpu); } cpu_list_unlock(); return (0); }
17,247
qemu
dfd100f242370886bb6732f70f1f7cbd8eb9fedc
0
static QIOChannelSocket *nbd_establish_connection(SocketAddressFlat *saddr_flat, Error **errp) { SocketAddress *saddr = socket_address_crumple(saddr_flat); QIOChannelSocket *sioc; Error *local_err = NULL; sioc = qio_channel_socket_new(); qio_channel_set_name(QIO_CHANNEL(sioc), "nbd-client"); qio_channel_socket_connect_sync(sioc, saddr, &local_err); qapi_free_SocketAddress(saddr); if (local_err) { object_unref(OBJECT(sioc)); error_propagate(errp, local_err); return NULL; } qio_channel_set_delay(QIO_CHANNEL(sioc), false); return sioc; }
17,250
qemu
0919ac787641db11024912651f3bc5764d4f1286
0
void omap_inth_reset(struct omap_intr_handler_s *s) { int i; for (i = 0; i < s->nbanks; ++i){ s->bank[i].irqs = 0x00000000; s->bank[i].mask = 0xffffffff; s->bank[i].sens_edge = 0x00000000; s->bank[i].fiq = 0x00000000; s->bank[i].inputs = 0x00000000; s->bank[i].swi = 0x00000000; memset(s->bank[i].priority, 0, sizeof(s->bank[i].priority)); if (s->level_only) s->bank[i].sens_edge = 0xffffffff; } s->new_agr[0] = ~0; s->new_agr[1] = ~0; s->sir_intr[0] = 0; s->sir_intr[1] = 0; s->autoidle = 0; s->mask = ~0; qemu_set_irq(s->parent_intr[0], 0); qemu_set_irq(s->parent_intr[1], 0); }
17,252
qemu
41bf234d8e35e9273290df278e2aeb88c0c50a4f
0
static void gic_complete_irq(gic_state * s, int cpu, int irq) { int update = 0; int cm = 1 << cpu; DPRINTF("EOI %d\n", irq); if (s->running_irq[cpu] == 1023) return; /* No active IRQ. */ if (irq != 1023) { /* Mark level triggered interrupts as pending if they are still raised. */ if (!GIC_TEST_TRIGGER(irq) && GIC_TEST_ENABLED(irq) && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) { DPRINTF("Set %d pending mask %x\n", irq, cm); GIC_SET_PENDING(irq, cm); update = 1; } } if (irq != s->running_irq[cpu]) { /* Complete an IRQ that is not currently running. */ int tmp = s->running_irq[cpu]; while (s->last_active[tmp][cpu] != 1023) { if (s->last_active[tmp][cpu] == irq) { s->last_active[tmp][cpu] = s->last_active[irq][cpu]; break; } tmp = s->last_active[tmp][cpu]; } if (update) { gic_update(s); } } else { /* Complete the current running IRQ. */ gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]); } }
17,253
qemu
b131c74a0e485b084ddaffc8214c8a19af492be7
0
static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy, unsigned int queue_no, unsigned int vector) { VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no); EventNotifier *n = virtio_queue_get_guest_notifier(vq); VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; int ret; ret = kvm_irqchip_remove_irq_notifier(kvm_state, n, irqfd->virq); assert(ret == 0); if (--irqfd->users == 0) { kvm_irqchip_release_virq(kvm_state, irqfd->virq); } virtio_queue_set_guest_notifier_fd_handler(vq, true, false); }
17,254
qemu
fdfab37dfeffefbd4533b4158055c9b82d7c3e69
0
static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) { BDRVQcow2State *s = bs->opaque; int offset_in_cluster, n1; int ret; unsigned int cur_bytes; /* number of bytes in current iteration */ uint64_t cluster_offset = 0; uint64_t bytes_done = 0; QEMUIOVector hd_qiov; uint8_t *cluster_data = NULL; qemu_iovec_init(&hd_qiov, qiov->niov); qemu_co_mutex_lock(&s->lock); while (bytes != 0) { /* prepare next request */ cur_bytes = MIN(bytes, INT_MAX); if (s->cipher) { cur_bytes = MIN(cur_bytes, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); } ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset); if (ret < 0) { goto fail; } offset_in_cluster = offset_into_cluster(s, offset); qemu_iovec_reset(&hd_qiov); qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); switch (ret) { case QCOW2_CLUSTER_UNALLOCATED: if (bs->backing) { /* read from the base image */ n1 = qcow2_backing_read1(bs->backing->bs, &hd_qiov, offset, cur_bytes); if (n1 > 0) { QEMUIOVector local_qiov; qemu_iovec_init(&local_qiov, hd_qiov.niov); qemu_iovec_concat(&local_qiov, &hd_qiov, 0, n1); BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); qemu_co_mutex_unlock(&s->lock); ret = bdrv_co_preadv(bs->backing, offset, n1, &local_qiov, 0); qemu_co_mutex_lock(&s->lock); qemu_iovec_destroy(&local_qiov); if (ret < 0) { goto fail; } } } else { /* Note: in this case, no need to wait */ qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); } break; case QCOW2_CLUSTER_ZERO: qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); break; case QCOW2_CLUSTER_COMPRESSED: /* add AIO support for compressed blocks ? */ ret = qcow2_decompress_cluster(bs, cluster_offset); if (ret < 0) { goto fail; } qemu_iovec_from_buf(&hd_qiov, 0, s->cluster_cache + offset_in_cluster, cur_bytes); break; case QCOW2_CLUSTER_NORMAL: if ((cluster_offset & 511) != 0) { ret = -EIO; goto fail; } if (bs->encrypted) { assert(s->cipher); /* * For encrypted images, read everything into a temporary * contiguous buffer on which the AES functions can work. */ if (!cluster_data) { cluster_data = qemu_try_blockalign(bs->file->bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); if (cluster_data == NULL) { ret = -ENOMEM; goto fail; } } assert(cur_bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); qemu_iovec_reset(&hd_qiov); qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes); } BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); qemu_co_mutex_unlock(&s->lock); ret = bdrv_co_preadv(bs->file, cluster_offset + offset_in_cluster, cur_bytes, &hd_qiov, 0); qemu_co_mutex_lock(&s->lock); if (ret < 0) { goto fail; } if (bs->encrypted) { assert(s->cipher); assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0); Error *err = NULL; if (qcow2_encrypt_sectors(s, offset >> BDRV_SECTOR_BITS, cluster_data, cluster_data, cur_bytes >> BDRV_SECTOR_BITS, false, &err) < 0) { error_free(err); ret = -EIO; goto fail; } qemu_iovec_from_buf(qiov, bytes_done, cluster_data, cur_bytes); } break; default: g_assert_not_reached(); ret = -EIO; goto fail; } bytes -= cur_bytes; offset += cur_bytes; bytes_done += cur_bytes; } ret = 0; fail: qemu_co_mutex_unlock(&s->lock); qemu_iovec_destroy(&hd_qiov); qemu_vfree(cluster_data); return ret; }
17,255
qemu
861d72cd28b5793fc367c46b7821a5372b66e3f4
0
static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd) { if (fd < target_fd_max && target_fd_trans[fd]) { return target_fd_trans[fd]->target_to_host_addr; } return NULL; }
17,256
qemu
1308c464a8414ce3c6f79e172255fb90b5aa313d
0
void cpu_loop(CPUMIPSState *env) { CPUState *cs = CPU(mips_env_get_cpu(env)); target_siginfo_t info; int trapnr; abi_long ret; # ifdef TARGET_ABI_MIPSO32 unsigned int syscall_num; # endif for(;;) { cpu_exec_start(cs); trapnr = cpu_mips_exec(env); cpu_exec_end(cs); switch(trapnr) { case EXCP_SYSCALL: env->active_tc.PC += 4; # ifdef TARGET_ABI_MIPSO32 syscall_num = env->active_tc.gpr[2] - 4000; if (syscall_num >= sizeof(mips_syscall_args)) { ret = -TARGET_ENOSYS; } else { int nb_args; abi_ulong sp_reg; abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0; nb_args = mips_syscall_args[syscall_num]; sp_reg = env->active_tc.gpr[29]; switch (nb_args) { /* these arguments are taken from the stack */ case 8: if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) { goto done_syscall; } case 7: if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) { goto done_syscall; } case 6: if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) { goto done_syscall; } case 5: if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) { goto done_syscall; } default: break; } ret = do_syscall(env, env->active_tc.gpr[2], env->active_tc.gpr[4], env->active_tc.gpr[5], env->active_tc.gpr[6], env->active_tc.gpr[7], arg5, arg6, arg7, arg8); } done_syscall: # else ret = do_syscall(env, env->active_tc.gpr[2], env->active_tc.gpr[4], env->active_tc.gpr[5], env->active_tc.gpr[6], env->active_tc.gpr[7], env->active_tc.gpr[8], env->active_tc.gpr[9], env->active_tc.gpr[10], env->active_tc.gpr[11]); # endif /* O32 */ if (ret == -TARGET_QEMU_ESIGRETURN) { /* Returning from a successful sigreturn syscall. Avoid clobbering register state. */ break; } if ((abi_ulong)ret >= (abi_ulong)-1133) { env->active_tc.gpr[7] = 1; /* error flag */ ret = -ret; } else { env->active_tc.gpr[7] = 0; /* error flag */ } env->active_tc.gpr[2] = ret; break; case EXCP_TLBL: case EXCP_TLBS: case EXCP_AdEL: case EXCP_AdES: info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; /* XXX: check env->error_code */ info.si_code = TARGET_SEGV_MAPERR; info._sifields._sigfault._addr = env->CP0_BadVAddr; queue_signal(env, info.si_signo, &info); break; case EXCP_CpU: case EXCP_RI: info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = 0; queue_signal(env, info.si_signo, &info); break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; case EXCP_DEBUG: { int sig; sig = gdb_handlesig(cs, TARGET_SIGTRAP); if (sig) { info.si_signo = sig; info.si_errno = 0; info.si_code = TARGET_TRAP_BRKPT; queue_signal(env, info.si_signo, &info); } } break; case EXCP_SC: if (do_store_exclusive(env)) { info.si_signo = TARGET_SIGSEGV; info.si_errno = 0; info.si_code = TARGET_SEGV_MAPERR; info._sifields._sigfault._addr = env->active_tc.PC; queue_signal(env, info.si_signo, &info); } break; case EXCP_DSPDIS: info.si_signo = TARGET_SIGILL; info.si_errno = 0; info.si_code = TARGET_ILL_ILLOPC; queue_signal(env, info.si_signo, &info); break; /* The code below was inspired by the MIPS Linux kernel trap * handling code in arch/mips/kernel/traps.c. 
*/ case EXCP_BREAK: { abi_ulong trap_instr; unsigned int code; if (env->hflags & MIPS_HFLAG_M16) { if (env->insn_flags & ASE_MICROMIPS) { /* microMIPS mode */ abi_ulong instr[2]; ret = get_user_u16(instr[0], env->active_tc.PC) || get_user_u16(instr[1], env->active_tc.PC + 2); trap_instr = (instr[0] << 16) | instr[1]; } else { /* MIPS16e mode */ ret = get_user_u16(trap_instr, env->active_tc.PC); if (ret != 0) { goto error; } code = (trap_instr >> 6) & 0x3f; if (do_break(env, &info, code) != 0) { goto error; } break; } } else { ret = get_user_ual(trap_instr, env->active_tc.PC); } if (ret != 0) { goto error; } /* As described in the original Linux kernel code, the * below checks on 'code' are to work around an old * assembly bug. */ code = ((trap_instr >> 6) & ((1 << 20) - 1)); if (code >= (1 << 10)) { code >>= 10; } if (do_break(env, &info, code) != 0) { goto error; } } break; case EXCP_TRAP: { abi_ulong trap_instr; unsigned int code = 0; if (env->hflags & MIPS_HFLAG_M16) { /* microMIPS mode */ abi_ulong instr[2]; ret = get_user_u16(instr[0], env->active_tc.PC) || get_user_u16(instr[1], env->active_tc.PC + 2); trap_instr = (instr[0] << 16) | instr[1]; } else { ret = get_user_ual(trap_instr, env->active_tc.PC); } if (ret != 0) { goto error; } /* The immediate versions don't provide a code. */ if (!(trap_instr & 0xFC000000)) { if (env->hflags & MIPS_HFLAG_M16) { /* microMIPS mode */ code = ((trap_instr >> 12) & ((1 << 4) - 1)); } else { code = ((trap_instr >> 6) & ((1 << 10) - 1)); } } if (do_break(env, &info, code) != 0) { goto error; } } break; default: error: fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr); cpu_dump_state(cs, stderr, fprintf, 0); abort(); } process_pending_signals(env); } }
17,257
qemu
ef1e1e0782e99c9dcf2b35e5310cdd8ca9211374
0
static void sd_reset(SDState *sd) { uint64_t size; uint64_t sect; if (sd->blk) { blk_get_geometry(sd->blk, &sect); } else { sect = 0; } size = sect << 9; sect = sd_addr_to_wpnum(size) + 1; sd->state = sd_idle_state; sd->rca = 0x0000; sd_set_ocr(sd); sd_set_scr(sd); sd_set_cid(sd); sd_set_csd(sd, size); sd_set_cardstatus(sd); sd_set_sdstatus(sd); if (sd->wp_groups) g_free(sd->wp_groups); sd->wp_switch = sd->blk ? blk_is_read_only(sd->blk) : false; sd->wpgrps_size = sect; sd->wp_groups = bitmap_new(sd->wpgrps_size); memset(sd->function_group, 0, sizeof(sd->function_group)); sd->erase_start = 0; sd->erase_end = 0; sd->size = size; sd->blk_len = 0x200; sd->pwd_len = 0; sd->expecting_acmd = false; }
17,260
qemu
12d69ac03b45156356b240424623719f15d8143e
0
static void test_flush_event_notifier(void) { EventNotifierTestData data = { .n = 0, .active = 10, .auto_set = true }; event_notifier_init(&data.e, false); aio_set_event_notifier(ctx, &data.e, event_ready_cb); g_assert(!aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 0); g_assert_cmpint(data.active, ==, 10); event_notifier_set(&data.e); g_assert(aio_poll(ctx, false)); g_assert_cmpint(data.n, ==, 1); g_assert_cmpint(data.active, ==, 9); g_assert(aio_poll(ctx, false)); wait_until_inactive(&data); g_assert_cmpint(data.n, ==, 10); g_assert_cmpint(data.active, ==, 0); g_assert(!aio_poll(ctx, false)); aio_set_event_notifier(ctx, &data.e, NULL); g_assert(!aio_poll(ctx, false)); event_notifier_cleanup(&data.e); }
17,261
qemu
cd42d5b23691ad73edfd6dbcfc935a960a9c5a65
0
static inline void gen_intermediate_code_internal(X86CPU *cpu, TranslationBlock *tb, bool search_pc) { CPUState *cs = CPU(cpu); CPUX86State *env = &cpu->env; DisasContext dc1, *dc = &dc1; target_ulong pc_ptr; uint16_t *gen_opc_end; CPUBreakpoint *bp; int j, lj; uint64_t flags; target_ulong pc_start; target_ulong cs_base; int num_insns; int max_insns; /* generate intermediate code */ pc_start = tb->pc; cs_base = tb->cs_base; flags = tb->flags; dc->pe = (flags >> HF_PE_SHIFT) & 1; dc->code32 = (flags >> HF_CS32_SHIFT) & 1; dc->ss32 = (flags >> HF_SS32_SHIFT) & 1; dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1; dc->f_st = 0; dc->vm86 = (flags >> VM_SHIFT) & 1; dc->cpl = (flags >> HF_CPL_SHIFT) & 3; dc->iopl = (flags >> IOPL_SHIFT) & 3; dc->tf = (flags >> TF_SHIFT) & 1; dc->singlestep_enabled = cs->singlestep_enabled; dc->cc_op = CC_OP_DYNAMIC; dc->cc_op_dirty = false; dc->cs_base = cs_base; dc->tb = tb; dc->popl_esp_hack = 0; /* select memory access functions */ dc->mem_index = 0; if (flags & HF_SOFTMMU_MASK) { dc->mem_index = cpu_mmu_index(env); } dc->cpuid_features = env->features[FEAT_1_EDX]; dc->cpuid_ext_features = env->features[FEAT_1_ECX]; dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX]; dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX]; dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX]; #ifdef TARGET_X86_64 dc->lma = (flags >> HF_LMA_SHIFT) & 1; dc->code64 = (flags >> HF_CS64_SHIFT) & 1; #endif dc->flags = flags; dc->jmp_opt = !(dc->tf || cs->singlestep_enabled || (flags & HF_INHIBIT_IRQ_MASK) #ifndef CONFIG_SOFTMMU || (flags & HF_SOFTMMU_MASK) #endif ); /* Do not optimize repz jumps at all in icount mode, because rep movsS instructions are execured with different paths in !repz_opt and repz_opt modes. The first one was used always except single step mode. And this setting disables jumps optimization and control paths become equivalent in run and single step modes. Now there will be no jump optimization for repz in record/replay modes and there will always be an additional step for ecx=0 when icount is enabled. 
*/ dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT); #if 0 /* check addseg logic */ if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32)) printf("ERROR addseg\n"); #endif cpu_T[0] = tcg_temp_new(); cpu_T[1] = tcg_temp_new(); cpu_A0 = tcg_temp_new(); cpu_tmp0 = tcg_temp_new(); cpu_tmp1_i64 = tcg_temp_new_i64(); cpu_tmp2_i32 = tcg_temp_new_i32(); cpu_tmp3_i32 = tcg_temp_new_i32(); cpu_tmp4 = tcg_temp_new(); cpu_ptr0 = tcg_temp_new_ptr(); cpu_ptr1 = tcg_temp_new_ptr(); cpu_cc_srcT = tcg_temp_local_new(); gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE; dc->is_jmp = DISAS_NEXT; pc_ptr = pc_start; lj = -1; num_insns = 0; max_insns = tb->cflags & CF_COUNT_MASK; if (max_insns == 0) max_insns = CF_COUNT_MASK; gen_tb_start(); for(;;) { if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) { QTAILQ_FOREACH(bp, &cs->breakpoints, entry) { if (bp->pc == pc_ptr && !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) { gen_debug(dc, pc_ptr - dc->cs_base); goto done_generating; } } } if (search_pc) { j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; if (lj < j) { lj++; while (lj < j) tcg_ctx.gen_opc_instr_start[lj++] = 0; } tcg_ctx.gen_opc_pc[lj] = pc_ptr; gen_opc_cc_op[lj] = dc->cc_op; tcg_ctx.gen_opc_instr_start[lj] = 1; tcg_ctx.gen_opc_icount[lj] = num_insns; } if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) gen_io_start(); pc_ptr = disas_insn(env, dc, pc_ptr); num_insns++; /* stop translation if indicated */ if (dc->is_jmp) break; /* if single step mode, we generate only one instruction and generate an exception */ /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear the flag and abort the translation to give the irqs a change to be happen */ if (dc->tf || dc->singlestep_enabled || (flags & HF_INHIBIT_IRQ_MASK)) { gen_jmp_im(pc_ptr - dc->cs_base); gen_eob(dc); break; } /* Do not cross the boundary of the pages in icount mode, it can cause an exception. Do it only when boundary is crossed by the first instruction in the block. If current instruction already crossed the bound - it's ok, because an exception hasn't stopped this code. */ if ((tb->cflags & CF_USE_ICOUNT) && ((pc_ptr & TARGET_PAGE_MASK) != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK) || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) { gen_jmp_im(pc_ptr - dc->cs_base); gen_eob(dc); break; } /* if too long translation, stop generation too */ if (tcg_ctx.gen_opc_ptr >= gen_opc_end || (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) || num_insns >= max_insns) { gen_jmp_im(pc_ptr - dc->cs_base); gen_eob(dc); break; } if (singlestep) { gen_jmp_im(pc_ptr - dc->cs_base); gen_eob(dc); break; } } if (tb->cflags & CF_LAST_IO) gen_io_end(); done_generating: gen_tb_end(tb, num_insns); *tcg_ctx.gen_opc_ptr = INDEX_op_end; /* we don't forget to fill the last values */ if (search_pc) { j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf; lj++; while (lj <= j) tcg_ctx.gen_opc_instr_start[lj++] = 0; } #ifdef DEBUG_DISAS if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { int disas_flags; qemu_log("----------------\n"); qemu_log("IN: %s\n", lookup_symbol(pc_start)); #ifdef TARGET_X86_64 if (dc->code64) disas_flags = 2; else #endif disas_flags = !dc->code32; log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags); qemu_log("\n"); } #endif if (!search_pc) { tb->size = pc_ptr - pc_start; tb->icount = num_insns; } }
17,262
qemu
4a1418e07bdcfaa3177739e04707ecaec75d89e1
0
CPUX86State *cpu_x86_init(const char *cpu_model) { CPUX86State *env; static int inited; env = qemu_mallocz(sizeof(CPUX86State)); cpu_exec_init(env); env->cpu_model_str = cpu_model; /* init various static tables */ if (!inited) { inited = 1; optimize_flags_init(); #ifndef CONFIG_USER_ONLY prev_debug_excp_handler = cpu_set_debug_excp_handler(breakpoint_handler); #endif } if (cpu_x86_register(env, cpu_model) < 0) { cpu_x86_close(env); return NULL; } mce_init(env); cpu_reset(env); #ifdef CONFIG_KQEMU kqemu_init(env); #endif qemu_init_vcpu(env); return env; }
17,263
qemu
4ec60c76d5ab513e375f17b043d2b9cb849adf6c
0
static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { X86CPU *cpu = X86_CPU(obj); DeviceState *dev = DEVICE(obj); const int64_t min = 0; const int64_t max = UINT32_MAX; Error *error = NULL; int64_t value; if (dev->realized) { error_setg(errp, "Attempt to set property '%s' on '%s' after " "it was realized", name, object_get_typename(obj)); return; } visit_type_int(v, name, &value, &error); if (error) { error_propagate(errp, error); return; } if (value < min || value > max) { error_setg(errp, "Property %s.%s doesn't take value %" PRId64 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" , object_get_typename(obj), name, value, min, max); return; } if ((value != cpu->apic_id) && cpu_exists(value)) { error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value); return; } cpu->apic_id = value; }
17,264
qemu
eabb7b91b36b202b4dac2df2d59d698e3aff197a
0
static bool tcg_out_opc_jmp(TCGContext *s, MIPSInsn opc, void *target) { uintptr_t dest = (uintptr_t)target; uintptr_t from = (uintptr_t)s->code_ptr + 4; int32_t inst; /* The pc-region branch happens within the 256MB region of the delay slot (thus the +4). */ if ((from ^ dest) & -(1 << 28)) { return false; } assert((dest & 3) == 0); inst = opc; inst |= (dest >> 2) & 0x3ffffff; tcg_out32(s, inst); return true; }
17,265
qemu
4295e15aa730a95003a3639d6dad2eb1e65a59e2
0
void qemu_spice_create_primary_surface(SimpleSpiceDisplay *ssd, uint32_t id, QXLDevSurfaceCreate *surface, qxl_async_io async) { if (async != QXL_SYNC) { #if SPICE_INTERFACE_QXL_MINOR >= 1 spice_qxl_create_primary_surface_async(&ssd->qxl, id, surface, 0); #else abort(); #endif } else { ssd->worker->create_primary_surface(ssd->worker, id, surface); } }
17,268
qemu
cdbf6e165988ab9d7c01da03b9e27bb8ac0c76aa
0
static gboolean fd_chr_read(GIOChannel *chan, GIOCondition cond, void *opaque) { CharDriverState *chr = opaque; FDCharDriver *s = chr->opaque; int len; uint8_t buf[READ_BUF_LEN]; GIOStatus status; gsize bytes_read; len = sizeof(buf); if (len > s->max_size) { len = s->max_size; } if (len == 0) { return FALSE; } status = g_io_channel_read_chars(chan, (gchar *)buf, len, &bytes_read, NULL); if (status == G_IO_STATUS_EOF) { qemu_chr_be_event(chr, CHR_EVENT_CLOSED); return FALSE; } if (status == G_IO_STATUS_NORMAL) { qemu_chr_be_write(chr, buf, bytes_read); } return TRUE; }
17,269
FFmpeg
2d15554850799346472683b4a2df05878dcfad48
0
static int mlp_parse(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size) { MLPParseContext *mp = s->priv_data; int sync_present; uint8_t parity_bits; int next; int i, p = 0; *poutbuf_size = 0; if (buf_size == 0) return 0; if (!mp->in_sync) { // Not in sync - find a major sync header for (i = 0; i < buf_size; i++) { mp->pc.state = (mp->pc.state << 8) | buf[i]; if ((mp->pc.state & 0xfffffffe) == 0xf8726fba && // ignore if we do not have the data for the start of header mp->pc.index + i >= 7) { mp->in_sync = 1; mp->bytes_left = 0; break; } } if (!mp->in_sync) { if (ff_combine_frame(&mp->pc, END_NOT_FOUND, &buf, &buf_size) != -1) av_log(avctx, AV_LOG_WARNING, "ff_combine_frame failed\n"); return buf_size; } ff_combine_frame(&mp->pc, i - 7, &buf, &buf_size); return i - 7; } if (mp->bytes_left == 0) { // Find length of this packet /* Copy overread bytes from last frame into buffer. */ for(; mp->pc.overread>0; mp->pc.overread--) { mp->pc.buffer[mp->pc.index++]= mp->pc.buffer[mp->pc.overread_index++]; } if (mp->pc.index + buf_size < 2) { if (ff_combine_frame(&mp->pc, END_NOT_FOUND, &buf, &buf_size) != -1) av_log(avctx, AV_LOG_WARNING, "ff_combine_frame failed\n"); return buf_size; } mp->bytes_left = ((mp->pc.index > 0 ? mp->pc.buffer[0] : buf[0]) << 8) | (mp->pc.index > 1 ? mp->pc.buffer[1] : buf[1-mp->pc.index]); mp->bytes_left = (mp->bytes_left & 0xfff) * 2; if (mp->bytes_left <= 0) { // prevent infinite loop goto lost_sync; } mp->bytes_left -= mp->pc.index; } next = (mp->bytes_left > buf_size) ? END_NOT_FOUND : mp->bytes_left; if (ff_combine_frame(&mp->pc, next, &buf, &buf_size) < 0) { mp->bytes_left -= buf_size; return buf_size; } mp->bytes_left = 0; sync_present = (AV_RB32(buf + 4) & 0xfffffffe) == 0xf8726fba; if (!sync_present) { /* The first nibble of a frame is a parity check of the 4-byte * access unit header and all the 2- or 4-byte substream headers. */ // Only check when this isn't a sync frame - syncs have a checksum. 
parity_bits = 0; for (i = -1; i < mp->num_substreams; i++) { parity_bits ^= buf[p++]; parity_bits ^= buf[p++]; if (i < 0 || buf[p-2] & 0x80) { parity_bits ^= buf[p++]; parity_bits ^= buf[p++]; } } if ((((parity_bits >> 4) ^ parity_bits) & 0xF) != 0xF) { av_log(avctx, AV_LOG_INFO, "mlpparse: Parity check failed.\n"); goto lost_sync; } } else { GetBitContext gb; MLPHeaderInfo mh; init_get_bits(&gb, buf + 4, (buf_size - 4) << 3); if (ff_mlp_read_major_sync(avctx, &mh, &gb) < 0) goto lost_sync; avctx->bits_per_raw_sample = mh.group1_bits; if (avctx->bits_per_raw_sample > 16) avctx->sample_fmt = AV_SAMPLE_FMT_S32; else avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_rate = mh.group1_samplerate; s->duration = mh.access_unit_size; if(!avctx->channels || !avctx->channel_layout) { if (mh.stream_type == 0xbb) { /* MLP stream */ #if FF_API_REQUEST_CHANNELS FF_DISABLE_DEPRECATION_WARNINGS if (avctx->request_channels > 0 && avctx->request_channels <= 2 && mh.num_substreams > 1) { avctx->channels = 2; avctx->channel_layout = AV_CH_LAYOUT_STEREO; FF_ENABLE_DEPRECATION_WARNINGS } else #endif if (avctx->request_channel_layout && (avctx->request_channel_layout & AV_CH_LAYOUT_STEREO) == avctx->request_channel_layout && mh.num_substreams > 1) { avctx->channels = 2; avctx->channel_layout = AV_CH_LAYOUT_STEREO; } else { avctx->channels = mh.channels_mlp; avctx->channel_layout = mh.channel_layout_mlp; } } else { /* mh.stream_type == 0xba */ /* TrueHD stream */ #if FF_API_REQUEST_CHANNELS FF_DISABLE_DEPRECATION_WARNINGS if (avctx->request_channels > 0 && avctx->request_channels <= 2 && mh.num_substreams > 1) { avctx->channels = 2; avctx->channel_layout = AV_CH_LAYOUT_STEREO; } else if (avctx->request_channels > 0 && avctx->request_channels <= mh.channels_thd_stream1) { avctx->channels = mh.channels_thd_stream1; avctx->channel_layout = mh.channel_layout_thd_stream1; FF_ENABLE_DEPRECATION_WARNINGS } else #endif if (avctx->request_channel_layout && (avctx->request_channel_layout & AV_CH_LAYOUT_STEREO) == avctx->request_channel_layout && mh.num_substreams > 1) { avctx->channels = 2; avctx->channel_layout = AV_CH_LAYOUT_STEREO; } else if (!mh.channels_thd_stream2 || (avctx->request_channel_layout && (avctx->request_channel_layout & mh.channel_layout_thd_stream1) == avctx->request_channel_layout)) { avctx->channels = mh.channels_thd_stream1; avctx->channel_layout = mh.channel_layout_thd_stream1; } else { avctx->channels = mh.channels_thd_stream2; avctx->channel_layout = mh.channel_layout_thd_stream2; } } } if (!mh.is_vbr) /* Stream is CBR */ avctx->bit_rate = mh.peak_bitrate; mp->num_substreams = mh.num_substreams; } *poutbuf = buf; *poutbuf_size = buf_size; return next; lost_sync: mp->in_sync = 0; return 1; }
17,270
qemu
77524d1157cf7c18b980c9d6f95879f2ce7e56e2
0
static void gem_transmit(CadenceGEMState *s) { unsigned desc[2]; hwaddr packet_desc_addr; uint8_t tx_packet[2048]; uint8_t *p; unsigned total_bytes; int q = 0; /* Do nothing if transmit is not enabled. */ if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) { return; } DB_PRINT("\n"); /* The packet we will hand off to QEMU. * Packets scattered across multiple descriptors are gathered to this * one contiguous buffer first. */ p = tx_packet; total_bytes = 0; for (q = s->num_priority_queues - 1; q >= 0; q--) { /* read current descriptor */ packet_desc_addr = s->tx_desc_addr[q]; DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr); cpu_physical_memory_read(packet_desc_addr, (uint8_t *)desc, sizeof(desc)); /* Handle all descriptors owned by hardware */ while (tx_desc_get_used(desc) == 0) { /* Do nothing if transmit is not enabled. */ if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) { return; } print_gem_tx_desc(desc, q); /* The real hardware would eat this (and possibly crash). * For QEMU let's lend a helping hand. */ if ((tx_desc_get_buffer(desc) == 0) || (tx_desc_get_length(desc) == 0)) { DB_PRINT("Invalid TX descriptor @ 0x%x\n", (unsigned)packet_desc_addr); break; } if (tx_desc_get_length(desc) > sizeof(tx_packet) - (p - tx_packet)) { DB_PRINT("TX descriptor @ 0x%x too large: size 0x%x space 0x%x\n", (unsigned)packet_desc_addr, (unsigned)tx_desc_get_length(desc), sizeof(tx_packet) - (p - tx_packet)); break; } /* Gather this fragment of the packet from "dma memory" to our contig. * buffer. */ cpu_physical_memory_read(tx_desc_get_buffer(desc), p, tx_desc_get_length(desc)); p += tx_desc_get_length(desc); total_bytes += tx_desc_get_length(desc); /* Last descriptor for this packet; hand the whole thing off */ if (tx_desc_get_last(desc)) { unsigned desc_first[2]; /* Modify the 1st descriptor of this packet to be owned by * the processor. */ cpu_physical_memory_read(s->tx_desc_addr[q], (uint8_t *)desc_first, sizeof(desc_first)); tx_desc_set_used(desc_first); cpu_physical_memory_write(s->tx_desc_addr[q], (uint8_t *)desc_first, sizeof(desc_first)); /* Advance the hardware current descriptor past this packet */ if (tx_desc_get_wrap(desc)) { s->tx_desc_addr[q] = s->regs[GEM_TXQBASE]; } else { s->tx_desc_addr[q] = packet_desc_addr + 8; } DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]); s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_TXCMPL; s->regs[GEM_ISR] |= GEM_INT_TXCMPL & ~(s->regs[GEM_IMR]); /* Update queue interrupt status */ if (s->num_priority_queues > 1) { s->regs[GEM_INT_Q1_STATUS + q] |= GEM_INT_TXCMPL & ~(s->regs[GEM_INT_Q1_MASK + q]); } /* Handle interrupt consequences */ gem_update_int_status(s); /* Is checksum offload enabled? 
*/ if (s->regs[GEM_DMACFG] & GEM_DMACFG_TXCSUM_OFFL) { net_checksum_calculate(tx_packet, total_bytes); } /* Update MAC statistics */ gem_transmit_updatestats(s, tx_packet, total_bytes); /* Send the packet somewhere */ if (s->phy_loop || (s->regs[GEM_NWCTRL] & GEM_NWCTRL_LOCALLOOP)) { gem_receive(qemu_get_queue(s->nic), tx_packet, total_bytes); } else { qemu_send_packet(qemu_get_queue(s->nic), tx_packet, total_bytes); } /* Prepare for next packet */ p = tx_packet; total_bytes = 0; } /* read next descriptor */ if (tx_desc_get_wrap(desc)) { tx_desc_set_last(desc); packet_desc_addr = s->regs[GEM_TXQBASE]; } else { packet_desc_addr += 8; } DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr); cpu_physical_memory_read(packet_desc_addr, (uint8_t *)desc, sizeof(desc)); } if (tx_desc_get_used(desc)) { s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_USED; s->regs[GEM_ISR] |= GEM_INT_TXUSED & ~(s->regs[GEM_IMR]); gem_update_int_status(s); } } }
17,271
qemu
6502a14734e71b2f6dd079b0a1e546e6aa2d2f8d
0
static int qemu_balloon_status(BalloonInfo *info) { if (!balloon_stat_fn) { return 0; } balloon_stat_fn(balloon_opaque, info); return 1; }
17,272
qemu
f2d953ec31eeeb3029ca915a55938c538a14efa8
0
static DriveInfo *blockdev_init(const char *file, QDict *bs_opts, Error **errp) { const char *buf; const char *serial; int ro = 0; int bdrv_flags = 0; int on_read_error, on_write_error; DriveInfo *dinfo; ThrottleConfig cfg; int snapshot = 0; bool copy_on_read; int ret; Error *error = NULL; QemuOpts *opts; const char *id; bool has_driver_specific_opts; BlockDriver *drv = NULL; /* Check common options by copying from bs_opts to opts, all other options * stay in bs_opts for processing by bdrv_open(). */ id = qdict_get_try_str(bs_opts, "id"); opts = qemu_opts_create(&qemu_common_drive_opts, id, 1, &error); if (error) { error_propagate(errp, error); return NULL; } qemu_opts_absorb_qdict(opts, bs_opts, &error); if (error) { error_propagate(errp, error); goto early_err; } if (id) { qdict_del(bs_opts, "id"); } has_driver_specific_opts = !!qdict_size(bs_opts); /* extract parameters */ snapshot = qemu_opt_get_bool(opts, "snapshot", 0); ro = qemu_opt_get_bool(opts, "read-only", 0); copy_on_read = qemu_opt_get_bool(opts, "copy-on-read", false); serial = qemu_opt_get(opts, "serial"); if ((buf = qemu_opt_get(opts, "discard")) != NULL) { if (bdrv_parse_discard_flags(buf, &bdrv_flags) != 0) { error_setg(errp, "invalid discard option"); goto early_err; } } if (qemu_opt_get_bool(opts, "cache.writeback", true)) { bdrv_flags |= BDRV_O_CACHE_WB; } if (qemu_opt_get_bool(opts, "cache.direct", false)) { bdrv_flags |= BDRV_O_NOCACHE; } if (qemu_opt_get_bool(opts, "cache.no-flush", false)) { bdrv_flags |= BDRV_O_NO_FLUSH; } #ifdef CONFIG_LINUX_AIO if ((buf = qemu_opt_get(opts, "aio")) != NULL) { if (!strcmp(buf, "native")) { bdrv_flags |= BDRV_O_NATIVE_AIO; } else if (!strcmp(buf, "threads")) { /* this is the default */ } else { error_setg(errp, "invalid aio option"); goto early_err; } } #endif if ((buf = qemu_opt_get(opts, "format")) != NULL) { if (is_help_option(buf)) { error_printf("Supported formats:"); bdrv_iterate_format(bdrv_format_print, NULL); error_printf("\n"); goto early_err; } drv = bdrv_find_format(buf); if (!drv) { error_setg(errp, "'%s' invalid format", buf); goto early_err; } } /* disk I/O throttling */ memset(&cfg, 0, sizeof(cfg)); cfg.buckets[THROTTLE_BPS_TOTAL].avg = qemu_opt_get_number(opts, "throttling.bps-total", 0); cfg.buckets[THROTTLE_BPS_READ].avg = qemu_opt_get_number(opts, "throttling.bps-read", 0); cfg.buckets[THROTTLE_BPS_WRITE].avg = qemu_opt_get_number(opts, "throttling.bps-write", 0); cfg.buckets[THROTTLE_OPS_TOTAL].avg = qemu_opt_get_number(opts, "throttling.iops-total", 0); cfg.buckets[THROTTLE_OPS_READ].avg = qemu_opt_get_number(opts, "throttling.iops-read", 0); cfg.buckets[THROTTLE_OPS_WRITE].avg = qemu_opt_get_number(opts, "throttling.iops-write", 0); cfg.buckets[THROTTLE_BPS_TOTAL].max = qemu_opt_get_number(opts, "throttling.bps-total-max", 0); cfg.buckets[THROTTLE_BPS_READ].max = qemu_opt_get_number(opts, "throttling.bps-read-max", 0); cfg.buckets[THROTTLE_BPS_WRITE].max = qemu_opt_get_number(opts, "throttling.bps-write-max", 0); cfg.buckets[THROTTLE_OPS_TOTAL].max = qemu_opt_get_number(opts, "throttling.iops-total-max", 0); cfg.buckets[THROTTLE_OPS_READ].max = qemu_opt_get_number(opts, "throttling.iops-read-max", 0); cfg.buckets[THROTTLE_OPS_WRITE].max = qemu_opt_get_number(opts, "throttling.iops-write-max", 0); cfg.op_size = qemu_opt_get_number(opts, "throttling.iops-size", 0); if (!check_throttle_config(&cfg, &error)) { error_propagate(errp, error); goto early_err; } on_write_error = BLOCKDEV_ON_ERROR_ENOSPC; if ((buf = qemu_opt_get(opts, "werror")) != NULL) { 
on_write_error = parse_block_error_action(buf, 0, &error); if (error) { error_propagate(errp, error); goto early_err; } } on_read_error = BLOCKDEV_ON_ERROR_REPORT; if ((buf = qemu_opt_get(opts, "rerror")) != NULL) { on_read_error = parse_block_error_action(buf, 1, &error); if (error) { error_propagate(errp, error); goto early_err; } } if (bdrv_find_node(qemu_opts_id(opts))) { error_setg(errp, "device id=%s is conflicting with a node-name", qemu_opts_id(opts)); goto early_err; } /* init */ dinfo = g_malloc0(sizeof(*dinfo)); dinfo->id = g_strdup(qemu_opts_id(opts)); dinfo->bdrv = bdrv_new(dinfo->id, &error); if (error) { error_propagate(errp, error); goto bdrv_new_err; } dinfo->bdrv->open_flags = snapshot ? BDRV_O_SNAPSHOT : 0; dinfo->bdrv->read_only = ro; dinfo->refcount = 1; if (serial != NULL) { dinfo->serial = g_strdup(serial); } QTAILQ_INSERT_TAIL(&drives, dinfo, next); bdrv_set_on_error(dinfo->bdrv, on_read_error, on_write_error); /* disk I/O throttling */ if (throttle_enabled(&cfg)) { bdrv_io_limits_enable(dinfo->bdrv); bdrv_set_io_limits(dinfo->bdrv, &cfg); } if (!file || !*file) { if (has_driver_specific_opts) { file = NULL; } else { QDECREF(bs_opts); qemu_opts_del(opts); return dinfo; } } if (snapshot) { /* always use cache=unsafe with snapshot */ bdrv_flags &= ~BDRV_O_CACHE_MASK; bdrv_flags |= (BDRV_O_SNAPSHOT|BDRV_O_CACHE_WB|BDRV_O_NO_FLUSH); } if (copy_on_read) { bdrv_flags |= BDRV_O_COPY_ON_READ; } if (runstate_check(RUN_STATE_INMIGRATE)) { bdrv_flags |= BDRV_O_INCOMING; } bdrv_flags |= ro ? 0 : BDRV_O_RDWR; QINCREF(bs_opts); ret = bdrv_open(&dinfo->bdrv, file, NULL, bs_opts, bdrv_flags, drv, &error); if (ret < 0) { error_setg(errp, "could not open disk image %s: %s", file ?: dinfo->id, error_get_pretty(error)); error_free(error); goto err; } if (bdrv_key_required(dinfo->bdrv)) autostart = 0; QDECREF(bs_opts); qemu_opts_del(opts); return dinfo; err: bdrv_unref(dinfo->bdrv); QTAILQ_REMOVE(&drives, dinfo, next); bdrv_new_err: g_free(dinfo->id); g_free(dinfo); early_err: QDECREF(bs_opts); qemu_opts_del(opts); return NULL; }
17,273
qemu
9bc3a3a216e2689bfcdd36c3e079333bbdbf3ba0
0
static void ehci_advance_periodic_state(EHCIState *ehci) { uint32_t entry; uint32_t list; const int async = 0; // 4.6 switch(ehci_get_state(ehci, async)) { case EST_INACTIVE: if (!(ehci->frindex & 7) && ehci_periodic_enabled(ehci)) { ehci_set_state(ehci, async, EST_ACTIVE); // No break, fall through to ACTIVE } else break; case EST_ACTIVE: if (!(ehci->frindex & 7) && !ehci_periodic_enabled(ehci)) { ehci_queues_rip_all(ehci, async); ehci_set_state(ehci, async, EST_INACTIVE); break; } list = ehci->periodiclistbase & 0xfffff000; /* check that register has been set */ if (list == 0) { break; } list |= ((ehci->frindex & 0x1ff8) >> 1); pci_dma_read(&ehci->dev, list, &entry, sizeof entry); entry = le32_to_cpu(entry); DPRINTF("PERIODIC state adv fr=%d. [%08X] -> %08X\n", ehci->frindex / 8, list, entry); ehci_set_fetch_addr(ehci, async,entry); ehci_set_state(ehci, async, EST_FETCHENTRY); ehci_advance_state(ehci, async); ehci_queues_rip_unused(ehci, async, 0); break; default: /* this should only be due to a developer mistake */ fprintf(stderr, "ehci: Bad periodic state %d. " "Resetting to active\n", ehci->pstate); assert(0); } }
17,275
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static void uart_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { LM32UartState *s = opaque; unsigned char ch = value; trace_lm32_uart_memory_write(addr, value); addr >>= 2; switch (addr) { case R_RXTX: if (s->chr) { qemu_chr_fe_write(s->chr, &ch, 1); } break; case R_IER: case R_LCR: case R_MCR: case R_DIV: s->regs[addr] = value; break; case R_IIR: case R_LSR: case R_MSR: error_report("lm32_uart: write access to read only register 0x" TARGET_FMT_plx, addr << 2); break; default: error_report("lm32_uart: write access to unknown register 0x" TARGET_FMT_plx, addr << 2); break; } uart_update_irq(s); }
17,276
qemu
29ddf27b72960d6e6b115cd69812c9c57b2a7b13
1
static int oss_open (int in, struct oss_params *req,
                     struct oss_params *obt, int *pfd)
{
    int fd;
    int mmmmssss;
    audio_buf_info abinfo;
    int fmt, freq, nchannels;
    const char *dspname = in ? conf.devpath_in : conf.devpath_out;
    const char *typ = in ? "ADC" : "DAC";

    fd = open (dspname, (in ? O_RDONLY : O_WRONLY) | O_NONBLOCK);
    if (-1 == fd) {
        oss_logerr2 (errno, typ, "Failed to open `%s'\n", dspname);
        return -1;
    }

    freq = req->freq;
    nchannels = req->nchannels;
    fmt = req->fmt;

    if (ioctl (fd, SNDCTL_DSP_SAMPLESIZE, &fmt)) {
        oss_logerr2 (errno, typ, "Failed to set sample size %d\n", req->fmt);
        goto err;
    }

    if (ioctl (fd, SNDCTL_DSP_CHANNELS, &nchannels)) {
        oss_logerr2 (errno, typ, "Failed to set number of channels %d\n",
                     req->nchannels);
        goto err;
    }

    if (ioctl (fd, SNDCTL_DSP_SPEED, &freq)) {
        oss_logerr2 (errno, typ, "Failed to set frequency %d\n", req->freq);
        goto err;
    }

    if (ioctl (fd, SNDCTL_DSP_NONBLOCK)) {
        oss_logerr2 (errno, typ, "Failed to set non-blocking mode\n");
        goto err;
    }

    mmmmssss = (req->nfrags << 16) | lsbindex (req->fragsize);
    if (ioctl (fd, SNDCTL_DSP_SETFRAGMENT, &mmmmssss)) {
        oss_logerr2 (errno, typ, "Failed to set buffer length (%d, %d)\n",
                     req->nfrags, req->fragsize);
        goto err;
    }

    if (ioctl (fd, in ? SNDCTL_DSP_GETISPACE : SNDCTL_DSP_GETOSPACE, &abinfo)) {
        oss_logerr2 (errno, typ, "Failed to get buffer length\n");
        goto err;
    }

    obt->fmt = fmt;
    obt->nchannels = nchannels;
    obt->freq = freq;
    obt->nfrags = abinfo.fragstotal;
    obt->fragsize = abinfo.fragsize;
    *pfd = fd;

#ifdef DEBUG_MISMATCHES
    if ((req->fmt != obt->fmt) ||
        (req->nchannels != obt->nchannels) ||
        (req->freq != obt->freq) ||
        (req->fragsize != obt->fragsize) ||
        (req->nfrags != obt->nfrags)) {
        dolog ("Audio parameters mismatch\n");
        oss_dump_info (req, obt);
    }
#endif

#ifdef DEBUG
    oss_dump_info (req, obt);
#endif
    return 0;

 err:
    oss_anal_close (&fd);
    return -1;
}
17,277
FFmpeg
7ab631261033a71a52563c3b23b6eef826eb5994
1
static void draw_rectangle(unsigned val, uint8_t *dst, int dst_linesize,
                           unsigned segment_width, unsigned x, unsigned y,
                           unsigned w, unsigned h)
{
    int i;
    int step = 3;

    dst += segment_width * (step * x + y * dst_linesize);
    w *= segment_width * step;
    h *= segment_width;
    for (i = 0; i < h; i++) {
        memset(dst, val, w);
        dst += dst_linesize;
    }
}
17,278
qemu
15fa08f8451babc88d733bd411d4c94976f9d0f8
1
static inline TCGOp *tcg_emit_op(TCGOpcode opc)
{
    TCGContext *ctx = tcg_ctx;
    int oi = ctx->gen_next_op_idx;
    int ni = oi + 1;
    int pi = oi - 1;
    TCGOp *op = &ctx->gen_op_buf[oi];

    tcg_debug_assert(oi < OPC_BUF_SIZE);
    ctx->gen_op_buf[0].prev = oi;
    ctx->gen_next_op_idx = ni;

    memset(op, 0, offsetof(TCGOp, args));
    op->opc = opc;
    op->prev = pi;
    op->next = ni;

    return op;
}
17,279
qemu
f8ed85ac992c48814d916d5df4d44f9a971c5de4
1
static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) { XlnxZynqMPState *s = XLNX_ZYNQMP(dev); MemoryRegion *system_memory = get_system_memory(); uint8_t i; const char *boot_cpu = s->boot_cpu ? s->boot_cpu : "apu-cpu[0]"; qemu_irq gic_spi[GIC_NUM_SPI_INTR]; Error *err = NULL; /* Create the four OCM banks */ for (i = 0; i < XLNX_ZYNQMP_NUM_OCM_BANKS; i++) { char *ocm_name = g_strdup_printf("zynqmp.ocm_ram_bank_%d", i); memory_region_init_ram(&s->ocm_ram[i], NULL, ocm_name, XLNX_ZYNQMP_OCM_RAM_SIZE, &error_abort); vmstate_register_ram_global(&s->ocm_ram[i]); memory_region_add_subregion(get_system_memory(), XLNX_ZYNQMP_OCM_RAM_0_ADDRESS + i * XLNX_ZYNQMP_OCM_RAM_SIZE, &s->ocm_ram[i]); g_free(ocm_name); } qdev_prop_set_uint32(DEVICE(&s->gic), "num-irq", GIC_NUM_SPI_INTR + 32); qdev_prop_set_uint32(DEVICE(&s->gic), "revision", 2); qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", XLNX_ZYNQMP_NUM_APU_CPUS); object_property_set_bool(OBJECT(&s->gic), true, "realized", &err); if (err) { error_propagate(errp, err); return; } assert(ARRAY_SIZE(xlnx_zynqmp_gic_regions) == XLNX_ZYNQMP_GIC_REGIONS); for (i = 0; i < XLNX_ZYNQMP_GIC_REGIONS; i++) { SysBusDevice *gic = SYS_BUS_DEVICE(&s->gic); const XlnxZynqMPGICRegion *r = &xlnx_zynqmp_gic_regions[i]; MemoryRegion *mr = sysbus_mmio_get_region(gic, r->region_index); uint32_t addr = r->address; int j; sysbus_mmio_map(gic, r->region_index, addr); for (j = 0; j < XLNX_ZYNQMP_GIC_ALIASES; j++) { MemoryRegion *alias = &s->gic_mr[i][j]; addr += XLNX_ZYNQMP_GIC_REGION_SIZE; memory_region_init_alias(alias, OBJECT(s), "zynqmp-gic-alias", mr, 0, XLNX_ZYNQMP_GIC_REGION_SIZE); memory_region_add_subregion(system_memory, addr, alias); } } for (i = 0; i < XLNX_ZYNQMP_NUM_APU_CPUS; i++) { qemu_irq irq; char *name; object_property_set_int(OBJECT(&s->apu_cpu[i]), QEMU_PSCI_CONDUIT_SMC, "psci-conduit", &error_abort); name = object_get_canonical_path_component(OBJECT(&s->apu_cpu[i])); if (strcmp(name, boot_cpu)) { /* Secondary CPUs start in PSCI powered-down state */ object_property_set_bool(OBJECT(&s->apu_cpu[i]), true, "start-powered-off", &error_abort); } else { s->boot_cpu_ptr = &s->apu_cpu[i]; } g_free(name); object_property_set_int(OBJECT(&s->apu_cpu[i]), GIC_BASE_ADDR, "reset-cbar", &error_abort); object_property_set_bool(OBJECT(&s->apu_cpu[i]), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i, qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]), ARM_CPU_IRQ)); irq = qdev_get_gpio_in(DEVICE(&s->gic), arm_gic_ppi_index(i, ARM_PHYS_TIMER_PPI)); qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), 0, irq); irq = qdev_get_gpio_in(DEVICE(&s->gic), arm_gic_ppi_index(i, ARM_VIRT_TIMER_PPI)); qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), 1, irq); } for (i = 0; i < XLNX_ZYNQMP_NUM_RPU_CPUS; i++) { char *name; name = object_get_canonical_path_component(OBJECT(&s->rpu_cpu[i])); if (strcmp(name, boot_cpu)) { /* Secondary CPUs start in PSCI powered-down state */ object_property_set_bool(OBJECT(&s->rpu_cpu[i]), true, "start-powered-off", &error_abort); } else { s->boot_cpu_ptr = &s->rpu_cpu[i]; } g_free(name); object_property_set_bool(OBJECT(&s->rpu_cpu[i]), true, "reset-hivecs", &error_abort); object_property_set_bool(OBJECT(&s->rpu_cpu[i]), true, "realized", &err); if (err) { error_propagate(errp, err); return; } } if (!s->boot_cpu_ptr) { error_setg(errp, "ZynqMP Boot cpu %s not found\n", boot_cpu); return; } for (i = 0; i < GIC_NUM_SPI_INTR; i++) { gic_spi[i] = qdev_get_gpio_in(DEVICE(&s->gic), i); } for (i = 0; i < 
XLNX_ZYNQMP_NUM_GEMS; i++) { NICInfo *nd = &nd_table[i]; if (nd->used) { qemu_check_nic_model(nd, TYPE_CADENCE_GEM); qdev_set_nic_properties(DEVICE(&s->gem[i]), nd); } object_property_set_bool(OBJECT(&s->gem[i]), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->gem[i]), 0, gem_addr[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gem[i]), 0, gic_spi[gem_intr[i]]); } for (i = 0; i < XLNX_ZYNQMP_NUM_UARTS; i++) { object_property_set_bool(OBJECT(&s->uart[i]), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->uart[i]), 0, uart_addr[i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, gic_spi[uart_intr[i]]); } object_property_set_int(OBJECT(&s->sata), SATA_NUM_PORTS, "num-ports", &error_abort); object_property_set_bool(OBJECT(&s->sata), true, "realized", &err); if (err) { error_propagate(errp, err); return; } sysbus_mmio_map(SYS_BUS_DEVICE(&s->sata), 0, SATA_ADDR); sysbus_connect_irq(SYS_BUS_DEVICE(&s->sata), 0, gic_spi[SATA_INTR]); }
17,281
qemu
b106ad9185f35fc4ad669555ad0e79e276083bd7
1
static int qcow2_create2(const char *filename, int64_t total_size, const char *backing_file, const char *backing_format, int flags, size_t cluster_size, int prealloc, QEMUOptionParameter *options, int version, Error **errp) { /* Calculate cluster_bits */ int cluster_bits; cluster_bits = ffs(cluster_size) - 1; if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || (1 << cluster_bits) != cluster_size) { error_setg(errp, "Cluster size must be a power of two between %d and " "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); return -EINVAL; } /* * Open the image file and write a minimal qcow2 header. * * We keep things simple and start with a zero-sized image. We also * do without refcount blocks or a L1 table for now. We'll fix the * inconsistency later. * * We do need a refcount table because growing the refcount table means * allocating two new refcount blocks - the seconds of which would be at * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file * size for any qcow2 image. */ BlockDriverState* bs; QCowHeader *header; uint8_t* refcount_table; Error *local_err = NULL; int ret; ret = bdrv_create_file(filename, options, &local_err); if (ret < 0) { error_propagate(errp, local_err); return ret; } bs = NULL; ret = bdrv_open(&bs, filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, NULL, &local_err); if (ret < 0) { error_propagate(errp, local_err); return ret; } /* Write the header */ QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); header = g_malloc0(cluster_size); *header = (QCowHeader) { .magic = cpu_to_be32(QCOW_MAGIC), .version = cpu_to_be32(version), .cluster_bits = cpu_to_be32(cluster_bits), .size = cpu_to_be64(0), .l1_table_offset = cpu_to_be64(0), .l1_size = cpu_to_be32(0), .refcount_table_offset = cpu_to_be64(cluster_size), .refcount_table_clusters = cpu_to_be32(1), .refcount_order = cpu_to_be32(3 + REFCOUNT_SHIFT), .header_length = cpu_to_be32(sizeof(*header)), }; if (flags & BLOCK_FLAG_ENCRYPT) { header->crypt_method = cpu_to_be32(QCOW_CRYPT_AES); } else { header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); } if (flags & BLOCK_FLAG_LAZY_REFCOUNTS) { header->compatible_features |= cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); } ret = bdrv_pwrite(bs, 0, header, cluster_size); g_free(header); if (ret < 0) { error_setg_errno(errp, -ret, "Could not write qcow2 header"); goto out; } /* Write an empty refcount table */ refcount_table = g_malloc0(cluster_size); ret = bdrv_pwrite(bs, cluster_size, refcount_table, cluster_size); g_free(refcount_table); if (ret < 0) { error_setg_errno(errp, -ret, "Could not write refcount table"); goto out; } bdrv_unref(bs); bs = NULL; /* * And now open the image and make it consistent first (i.e. 
increase the * refcount of the cluster that is occupied by the header and the refcount * table) */ BlockDriver* drv = bdrv_find_format("qcow2"); assert(drv != NULL); ret = bdrv_open(&bs, filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH, drv, &local_err); if (ret < 0) { error_propagate(errp, local_err); goto out; } ret = qcow2_alloc_clusters(bs, 2 * cluster_size); if (ret < 0) { error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " "header and refcount table"); goto out; } else if (ret != 0) { error_report("Huh, first cluster in empty image is already in use?"); abort(); } /* Okay, now that we have a valid image, let's give it the right size */ ret = bdrv_truncate(bs, total_size * BDRV_SECTOR_SIZE); if (ret < 0) { error_setg_errno(errp, -ret, "Could not resize image"); goto out; } /* Want a backing file? There you go.*/ if (backing_file) { ret = bdrv_change_backing_file(bs, backing_file, backing_format); if (ret < 0) { error_setg_errno(errp, -ret, "Could not assign backing file '%s' " "with format '%s'", backing_file, backing_format); goto out; } } /* And if we're supposed to preallocate metadata, do that now */ if (prealloc) { BDRVQcowState *s = bs->opaque; qemu_co_mutex_lock(&s->lock); ret = preallocate(bs); qemu_co_mutex_unlock(&s->lock); if (ret < 0) { error_setg_errno(errp, -ret, "Could not preallocate metadata"); goto out; } } bdrv_unref(bs); bs = NULL; /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning */ ret = bdrv_open(&bs, filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_BACKING, drv, &local_err); if (local_err) { error_propagate(errp, local_err); goto out; } ret = 0; out: if (bs) { bdrv_unref(bs); } return ret; }
17,282
FFmpeg
d8245c3bcdd162891825a52cf55e4e8173d85a18
1
static av_cold int cinvideo_decode_init(AVCodecContext *avctx)
{
    CinVideoContext *cin = avctx->priv_data;
    unsigned int i;

    cin->avctx = avctx;
    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    avcodec_get_frame_defaults(&cin->frame);
    cin->frame.data[0] = NULL;

    cin->bitmap_size = avctx->width * avctx->height;
    for (i = 0; i < 3; ++i) {
        cin->bitmap_table[i] = av_mallocz(cin->bitmap_size);
        if (!cin->bitmap_table[i])
            av_log(avctx, AV_LOG_ERROR, "Can't allocate bitmap buffers.\n");
    }

    return 0;
}
17,283
qemu
e23a1b33b53d25510320b26d9f154e19c6c99725
1
PCIBus *pci_bridge_init(PCIBus *bus, int devfn, uint16_t vid, uint16_t did,
                        pci_map_irq_fn map_irq, const char *name)
{
    PCIDevice *dev;
    PCIBridge *s;

    dev = pci_create(bus, devfn, "pci-bridge");
    qdev_prop_set_uint32(&dev->qdev, "vendorid", vid);
    qdev_prop_set_uint32(&dev->qdev, "deviceid", did);
    qdev_init(&dev->qdev);

    s = DO_UPCAST(PCIBridge, dev, dev);
    pci_register_secondary_bus(&s->bus, &s->dev, map_irq, name);
    return &s->bus;
}
17,284
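A minimal call-site sketch for the helper in record 17,284, assuming a board that wants a secondary bus behind a bridge in slot 8; the devfn, vendor/device IDs, the "pci.1" bus name, and the board_map_irq routing callback are illustrative placeholders, not taken from any QEMU board:

/* Hypothetical usage; IDs and names are placeholders. */
PCIBus *secondary = pci_bridge_init(pci_bus, PCI_DEVFN(8, 0),
                                    0x1014, 0x0028,   /* example vendor/device */
                                    board_map_irq, "pci.1");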
FFmpeg
b2460858f64b2070d84dd861d4bbd16acfb9b0e9
1
static void set_options(URLContext *h, const char *uri) { TLSContext *c = h->priv_data; char buf[1024], key[1024]; int has_cert, has_key; #if CONFIG_GNUTLS int ret; #endif const char *p = strchr(uri, '?'); if (!p) return; if (av_find_info_tag(buf, sizeof(buf), "cafile", p)) { #if CONFIG_GNUTLS ret = gnutls_certificate_set_x509_trust_file(c->cred, buf, GNUTLS_X509_FMT_PEM); if (ret < 0) av_log(h, AV_LOG_ERROR, "%s\n", gnutls_strerror(ret)); #elif CONFIG_OPENSSL if (!SSL_CTX_load_verify_locations(c->ctx, buf, NULL)) av_log(h, AV_LOG_ERROR, "SSL_CTX_load_verify_locations %s\n", ERR_error_string(ERR_get_error(), NULL)); #endif } has_cert = av_find_info_tag(buf, sizeof(buf), "cert", p); has_key = av_find_info_tag(key, sizeof(key), "key", p); #if CONFIG_GNUTLS if (has_cert && has_key) { ret = gnutls_certificate_set_x509_key_file(c->cred, buf, key, GNUTLS_X509_FMT_PEM); if (ret < 0) av_log(h, AV_LOG_ERROR, "%s\n", gnutls_strerror(ret)); } else if (has_cert ^ has_key) { av_log(h, AV_LOG_ERROR, "cert and key required\n"); } #elif CONFIG_OPENSSL if (has_cert && !SSL_CTX_use_certificate_chain_file(c->ctx, buf)) av_log(h, AV_LOG_ERROR, "SSL_CTX_use_certificate_chain_file %s\n", ERR_error_string(ERR_get_error(), NULL)); if (has_key && !SSL_CTX_use_PrivateKey_file(c->ctx, key, SSL_FILETYPE_PEM)) av_log(h, AV_LOG_ERROR, "SSL_CTX_use_PrivateKey_file %s\n", ERR_error_string(ERR_get_error(), NULL)); #endif }
17,285
qemu
c9262e8a84a29f22fbb5edde5d17f4f6166d5ae1
1
static void jump_to_IPL_code(uint64_t address) { /* store the subsystem information _after_ the bootmap was loaded */ write_subsystem_identification(); /* * The IPL PSW is at address 0. We also must not overwrite the * content of non-BIOS memory after we loaded the guest, so we * save the original content and restore it in jump_to_IPL_2. */ ResetInfo *current = 0; save = *current; current->ipl_addr = (uint32_t) (uint64_t) &jump_to_IPL_2; current->ipl_continue = address & 0x7fffffff; debug_print_int("set IPL addr to", current->ipl_continue); /* Ensure the guest output starts fresh */ sclp_print("\n"); /* * HACK ALERT. * We use the load normal reset to keep r15 unchanged. jump_to_IPL_2 * can then use r15 as its stack pointer. */ asm volatile("lghi 1,1\n\t" "diag 1,1,0x308\n\t" : : : "1", "memory"); virtio_panic("\n! IPL returns !\n"); }
17,286
qemu
848696bf353750899832c51005f1bd3540da5c29
1
static void wdt_ib700_realize(DeviceState *dev, Error **errp) { IB700State *s = IB700(dev); PortioList *port_list = g_new(PortioList, 1); ib700_debug("watchdog init\n"); s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ib700_timer_expired, s); portio_list_init(port_list, OBJECT(s), wdt_portio_list, s, "ib700"); portio_list_add(port_list, isa_address_space_io(&s->parent_obj), 0); }
17,287
FFmpeg
3438d82d4b3bd987304975961e2a42e82767107d
0
static int ffm_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    FFMContext *ffm = s->priv_data;
    AVStream *st = s->streams[pkt->stream_index];
    int64_t pts;
    uint8_t header[FRAME_HEADER_SIZE];

    pts = ffm->start_time + pkt->pts;

    /* packet size & key_frame */
    header[0] = pkt->stream_index;
    header[1] = 0;
    if (pkt->flags & PKT_FLAG_KEY)
        header[1] |= FLAG_KEY_FRAME;
    AV_WB24(header+2, pkt->size);
    AV_WB24(header+5, pkt->duration);
    ffm_write_data(s, header, FRAME_HEADER_SIZE, pts, 1);
    ffm_write_data(s, pkt->data, pkt->size, pts, 0);

    return 0;
}
17,290
FFmpeg
1a2a1d90775b5be03254d123e4b617145a269572
0
static inline void range_dec_normalize(APEContext * ctx)
{
    while (ctx->rc.range <= BOTTOM_VALUE) {
        ctx->rc.buffer = (ctx->rc.buffer << 8) | bytestream_get_byte(&ctx->ptr);
        ctx->rc.low    = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF);
        ctx->rc.range <<= 8;
    }
}
17,291
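The loop in record 17,291 is the usual carry-less range-decoder renormalisation: whenever range drops to BOTTOM_VALUE or below, one byte is pulled from the input and low, range, and the byte buffer are shifted left by 8 bits so the coding interval never collapses. A self-contained sketch of the same idea, with an assumed BOTTOM constant and a stand-in struct rather than FFmpeg's APEContext:

#include <stdint.h>

#define BOTTOM (1u << 16)            /* assumed threshold, not FFmpeg's value */

typedef struct {
    uint32_t low, range, buffer;
    const uint8_t *ptr;              /* compressed input */
} RangeDec;

static void rd_normalize(RangeDec *rc)
{
    while (rc->range <= BOTTOM) {
        rc->buffer = (rc->buffer << 8) | *rc->ptr++;
        rc->low    = (rc->low << 8) | ((rc->buffer >> 1) & 0xFF);
        rc->range <<= 8;
    }
}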
FFmpeg
3b20ed85489a14cb5028c873d06960dbc5eef88a
0
static int read_rle_sgi(uint8_t *out_buf, SgiState *s) { uint8_t *dest_row; unsigned int len = s->height * s->depth * 4; GetByteContext g_table = s->g; unsigned int y, z; unsigned int start_offset; int linesize, ret; /* size of RLE offset and length tables */ if (len * 2 > bytestream2_get_bytes_left(&s->g)) { return AVERROR_INVALIDDATA; } for (z = 0; z < s->depth; z++) { dest_row = out_buf; for (y = 0; y < s->height; y++) { linesize = s->width * s->depth * s->bytes_per_channel; dest_row -= s->linesize; start_offset = bytestream2_get_be32(&g_table); bytestream2_seek(&s->g, start_offset, SEEK_SET); if (s->bytes_per_channel == 1) ret = expand_rle_row8(s, dest_row + z, linesize, s->depth); else ret = expand_rle_row16(s, (uint16_t *)dest_row + z, linesize, s->depth); if (ret != s->width) return AVERROR_INVALIDDATA; } } return 0; }
17,292
FFmpeg
4b1f5e5090abed6c618c8ba380cd7d28d140f867
0
static int xvid_ff_2pass_destroy(struct xvid_context *ref,
                                 xvid_plg_destroy_t *param)
{
    /* Currently cannot think of anything to do on destruction */
    /* Still, the framework should be here for reference/use */
    if (ref->twopassbuffer != NULL)
        ref->twopassbuffer[0] = 0;
    return 0;
}
17,293
qemu
b307446e04232b3a87e9da04886895a8e5a4a407
1
cpu_mips_check_sign_extensions (CPUMIPSState *env, FILE *f, fprintf_function cpu_fprintf, int flags) { int i; if (!SIGN_EXT_P(env->active_tc.PC)) cpu_fprintf(f, "BROKEN: pc=0x" TARGET_FMT_lx "\n", env->active_tc.PC); if (!SIGN_EXT_P(env->active_tc.HI[0])) cpu_fprintf(f, "BROKEN: HI=0x" TARGET_FMT_lx "\n", env->active_tc.HI[0]); if (!SIGN_EXT_P(env->active_tc.LO[0])) cpu_fprintf(f, "BROKEN: LO=0x" TARGET_FMT_lx "\n", env->active_tc.LO[0]); if (!SIGN_EXT_P(env->btarget)) cpu_fprintf(f, "BROKEN: btarget=0x" TARGET_FMT_lx "\n", env->btarget); for (i = 0; i < 32; i++) { if (!SIGN_EXT_P(env->active_tc.gpr[i])) cpu_fprintf(f, "BROKEN: %s=0x" TARGET_FMT_lx "\n", regnames[i], env->active_tc.gpr[i]); } if (!SIGN_EXT_P(env->CP0_EPC)) cpu_fprintf(f, "BROKEN: EPC=0x" TARGET_FMT_lx "\n", env->CP0_EPC); if (!SIGN_EXT_P(env->lladdr)) cpu_fprintf(f, "BROKEN: LLAddr=0x" TARGET_FMT_lx "\n", env->lladdr); }
17,294
FFmpeg
f5b2476fd322a4d36fde912cb2a30a850bd77f43
1
void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) { DECLARE_ALIGNED_16(signed int, ABCD[4]) = {((8 - x) * (8 - y)), ((x) * (8 - y)), ((8 - x) * (y)), ((x) * (y))}; register int i; vec_u8 fperm; const vec_s32 vABCD = vec_ld(0, ABCD); const vec_s16 vA = vec_splat((vec_s16)vABCD, 1); const vec_s16 vB = vec_splat((vec_s16)vABCD, 3); const vec_s16 vC = vec_splat((vec_s16)vABCD, 5); const vec_s16 vD = vec_splat((vec_s16)vABCD, 7); LOAD_ZERO; const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4)); const vec_u16 v6us = vec_splat_u16(6); register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1; register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0; vec_u8 vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1; vec_u8 vsrc0uc, vsrc1uc; vec_s16 vsrc0ssH, vsrc1ssH; vec_u8 vsrcCuc, vsrc2uc, vsrc3uc; vec_s16 vsrc2ssH, vsrc3ssH, psum; vec_u8 vdst, ppsum, fsum; if (((unsigned long)dst) % 16 == 0) { fperm = (vec_u8){0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}; } else { fperm = (vec_u8){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; } vsrcAuc = vec_ld(0, src); if (loadSecond) vsrcBuc = vec_ld(16, src); vsrcperm0 = vec_lvsl(0, src); vsrcperm1 = vec_lvsl(1, src); vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0); if (reallyBadAlign) vsrc1uc = vsrcBuc; else vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1); vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc); vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc); if (!loadSecond) {// -> !reallyBadAlign for (i = 0 ; i < h ; i++) { vsrcCuc = vec_ld(stride + 0, src); vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0); vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1); vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc2uc); vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc3uc); psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0)); psum = vec_mladd(vB, vsrc1ssH, psum); psum = vec_mladd(vC, vsrc2ssH, psum); psum = vec_mladd(vD, vsrc3ssH, psum); psum = vec_add(v28ss, psum); psum = vec_sra(psum, v6us); vdst = vec_ld(0, dst); ppsum = (vec_u8)vec_packsu(psum, psum); fsum = vec_perm(vdst, ppsum, fperm); vec_st(fsum, 0, dst); vsrc0ssH = vsrc2ssH; vsrc1ssH = vsrc3ssH; dst += stride; src += stride; } } else { vec_u8 vsrcDuc; for (i = 0 ; i < h ; i++) { vsrcCuc = vec_ld(stride + 0, src); vsrcDuc = vec_ld(stride + 16, src); vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0); if (reallyBadAlign) vsrc3uc = vsrcDuc; else vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1); vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc2uc); vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc3uc); psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0)); psum = vec_mladd(vB, vsrc1ssH, psum); psum = vec_mladd(vC, vsrc2ssH, psum); psum = vec_mladd(vD, vsrc3ssH, psum); psum = vec_add(v28ss, psum); psum = vec_sr(psum, v6us); vdst = vec_ld(0, dst); ppsum = (vec_u8)vec_pack(psum, psum); fsum = vec_perm(vdst, ppsum, fperm); vec_st(fsum, 0, dst); vsrc0ssH = vsrc2ssH; vsrc1ssH = vsrc3ssH; dst += stride; src += stride; } } }
17,295
FFmpeg
b15a9888a8f8e8cc9784ffd8d5d0307900fb78bb
1
static int sol_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    if (s->pb->eof_reached)
        return AVERROR(EIO);
    ret = av_get_packet(s->pb, pkt, MAX_SIZE);
    pkt->stream_index = 0;

    /* note: we need to modify the packet size here to handle the last packet */
    pkt->size = ret;
    return 0;
}
17,296
qemu
7d1b0095bff7157e856d1d0e6c4295641ced2752
1
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
17,297
qemu
bdf026317daa3b9dfa281f29e96fbb6fd48394c8
1
static int kvm_irqchip_get_virq(KVMState *s)
{
    uint32_t *word = s->used_gsi_bitmap;
    int max_words = ALIGN(s->gsi_count, 32) / 32;
    int i, zeroes;
    bool retry = true;

again:
    /* Return the lowest unused GSI in the bitmap */
    for (i = 0; i < max_words; i++) {
        zeroes = ctz32(~word[i]);
        if (zeroes == 32) {
            continue;
        }

        return zeroes + i * 32;
    }
    if (!s->direct_msi && retry) {
        retry = false;
        kvm_flush_dynamic_msi_routes(s);
        goto again;
    }
    return -ENOSPC;
}
17,298
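Records 17,298 and 17,321 are two variants of the same GSI allocator: this one scans each word with ctz32(~word), which yields 32 when every bit is set, while the variant in record 17,321 uses the 1-based ffs(~word) and subtracts 1. A standalone sketch of the underlying find-first-clear-bit scan, assuming a plain uint32_t array instead of KVMState's used_gsi_bitmap:

#include <stdint.h>

/* Return the index of the lowest clear bit in a bitmap of `words` 32-bit
 * words, or -1 if every bit is set.  Mirrors the loop in the record above. */
static int find_first_zero_bit32(const uint32_t *bitmap, int words)
{
    for (int i = 0; i < words; i++) {
        uint32_t inv = ~bitmap[i];
        if (inv) {
            return i * 32 + __builtin_ctz(inv);  /* ctz is defined here: inv != 0 */
        }
    }
    return -1;
}

For example, ffs(0x4) returns 3 while ctz32(0x4) returns 2, which is why the ffs-based variant needs the bit - 1 adjustment.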
qemu
6b98bd649520d07df4d1b7a0a54ac73bf178519c
0
void bdrv_drain_all(void) { /* Always run first iteration so any pending completion BHs run */ bool busy = true; BlockDriverState *bs = NULL; GSList *aio_ctxs = NULL, *ctx; while ((bs = bdrv_next(bs))) { AioContext *aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); if (bs->job) { block_job_pause(bs->job); } bdrv_no_throttling_begin(bs); bdrv_drain_recurse(bs); aio_context_release(aio_context); if (!g_slist_find(aio_ctxs, aio_context)) { aio_ctxs = g_slist_prepend(aio_ctxs, aio_context); } } /* Note that completion of an asynchronous I/O operation can trigger any * number of other I/O operations on other devices---for example a * coroutine can submit an I/O request to another device in response to * request completion. Therefore we must keep looping until there was no * more activity rather than simply draining each device independently. */ while (busy) { busy = false; for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) { AioContext *aio_context = ctx->data; bs = NULL; aio_context_acquire(aio_context); while ((bs = bdrv_next(bs))) { if (aio_context == bdrv_get_aio_context(bs)) { bdrv_flush_io_queue(bs); if (bdrv_requests_pending(bs)) { busy = true; aio_poll(aio_context, busy); } } } busy |= aio_poll(aio_context, false); aio_context_release(aio_context); } } bs = NULL; while ((bs = bdrv_next(bs))) { AioContext *aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); bdrv_no_throttling_end(bs); if (bs->job) { block_job_resume(bs->job); } aio_context_release(aio_context); } g_slist_free(aio_ctxs); }
17,300
qemu
23ddf2bb1e4bfe2b72a726fe5e828807b65941ad
0
static int net_socket_mcast_create(struct sockaddr_in *mcastaddr, struct in_addr *localaddr) { struct ip_mreq imr; int fd; int val, ret; if (!IN_MULTICAST(ntohl(mcastaddr->sin_addr.s_addr))) { fprintf(stderr, "qemu: error: specified mcastaddr \"%s\" (0x%08x) does not contain a multicast address\n", inet_ntoa(mcastaddr->sin_addr), (int)ntohl(mcastaddr->sin_addr.s_addr)); return -1; } fd = qemu_socket(PF_INET, SOCK_DGRAM, 0); if (fd < 0) { perror("socket(PF_INET, SOCK_DGRAM)"); return -1; } val = 1; ret=setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (const char *)&val, sizeof(val)); if (ret < 0) { perror("setsockopt(SOL_SOCKET, SO_REUSEADDR)"); goto fail; } ret = bind(fd, (struct sockaddr *)mcastaddr, sizeof(*mcastaddr)); if (ret < 0) { perror("bind"); goto fail; } /* Add host to multicast group */ imr.imr_multiaddr = mcastaddr->sin_addr; if (localaddr) { imr.imr_interface = *localaddr; } else { imr.imr_interface.s_addr = htonl(INADDR_ANY); } ret = setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, (const char *)&imr, sizeof(struct ip_mreq)); if (ret < 0) { perror("setsockopt(IP_ADD_MEMBERSHIP)"); goto fail; } /* Force mcast msgs to loopback (eg. several QEMUs in same host */ val = 1; ret=setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, (const char *)&val, sizeof(val)); if (ret < 0) { perror("setsockopt(SOL_IP, IP_MULTICAST_LOOP)"); goto fail; } /* If a bind address is given, only send packets from that address */ if (localaddr != NULL) { ret = setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, (const char *)localaddr, sizeof(*localaddr)); if (ret < 0) { perror("setsockopt(IP_MULTICAST_IF)"); goto fail; } } socket_set_nonblock(fd); return fd; fail: if (fd >= 0) closesocket(fd); return -1; }
17,302
qemu
62be4e3a5041e84304aa23637da623a205c53ecc
0
ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
17,303
qemu
bf18bee547d19fde314e7b6b81f21f68b46c8a92
0
void qmp_blockdev_open_tray(const char *device, bool has_force, bool force,
                            Error **errp)
{
    if (!has_force) {
        force = false;
    }
    do_open_tray(device, force, errp);
}
17,304
qemu
fd56e0612b6454a282fa6a953fdb09281a98c589
0
void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem, MemoryRegion *io_lo, MemoryRegion *io_hi) { assert(!pci_dev->has_vga); assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE); pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem; memory_region_add_subregion_overlap(pci_dev->bus->address_space_mem, QEMU_PCI_VGA_MEM_BASE, mem, 1); assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE); pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo; memory_region_add_subregion_overlap(pci_dev->bus->address_space_io, QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1); assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE); pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi; memory_region_add_subregion_overlap(pci_dev->bus->address_space_io, QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1); pci_dev->has_vga = true; pci_update_vga(pci_dev); }
17,306
qemu
4af9193ae954f87225e1ba5d527f6a13e37b1e0e
0
static int qmp_check_client_args(const mon_cmd_t *cmd, QDict *client_args)
{
    int flags, err;
    QDict *cmd_args;

    cmd_args = qdict_from_args_type(cmd->args_type);

    flags = 0;
    err = check_mandatory_args(cmd_args, client_args, &flags);
    if (err) {
        goto out;
    }

    /* TODO: Check client args type */

out:
    QDECREF(cmd_args);
    return err;
}
17,309
qemu
c2b38b277a7882a592f4f2ec955084b2b756daaa
0
bool aio_prepare(AioContext *ctx)
{
    /* Poll mode cannot be used with glib's event loop, disable it. */
    poll_set_started(ctx, false);

    return false;
}
17,310
qemu
6eb8f212d2686ed9b17077d554465df7ae06f805
0
static void sigp_initial_cpu_reset(void *arg)
{
    CPUState *cpu = arg;
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);

    cpu_synchronize_state(cpu);
    scc->initial_cpu_reset(cpu);
    cpu_synchronize_post_reset(cpu);
}
17,311
qemu
57407ea44cc0a3d630b9b89a2be011f1955ce5c1
0
static void mcf_fec_cleanup(NetClientState *nc)
{
    mcf_fec_state *s = qemu_get_nic_opaque(nc);

    g_free(s);
}
17,312
qemu
568c73a4783cd981e9aa6de4f15dcda7829643ad
0
static void hid_keyboard_event(DeviceState *dev, QemuConsole *src,
                               InputEvent *evt)
{
    HIDState *hs = (HIDState *)dev;
    int scancodes[3], i, count;
    int slot;

    count = qemu_input_key_value_to_scancode(evt->key->key,
                                             evt->key->down,
                                             scancodes);
    if (hs->n + count > QUEUE_LENGTH) {
        fprintf(stderr, "usb-kbd: warning: key event queue full\n");
        return;
    }
    for (i = 0; i < count; i++) {
        slot = (hs->head + hs->n) & QUEUE_MASK;
        hs->n++;
        hs->kbd.keycodes[slot] = scancodes[i];
    }
    hs->event(hs);
}
17,313
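The queueing in record 17,313 relies on the usual power-of-two ring-buffer trick: assuming QUEUE_LENGTH is a power of two and QUEUE_MASK equals QUEUE_LENGTH - 1, the expression (head + n) & QUEUE_MASK wraps the write index without a division. A minimal sketch of that indexing with made-up names:

#include <stdint.h>

#define RING_LEN  16                  /* must be a power of two */
#define RING_MASK (RING_LEN - 1)

typedef struct {
    uint32_t buf[RING_LEN];
    unsigned head;                    /* index of the oldest element */
    unsigned n;                       /* number of queued elements */
} Ring;

/* Append one element; returns 0 on success, -1 when the ring is full. */
static int ring_push(Ring *r, uint32_t v)
{
    if (r->n == RING_LEN) {
        return -1;
    }
    r->buf[(r->head + r->n) & RING_MASK] = v;
    r->n++;
    return 0;
}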
qemu
7d6e771f49c36f4388798ce25bde1dede40cda74
0
static void pci_vpb_unmap(SysBusDevice *dev, target_phys_addr_t base)
{
    PCIVPBState *s = (PCIVPBState *)dev;

    /* Selfconfig area. */
    memory_region_del_subregion(get_system_memory(), &s->mem_config);
    /* Normal config area. */
    memory_region_del_subregion(get_system_memory(), &s->mem_config2);

    if (s->realview) {
        /* IO memory area. */
        memory_region_del_subregion(get_system_memory(), &s->isa);
    }
}
17,315
qemu
364031f17932814484657e5551ba12957d993d7e
0
static int v9fs_synth_init(FsContext *ctx)
{
    QLIST_INIT(&v9fs_synth_root.child);
    qemu_mutex_init(&v9fs_synth_mutex);

    /* Add "." and ".." entries for root */
    v9fs_add_dir_node(&v9fs_synth_root, v9fs_synth_root.attr->mode,
                      "..", v9fs_synth_root.attr, v9fs_synth_root.attr->inode);
    v9fs_add_dir_node(&v9fs_synth_root, v9fs_synth_root.attr->mode,
                      ".", v9fs_synth_root.attr, v9fs_synth_root.attr->inode);

    /* Mark the subsystem is ready for use */
    v9fs_synth_fs = 1;
    return 0;
}
17,316
qemu
5b3715bfdafcf35c352aa6d273cadd4eb543c449
0
const char *get_feature_xml(CPUState *env, const char *p, const char **newp) { extern const char *const xml_builtin[][2]; size_t len; int i; const char *name; static char target_xml[1024]; len = 0; while (p[len] && p[len] != ':') len++; *newp = p + len; name = NULL; if (strncmp(p, "target.xml", len) == 0) { /* Generate the XML description for this CPU. */ if (!target_xml[0]) { GDBRegisterState *r; sprintf(target_xml, "<?xml version=\"1.0\"?>" "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">" "<target>" "<xi:include href=\"%s\"/>", GDB_CORE_XML); for (r = env->gdb_regs; r; r = r->next) { strcat(target_xml, "<xi:include href=\""); strcat(target_xml, r->xml); strcat(target_xml, "\"/>"); } strcat(target_xml, "</target>"); } return target_xml; } for (i = 0; ; i++) { name = xml_builtin[i][0]; if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len)) break; } return name ? xml_builtin[i][1] : NULL; }
17,317
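For orientation, the string that record 17,317 assembles into target_xml has the following shape (the code builds it as one unbroken string, and the href values here are placeholders, since the real names come from GDB_CORE_XML and each registered r->xml):

<?xml version="1.0"?>
<!DOCTYPE target SYSTEM "gdb-target.dtd">
<target>
  <xi:include href="core-registers.xml"/>
  <xi:include href="coprocessor-registers.xml"/>
</target>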
qemu
7f3bdc2d8e17999a26ac0f6649caef92fedfc1c0
0
static int ppc_hash32_translate(CPUPPCState *env, struct mmu_ctx_hash32 *ctx, target_ulong eaddr, int rwx) { int ret; target_ulong sr; /* 1. Handle real mode accesses */ if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) { /* Translation is off */ ctx->raddr = eaddr; ctx->prot = PAGE_READ | PAGE_EXEC | PAGE_WRITE; return 0; } /* 2. Check Block Address Translation entries (BATs) */ if (env->nb_BATs != 0) { ret = ppc_hash32_get_bat(env, ctx, eaddr, rwx); if (ret == 0) { return 0; } } /* 3. Look up the Segment Register */ sr = env->sr[eaddr >> 28]; /* 4. Handle direct store segments */ if (sr & SR32_T) { return ppc_hash32_direct_store(env, sr, eaddr, rwx, &ctx->raddr, &ctx->prot); } /* 5. Check for segment level no-execute violation */ ctx->nx = !!(sr & SR32_NX); if ((rwx == 2) && ctx->nx) { return -3; } ret = find_pte32(env, ctx, sr, eaddr, rwx); return ret; }
17,318
FFmpeg
4ee247a2bdf2fbe81026a428d4affc46c81f28db
0
static int flv_write_trailer(AVFormatContext *s)
{
    int64_t file_size;
    AVIOContext *pb = s->pb;
    FLVContext *flv = s->priv_data;
    int i;

    /* Add EOS tag */
    for (i = 0; i < s->nb_streams; i++) {
        AVCodecContext *enc = s->streams[i]->codec;
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO &&
            enc->codec_id == CODEC_ID_H264) {
            put_avc_eos_tag(pb, flv->last_video_ts);
        }
    }

    file_size = avio_tell(pb);

    /* update information */
    avio_seek(pb, flv->duration_offset, SEEK_SET);
    put_amf_double(pb, flv->duration / (double)1000);
    avio_seek(pb, flv->filesize_offset, SEEK_SET);
    put_amf_double(pb, file_size);
    avio_seek(pb, file_size, SEEK_SET);
    return 0;
}
17,319
qemu
8aa1331c09a9b899f48d97f097bb49b7d458be1c
0
static int vmdk_parse_extents(const char *desc, BlockDriverState *bs, const char *desc_file_path) { int ret; char access[11]; char type[11]; char fname[512]; const char *p = desc; int64_t sectors = 0; int64_t flat_offset; char extent_path[PATH_MAX]; BlockDriverState *extent_file; while (*p) { /* parse extent line: * RW [size in sectors] FLAT "file-name.vmdk" OFFSET * or * RW [size in sectors] SPARSE "file-name.vmdk" */ flat_offset = -1; ret = sscanf(p, "%10s %" SCNd64 " %10s \"%511[^\n\r\"]\" %" SCNd64, access, &sectors, type, fname, &flat_offset); if (ret < 4 || strcmp(access, "RW")) { goto next_line; } else if (!strcmp(type, "FLAT")) { if (ret != 5 || flat_offset < 0) { return -EINVAL; } } else if (ret != 4) { return -EINVAL; } if (sectors <= 0 || (strcmp(type, "FLAT") && strcmp(type, "SPARSE")) || (strcmp(access, "RW"))) { goto next_line; } path_combine(extent_path, sizeof(extent_path), desc_file_path, fname); ret = bdrv_file_open(&extent_file, extent_path, NULL, bs->open_flags); if (ret) { return ret; } /* save to extents array */ if (!strcmp(type, "FLAT")) { /* FLAT extent */ VmdkExtent *extent; extent = vmdk_add_extent(bs, extent_file, true, sectors, 0, 0, 0, 0, sectors); extent->flat_start_offset = flat_offset << 9; } else if (!strcmp(type, "SPARSE")) { /* SPARSE extent */ ret = vmdk_open_sparse(bs, extent_file, bs->open_flags); if (ret) { bdrv_delete(extent_file); return ret; } } else { fprintf(stderr, "VMDK: Not supported extent type \"%s\""".\n", type); return -ENOTSUP; } next_line: /* move to next line */ while (*p && *p != '\n') { p++; } p++; } return 0; }
17,320
qemu
4a3adebb1854d48f0c67958e164c6b2f29d44064
0
static int kvm_irqchip_get_virq(KVMState *s)
{
    uint32_t *word = s->used_gsi_bitmap;
    int max_words = ALIGN(s->gsi_count, 32) / 32;
    int i, bit;
    bool retry = true;

again:
    /* Return the lowest unused GSI in the bitmap */
    for (i = 0; i < max_words; i++) {
        bit = ffs(~word[i]);
        if (!bit) {
            continue;
        }

        return bit - 1 + i * 32;
    }
    if (retry) {
        retry = false;
        kvm_flush_dynamic_msi_routes(s);
        goto again;
    }
    return -ENOSPC;
}
17,321
qemu
debb38a4cec34709604a00e23368e6cd8932fe3d
0
static int check_empty_sectors(BlockBackend *blk, int64_t sect_num,
                               int sect_count, const char *filename,
                               uint8_t *buffer, bool quiet)
{
    int pnum, ret = 0;

    ret = blk_pread(blk, sect_num << BDRV_SECTOR_BITS, buffer,
                    sect_count << BDRV_SECTOR_BITS);
    if (ret < 0) {
        error_report("Error while reading offset %" PRId64 " of %s: %s",
                     sectors_to_bytes(sect_num), filename, strerror(-ret));
        return ret;
    }
    ret = is_allocated_sectors(buffer, sect_count, &pnum);
    if (ret || pnum != sect_count) {
        qprintf(quiet, "Content mismatch at offset %" PRId64 "!\n",
                sectors_to_bytes(ret ? sect_num : sect_num + pnum));
        return 1;
    }

    return 0;
}
17,322
qemu
1964a397063967acc5ce71a2a24ed26e74824ee1
0
static int migration_put_buffer(void *opaque, const uint8_t *buf,
                                int64_t pos, int size)
{
    MigrationState *s = opaque;
    int ret;

    DPRINTF("putting %d bytes at %" PRId64 "\n", size, pos);

    if (size <= 0) {
        return size;
    }

    qemu_put_buffer(s->migration_file, buf, size);
    ret = qemu_file_get_error(s->migration_file);
    if (ret) {
        return ret;
    }

    s->bytes_xfer += size;
    return size;
}
17,324
qemu
fa879d62eb51253d00b6920ce1d1d9d261370a49
0
void bdrv_delete(BlockDriverState *bs)
{
    assert(!bs->peer);

    /* remove from list, if necessary */
    bdrv_make_anon(bs);

    bdrv_close(bs);
    if (bs->file != NULL) {
        bdrv_delete(bs->file);
    }

    assert(bs != bs_snapshots);
    g_free(bs);
}
17,326
FFmpeg
68caef9d48c4f1540b1b3181ebe7062a3417c62a
1
static av_always_inline void mc_chroma_unscaled(VP9Context *s, vp9_mc_func (*mc)[2], uint8_t *dst_u, uint8_t *dst_v, ptrdiff_t dst_stride, const uint8_t *ref_u, ptrdiff_t src_stride_u, const uint8_t *ref_v, ptrdiff_t src_stride_v, ThreadFrame *ref_frame, ptrdiff_t y, ptrdiff_t x, const VP56mv *mv, int bw, int bh, int w, int h, int bytesperpixel) { int mx = mv->x * (1 << !s->ss_h), my = mv->y * (1 << !s->ss_v), th; y += my >> 4; x += mx >> 4; ref_u += y * src_stride_u + x * bytesperpixel; ref_v += y * src_stride_v + x * bytesperpixel; mx &= 15; my &= 15; // FIXME bilinear filter only needs 0/1 pixels, not 3/4 // we use +7 because the last 7 pixels of each sbrow can be changed in // the longest loopfilter of the next sbrow th = (y + bh + 4 * !!my + 7) >> (6 - s->ss_v); ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0); if (x < !!mx * 3 || y < !!my * 3 || x + !!mx * 4 > w - bw || y + !!my * 4 > h - bh) { s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ref_u - !!my * 3 * src_stride_u - !!mx * 3 * bytesperpixel, 160, src_stride_u, bw + !!mx * 7, bh + !!my * 7, x - !!mx * 3, y - !!my * 3, w, h); ref_u = s->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel; mc[!!mx][!!my](dst_u, dst_stride, ref_u, 160, bh, mx, my); s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ref_v - !!my * 3 * src_stride_v - !!mx * 3 * bytesperpixel, 160, src_stride_v, bw + !!mx * 7, bh + !!my * 7, x - !!mx * 3, y - !!my * 3, w, h); ref_v = s->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel; mc[!!mx][!!my](dst_v, dst_stride, ref_v, 160, bh, mx, my); } else { mc[!!mx][!!my](dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my); mc[!!mx][!!my](dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my); } }
17,327
qemu
efec3dd631d94160288392721a5f9c39e50fb2bc
1
static void m48t59_isa_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = m48t59_isa_realize;
    dc->no_user = 1;
    dc->reset = m48t59_reset_isa;
    dc->props = m48t59_isa_properties;
}
17,330
FFmpeg
45b7bd7c53b41bc5ff6fc2158831f2b1b1256113
1
static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type) { H264Context *h = &svq3->h; int i, j, k, m, dir, mode; int cbp = 0; uint32_t vlc; int8_t *top, *left; MpegEncContext *const s = (MpegEncContext *) h; const int mb_xy = h->mb_xy; const int b_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride; h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF; h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF; h->topright_samples_available = 0xFFFF; if (mb_type == 0) { /* SKIP */ if (s->pict_type == AV_PICTURE_TYPE_P || s->next_picture.f.mb_type[mb_xy] == -1) { svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 0, 0); if (s->pict_type == AV_PICTURE_TYPE_B) { svq3_mc_dir_part(s, 16*s->mb_x, 16*s->mb_y, 16, 16, 0, 0, 0, 0, 1, 1); } mb_type = MB_TYPE_SKIP; } else { mb_type = FFMIN(s->next_picture.f.mb_type[mb_xy], 6); if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0) return -1; if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0) return -1; mb_type = MB_TYPE_16x16; } } else if (mb_type < 8) { /* INTER */ if (svq3->thirdpel_flag && svq3->halfpel_flag == !get_bits1 (&s->gb)) { mode = THIRDPEL_MODE; } else if (svq3->halfpel_flag && svq3->thirdpel_flag == !get_bits1 (&s->gb)) { mode = HALFPEL_MODE; } else { mode = FULLPEL_MODE; } /* fill caches */ /* note ref_cache should contain here: ???????? ???11111 N??11111 N??11111 N??11111 */ for (m = 0; m < 2; m++) { if (s->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6] != -1) { for (i = 0; i < 4; i++) { *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - 1 + i*h->b_stride]; } } else { for (i = 0; i < 4; i++) { *(uint32_t *) h->mv_cache[m][scan8[0] - 1 + i*8] = 0; } } if (s->mb_y > 0) { memcpy(h->mv_cache[m][scan8[0] - 1*8], s->current_picture.f.motion_val[m][b_xy - h->b_stride], 4*2*sizeof(int16_t)); memset(&h->ref_cache[m][scan8[0] - 1*8], (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4); if (s->mb_x < (s->mb_width - 1)) { *(uint32_t *) h->mv_cache[m][scan8[0] + 4 - 1*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - h->b_stride + 4]; h->ref_cache[m][scan8[0] + 4 - 1*8] = (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride + 1]+6] == -1 || h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride ] ] == -1) ? PART_NOT_AVAILABLE : 1; }else h->ref_cache[m][scan8[0] + 4 - 1*8] = PART_NOT_AVAILABLE; if (s->mb_x > 0) { *(uint32_t *) h->mv_cache[m][scan8[0] - 1 - 1*8] = *(uint32_t *) s->current_picture.f.motion_val[m][b_xy - h->b_stride - 1]; h->ref_cache[m][scan8[0] - 1 - 1*8] = (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1]+3] == -1) ? 
PART_NOT_AVAILABLE : 1; }else h->ref_cache[m][scan8[0] - 1 - 1*8] = PART_NOT_AVAILABLE; }else memset(&h->ref_cache[m][scan8[0] - 1*8 - 1], PART_NOT_AVAILABLE, 8); if (s->pict_type != AV_PICTURE_TYPE_B) break; } /* decode motion vector(s) and form prediction(s) */ if (s->pict_type == AV_PICTURE_TYPE_P) { if (svq3_mc_dir(h, (mb_type - 1), mode, 0, 0) < 0) return -1; } else { /* AV_PICTURE_TYPE_B */ if (mb_type != 2) { if (svq3_mc_dir(h, 0, mode, 0, 0) < 0) return -1; } else { for (i = 0; i < 4; i++) { memset(s->current_picture.f.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); } } if (mb_type != 1) { if (svq3_mc_dir(h, 0, mode, 1, (mb_type == 3)) < 0) return -1; } else { for (i = 0; i < 4; i++) { memset(s->current_picture.f.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); } } } mb_type = MB_TYPE_16x16; } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */ memset(h->intra4x4_pred_mode_cache, -1, 8*5*sizeof(int8_t)); if (mb_type == 8) { if (s->mb_x > 0) { for (i = 0; i < 4; i++) { h->intra4x4_pred_mode_cache[scan8[0] - 1 + i*8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1]+6-i]; } if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1) { h->left_samples_available = 0x5F5F; } } if (s->mb_y > 0) { h->intra4x4_pred_mode_cache[4+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+0]; h->intra4x4_pred_mode_cache[5+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+1]; h->intra4x4_pred_mode_cache[6+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+2]; h->intra4x4_pred_mode_cache[7+8*0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]+3]; if (h->intra4x4_pred_mode_cache[4+8*0] == -1) { h->top_samples_available = 0x33FF; } } /* decode prediction codes for luma blocks */ for (i = 0; i < 16; i+=2) { vlc = svq3_get_ue_golomb(&s->gb); if (vlc >= 25){ av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc); return -1; } left = &h->intra4x4_pred_mode_cache[scan8[i] - 1]; top = &h->intra4x4_pred_mode_cache[scan8[i] - 8]; left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]]; left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]]; if (left[1] == -1 || left[2] == -1){ av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n"); return -1; } } } else { /* mb_type == 33, DC_128_PRED block type */ for (i = 0; i < 4; i++) { memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_PRED, 4); } } write_back_intra_pred_mode(h); if (mb_type == 8) { ff_h264_check_intra4x4_pred_mode(h); h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF; h->left_samples_available = (s->mb_x == 0) ? 
0x5F5F : 0xFFFF; } else { for (i = 0; i < 4; i++) { memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8*i], DC_128_PRED, 4); } h->top_samples_available = 0x33FF; h->left_samples_available = 0x5F5F; } mb_type = MB_TYPE_INTRA4x4; } else { /* INTRA16x16 */ dir = i_mb_type_info[mb_type - 8].pred_mode; dir = (dir >> 1) ^ 3*(dir & 1) ^ 1; if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir)) == -1){ av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n"); return -1; } cbp = i_mb_type_info[mb_type - 8].cbp; mb_type = MB_TYPE_INTRA16x16; } if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) { for (i = 0; i < 4; i++) { memset(s->current_picture.f.motion_val[0][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); } if (s->pict_type == AV_PICTURE_TYPE_B) { for (i = 0; i < 4; i++) { memset(s->current_picture.f.motion_val[1][b_xy + i*h->b_stride], 0, 4*2*sizeof(int16_t)); } } } if (!IS_INTRA4x4(mb_type)) { memset(h->intra4x4_pred_mode+h->mb2br_xy[mb_xy], DC_PRED, 8); } if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) { memset(h->non_zero_count_cache + 8, 0, 14*8*sizeof(uint8_t)); s->dsp.clear_blocks(h->mb+ 0); s->dsp.clear_blocks(h->mb+384); } if (!IS_INTRA16x16(mb_type) && (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) { if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48){ av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc); return -1; } cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc] : golomb_to_inter_cbp[vlc]; } if (IS_INTRA16x16(mb_type) || (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) { s->qscale += svq3_get_se_golomb(&s->gb); if (s->qscale > 31){ av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale); return -1; } } if (IS_INTRA16x16(mb_type)) { AV_ZERO128(h->mb_luma_dc[0]+0); AV_ZERO128(h->mb_luma_dc[0]+8); if (svq3_decode_block(&s->gb, h->mb_luma_dc, 0, 1)){ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding intra luma dc\n"); return -1; } } if (cbp) { const int index = IS_INTRA16x16(mb_type) ? 1 : 0; const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1); for (i = 0; i < 4; i++) { if ((cbp & (1 << i))) { for (j = 0; j < 4; j++) { k = index ? ((j&1) + 2*(i&1) + 2*(j&2) + 4*(i&2)) : (4*i + j); h->non_zero_count_cache[ scan8[k] ] = 1; if (svq3_decode_block(&s->gb, &h->mb[16*k], index, type)){ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding block\n"); return -1; } } } } if ((cbp & 0x30)) { for (i = 1; i < 3; ++i) { if (svq3_decode_block(&s->gb, &h->mb[16*16*i], 0, 3)){ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma dc block\n"); return -1; } } if ((cbp & 0x20)) { for (i = 1; i < 3; i++) { for (j = 0; j < 4; j++) { k = 16*i + j; h->non_zero_count_cache[ scan8[k] ] = 1; if (svq3_decode_block(&s->gb, &h->mb[16*k], 1, 1)){ av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding chroma ac block\n"); return -1; } } } } } } h->cbp= cbp; s->current_picture.f.mb_type[mb_xy] = mb_type; if (IS_INTRA(mb_type)) { h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8); } return 0; }
17,331
qemu
787aaf5703a702094f395db6795e74230282cd62
1
static void kvm_cpu_fill_host(x86_def_t *x86_cpu_def) { #ifdef CONFIG_KVM KVMState *s = kvm_state; uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; assert(kvm_enabled()); x86_cpu_def->name = "host"; host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); x86_cpu_vendor_words2str(x86_cpu_def->vendor, ebx, edx, ecx); host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); x86_cpu_def->stepping = eax & 0x0F; x86_cpu_def->level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); x86_cpu_def->features[FEAT_1_EDX] = kvm_arch_get_supported_cpuid(s, 0x1, 0, R_EDX); x86_cpu_def->features[FEAT_1_ECX] = kvm_arch_get_supported_cpuid(s, 0x1, 0, R_ECX); if (x86_cpu_def->level >= 7) { x86_cpu_def->features[FEAT_7_0_EBX] = kvm_arch_get_supported_cpuid(s, 0x7, 0, R_EBX); } else { x86_cpu_def->features[FEAT_7_0_EBX] = 0; } x86_cpu_def->xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); x86_cpu_def->features[FEAT_8000_0001_EDX] = kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX); x86_cpu_def->features[FEAT_8000_0001_ECX] = kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_ECX); cpu_x86_fill_model_id(x86_cpu_def->model_id); /* Call Centaur's CPUID instruction. */ if (!strcmp(x86_cpu_def->vendor, CPUID_VENDOR_VIA)) { host_cpuid(0xC0000000, 0, &eax, &ebx, &ecx, &edx); eax = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); if (eax >= 0xC0000001) { /* Support VIA max extended level */ x86_cpu_def->xlevel2 = eax; host_cpuid(0xC0000001, 0, &eax, &ebx, &ecx, &edx); x86_cpu_def->features[FEAT_C000_0001_EDX] = kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX); } } /* Other KVM-specific feature fields: */ x86_cpu_def->features[FEAT_SVM] = kvm_arch_get_supported_cpuid(s, 0x8000000A, 0, R_EDX); x86_cpu_def->features[FEAT_KVM] = kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX); #endif /* CONFIG_KVM */ }
17,332