id: int32 (0 to 27.3k)
func: string (length 26 to 142k)
target: bool (2 classes)
project: string (2 values)
commit_id: string (length 40)
5,322
static void print_samplesref(AVFilterBufferRef *samplesref) { const AVFilterBufferRefAudioProps *props = samplesref->audio; const int n = props->nb_samples * av_get_channel_layout_nb_channels(props->channel_layout); const uint16_t *p = (uint16_t*)samplesref->data[0]; const uint16_t *p_end = p + n; while (p < p_end) { fputc(*p & 0xff, stdout); fputc(*p>>8 & 0xff, stdout); p++; } fflush(stdout); }
true
FFmpeg
9076a6a943f7855756222181698aba78d3773d8f
5,323
static void test_media_insert(void) { uint8_t dir; /* Insert media in drive. DSKCHK should not be reset until a step pulse * is sent. */ qmp_discard_response("{'execute':'change', 'arguments':{" " 'device':'floppy0', 'target': '%s' }}", test_image); qmp_discard_response(""); /* ignore event (FIXME open -> open transition?!) */ qmp_discard_response(""); /* ignore event */ dir = inb(FLOPPY_BASE + reg_dir); assert_bit_set(dir, DSKCHG); dir = inb(FLOPPY_BASE + reg_dir); assert_bit_set(dir, DSKCHG); send_seek(0); dir = inb(FLOPPY_BASE + reg_dir); assert_bit_set(dir, DSKCHG); dir = inb(FLOPPY_BASE + reg_dir); assert_bit_set(dir, DSKCHG); /* Step to next track should clear DSKCHG bit. */ send_seek(1); dir = inb(FLOPPY_BASE + reg_dir); assert_bit_clear(dir, DSKCHG); dir = inb(FLOPPY_BASE + reg_dir); assert_bit_clear(dir, DSKCHG); }
true
qemu
563890c7c7e977842e2a35afe7a24d06d2103242
5,324
void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable) { int64_t bitmap_size; if (enable) { if (bs->dirty_tracking == 0) { int64_t i; uint8_t test; bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS); bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK; bitmap_size++; bs->dirty_bitmap = qemu_mallocz(bitmap_size); bs->dirty_tracking = enable; for(i = 0; i < bitmap_size; i++) test = bs->dirty_bitmap[i]; } } else { if (bs->dirty_tracking != 0) { qemu_free(bs->dirty_bitmap); bs->dirty_tracking = enable; } } }
true
qemu
c6d2283068026035a6468aae9dcde953bd7521ac
5,325
static void quorum_aio_finalize(QuorumAIOCB *acb) { BDRVQuorumState *s = acb->common.bs->opaque; int i, ret = 0; if (acb->vote_ret) { ret = acb->vote_ret; } acb->common.cb(acb->common.opaque, ret); if (acb->is_read) { for (i = 0; i < s->num_children; i++) { qemu_vfree(acb->qcrs[i].buf); qemu_iovec_destroy(&acb->qcrs[i].qiov); } } g_free(acb->qcrs); qemu_aio_release(acb); }
true
qemu
a9db86b223030bd40bdd81b160788196bc95fe6f
5,326
static int h261_decode_gob_header(H261Context *h){ unsigned int val; MpegEncContext * const s = &h->s; /* Check for GOB Start Code */ val = show_bits(&s->gb, 15); if(val) return -1; /* We have a GBSC */ skip_bits(&s->gb, 16); h->gob_number = get_bits(&s->gb, 4); /* GN */ s->qscale = get_bits(&s->gb, 5); /* GQUANT */ /* GEI */ while (get_bits1(&s->gb) != 0) { skip_bits(&s->gb, 8); } if(s->qscale==0) return -1; // For the first transmitted macroblock in a GOB, MBA is the absolute address. For // subsequent macroblocks, MBA is the difference between the absolute addresses of // the macroblock and the last transmitted macroblock. h->current_mba = 0; h->mba_diff = 0; return 0; }
true
FFmpeg
49e5dcbce5f9e08ec375fd54c413148beb81f1d7
5,328
static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64) { VhostUserMsg msg = { .request = request, .flags = VHOST_USER_VERSION, .u64 = u64, .size = sizeof(m.u64), }; vhost_user_write(dev, &msg, NULL, 0); return 0; }
true
qemu
7f4a930e64b9e69cd340395a7e4f0494aef4fcdd
5,329
static int read_rle_sgi(uint8_t *out_buf, SgiState *s) { uint8_t *dest_row; unsigned int len = s->height * s->depth * 4; GetByteContext g_table = s->g; unsigned int y, z; unsigned int start_offset; /* size of RLE offset and length tables */ if (len * 2 > bytestream2_get_bytes_left(&s->g)) { return AVERROR_INVALIDDATA; } for (z = 0; z < s->depth; z++) { dest_row = out_buf; for (y = 0; y < s->height; y++) { dest_row -= s->linesize; start_offset = bytestream2_get_be32(&g_table); bytestream2_seek(&s->g, start_offset, SEEK_SET); if (expand_rle_row(s, dest_row + z, dest_row + FFABS(s->linesize), s->depth) != s->width) { return AVERROR_INVALIDDATA; } } } return 0; }
true
FFmpeg
f4a8a0080537484154bb74e08ec76cbcbd25484b
5,330
static inline void RENAME(uyvyToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused) { #if COMPILE_TEMPLATE_MMX __asm__ volatile( "mov %0, %%"REG_a" \n\t" "1: \n\t" "movq (%1, %%"REG_a",2), %%mm0 \n\t" "movq 8(%1, %%"REG_a",2), %%mm1 \n\t" "psrlw $8, %%mm0 \n\t" "psrlw $8, %%mm1 \n\t" "packuswb %%mm1, %%mm0 \n\t" "movq %%mm0, (%2, %%"REG_a") \n\t" "add $8, %%"REG_a" \n\t" " js 1b \n\t" : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width) : "%"REG_a ); #else int i; for (i=0; i<width; i++) dst[i]= src[2*i+1]; #endif }
false
FFmpeg
d1adad3cca407f493c3637e20ecd4f7124e69212
5,331
static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, long width, long height, long lumStride, long chromStride, long dstStride) { //FIXME interpolate chroma RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2); }
false
FFmpeg
d1adad3cca407f493c3637e20ecd4f7124e69212
5,332
static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride, int is4tap) { LOAD_PIXELS int a, f1, f2; const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP; a = 3*(q0 - p0); if (is4tap) a += clip_int8(p1 - q1); a = clip_int8(a); // We deviate from the spec here with c(a+3) >> 3 // since that's what libvpx does. f1 = FFMIN(a+4, 127) >> 3; f2 = FFMIN(a+3, 127) >> 3; // Despite what the spec says, we do need to clamp here to // be bitexact with libvpx. p[-1*stride] = cm[p0 + f2]; p[ 0*stride] = cm[q0 - f1]; // only used for _inner on blocks without high edge variance if (!is4tap) { a = (f1+1)>>1; p[-2*stride] = cm[p1 + a]; p[ 1*stride] = cm[q1 - a]; } }
false
FFmpeg
b8664c929437d6d079e16979c496a2db40cf2324
5,334
static int theora_decode_init(AVCodecContext *avctx) { Vp3DecodeContext *s = avctx->priv_data; GetBitContext gb; int ptype; uint8_t *p= avctx->extradata; int op_bytes, i; s->theora = 1; if (!avctx->extradata_size) { av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n"); return -1; } for(i=0;i<3;i++) { op_bytes = *(p++)<<8; op_bytes += *(p++); init_get_bits(&gb, p, op_bytes); p += op_bytes; ptype = get_bits(&gb, 8); debug_vp3("Theora headerpacket type: %x\n", ptype); if (!(ptype & 0x80)) { av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n"); return -1; } // FIXME: check for this aswell skip_bits(&gb, 6*8); /* "theora" */ switch(ptype) { case 0x80: theora_decode_header(avctx, gb); break; case 0x81: // FIXME: is this needed? it breaks sometimes // theora_decode_comments(avctx, gb); break; case 0x82: theora_decode_tables(avctx, gb); break; default: av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype&~0x80); break; } } vp3_decode_init(avctx); return 0; }
false
FFmpeg
e278056fbad7405fc47901faea7de98db003a0fa
5,335
static av_cold int pcm_encode_init(AVCodecContext *avctx) { avctx->frame_size = 0; switch (avctx->codec->id) { case AV_CODEC_ID_PCM_ALAW: pcm_alaw_tableinit(); break; case AV_CODEC_ID_PCM_MULAW: pcm_ulaw_tableinit(); break; default: break; } avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id); avctx->block_align = avctx->channels * avctx->bits_per_coded_sample / 8; avctx->bit_rate = avctx->block_align * avctx->sample_rate * 8; avctx->coded_frame = av_frame_alloc(); if (!avctx->coded_frame) return AVERROR(ENOMEM); return 0; }
false
FFmpeg
d6604b29ef544793479d7fb4e05ef6622bb3e534
5,336
static void write_packet(AVFormatContext *s, AVPacket *pkt, OutputStream *ost) { AVStream *st = ost->st; int ret; /* * Audio encoders may split the packets -- #frames in != #packets out. * But there is no reordering, so we can limit the number of output packets * by simply dropping them here. * Counting encoded video frames needs to be done separately because of * reordering, see do_video_out() */ if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) { if (ost->frame_number >= ost->max_frames) { av_packet_unref(pkt); return; } ost->frame_number++; } if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_FACTOR, NULL); ost->quality = sd ? *(int *)sd : -1; if (ost->frame_rate.num) { pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate), ost->st->time_base); } } if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) && ost->last_mux_dts != AV_NOPTS_VALUE && pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) { av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream " "%d:%d; previous: %"PRId64", current: %"PRId64"; ", ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts); if (exit_on_error) { av_log(NULL, AV_LOG_FATAL, "aborting.\n"); exit_program(1); } av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result " "in incorrect timestamps in the output file.\n", ost->last_mux_dts + 1); pkt->dts = ost->last_mux_dts + 1; if (pkt->pts != AV_NOPTS_VALUE) pkt->pts = FFMAX(pkt->pts, pkt->dts); } ost->last_mux_dts = pkt->dts; ost->data_size += pkt->size; ost->packets_written++; pkt->stream_index = ost->index; ret = av_interleaved_write_frame(s, pkt); if (ret < 0) { print_error("av_interleaved_write_frame()", ret); exit_program(1); } }
false
FFmpeg
398f015f077c6a2406deffd9e37ff34b9c7bb3bc
5,337
void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64], int lowres_flag, int is_mpeg12) { int mb_x, mb_y; const int mb_xy = s->mb_y * s->mb_stride + s->mb_x; #if CONFIG_MPEG_XVMC_DECODER if(s->avctx->xvmc_acceleration){ ff_xvmc_decode_mb(s);//xvmc uses pblocks return; } #endif mb_x = s->mb_x; mb_y = s->mb_y; if(s->avctx->debug&FF_DEBUG_DCT_COEFF) { /* save DCT coefficients */ int i,j; DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6]; for(i=0; i<6; i++) for(j=0; j<64; j++) *dct++ = block[i][s->dsp.idct_permutation[j]]; } s->current_picture.qscale_table[mb_xy]= s->qscale; /* update DC predictors for P macroblocks */ if (!s->mb_intra) { if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) { if(s->mbintra_table[mb_xy]) ff_clean_intra_table_entries(s); } else { s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 128 << s->intra_dc_precision; } } else if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) s->mbintra_table[mb_xy]=1; if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc uint8_t *dest_y, *dest_cb, *dest_cr; int dct_linesize, dct_offset; op_pixels_func (*op_pix)[4]; qpel_mc_func (*op_qpix)[16]; const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics const int uvlinesize= s->current_picture.linesize[1]; const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag; const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8; /* avoid copy if macroblock skipped in last frame too */ /* skip only during decoding as we might trash the buffers during encoding a bit */ if(!s->encoding){ uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy]; const int age= s->current_picture.age; assert(age); if (s->mb_skipped) { s->mb_skipped= 0; assert(s->pict_type!=FF_I_TYPE); (*mbskip_ptr) ++; /* indicate that this time we skipped it */ if(*mbskip_ptr >99) *mbskip_ptr= 99; /* if previous was skipped too, then nothing to do ! */ if (*mbskip_ptr >= age && s->current_picture.reference){ return; } } else if(!s->current_picture.reference){ (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */ if(*mbskip_ptr >99) *mbskip_ptr= 99; } else{ *mbskip_ptr = 0; /* not skipped */ } } dct_linesize = linesize << s->interlaced_dct; dct_offset =(s->interlaced_dct)? 
linesize : linesize*block_size; if(readable){ dest_y= s->dest[0]; dest_cb= s->dest[1]; dest_cr= s->dest[2]; }else{ dest_y = s->b_scratchpad; dest_cb= s->b_scratchpad+16*linesize; dest_cr= s->b_scratchpad+32*linesize; } if (!s->mb_intra) { /* motion handling */ /* decoding or more than one mb_type (MC was already done otherwise) */ if(!s->encoding){ if(lowres_flag){ h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab; if (s->mv_dir & MV_DIR_FORWARD) { MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix); op_pix = s->dsp.avg_h264_chroma_pixels_tab; } if (s->mv_dir & MV_DIR_BACKWARD) { MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix); } }else{ op_qpix= s->me.qpel_put; if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){ op_pix = s->dsp.put_pixels_tab; }else{ op_pix = s->dsp.put_no_rnd_pixels_tab; } if (s->mv_dir & MV_DIR_FORWARD) { MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix); op_pix = s->dsp.avg_pixels_tab; op_qpix= s->me.qpel_avg; } if (s->mv_dir & MV_DIR_BACKWARD) { MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix); } } } /* skip dequant / idct if we are really late ;) */ if(s->hurry_up>1) goto skip_idct; if(s->avctx->skip_idct){ if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE) ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE) || s->avctx->skip_idct >= AVDISCARD_ALL) goto skip_idct; } /* add dct residue */ if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){ add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale); add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale); add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale); add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale); if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ if (s->chroma_y_shift){ add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); }else{ dct_linesize >>= 1; dct_offset >>=1; add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale); add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale); add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale); add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale); } } } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){ add_dct(s, block[0], 0, dest_y , dct_linesize); add_dct(s, block[1], 1, dest_y + block_size, dct_linesize); add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize); add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize); if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ if(s->chroma_y_shift){//Chroma420 add_dct(s, block[4], 4, dest_cb, uvlinesize); add_dct(s, block[5], 5, dest_cr, uvlinesize); }else{ //chroma422 dct_linesize = uvlinesize << s->interlaced_dct; dct_offset =(s->interlaced_dct)? 
uvlinesize : uvlinesize*8; add_dct(s, block[4], 4, dest_cb, dct_linesize); add_dct(s, block[5], 5, dest_cr, dct_linesize); add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize); add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize); if(!s->chroma_x_shift){//Chroma444 add_dct(s, block[8], 8, dest_cb+8, dct_linesize); add_dct(s, block[9], 9, dest_cr+8, dct_linesize); add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize); add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize); } } }//fi gray } else if (CONFIG_WMV2) { ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr); } } else { /* dct only in intra block */ if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){ put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale); put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale); put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale); put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale); if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ if(s->chroma_y_shift){ put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale); put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale); }else{ dct_offset >>=1; dct_linesize >>=1; put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale); put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale); put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale); put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale); } } }else{ s->dsp.idct_put(dest_y , dct_linesize, block[0]); s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]); s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]); s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]); if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){ if(s->chroma_y_shift){ s->dsp.idct_put(dest_cb, uvlinesize, block[4]); s->dsp.idct_put(dest_cr, uvlinesize, block[5]); }else{ dct_linesize = uvlinesize << s->interlaced_dct; dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8; s->dsp.idct_put(dest_cb, dct_linesize, block[4]); s->dsp.idct_put(dest_cr, dct_linesize, block[5]); s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]); s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]); if(!s->chroma_x_shift){//Chroma444 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]); s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]); s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]); s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]); } } }//gray } } skip_idct: if(!readable){ s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16); s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift); s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift); } } }
false
FFmpeg
83344066d326e6bad20feb66825ace12708eb084
5,338
void ff_aac_apply_tns(SingleChannelElement *sce) { const int mmm = FFMIN(sce->ics.tns_max_bands, sce->ics.max_sfb); float *coef = sce->pcoeffs; TemporalNoiseShaping *tns = &sce->tns; int w, filt, m, i; int bottom, top, order, start, end, size, inc; float *lpc, tmp[TNS_MAX_ORDER+1]; for (w = 0; w < sce->ics.num_windows; w++) { bottom = sce->ics.num_swb; for (filt = 0; filt < tns->n_filt[w]; filt++) { top = bottom; bottom = FFMAX(0, top - tns->length[w][filt]); order = tns->order[w][filt]; lpc = tns->coef[w][filt]; if (!order) continue; start = sce->ics.swb_offset[FFMIN(bottom, mmm)]; end = sce->ics.swb_offset[FFMIN( top, mmm)]; if ((size = end - start) <= 0) continue; if (tns->direction[w][filt]) { inc = -1; start = end - 1; } else { inc = 1; } start += w * 128; if (!sce->ics.ltp.present) { // ar filter for (m = 0; m < size; m++, start += inc) for (i = 1; i <= FFMIN(m, order); i++) coef[start] += coef[start - i * inc]*lpc[i - 1]; } else { // ma filter for (m = 0; m < size; m++, start += inc) { tmp[0] = coef[start]; for (i = 1; i <= FFMIN(m, order); i++) coef[start] += tmp[i]*lpc[i - 1]; for (i = order; i > 0; i--) tmp[i] = tmp[i - 1]; } } } } }
false
FFmpeg
21bfeec27f933e18e7aac52ec025831353f47430
5,339
void qcow2_free_clusters(BlockDriverState *bs, int64_t offset, int64_t size) { int ret; BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_FREE); ret = update_refcount(bs, offset, size, -1); if (ret < 0) { fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret)); abort(); } }
true
qemu
003fad6e2cae5311d3aea996388c90e3ab17de90
5,340
static void do_video_stats(AVOutputStream *ost, AVInputStream *ist, int frame_size) { static FILE *fvstats=NULL; static INT64 total_size = 0; struct tm *today; time_t today2; char filename[40]; AVCodecContext *enc; int frame_number; INT64 ti; double ti1, bitrate, avg_bitrate; if (!fvstats) { today2 = time(NULL); today = localtime(&today2); sprintf(filename, "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min, today->tm_sec); fvstats = fopen(filename,"w"); if (!fvstats) { perror("fopen"); exit(1); } } ti = MAXINT64; enc = &ost->st->codec; total_size += frame_size; if (enc->codec_type == CODEC_TYPE_VIDEO) { frame_number = ist->frame_number; fprintf(fvstats, "frame= %5d q= %2d ", frame_number, enc->quality); if (do_psnr) fprintf(fvstats, "PSNR= %6.2f ", enc->psnr_y); fprintf(fvstats,"f_size= %6d ", frame_size); /* compute min pts value */ if (!ist->discard && ist->pts < ti) { ti = ist->pts; } ti1 = (double)ti / 1000000.0; if (ti1 < 0.01) ti1 = 0.01; bitrate = (double)(frame_size * 8) * enc->frame_rate / FRAME_RATE_BASE / 1000.0; avg_bitrate = (double)(total_size * 8) / ti1 / 1000.0; fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ", (double)total_size / 1024, ti1, bitrate, avg_bitrate); fprintf(fvstats,"type= %s\n", enc->key_frame == 1 ? "I" : "P"); } }
true
FFmpeg
bf5af5687569e34d6e3a4d31fc6bb5dc44efdb29
5,341
static int scsi_handle_rw_error(SCSIDiskReq *r, int error, int type) { int is_read = (type == SCSI_REQ_STATUS_RETRY_READ); SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); BlockErrorAction action = bdrv_get_on_error(s->bs, is_read); if (action == BLOCK_ERR_IGNORE) { bdrv_mon_event(s->bs, BDRV_ACTION_IGNORE, is_read); return 0; } if ((error == ENOSPC && action == BLOCK_ERR_STOP_ENOSPC) || action == BLOCK_ERR_STOP_ANY) { type &= SCSI_REQ_STATUS_RETRY_TYPE_MASK; r->status |= SCSI_REQ_STATUS_RETRY | type; bdrv_mon_event(s->bs, BDRV_ACTION_STOP, is_read); vm_stop(VMSTOP_DISKFULL); } else { if (type == SCSI_REQ_STATUS_RETRY_READ) { scsi_req_data(&r->req, 0); } scsi_command_complete(r, CHECK_CONDITION, HARDWARE_ERROR); bdrv_mon_event(s->bs, BDRV_ACTION_REPORT, is_read); } return 1; }
true
qemu
a1f0cce2ac0243572ff72aa561da67fe3766a395
5,342
static gboolean qio_channel_websock_handshake_io(QIOChannel *ioc, GIOCondition condition, gpointer user_data) { QIOTask *task = user_data; QIOChannelWebsock *wioc = QIO_CHANNEL_WEBSOCK( qio_task_get_source(task)); Error *err = NULL; int ret; ret = qio_channel_websock_handshake_read(wioc, &err); if (ret < 0) { trace_qio_channel_websock_handshake_fail(ioc); qio_task_abort(task, err); error_free(err); return FALSE; } if (ret == 0) { trace_qio_channel_websock_handshake_pending(ioc, G_IO_IN); /* need more data still */ return TRUE; } object_ref(OBJECT(task)); trace_qio_channel_websock_handshake_reply(ioc); qio_channel_add_watch( wioc->master, G_IO_OUT, qio_channel_websock_handshake_send, task, (GDestroyNotify)object_unref); return FALSE; }
true
qemu
bc35d51077b33e68a0ab10a057f352747214223f
5,343
static int find_debugfs(char *debugfs) { char type[100]; FILE *fp; fp = fopen("/proc/mounts", "r"); if (fp == NULL) { return 0; } while (fscanf(fp, "%*s %" STR(PATH_MAX) "s %99s %*s %*d %*d\n", debugfs, type) == 2) { if (strcmp(type, "debugfs") == 0) { break; } } fclose(fp); if (strcmp(type, "debugfs") != 0) { return 0; } return 1; }
true
qemu
5070570c9089b905dd9efae30ee4318033c6ccd6
5,344
void qemu_input_event_send(QemuConsole *src, InputEvent *evt) { QemuInputHandlerState *s; if (!runstate_is_running() && !runstate_check(RUN_STATE_SUSPENDED)) { qemu_input_event_trace(src, evt); /* pre processing */ if (graphic_rotate && (evt->kind == INPUT_EVENT_KIND_ABS)) { qemu_input_transform_abs_rotate(evt); /* send event */ s = qemu_input_find_handler(1 << evt->kind); s->handler->event(s->dev, src, evt); s->events++;
true
qemu
bdcc3a28b7f6ed6b90ad8b8af7b5d17e0d3f1f06
5,345
static int qemu_chr_fe_write_buffer(CharDriverState *s, const uint8_t *buf, int len, int *offset) { int res = 0; *offset = 0; qemu_mutex_lock(&s->chr_write_lock); while (*offset < len) { do { res = s->chr_write(s, buf + *offset, len - *offset); if (res == -1 && errno == EAGAIN) { g_usleep(100); } } while (res == -1 && errno == EAGAIN); if (res <= 0) { break; } *offset += res; } if (*offset > 0) { qemu_chr_fe_write_log(s, buf, *offset); } qemu_mutex_unlock(&s->chr_write_lock); return res; }
true
qemu
53628efbc8aa7a7ab5354d24b971f4d69452151d
5,346
VLANClientState *qdev_get_vlan_client(DeviceState *dev, NetCanReceive *can_receive, NetReceive *receive, NetReceiveIOV *receive_iov, NetCleanup *cleanup, void *opaque) { NICInfo *nd = dev->nd; assert(nd); return qemu_new_vlan_client(nd->vlan, nd->model, nd->name, can_receive, receive, receive_iov, cleanup, opaque); }
true
qemu
ae50b2747f77944faa79eb914272b54eb30b63b3
5,347
static int send_sub_rect_nojpeg(VncState *vs, int x, int y, int w, int h, int bg, int fg, int colors, VncPalette *palette) { int ret; if (colors == 0) { if (tight_detect_smooth_image(vs, w, h)) { ret = send_gradient_rect(vs, x, y, w, h); ret = send_full_color_rect(vs, x, y, w, h); } } else if (colors == 1) { ret = send_solid_rect(vs); } else if (colors == 2) { ret = send_mono_rect(vs, x, y, w, h, bg, fg); } else if (colors <= 256) { ret = send_palette_rect(vs, x, y, w, h, palette); } return ret; }
true
qemu
d167f9bc06a577d6c85b8ed6991c1efe175aae7d
5,348
static int guess_disk_lchs(IDEState *s, int *pcylinders, int *pheads, int *psectors) { uint8_t *buf; int ret, i, heads, sectors, cylinders; struct partition *p; uint32_t nr_sects; buf = qemu_memalign(512, 512); if (buf == NULL) return -1; ret = bdrv_read(s->bs, 0, buf, 1); if (ret < 0) { qemu_free(buf); return -1; } /* test msdos magic */ if (buf[510] != 0x55 || buf[511] != 0xaa) { qemu_free(buf); return -1; } for(i = 0; i < 4; i++) { p = ((struct partition *)(buf + 0x1be)) + i; nr_sects = le32_to_cpu(p->nr_sects); if (nr_sects && p->end_head) { /* We make the assumption that the partition terminates on a cylinder boundary */ heads = p->end_head + 1; sectors = p->end_sector & 63; if (sectors == 0) continue; cylinders = s->nb_sectors / (heads * sectors); if (cylinders < 1 || cylinders > 16383) continue; *pheads = heads; *psectors = sectors; *pcylinders = cylinders; #if 0 printf("guessed geometry: LCHS=%d %d %d\n", cylinders, heads, sectors); #endif qemu_free(buf); return 0; } } qemu_free(buf); return -1; }
true
qemu
c717d8bf13d4c24372c4885eefa821ec76378d2b
5,349
USBDevice *usb_host_device_open(USBBus *bus, const char *devname) { struct USBAutoFilter filter; USBDevice *dev; char *p; dev = usb_create(bus, "usb-host"); if (strstr(devname, "auto:")) { if (parse_filter(devname, &filter) < 0) { goto fail; } } else { p = strchr(devname, '.'); if (p) { filter.bus_num = strtoul(devname, NULL, 0); filter.addr = strtoul(p + 1, NULL, 0); filter.vendor_id = 0; filter.product_id = 0; } else { p = strchr(devname, ':'); if (p) { filter.bus_num = 0; filter.addr = 0; filter.vendor_id = strtoul(devname, NULL, 16); filter.product_id = strtoul(p + 1, NULL, 16); } else { goto fail; } } } qdev_prop_set_uint32(&dev->qdev, "hostbus", filter.bus_num); qdev_prop_set_uint32(&dev->qdev, "hostaddr", filter.addr); qdev_prop_set_uint32(&dev->qdev, "vendorid", filter.vendor_id); qdev_prop_set_uint32(&dev->qdev, "productid", filter.product_id); qdev_init_nofail(&dev->qdev); return dev; fail: object_unparent(OBJECT(dev)); return NULL; }
true
qemu
3bc36a401e0f33e63a4d2c58b646ddf78efb567c
5,350
static void xlnx_dp_set_dpdma(Object *obj, const char *name, Object *val, Error **errp) { XlnxDPState *s = XLNX_DP(obj); if (s->console) { DisplaySurface *surface = qemu_console_surface(s->console); XlnxDPDMAState *dma = XLNX_DPDMA(val); xlnx_dpdma_set_host_data_location(dma, DP_GRAPHIC_DMA_CHANNEL, surface_data(surface)); } }
true
qemu
8f5d58ef2c92d7b82d9a6eeefd7c8854a183ba4a
5,351
static int ff_interleave_new_audio_packet(AVFormatContext *s, AVPacket *pkt, int stream_index, int flush) { AVStream *st = s->streams[stream_index]; AudioInterleaveContext *aic = st->priv_data; int size = FFMIN(av_fifo_size(aic->fifo), *aic->samples * aic->sample_size); if (!size || (!flush && size == av_fifo_size(aic->fifo))) return 0; av_new_packet(pkt, size); av_fifo_generic_read(aic->fifo, pkt->data, size, NULL); pkt->dts = pkt->pts = aic->dts; pkt->duration = av_rescale_q(*aic->samples, st->time_base, aic->time_base); pkt->stream_index = stream_index; aic->dts += pkt->duration; aic->samples++; if (!*aic->samples) aic->samples = aic->samples_per_frame; return size; }
false
FFmpeg
3ca8a2328878ebdb203e49d0a060df1b5337a370
5,352
static int alloc_tables(H264Context *h){ MpegEncContext * const s = &h->s; const int big_mb_num= s->mb_stride * (s->mb_height+1); int x,y; CHECKED_ALLOCZ(h->intra4x4_pred_mode, big_mb_num * 8 * sizeof(uint8_t)) CHECKED_ALLOCZ(h->non_zero_count , big_mb_num * 16 * sizeof(uint8_t)) CHECKED_ALLOCZ(h->slice_table_base , (big_mb_num+s->mb_stride) * sizeof(*h->slice_table_base)) CHECKED_ALLOCZ(h->cbp_table, big_mb_num * sizeof(uint16_t)) CHECKED_ALLOCZ(h->chroma_pred_mode_table, big_mb_num * sizeof(uint8_t)) CHECKED_ALLOCZ(h->mvd_table[0], 32*big_mb_num * sizeof(uint16_t)); CHECKED_ALLOCZ(h->mvd_table[1], 32*big_mb_num * sizeof(uint16_t)); CHECKED_ALLOCZ(h->direct_table, 32*big_mb_num * sizeof(uint8_t)); memset(h->slice_table_base, -1, (big_mb_num+s->mb_stride) * sizeof(*h->slice_table_base)); h->slice_table= h->slice_table_base + s->mb_stride*2 + 1; CHECKED_ALLOCZ(h->mb2b_xy , big_mb_num * sizeof(uint32_t)); CHECKED_ALLOCZ(h->mb2b8_xy , big_mb_num * sizeof(uint32_t)); for(y=0; y<s->mb_height; y++){ for(x=0; x<s->mb_width; x++){ const int mb_xy= x + y*s->mb_stride; const int b_xy = 4*x + 4*y*h->b_stride; const int b8_xy= 2*x + 2*y*h->b8_stride; h->mb2b_xy [mb_xy]= b_xy; h->mb2b8_xy[mb_xy]= b8_xy; } } s->obmc_scratchpad = NULL; if(!h->dequant4_coeff[0]) init_dequant_tables(h); return 0; fail: free_tables(h); return -1; }
false
FFmpeg
d31dbec3742e488156621b9ca21069f8c05aabf0
5,354
static int decode_nal_sei_frame_packing_arrangement(HEVCContext *s) { GetBitContext *gb = &s->HEVClc->gb; get_ue_golomb(gb); // frame_packing_arrangement_id s->sei_frame_packing_present = !get_bits1(gb); if (s->sei_frame_packing_present) { s->frame_packing_arrangement_type = get_bits(gb, 7); s->quincunx_subsampling = get_bits1(gb); s->content_interpretation_type = get_bits(gb, 6); // the following skips spatial_flipping_flag frame0_flipped_flag // field_views_flag current_frame_is_frame0_flag // frame0_self_contained_flag frame1_self_contained_flag skip_bits(gb, 6); if (!s->quincunx_subsampling && s->frame_packing_arrangement_type != 5) skip_bits(gb, 16); // frame[01]_grid_position_[xy] skip_bits(gb, 8); // frame_packing_arrangement_reserved_byte skip_bits1(gb); // frame_packing_arrangement_persistance_flag } skip_bits1(gb); // upsampled_aspect_ratio_flag return 0; }
false
FFmpeg
c51c08e0e70c186971385bdbb225f69edd4e3375
5,355
static int decode_frame_header(ProresContext *ctx, const uint8_t *buf, const int data_size, AVCodecContext *avctx) { int hdr_size, width, height, flags; int version; const uint8_t *ptr; hdr_size = AV_RB16(buf); av_dlog(avctx, "header size %d\n", hdr_size); if (hdr_size > data_size) { av_log(avctx, AV_LOG_ERROR, "error, wrong header size\n"); return AVERROR_INVALIDDATA; } version = AV_RB16(buf + 2); av_dlog(avctx, "%.4s version %d\n", buf+4, version); if (version > 1) { av_log(avctx, AV_LOG_ERROR, "unsupported version: %d\n", version); return AVERROR_PATCHWELCOME; } width = AV_RB16(buf + 8); height = AV_RB16(buf + 10); if (width != avctx->width || height != avctx->height) { av_log(avctx, AV_LOG_ERROR, "picture resolution change: %dx%d -> %dx%d\n", avctx->width, avctx->height, width, height); return AVERROR_PATCHWELCOME; } ctx->frame_type = (buf[12] >> 2) & 3; ctx->alpha_info = buf[17] & 0xf; if (ctx->alpha_info > 2) { av_log(avctx, AV_LOG_ERROR, "Invalid alpha mode %d\n", ctx->alpha_info); return AVERROR_INVALIDDATA; } if (avctx->skip_alpha) ctx->alpha_info = 0; av_dlog(avctx, "frame type %d\n", ctx->frame_type); if (ctx->frame_type == 0) { ctx->scan = ctx->progressive_scan; // permuted } else { ctx->scan = ctx->interlaced_scan; // permuted ctx->frame->interlaced_frame = 1; ctx->frame->top_field_first = ctx->frame_type == 1; } if (ctx->alpha_info) { avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P10 : AV_PIX_FMT_YUVA422P10; } else { avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV422P10; } ptr = buf + 20; flags = buf[19]; av_dlog(avctx, "flags %x\n", flags); if (flags & 2) { if(buf + data_size - ptr < 64) { av_log(avctx, AV_LOG_ERROR, "Header truncated\n"); return AVERROR_INVALIDDATA; } permute(ctx->qmat_luma, ctx->prodsp.idct_permutation, ptr); ptr += 64; } else { memset(ctx->qmat_luma, 4, 64); } if (flags & 1) { if(buf + data_size - ptr < 64) { av_log(avctx, AV_LOG_ERROR, "Header truncated\n"); return AVERROR_INVALIDDATA; } permute(ctx->qmat_chroma, ctx->prodsp.idct_permutation, ptr); } else { memset(ctx->qmat_chroma, 4, 64); } return hdr_size; }
false
FFmpeg
229843aa359ae0c9519977d7fa952688db63f559
5,358
static int svq1_decode_frame(AVCodecContext *avctx, void *data, int *data_size, UINT8 *buf, int buf_size) { MpegEncContext *s=avctx->priv_data; uint8_t *current, *previous; int result, i, x, y, width, height; AVFrame *pict = data; /* initialize bit buffer */ init_get_bits(&s->gb,buf,buf_size); /* decode frame header */ s->f_code = get_bits (&s->gb, 22); if ((s->f_code & ~0x70) || !(s->f_code & 0x60)) return -1; /* swap some header bytes (why?) */ if (s->f_code != 0x20) { uint32_t *src = (uint32_t *) (buf + 4); for (i=0; i < 4; i++) { src[i] = ((src[i] << 16) | (src[i] >> 16)) ^ src[7 - i]; } } result = svq1_decode_frame_header (&s->gb, s); if (result != 0) { #ifdef DEBUG_SVQ1 printf("Error in svq1_decode_frame_header %i\n",result); #endif return result; } //FIXME this avoids some confusion for "B frames" without 2 references //this should be removed after libavcodec can handle more flaxible picture types & ordering if(s->pict_type==B_TYPE && s->last_picture.data[0]==NULL) return buf_size; if(avctx->hurry_up && s->pict_type==B_TYPE) return buf_size; if(MPV_frame_start(s, avctx) < 0) return -1; /* decode y, u and v components */ for (i=0; i < 3; i++) { int linesize; if (i == 0) { width = (s->width+15)&~15; height = (s->height+15)&~15; linesize= s->linesize; } else { if(s->flags&CODEC_FLAG_GRAY) break; width = (s->width/4+15)&~15; height = (s->height/4+15)&~15; linesize= s->uvlinesize; } current = s->current_picture.data[i]; if(s->pict_type==B_TYPE){ previous = s->next_picture.data[i]; }else{ previous = s->last_picture.data[i]; } if (s->pict_type == I_TYPE) { /* keyframe */ for (y=0; y < height; y+=16) { for (x=0; x < width; x+=16) { result = svq1_decode_block_intra (&s->gb, &current[x], linesize); if (result != 0) { #ifdef DEBUG_SVQ1 printf("Error in svq1_decode_block %i (keyframe)\n",result); #endif return result; } } current += 16*linesize; } } else { svq1_pmv_t pmv[width/8+3]; /* delta frame */ memset (pmv, 0, ((width / 8) + 3) * sizeof(svq1_pmv_t)); for (y=0; y < height; y+=16) { for (x=0; x < width; x+=16) { result = svq1_decode_delta_block (s, &s->gb, &current[x], previous, linesize, pmv, x, y); if (result != 0) { #ifdef DEBUG_SVQ1 printf("Error in svq1_decode_delta_block %i\n",result); #endif return result; } } pmv[0].x = pmv[0].y = 0; current += 16*linesize; } } } *pict = *(AVFrame*)&s->current_picture; MPV_frame_end(s); *data_size=sizeof(AVFrame); return buf_size; }
false
FFmpeg
68f593b48433842f3407586679fe07f3e5199ab9
5,360
static int decode_blocks(SnowContext *s){ int x, y; int w= s->b_width; int h= s->b_height; int res; for(y=0; y<h; y++){ for(x=0; x<w; x++){ if ((res = decode_q_branch(s, 0, x, y)) < 0) return res; } } return 0; }
true
FFmpeg
4527ec2216109867498edc3ac8a17fd879b5d017
5,362
static int s390x_write_all_elf64_notes(const char *note_name, WriteCoreDumpFunction f, S390CPU *cpu, int id, void *opaque) { Note note; const NoteFuncDesc *nf; int note_size; int ret = -1; for (nf = note_func; nf->note_contents_func; nf++) { memset(&note, 0, sizeof(note)); note.hdr.n_namesz = cpu_to_be32(sizeof(note.name)); note.hdr.n_descsz = cpu_to_be32(nf->contents_size); strncpy(note.name, note_name, sizeof(note.name)); (*nf->note_contents_func)(&note, cpu); note_size = sizeof(note) - sizeof(note.contents) + nf->contents_size; ret = f(&note, note_size, opaque); if (ret < 0) { return -1; } } return 0; }
true
qemu
5f706fdc164b20b48254eadf7bd413edace34499
5,364
static int decode_unit(SCPRContext *s, PixelModel *pixel, unsigned step, unsigned *rval) { GetByteContext *gb = &s->gb; RangeCoder *rc = &s->rc; unsigned totfr = pixel->total_freq; unsigned value, x = 0, cumfr = 0, cnt_x = 0; int i, j, ret, c, cnt_c; if ((ret = s->get_freq(rc, totfr, &value)) < 0) return ret; while (x < 16) { cnt_x = pixel->lookup[x]; if (value >= cumfr + cnt_x) cumfr += cnt_x; else break; x++; c = x * 16; cnt_c = 0; while (c < 256) { cnt_c = pixel->freq[c]; if (value >= cumfr + cnt_c) cumfr += cnt_c; else break; c++; if ((ret = s->decode(gb, rc, cumfr, cnt_c, totfr)) < 0) return ret; pixel->freq[c] = cnt_c + step; pixel->lookup[x] = cnt_x + step; totfr += step; if (totfr > BOT) { totfr = 0; for (i = 0; i < 256; i++) { unsigned nc = (pixel->freq[i] >> 1) + 1; pixel->freq[i] = nc; totfr += nc; for (i = 0; i < 16; i++) { unsigned sum = 0; unsigned i16_17 = i << 4; for (j = 0; j < 16; j++) sum += pixel->freq[i16_17 + j]; pixel->lookup[i] = sum; pixel->total_freq = totfr; *rval = c & s->cbits; return 0;
true
FFmpeg
2171dfae8c065878a2e130390eb78cf2947a5b69
5,365
void slirp_select_fill(int *pnfds, fd_set *readfds, fd_set *writefds, fd_set *xfds) { }
false
qemu
8917c3bdba37d6fe4393db0fad3fabbde9530d6b
5,367
int tcp_socket_incoming(const char *address, uint16_t port) { char address_and_port[128]; Error *local_err = NULL; combine_addr(address_and_port, 128, address, port); int fd = inet_listen(address_and_port, NULL, 0, SOCK_STREAM, 0, &local_err); if (local_err != NULL) { qerror_report_err(local_err); error_free(local_err); } return fd; }
false
qemu
537b41f5013e1951fa15e8f18855b18d76124ce4
5,368
static void paio_cancel(BlockDriverAIOCB *blockacb) { struct qemu_paiocb *acb = (struct qemu_paiocb *)blockacb; int active = 0; mutex_lock(&lock); if (!acb->active) { TAILQ_REMOVE(&request_list, acb, node); acb->ret = -ECANCELED; } else if (acb->ret == -EINPROGRESS) { active = 1; } mutex_unlock(&lock); if (active) { /* fail safe: if the aio could not be canceled, we wait for it */ while (qemu_paio_error(acb) == EINPROGRESS) ; } paio_remove(acb); }
false
qemu
72cf2d4f0e181d0d3a3122e04129c58a95da713e
5,369
static void machine_initfn(Object *obj) { MachineState *ms = MACHINE(obj); object_property_add_str(obj, "accel", machine_get_accel, machine_set_accel, NULL); object_property_set_description(obj, "accel", "Accelerator list", NULL); object_property_add_bool(obj, "kernel-irqchip", machine_get_kernel_irqchip, machine_set_kernel_irqchip, NULL); object_property_set_description(obj, "kernel-irqchip", "Use KVM in-kernel irqchip", NULL); object_property_add(obj, "kvm-shadow-mem", "int", machine_get_kvm_shadow_mem, machine_set_kvm_shadow_mem, NULL, NULL, NULL); object_property_set_description(obj, "kvm-shadow-mem", "KVM shadow MMU size", NULL); object_property_add_str(obj, "kernel", machine_get_kernel, machine_set_kernel, NULL); object_property_set_description(obj, "kernel", "Linux kernel image file", NULL); object_property_add_str(obj, "initrd", machine_get_initrd, machine_set_initrd, NULL); object_property_set_description(obj, "initrd", "Linux initial ramdisk file", NULL); object_property_add_str(obj, "append", machine_get_append, machine_set_append, NULL); object_property_set_description(obj, "append", "Linux kernel command line", NULL); object_property_add_str(obj, "dtb", machine_get_dtb, machine_set_dtb, NULL); object_property_set_description(obj, "dtb", "Linux kernel device tree file", NULL); object_property_add_str(obj, "dumpdtb", machine_get_dumpdtb, machine_set_dumpdtb, NULL); object_property_set_description(obj, "dumpdtb", "Dump current dtb to a file and quit", NULL); object_property_add(obj, "phandle-start", "int", machine_get_phandle_start, machine_set_phandle_start, NULL, NULL, NULL); object_property_set_description(obj, "phandle-start", "The first phandle ID we may generate dynamically", NULL); object_property_add_str(obj, "dt-compatible", machine_get_dt_compatible, machine_set_dt_compatible, NULL); object_property_set_description(obj, "dt-compatible", "Overrides the \"compatible\" property of the dt root node", NULL); object_property_add_bool(obj, "dump-guest-core", machine_get_dump_guest_core, machine_set_dump_guest_core, NULL); object_property_set_description(obj, "dump-guest-core", "Include guest memory in a core dump", NULL); object_property_add_bool(obj, "mem-merge", machine_get_mem_merge, machine_set_mem_merge, NULL); object_property_set_description(obj, "mem-merge", "Enable/disable memory merge support", NULL); object_property_add_bool(obj, "usb", machine_get_usb, machine_set_usb, NULL); object_property_set_description(obj, "usb", "Set on/off to enable/disable usb", NULL); object_property_add_str(obj, "firmware", machine_get_firmware, machine_set_firmware, NULL); object_property_set_description(obj, "firmware", "Firmware image", NULL); object_property_add_bool(obj, "iommu", machine_get_iommu, machine_set_iommu, NULL); object_property_set_description(obj, "iommu", "Set on/off to enable/disable Intel IOMMU (VT-d)", NULL); /* Register notifier when init is done for sysbus sanity checks */ ms->sysbus_notifier.notify = machine_init_notify; qemu_add_machine_init_done_notifier(&ms->sysbus_notifier); }
false
qemu
d8870d0217216478888c2d3dd6bf62e155d978c8
5,370
static ssize_t flush_buf(VirtIOSerialPort *port, const uint8_t *buf, size_t len) { VirtConsole *vcon = DO_UPCAST(VirtConsole, port, port); ssize_t ret; ret = qemu_chr_write(vcon->chr, buf, len); trace_virtio_console_flush_buf(port->id, len, ret); return ret; }
false
qemu
0219d73283b6399a737ef5a098f849b956618eaa
5,371
static uint64_t l2x0_priv_read(void *opaque, target_phys_addr_t offset, unsigned size) { uint32_t cache_data; l2x0_state *s = (l2x0_state *)opaque; offset &= 0xfff; if (offset >= 0x730 && offset < 0x800) { return 0; /* cache ops complete */ } switch (offset) { case 0: return CACHE_ID; case 0x4: /* aux_ctrl values affect cache_type values */ cache_data = (s->aux_ctrl & (7 << 17)) >> 15; cache_data |= (s->aux_ctrl & (1 << 16)) >> 16; return s->cache_type |= (cache_data << 18) | (cache_data << 6); case 0x100: return s->ctrl; case 0x104: return s->aux_ctrl; case 0x108: return s->tag_ctrl; case 0x10C: return s->data_ctrl; case 0xC00: return s->filter_start; case 0xC04: return s->filter_end; case 0xF40: return 0; case 0xF60: return 0; case 0xF80: return 0; default: fprintf(stderr, "l2x0_priv_read: Bad offset %x\n", (int)offset); break; } return 0; }
false
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
5,372
static int qcow_create2(const char *filename, int64_t total_size, const char *backing_file, const char *backing_format, int flags) { int fd, header_size, backing_filename_len, l1_size, i, shift, l2_bits; int backing_format_len = 0; QCowHeader header; uint64_t tmp, offset; QCowCreateState s1, *s = &s1; QCowExtension ext_bf = {0, 0}; memset(s, 0, sizeof(*s)); fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644); if (fd < 0) return -1; memset(&header, 0, sizeof(header)); header.magic = cpu_to_be32(QCOW_MAGIC); header.version = cpu_to_be32(QCOW_VERSION); header.size = cpu_to_be64(total_size * 512); header_size = sizeof(header); backing_filename_len = 0; if (backing_file) { if (backing_format) { ext_bf.magic = QCOW_EXT_MAGIC_BACKING_FORMAT; backing_format_len = strlen(backing_format); ext_bf.len = (backing_format_len + 7) & ~7; header_size += ((sizeof(ext_bf) + ext_bf.len + 7) & ~7); } header.backing_file_offset = cpu_to_be64(header_size); backing_filename_len = strlen(backing_file); header.backing_file_size = cpu_to_be32(backing_filename_len); header_size += backing_filename_len; } s->cluster_bits = 12; /* 4 KB clusters */ s->cluster_size = 1 << s->cluster_bits; header.cluster_bits = cpu_to_be32(s->cluster_bits); header_size = (header_size + 7) & ~7; if (flags & BLOCK_FLAG_ENCRYPT) { header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES); } else { header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); } l2_bits = s->cluster_bits - 3; shift = s->cluster_bits + l2_bits; l1_size = (((total_size * 512) + (1LL << shift) - 1) >> shift); offset = align_offset(header_size, s->cluster_size); s->l1_table_offset = offset; header.l1_table_offset = cpu_to_be64(s->l1_table_offset); header.l1_size = cpu_to_be32(l1_size); offset += align_offset(l1_size * sizeof(uint64_t), s->cluster_size); s->refcount_table = qemu_mallocz(s->cluster_size); s->refcount_block = qemu_mallocz(s->cluster_size); s->refcount_table_offset = offset; header.refcount_table_offset = cpu_to_be64(offset); header.refcount_table_clusters = cpu_to_be32(1); offset += s->cluster_size; s->refcount_table[0] = cpu_to_be64(offset); s->refcount_block_offset = offset; offset += s->cluster_size; /* update refcounts */ create_refcount_update(s, 0, header_size); create_refcount_update(s, s->l1_table_offset, l1_size * sizeof(uint64_t)); create_refcount_update(s, s->refcount_table_offset, s->cluster_size); create_refcount_update(s, s->refcount_block_offset, s->cluster_size); /* write all the data */ write(fd, &header, sizeof(header)); if (backing_file) { if (backing_format_len) { char zero[16]; int d = ext_bf.len - backing_format_len; memset(zero, 0, sizeof(zero)); cpu_to_be32s(&ext_bf.magic); cpu_to_be32s(&ext_bf.len); write(fd, &ext_bf, sizeof(ext_bf)); write(fd, backing_format, backing_format_len); if (d>0) { write(fd, zero, d); } } write(fd, backing_file, backing_filename_len); } lseek(fd, s->l1_table_offset, SEEK_SET); tmp = 0; for(i = 0;i < l1_size; i++) { write(fd, &tmp, sizeof(tmp)); } lseek(fd, s->refcount_table_offset, SEEK_SET); write(fd, s->refcount_table, s->cluster_size); lseek(fd, s->refcount_block_offset, SEEK_SET); write(fd, s->refcount_block, s->cluster_size); qemu_free(s->refcount_table); qemu_free(s->refcount_block); close(fd); return 0; }
false
qemu
2d2431f03fc78b532f3a1c5f858cf78859d50fc3
5,373
static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc, TCGv r_cond) { unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29)); target_ulong target = dc->pc + offset; if (cond == 0x0) { /* unconditional not taken */ if (a) { dc->pc = dc->npc + 4; dc->npc = dc->pc + 4; } else { dc->pc = dc->npc; dc->npc = dc->pc + 4; } } else if (cond == 0x8) { /* unconditional taken */ if (a) { dc->pc = target; dc->npc = dc->pc + 4; } else { dc->pc = dc->npc; dc->npc = target; tcg_gen_mov_tl(cpu_pc, cpu_npc); } } else { flush_cond(dc, r_cond); gen_cond(r_cond, cc, cond, dc); if (a) { gen_branch_a(dc, target, dc->npc, r_cond); dc->is_br = 1; } else { dc->pc = dc->npc; dc->jump_pc[0] = target; dc->jump_pc[1] = dc->npc + 4; dc->npc = JUMP_PC; } } }
false
qemu
548f66db33b91bf305c4e5228bb29585701ab58d
5,374
static void prodsum(float *tgt, float *src, int len, int n) { unsigned int x; float *p1, *p2; double sum; while (n >= 0) { p1 = (p2 = src) - n; for (sum=0, x=len; x--; sum += (*p1++) * (*p2++)); tgt[n--] = sum; } }
false
FFmpeg
69c23e6f33c38ebc03ce7f51fcb963deaff7383b
5,375
void cpu_tlb_update_dirty(CPUState *env) { int i; for(i = 0; i < CPU_TLB_SIZE; i++) tlb_update_dirty(&env->tlb_table[0][i]); for(i = 0; i < CPU_TLB_SIZE; i++) tlb_update_dirty(&env->tlb_table[1][i]); #if (NB_MMU_MODES >= 3) for(i = 0; i < CPU_TLB_SIZE; i++) tlb_update_dirty(&env->tlb_table[2][i]); #endif #if (NB_MMU_MODES >= 4) for(i = 0; i < CPU_TLB_SIZE; i++) tlb_update_dirty(&env->tlb_table[3][i]); #endif #if (NB_MMU_MODES >= 5) for(i = 0; i < CPU_TLB_SIZE; i++) tlb_update_dirty(&env->tlb_table[4][i]); #endif }
false
qemu
cfde4bd93100c58c0bfaed76deefb144caac488f
5,376
long do_rt_sigreturn(CPUMIPSState *env) { struct target_rt_sigframe *frame; abi_ulong frame_addr; sigset_t blocked; #if defined(DEBUG_SIGNAL) fprintf(stderr, "do_rt_sigreturn\n"); #endif frame_addr = env->active_tc.gpr[29]; if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) goto badframe; target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask); sigprocmask(SIG_SETMASK, &blocked, NULL); if (restore_sigcontext(env, &frame->rs_uc.tuc_mcontext)) goto badframe; if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe, rs_uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT) goto badframe; env->active_tc.PC = env->CP0_EPC; mips_set_hflags_isa_mode_from_pc(env); /* I am not sure this is right, but it seems to work * maybe a problem with nested signals ? */ env->CP0_EPC = 0; return -TARGET_QEMU_ESIGRETURN; badframe: force_sig(TARGET_SIGSEGV/*, current*/); return 0; }
false
qemu
1c275925bfbbc2de84a8f0e09d1dd70bbefb6da3
5,377
static void spr_read_decr (DisasContext *ctx, int gprn, int sprn) { if (use_icount) { gen_io_start(); } gen_helper_load_decr(cpu_gpr[gprn], cpu_env); if (use_icount) { gen_io_end(); gen_stop_exception(ctx); } }
false
qemu
bd79255d2571a3c68820117caf94ea9afe1d527e
5,378
bool memory_region_is_logging(MemoryRegion *mr) { return mr->dirty_log_mask; }
false
qemu
2d1a35bef0ed96b3f23535e459c552414ccdbafd
5,382
static void mv88w8618_eth_write(void *opaque, target_phys_addr_t offset, uint64_t value, unsigned size) { mv88w8618_eth_state *s = opaque; switch (offset) { case MP_ETH_SMIR: s->smir = value; break; case MP_ETH_PCXR: s->vlan_header = ((value >> MP_ETH_PCXR_2BSM_BIT) & 1) * 2; break; case MP_ETH_SDCMR: if (value & MP_ETH_CMD_TXHI) { eth_send(s, 1); } if (value & MP_ETH_CMD_TXLO) { eth_send(s, 0); } if (value & (MP_ETH_CMD_TXHI | MP_ETH_CMD_TXLO) && s->icr & s->imr) { qemu_irq_raise(s->irq); } break; case MP_ETH_ICR: s->icr &= value; break; case MP_ETH_IMR: s->imr = value; if (s->icr & s->imr) { qemu_irq_raise(s->irq); } break; case MP_ETH_FRDP0 ... MP_ETH_FRDP3: s->frx_queue[(offset - MP_ETH_FRDP0)/4] = value; break; case MP_ETH_CRDP0 ... MP_ETH_CRDP3: s->rx_queue[(offset - MP_ETH_CRDP0)/4] = s->cur_rx[(offset - MP_ETH_CRDP0)/4] = value; break; case MP_ETH_CTDP0 ... MP_ETH_CTDP3: s->tx_queue[(offset - MP_ETH_CTDP0)/4] = value; break; } }
false
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
5,383
softusb_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { MilkymistSoftUsbState *s = opaque; trace_milkymist_softusb_memory_write(addr, value); addr >>= 2; switch (addr) { case R_CTRL: s->regs[addr] = value; break; default: error_report("milkymist_softusb: write access to unknown register 0x" TARGET_FMT_plx, addr << 2); break; } }
false
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
5,384
void r4k_invalidate_tlb (CPUState *env, int idx, int use_extra) { r4k_tlb_t *tlb; target_ulong addr; target_ulong end; uint8_t ASID = env->CP0_EntryHi & 0xFF; target_ulong mask; tlb = &env->tlb->mmu.r4k.tlb[idx]; /* The qemu TLB is flushed when the ASID changes, so no need to flush these entries again. */ if (tlb->G == 0 && tlb->ASID != ASID) { return; } if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) { /* For tlbwr, we can shadow the discarded entry into a new (fake) TLB entry, as long as the guest can not tell that it's there. */ env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb; env->tlb->tlb_in_use++; return; } /* 1k pages are not supported. */ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); if (tlb->V0) { addr = tlb->VPN & ~mask; #if defined(TARGET_MIPS64) if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) { addr |= 0x3FFFFF0000000000ULL; } #endif end = addr | (mask >> 1); while (addr < end) { tlb_flush_page (env, addr); addr += TARGET_PAGE_SIZE; } } if (tlb->V1) { addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1); #if defined(TARGET_MIPS64) if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) { addr |= 0x3FFFFF0000000000ULL; } #endif end = addr | mask; while (addr < end) { tlb_flush_page (env, addr); addr += TARGET_PAGE_SIZE; } } }
false
qemu
53715e48b0cc274f577723f5e6aa2cf2cd72414b
5,385
static int vc1_decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size) { VC1Context *v = avctx->priv_data; MpegEncContext *s = &v->s; AVFrame *pict = data; uint8_t *buf2 = NULL; /* no supplementary picture */ if (buf_size == 0) { /* special case for last picture */ if (s->low_delay==0 && s->next_picture_ptr) { *pict= *(AVFrame*)s->next_picture_ptr; s->next_picture_ptr= NULL; *data_size = sizeof(AVFrame); } return 0; } //we need to set current_picture_ptr before reading the header, otherwise we cant store anyting im there if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){ int i= ff_find_unused_picture(s, 0); s->current_picture_ptr= &s->picture[i]; } avctx->has_b_frames= !s->low_delay; //for advanced profile we need to unescape buffer if (avctx->codec_id == CODEC_ID_VC1) { int i, buf_size2; buf2 = av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE); buf_size2 = 0; for(i = 0; i < buf_size; i++) { if(buf[i] == 3 && i >= 2 && !buf[i-1] && !buf[i-2] && i < buf_size-1 && buf[i+1] < 4) { buf2[buf_size2++] = buf[i+1]; i++; } else buf2[buf_size2++] = buf[i]; } init_get_bits(&s->gb, buf2, buf_size2*8); } else init_get_bits(&s->gb, buf, buf_size*8); // do parse frame header if(v->profile < PROFILE_ADVANCED) { if(vc1_parse_frame_header(v, &s->gb) == -1) { if(buf2)av_free(buf2); return -1; } } else { if(vc1_parse_frame_header_adv(v, &s->gb) == -1) { if(buf2)av_free(buf2); return -1; } } if(s->pict_type != I_TYPE && !v->res_rtm_flag){ if(buf2)av_free(buf2); return -1; } // for hurry_up==5 s->current_picture.pict_type= s->pict_type; s->current_picture.key_frame= s->pict_type == I_TYPE; /* skip B-frames if we don't have reference frames */ if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)){ if(buf2)av_free(buf2); return -1;//buf_size; } /* skip b frames if we are in a hurry */ if(avctx->hurry_up && s->pict_type==B_TYPE) return -1;//buf_size; if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE) || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE) || avctx->skip_frame >= AVDISCARD_ALL) { if(buf2)av_free(buf2); return buf_size; } /* skip everything if we are in a hurry>=5 */ if(avctx->hurry_up>=5) { if(buf2)av_free(buf2); return -1;//buf_size; } if(s->next_p_frame_damaged){ if(s->pict_type==B_TYPE) return buf_size; else s->next_p_frame_damaged=0; } if(MPV_frame_start(s, avctx) < 0) { if(buf2)av_free(buf2); return -1; } ff_er_frame_start(s); v->bits = buf_size * 8; vc1_decode_blocks(v); //av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), buf_size*8); // if(get_bits_count(&s->gb) > buf_size * 8) // return -1; ff_er_frame_end(s); MPV_frame_end(s); assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type); assert(s->current_picture.pict_type == s->pict_type); if (s->pict_type == B_TYPE || s->low_delay) { *pict= *(AVFrame*)s->current_picture_ptr; } else if (s->last_picture_ptr != NULL) { *pict= *(AVFrame*)s->last_picture_ptr; } if(s->last_picture_ptr || s->low_delay){ *data_size = sizeof(AVFrame); ff_print_debug_info(s, pict); } /* Return the Picture timestamp as the frame number */ /* we substract 1 because it is added on utils.c */ avctx->frame_number = s->picture_number - 1; if(buf2)av_free(buf2); return buf_size; }
false
FFmpeg
34a8dcd031d637273cdea021e5a79cf720c4c51c
5,387
void qmp_guest_set_user_password(const char *username, const char *password, bool crypted, Error **errp) { NET_API_STATUS nas; char *rawpasswddata = NULL; size_t rawpasswdlen; wchar_t *user, *wpass; USER_INFO_1003 pi1003 = { 0, }; if (crypted) { error_setg(errp, QERR_UNSUPPORTED); return; } rawpasswddata = (char *)g_base64_decode(password, &rawpasswdlen); rawpasswddata = g_renew(char, rawpasswddata, rawpasswdlen + 1); rawpasswddata[rawpasswdlen] = '\0'; user = g_utf8_to_utf16(username, -1, NULL, NULL, NULL); wpass = g_utf8_to_utf16(rawpasswddata, -1, NULL, NULL, NULL); pi1003.usri1003_password = wpass; nas = NetUserSetInfo(NULL, user, 1003, (LPBYTE)&pi1003, NULL); if (nas != NERR_Success) { gchar *msg = get_net_error_message(nas); error_setg(errp, "failed to set password: %s", msg); g_free(msg); } g_free(user); g_free(wpass); g_free(rawpasswddata); }
false
qemu
920639cab0fe28d003c90b53bd8b66e8fb333bdd
5,388
static void cpu_exec_nocache(CPUState *cpu, int max_cycles, TranslationBlock *orig_tb, bool ignore_icount) { TranslationBlock *tb; /* Should never happen. We only end up here when an existing TB is too long. */ if (max_cycles > CF_COUNT_MASK) max_cycles = CF_COUNT_MASK; tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags, max_cycles | CF_NOCACHE | (ignore_icount ? CF_IGNORE_ICOUNT : 0)); tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb; cpu->current_tb = tb; /* execute the generated code */ trace_exec_tb_nocache(tb, tb->pc); cpu_tb_exec(cpu, tb); cpu->current_tb = NULL; tb_phys_invalidate(tb, -1); tb_free(tb); }
false
qemu
6f789be56d3f38e9214dafcfab3bf9be7191f370
5,389
static void virtio_scsi_migration_state_changed(Notifier *notifier, void *data) { VirtIOSCSI *s = container_of(notifier, VirtIOSCSI, migration_state_notifier); MigrationState *mig = data; if (migration_in_setup(mig)) { if (!s->dataplane_started) { return; } virtio_scsi_dataplane_stop(s); s->dataplane_disabled = true; } else if (migration_has_finished(mig) || migration_has_failed(mig)) { if (s->dataplane_started) { return; } bdrv_drain_all(); /* complete in-flight non-dataplane requests */ s->dataplane_disabled = false; } }
false
qemu
4be746345f13e99e468c60acbd3a355e8183e3ce
5,390
static void hpet_reset(void *opaque)
{
    HPETState *s = opaque;
    int i;
    static int count = 0;

    for (i = 0; i < HPET_NUM_TIMERS; i++) {
        HPETTimer *timer = &s->timer[i];
        hpet_del_timer(timer);
        timer->tn = i;
        timer->cmp = ~0ULL;
        timer->config = HPET_TN_PERIODIC_CAP | HPET_TN_SIZE_CAP;
        /* advertise availability of ioapic inti2 */
        timer->config |= 0x00000004ULL << 32;
        timer->state = s;
        timer->period = 0ULL;
        timer->wrap_flag = 0;
    }

    s->hpet_counter = 0ULL;
    s->hpet_offset = 0ULL;
    /* 64-bit main counter; 3 timers supported; LegacyReplacementRoute. */
    s->capability = 0x8086a201ULL;
    s->capability |= ((HPET_CLK_PERIOD) << 32);
    s->config = 0ULL;
    if (count > 0) {
        /* we don't enable pit when hpet_reset is first called (by hpet_init)
         * because hpet is taking over for pit here. On subsequent invocations,
         * hpet_reset is called due to system reset. At this point control must
         * be returned to pit until SW reenables hpet. */
        hpet_pit_enable();
    }
    count = 1;
}
false
qemu
7afbecc9efa64a88ab6194c2cf1d6feabd03d119
5,391
static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
}
false
qemu
c2b38b277a7882a592f4f2ec955084b2b756daaa
5,392
int inet_listen_opts(QemuOpts *opts, int port_offset) { struct addrinfo ai,*res,*e; const char *addr; char port[33]; char uaddr[INET6_ADDRSTRLEN+1]; char uport[33]; int slisten,rc,to,try_next; memset(&ai,0, sizeof(ai)); ai.ai_flags = AI_PASSIVE | AI_ADDRCONFIG; ai.ai_family = PF_UNSPEC; ai.ai_socktype = SOCK_STREAM; if ((qemu_opt_get(opts, "host") == NULL) || (qemu_opt_get(opts, "port") == NULL)) { fprintf(stderr, "%s: host and/or port not specified\n", __FUNCTION__); return -1; } pstrcpy(port, sizeof(port), qemu_opt_get(opts, "port")); addr = qemu_opt_get(opts, "host"); to = qemu_opt_get_number(opts, "to", 0); if (qemu_opt_get_bool(opts, "ipv4", 0)) ai.ai_family = PF_INET; if (qemu_opt_get_bool(opts, "ipv6", 0)) ai.ai_family = PF_INET6; /* lookup */ if (port_offset) snprintf(port, sizeof(port), "%d", atoi(port) + port_offset); rc = getaddrinfo(strlen(addr) ? addr : NULL, port, &ai, &res); if (rc != 0) { fprintf(stderr,"getaddrinfo(%s,%s): %s\n", addr, port, gai_strerror(rc)); return -1; } /* create socket + bind */ for (e = res; e != NULL; e = e->ai_next) { getnameinfo((struct sockaddr*)e->ai_addr,e->ai_addrlen, uaddr,INET6_ADDRSTRLEN,uport,32, NI_NUMERICHOST | NI_NUMERICSERV); slisten = qemu_socket(e->ai_family, e->ai_socktype, e->ai_protocol); if (slisten < 0) { fprintf(stderr,"%s: socket(%s): %s\n", __FUNCTION__, inet_strfamily(e->ai_family), strerror(errno)); continue; } setsockopt(slisten,SOL_SOCKET,SO_REUSEADDR,(void*)&on,sizeof(on)); #ifdef IPV6_V6ONLY if (e->ai_family == PF_INET6) { /* listen on both ipv4 and ipv6 */ setsockopt(slisten,IPPROTO_IPV6,IPV6_V6ONLY,(void*)&off, sizeof(off)); } #endif for (;;) { if (bind(slisten, e->ai_addr, e->ai_addrlen) == 0) { goto listen; } try_next = to && (inet_getport(e) <= to + port_offset); if (!try_next) fprintf(stderr,"%s: bind(%s,%s,%d): %s\n", __FUNCTION__, inet_strfamily(e->ai_family), uaddr, inet_getport(e), strerror(errno)); if (try_next) { inet_setport(e, inet_getport(e) + 1); continue; } break; } closesocket(slisten); } fprintf(stderr, "%s: FAILED\n", __FUNCTION__); freeaddrinfo(res); return -1; listen: if (listen(slisten,1) != 0) { perror("listen"); closesocket(slisten); freeaddrinfo(res); return -1; } snprintf(uport, sizeof(uport), "%d", inet_getport(e) - port_offset); qemu_opt_set(opts, "host", uaddr); qemu_opt_set(opts, "port", uport); qemu_opt_set(opts, "ipv6", (e->ai_family == PF_INET6) ? "on" : "off"); qemu_opt_set(opts, "ipv4", (e->ai_family != PF_INET6) ? "on" : "off"); freeaddrinfo(res); return slisten; }
false
qemu
877691f96f4ffba2dba45ba5556eacd53b77237b
5,393
static uint64_t iack_read(void *opaque, target_phys_addr_t addr,
                          unsigned size)
{
    return pic_read_irq(isa_pic);
}
false
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
5,394
static void sd_set_status(SDState *sd)
{
    switch (sd->state) {
    case sd_inactive_state:
        sd->mode = sd_inactive;
        break;
    case sd_idle_state:
    case sd_ready_state:
    case sd_identification_state:
        sd->mode = sd_card_identification_mode;
        break;
    case sd_standby_state:
    case sd_transfer_state:
    case sd_sendingdata_state:
    case sd_receivingdata_state:
    case sd_programming_state:
    case sd_disconnect_state:
        sd->mode = sd_data_transfer_mode;
        break;
    }

    sd->card_status &= ~CURRENT_STATE;
    sd->card_status |= sd->state << 9;
}
false
qemu
10a412dab3f54439ea3d60274eb41668f7d83bd2
5,395
void init_clocks(void)
{
    QEMUClockType type;
    for (type = 0; type < QEMU_CLOCK_MAX; type++) {
        qemu_clock_init(type);
    }

#ifdef CONFIG_PRCTL_PR_SET_TIMERSLACK
    prctl(PR_SET_TIMERSLACK, 1, 0, 0, 0);
#endif
}
false
qemu
c2b38b277a7882a592f4f2ec955084b2b756daaa
5,396
static void FUNC(sao_band_filter)(uint8_t *_dst, uint8_t *_src, ptrdiff_t stride, SAOParams *sao, int *borders, int width, int height, int c_idx, int class) { pixel *dst = (pixel *)_dst; pixel *src = (pixel *)_src; int offset_table[32] = { 0 }; int k, y, x; int chroma = !!c_idx; int shift = BIT_DEPTH - 5; int *sao_offset_val = sao->offset_val[c_idx]; int sao_left_class = sao->band_position[c_idx]; int init_y = 0, init_x = 0; stride /= sizeof(pixel); switch (class) { case 0: if (!borders[2]) width -= (8 >> chroma) + 2; if (!borders[3]) height -= (4 >> chroma) + 2; break; case 1: init_y = -(4 >> chroma) - 2; if (!borders[2]) width -= (8 >> chroma) + 2; height = (4 >> chroma) + 2; break; case 2: init_x = -(8 >> chroma) - 2; width = (8 >> chroma) + 2; if (!borders[3]) height -= (4 >> chroma) + 2; break; case 3: init_y = -(4 >> chroma) - 2; init_x = -(8 >> chroma) - 2; width = (8 >> chroma) + 2; height = (4 >> chroma) + 2; break; } dst = dst + (init_y * stride + init_x); src = src + (init_y * stride + init_x); for (k = 0; k < 4; k++) offset_table[(k + sao_left_class) & 31] = sao_offset_val[k + 1]; for (y = 0; y < height; y++) { for (x = 0; x < width; x++) dst[x] = av_clip_pixel(src[x] + offset_table[av_clip_pixel(src[x] >> shift)]); dst += stride; src += stride; } }
false
FFmpeg
5856bca360c5bc3e340a357d91b1f993c80a7bea
5,397
static void vhost_scsi_realize(DeviceState *dev, Error **errp) { VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev); VHostSCSI *s = VHOST_SCSI(dev); Error *err = NULL; int vhostfd = -1; int ret; if (!vs->conf.wwpn) { error_setg(errp, "vhost-scsi: missing wwpn"); return; } if (vs->conf.vhostfd) { vhostfd = monitor_handle_fd_param(cur_mon, vs->conf.vhostfd); if (vhostfd == -1) { error_setg(errp, "vhost-scsi: unable to parse vhostfd"); return; } } else { vhostfd = open("/dev/vhost-scsi", O_RDWR); if (vhostfd < 0) { error_setg(errp, "vhost-scsi: open vhost char device failed: %s", strerror(errno)); return; } } virtio_scsi_common_realize(dev, &err, vhost_dummy_handle_output, vhost_dummy_handle_output, vhost_dummy_handle_output); if (err != NULL) { error_propagate(errp, err); return; } s->dev.nvqs = VHOST_SCSI_VQ_NUM_FIXED + vs->conf.num_queues; s->dev.vqs = g_new(struct vhost_virtqueue, s->dev.nvqs); s->dev.vq_index = 0; s->dev.backend_features = 0; ret = vhost_dev_init(&s->dev, (void *)(uintptr_t)vhostfd, VHOST_BACKEND_TYPE_KERNEL, true); if (ret < 0) { error_setg(errp, "vhost-scsi: vhost initialization failed: %s", strerror(-ret)); return; } error_setg(&s->migration_blocker, "vhost-scsi does not support migration"); migrate_add_blocker(s->migration_blocker); }
true
qemu
b19ca188022d720e6cdf87c43c27cb68bac32f6a
5,398
static void update_refcount_discard(BlockDriverState *bs,
                                    uint64_t offset, uint64_t length)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *p, *next;

    QTAILQ_FOREACH(d, &s->discards, next) {
        uint64_t new_start = MIN(offset, d->offset);
        uint64_t new_end = MAX(offset + length, d->offset + d->bytes);

        if (new_end - new_start <= length + d->bytes) {
            /* There can't be any overlap, areas ending up here have no
             * references any more and therefore shouldn't get freed another
             * time. */
            assert(d->bytes + length == new_end - new_start);
            d->offset = new_start;
            d->bytes = new_end - new_start;
            goto found;
        }
    }

    d = g_malloc(sizeof(*d));
    *d = (Qcow2DiscardRegion) {
        .bs = bs,
        .offset = offset,
        .bytes = length,
    };
    QTAILQ_INSERT_TAIL(&s->discards, d, next);

found:
    /* Merge discard requests if they are adjacent now */
    QTAILQ_FOREACH_SAFE(p, &s->discards, next, next) {
        if (p == d
            || p->offset > d->offset + d->bytes
            || d->offset > p->offset + p->bytes)
        {
            continue;
        }

        /* Still no overlap possible */
        assert(p->offset == d->offset + d->bytes
            || d->offset == p->offset + p->bytes);

        QTAILQ_REMOVE(&s->discards, p, next);
        d->offset = MIN(d->offset, p->offset);
        d->bytes += p->bytes;
    }
}
true
qemu
d8bb71b6227366c188595b91c24a58c9b06e46dd
5,399
static void qcow2_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    g_free(s->l1_table);
    /* else pre-write overlap checks in cache_destroy may crash */
    s->l1_table = NULL;

    if (!(bs->open_flags & BDRV_O_INCOMING)) {
        qcow2_cache_flush(bs, s->l2_table_cache);
        qcow2_cache_flush(bs, s->refcount_block_cache);
        qcow2_mark_clean(bs);
    }

    qcow2_cache_destroy(bs, s->l2_table_cache);
    qcow2_cache_destroy(bs, s->refcount_block_cache);

    g_free(s->unknown_header_fields);
    cleanup_unknown_header_ext(bs);

    g_free(s->cluster_cache);
    qemu_vfree(s->cluster_data);
    qcow2_refcount_close(bs);
    qcow2_free_snapshots(bs);
}
true
qemu
de82815db1c89da058b7fb941dab137d6d9ab738
5,400
static void pred4x4_vertical_vp8_c(uint8_t *src, const uint8_t *topright, int stride){
    const int lt= src[-1-1*stride];
    LOAD_TOP_EDGE
    LOAD_TOP_RIGHT_EDGE

    uint32_t v = PACK_4U8((lt + 2*t0 + t1 + 2) >> 2,
                          (t0 + 2*t1 + t2 + 2) >> 2,
                          (t1 + 2*t2 + t3 + 2) >> 2,
                          (t2 + 2*t3 + t4 + 2) >> 2);

    AV_WN32A(src+0*stride, v);
    AV_WN32A(src+1*stride, v);
    AV_WN32A(src+2*stride, v);
    AV_WN32A(src+3*stride, v);
}
true
FFmpeg
60f10e0ad37418cc697765d85b0bc22db70f726a
5,401
static void end_frame(AVFilterLink *link)
{
    CropContext *crop = link->dst->priv;

    crop->var_values[N] += 1.0;

    avfilter_unref_buffer(link->cur_buf);
    avfilter_end_frame(link->dst->outputs[0]);
}
true
FFmpeg
1afab338575810acc5eb75c17c4adfb73504de10
5,404
static void vfio_listener_release(VFIOContainer *container)
{
    memory_listener_unregister(&container->iommu_data.listener);
}
true
qemu
87ca1f77b1c406137fe36ab73b2dc91fb75f8d0a
5,405
static int coroutine_fn nfs_co_writev(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      QEMUIOVector *iov)
{
    NFSClient *client = bs->opaque;
    NFSRPC task;
    char *buf = NULL;

    nfs_co_init_task(client, &task);

    buf = g_malloc(nb_sectors * BDRV_SECTOR_SIZE);
    qemu_iovec_to_buf(iov, 0, buf, nb_sectors * BDRV_SECTOR_SIZE);

    if (nfs_pwrite_async(client->context, client->fh,
                         sector_num * BDRV_SECTOR_SIZE,
                         nb_sectors * BDRV_SECTOR_SIZE,
                         buf, nfs_co_generic_cb, &task) != 0) {
        g_free(buf);
        return -ENOMEM;
    }

    while (!task.complete) {
        nfs_set_events(client);
        qemu_coroutine_yield();
    }

    g_free(buf);

    if (task.ret != nb_sectors * BDRV_SECTOR_SIZE) {
        return task.ret < 0 ? task.ret : -EIO;
    }

    return 0;
}
true
qemu
2347dd7b6841c1543ceb49cb232d596eb5dd1ca3
5,406
static void rocker_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = pci_rocker_init;
    k->exit = pci_rocker_uninit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT;
    k->device_id = PCI_DEVICE_ID_REDHAT_ROCKER;
    k->revision = ROCKER_PCI_REVISION;
    k->class_id = PCI_CLASS_NETWORK_OTHER;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->desc = "Rocker Switch";
    dc->reset = rocker_reset;
    dc->props = rocker_properties;
    dc->vmsd = &rocker_vmsd;
}
true
qemu
0c8f86ea98945678622c6e4b070c4218a53a0d19
5,410
static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev, uint32_t node, Error **errp) { Error *local_err = NULL; sPAPRMachineState *ms = SPAPR_MACHINE(hotplug_dev); PCDIMMDevice *dimm = PC_DIMM(dev); PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm); MemoryRegion *mr = ddc->get_memory_region(dimm); uint64_t align = memory_region_get_alignment(mr); uint64_t size = memory_region_size(mr); uint64_t addr; char *mem_dev; if (size % SPAPR_MEMORY_BLOCK_SIZE) { error_setg(&local_err, "Hotplugged memory size must be a multiple of " "%lld MB", SPAPR_MEMORY_BLOCK_SIZE/M_BYTE); pc_dimm_memory_plug(dev, &ms->hotplug_memory, mr, align, &local_err); if (local_err) { addr = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP, &local_err); if (local_err) { pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr); spapr_add_lmbs(dev, addr, size, node, spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT), &error_abort); out: error_propagate(errp, local_err);
true
qemu
df58713396f8b2deb923e39c00b10744c5c63909
5,411
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors)
{
    BlockDriver *drv = bs->drv;
    if (!bs->drv)
        return -ENOMEDIUM;
    if (bs->read_only)
        return -EACCES;
    if (bdrv_check_request(bs, sector_num, nb_sectors))
        return -EIO;

    return drv->bdrv_write(bs, sector_num, buf, nb_sectors);
}
true
qemu
7cd1e32a860895ccca89eb90a0226efbcd969b55
5,412
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier) { trace_scsi_req_cancel(req->dev->id, req->lun, req->tag); if (notifier) { notifier_list_add(&req->cancel_notifiers, notifier); scsi_req_ref(req); scsi_req_dequeue(req); req->io_canceled = true; if (req->aiocb) { blk_aio_cancel_async(req->aiocb); } else { scsi_req_cancel_complete(req);
true
qemu
3daa41078aedf227ec98b0d1c9d56b77b6d20153
5,413
static int alac_decode_frame(AVCodecContext *avctx, void *outbuffer, int *outputsize, AVPacket *avpkt) { const uint8_t *inbuffer = avpkt->data; int input_buffer_size = avpkt->size; ALACContext *alac = avctx->priv_data; int channels; unsigned int outputsamples; int hassize; unsigned int readsamplesize; int isnotcompressed; uint8_t interlacing_shift; uint8_t interlacing_leftweight; /* short-circuit null buffers */ if (!inbuffer || !input_buffer_size) return input_buffer_size; /* initialize from the extradata */ if (!alac->context_initialized) { if (alac->avctx->extradata_size != ALAC_EXTRADATA_SIZE) { av_log(avctx, AV_LOG_ERROR, "alac: expected %d extradata bytes\n", ALAC_EXTRADATA_SIZE); return input_buffer_size; } if (alac_set_info(alac)) { av_log(avctx, AV_LOG_ERROR, "alac: set_info failed\n"); return input_buffer_size; } alac->context_initialized = 1; } init_get_bits(&alac->gb, inbuffer, input_buffer_size * 8); channels = get_bits(&alac->gb, 3) + 1; if (channels > MAX_CHANNELS) { av_log(avctx, AV_LOG_ERROR, "channels > %d not supported\n", MAX_CHANNELS); return input_buffer_size; } /* 2^result = something to do with output waiting. * perhaps matters if we read > 1 frame in a pass? */ skip_bits(&alac->gb, 4); skip_bits(&alac->gb, 12); /* unknown, skip 12 bits */ /* the output sample size is stored soon */ hassize = get_bits1(&alac->gb); alac->wasted_bits = get_bits(&alac->gb, 2) << 3; /* whether the frame is compressed */ isnotcompressed = get_bits1(&alac->gb); if (hassize) { /* now read the number of samples as a 32bit integer */ outputsamples = get_bits_long(&alac->gb, 32); if(outputsamples > alac->setinfo_max_samples_per_frame){ av_log(avctx, AV_LOG_ERROR, "outputsamples %d > %d\n", outputsamples, alac->setinfo_max_samples_per_frame); return -1; } } else outputsamples = alac->setinfo_max_samples_per_frame; switch (alac->setinfo_sample_size) { case 16: avctx->sample_fmt = SAMPLE_FMT_S16; alac->bytespersample = channels << 1; break; case 24: avctx->sample_fmt = SAMPLE_FMT_S32; alac->bytespersample = channels << 2; break; default: av_log(avctx, AV_LOG_ERROR, "Sample depth %d is not supported.\n", alac->setinfo_sample_size); return -1; } if(outputsamples > *outputsize / alac->bytespersample){ av_log(avctx, AV_LOG_ERROR, "sample buffer too small\n"); return -1; } *outputsize = outputsamples * alac->bytespersample; readsamplesize = alac->setinfo_sample_size - (alac->wasted_bits) + channels - 1; if (readsamplesize > MIN_CACHE_BITS) { av_log(avctx, AV_LOG_ERROR, "readsamplesize too big (%d)\n", readsamplesize); return -1; } if (!isnotcompressed) { /* so it is compressed */ int16_t predictor_coef_table[MAX_CHANNELS][32]; int predictor_coef_num[MAX_CHANNELS]; int prediction_type[MAX_CHANNELS]; int prediction_quantitization[MAX_CHANNELS]; int ricemodifier[MAX_CHANNELS]; int i, chan; interlacing_shift = get_bits(&alac->gb, 8); interlacing_leftweight = get_bits(&alac->gb, 8); for (chan = 0; chan < channels; chan++) { prediction_type[chan] = get_bits(&alac->gb, 4); prediction_quantitization[chan] = get_bits(&alac->gb, 4); ricemodifier[chan] = get_bits(&alac->gb, 3); predictor_coef_num[chan] = get_bits(&alac->gb, 5); /* read the predictor table */ for (i = 0; i < predictor_coef_num[chan]; i++) predictor_coef_table[chan][i] = (int16_t)get_bits(&alac->gb, 16); } if (alac->wasted_bits) { int i, ch; for (i = 0; i < outputsamples; i++) { for (ch = 0; ch < channels; ch++) alac->wasted_bits_buffer[ch][i] = get_bits(&alac->gb, alac->wasted_bits); } } for (chan = 0; chan < channels; chan++) { 
bastardized_rice_decompress(alac, alac->predicterror_buffer[chan], outputsamples, readsamplesize, alac->setinfo_rice_initialhistory, alac->setinfo_rice_kmodifier, ricemodifier[chan] * alac->setinfo_rice_historymult / 4, (1 << alac->setinfo_rice_kmodifier) - 1); if (prediction_type[chan] == 0) { /* adaptive fir */ predictor_decompress_fir_adapt(alac->predicterror_buffer[chan], alac->outputsamples_buffer[chan], outputsamples, readsamplesize, predictor_coef_table[chan], predictor_coef_num[chan], prediction_quantitization[chan]); } else { av_log(avctx, AV_LOG_ERROR, "FIXME: unhandled prediction type: %i\n", prediction_type[chan]); /* I think the only other prediction type (or perhaps this is * just a boolean?) runs adaptive fir twice.. like: * predictor_decompress_fir_adapt(predictor_error, tempout, ...) * predictor_decompress_fir_adapt(predictor_error, outputsamples ...) * little strange.. */ } } } else { /* not compressed, easy case */ int i, chan; if (alac->setinfo_sample_size <= 16) { for (i = 0; i < outputsamples; i++) for (chan = 0; chan < channels; chan++) { int32_t audiobits; audiobits = get_sbits_long(&alac->gb, alac->setinfo_sample_size); alac->outputsamples_buffer[chan][i] = audiobits; } } else { for (i = 0; i < outputsamples; i++) { for (chan = 0; chan < channels; chan++) { alac->outputsamples_buffer[chan][i] = get_bits(&alac->gb, alac->setinfo_sample_size); alac->outputsamples_buffer[chan][i] = sign_extend(alac->outputsamples_buffer[chan][i], alac->setinfo_sample_size); } } } alac->wasted_bits = 0; interlacing_shift = 0; interlacing_leftweight = 0; } if (get_bits(&alac->gb, 3) != 7) av_log(avctx, AV_LOG_ERROR, "Error : Wrong End Of Frame\n"); switch(alac->setinfo_sample_size) { case 16: if (channels == 2) { reconstruct_stereo_16(alac->outputsamples_buffer, (int16_t*)outbuffer, alac->numchannels, outputsamples, interlacing_shift, interlacing_leftweight); } else { int i; for (i = 0; i < outputsamples; i++) { ((int16_t*)outbuffer)[i] = alac->outputsamples_buffer[0][i]; } } break; case 24: if (channels == 2) { decorrelate_stereo_24(alac->outputsamples_buffer, outbuffer, alac->wasted_bits_buffer, alac->wasted_bits, alac->numchannels, outputsamples, interlacing_shift, interlacing_leftweight); } else { int i; for (i = 0; i < outputsamples; i++) ((int32_t *)outbuffer)[i] = alac->outputsamples_buffer[0][i] << 8; } break; } if (input_buffer_size * 8 - get_bits_count(&alac->gb) > 8) av_log(avctx, AV_LOG_ERROR, "Error : %d bits left\n", input_buffer_size * 8 - get_bits_count(&alac->gb)); return input_buffer_size; }
true
FFmpeg
313b52fbfff47ed934cdeccaebda9b3406466575
5,414
static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
{
    switch (buf[0]) {
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* Check if BYTCHK == 0x01 (data-out buffer contains data
         * for the number of logical blocks specified in the length
         * field).  For other modes, do not use scatter/gather operation. */
        if ((buf[1] & 6) != 2) {
            return false;
        }
        break;

    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        /* MMC writing cannot be done via DMA helpers, because it sometimes
         * involves writing beyond the maximum LBA or to negative LBA (lead-in).
         * We might use scsi_disk_dma_reqops as long as no writing commands are
         * seen, but performance usually isn't paramount on optical media.  So,
         * just make scsi-block operate the same as scsi-generic for them. */
        if (s->qdev.type != TYPE_ROM) {
            return false;
        }
        break;

    default:
        break;
    }

    return true;
}
true
qemu
166dbda7e131f7b6540f56c3234bb2f8b23d84c0
5,415
static void test_smram_lock(void)
{
    QPCIBus *pcibus;
    QPCIDevice *pcidev;
    QDict *response;

    pcibus = qpci_init_pc(NULL);
    g_assert(pcibus != NULL);

    pcidev = qpci_device_find(pcibus, 0);
    g_assert(pcidev != NULL);

    /* check open is settable */
    smram_set_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN, false);
    g_assert(smram_test_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN) == false);
    smram_set_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN, true);
    g_assert(smram_test_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN) == true);

    /* lock, check open is cleared & not settable */
    smram_set_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_LCK, true);
    g_assert(smram_test_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN) == false);
    smram_set_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN, true);
    g_assert(smram_test_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN) == false);

    /* reset */
    response = qmp("{'execute': 'system_reset', 'arguments': {} }");
    g_assert(response);
    g_assert(!qdict_haskey(response, "error"));
    QDECREF(response);

    /* check open is settable again */
    smram_set_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN, false);
    g_assert(smram_test_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN) == false);
    smram_set_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN, true);
    g_assert(smram_test_bit(pcidev, MCH_HOST_BRIDGE_SMRAM_D_OPEN) == true);
}
true
qemu
fb6faea888c1e54059aed7f87be93de623b346ee
5,417
void do_mullwo (void)
{
    int64_t res = (int64_t)Ts0 * (int64_t)Ts1;

    if (likely((int32_t)res == res)) {
        xer_ov = 0;
    } else {
        xer_ov = 1;
        xer_so = 1;
    }
    T0 = (int32_t)res;
}
true
qemu
d9bce9d99f4656ae0b0127f7472db9067b8f84ab
5,418
static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
{
    int ret;

    vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
                                    sizeof(unsigned long));
    ret = msix_init(&vdev->pdev, vdev->msix->entries,
                    vdev->bars[vdev->msix->table_bar].region.mem,
                    vdev->msix->table_bar, vdev->msix->table_offset,
                    vdev->bars[vdev->msix->pba_bar].region.mem,
                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos);
    if (ret < 0) {
        if (ret == -ENOTSUP) {
            return 0;
        }
        error_setg(errp, "msix_init failed");
        return ret;
    }

    /*
     * The PCI spec suggests that devices provide additional alignment for
     * MSI-X structures and avoid overlapping non-MSI-X related registers.
     * For an assigned device, this hopefully means that emulation of MSI-X
     * structures does not affect the performance of the device.  If devices
     * fail to provide that alignment, a significant performance penalty may
     * result, for instance Mellanox MT27500 VFs:
     * http://www.spinics.net/lists/kvm/msg125881.html
     *
     * The PBA is simply not that important for such a serious regression and
     * most drivers do not appear to look at it.  The solution for this is to
     * disable the PBA MemoryRegion unless it's being used.  We disable it
     * here and only enable it if a masked vector fires through QEMU.  As the
     * vector-use notifier is called, which occurs on unmask, we test whether
     * PBA emulation is needed and again disable if not.
     */
    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);

    return 0;
}
true
qemu
ee640c625e190a0c0e6b8966adc0e4720fb75200
5,419
static int libx265_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic, int *got_packet) { libx265Context *ctx = avctx->priv_data; x265_picture x265pic; x265_picture x265pic_out = { { 0 } }; x265_nal *nal; uint8_t *dst; int payload = 0; int nnal; int ret; int i; x265_picture_init(ctx->params, &x265pic); if (pic) { for (i = 0; i < 3; i++) { x265pic.planes[i] = pic->data[i]; x265pic.stride[i] = pic->linesize[i]; } x265pic.pts = pic->pts; x265pic.bitDepth = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth_minus1 + 1; x265pic.sliceType = pic->pict_type == AV_PICTURE_TYPE_I ? X265_TYPE_I : pic->pict_type == AV_PICTURE_TYPE_P ? X265_TYPE_P : pic->pict_type == AV_PICTURE_TYPE_B ? X265_TYPE_B : X265_TYPE_AUTO; } ret = x265_encoder_encode(ctx->encoder, &nal, &nnal, pic ? &x265pic : NULL, &x265pic_out); if (ret < 0) return AVERROR_UNKNOWN; if (!nnal) return 0; for (i = 0; i < nnal; i++) payload += nal[i].sizeBytes; ret = ff_alloc_packet(pkt, payload); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n"); return ret; } dst = pkt->data; for (i = 0; i < nnal; i++) { memcpy(dst, nal[i].payload, nal[i].sizeBytes); dst += nal[i].sizeBytes; if (is_keyframe(nal[i].type)) pkt->flags |= AV_PKT_FLAG_KEY; } pkt->pts = x265pic_out.pts; pkt->dts = x265pic_out.dts; switch (x265pic_out.sliceType) { case X265_TYPE_IDR: case X265_TYPE_I: avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I; break; case X265_TYPE_P: avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P; break; case X265_TYPE_B: avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B; break; } *got_packet = 1; return 0; }
true
FFmpeg
04070dbca0688ab1e24528ce5c135254a9a79c47
5,420
static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { CFHDContext *s = avctx->priv_data; GetByteContext gb; ThreadFrame frame = { .f = data }; AVFrame *pic = data; int ret = 0, i, j, planes, plane, got_buffer = 0; int16_t *coeff_data; s->coded_format = AV_PIX_FMT_YUV422P10; init_frame_defaults(s); planes = av_pix_fmt_count_planes(s->coded_format); bytestream2_init(&gb, avpkt->data, avpkt->size); while (bytestream2_get_bytes_left(&gb) > 4) { /* Bit weird but implement the tag parsing as the spec says */ uint16_t tagu = bytestream2_get_be16(&gb); int16_t tag = (int16_t)tagu; int8_t tag8 = (int8_t)(tagu >> 8); uint16_t abstag = abs(tag); int8_t abs_tag8 = abs(tag8); uint16_t data = bytestream2_get_be16(&gb); if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) { av_log(avctx, AV_LOG_DEBUG, "large len %x\n", ((tagu & 0xff) << 16) | data); } else if (tag == 20) { av_log(avctx, AV_LOG_DEBUG, "Width %"PRIu16"\n", data); s->coded_width = data; } else if (tag == 21) { av_log(avctx, AV_LOG_DEBUG, "Height %"PRIu16"\n", data); s->coded_height = data; } else if (tag == 101) { av_log(avctx, AV_LOG_DEBUG, "Bits per component: %"PRIu16"\n", data); s->bpc = data; } else if (tag == 12) { av_log(avctx, AV_LOG_DEBUG, "Channel Count: %"PRIu16"\n", data); s->channel_cnt = data; if (data > 4) { av_log(avctx, AV_LOG_ERROR, "Channel Count of %"PRIu16" is unsupported\n", data); ret = AVERROR_PATCHWELCOME; break; } } else if (tag == 14) { av_log(avctx, AV_LOG_DEBUG, "Subband Count: %"PRIu16"\n", data); if (data != SUBBAND_COUNT) { av_log(avctx, AV_LOG_ERROR, "Subband Count of %"PRIu16" is unsupported\n", data); ret = AVERROR_PATCHWELCOME; break; } } else if (tag == 62) { s->channel_num = data; av_log(avctx, AV_LOG_DEBUG, "Channel number %"PRIu16"\n", data); if (s->channel_num >= planes) { av_log(avctx, AV_LOG_ERROR, "Invalid channel number\n"); ret = AVERROR(EINVAL); break; } init_plane_defaults(s); } else if (tag == 48) { if (s->subband_num != 0 && data == 1) // hack s->level++; av_log(avctx, AV_LOG_DEBUG, "Subband number %"PRIu16"\n", data); s->subband_num = data; if (s->level >= DWT_LEVELS) { av_log(avctx, AV_LOG_ERROR, "Invalid level\n"); ret = AVERROR(EINVAL); break; } if (s->subband_num > 3) { av_log(avctx, AV_LOG_ERROR, "Invalid subband number\n"); ret = AVERROR(EINVAL); break; } } else if (tag == 51) { av_log(avctx, AV_LOG_DEBUG, "Subband number actual %"PRIu16"\n", data); s->subband_num_actual = data; if (s->subband_num_actual >= 10) { av_log(avctx, AV_LOG_ERROR, "Invalid subband number actual\n"); ret = AVERROR(EINVAL); break; } } else if (tag == 35) av_log(avctx, AV_LOG_DEBUG, "Lowpass precision bits: %"PRIu16"\n", data); else if (tag == 53) { s->quantisation = data; av_log(avctx, AV_LOG_DEBUG, "Quantisation: %"PRIu16"\n", data); } else if (tag == 109) { s->prescale_shift[0] = (data >> 0) & 0x7; s->prescale_shift[1] = (data >> 3) & 0x7; s->prescale_shift[2] = (data >> 6) & 0x7; av_log(avctx, AV_LOG_DEBUG, "Prescale shift (VC-5): %x\n", data); } else if (tag == 27) { s->plane[s->channel_num].band[0][0].width = data; s->plane[s->channel_num].band[0][0].stride = data; av_log(avctx, AV_LOG_DEBUG, "Lowpass width %"PRIu16"\n", data); if (data < 3 || data > s->plane[s->channel_num].band[0][0].a_width) { av_log(avctx, AV_LOG_ERROR, "Invalid lowpass width\n"); ret = AVERROR(EINVAL); break; } } else if (tag == 28) { s->plane[s->channel_num].band[0][0].height = data; av_log(avctx, AV_LOG_DEBUG, "Lowpass height %"PRIu16"\n", data); if (data < 3 || data > 
s->plane[s->channel_num].band[0][0].height) { av_log(avctx, AV_LOG_ERROR, "Invalid lowpass height\n"); ret = AVERROR(EINVAL); break; } } else if (tag == 1) av_log(avctx, AV_LOG_DEBUG, "Sample type? %"PRIu16"\n", data); else if (tag == 10) { if (data != 0) { avpriv_report_missing_feature(avctx, "Transform type of %"PRIu16, data); ret = AVERROR_PATCHWELCOME; break; } av_log(avctx, AV_LOG_DEBUG, "Transform-type? %"PRIu16"\n", data); } else if (abstag >= 0x4000 && abstag <= 0x40ff) { av_log(avctx, AV_LOG_DEBUG, "Small chunk length %d %s\n", data * 4, tag < 0 ? "optional" : "required"); bytestream2_skipu(&gb, data * 4); } else if (tag == 23) { av_log(avctx, AV_LOG_DEBUG, "Skip frame\n"); avpriv_report_missing_feature(avctx, "Skip frame"); ret = AVERROR_PATCHWELCOME; break; } else if (tag == 2) { av_log(avctx, AV_LOG_DEBUG, "tag=2 header - skipping %i tag/value pairs\n", data); if (data > bytestream2_get_bytes_left(&gb) / 4) { av_log(avctx, AV_LOG_ERROR, "too many tag/value pairs (%d)\n", data); ret = AVERROR_INVALIDDATA; break; } for (i = 0; i < data; i++) { uint16_t tag2 = bytestream2_get_be16(&gb); uint16_t val2 = bytestream2_get_be16(&gb); av_log(avctx, AV_LOG_DEBUG, "Tag/Value = %x %x\n", tag2, val2); } } else if (tag == 41) { s->plane[s->channel_num].band[s->level][s->subband_num].width = data; s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8); av_log(avctx, AV_LOG_DEBUG, "Highpass width %i channel %i level %i subband %i\n", data, s->channel_num, s->level, s->subband_num); if (data < 3) { av_log(avctx, AV_LOG_ERROR, "Invalid highpass width\n"); ret = AVERROR(EINVAL); break; } } else if (tag == 42) { s->plane[s->channel_num].band[s->level][s->subband_num].height = data; av_log(avctx, AV_LOG_DEBUG, "Highpass height %i\n", data); if (data < 3) { av_log(avctx, AV_LOG_ERROR, "Invalid highpass height\n"); ret = AVERROR(EINVAL); break; } } else if (tag == 49) { s->plane[s->channel_num].band[s->level][s->subband_num].width = data; s->plane[s->channel_num].band[s->level][s->subband_num].stride = FFALIGN(data, 8); av_log(avctx, AV_LOG_DEBUG, "Highpass width2 %i\n", data); if (data < 3) { av_log(avctx, AV_LOG_ERROR, "Invalid highpass width2\n"); ret = AVERROR(EINVAL); break; } } else if (tag == 50) { s->plane[s->channel_num].band[s->level][s->subband_num].height = data; av_log(avctx, AV_LOG_DEBUG, "Highpass height2 %i\n", data); if (data < 3) { av_log(avctx, AV_LOG_ERROR, "Invalid highpass height2\n"); ret = AVERROR(EINVAL); break; } } else if (tag == 71) { s->codebook = data; av_log(avctx, AV_LOG_DEBUG, "Codebook %i\n", s->codebook); } else if (tag == 72) { s->codebook = data; av_log(avctx, AV_LOG_DEBUG, "Other codebook? %i\n", s->codebook); } else if (tag == 70) { av_log(avctx, AV_LOG_DEBUG, "Subsampling or bit-depth flag? %i\n", data); s->bpc = data; if (!(s->bpc == 10 || s->bpc == 12)) { av_log(avctx, AV_LOG_ERROR, "Invalid bits per channel\n"); ret = AVERROR(EINVAL); break; } } else if (tag == 84) { av_log(avctx, AV_LOG_DEBUG, "Sample format? 
%i\n", data); if (data == 1) s->coded_format = AV_PIX_FMT_YUV422P10; else if (data == 3) s->coded_format = AV_PIX_FMT_GBRP12; else if (data == 4) s->coded_format = AV_PIX_FMT_GBRAP12; else { avpriv_report_missing_feature(avctx, "Sample format of %"PRIu16, data); ret = AVERROR_PATCHWELCOME; break; } planes = av_pix_fmt_count_planes(s->coded_format); } else av_log(avctx, AV_LOG_DEBUG, "Unknown tag %i data %x\n", tag, data); /* Some kind of end of header tag */ if (tag == 4 && data == 0x1a4a && s->coded_width && s->coded_height && s->coded_format != AV_PIX_FMT_NONE) { if (s->a_width != s->coded_width || s->a_height != s->coded_height || s->a_format != s->coded_format) { free_buffers(avctx); if ((ret = alloc_buffers(avctx)) < 0) { free_buffers(avctx); return ret; } } ret = ff_set_dimensions(avctx, s->coded_width, s->coded_height); if (ret < 0) return ret; frame.f->width = frame.f->height = 0; if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) return ret; s->coded_width = 0; s->coded_height = 0; s->coded_format = AV_PIX_FMT_NONE; got_buffer = 1; } coeff_data = s->plane[s->channel_num].subband[s->subband_num_actual]; /* Lowpass coefficients */ if (tag == 4 && data == 0xf0f && s->a_width && s->a_height) { int lowpass_height = s->plane[s->channel_num].band[0][0].height; int lowpass_width = s->plane[s->channel_num].band[0][0].width; int lowpass_a_height = s->plane[s->channel_num].band[0][0].a_height; int lowpass_a_width = s->plane[s->channel_num].band[0][0].a_width; if (!got_buffer) { av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n"); ret = AVERROR(EINVAL); goto end; } if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width || lowpass_a_width * lowpass_a_height * sizeof(int16_t) > bytestream2_get_bytes_left(&gb)) { av_log(avctx, AV_LOG_ERROR, "Too many lowpass coefficients\n"); ret = AVERROR(EINVAL); goto end; } av_log(avctx, AV_LOG_DEBUG, "Start of lowpass coeffs component %d height:%d, width:%d\n", s->channel_num, lowpass_height, lowpass_width); for (i = 0; i < lowpass_height; i++) { for (j = 0; j < lowpass_width; j++) coeff_data[j] = bytestream2_get_be16u(&gb); coeff_data += lowpass_width; } /* Align to mod-4 position to continue reading tags */ bytestream2_seek(&gb, bytestream2_tell(&gb) & 3, SEEK_CUR); /* Copy last line of coefficients if odd height */ if (lowpass_height & 1) { memcpy(&coeff_data[lowpass_height * lowpass_width], &coeff_data[(lowpass_height - 1) * lowpass_width], lowpass_width * sizeof(*coeff_data)); } av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n", lowpass_width * lowpass_height); } if (tag == 55 && s->subband_num_actual != 255 && s->a_width && s->a_height) { int highpass_height = s->plane[s->channel_num].band[s->level][s->subband_num].height; int highpass_width = s->plane[s->channel_num].band[s->level][s->subband_num].width; int highpass_a_width = s->plane[s->channel_num].band[s->level][s->subband_num].a_width; int highpass_a_height = s->plane[s->channel_num].band[s->level][s->subband_num].a_height; int highpass_stride = s->plane[s->channel_num].band[s->level][s->subband_num].stride; int expected = highpass_height * highpass_stride; int a_expected = highpass_a_height * highpass_a_width; int level, run, coeff; int count = 0, bytes; if (!got_buffer) { av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n"); ret = AVERROR(EINVAL); goto end; } if (highpass_height > highpass_a_height || highpass_width > highpass_a_width || a_expected < expected) { av_log(avctx, AV_LOG_ERROR, "Too many highpass coefficients\n"); ret = AVERROR(EINVAL); 
goto end; } av_log(avctx, AV_LOG_DEBUG, "Start subband coeffs plane %i level %i codebook %i expected %i\n", s->channel_num, s->level, s->codebook, expected); init_get_bits(&s->gb, gb.buffer, bytestream2_get_bytes_left(&gb) * 8); { OPEN_READER(re, &s->gb); if (!s->codebook) { while (1) { UPDATE_CACHE(re, &s->gb); GET_RL_VLC(level, run, re, &s->gb, s->table_9_rl_vlc, VLC_BITS, 3, 1); /* escape */ if (level == 64) break; count += run; if (count > expected) break; coeff = dequant_and_decompand(level, s->quantisation); for (i = 0; i < run; i++) *coeff_data++ = coeff; } } else { while (1) { UPDATE_CACHE(re, &s->gb); GET_RL_VLC(level, run, re, &s->gb, s->table_18_rl_vlc, VLC_BITS, 3, 1); /* escape */ if (level == 255 && run == 2) break; count += run; if (count > expected) break; coeff = dequant_and_decompand(level, s->quantisation); for (i = 0; i < run; i++) *coeff_data++ = coeff; } } CLOSE_READER(re, &s->gb); } if (count > expected) { av_log(avctx, AV_LOG_ERROR, "Escape codeword not found, probably corrupt data\n"); ret = AVERROR(EINVAL); goto end; } bytes = FFALIGN(FF_CEIL_RSHIFT(get_bits_count(&s->gb), 3), 4); if (bytes > bytestream2_get_bytes_left(&gb)) { av_log(avctx, AV_LOG_ERROR, "Bitstream overread error\n"); ret = AVERROR(EINVAL); goto end; } else bytestream2_seek(&gb, bytes, SEEK_CUR); av_log(avctx, AV_LOG_DEBUG, "End subband coeffs %i extra %i\n", count, count - expected); s->codebook = 0; /* Copy last line of coefficients if odd height */ if (highpass_height & 1) { memcpy(&coeff_data[highpass_height * highpass_stride], &coeff_data[(highpass_height - 1) * highpass_stride], highpass_stride * sizeof(*coeff_data)); } } } if (!s->a_width || !s->a_height || s->a_format == AV_PIX_FMT_NONE || s->coded_width || s->coded_height || s->coded_format != AV_PIX_FMT_NONE) { av_log(avctx, AV_LOG_ERROR, "Invalid dimensions\n"); ret = AVERROR(EINVAL); goto end; } if (!got_buffer) { av_log(avctx, AV_LOG_ERROR, "No end of header tag found\n"); ret = AVERROR(EINVAL); goto end; } planes = av_pix_fmt_count_planes(avctx->pix_fmt); for (plane = 0; plane < planes && !ret; plane++) { /* level 1 */ int lowpass_height = s->plane[plane].band[0][0].height; int lowpass_width = s->plane[plane].band[0][0].width; int highpass_stride = s->plane[plane].band[0][1].stride; int act_plane = plane == 1 ? 2 : plane == 2 ? 
1 : plane; int16_t *low, *high, *output, *dst; if (lowpass_height > s->plane[plane].band[0][0].a_height || lowpass_width > s->plane[plane].band[0][0].a_width || !highpass_stride || s->plane[plane].band[0][1].width > s->plane[plane].band[0][1].a_width) { av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n"); ret = AVERROR(EINVAL); goto end; } av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride); low = s->plane[plane].subband[0]; high = s->plane[plane].subband[2]; output = s->plane[plane].l_h[0]; for (i = 0; i < lowpass_width; i++) { vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height); low++; high++; output++; } low = s->plane[plane].subband[1]; high = s->plane[plane].subband[3]; output = s->plane[plane].l_h[1]; for (i = 0; i < lowpass_width; i++) { // note the stride of "low" is highpass_stride vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height); low++; high++; output++; } low = s->plane[plane].l_h[0]; high = s->plane[plane].l_h[1]; output = s->plane[plane].subband[0]; for (i = 0; i < lowpass_height * 2; i++) { horiz_filter(output, low, high, lowpass_width); low += lowpass_width; high += lowpass_width; output += lowpass_width * 2; } if (s->bpc == 12) { output = s->plane[plane].subband[0]; for (i = 0; i < lowpass_height * 2; i++) { for (j = 0; j < lowpass_width * 2; j++) output[j] <<= 2; output += lowpass_width * 2; } } /* level 2 */ lowpass_height = s->plane[plane].band[1][1].height; lowpass_width = s->plane[plane].band[1][1].width; highpass_stride = s->plane[plane].band[1][1].stride; if (lowpass_height > s->plane[plane].band[1][1].a_height || lowpass_width > s->plane[plane].band[1][1].a_width || !highpass_stride || s->plane[plane].band[1][1].width > s->plane[plane].band[1][1].a_width) { av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n"); ret = AVERROR(EINVAL); goto end; } av_log(avctx, AV_LOG_DEBUG, "Level 2 plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride); low = s->plane[plane].subband[0]; high = s->plane[plane].subband[5]; output = s->plane[plane].l_h[3]; for (i = 0; i < lowpass_width; i++) { vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height); low++; high++; output++; } low = s->plane[plane].subband[4]; high = s->plane[plane].subband[6]; output = s->plane[plane].l_h[4]; for (i = 0; i < lowpass_width; i++) { vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height); low++; high++; output++; } low = s->plane[plane].l_h[3]; high = s->plane[plane].l_h[4]; output = s->plane[plane].subband[0]; for (i = 0; i < lowpass_height * 2; i++) { horiz_filter(output, low, high, lowpass_width); low += lowpass_width; high += lowpass_width; output += lowpass_width * 2; } output = s->plane[plane].subband[0]; for (i = 0; i < lowpass_height * 2; i++) { for (j = 0; j < lowpass_width * 2; j++) output[j] <<= 2; output += lowpass_width * 2; } /* level 3 */ lowpass_height = s->plane[plane].band[2][1].height; lowpass_width = s->plane[plane].band[2][1].width; highpass_stride = s->plane[plane].band[2][1].stride; if (lowpass_height > s->plane[plane].band[2][1].a_height || lowpass_width > s->plane[plane].band[2][1].a_width || !highpass_stride || s->plane[plane].band[2][1].width > s->plane[plane].band[2][1].a_width) { av_log(avctx, AV_LOG_ERROR, "Invalid plane dimensions\n"); ret = AVERROR(EINVAL); goto end; } av_log(avctx, AV_LOG_DEBUG, "Level 3 
plane %i %i %i %i\n", plane, lowpass_height, lowpass_width, highpass_stride); low = s->plane[plane].subband[0]; high = s->plane[plane].subband[8]; output = s->plane[plane].l_h[6]; for (i = 0; i < lowpass_width; i++) { vert_filter(output, lowpass_width, low, lowpass_width, high, highpass_stride, lowpass_height); low++; high++; output++; } low = s->plane[plane].subband[7]; high = s->plane[plane].subband[9]; output = s->plane[plane].l_h[7]; for (i = 0; i < lowpass_width; i++) { vert_filter(output, lowpass_width, low, highpass_stride, high, highpass_stride, lowpass_height); low++; high++; output++; } dst = (int16_t *)pic->data[act_plane]; low = s->plane[plane].l_h[6]; high = s->plane[plane].l_h[7]; for (i = 0; i < lowpass_height * 2; i++) { horiz_filter_clip(dst, low, high, lowpass_width, s->bpc); low += lowpass_width; high += lowpass_width; dst += pic->linesize[act_plane] / 2; } } end: if (ret < 0) return ret; *got_frame = 1; return avpkt->size; }
true
FFmpeg
cd6f319a7470394044627d1bd900e21b9aca5f4a
5,422
static av_cold int dnxhd_decode_init_thread_copy(AVCodecContext *avctx)
{
    DNXHDContext *ctx = avctx->priv_data;

    // make sure VLC tables will be loaded when cid is parsed
    ctx->cid = -1;

    ctx->rows = av_mallocz_array(avctx->thread_count, sizeof(RowContext));
    if (!ctx->rows)
        return AVERROR(ENOMEM);

    return 0;
}
true
FFmpeg
f800d6508d7e8fbd8d9777b775d333a4f02112ef
5,423
void do_POWER_divo (void)
{
    int64_t tmp;

    if ((Ts0 == INT32_MIN && Ts1 == -1) || Ts1 == 0) {
        T0 = (long)((-1) * (T0 >> 31));
        env->spr[SPR_MQ] = 0;
        xer_ov = 1;
        xer_so = 1;
    } else {
        tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
        env->spr[SPR_MQ] = tmp % T1;
        tmp /= Ts1;
        if (tmp > (int64_t)INT32_MAX || tmp < (int64_t)INT32_MIN) {
            xer_ov = 1;
            xer_so = 1;
        } else {
            xer_ov = 0;
        }
        T0 = tmp;
    }
}
true
qemu
d9bce9d99f4656ae0b0127f7472db9067b8f84ab
5,424
void visit_type_uint64(Visitor *v, uint64_t *obj, const char *name,
                       Error **errp)
{
    int64_t value;

    if (!error_is_set(errp)) {
        if (v->type_uint64) {
            v->type_uint64(v, obj, name, errp);
        } else {
            value = *obj;
            v->type_int(v, &value, name, errp);
            *obj = value;
        }
    }
}
true
qemu
297a3646c2947ee64a6d42ca264039732c6218e0
5,425
SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter){ SwsContext *c; int i; int usesFilter; SwsFilter dummyFilter= {NULL, NULL, NULL, NULL}; #ifdef ARCH_X86 if(gCpuCaps.hasMMX) asm volatile("emms\n\t"::: "memory"); #endif if(swScale==NULL) globalInit(); /* avoid dupplicate Formats, so we dont need to check to much */ if(srcFormat==IMGFMT_IYUV) srcFormat=IMGFMT_I420; if(srcFormat==IMGFMT_Y8) srcFormat=IMGFMT_Y800; if(dstFormat==IMGFMT_Y8) dstFormat=IMGFMT_Y800; if(!isSupportedIn(srcFormat)) { fprintf(stderr, "swScaler: %s is not supported as input format\n", vo_format_name(srcFormat)); return NULL; } if(!isSupportedOut(dstFormat)) { fprintf(stderr, "swScaler: %s is not supported as output format\n", vo_format_name(dstFormat)); return NULL; } /* sanity check */ if(srcW<4 || srcH<1 || dstW<8 || dstH<1) //FIXME check if these are enough and try to lowwer them after fixing the relevant parts of the code { fprintf(stderr, "swScaler: %dx%d -> %dx%d is invalid scaling dimension\n", srcW, srcH, dstW, dstH); return NULL; } if(!dstFilter) dstFilter= &dummyFilter; if(!srcFilter) srcFilter= &dummyFilter; c= memalign(64, sizeof(SwsContext)); memset(c, 0, sizeof(SwsContext)); c->srcW= srcW; c->srcH= srcH; c->dstW= dstW; c->dstH= dstH; c->lumXInc= ((srcW<<16) + (dstW>>1))/dstW; c->lumYInc= ((srcH<<16) + (dstH>>1))/dstH; c->flags= flags; c->dstFormat= dstFormat; c->srcFormat= srcFormat; usesFilter=0; if(dstFilter->lumV!=NULL && dstFilter->lumV->length>1) usesFilter=1; if(dstFilter->lumH!=NULL && dstFilter->lumH->length>1) usesFilter=1; if(dstFilter->chrV!=NULL && dstFilter->chrV->length>1) usesFilter=1; if(dstFilter->chrH!=NULL && dstFilter->chrH->length>1) usesFilter=1; if(srcFilter->lumV!=NULL && srcFilter->lumV->length>1) usesFilter=1; if(srcFilter->lumH!=NULL && srcFilter->lumH->length>1) usesFilter=1; if(srcFilter->chrV!=NULL && srcFilter->chrV->length>1) usesFilter=1; if(srcFilter->chrH!=NULL && srcFilter->chrH->length>1) usesFilter=1; /* unscaled special Cases */ if(srcW==dstW && srcH==dstH && !usesFilter) { /* yuv2bgr */ if(isPlanarYUV(srcFormat) && isBGR(dstFormat)) { // FIXME multiple yuv2rgb converters wont work that way cuz that thing is full of globals&statics #ifdef WORDS_BIGENDIAN if(dstFormat==IMGFMT_BGR32) yuv2rgb_init( dstFormat&0xFF /* =bpp */, MODE_BGR); else yuv2rgb_init( dstFormat&0xFF /* =bpp */, MODE_RGB); #else yuv2rgb_init( dstFormat&0xFF /* =bpp */, MODE_RGB); #endif c->swScale= planarYuvToBgr; if(flags&SWS_PRINT_INFO) printf("SwScaler: using unscaled %s -> %s special converter\n", vo_format_name(srcFormat), vo_format_name(dstFormat)); return c; } /* simple copy */ if(srcFormat == dstFormat || (isPlanarYUV(srcFormat) && isPlanarYUV(dstFormat))) { c->swScale= simpleCopy; if(flags&SWS_PRINT_INFO) printf("SwScaler: using unscaled %s -> %s special converter\n", vo_format_name(srcFormat), vo_format_name(dstFormat)); return c; } /* bgr32to24 & rgb32to24*/ if((srcFormat==IMGFMT_BGR32 && dstFormat==IMGFMT_BGR24) ||(srcFormat==IMGFMT_RGB32 && dstFormat==IMGFMT_RGB24)) { c->swScale= bgr32to24Wrapper; if(flags&SWS_PRINT_INFO) printf("SwScaler: using unscaled %s -> %s special converter\n", vo_format_name(srcFormat), vo_format_name(dstFormat)); return c; } /* bgr24to32 & rgb24to32*/ if((srcFormat==IMGFMT_BGR24 && dstFormat==IMGFMT_BGR32) ||(srcFormat==IMGFMT_RGB24 && dstFormat==IMGFMT_RGB32)) { c->swScale= bgr24to32Wrapper; if(flags&SWS_PRINT_INFO) printf("SwScaler: using unscaled %s -> %s 
special converter\n", vo_format_name(srcFormat), vo_format_name(dstFormat)); return c; } /* bgr15to16 */ if(srcFormat==IMGFMT_BGR15 && dstFormat==IMGFMT_BGR16) { c->swScale= bgr15to16Wrapper; if(flags&SWS_PRINT_INFO) printf("SwScaler: using unscaled %s -> %s special converter\n", vo_format_name(srcFormat), vo_format_name(dstFormat)); return c; } /* bgr24toYV12 */ if(srcFormat==IMGFMT_BGR24 && dstFormat==IMGFMT_YV12) { c->swScale= bgr24toyv12Wrapper; if(flags&SWS_PRINT_INFO) printf("SwScaler: using unscaled %s -> %s special converter\n", vo_format_name(srcFormat), vo_format_name(dstFormat)); return c; } } if(cpuCaps.hasMMX2) { c->canMMX2BeUsed= (dstW >=srcW && (dstW&31)==0 && (srcW&15)==0) ? 1 : 0; if(!c->canMMX2BeUsed && dstW >=srcW && (srcW&15)==0 && (flags&SWS_FAST_BILINEAR)) { if(flags&SWS_PRINT_INFO) fprintf(stderr, "SwScaler: output Width is not a multiple of 32 -> no MMX2 scaler\n"); } } else c->canMMX2BeUsed=0; /* dont use full vertical UV input/internaly if the source doesnt even have it */ if(isHalfChrV(srcFormat)) c->flags= flags= flags&(~SWS_FULL_CHR_V); /* dont use full horizontal UV input if the source doesnt even have it */ if(isHalfChrH(srcFormat)) c->flags= flags= flags&(~SWS_FULL_CHR_H_INP); /* dont use full horizontal UV internally if the destination doesnt even have it */ if(isHalfChrH(dstFormat)) c->flags= flags= flags&(~SWS_FULL_CHR_H_INT); if(flags&SWS_FULL_CHR_H_INP) c->chrSrcW= srcW; else c->chrSrcW= (srcW+1)>>1; if(flags&SWS_FULL_CHR_H_INT) c->chrDstW= dstW; else c->chrDstW= (dstW+1)>>1; if(flags&SWS_FULL_CHR_V) c->chrSrcH= srcH; else c->chrSrcH= (srcH+1)>>1; if(isHalfChrV(dstFormat)) c->chrDstH= (dstH+1)>>1; else c->chrDstH= dstH; c->chrXInc= ((c->chrSrcW<<16) + (c->chrDstW>>1))/c->chrDstW; c->chrYInc= ((c->chrSrcH<<16) + (c->chrDstH>>1))/c->chrDstH; // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst // but only for the FAST_BILINEAR mode otherwise do correct scaling // n-2 is the last chrominance sample available // this is not perfect, but noone shuld notice the difference, the more correct variant // would be like the vertical one, but that would require some special code for the // first and last pixel if(flags&SWS_FAST_BILINEAR) { if(c->canMMX2BeUsed) { c->lumXInc+= 20; c->chrXInc+= 20; } //we dont use the x86asm scaler if mmx is available else if(cpuCaps.hasMMX) { c->lumXInc = ((srcW-2)<<16)/(dstW-2) - 20; c->chrXInc = ((c->chrSrcW-2)<<16)/(c->chrDstW-2) - 20; } } /* precalculate horizontal scaler filter coefficients */ { const int filterAlign= cpuCaps.hasMMX ? 4 : 1; initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc, srcW , dstW, filterAlign, 1<<14, flags, srcFilter->lumH, dstFilter->lumH); initFilter(&c->hChrFilter, &c->hChrFilterPos, &c->hChrFilterSize, c->chrXInc, (srcW+1)>>1, c->chrDstW, filterAlign, 1<<14, flags, srcFilter->chrH, dstFilter->chrH); #ifdef ARCH_X86 // cant downscale !!! 
if(c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR)) { initMMX2HScaler( dstW, c->lumXInc, c->funnyYCode); initMMX2HScaler(c->chrDstW, c->chrXInc, c->funnyUVCode); } #endif } // Init Horizontal stuff /* precalculate vertical scaler filter coefficients */ initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc, srcH , dstH, 1, (1<<12)-4, flags, srcFilter->lumV, dstFilter->lumV); initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc, (srcH+1)>>1, c->chrDstH, 1, (1<<12)-4, flags, srcFilter->chrV, dstFilter->chrV); // Calculate Buffer Sizes so that they wont run out while handling these damn slices c->vLumBufSize= c->vLumFilterSize; c->vChrBufSize= c->vChrFilterSize; for(i=0; i<dstH; i++) { int chrI= i*c->chrDstH / dstH; int nextSlice= MAX(c->vLumFilterPos[i ] + c->vLumFilterSize - 1, ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<1)); nextSlice&= ~1; // Slices start at even boundaries if(c->vLumFilterPos[i ] + c->vLumBufSize < nextSlice) c->vLumBufSize= nextSlice - c->vLumFilterPos[i ]; if(c->vChrFilterPos[chrI] + c->vChrBufSize < (nextSlice>>1)) c->vChrBufSize= (nextSlice>>1) - c->vChrFilterPos[chrI]; } // allocate pixbufs (we use dynamic allocation because otherwise we would need to c->lumPixBuf= (int16_t**)memalign(4, c->vLumBufSize*2*sizeof(int16_t*)); c->chrPixBuf= (int16_t**)memalign(4, c->vChrBufSize*2*sizeof(int16_t*)); //Note we need at least one pixel more at the end because of the mmx code (just in case someone wanna replace the 4000/8000) for(i=0; i<c->vLumBufSize; i++) c->lumPixBuf[i]= c->lumPixBuf[i+c->vLumBufSize]= (uint16_t*)memalign(8, 4000); for(i=0; i<c->vChrBufSize; i++) c->chrPixBuf[i]= c->chrPixBuf[i+c->vChrBufSize]= (uint16_t*)memalign(8, 8000); //try to avoid drawing green stuff between the right end and the stride end for(i=0; i<c->vLumBufSize; i++) memset(c->lumPixBuf[i], 0, 4000); for(i=0; i<c->vChrBufSize; i++) memset(c->chrPixBuf[i], 64, 8000); ASSERT(c->chrDstH <= dstH) // pack filter data for mmx code if(cpuCaps.hasMMX) { c->lumMmxFilter= (int16_t*)memalign(8, c->vLumFilterSize* dstH*4*sizeof(int16_t)); c->chrMmxFilter= (int16_t*)memalign(8, c->vChrFilterSize*c->chrDstH*4*sizeof(int16_t)); for(i=0; i<c->vLumFilterSize*dstH; i++) c->lumMmxFilter[4*i]=c->lumMmxFilter[4*i+1]=c->lumMmxFilter[4*i+2]=c->lumMmxFilter[4*i+3]= c->vLumFilter[i]; for(i=0; i<c->vChrFilterSize*c->chrDstH; i++) c->chrMmxFilter[4*i]=c->chrMmxFilter[4*i+1]=c->chrMmxFilter[4*i+2]=c->chrMmxFilter[4*i+3]= c->vChrFilter[i]; } if(flags&SWS_PRINT_INFO) { #ifdef DITHER1XBPP char *dither= " dithered"; #else char *dither= ""; #endif if(flags&SWS_FAST_BILINEAR) fprintf(stderr, "\nSwScaler: FAST_BILINEAR scaler, "); else if(flags&SWS_BILINEAR) fprintf(stderr, "\nSwScaler: BILINEAR scaler, "); else if(flags&SWS_BICUBIC) fprintf(stderr, "\nSwScaler: BICUBIC scaler, "); else if(flags&SWS_X) fprintf(stderr, "\nSwScaler: Experimental scaler, "); else if(flags&SWS_POINT) fprintf(stderr, "\nSwScaler: Nearest Neighbor / POINT scaler, "); else if(flags&SWS_AREA) fprintf(stderr, "\nSwScaler: Area Averageing scaler, "); else fprintf(stderr, "\nSwScaler: ehh flags invalid?! 
"); if(dstFormat==IMGFMT_BGR15 || dstFormat==IMGFMT_BGR16) fprintf(stderr, "from %s to%s %s ", vo_format_name(srcFormat), dither, vo_format_name(dstFormat)); else fprintf(stderr, "from %s to %s ", vo_format_name(srcFormat), vo_format_name(dstFormat)); if(cpuCaps.hasMMX2) fprintf(stderr, "using MMX2\n"); else if(cpuCaps.has3DNow) fprintf(stderr, "using 3DNOW\n"); else if(cpuCaps.hasMMX) fprintf(stderr, "using MMX\n"); else fprintf(stderr, "using C\n"); } if((flags & SWS_PRINT_INFO) && verbose) { if(cpuCaps.hasMMX) { if(c->canMMX2BeUsed && (flags&SWS_FAST_BILINEAR)) printf("SwScaler: using FAST_BILINEAR MMX2 scaler for horizontal scaling\n"); else { if(c->hLumFilterSize==4) printf("SwScaler: using 4-tap MMX scaler for horizontal luminance scaling\n"); else if(c->hLumFilterSize==8) printf("SwScaler: using 8-tap MMX scaler for horizontal luminance scaling\n"); else printf("SwScaler: using n-tap MMX scaler for horizontal luminance scaling\n"); if(c->hChrFilterSize==4) printf("SwScaler: using 4-tap MMX scaler for horizontal chrominance scaling\n"); else if(c->hChrFilterSize==8) printf("SwScaler: using 8-tap MMX scaler for horizontal chrominance scaling\n"); else printf("SwScaler: using n-tap MMX scaler for horizontal chrominance scaling\n"); } } else { #ifdef ARCH_X86 printf("SwScaler: using X86-Asm scaler for horizontal scaling\n"); #else if(flags & SWS_FAST_BILINEAR) printf("SwScaler: using FAST_BILINEAR C scaler for horizontal scaling\n"); else printf("SwScaler: using C scaler for horizontal scaling\n"); #endif } if(isPlanarYUV(dstFormat)) { if(c->vLumFilterSize==1) printf("SwScaler: using 1-tap %s \"scaler\" for vertical scaling (YV12 like)\n", cpuCaps.hasMMX ? "MMX" : "C"); else printf("SwScaler: using n-tap %s scaler for vertical scaling (YV12 like)\n", cpuCaps.hasMMX ? "MMX" : "C"); } else { if(c->vLumFilterSize==1 && c->vChrFilterSize==2) printf("SwScaler: using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n" "SwScaler: 2-tap scaler for vertical chrominance scaling (BGR)\n",cpuCaps.hasMMX ? "MMX" : "C"); else if(c->vLumFilterSize==2 && c->vChrFilterSize==2) printf("SwScaler: using 2-tap linear %s scaler for vertical scaling (BGR)\n", cpuCaps.hasMMX ? "MMX" : "C"); else printf("SwScaler: using n-tap %s scaler for vertical scaling (BGR)\n", cpuCaps.hasMMX ? "MMX" : "C"); } if(dstFormat==IMGFMT_BGR24) printf("SwScaler: using %s YV12->BGR24 Converter\n", cpuCaps.hasMMX2 ? "MMX2" : (cpuCaps.hasMMX ? "MMX" : "C")); else if(dstFormat==IMGFMT_BGR32) printf("SwScaler: using %s YV12->BGR32 Converter\n", cpuCaps.hasMMX ? "MMX" : "C"); else if(dstFormat==IMGFMT_BGR16) printf("SwScaler: using %s YV12->BGR16 Converter\n", cpuCaps.hasMMX ? "MMX" : "C"); else if(dstFormat==IMGFMT_BGR15) printf("SwScaler: using %s YV12->BGR15 Converter\n", cpuCaps.hasMMX ? "MMX" : "C"); printf("SwScaler: %dx%d -> %dx%d\n", srcW, srcH, dstW, dstH); } if((flags & SWS_PRINT_INFO) && verbose>1) { printf("SwScaler:Lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n", c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc); printf("SwScaler:Chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n", c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH, c->chrXInc, c->chrYInc); } c->swScale= swScale; return c; }
true
FFmpeg
b7dc6f662868fbdad779c61c233b1d19d8b89d3c
5,426
static int smacker_decode_bigtree(GetBitContext *gb, HuffContext *hc,
                                  DBCtx *ctx)
{
    if (hc->current + 1 >= hc->length) {
        av_log(NULL, AV_LOG_ERROR, "Tree size exceeded!\n");
        return AVERROR_INVALIDDATA;
    }
    if(!get_bits1(gb)){ //Leaf
        int val, i1, i2;
        i1 = ctx->v1->table ? get_vlc2(gb, ctx->v1->table, SMKTREE_BITS, 3) : 0;
        i2 = ctx->v2->table ? get_vlc2(gb, ctx->v2->table, SMKTREE_BITS, 3) : 0;
        if (i1 < 0 || i2 < 0)
            return AVERROR_INVALIDDATA;
        val = ctx->recode1[i1] | (ctx->recode2[i2] << 8);
        if(val == ctx->escapes[0]) {
            ctx->last[0] = hc->current;
            val = 0;
        } else if(val == ctx->escapes[1]) {
            ctx->last[1] = hc->current;
            val = 0;
        } else if(val == ctx->escapes[2]) {
            ctx->last[2] = hc->current;
            val = 0;
        }

        hc->values[hc->current++] = val;
        return 1;
    } else { //Node
        int r = 0, r_new, t;
        t = hc->current++;
        r = smacker_decode_bigtree(gb, hc, ctx);
        if(r < 0)
            return r;
        hc->values[t] = SMK_NODE | r;
        r++;
        r_new = smacker_decode_bigtree(gb, hc, ctx);
        if (r_new < 0)
            return r_new;
        return r + r_new;
    }
}
true
FFmpeg
946ecd19ea752399bccc751c9339ff74b815587e
5,427
char *qemu_find_file(int type, const char *name) { int len; const char *subdir; char *buf; /* Try the name as a straight path first */ if (access(name, R_OK) == 0) { return g_strdup(name); } switch (type) { case QEMU_FILE_TYPE_BIOS: subdir = ""; break; case QEMU_FILE_TYPE_KEYMAP: subdir = "keymaps/"; break; default: abort(); } len = strlen(data_dir) + strlen(name) + strlen(subdir) + 2; buf = g_malloc0(len); snprintf(buf, len, "%s/%s%s", data_dir, subdir, name); if (access(buf, R_OK)) { g_free(buf); return NULL; } return buf; }
true
qemu
4524051c32190c1dc13ec2ccd122fd120dbed736
5,428
static inline void decode_block_intra(MadContext *s, int16_t * block) { int level, i, j, run; RLTable *rl = &ff_rl_mpeg1; const uint8_t *scantable = s->scantable.permutated; int16_t *quant_matrix = s->quant_matrix; block[0] = (128 + get_sbits(&s->gb, 8)) * quant_matrix[0]; /* The RL decoder is derived from mpeg1_decode_block_intra; Escaped level and run values a decoded differently */ i = 0; { OPEN_READER(re, &s->gb); /* now quantify & encode AC coefficients */ for (;;) { UPDATE_CACHE(re, &s->gb); GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0); if (level == 127) { break; } else if (level != 0) { i += run; j = scantable[i]; level = (level*quant_matrix[j]) >> 4; level = (level-1)|1; level = (level ^ SHOW_SBITS(re, &s->gb, 1)) - SHOW_SBITS(re, &s->gb, 1); LAST_SKIP_BITS(re, &s->gb, 1); } else { /* escape */ UPDATE_CACHE(re, &s->gb); level = SHOW_SBITS(re, &s->gb, 10); SKIP_BITS(re, &s->gb, 10); UPDATE_CACHE(re, &s->gb); run = SHOW_UBITS(re, &s->gb, 6)+1; LAST_SKIP_BITS(re, &s->gb, 6); i += run; j = scantable[i]; if (level < 0) { level = -level; level = (level*quant_matrix[j]) >> 4; level = (level-1)|1; level = -level; } else { level = (level*quant_matrix[j]) >> 4; level = (level-1)|1; } } if (i > 63) { av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", s->mb_x, s->mb_y); return; } block[j] = level; } CLOSE_READER(re, &s->gb); } }
true
FFmpeg
061c489895d29049a88dc6118e4b639a273b31d6
5,431
static int overlay_opencl_blend(FFFrameSync *fs) { AVFilterContext *avctx = fs->parent; AVFilterLink *outlink = avctx->outputs[0]; OverlayOpenCLContext *ctx = avctx->priv; AVFrame *input_main, *input_overlay; AVFrame *output; cl_mem mem; cl_int cle, x, y; size_t global_work[2]; int kernel_arg = 0; int err, plane; err = ff_framesync_get_frame(fs, 0, &input_main, 0); if (err < 0) return err; err = ff_framesync_get_frame(fs, 1, &input_overlay, 0); if (err < 0) return err; if (!ctx->initialised) { AVHWFramesContext *main_fc = (AVHWFramesContext*)input_main->hw_frames_ctx->data; AVHWFramesContext *overlay_fc = (AVHWFramesContext*)input_overlay->hw_frames_ctx->data; err = overlay_opencl_load(avctx, main_fc->sw_format, overlay_fc->sw_format); if (err < 0) return err; } output = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!output) { err = AVERROR(ENOMEM); goto fail; } for (plane = 0; plane < ctx->nb_planes; plane++) { kernel_arg = 0; mem = (cl_mem)output->data[plane]; cle = clSetKernelArg(ctx->kernel, kernel_arg++, sizeof(cl_mem), &mem); if (cle != CL_SUCCESS) goto fail_kernel_arg; mem = (cl_mem)input_main->data[plane]; cle = clSetKernelArg(ctx->kernel, kernel_arg++, sizeof(cl_mem), &mem); if (cle != CL_SUCCESS) goto fail_kernel_arg; mem = (cl_mem)input_overlay->data[plane]; cle = clSetKernelArg(ctx->kernel, kernel_arg++, sizeof(cl_mem), &mem); if (cle != CL_SUCCESS) goto fail_kernel_arg; if (ctx->alpha_separate) { mem = (cl_mem)input_overlay->data[ctx->nb_planes]; cle = clSetKernelArg(ctx->kernel, kernel_arg++, sizeof(cl_mem), &mem); if (cle != CL_SUCCESS) goto fail_kernel_arg; } x = ctx->x_position / (plane == 0 ? 1 : ctx->x_subsample); y = ctx->y_position / (plane == 0 ? 1 : ctx->y_subsample); cle = clSetKernelArg(ctx->kernel, kernel_arg++, sizeof(cl_int), &x); if (cle != CL_SUCCESS) goto fail_kernel_arg; cle = clSetKernelArg(ctx->kernel, kernel_arg++, sizeof(cl_int), &y); if (cle != CL_SUCCESS) goto fail_kernel_arg; if (ctx->alpha_separate) { cl_int alpha_adj_x = plane == 0 ? 1 : ctx->x_subsample; cl_int alpha_adj_y = plane == 0 ? 1 : ctx->y_subsample; cle = clSetKernelArg(ctx->kernel, kernel_arg++, sizeof(cl_int), &alpha_adj_x); if (cle != CL_SUCCESS) goto fail_kernel_arg; cle = clSetKernelArg(ctx->kernel, kernel_arg++, sizeof(cl_int), &alpha_adj_y); if (cle != CL_SUCCESS) goto fail_kernel_arg; } global_work[0] = output->width; global_work[1] = output->height; cle = clEnqueueNDRangeKernel(ctx->command_queue, ctx->kernel, 2, NULL, global_work, NULL, 0, NULL, NULL); if (cle != CL_SUCCESS) { av_log(avctx, AV_LOG_ERROR, "Failed to enqueue " "overlay kernel for plane %d: %d.\n", cle, plane); err = AVERROR(EIO); goto fail; } } cle = clFinish(ctx->command_queue); if (cle != CL_SUCCESS) { av_log(avctx, AV_LOG_ERROR, "Failed to finish " "command queue: %d.\n", cle); err = AVERROR(EIO); goto fail; } err = av_frame_copy_props(output, input_main); av_log(avctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n", av_get_pix_fmt_name(output->format), output->width, output->height, output->pts); return ff_filter_frame(outlink, output); fail_kernel_arg: av_log(avctx, AV_LOG_ERROR, "Failed to set kernel arg %d: %d.\n", kernel_arg, cle); err = AVERROR(EIO); fail: return err; }
true
FFmpeg
9b4611a1c1f2ac5d1bfd75f7e6e41aa0bc15ca39
5,432
static int oggvorbis_encode_frame(AVCodecContext *avccontext, unsigned char *packets, int buf_size, void *data) { OggVorbisContext *context = avccontext->priv_data ; float **buffer ; ogg_packet op ; signed char *audio = data ; int l, samples = OGGVORBIS_FRAME_SIZE ; buffer = vorbis_analysis_buffer(&context->vd, samples) ; if(context->vi.channels == 1) { for(l = 0 ; l < samples ; l++) buffer[0][l]=((audio[l*2+1]<<8)|(0x00ff&(int)audio[l*2]))/32768.f; } else { for(l = 0 ; l < samples ; l++){ buffer[0][l]=((audio[l*4+1]<<8)|(0x00ff&(int)audio[l*4]))/32768.f; buffer[1][l]=((audio[l*4+3]<<8)|(0x00ff&(int)audio[l*4+2]))/32768.f; } } vorbis_analysis_wrote(&context->vd, samples) ; while(vorbis_analysis_blockout(&context->vd, &context->vb) == 1) { vorbis_analysis(&context->vb, NULL); vorbis_bitrate_addblock(&context->vb) ; while(vorbis_bitrate_flushpacket(&context->vd, &op)) { memcpy(context->buffer + context->buffer_index, &op, sizeof(ogg_packet)); context->buffer_index += sizeof(ogg_packet); memcpy(context->buffer + context->buffer_index, op.packet, op.bytes); context->buffer_index += op.bytes; // av_log(avccontext, AV_LOG_DEBUG, "e%d / %d\n", context->buffer_index, op.bytes); } } if(context->buffer_index){ ogg_packet *op2= (ogg_packet*)context->buffer; op2->packet = context->buffer + sizeof(ogg_packet); l= op2->bytes; memcpy(packets, op2->packet, l); context->buffer_index -= l + sizeof(ogg_packet); memcpy(context->buffer, context->buffer + l + sizeof(ogg_packet), context->buffer_index); // av_log(avccontext, AV_LOG_DEBUG, "E%d\n", l); return l; } return 0; }
false
FFmpeg
3f4993f19b609180ff0f92486ea8bbac9e531db2
5,433
static int mpegps_read_header(AVFormatContext *s) { MpegDemuxContext *m = s->priv_data; char buffer[7]; int64_t last_pos = avio_tell(s->pb); m->header_state = 0xff; s->ctx_flags |= AVFMTCTX_NOHEADER; avio_get_str(s->pb, 6, buffer, sizeof(buffer)); if (!memcmp("IMKH", buffer, 4)) { m->imkh_cctv = 1; } else if (!memcmp("Sofdec", buffer, 6)) { m->sofdec = 1; } else avio_seek(s->pb, last_pos, SEEK_SET); /* no need to do more */ return 0; }
false
FFmpeg
a5c1c7a8b3d13c86b453558628951c3f52054ab4
5,434
static int read_access_unit(AVCodecContext *avctx, void* data, int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MLPDecodeContext *m = avctx->priv_data; GetBitContext gb; unsigned int length, substr; unsigned int substream_start; unsigned int header_size = 4; unsigned int substr_header_size = 0; uint8_t substream_parity_present[MAX_SUBSTREAMS]; uint16_t substream_data_len[MAX_SUBSTREAMS]; uint8_t parity_bits; int ret; if (buf_size < 4) return 0; length = (AV_RB16(buf) & 0xfff) * 2; if (length < 4 || length > buf_size) return AVERROR_INVALIDDATA; init_get_bits(&gb, (buf + 4), (length - 4) * 8); m->is_major_sync_unit = 0; if (show_bits_long(&gb, 31) == (0xf8726fba >> 1)) { if (read_major_sync(m, &gb) < 0) goto error; m->is_major_sync_unit = 1; header_size += 28; } if (!m->params_valid) { av_log(m->avctx, AV_LOG_WARNING, "Stream parameters not seen; skipping frame.\n"); *got_frame_ptr = 0; return length; } substream_start = 0; for (substr = 0; substr < m->num_substreams; substr++) { int extraword_present, checkdata_present, end, nonrestart_substr; extraword_present = get_bits1(&gb); nonrestart_substr = get_bits1(&gb); checkdata_present = get_bits1(&gb); skip_bits1(&gb); end = get_bits(&gb, 12) * 2; substr_header_size += 2; if (extraword_present) { if (m->avctx->codec_id == AV_CODEC_ID_MLP) { av_log(m->avctx, AV_LOG_ERROR, "There must be no extraword for MLP.\n"); goto error; } skip_bits(&gb, 16); substr_header_size += 2; } if (!(nonrestart_substr ^ m->is_major_sync_unit)) { av_log(m->avctx, AV_LOG_ERROR, "Invalid nonrestart_substr.\n"); goto error; } if (end + header_size + substr_header_size > length) { av_log(m->avctx, AV_LOG_ERROR, "Indicated length of substream %d data goes off end of " "packet.\n", substr); end = length - header_size - substr_header_size; } if (end < substream_start) { av_log(avctx, AV_LOG_ERROR, "Indicated end offset of substream %d data " "is smaller than calculated start offset.\n", substr); goto error; } if (substr > m->max_decoded_substream) continue; substream_parity_present[substr] = checkdata_present; substream_data_len[substr] = end - substream_start; substream_start = end; } parity_bits = ff_mlp_calculate_parity(buf, 4); parity_bits ^= ff_mlp_calculate_parity(buf + header_size, substr_header_size); if ((((parity_bits >> 4) ^ parity_bits) & 0xF) != 0xF) { av_log(avctx, AV_LOG_ERROR, "Parity check failed.\n"); goto error; } buf += header_size + substr_header_size; for (substr = 0; substr <= m->max_decoded_substream; substr++) { SubStream *s = &m->substream[substr]; init_get_bits(&gb, buf, substream_data_len[substr] * 8); m->matrix_changed = 0; memset(m->filter_changed, 0, sizeof(m->filter_changed)); s->blockpos = 0; do { if (get_bits1(&gb)) { if (get_bits1(&gb)) { /* A restart header should be present. */ if (read_restart_header(m, &gb, buf, substr) < 0) goto next_substr; s->restart_seen = 1; } if (!s->restart_seen) goto next_substr; if (read_decoding_params(m, &gb, substr) < 0) goto next_substr; } if (!s->restart_seen) goto next_substr; if ((ret = read_block_data(m, &gb, substr)) < 0) return ret; if (get_bits_count(&gb) >= substream_data_len[substr] * 8) goto substream_length_mismatch; } while (!get_bits1(&gb)); skip_bits(&gb, (-get_bits_count(&gb)) & 15); if (substream_data_len[substr] * 8 - get_bits_count(&gb) >= 32) { int shorten_by; if (get_bits(&gb, 16) != 0xD234) return AVERROR_INVALIDDATA; shorten_by = get_bits(&gb, 16); if (m->avctx->codec_id == AV_CODEC_ID_TRUEHD && shorten_by & 0x2000) s->blockpos -= FFMIN(shorten_by & 0x1FFF, s->blockpos); else if (m->avctx->codec_id == AV_CODEC_ID_MLP && shorten_by != 0xD234) return AVERROR_INVALIDDATA; if (substr == m->max_decoded_substream) av_log(m->avctx, AV_LOG_INFO, "End of stream indicated.\n"); } if (substream_parity_present[substr]) { uint8_t parity, checksum; if (substream_data_len[substr] * 8 - get_bits_count(&gb) != 16) goto substream_length_mismatch; parity = ff_mlp_calculate_parity(buf, substream_data_len[substr] - 2); checksum = ff_mlp_checksum8 (buf, substream_data_len[substr] - 2); if ((get_bits(&gb, 8) ^ parity) != 0xa9 ) av_log(m->avctx, AV_LOG_ERROR, "Substream %d parity check failed.\n", substr); if ( get_bits(&gb, 8) != checksum) av_log(m->avctx, AV_LOG_ERROR, "Substream %d checksum failed.\n" , substr); } if (substream_data_len[substr] * 8 != get_bits_count(&gb)) goto substream_length_mismatch; next_substr: if (!s->restart_seen) av_log(m->avctx, AV_LOG_ERROR, "No restart header present in substream %d.\n", substr); buf += substream_data_len[substr]; } rematrix_channels(m, m->max_decoded_substream); if ((ret = output_data(m, m->max_decoded_substream, data, got_frame_ptr)) < 0) return ret; return length; substream_length_mismatch: av_log(m->avctx, AV_LOG_ERROR, "substream %d length mismatch\n", substr); return AVERROR_INVALIDDATA; error: m->params_valid = 0; return AVERROR_INVALIDDATA; }
true
FFmpeg
f7bea731d955ec25a726abcd31862d3bd0183d58
5,435
static int ipmi_register_netfn(IPMIBmcSim *s, unsigned int netfn, const IPMINetfn *netfnd) { if ((netfn & 1) || (netfn > MAX_NETFNS) || (s->netfns[netfn / 2])) { return -1; } s->netfns[netfn / 2] = netfnd; return 0; }
true
qemu
93a5364620dbfcf3cc13866d0e218fc3624c1edf
5,436
static int nbd_establish_connection(BlockDriverState *bs) { BDRVNBDState *s = bs->opaque; int sock; int ret; off_t size; size_t blocksize; if (s->host_spec[0] == '/') { sock = unix_socket_outgoing(s->host_spec); } else { sock = tcp_socket_outgoing_spec(s->host_spec); } /* Failed to establish connection */ if (sock < 0) { logout("Failed to establish connection to NBD server\n"); return -errno; } /* NBD handshake */ ret = nbd_receive_negotiate(sock, s->export_name, &s->nbdflags, &size, &blocksize); if (ret < 0) { logout("Failed to negotiate with the NBD server\n"); closesocket(sock); return ret; } /* Now that we're connected, set the socket to be non-blocking and * kick the reply mechanism. */ socket_set_nonblock(sock); qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL, nbd_have_request, s); s->sock = sock; s->size = size; s->blocksize = blocksize; logout("Established connection with NBD server\n"); return 0; }
true
qemu
b3adf53a3a10a1ca8347167907e4cf8bbd0204f1
5,438
void memory_region_destroy(MemoryRegion *mr) { assert(QTAILQ_EMPTY(&mr->subregions)); mr->destructor(mr); memory_region_clear_coalescing(mr); g_free((char *)mr->name); g_free(mr->ioeventfds); }
true
qemu
2be0e25f4b6a4f91e39388cc365bbe53b56ab62a
5,440
static av_cold int aac_decode_init(AVCodecContext * avccontext) { AACContext * ac = avccontext->priv_data; int i; ac->avccontext = avccontext; if (avccontext->extradata_size <= 0 || decode_audio_specific_config(ac, avccontext->extradata, avccontext->extradata_size)) return -1; avccontext->sample_fmt = SAMPLE_FMT_S16; avccontext->sample_rate = ac->m4ac.sample_rate; avccontext->frame_size = 1024; AAC_INIT_VLC_STATIC( 0, 144); AAC_INIT_VLC_STATIC( 1, 114); AAC_INIT_VLC_STATIC( 2, 188); AAC_INIT_VLC_STATIC( 3, 180); AAC_INIT_VLC_STATIC( 4, 172); AAC_INIT_VLC_STATIC( 5, 140); AAC_INIT_VLC_STATIC( 6, 168); AAC_INIT_VLC_STATIC( 7, 114); AAC_INIT_VLC_STATIC( 8, 262); AAC_INIT_VLC_STATIC( 9, 248); AAC_INIT_VLC_STATIC(10, 384); dsputil_init(&ac->dsp, avccontext); ac->random_state = 0x1f2e3d4c; // -1024 - Compensate wrong IMDCT method. // 32768 - Required to scale values to the correct range for the bias method // for float to int16 conversion. if(ac->dsp.float_to_int16 == ff_float_to_int16_c) { ac->add_bias = 385.0f; ac->sf_scale = 1. / (-1024. * 32768.); ac->sf_offset = 0; } else { ac->add_bias = 0.0f; ac->sf_scale = 1. / -1024.; ac->sf_offset = 60; } #ifndef CONFIG_HARDCODED_TABLES for (i = 0; i < 428; i++) ff_aac_pow2sf_tab[i] = pow(2, (i - 200)/4.); #endif /* CONFIG_HARDCODED_TABLES */ INIT_VLC_STATIC(&vlc_scalefactors,7,FF_ARRAY_ELEMS(ff_aac_scalefactor_code), ff_aac_scalefactor_bits, sizeof(ff_aac_scalefactor_bits[0]), sizeof(ff_aac_scalefactor_bits[0]), ff_aac_scalefactor_code, sizeof(ff_aac_scalefactor_code[0]), sizeof(ff_aac_scalefactor_code[0]), 352); ff_mdct_init(&ac->mdct, 11, 1); ff_mdct_init(&ac->mdct_small, 8, 1); // window initialization ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024); ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128); ff_sine_window_init(ff_sine_1024, 1024); ff_sine_window_init(ff_sine_128, 128); return 0; }
false
FFmpeg
158b39126d59f07069e0da07e0658111967c6179
5,441
static void read_info_chunk(AVFormatContext *s, int64_t size) { AVIOContext *pb = s->pb; unsigned int i; unsigned int nb_entries = avio_rb32(pb); for (i = 0; i < nb_entries; i++) { char key[32]; char value[1024]; avio_get_str(pb, INT_MAX, key, sizeof(key)); avio_get_str(pb, INT_MAX, value, sizeof(value)); av_dict_set(&s->metadata, key, value, 0); } }
false
FFmpeg
90b2f3136778311fb5e097b8ee1f527518231c23
5,442
static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts, int64_t *pkt_dts) { AVFrame *decoded_frame, *filtered_frame = NULL; void *buffer_to_free = NULL; int i, ret = 0; float quality = 0; #if CONFIG_AVFILTER int frame_available = 1; #endif int duration=0; int64_t *best_effort_timestamp; AVRational *frame_sample_aspect; if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame())) return AVERROR(ENOMEM); else avcodec_get_frame_defaults(ist->decoded_frame); decoded_frame = ist->decoded_frame; pkt->pts = *pkt_pts; pkt->dts = *pkt_dts; *pkt_pts = AV_NOPTS_VALUE; if (pkt->duration) { duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q); } else if(ist->st->codec->time_base.num != 0) { int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame; duration = ((int64_t)AV_TIME_BASE * ist->st->codec->time_base.num * ticks) / ist->st->codec->time_base.den; } if(*pkt_dts != AV_NOPTS_VALUE && duration) { *pkt_dts += duration; }else *pkt_dts = AV_NOPTS_VALUE; ret = avcodec_decode_video2(ist->st->codec, decoded_frame, got_output, pkt); if (ret < 0) return ret; quality = same_quant ? decoded_frame->quality : 0; if (!*got_output) { /* no picture yet */ return ret; } best_effort_timestamp= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "best_effort_timestamp"); if(*best_effort_timestamp != AV_NOPTS_VALUE) ist->next_pts = ist->pts = *best_effort_timestamp; ist->next_pts += duration; pkt->size = 0; pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free); #if CONFIG_AVFILTER frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio"); for(i=0;i<nb_output_streams;i++) { OutputStream *ost = ost = &output_streams[i]; if(check_output_constraints(ist, ost)){ if (!frame_sample_aspect->num) *frame_sample_aspect = ist->st->sample_aspect_ratio; decoded_frame->pts = ist->pts; if (ist->dr1) { FrameBuffer *buf = decoded_frame->opaque; AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays( decoded_frame->data, decoded_frame->linesize, AV_PERM_READ | AV_PERM_PRESERVE, ist->st->codec->width, ist->st->codec->height, ist->st->codec->pix_fmt); avfilter_copy_frame_props(fb, decoded_frame); fb->pts = ist->pts; fb->buf->priv = buf; fb->buf->free = filter_release_buffer; buf->refcount++; av_buffersrc_buffer(ost->input_video_filter, fb); } else if((av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE)) < 0){ av_log(0, AV_LOG_FATAL, "Failed to inject frame into filter network\n"); exit_program(1); } } } #endif rate_emu_sleep(ist); for (i = 0; i < nb_output_streams; i++) { OutputStream *ost = &output_streams[i]; int frame_size; if (!check_output_constraints(ist, ost) || !ost->encoding_needed) continue; #if CONFIG_AVFILTER if (ost->input_video_filter) { frame_available = av_buffersink_poll_frame(ost->output_video_filter); } while (frame_available) { if (ost->output_video_filter) { AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base; if (av_buffersink_get_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0){ av_log(0, AV_LOG_WARNING, "AV Filter told us it has a frame available but failed to output one\n"); goto cont; } if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) { av_free(buffer_to_free); return AVERROR(ENOMEM); } else avcodec_get_frame_defaults(ist->filtered_frame); filtered_frame = ist->filtered_frame; *filtered_frame= *decoded_frame; //for me_threshold if (ost->picref) { avfilter_fill_frame_from_video_buffer_ref(filtered_frame, ost->picref); ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q); } } if (ost->picref->video && !ost->frame_aspect_ratio) ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio; #else filtered_frame = decoded_frame; #endif do_video_out(output_files[ost->file_index].ctx, ost, ist, filtered_frame, &frame_size, same_quant ? quality : ost->st->codec->global_quality); if (vstats_filename && frame_size) do_video_stats(output_files[ost->file_index].ctx, ost, frame_size); #if CONFIG_AVFILTER cont: frame_available = ost->output_video_filter && av_buffersink_poll_frame(ost->output_video_filter); avfilter_unref_buffer(ost->picref); } #endif } av_free(buffer_to_free); return ret; }
false
FFmpeg
f2f8632aa5584438a09983b64c67908a96f029b9
5,443
static int mov_read_stps(MOVContext *c, AVIOContext *pb, MOVAtom atom) { AVStream *st; MOVStreamContext *sc; unsigned i, entries; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; sc = st->priv_data; avio_rb32(pb); // version + flags entries = avio_rb32(pb); if (entries >= UINT_MAX / sizeof(*sc->stps_data)) return AVERROR_INVALIDDATA; sc->stps_data = av_malloc(entries * sizeof(*sc->stps_data)); if (!sc->stps_data) return AVERROR(ENOMEM); sc->stps_count = entries; for (i = 0; i < entries; i++) { sc->stps_data[i] = avio_rb32(pb); //av_dlog(c->fc, "stps %d\n", sc->stps_data[i]); } return 0; }
false
FFmpeg
9888ffb1ce5e0a17f711b01933d504c72ea29d3b
5,444
static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, uint8_t * buf, int buf_size) { MPADecodeContext *s = avctx->priv_data; uint32_t header; uint8_t *buf_ptr; int len, out_size; OUT_INT *out_samples = data; buf_ptr = buf; while (buf_size > 0) { len = s->inbuf_ptr - s->inbuf; if (s->frame_size == 0) { /* special case for next header for first frame in free format case (XXX: find a simpler method) */ if (s->free_format_next_header != 0) { s->inbuf[0] = s->free_format_next_header >> 24; s->inbuf[1] = s->free_format_next_header >> 16; s->inbuf[2] = s->free_format_next_header >> 8; s->inbuf[3] = s->free_format_next_header; s->inbuf_ptr = s->inbuf + 4; s->free_format_next_header = 0; goto got_header; } /* no header seen : find one. We need at least HEADER_SIZE bytes to parse it */ len = HEADER_SIZE - len; if (len > buf_size) len = buf_size; if (len > 0) { memcpy(s->inbuf_ptr, buf_ptr, len); buf_ptr += len; buf_size -= len; s->inbuf_ptr += len; } if ((s->inbuf_ptr - s->inbuf) >= HEADER_SIZE) { got_header: header = (s->inbuf[0] << 24) | (s->inbuf[1] << 16) | (s->inbuf[2] << 8) | s->inbuf[3]; if (ff_mpa_check_header(header) < 0) { /* no sync found : move by one byte (inefficient, but simple!) */ memmove(s->inbuf, s->inbuf + 1, s->inbuf_ptr - s->inbuf - 1); s->inbuf_ptr--; dprintf("skip %x\n", header); /* reset free format frame size to give a chance to get a new bitrate */ s->free_format_frame_size = 0; } else { if (decode_header(s, header) == 1) { /* free format: prepare to compute frame size */ s->frame_size = -1; } /* update codec info */ avctx->sample_rate = s->sample_rate; avctx->channels = s->nb_channels; avctx->bit_rate = s->bit_rate; avctx->sub_id = s->layer; switch(s->layer) { case 1: avctx->frame_size = 384; break; case 2: avctx->frame_size = 1152; break; case 3: if (s->lsf) avctx->frame_size = 576; else avctx->frame_size = 1152; break; } } } } else if (s->frame_size == -1) { /* free format : find next sync to compute frame size */ len = MPA_MAX_CODED_FRAME_SIZE - len; if (len > buf_size) len = buf_size; if (len == 0) { /* frame too long: resync */ s->frame_size = 0; memmove(s->inbuf, s->inbuf + 1, s->inbuf_ptr - s->inbuf - 1); s->inbuf_ptr--; } else { uint8_t *p, *pend; uint32_t header1; int padding; memcpy(s->inbuf_ptr, buf_ptr, len); /* check for header */ p = s->inbuf_ptr - 3; pend = s->inbuf_ptr + len - 4; while (p <= pend) { header = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]; header1 = (s->inbuf[0] << 24) | (s->inbuf[1] << 16) | (s->inbuf[2] << 8) | s->inbuf[3]; /* check with high probability that we have a valid header */ if ((header & SAME_HEADER_MASK) == (header1 & SAME_HEADER_MASK)) { /* header found: update pointers */ len = (p + 4) - s->inbuf_ptr; buf_ptr += len; buf_size -= len; s->inbuf_ptr = p; /* compute frame size */ s->free_format_next_header = header; s->free_format_frame_size = s->inbuf_ptr - s->inbuf; padding = (header1 >> 9) & 1; if (s->layer == 1) s->free_format_frame_size -= padding * 4; else s->free_format_frame_size -= padding; dprintf("free frame size=%d padding=%d\n", s->free_format_frame_size, padding); decode_header(s, header1); goto next_data; } p++; } /* not found: simply increase pointers */ buf_ptr += len; s->inbuf_ptr += len; buf_size -= len; } } else if (len < s->frame_size) { if (s->frame_size > MPA_MAX_CODED_FRAME_SIZE) s->frame_size = MPA_MAX_CODED_FRAME_SIZE; len = s->frame_size - len; if (len > buf_size) len = buf_size; memcpy(s->inbuf_ptr, buf_ptr, len); buf_ptr += len; s->inbuf_ptr += len; buf_size -= len; } next_data: if (s->frame_size > 0 && (s->inbuf_ptr - s->inbuf) >= s->frame_size) { if (avctx->parse_only) { /* simply return the frame data */ *(uint8_t **)data = s->inbuf; out_size = s->inbuf_ptr - s->inbuf; } else { out_size = mp_decode_frame(s, out_samples); } s->inbuf_ptr = s->inbuf; s->frame_size = 0; *data_size = out_size; break; } } return buf_ptr - buf; }
false
FFmpeg
02af2269c03ed4a17b81247eff11b0d5bb1e9085
5,445
static av_cold void rv34_init_tables(void) { int i, j, k; for(i = 0; i < NUM_INTRA_TABLES; i++){ for(j = 0; j < 2; j++){ rv34_gen_vlc(rv34_table_intra_cbppat [i][j], CBPPAT_VLC_SIZE, &intra_vlcs[i].cbppattern[j], NULL, 19*i + 0 + j); rv34_gen_vlc(rv34_table_intra_secondpat[i][j], OTHERBLK_VLC_SIZE, &intra_vlcs[i].second_pattern[j], NULL, 19*i + 2 + j); rv34_gen_vlc(rv34_table_intra_thirdpat [i][j], OTHERBLK_VLC_SIZE, &intra_vlcs[i].third_pattern[j], NULL, 19*i + 4 + j); for(k = 0; k < 4; k++){ rv34_gen_vlc(rv34_table_intra_cbp[i][j+k*2], CBP_VLC_SIZE, &intra_vlcs[i].cbp[j][k], rv34_cbp_code, 19*i + 6 + j*4 + k); } } for(j = 0; j < 4; j++){ rv34_gen_vlc(rv34_table_intra_firstpat[i][j], FIRSTBLK_VLC_SIZE, &intra_vlcs[i].first_pattern[j], NULL, 19*i + 14 + j); } rv34_gen_vlc(rv34_intra_coeff[i], COEFF_VLC_SIZE, &intra_vlcs[i].coefficient, NULL, 19*i + 18); } for(i = 0; i < NUM_INTER_TABLES; i++){ rv34_gen_vlc(rv34_inter_cbppat[i], CBPPAT_VLC_SIZE, &inter_vlcs[i].cbppattern[0], NULL, i*12 + 95); for(j = 0; j < 4; j++){ rv34_gen_vlc(rv34_inter_cbp[i][j], CBP_VLC_SIZE, &inter_vlcs[i].cbp[0][j], rv34_cbp_code, i*12 + 96 + j); } for(j = 0; j < 2; j++){ rv34_gen_vlc(rv34_table_inter_firstpat [i][j], FIRSTBLK_VLC_SIZE, &inter_vlcs[i].first_pattern[j], NULL, i*12 + 100 + j); rv34_gen_vlc(rv34_table_inter_secondpat[i][j], OTHERBLK_VLC_SIZE, &inter_vlcs[i].second_pattern[j], NULL, i*12 + 102 + j); rv34_gen_vlc(rv34_table_inter_thirdpat [i][j], OTHERBLK_VLC_SIZE, &inter_vlcs[i].third_pattern[j], NULL, i*12 + 104 + j); } rv34_gen_vlc(rv34_inter_coeff[i], COEFF_VLC_SIZE, &inter_vlcs[i].coefficient, NULL, i*12 + 106); } }
false
FFmpeg
3df18b3ed1177037892ce5b3db113d52dcdcdbf3