label: int64, values 0 or 1
func1: string, lengths 23 to 97k characters
id: int64, values 0 to 27.3k
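Each record below spans three lines, in column order: the label, the func1 source (one flattened C function per line), and the id. A minimal C sketch of how one record could be represented in memory; the struct name and comments are illustrative assumptions, not part of the dataset:

/* One dataset record: a C function body paired with a binary label.
 * Field names mirror the column headers above; this struct is a
 * hypothetical representation, not shipped with the data. */
typedef struct {
    long        label; /* int64 in the schema; 0 or 1 in practice */
    const char *func1; /* flattened function source, 23 to ~97k chars */
    long        id;    /* int64 in the schema; 0 to ~27.3k in practice */
} DatasetRecord;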
0
int cmdutils_read_file(const char *filename, char **bufptr, size_t *size) { int ret; FILE *f = fopen(filename, "rb"); if (!f) { av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename, strerror(errno)); return AVERROR(errno); } fseek(f, 0, SEEK_END); *size = ftell(f); fseek(f, 0, SEEK_SET); *bufptr = av_malloc(*size + 1); if (!*bufptr) { av_log(NULL, AV_LOG_ERROR, "Could not allocate file buffer\n"); fclose(f); return AVERROR(ENOMEM); } ret = fread(*bufptr, 1, *size, f); if (ret < *size) { av_free(*bufptr); if (ferror(f)) { av_log(NULL, AV_LOG_ERROR, "Error while reading file '%s': %s\n", filename, strerror(errno)); ret = AVERROR(errno); } else ret = AVERROR_EOF; } else { ret = 0; (*bufptr)[(*size)++] = '\0'; } fclose(f); return ret; }
20,123
0
static inline int ff_mpeg4_pred_dc(MpegEncContext * s, int n, int level, int *dir_ptr, int encoding) { int a, b, c, wrap, pred, scale, ret; int16_t *dc_val; /* find prediction */ if (n < 4) { scale = s->y_dc_scale; } else { scale = s->c_dc_scale; } if(IS_3IV1) scale= 8; wrap= s->block_wrap[n]; dc_val = s->dc_val[0] + s->block_index[n]; /* B C * A X */ a = dc_val[ - 1]; b = dc_val[ - 1 - wrap]; c = dc_val[ - wrap]; /* outside slice handling (we can't do that by memset as we need the dc for error resilience) */ if(s->first_slice_line && n!=3){ if(n!=2) b=c= 1024; if(n!=1 && s->mb_x == s->resync_mb_x) b=a= 1024; } if(s->mb_x == s->resync_mb_x && s->mb_y == s->resync_mb_y+1){ if(n==0 || n==4 || n==5) b=1024; } if (abs(a - b) < abs(b - c)) { pred = c; *dir_ptr = 1; /* top */ } else { pred = a; *dir_ptr = 0; /* left */ } /* we assume pred is positive */ pred = FASTDIV((pred + (scale >> 1)), scale); if(encoding){ ret = level - pred; }else{ level += pred; ret= level; if(s->error_recognition>=3){ if(level<0){ av_log(s->avctx, AV_LOG_ERROR, "dc<0 at %dx%d\n", s->mb_x, s->mb_y); return -1; } if(level*scale > 2048 + scale){ av_log(s->avctx, AV_LOG_ERROR, "dc overflow at %dx%d\n", s->mb_x, s->mb_y); return -1; } } } level *=scale; if(level&(~2047)){ if(level<0) level=0; else if(!(s->workaround_bugs&FF_BUG_DC_CLIP)) level=2047; } dc_val[0]= level; return ret; }
20,124
1
void arp_table_add(Slirp *slirp, uint32_t ip_addr, uint8_t ethaddr[ETH_ALEN]) { const uint32_t broadcast_addr = ~slirp->vnetwork_mask.s_addr | slirp->vnetwork_addr.s_addr; ArpTable *arptbl = &slirp->arp_table; int i; DEBUG_CALL("arp_table_add"); DEBUG_ARG("ip = 0x%x", ip_addr); DEBUG_ARGS((dfd, " hw addr = %02x:%02x:%02x:%02x:%02x:%02x\n", ethaddr[0], ethaddr[1], ethaddr[2], ethaddr[3], ethaddr[4], ethaddr[5])); /* Check 0.0.0.0/8 invalid source-only addresses */ assert((ip_addr & htonl(~(0xf << 28))) != 0); if (ip_addr == 0xffffffff || ip_addr == broadcast_addr) { /* Do not register broadcast addresses */ return; } /* Search for an entry */ for (i = 0; i < ARP_TABLE_SIZE; i++) { if (arptbl->table[i].ar_sip == ip_addr) { /* Update the entry */ memcpy(arptbl->table[i].ar_sha, ethaddr, ETH_ALEN); return; } } /* No entry found, create a new one */ arptbl->table[arptbl->next_victim].ar_sip = ip_addr; memcpy(arptbl->table[arptbl->next_victim].ar_sha, ethaddr, ETH_ALEN); arptbl->next_victim = (arptbl->next_victim + 1) % ARP_TABLE_SIZE; }
20,125
1
static void encode_subband_c0run(SnowContext *s, SubBand *b, DWTELEM *src, DWTELEM *parent, int stride, int orientation){ const int w= b->width; const int h= b->height; int x, y; if(1){ int run=0; int runs[w*h]; int run_index=0; for(y=0; y<h; y++){ for(x=0; x<w; x++){ int v, p=0; int /*ll=0, */l=0, lt=0, t=0, rt=0; v= src[x + y*stride]; if(y){ t= src[x + (y-1)*stride]; if(x){ lt= src[x - 1 + (y-1)*stride]; } if(x + 1 < w){ rt= src[x + 1 + (y-1)*stride]; } } if(x){ l= src[x - 1 + y*stride]; /*if(x > 1){ if(orientation==1) ll= src[y + (x-2)*stride]; else ll= src[x - 2 + y*stride]; }*/ } if(parent){ int px= x>>1; int py= y>>1; if(px<b->parent->width && py<b->parent->height) p= parent[px + py*2*stride]; } if(!(/*ll|*/l|lt|t|rt|p)){ if(v){ runs[run_index++]= run; run=0; }else{ run++; } } } } runs[run_index++]= run; run_index=0; run= runs[run_index++]; put_symbol2(&s->c, b->state[1], run, 3); for(y=0; y<h; y++){ for(x=0; x<w; x++){ int v, p=0; int /*ll=0, */l=0, lt=0, t=0, rt=0; v= src[x + y*stride]; if(y){ t= src[x + (y-1)*stride]; if(x){ lt= src[x - 1 + (y-1)*stride]; } if(x + 1 < w){ rt= src[x + 1 + (y-1)*stride]; } } if(x){ l= src[x - 1 + y*stride]; /*if(x > 1){ if(orientation==1) ll= src[y + (x-2)*stride]; else ll= src[x - 2 + y*stride]; }*/ } if(parent){ int px= x>>1; int py= y>>1; if(px<b->parent->width && py<b->parent->height) p= parent[px + py*2*stride]; } if(/*ll|*/l|lt|t|rt|p){ int context= av_log2(/*ABS(ll) + */3*ABS(l) + ABS(lt) + 2*ABS(t) + ABS(rt) + ABS(p)); put_rac(&s->c, &b->state[0][context], !!v); }else{ if(!run){ run= runs[run_index++]; put_symbol2(&s->c, b->state[1], run, 3); assert(v); }else{ run--; assert(!v); } } if(v){ int context= av_log2(/*ABS(ll) + */3*ABS(l) + ABS(lt) + 2*ABS(t) + ABS(rt) + ABS(p)); put_symbol2(&s->c, b->state[context + 2], ABS(v)-1, context-4); put_rac(&s->c, &b->state[0][16 + 1 + 3 + quant3b[l&0xFF] + 3*quant3b[t&0xFF]], v<0); } } } } }
20,126
1
void do_fctiwz (void) { union { double d; uint64_t i; } p; /* XXX: higher bits are not supposed to be significant. * to make tests easier, return the same as a real PowerPC 750 (aka G3) */ p.i = float64_to_int32_round_to_zero(FT0, &env->fp_status); p.i |= 0xFFF80000ULL << 32; FT0 = p.d; }
20,127
1
static void bdrv_ioctl_bh_cb(void *opaque) { BdrvIoctlCompletionData *data = opaque; bdrv_co_io_em_complete(data->co, -ENOTSUP); qemu_bh_delete(data->bh); }
20,128
0
static int tcp_write(URLContext *h, const uint8_t *buf, int size) { TCPContext *s = h->priv_data; int ret, size1, fd_max, len; fd_set wfds; struct timeval tv; size1 = size; while (size > 0) { if (url_interrupt_cb()) return AVERROR(EINTR); fd_max = s->fd; FD_ZERO(&wfds); FD_SET(s->fd, &wfds); tv.tv_sec = 0; tv.tv_usec = 100 * 1000; ret = select(fd_max + 1, NULL, &wfds, NULL, &tv); if (ret > 0 && FD_ISSET(s->fd, &wfds)) { len = send(s->fd, buf, size, 0); if (len < 0) { if (ff_neterrno() != FF_NETERROR(EINTR) && ff_neterrno() != FF_NETERROR(EAGAIN)) return ff_neterrno(); continue; } size -= len; buf += len; } else if (ret < 0) { if (ff_neterrno() == FF_NETERROR(EINTR)) continue; return -1; } } return size1 - size; }
20,132
0
static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size) { int cb_size = 1 << log2_cb_size; HEVCLocalContext *lc = &s->HEVClc; int log2_min_cb_size = s->sps->log2_min_cb_size; int length = cb_size >> log2_min_cb_size; int min_cb_width = s->sps->min_cb_width; int x_cb = x0 >> log2_min_cb_size; int y_cb = y0 >> log2_min_cb_size; int x, y; lc->cu.x = x0; lc->cu.y = y0; lc->cu.rqt_root_cbf = 1; lc->cu.pred_mode = MODE_INTRA; lc->cu.part_mode = PART_2Nx2N; lc->cu.intra_split_flag = 0; lc->cu.pcm_flag = 0; SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0; for (x = 0; x < 4; x++) lc->pu.intra_pred_mode[x] = 1; if (s->pps->transquant_bypass_enable_flag) { lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(s); if (lc->cu.cu_transquant_bypass_flag) set_deblocking_bypass(s, x0, y0, log2_cb_size); } else lc->cu.cu_transquant_bypass_flag = 0; if (s->sh.slice_type != I_SLICE) { uint8_t skip_flag = ff_hevc_skip_flag_decode(s, x0, y0, x_cb, y_cb); lc->cu.pred_mode = MODE_SKIP; x = y_cb * min_cb_width + x_cb; for (y = 0; y < length; y++) { memset(&s->skip_flag[x], skip_flag, length); x += min_cb_width; } lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER; } if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) { hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0); intra_prediction_unit_default_value(s, x0, y0, log2_cb_size); if (!s->sh.disable_deblocking_filter_flag) ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size, lc->slice_or_tiles_up_boundary, lc->slice_or_tiles_left_boundary); } else { if (s->sh.slice_type != I_SLICE) lc->cu.pred_mode = ff_hevc_pred_mode_decode(s); if (lc->cu.pred_mode != MODE_INTRA || log2_cb_size == s->sps->log2_min_cb_size) { lc->cu.part_mode = ff_hevc_part_mode_decode(s, log2_cb_size); lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN && lc->cu.pred_mode == MODE_INTRA; } if (lc->cu.pred_mode == MODE_INTRA) { if (lc->cu.part_mode == PART_2Nx2N && s->sps->pcm_enabled_flag && log2_cb_size >= s->sps->pcm.log2_min_pcm_cb_size && log2_cb_size <= s->sps->pcm.log2_max_pcm_cb_size) { lc->cu.pcm_flag = ff_hevc_pcm_flag_decode(s); } if (lc->cu.pcm_flag) { int ret; intra_prediction_unit_default_value(s, x0, y0, log2_cb_size); ret = hls_pcm_sample(s, x0, y0, log2_cb_size); if (s->sps->pcm.loop_filter_disable_flag) set_deblocking_bypass(s, x0, y0, log2_cb_size); if (ret < 0) return ret; } else { intra_prediction_unit(s, x0, y0, log2_cb_size); } } else { intra_prediction_unit_default_value(s, x0, y0, log2_cb_size); switch (lc->cu.part_mode) { case PART_2Nx2N: hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0); break; case PART_2NxN: hls_prediction_unit(s, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0); hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1); break; case PART_Nx2N: hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0); hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1); break; case PART_2NxnU: hls_prediction_unit(s, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0); hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1); break; case PART_2NxnD: hls_prediction_unit(s, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0); hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1); break; case PART_nLx2N: hls_prediction_unit(s, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0); hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1); break; case 
PART_nRx2N: hls_prediction_unit(s, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0); hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1); break; case PART_NxN: hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0); hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1); hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2); hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3); break; } } if (!lc->cu.pcm_flag) { if (lc->cu.pred_mode != MODE_INTRA && !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) { lc->cu.rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(s); } if (lc->cu.rqt_root_cbf) { lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ? s->sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag : s->sps->max_transform_hierarchy_depth_inter; hls_transform_tree(s, x0, y0, x0, y0, x0, y0, log2_cb_size, log2_cb_size, 0, 0); } else { if (!s->sh.disable_deblocking_filter_flag) ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size, lc->slice_or_tiles_up_boundary, lc->slice_or_tiles_left_boundary); } } } if (s->pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0) ff_hevc_set_qPy(s, x0, y0, x0, y0, log2_cb_size); x = y_cb * min_cb_width + x_cb; for (y = 0; y < length; y++) { memset(&s->qp_y_tab[x], lc->qp_y, length); x += min_cb_width; } set_ct_depth(s, x0, y0, log2_cb_size, lc->ct.depth); return 0; }
20,133
0
static int alloc_buffer(InputStream *ist, FrameBuffer **pbuf) { AVCodecContext *s = ist->st->codec; FrameBuffer *buf = av_mallocz(sizeof(*buf)); int ret; const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1; int h_chroma_shift, v_chroma_shift; int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1 int w = s->width, h = s->height; if (!buf) return AVERROR(ENOMEM); if (!(s->flags & CODEC_FLAG_EMU_EDGE)) { w += 2*edge; h += 2*edge; } avcodec_align_dimensions(s, &w, &h); if ((ret = av_image_alloc(buf->base, buf->linesize, w, h, s->pix_fmt, 32)) < 0) { av_freep(&buf); return ret; } /* XXX this shouldn't be needed, but some tests break without this line * those decoders are buggy and need to be fixed. * the following tests fail: * bethsoft-vid, cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit */ memset(buf->base[0], 128, ret); avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift); for (int i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) { const int h_shift = i==0 ? 0 : h_chroma_shift; const int v_shift = i==0 ? 0 : v_chroma_shift; if (s->flags & CODEC_FLAG_EMU_EDGE) buf->data[i] = buf->base[i]; else buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i]*edge >> v_shift) + (pixel_size*edge >> h_shift), 32); } buf->w = s->width; buf->h = s->height; buf->pix_fmt = s->pix_fmt; buf->ist = ist; *pbuf = buf; return 0; }
20,134
0
static av_cold int tqi_decode_init(AVCodecContext *avctx) { TqiContext *t = avctx->priv_data; ff_blockdsp_init(&t->bdsp, avctx); ff_bswapdsp_init(&t->bsdsp); ff_idctdsp_init(&t->idsp, avctx); ff_init_scantable_permutation(t->idsp.idct_permutation, FF_IDCT_PERM_NONE); ff_init_scantable(t->idsp.idct_permutation, &t->intra_scantable, ff_zigzag_direct); avctx->framerate = (AVRational){ 15, 1 }; avctx->pix_fmt = AV_PIX_FMT_YUV420P; ff_mpeg12_init_vlcs(); return 0; }
20,135
0
static int pred_weight_table(H264Context *h) { int list, i; int luma_def, chroma_def; h->use_weight = 0; h->use_weight_chroma = 0; h->luma_log2_weight_denom = get_ue_golomb(&h->gb); if (h->sps.chroma_format_idc) h->chroma_log2_weight_denom = get_ue_golomb(&h->gb); luma_def = 1 << h->luma_log2_weight_denom; chroma_def = 1 << h->chroma_log2_weight_denom; for (list = 0; list < 2; list++) { h->luma_weight_flag[list] = 0; h->chroma_weight_flag[list] = 0; for (i = 0; i < h->ref_count[list]; i++) { int luma_weight_flag, chroma_weight_flag; luma_weight_flag = get_bits1(&h->gb); if (luma_weight_flag) { h->luma_weight[i][list][0] = get_se_golomb(&h->gb); h->luma_weight[i][list][1] = get_se_golomb(&h->gb); if (h->luma_weight[i][list][0] != luma_def || h->luma_weight[i][list][1] != 0) { h->use_weight = 1; h->luma_weight_flag[list] = 1; } } else { h->luma_weight[i][list][0] = luma_def; h->luma_weight[i][list][1] = 0; } if (h->sps.chroma_format_idc) { chroma_weight_flag = get_bits1(&h->gb); if (chroma_weight_flag) { int j; for (j = 0; j < 2; j++) { h->chroma_weight[i][list][j][0] = get_se_golomb(&h->gb); h->chroma_weight[i][list][j][1] = get_se_golomb(&h->gb); if (h->chroma_weight[i][list][j][0] != chroma_def || h->chroma_weight[i][list][j][1] != 0) { h->use_weight_chroma = 1; h->chroma_weight_flag[list] = 1; } } } else { int j; for (j = 0; j < 2; j++) { h->chroma_weight[i][list][j][0] = chroma_def; h->chroma_weight[i][list][j][1] = 0; } } } } if (h->slice_type_nos != AV_PICTURE_TYPE_B) break; } h->use_weight = h->use_weight || h->use_weight_chroma; return 0; }
20,137
0
static void rtl8139_cplus_transmit(RTL8139State *s) { int txcount = 0; while (rtl8139_cplus_transmit_one(s)) { ++txcount; } /* Mark transfer completed */ if (!txcount) { DPRINTF("C+ mode : transmitter queue stalled, current TxDesc = %d\n", s->currCPlusTxDesc); } else { /* update interrupt status */ s->IntrStatus |= TxOK; rtl8139_update_irq(s); } }
20,139
0
static int local_utimensat(FsContext *s, V9fsPath *fs_path, const struct timespec *buf) { char buffer[PATH_MAX]; char *path = fs_path->data; return qemu_utimens(rpath(s, path, buffer), buf); }
20,140
0
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags, CPUBreakpoint **breakpoint) { #if defined(TARGET_HAS_ICE) CPUBreakpoint *bp; bp = qemu_malloc(sizeof(*bp)); bp->pc = pc; bp->flags = flags; /* keep all GDB-injected breakpoints in front */ if (flags & BP_GDB) TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry); else TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry); breakpoint_invalidate(env, pc); if (breakpoint) *breakpoint = bp; return 0; #else return -ENOSYS; #endif }
20,141
0
static void id3v2_read_internal(AVIOContext *pb, AVDictionary **metadata, AVFormatContext *s, const char *magic, ID3v2ExtraMeta **extra_meta) { int len, ret; uint8_t buf[ID3v2_HEADER_SIZE]; int found_header; int64_t off; do { /* save the current offset in case there's nothing to read/skip */ off = avio_tell(pb); ret = avio_read(pb, buf, ID3v2_HEADER_SIZE); if (ret != ID3v2_HEADER_SIZE) { avio_seek(pb, off, SEEK_SET); break; } found_header = ff_id3v2_match(buf, magic); if (found_header) { /* parse ID3v2 header */ len = ((buf[6] & 0x7f) << 21) | ((buf[7] & 0x7f) << 14) | ((buf[8] & 0x7f) << 7) | (buf[9] & 0x7f); id3v2_parse(pb, metadata, s, len, buf[3], buf[5], extra_meta); } else { avio_seek(pb, off, SEEK_SET); } } while (found_header); ff_metadata_conv(metadata, NULL, ff_id3v2_34_metadata_conv); ff_metadata_conv(metadata, NULL, id3v2_2_metadata_conv); ff_metadata_conv(metadata, NULL, ff_id3v2_4_metadata_conv); merge_date(metadata); }
20,142
0
static always_inline void gen_fbcond (DisasContext *ctx, void* func, int ra, int32_t disp16) { int l1, l2; TCGv tmp; l1 = gen_new_label(); l2 = gen_new_label(); if (ra != 31) { tmp = tcg_temp_new(TCG_TYPE_I64); tcg_gen_helper_1_1(func, tmp, cpu_fir[ra]); } else { tmp = tcg_const_i64(0); tcg_gen_helper_1_1(func, tmp, tmp); } tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0, l1); tcg_gen_movi_i64(cpu_pc, ctx->pc); tcg_gen_br(l2); gen_set_label(l1); tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp16 << 2)); gen_set_label(l2); }
20,143
0
static int64_t coroutine_fn iscsi_co_get_block_status(BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum, BlockDriverState **file) { IscsiLun *iscsilun = bs->opaque; struct scsi_get_lba_status *lbas = NULL; struct scsi_lba_status_descriptor *lbasd = NULL; struct IscsiTask iTask; int64_t ret; iscsi_co_init_iscsitask(iscsilun, &iTask); if (!is_sector_request_lun_aligned(sector_num, nb_sectors, iscsilun)) { ret = -EINVAL; goto out; } /* default to all sectors allocated */ ret = BDRV_BLOCK_DATA; ret |= (sector_num << BDRV_SECTOR_BITS) | BDRV_BLOCK_OFFSET_VALID; *pnum = nb_sectors; /* LUN does not support logical block provisioning */ if (!iscsilun->lbpme) { goto out; } retry: if (iscsi_get_lba_status_task(iscsilun->iscsi, iscsilun->lun, sector_qemu2lun(sector_num, iscsilun), 8 + 16, iscsi_co_generic_cb, &iTask) == NULL) { ret = -ENOMEM; goto out; } while (!iTask.complete) { iscsi_set_events(iscsilun); qemu_coroutine_yield(); } if (iTask.do_retry) { if (iTask.task != NULL) { scsi_free_scsi_task(iTask.task); iTask.task = NULL; } iTask.complete = 0; goto retry; } if (iTask.status != SCSI_STATUS_GOOD) { /* in case the get_lba_status_callout fails (i.e. * because the device is busy or the cmd is not * supported) we pretend all blocks are allocated * for backwards compatibility */ goto out; } lbas = scsi_datain_unmarshall(iTask.task); if (lbas == NULL) { ret = -EIO; goto out; } lbasd = &lbas->descriptors[0]; if (sector_qemu2lun(sector_num, iscsilun) != lbasd->lba) { ret = -EIO; goto out; } *pnum = sector_lun2qemu(lbasd->num_blocks, iscsilun); if (lbasd->provisioning == SCSI_PROVISIONING_TYPE_DEALLOCATED || lbasd->provisioning == SCSI_PROVISIONING_TYPE_ANCHORED) { ret &= ~BDRV_BLOCK_DATA; if (iscsilun->lbprz) { ret |= BDRV_BLOCK_ZERO; } } if (ret & BDRV_BLOCK_ZERO) { iscsi_allocationmap_clear(iscsilun, sector_num, *pnum); } else { iscsi_allocationmap_set(iscsilun, sector_num, *pnum); } if (*pnum > nb_sectors) { *pnum = nb_sectors; } out: if (iTask.task != NULL) { scsi_free_scsi_task(iTask.task); } if (ret > 0 && ret & BDRV_BLOCK_OFFSET_VALID) { *file = bs; } return ret; }
20,145
0
static void vnc_tight_stop(VncState *vs) { // switch back to normal output/zlib buffers vs->tight = vs->output; vs->output = vs->tight_tmp; }
20,146
0
void aio_set_fd_handler(AioContext *ctx, int fd, bool is_external, IOHandler *io_read, IOHandler *io_write, AioPollFn *io_poll, void *opaque) { abort(); }
20,147
0
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb, uint32_t vaddr, int is_write, int mmu_idx, uint32_t *paddr, uint32_t *page_size, unsigned *access) { if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { return get_physical_addr_mmu(env, update_tlb, vaddr, is_write, mmu_idx, paddr, page_size, access, true); } else if (xtensa_option_bits_enabled(env->config, XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) | XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) { return get_physical_addr_region(env, vaddr, is_write, mmu_idx, paddr, page_size, access); } else { *paddr = vaddr; *page_size = TARGET_PAGE_SIZE; *access = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS; return 0; } }
20,149
0
static int usb_msd_handle_control(USBDevice *dev, int request, int value, int index, int length, uint8_t *data) { MSDState *s = (MSDState *)dev; int ret; ret = usb_desc_handle_control(dev, request, value, index, length, data); if (ret >= 0) { return ret; } ret = 0; switch (request) { case DeviceRequest | USB_REQ_GET_STATUS: data[0] = (1 << USB_DEVICE_SELF_POWERED) | (dev->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP); data[1] = 0x00; ret = 2; break; case DeviceOutRequest | USB_REQ_CLEAR_FEATURE: if (value == USB_DEVICE_REMOTE_WAKEUP) { dev->remote_wakeup = 0; } else { goto fail; } ret = 0; break; case DeviceOutRequest | USB_REQ_SET_FEATURE: if (value == USB_DEVICE_REMOTE_WAKEUP) { dev->remote_wakeup = 1; } else { goto fail; } ret = 0; break; case DeviceRequest | USB_REQ_GET_CONFIGURATION: data[0] = 1; ret = 1; break; case DeviceOutRequest | USB_REQ_SET_CONFIGURATION: ret = 0; break; case DeviceRequest | USB_REQ_GET_INTERFACE: data[0] = 0; ret = 1; break; case DeviceOutRequest | USB_REQ_SET_INTERFACE: ret = 0; break; case EndpointOutRequest | USB_REQ_CLEAR_FEATURE: ret = 0; break; case InterfaceOutRequest | USB_REQ_SET_INTERFACE: ret = 0; break; /* Class specific requests. */ case ClassInterfaceOutRequest | MassStorageReset: /* Reset state ready for the next CBW. */ s->mode = USB_MSDM_CBW; ret = 0; break; case ClassInterfaceRequest | GetMaxLun: data[0] = 0; ret = 1; break; default: fail: ret = USB_RET_STALL; break; } return ret; }
20,150
0
static int tftp_send_oack(struct tftp_session *spt, const char *keys[], uint32_t values[], int nb, struct tftp_t *recv_tp) { struct sockaddr_in saddr, daddr; struct mbuf *m; struct tftp_t *tp; int i, n = 0; m = m_get(spt->slirp); if (!m) return -1; memset(m->m_data, 0, m->m_size); m->m_data += IF_MAXLINKHDR; tp = (void *)m->m_data; m->m_data += sizeof(struct udpiphdr); tp->tp_op = htons(TFTP_OACK); for (i = 0; i < nb; i++) { n += snprintf(tp->x.tp_buf + n, sizeof(tp->x.tp_buf) - n, "%s", keys[i]) + 1; n += snprintf(tp->x.tp_buf + n, sizeof(tp->x.tp_buf) - n, "%u", values[i]) + 1; } saddr.sin_addr = recv_tp->ip.ip_dst; saddr.sin_port = recv_tp->udp.uh_dport; daddr.sin_addr = spt->client_ip; daddr.sin_port = spt->client_port; m->m_len = sizeof(struct tftp_t) - 514 + n - sizeof(struct ip) - sizeof(struct udphdr); udp_output2(NULL, m, &saddr, &daddr, IPTOS_LOWDELAY); return 0; }
20,152
0
void check_audio_video_inputs(int *has_video_ptr, int *has_audio_ptr) { int has_video, has_audio, i, j; AVFormatContext *ic; has_video = 0; has_audio = 0; for(j=0;j<nb_input_files;j++) { ic = input_files[j]; for(i=0;i<ic->nb_streams;i++) { AVCodecContext *enc = &ic->streams[i]->codec; switch(enc->codec_type) { case CODEC_TYPE_AUDIO: has_audio = 1; break; case CODEC_TYPE_VIDEO: has_video = 1; break; default: abort(); } } } *has_video_ptr = has_video; *has_audio_ptr = has_audio; }
20,153
0
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset, uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m) { BDRVQcow2State *s = bs->opaque; int l2_index; uint64_t *l2_table; uint64_t entry; unsigned int nb_clusters; int ret; uint64_t alloc_cluster_offset; trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset, *bytes); assert(*bytes > 0); /* * Calculate the number of clusters to look for. We stop at L2 table * boundaries to keep things simple. */ nb_clusters = size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes); l2_index = offset_to_l2_index(s, guest_offset); nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); /* Find L2 entry for the first involved cluster */ ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index); if (ret < 0) { return ret; } entry = be64_to_cpu(l2_table[l2_index]); /* For the moment, overwrite compressed clusters one by one */ if (entry & QCOW_OFLAG_COMPRESSED) { nb_clusters = 1; } else { nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index); } /* This function is only called when there were no non-COW clusters, so if * we can't find any unallocated or COW clusters either, something is * wrong with our code. */ assert(nb_clusters > 0); qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table); /* Allocate, if necessary at a given offset in the image file */ alloc_cluster_offset = start_of_cluster(s, *host_offset); ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset, &nb_clusters); if (ret < 0) { goto fail; } /* Can't extend contiguous allocation */ if (nb_clusters == 0) { *bytes = 0; return 0; } /* !*host_offset would overwrite the image header and is reserved for "no * host offset preferred". If 0 was a valid host offset, it'd trigger the * following overlap check; do that now to avoid having an invalid value in * *host_offset. */ if (!alloc_cluster_offset) { ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset, nb_clusters * s->cluster_size); assert(ret < 0); goto fail; } /* * Save info needed for meta data update. * * requested_sectors: Number of sectors from the start of the first * newly allocated cluster to the end of the (possibly shortened * before) write request. * * avail_sectors: Number of sectors from the start of the first * newly allocated to the end of the last newly allocated cluster. 
* * nb_sectors: The number of sectors from the start of the first * newly allocated cluster to the end of the area that the write * request actually writes to (excluding COW at the end) */ int requested_sectors = (*bytes + offset_into_cluster(s, guest_offset)) >> BDRV_SECTOR_BITS; int avail_sectors = nb_clusters << (s->cluster_bits - BDRV_SECTOR_BITS); int alloc_n_start = offset_into_cluster(s, guest_offset) >> BDRV_SECTOR_BITS; int nb_sectors = MIN(requested_sectors, avail_sectors); QCowL2Meta *old_m = *m; *m = g_malloc0(sizeof(**m)); **m = (QCowL2Meta) { .next = old_m, .alloc_offset = alloc_cluster_offset, .offset = start_of_cluster(s, guest_offset), .nb_clusters = nb_clusters, .nb_available = nb_sectors, .cow_start = { .offset = 0, .nb_sectors = alloc_n_start, }, .cow_end = { .offset = nb_sectors * BDRV_SECTOR_SIZE, .nb_sectors = avail_sectors - nb_sectors, }, }; qemu_co_queue_init(&(*m)->dependent_requests); QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight); *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset); *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE) - offset_into_cluster(s, guest_offset)); assert(*bytes != 0); return 1; fail: if (*m && (*m)->nb_clusters > 0) { QLIST_REMOVE(*m, next_in_flight); } return ret; }
20,154
0
static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, const char *model, const char *name, const char *ifname, const char *script, const char *downscript, const char *vhostfdname, int vnet_hdr, int fd, Error **errp) { Error *err = NULL; TAPState *s = net_tap_fd_init(peer, model, name, fd, vnet_hdr); int vhostfd; tap_set_sndbuf(s->fd, tap, &err); if (err) { error_propagate(errp, err); return; } if (tap->has_fd || tap->has_fds) { snprintf(s->nc.info_str, sizeof(s->nc.info_str), "fd=%d", fd); } else if (tap->has_helper) { snprintf(s->nc.info_str, sizeof(s->nc.info_str), "helper=%s", tap->helper); } else { snprintf(s->nc.info_str, sizeof(s->nc.info_str), "ifname=%s,script=%s,downscript=%s", ifname, script, downscript); if (strcmp(downscript, "no") != 0) { snprintf(s->down_script, sizeof(s->down_script), "%s", downscript); snprintf(s->down_script_arg, sizeof(s->down_script_arg), "%s", ifname); } } if (tap->has_vhost ? tap->vhost : vhostfdname || (tap->has_vhostforce && tap->vhostforce)) { VhostNetOptions options; options.backend_type = VHOST_BACKEND_TYPE_KERNEL; options.net_backend = &s->nc; if (tap->has_vhostfd || tap->has_vhostfds) { vhostfd = monitor_fd_param(cur_mon, vhostfdname, &err); if (vhostfd == -1) { error_propagate(errp, err); return; } } else { vhostfd = open("/dev/vhost-net", O_RDWR); if (vhostfd < 0) { error_setg_errno(errp, errno, "tap: open vhost char device failed"); return; } } options.opaque = (void *)(uintptr_t)vhostfd; s->vhost_net = vhost_net_init(&options); if (!s->vhost_net) { error_setg(errp, "vhost-net requested but could not be initialized"); return; } } else if (tap->has_vhostfd || tap->has_vhostfds) { error_setg(errp, "vhostfd= is not valid without vhost"); } }
20,155
0
static int virtio_balloon_init_pci(PCIDevice *pci_dev) { VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev); VirtIODevice *vdev; vdev = virtio_balloon_init(&pci_dev->qdev); virtio_init_pci(proxy, vdev, PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_DEVICE_ID_VIRTIO_BALLOON, PCI_CLASS_MEMORY_RAM, 0x00); return 0; }
20,156
0
static void bochs_refresh_limits(BlockDriverState *bs, Error **errp) { bs->request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O supported */ }
20,157
0
static void spr_write_sdr1 (void *opaque, int sprn) { DisasContext *ctx = opaque; gen_op_store_sdr1(); RET_STOP(ctx); }
20,159
0
static int uart_can_receive(void *opaque) { UartState *s = (UartState *)opaque; return RX_FIFO_SIZE - s->rx_count; }
20,160
0
vmdk_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) { BDRVVmdkState *s = bs->opaque; int ret; uint64_t n_bytes, offset_in_cluster; VmdkExtent *extent = NULL; QEMUIOVector local_qiov; uint64_t cluster_offset; uint64_t bytes_done = 0; qemu_iovec_init(&local_qiov, qiov->niov); qemu_co_mutex_lock(&s->lock); while (bytes > 0) { extent = find_extent(s, offset >> BDRV_SECTOR_BITS, extent); if (!extent) { ret = -EIO; goto fail; } ret = get_cluster_offset(bs, extent, NULL, offset, false, &cluster_offset, 0, 0); offset_in_cluster = vmdk_find_offset_in_cluster(extent, offset); n_bytes = MIN(bytes, extent->cluster_sectors * BDRV_SECTOR_SIZE - offset_in_cluster); if (ret != VMDK_OK) { /* if not allocated, try to read from parent image, if exist */ if (bs->backing && ret != VMDK_ZEROED) { if (!vmdk_is_cid_valid(bs)) { ret = -EINVAL; goto fail; } qemu_iovec_reset(&local_qiov); qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes); ret = bdrv_co_preadv(bs->backing->bs, offset, n_bytes, &local_qiov, 0); if (ret < 0) { goto fail; } } else { qemu_iovec_memset(qiov, bytes_done, 0, n_bytes); } } else { qemu_iovec_reset(&local_qiov); qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes); ret = vmdk_read_extent(extent, cluster_offset, offset_in_cluster, &local_qiov, n_bytes); if (ret) { goto fail; } } bytes -= n_bytes; offset += n_bytes; bytes_done += n_bytes; } ret = 0; fail: qemu_co_mutex_unlock(&s->lock); qemu_iovec_destroy(&local_qiov); return ret; }
20,161
0
void cpu_ppc_store_hdecr (CPUPPCState *env, uint32_t value) { PowerPCCPU *cpu = ppc_env_get_cpu(env); _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value, 0); }
20,162
0
static uint32_t syborg_virtio_readl(void *opaque, target_phys_addr_t offset) { SyborgVirtIOProxy *s = opaque; VirtIODevice *vdev = s->vdev; uint32_t ret; DPRINTF("readl 0x%x\n", (int)offset); if (offset >= SYBORG_VIRTIO_CONFIG) { return virtio_config_readl(vdev, offset - SYBORG_VIRTIO_CONFIG); } switch(offset >> 2) { case SYBORG_VIRTIO_ID: ret = SYBORG_ID_VIRTIO; break; case SYBORG_VIRTIO_DEVTYPE: ret = s->id; break; case SYBORG_VIRTIO_HOST_FEATURES: ret = vdev->get_features(vdev); ret |= vdev->binding->get_features(s); break; case SYBORG_VIRTIO_GUEST_FEATURES: ret = vdev->guest_features; break; case SYBORG_VIRTIO_QUEUE_BASE: ret = virtio_queue_get_addr(vdev, vdev->queue_sel); break; case SYBORG_VIRTIO_QUEUE_NUM: ret = virtio_queue_get_num(vdev, vdev->queue_sel); break; case SYBORG_VIRTIO_QUEUE_SEL: ret = vdev->queue_sel; break; case SYBORG_VIRTIO_STATUS: ret = vdev->status; break; case SYBORG_VIRTIO_INT_ENABLE: ret = s->int_enable; break; case SYBORG_VIRTIO_INT_STATUS: ret = vdev->isr; break; default: BADF("Bad read offset 0x%x\n", (int)offset); return 0; } return ret; }
20,163
1
static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq) { uint16_t old, new; bool v; /* Always notify when queue is empty (when feature acknowledge) */ if (((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) && !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx)) { return true; } if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) { return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT); } v = vq->signalled_used_valid; vq->signalled_used_valid = true; old = vq->signalled_used; new = vq->signalled_used = vring_used_idx(vq); return !v || vring_need_event(vring_used_event(vq), new, old); }
20,164
1
static void guess_mv(ERContext *s) { uint8_t *fixed = s->er_temp_buffer; #define MV_FROZEN 3 #define MV_CHANGED 2 #define MV_UNCHANGED 1 const int mb_stride = s->mb_stride; const int mb_width = s->mb_width; const int mb_height = s->mb_height; int i, depth, num_avail; int mb_x, mb_y, mot_step, mot_stride; set_mv_strides(s, &mot_step, &mot_stride); num_avail = 0; for (i = 0; i < s->mb_num; i++) { const int mb_xy = s->mb_index2xy[i]; int f = 0; int error = s->error_status_table[mb_xy]; if (IS_INTRA(s->cur_pic.mb_type[mb_xy])) f = MV_FROZEN; // intra // FIXME check if (!(error & ER_MV_ERROR)) f = MV_FROZEN; // inter with undamaged MV fixed[mb_xy] = f; if (f == MV_FROZEN) num_avail++; } if ((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) || num_avail <= mb_width / 2) { for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { const int mb_xy = mb_x + mb_y * s->mb_stride; int mv_dir = (s->last_pic.f && s->last_pic.f->data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD; if (IS_INTRA(s->cur_pic.mb_type[mb_xy])) continue; if (!(s->error_status_table[mb_xy] & ER_MV_ERROR)) continue; s->mv[0][0][0] = 0; s->mv[0][0][1] = 0; s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); } } return; } for (depth = 0; ; depth++) { int changed, pass, none_left; none_left = 1; changed = 1; for (pass = 0; (changed || pass < 2) && pass < 10; pass++) { int mb_x, mb_y; int score_sum = 0; changed = 0; for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { const int mb_xy = mb_x + mb_y * s->mb_stride; int mv_predictor[8][2] = { { 0 } }; int ref[8] = { 0 }; int pred_count = 0; int j; int best_score = 256 * 256 * 256 * 64; int best_pred = 0; const int mot_index = (mb_x + mb_y * mot_stride) * mot_step; int prev_x, prev_y, prev_ref; if ((mb_x ^ mb_y ^ pass) & 1) continue; if (fixed[mb_xy] == MV_FROZEN) continue; assert(!IS_INTRA(s->cur_pic.mb_type[mb_xy])); assert(s->last_pic && s->last_pic.f->data[0]); j = 0; if (mb_x > 0 && fixed[mb_xy - 1] == MV_FROZEN) j = 1; if (mb_x + 1 < mb_width && fixed[mb_xy + 1] == MV_FROZEN) j = 1; if (mb_y > 0 && fixed[mb_xy - mb_stride] == MV_FROZEN) j = 1; if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_FROZEN) j = 1; if (j == 0) continue; j = 0; if (mb_x > 0 && fixed[mb_xy - 1 ] == MV_CHANGED) j = 1; if (mb_x + 1 < mb_width && fixed[mb_xy + 1 ] == MV_CHANGED) j = 1; if (mb_y > 0 && fixed[mb_xy - mb_stride] == MV_CHANGED) j = 1; if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] == MV_CHANGED) j = 1; if (j == 0 && pass > 1) continue; none_left = 0; if (mb_x > 0 && fixed[mb_xy - 1]) { mv_predictor[pred_count][0] = s->cur_pic.motion_val[0][mot_index - mot_step][0]; mv_predictor[pred_count][1] = s->cur_pic.motion_val[0][mot_index - mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy - 1)]; pred_count++; } if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) { mv_predictor[pred_count][0] = s->cur_pic.motion_val[0][mot_index + mot_step][0]; mv_predictor[pred_count][1] = s->cur_pic.motion_val[0][mot_index + mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy + 1)]; pred_count++; } if (mb_y > 0 && fixed[mb_xy - mb_stride]) { mv_predictor[pred_count][0] = s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][0]; mv_predictor[pred_count][1] = s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy - s->mb_stride)]; pred_count++; } if (mb_y + 1<mb_height && fixed[mb_xy + mb_stride]) { mv_predictor[pred_count][0] = 
s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][0]; mv_predictor[pred_count][1] = s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][1]; ref[pred_count] = s->cur_pic.ref_index[0][4 * (mb_xy + s->mb_stride)]; pred_count++; } if (pred_count == 0) continue; if (pred_count > 1) { int sum_x = 0, sum_y = 0, sum_r = 0; int max_x, max_y, min_x, min_y, max_r, min_r; for (j = 0; j < pred_count; j++) { sum_x += mv_predictor[j][0]; sum_y += mv_predictor[j][1]; sum_r += ref[j]; if (j && ref[j] != ref[j - 1]) goto skip_mean_and_median; } /* mean */ mv_predictor[pred_count][0] = sum_x / j; mv_predictor[pred_count][1] = sum_y / j; ref[pred_count] = sum_r / j; /* median */ if (pred_count >= 3) { min_y = min_x = min_r = 99999; max_y = max_x = max_r = -99999; } else { min_x = min_y = max_x = max_y = min_r = max_r = 0; } for (j = 0; j < pred_count; j++) { max_x = FFMAX(max_x, mv_predictor[j][0]); max_y = FFMAX(max_y, mv_predictor[j][1]); max_r = FFMAX(max_r, ref[j]); min_x = FFMIN(min_x, mv_predictor[j][0]); min_y = FFMIN(min_y, mv_predictor[j][1]); min_r = FFMIN(min_r, ref[j]); } mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x; mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y; ref[pred_count + 1] = sum_r - max_r - min_r; if (pred_count == 4) { mv_predictor[pred_count + 1][0] /= 2; mv_predictor[pred_count + 1][1] /= 2; ref[pred_count + 1] /= 2; } pred_count += 2; } skip_mean_and_median: /* zero MV */ pred_count++; if (!fixed[mb_xy]) { if (s->avctx->codec_id == AV_CODEC_ID_H264) { // FIXME } else { ff_thread_await_progress(s->last_pic.tf, mb_y, 0); } if (!s->last_pic.motion_val[0] || !s->last_pic.ref_index[0]) goto skip_last_mv; prev_x = s->last_pic.motion_val[0][mot_index][0]; prev_y = s->last_pic.motion_val[0][mot_index][1]; prev_ref = s->last_pic.ref_index[0][4 * mb_xy]; } else { prev_x = s->cur_pic.motion_val[0][mot_index][0]; prev_y = s->cur_pic.motion_val[0][mot_index][1]; prev_ref = s->cur_pic.ref_index[0][4 * mb_xy]; } /* last MV */ mv_predictor[pred_count][0] = prev_x; mv_predictor[pred_count][1] = prev_y; ref[pred_count] = prev_ref; pred_count++; skip_last_mv: for (j = 0; j < pred_count; j++) { int *linesize = s->cur_pic.f->linesize; int score = 0; uint8_t *src = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0]; s->cur_pic.motion_val[0][mot_index][0] = s->mv[0][0][0] = mv_predictor[j][0]; s->cur_pic.motion_val[0][mot_index][1] = s->mv[0][0][1] = mv_predictor[j][1]; // predictor intra or otherwise not available if (ref[j] < 0) continue; s->decode_mb(s->opaque, ref[j], MV_DIR_FORWARD, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); if (mb_x > 0 && fixed[mb_xy - 1]) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k * linesize[0] - 1] - src[k * linesize[0]]); } if (mb_x + 1 < mb_width && fixed[mb_xy + 1]) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k * linesize[0] + 15] - src[k * linesize[0] + 16]); } if (mb_y > 0 && fixed[mb_xy - mb_stride]) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k - linesize[0]] - src[k]); } if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride]) { int k; for (k = 0; k < 16; k++) score += FFABS(src[k + linesize[0] * 15] - src[k + linesize[0] * 16]); } if (score <= best_score) { // <= will favor the last MV best_score = score; best_pred = j; } } score_sum += best_score; s->mv[0][0][0] = mv_predictor[best_pred][0]; s->mv[0][0][1] = mv_predictor[best_pred][1]; for (i = 0; i < mot_step; i++) for (j = 0; j < mot_step; j++) { s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0]; 
s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1]; } s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD, MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0); if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) { fixed[mb_xy] = MV_CHANGED; changed++; } else fixed[mb_xy] = MV_UNCHANGED; } } } if (none_left) return; for (i = 0; i < s->mb_num; i++) { int mb_xy = s->mb_index2xy[i]; if (fixed[mb_xy]) fixed[mb_xy] = MV_FROZEN; } } }
20,165
1
void qtest_qmp_discard_response(QTestState *s, const char *fmt, ...) { va_list ap; va_start(ap, fmt); qtest_qmpv_discard_response(s, fmt, ap); va_end(ap); }
20,166
1
static MTPData *usb_mtp_get_partial_object(MTPState *s, MTPControl *c, MTPObject *o) { MTPData *d = usb_mtp_data_alloc(c); off_t offset; trace_usb_mtp_op_get_partial_object(s->dev.addr, o->handle, o->path, c->argv[1], c->argv[2]); d->fd = open(o->path, O_RDONLY); if (d->fd == -1) { return NULL; } offset = c->argv[1]; if (offset > o->stat.st_size) { offset = o->stat.st_size; } lseek(d->fd, offset, SEEK_SET); d->length = c->argv[2]; if (d->length > o->stat.st_size - offset) { d->length = o->stat.st_size - offset; } return d; }
20,167
1
static void set_int16(Object *obj, Visitor *v, void *opaque, const char *name, Error **errp) { DeviceState *dev = DEVICE(obj); Property *prop = opaque; int16_t *ptr = qdev_get_prop_ptr(dev, prop); Error *local_err = NULL; int64_t value; if (dev->state != DEV_STATE_CREATED) { error_set(errp, QERR_PERMISSION_DENIED); return; } visit_type_int(v, &value, name, &local_err); if (local_err) { error_propagate(errp, local_err); return; } if (value > prop->info->min && value <= prop->info->max) { *ptr = value; } else { error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, dev->id?:"", name, value, prop->info->min, prop->info->max); } }
20,168
0
static av_cold int nvenc_encode_init(AVCodecContext *avctx) { NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS encode_session_params = { 0 }; NV_ENC_PRESET_CONFIG preset_config = { 0 }; CUcontext cu_context_curr; CUresult cu_res; GUID encoder_preset = NV_ENC_PRESET_HQ_GUID; GUID codec; NVENCSTATUS nv_status = NV_ENC_SUCCESS; int surfaceCount = 0; int i, num_mbs; int isLL = 0; int res = 0; int dw, dh; NvencContext *ctx = avctx->priv_data; NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs; NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs; if (!nvenc_dyload_nvenc(avctx)) return AVERROR_EXTERNAL; avctx->coded_frame = av_frame_alloc(); if (!avctx->coded_frame) { res = AVERROR(ENOMEM); goto error; } ctx->last_dts = AV_NOPTS_VALUE; ctx->encode_config.version = NV_ENC_CONFIG_VER; ctx->init_encode_params.version = NV_ENC_INITIALIZE_PARAMS_VER; preset_config.version = NV_ENC_PRESET_CONFIG_VER; preset_config.presetCfg.version = NV_ENC_CONFIG_VER; encode_session_params.version = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER; encode_session_params.apiVersion = NVENCAPI_VERSION; if (ctx->gpu >= dl_fn->nvenc_device_count) { av_log(avctx, AV_LOG_FATAL, "Requested GPU %d, but only %d GPUs are available!\n", ctx->gpu, dl_fn->nvenc_device_count); res = AVERROR(EINVAL); goto error; } ctx->cu_context = NULL; cu_res = dl_fn->cu_ctx_create(&ctx->cu_context, 0, dl_fn->nvenc_devices[ctx->gpu]); if (cu_res != CUDA_SUCCESS) { av_log(avctx, AV_LOG_FATAL, "Failed creating CUDA context for NVENC: 0x%x\n", (int)cu_res); res = AVERROR_EXTERNAL; goto error; } cu_res = dl_fn->cu_ctx_pop_current(&cu_context_curr); if (cu_res != CUDA_SUCCESS) { av_log(avctx, AV_LOG_FATAL, "Failed popping CUDA context: 0x%x\n", (int)cu_res); res = AVERROR_EXTERNAL; goto error; } encode_session_params.device = ctx->cu_context; encode_session_params.deviceType = NV_ENC_DEVICE_TYPE_CUDA; nv_status = p_nvenc->nvEncOpenEncodeSessionEx(&encode_session_params, &ctx->nvencoder); if (nv_status != NV_ENC_SUCCESS) { ctx->nvencoder = NULL; av_log(avctx, AV_LOG_FATAL, "OpenEncodeSessionEx failed: 0x%x - invalid license key?\n", (int)nv_status); res = AVERROR_EXTERNAL; goto error; } if (ctx->preset) { if (!strcmp(ctx->preset, "hp")) { encoder_preset = NV_ENC_PRESET_HP_GUID; } else if (!strcmp(ctx->preset, "hq")) { encoder_preset = NV_ENC_PRESET_HQ_GUID; } else if (!strcmp(ctx->preset, "bd")) { encoder_preset = NV_ENC_PRESET_BD_GUID; } else if (!strcmp(ctx->preset, "ll")) { encoder_preset = NV_ENC_PRESET_LOW_LATENCY_DEFAULT_GUID; isLL = 1; } else if (!strcmp(ctx->preset, "llhp")) { encoder_preset = NV_ENC_PRESET_LOW_LATENCY_HP_GUID; isLL = 1; } else if (!strcmp(ctx->preset, "llhq")) { encoder_preset = NV_ENC_PRESET_LOW_LATENCY_HQ_GUID; isLL = 1; } else if (!strcmp(ctx->preset, "default")) { encoder_preset = NV_ENC_PRESET_DEFAULT_GUID; } else { av_log(avctx, AV_LOG_FATAL, "Preset \"%s\" is unknown! 
Supported presets: hp, hq, bd, ll, llhp, llhq, default\n", ctx->preset); res = AVERROR(EINVAL); goto error; } } switch (avctx->codec->id) { case AV_CODEC_ID_H264: codec = NV_ENC_CODEC_H264_GUID; break; case AV_CODEC_ID_H265: codec = NV_ENC_CODEC_HEVC_GUID; break; default: av_log(avctx, AV_LOG_ERROR, "nvenc: Unknown codec name\n"); res = AVERROR(EINVAL); goto error; } nv_status = p_nvenc->nvEncGetEncodePresetConfig(ctx->nvencoder, codec, encoder_preset, &preset_config); if (nv_status != NV_ENC_SUCCESS) { av_log(avctx, AV_LOG_FATAL, "GetEncodePresetConfig failed: 0x%x\n", (int)nv_status); res = AVERROR_EXTERNAL; goto error; } ctx->init_encode_params.encodeGUID = codec; ctx->init_encode_params.encodeHeight = avctx->height; ctx->init_encode_params.encodeWidth = avctx->width; if (avctx->sample_aspect_ratio.num && avctx->sample_aspect_ratio.den && (avctx->sample_aspect_ratio.num != 1 || avctx->sample_aspect_ratio.num != 1)) { av_reduce(&dw, &dh, avctx->width * avctx->sample_aspect_ratio.num, avctx->height * avctx->sample_aspect_ratio.den, 1024 * 1024); ctx->init_encode_params.darHeight = dh; ctx->init_encode_params.darWidth = dw; } else { ctx->init_encode_params.darHeight = avctx->height; ctx->init_encode_params.darWidth = avctx->width; } // De-compensate for hardware, dubiously, trying to compensate for // playback at 704 pixel width. if (avctx->width == 720 && (avctx->height == 480 || avctx->height == 576)) { av_reduce(&dw, &dh, ctx->init_encode_params.darWidth * 44, ctx->init_encode_params.darHeight * 45, 1024 * 1024); ctx->init_encode_params.darHeight = dh; ctx->init_encode_params.darWidth = dw; } ctx->init_encode_params.frameRateNum = avctx->time_base.den; ctx->init_encode_params.frameRateDen = avctx->time_base.num * avctx->ticks_per_frame; num_mbs = ((avctx->width + 15) >> 4) * ((avctx->height + 15) >> 4); ctx->max_surface_count = (num_mbs >= 8160) ? 32 : 48; ctx->init_encode_params.enableEncodeAsync = 0; ctx->init_encode_params.enablePTD = 1; ctx->init_encode_params.presetGUID = encoder_preset; ctx->init_encode_params.encodeConfig = &ctx->encode_config; memcpy(&ctx->encode_config, &preset_config.presetCfg, sizeof(ctx->encode_config)); ctx->encode_config.version = NV_ENC_CONFIG_VER; if (avctx->refs >= 0) { /* 0 means "let the hardware decide" */ switch (avctx->codec->id) { case AV_CODEC_ID_H264: ctx->encode_config.encodeCodecConfig.h264Config.maxNumRefFrames = avctx->refs; break; case AV_CODEC_ID_H265: ctx->encode_config.encodeCodecConfig.hevcConfig.maxNumRefFramesInDPB = avctx->refs; break; /* Earlier switch/case will return if unknown codec is passed. */ } } if (avctx->gop_size > 0) { if (avctx->max_b_frames >= 0) { /* 0 is intra-only, 1 is I/P only, 2 is one B Frame, 3 two B frames, and so on. */ ctx->encode_config.frameIntervalP = avctx->max_b_frames + 1; } ctx->encode_config.gopLength = avctx->gop_size; switch (avctx->codec->id) { case AV_CODEC_ID_H264: ctx->encode_config.encodeCodecConfig.h264Config.idrPeriod = avctx->gop_size; break; case AV_CODEC_ID_H265: ctx->encode_config.encodeCodecConfig.hevcConfig.idrPeriod = avctx->gop_size; break; /* Earlier switch/case will return if unknown codec is passed. */ } } else if (avctx->gop_size == 0) { ctx->encode_config.frameIntervalP = 0; ctx->encode_config.gopLength = 1; switch (avctx->codec->id) { case AV_CODEC_ID_H264: ctx->encode_config.encodeCodecConfig.h264Config.idrPeriod = 1; break; case AV_CODEC_ID_H265: ctx->encode_config.encodeCodecConfig.hevcConfig.idrPeriod = 1; break; /* Earlier switch/case will return if unknown codec is passed. 
*/ } } /* when there're b frames, set dts offset */ if (ctx->encode_config.frameIntervalP >= 2) ctx->last_dts = -2; if (avctx->bit_rate > 0) ctx->encode_config.rcParams.averageBitRate = avctx->bit_rate; if (avctx->rc_max_rate > 0) ctx->encode_config.rcParams.maxBitRate = avctx->rc_max_rate; if (ctx->cbr) { if (!ctx->twopass) { ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CBR; } else if (ctx->twopass == 1 || isLL) { ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_2_PASS_QUALITY; if (avctx->codec->id == AV_CODEC_ID_H264) { ctx->encode_config.encodeCodecConfig.h264Config.adaptiveTransformMode = NV_ENC_H264_ADAPTIVE_TRANSFORM_ENABLE; ctx->encode_config.encodeCodecConfig.h264Config.fmoMode = NV_ENC_H264_FMO_DISABLE; } if (!isLL) av_log(avctx, AV_LOG_WARNING, "Twopass mode is only known to work with low latency (ll, llhq, llhp) presets.\n"); } else { ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CBR; } } else if (avctx->global_quality > 0) { ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CONSTQP; ctx->encode_config.rcParams.constQP.qpInterB = avctx->global_quality; ctx->encode_config.rcParams.constQP.qpInterP = avctx->global_quality; ctx->encode_config.rcParams.constQP.qpIntra = avctx->global_quality; avctx->qmin = -1; avctx->qmax = -1; } else if (avctx->qmin >= 0 && avctx->qmax >= 0) { ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_VBR; ctx->encode_config.rcParams.enableMinQP = 1; ctx->encode_config.rcParams.enableMaxQP = 1; ctx->encode_config.rcParams.minQP.qpInterB = avctx->qmin; ctx->encode_config.rcParams.minQP.qpInterP = avctx->qmin; ctx->encode_config.rcParams.minQP.qpIntra = avctx->qmin; ctx->encode_config.rcParams.maxQP.qpInterB = avctx->qmax; ctx->encode_config.rcParams.maxQP.qpInterP = avctx->qmax; ctx->encode_config.rcParams.maxQP.qpIntra = avctx->qmax; } if (avctx->rc_buffer_size > 0) ctx->encode_config.rcParams.vbvBufferSize = avctx->rc_buffer_size; if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) { ctx->encode_config.frameFieldMode = NV_ENC_PARAMS_FRAME_FIELD_MODE_FIELD; } else { ctx->encode_config.frameFieldMode = NV_ENC_PARAMS_FRAME_FIELD_MODE_FRAME; } switch (avctx->codec->id) { case AV_CODEC_ID_H264: ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.colourDescriptionPresentFlag = 1; ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.videoSignalTypePresentFlag = 1; ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.colourMatrix = avctx->colorspace; ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.colourPrimaries = avctx->color_primaries; ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.transferCharacteristics = avctx->color_trc; ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.videoFullRangeFlag = avctx->color_range == AVCOL_RANGE_JPEG; ctx->encode_config.encodeCodecConfig.h264Config.disableSPSPPS = (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0; ctx->encode_config.encodeCodecConfig.h264Config.repeatSPSPPS = (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) ? 
0 : 1; if (!ctx->profile) { switch (avctx->profile) { case FF_PROFILE_H264_BASELINE: ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_BASELINE_GUID; break; case FF_PROFILE_H264_MAIN: ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_MAIN_GUID; break; case FF_PROFILE_H264_HIGH: case FF_PROFILE_UNKNOWN: ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_GUID; break; default: av_log(avctx, AV_LOG_WARNING, "Unsupported profile requested, falling back to high\n"); ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_GUID; break; } } else { if (!strcmp(ctx->profile, "high")) { ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_GUID; avctx->profile = FF_PROFILE_H264_HIGH; } else if (!strcmp(ctx->profile, "main")) { ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_MAIN_GUID; avctx->profile = FF_PROFILE_H264_MAIN; } else if (!strcmp(ctx->profile, "baseline")) { ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_BASELINE_GUID; avctx->profile = FF_PROFILE_H264_BASELINE; } else { av_log(avctx, AV_LOG_FATAL, "Profile \"%s\" is unknown! Supported profiles: high, main, baseline\n", ctx->profile); res = AVERROR(EINVAL); goto error; } } if (ctx->level) { res = input_string_to_uint32(avctx, nvenc_h264_level_pairs, ctx->level, &ctx->encode_config.encodeCodecConfig.h264Config.level); if (res) { av_log(avctx, AV_LOG_FATAL, "Level \"%s\" is unknown! Supported levels: auto, 1, 1b, 1.1, 1.2, 1.3, 2, 2.1, 2.2, 3, 3.1, 3.2, 4, 4.1, 4.2, 5, 5.1\n", ctx->level); goto error; } } else { ctx->encode_config.encodeCodecConfig.h264Config.level = NV_ENC_LEVEL_AUTOSELECT; } break; case AV_CODEC_ID_H265: ctx->encode_config.encodeCodecConfig.hevcConfig.disableSPSPPS = (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0; ctx->encode_config.encodeCodecConfig.hevcConfig.repeatSPSPPS = (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) ? 0 : 1; /* No other profile is supported in the current SDK version 5 */ ctx->encode_config.profileGUID = NV_ENC_HEVC_PROFILE_MAIN_GUID; avctx->profile = FF_PROFILE_HEVC_MAIN; if (ctx->level) { res = input_string_to_uint32(avctx, nvenc_hevc_level_pairs, ctx->level, &ctx->encode_config.encodeCodecConfig.hevcConfig.level); if (res) { av_log(avctx, AV_LOG_FATAL, "Level \"%s\" is unknown! Supported levels: auto, 1, 2, 2.1, 3, 3.1, 4, 4.1, 5, 5.1, 5.2, 6, 6.1, 6.2\n", ctx->level); goto error; } } else { ctx->encode_config.encodeCodecConfig.hevcConfig.level = NV_ENC_LEVEL_AUTOSELECT; } if (ctx->tier) { if (!strcmp(ctx->tier, "main")) { ctx->encode_config.encodeCodecConfig.hevcConfig.tier = NV_ENC_TIER_HEVC_MAIN; } else if (!strcmp(ctx->tier, "high")) { ctx->encode_config.encodeCodecConfig.hevcConfig.tier = NV_ENC_TIER_HEVC_HIGH; } else { av_log(avctx, AV_LOG_FATAL, "Tier \"%s\" is unknown! Supported tiers: main, high\n", ctx->tier); res = AVERROR(EINVAL); goto error; } } break; /* Earlier switch/case will return if unknown codec is passed. 
*/ } nv_status = p_nvenc->nvEncInitializeEncoder(ctx->nvencoder, &ctx->init_encode_params); if (nv_status != NV_ENC_SUCCESS) { av_log(avctx, AV_LOG_FATAL, "InitializeEncoder failed: 0x%x\n", (int)nv_status); res = AVERROR_EXTERNAL; goto error; } ctx->input_surfaces = av_malloc(ctx->max_surface_count * sizeof(*ctx->input_surfaces)); if (!ctx->input_surfaces) { res = AVERROR(ENOMEM); goto error; } ctx->output_surfaces = av_malloc(ctx->max_surface_count * sizeof(*ctx->output_surfaces)); if (!ctx->output_surfaces) { res = AVERROR(ENOMEM); goto error; } for (surfaceCount = 0; surfaceCount < ctx->max_surface_count; ++surfaceCount) { NV_ENC_CREATE_INPUT_BUFFER allocSurf = { 0 }; NV_ENC_CREATE_BITSTREAM_BUFFER allocOut = { 0 }; allocSurf.version = NV_ENC_CREATE_INPUT_BUFFER_VER; allocOut.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER; allocSurf.width = (avctx->width + 31) & ~31; allocSurf.height = (avctx->height + 31) & ~31; allocSurf.memoryHeap = NV_ENC_MEMORY_HEAP_SYSMEM_CACHED; switch (avctx->pix_fmt) { case AV_PIX_FMT_YUV420P: allocSurf.bufferFmt = NV_ENC_BUFFER_FORMAT_YV12_PL; break; case AV_PIX_FMT_NV12: allocSurf.bufferFmt = NV_ENC_BUFFER_FORMAT_NV12_PL; break; case AV_PIX_FMT_YUV444P: allocSurf.bufferFmt = NV_ENC_BUFFER_FORMAT_YUV444_PL; break; default: av_log(avctx, AV_LOG_FATAL, "Invalid input pixel format\n"); res = AVERROR(EINVAL); goto error; } nv_status = p_nvenc->nvEncCreateInputBuffer(ctx->nvencoder, &allocSurf); if (nv_status != NV_ENC_SUCCESS) { av_log(avctx, AV_LOG_FATAL, "CreateInputBuffer failed\n"); res = AVERROR_EXTERNAL; goto error; } ctx->input_surfaces[surfaceCount].lockCount = 0; ctx->input_surfaces[surfaceCount].input_surface = allocSurf.inputBuffer; ctx->input_surfaces[surfaceCount].format = allocSurf.bufferFmt; ctx->input_surfaces[surfaceCount].width = allocSurf.width; ctx->input_surfaces[surfaceCount].height = allocSurf.height; /* 1MB is large enough to hold most output frames. NVENC increases this automaticaly if it's not enough. 
*/ allocOut.size = 1024 * 1024; allocOut.memoryHeap = NV_ENC_MEMORY_HEAP_SYSMEM_CACHED; nv_status = p_nvenc->nvEncCreateBitstreamBuffer(ctx->nvencoder, &allocOut); if (nv_status != NV_ENC_SUCCESS) { av_log(avctx, AV_LOG_FATAL, "CreateBitstreamBuffer failed\n"); ctx->output_surfaces[surfaceCount++].output_surface = NULL; res = AVERROR_EXTERNAL; goto error; } ctx->output_surfaces[surfaceCount].output_surface = allocOut.bitstreamBuffer; ctx->output_surfaces[surfaceCount].size = allocOut.size; ctx->output_surfaces[surfaceCount].busy = 0; } if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) { uint32_t outSize = 0; char tmpHeader[256]; NV_ENC_SEQUENCE_PARAM_PAYLOAD payload = { 0 }; payload.version = NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER; payload.spsppsBuffer = tmpHeader; payload.inBufferSize = sizeof(tmpHeader); payload.outSPSPPSPayloadSize = &outSize; nv_status = p_nvenc->nvEncGetSequenceParams(ctx->nvencoder, &payload); if (nv_status != NV_ENC_SUCCESS) { av_log(avctx, AV_LOG_FATAL, "GetSequenceParams failed\n"); goto error; } avctx->extradata_size = outSize; avctx->extradata = av_mallocz(outSize + FF_INPUT_BUFFER_PADDING_SIZE); if (!avctx->extradata) { res = AVERROR(ENOMEM); goto error; } memcpy(avctx->extradata, tmpHeader, outSize); } if (ctx->encode_config.frameIntervalP > 1) avctx->has_b_frames = 2; if (ctx->encode_config.rcParams.averageBitRate > 0) avctx->bit_rate = ctx->encode_config.rcParams.averageBitRate; return 0; error: for (i = 0; i < surfaceCount; ++i) { p_nvenc->nvEncDestroyInputBuffer(ctx->nvencoder, ctx->input_surfaces[i].input_surface); if (ctx->output_surfaces[i].output_surface) p_nvenc->nvEncDestroyBitstreamBuffer(ctx->nvencoder, ctx->output_surfaces[i].output_surface); } if (ctx->nvencoder) p_nvenc->nvEncDestroyEncoder(ctx->nvencoder); if (ctx->cu_context) dl_fn->cu_ctx_destroy(ctx->cu_context); av_frame_free(&avctx->coded_frame); nvenc_unload_nvenc(avctx); ctx->nvencoder = NULL; ctx->cu_context = NULL; return res; }
20,169
1
size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset, ram_addr_t offset, size_t size, int *bytes_sent) { if (f->ops->save_page) { int ret = f->ops->save_page(f, f->opaque, block_offset, offset, size, bytes_sent); if (ret != RAM_SAVE_CONTROL_DELAYED) { if (*bytes_sent > 0) { qemu_update_position(f, *bytes_sent); } else if (ret < 0) { qemu_file_set_error(f, ret); } } return ret; } return RAM_SAVE_CONTROL_NOT_SUPP; }
20,170
1
static int gif_read_packet(AVFormatContext * s1, AVPacket * pkt) { GifState *s = s1->priv_data; int ret; ret = gif_parse_next_image(s); if (ret < 0) return ret; /* XXX: avoid copying */ if (av_new_packet(pkt, s->screen_width * s->screen_height * 3)) { return AVERROR(EIO); } pkt->stream_index = 0; memcpy(pkt->data, s->image_buf, s->screen_width * s->screen_height * 3); return 0; }
20,171
1
static int decode_residuals(FLACContext *s, int32_t *decoded, int pred_order) { int i, tmp, partition, method_type, rice_order; int rice_bits, rice_esc; int samples; method_type = get_bits(&s->gb, 2); if (method_type > 1) { av_log(s->avctx, AV_LOG_ERROR, "illegal residual coding method %d\n", method_type); return -1; } rice_order = get_bits(&s->gb, 4); samples= s->blocksize >> rice_order; if (pred_order > samples) { av_log(s->avctx, AV_LOG_ERROR, "invalid predictor order: %i > %i\n", pred_order, samples); return -1; } rice_bits = 4 + method_type; rice_esc = (1 << rice_bits) - 1; decoded += pred_order; i= pred_order; for (partition = 0; partition < (1 << rice_order); partition++) { tmp = get_bits(&s->gb, rice_bits); if (tmp == rice_esc) { tmp = get_bits(&s->gb, 5); for (; i < samples; i++) *decoded++ = get_sbits_long(&s->gb, tmp); } else { for (; i < samples; i++) { *decoded++ = get_sr_golomb_flac(&s->gb, tmp, INT_MAX, 0); } } i= 0; } return 0; }
20,172
1
static XICSState *xics_system_init(int nr_servers, int nr_irqs) { XICSState *icp = NULL; if (kvm_enabled()) { QemuOpts *machine_opts = qemu_get_machine_opts(); bool irqchip_allowed = qemu_opt_get_bool(machine_opts, "kernel_irqchip", true); bool irqchip_required = qemu_opt_get_bool(machine_opts, "kernel_irqchip", false); Error *err = NULL; if (irqchip_allowed) { icp = try_create_xics(TYPE_KVM_XICS, nr_servers, nr_irqs, &err); } if (irqchip_required && !icp) { error_report("kernel_irqchip requested but unavailable: %s", error_get_pretty(err)); } } if (!icp) { icp = try_create_xics(TYPE_XICS, nr_servers, nr_irqs, &error_abort); } return icp; }
20,173
1
m48t59_t *m48t59_init_isa(uint32_t io_base, uint16_t size, int type) { M48t59ISAState *d; ISADevice *dev; m48t59_t *s; dev = isa_create("m48t59_isa"); qdev_prop_set_uint32(&dev->qdev, "type", type); qdev_prop_set_uint32(&dev->qdev, "size", size); qdev_prop_set_uint32(&dev->qdev, "io_base", io_base); qdev_init(&dev->qdev); d = DO_UPCAST(M48t59ISAState, busdev, dev); s = &d->state; if (io_base != 0) { register_ioport_read(io_base, 0x04, 1, NVRAM_readb, s); register_ioport_write(io_base, 0x04, 1, NVRAM_writeb, s); } return s; }
20,175
1
static void close_decoder(QSVContext *q) { QSVFrame *cur; if (q->session) MFXVideoDECODE_Close(q->session); while (q->async_fifo && av_fifo_size(q->async_fifo)) { QSVFrame *out_frame; mfxSyncPoint *sync; av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL); av_fifo_generic_read(q->async_fifo, &sync, sizeof(sync), NULL); av_freep(&sync); } cur = q->work_frames; while (cur) { q->work_frames = cur->next; av_frame_free(&cur->frame); av_freep(&cur); cur = q->work_frames; } q->engine_ready = 0; q->reinit_pending = 0; }
20,176
1
static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, unsigned int width, unsigned int height, int lumStride, int chromStride, int srcStride) { unsigned y; const unsigned chromWidth= width>>1; for(y=0; y<height; y+=2) { #ifdef HAVE_MMX asm volatile( "xorl %%eax, %%eax \n\t" "pcmpeqw %%mm7, %%mm7 \n\t" "psrlw $8, %%mm7 \n\t" // FF,00,FF,00... ".balign 16 \n\t" "1: \n\t" PREFETCH" 64(%0, %%eax, 4) \n\t" "movq (%0, %%eax, 4), %%mm0 \n\t" // UYVY UYVY(0) "movq 8(%0, %%eax, 4), %%mm1 \n\t" // UYVY UYVY(4) "movq %%mm0, %%mm2 \n\t" // UYVY UYVY(0) "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(4) "pand %%mm7, %%mm0 \n\t" // U0V0 U0V0(0) "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(4) "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(0) "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(4) "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0) "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0) MOVNTQ" %%mm2, (%1, %%eax, 2) \n\t" "movq 16(%0, %%eax, 4), %%mm1 \n\t" // UYVY UYVY(8) "movq 24(%0, %%eax, 4), %%mm2 \n\t" // UYVY UYVY(12) "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(8) "movq %%mm2, %%mm4 \n\t" // UYVY UYVY(12) "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(8) "pand %%mm7, %%mm2 \n\t" // U0V0 U0V0(12) "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(8) "psrlw $8, %%mm4 \n\t" // Y0Y0 Y0Y0(12) "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8) "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8) MOVNTQ" %%mm3, 8(%1, %%eax, 2) \n\t" "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0) "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8) "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0) "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8) "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0) "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8) "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0) "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0) MOVNTQ" %%mm0, (%3, %%eax) \n\t" MOVNTQ" %%mm2, (%2, %%eax) \n\t" "addl $8, %%eax \n\t" "cmpl %4, %%eax \n\t" " jb 1b \n\t" ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth) : "memory", "%eax" ); ydst += lumStride; src += srcStride; asm volatile( "xorl %%eax, %%eax \n\t" ".balign 16 \n\t" "1: \n\t" PREFETCH" 64(%0, %%eax, 4) \n\t" "movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0) "movq 8(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(4) "movq 16(%0, %%eax, 4), %%mm2 \n\t" // YUYV YUYV(8) "movq 24(%0, %%eax, 4), %%mm3 \n\t" // YUYV YUYV(12) "psrlw $8, %%mm0 \n\t" // Y0Y0 Y0Y0(0) "psrlw $8, %%mm1 \n\t" // Y0Y0 Y0Y0(4) "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(8) "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(12) "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0) "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8) MOVNTQ" %%mm0, (%1, %%eax, 2) \n\t" MOVNTQ" %%mm2, 8(%1, %%eax, 2) \n\t" "addl $8, %%eax \n\t" "cmpl %4, %%eax \n\t" " jb 1b \n\t" ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth) : "memory", "%eax" ); #else unsigned i; for(i=0; i<chromWidth; i++) { udst[i] = src[4*i+0]; ydst[2*i+0] = src[4*i+1]; vdst[i] = src[4*i+2]; ydst[2*i+1] = src[4*i+3]; } ydst += lumStride; src += srcStride; for(i=0; i<chromWidth; i++) { ydst[2*i+0] = src[4*i+1]; ydst[2*i+1] = src[4*i+3]; } #endif udst += chromStride; vdst += chromStride; ydst += lumStride; src += srcStride; } #ifdef HAVE_MMX asm volatile( EMMS" \n\t" SFENCE" \n\t" :::"memory"); #endif }
20,177
0
static inline int ape_decode_value_3860(APEContext *ctx, GetBitContext *gb, APERice *rice) { unsigned int x, overflow; overflow = get_unary(gb, 1, get_bits_left(gb)); if (ctx->fileversion > 3880) { while (overflow >= 16) { overflow -= 16; rice->k += 4; } } if (!rice->k) x = overflow; else x = (overflow << rice->k) + get_bits(gb, rice->k); rice->ksum += x - (rice->ksum + 8 >> 4); if (rice->ksum < (rice->k ? 1 << (rice->k + 4) : 0)) rice->k--; else if (rice->ksum >= (1 << (rice->k + 5)) && rice->k < 24) rice->k++; /* Convert to signed */ if (x & 1) return (x >> 1) + 1; else return -(x >> 1); }
20,178
1
int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb) { MpegEncContext *s = &ctx->m; unsigned startcode, v; int ret; int vol = 0; /* search next start code */ align_get_bits(gb); if (s->codec_tag == AV_RL32("WV1F") && show_bits(gb, 24) == 0x575630) { skip_bits(gb, 24); if (get_bits(gb, 8) == 0xF0) goto end; } startcode = 0xff; for (;;) { if (get_bits_count(gb) >= gb->size_in_bits) { if (gb->size_in_bits == 8 && (ctx->divx_version >= 0 || ctx->xvid_build >= 0) || s->codec_tag == AV_RL32("QMP4")) { av_log(s->avctx, AV_LOG_VERBOSE, "frame skip %d\n", gb->size_in_bits); return FRAME_SKIPPED; // divx bug } else return -1; // end of stream } /* use the bits after the test */ v = get_bits(gb, 8); startcode = ((startcode << 8) | v) & 0xffffffff; if ((startcode & 0xFFFFFF00) != 0x100) continue; // no startcode if (s->avctx->debug & FF_DEBUG_STARTCODE) { av_log(s->avctx, AV_LOG_DEBUG, "startcode: %3X ", startcode); if (startcode <= 0x11F) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Start"); else if (startcode <= 0x12F) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Layer Start"); else if (startcode <= 0x13F) av_log(s->avctx, AV_LOG_DEBUG, "Reserved"); else if (startcode <= 0x15F) av_log(s->avctx, AV_LOG_DEBUG, "FGS bp start"); else if (startcode <= 0x1AF) av_log(s->avctx, AV_LOG_DEBUG, "Reserved"); else if (startcode == 0x1B0) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq Start"); else if (startcode == 0x1B1) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Seq End"); else if (startcode == 0x1B2) av_log(s->avctx, AV_LOG_DEBUG, "User Data"); else if (startcode == 0x1B3) av_log(s->avctx, AV_LOG_DEBUG, "Group of VOP start"); else if (startcode == 0x1B4) av_log(s->avctx, AV_LOG_DEBUG, "Video Session Error"); else if (startcode == 0x1B5) av_log(s->avctx, AV_LOG_DEBUG, "Visual Object Start"); else if (startcode == 0x1B6) av_log(s->avctx, AV_LOG_DEBUG, "Video Object Plane start"); else if (startcode == 0x1B7) av_log(s->avctx, AV_LOG_DEBUG, "slice start"); else if (startcode == 0x1B8) av_log(s->avctx, AV_LOG_DEBUG, "extension start"); else if (startcode == 0x1B9) av_log(s->avctx, AV_LOG_DEBUG, "fgs start"); else if (startcode == 0x1BA) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object start"); else if (startcode == 0x1BB) av_log(s->avctx, AV_LOG_DEBUG, "FBA Object Plane start"); else if (startcode == 0x1BC) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object start"); else if (startcode == 0x1BD) av_log(s->avctx, AV_LOG_DEBUG, "Mesh Object Plane start"); else if (startcode == 0x1BE) av_log(s->avctx, AV_LOG_DEBUG, "Still Texture Object start"); else if (startcode == 0x1BF) av_log(s->avctx, AV_LOG_DEBUG, "Texture Spatial Layer start"); else if (startcode == 0x1C0) av_log(s->avctx, AV_LOG_DEBUG, "Texture SNR Layer start"); else if (startcode == 0x1C1) av_log(s->avctx, AV_LOG_DEBUG, "Texture Tile start"); else if (startcode == 0x1C2) av_log(s->avctx, AV_LOG_DEBUG, "Texture Shape Layer start"); else if (startcode == 0x1C3) av_log(s->avctx, AV_LOG_DEBUG, "stuffing start"); else if (startcode <= 0x1C5) av_log(s->avctx, AV_LOG_DEBUG, "reserved"); else if (startcode <= 0x1FF) av_log(s->avctx, AV_LOG_DEBUG, "System start"); av_log(s->avctx, AV_LOG_DEBUG, " at %d\n", get_bits_count(gb)); } if (startcode >= 0x120 && startcode <= 0x12F) { if ((ret = decode_vol_header(ctx, gb)) < 0) return ret; } else if (startcode == USER_DATA_STARTCODE) { decode_user_data(ctx, gb); } else if (startcode == GOP_STARTCODE) { mpeg4_decode_gop_header(s, gb); } else if (startcode == VOS_STARTCODE) { mpeg4_decode_profile_level(s, gb); } else if (startcode == VOP_STARTCODE) { break; } align_get_bits(gb); startcode = 0xff; } end: if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) s->low_delay = 1; s->avctx->has_b_frames = !s->low_delay; return decode_vop_header(ctx, gb); }
20,180
1
static int asf_read_single_payload(AVFormatContext *s, AVPacket *pkt, ASFPacket *asf_pkt) { ASFContext *asf = s->priv_data; AVIOContext *pb = s->pb; int64_t offset; uint64_t size; unsigned char *p; int ret; if (!asf_pkt->data_size) { asf_pkt->data_size = asf_pkt->size_left = avio_rl32(pb); // read media object size if (asf_pkt->data_size <= 0) return AVERROR_EOF; if ((ret = av_new_packet(&asf_pkt->avpkt, asf_pkt->data_size)) < 0) return ret; } else avio_skip(pb, 4); // skip media object size asf_pkt->dts = avio_rl32(pb); // read presentation time if (asf->rep_data_len >= 8) avio_skip(pb, asf->rep_data_len - 8); // skip replicated data offset = avio_tell(pb); // size of the payload - size of the packet without header and padding if (asf->packet_size_internal) size = asf->packet_size_internal - offset + asf->packet_offset - asf->pad_len; else size = asf->packet_size - offset + asf->packet_offset - asf->pad_len; if (size > asf->packet_size) { av_log(s, AV_LOG_ERROR, "Error: invalid data packet size, offset %"PRId64".\n", avio_tell(pb)); return AVERROR_INVALIDDATA; } p = asf_pkt->avpkt.data + asf_pkt->data_size - asf_pkt->size_left; if (size > asf_pkt->size_left || asf_pkt->size_left <= 0) return AVERROR_INVALIDDATA; if (asf_pkt->size_left > size) asf_pkt->size_left -= size; else asf_pkt->size_left = 0; if ((ret = avio_read(pb, p, size)) < 0) return ret; if (s->key && s->keylen == 20) ff_asfcrypt_dec(s->key, p, ret); if (asf->packet_size_internal) avio_skip(pb, asf->packet_size - asf->packet_size_internal); avio_skip(pb, asf->pad_len); // skip padding return 0; }
20,182
1
static int ide_qdev_init(DeviceState *qdev, DeviceInfo *base) { IDEDevice *dev = DO_UPCAST(IDEDevice, qdev, qdev); IDEDeviceInfo *info = DO_UPCAST(IDEDeviceInfo, qdev, base); IDEBus *bus = DO_UPCAST(IDEBus, qbus, qdev->parent_bus); if (!dev->conf.bs) { fprintf(stderr, "%s: no drive specified\n", qdev->info->name); goto err; } if (dev->unit == -1) { dev->unit = bus->master ? 1 : 0; } switch (dev->unit) { case 0: if (bus->master) { fprintf(stderr, "ide: tried to assign master twice\n"); goto err; } bus->master = dev; break; case 1: if (bus->slave) { fprintf(stderr, "ide: tried to assign slave twice\n"); goto err; } bus->slave = dev; break; default: goto err; } return info->init(dev); err: return -1; }
20,185
1
int net_init_vhost_user(const NetClientOptions *opts, const char *name, NetClientState *peer, Error **errp) { int queues; const NetdevVhostUserOptions *vhost_user_opts; CharDriverState *chr; assert(opts->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER); vhost_user_opts = opts->u.vhost_user; chr = net_vhost_parse_chardev(vhost_user_opts, errp); if (!chr) { return -1; } /* verify net frontend */ if (qemu_opts_foreach(qemu_find_opts("device"), net_vhost_check_net, (char *)name, errp)) { return -1; } queues = vhost_user_opts->has_queues ? vhost_user_opts->queues : 1; if (queues < 1) { error_setg(errp, "vhost-user number of queues must be bigger than zero"); return -1; } return net_vhost_user_init(peer, "vhost_user", name, chr, queues); }
20,186
1
static int ds1338_send(I2CSlave *i2c, uint8_t data) { DS1338State *s = FROM_I2C_SLAVE(DS1338State, i2c); if (s->addr_byte) { s->ptr = data; s->addr_byte = 0; return 0; } s->nvram[s->ptr - 8] = data; if (data < 8) { qemu_get_timedate(&s->now, s->offset); switch(data) { case 0: /* TODO: Implement CH (stop) bit. */ s->now.tm_sec = from_bcd(data & 0x7f); break; case 1: s->now.tm_min = from_bcd(data & 0x7f); break; case 2: if (data & 0x40) { if (data & 0x20) { data = from_bcd(data & 0x4f) + 11; } else { data = from_bcd(data & 0x1f) - 1; } } else { data = from_bcd(data); } s->now.tm_hour = data; break; case 3: s->now.tm_wday = from_bcd(data & 7) - 1; break; case 4: s->now.tm_mday = from_bcd(data & 0x3f); break; case 5: s->now.tm_mon = from_bcd(data & 0x1f) - 1; break; case 6: s->now.tm_year = from_bcd(data) + 100; break; case 7: /* Control register. Currently ignored. */ break; } s->offset = qemu_timedate_diff(&s->now); } s->ptr = (s->ptr + 1) & 0xff; return 0; }
20,187
0
static inline int direct_search(MpegEncContext * s, int mb_x, int mb_y) { int P[6][2]; const int mot_stride = s->mb_width + 2; const int mot_xy = (mb_y + 1)*mot_stride + mb_x + 1; int dmin, dmin2; int motion_fx, motion_fy, motion_bx, motion_by, motion_bx0, motion_by0; int motion_dx, motion_dy; const int motion_px= s->p_mv_table[mot_xy][0]; const int motion_py= s->p_mv_table[mot_xy][1]; const int time_pp= s->pp_time; const int time_bp= s->bp_time; const int time_pb= time_pp - time_bp; int bx, by; int mx, my, mx2, my2; uint8_t *ref_picture= s->me_scratchpad - (mb_x + 1 + (mb_y + 1)*s->linesize)*16; int16_t (*mv_table)[2]= s->b_direct_mv_table; uint16_t *mv_penalty= s->mv_penalty[s->f_code] + MAX_MV; // f_code of the prev frame /* thanks to iso-mpeg the rounding is different for the zero vector, so we need to handle that ... */ motion_fx= (motion_px*time_pb)/time_pp; motion_fy= (motion_py*time_pb)/time_pp; motion_bx0= (-motion_px*time_bp)/time_pp; motion_by0= (-motion_py*time_bp)/time_pp; motion_dx= motion_dy=0; dmin2= check_bidir_mv(s, mb_x, mb_y, motion_fx, motion_fy, motion_bx0, motion_by0, motion_fx, motion_fy, motion_bx0, motion_by0) - s->qscale; motion_bx= motion_fx - motion_px; motion_by= motion_fy - motion_py; for(by=-1; by<2; by++){ for(bx=-1; bx<2; bx++){ uint8_t *dest_y = s->me_scratchpad + (by+1)*s->linesize*16 + (bx+1)*16; uint8_t *ptr; int dxy; int src_x, src_y; const int width= s->width; const int height= s->height; dxy = ((motion_fy & 1) << 1) | (motion_fx & 1); src_x = (mb_x + bx) * 16 + (motion_fx >> 1); src_y = (mb_y + by) * 16 + (motion_fy >> 1); src_x = clip(src_x, -16, width); if (src_x == width) dxy &= ~1; src_y = clip(src_y, -16, height); if (src_y == height) dxy &= ~2; ptr = s->last_picture[0] + (src_y * s->linesize) + src_x; put_pixels_tab[dxy](dest_y , ptr , s->linesize, 16); put_pixels_tab[dxy](dest_y + 8, ptr + 8, s->linesize, 16); dxy = ((motion_by & 1) << 1) | (motion_bx & 1); src_x = (mb_x + bx) * 16 + (motion_bx >> 1); src_y = (mb_y + by) * 16 + (motion_by >> 1); src_x = clip(src_x, -16, width); if (src_x == width) dxy &= ~1; src_y = clip(src_y, -16, height); if (src_y == height) dxy &= ~2; avg_pixels_tab[dxy](dest_y , ptr , s->linesize, 16); avg_pixels_tab[dxy](dest_y + 8, ptr + 8, s->linesize, 16); } } P[0][0] = mv_table[mot_xy ][0]; P[0][1] = mv_table[mot_xy ][1]; P[1][0] = mv_table[mot_xy - 1][0]; P[1][1] = mv_table[mot_xy - 1][1]; /* special case for first line */ if ((mb_y == 0 || s->first_slice_line || s->first_gob_line)) { P[4][0] = P[1][0]; P[4][1] = P[1][1]; } else { P[2][0] = mv_table[mot_xy - mot_stride ][0]; P[2][1] = mv_table[mot_xy - mot_stride ][1]; P[3][0] = mv_table[mot_xy - mot_stride + 1 ][0]; P[3][1] = mv_table[mot_xy - mot_stride + 1 ][1]; P[4][0]= mid_pred(P[1][0], P[2][0], P[3][0]); P[4][1]= mid_pred(P[1][1], P[2][1], P[3][1]); } dmin = epzs_motion_search(s, &mx, &my, P, 0, 0, -16, -16, 15, 15, ref_picture); if(mx==0 && my==0) dmin=99999999; // not representable, due to rounding stuff if(dmin2<dmin){ dmin= dmin2; mx=0; my=0; } #if 1 mx2= mx= mx*2; my2= my= my*2; for(by=-1; by<2; by++){ if(my2+by < -32) continue; for(bx=-1; bx<2; bx++){ if(bx==0 && by==0) continue; if(mx2+bx < -32) continue; dmin2= check_bidir_mv(s, mb_x, mb_y, mx2+bx+motion_fx, my2+by+motion_fy, mx2+bx+motion_bx, my2+by+motion_by, mx2+bx+motion_fx, my2+by+motion_fy, motion_bx, motion_by) - s->qscale; if(dmin2<dmin){ dmin=dmin2; mx= mx2 + bx; my= my2 + by; } } } #else mx*=2; my*=2; #endif if(mx==0 && my==0){ motion_bx= motion_bx0; motion_by= motion_by0; } 
s->b_direct_mv_table[mot_xy][0]= mx; s->b_direct_mv_table[mot_xy][1]= my; s->b_direct_forw_mv_table[mot_xy][0]= motion_fx + mx; s->b_direct_forw_mv_table[mot_xy][1]= motion_fy + my; s->b_direct_back_mv_table[mot_xy][0]= motion_bx + mx; s->b_direct_back_mv_table[mot_xy][1]= motion_by + my; return dmin; }
20,188
0
static int vnc_display_get_address(const char *addrstr, bool websocket, bool reverse, int displaynum, int to, bool has_ipv4, bool has_ipv6, bool ipv4, bool ipv6, SocketAddress **retaddr, Error **errp) { int ret = -1; SocketAddress *addr = NULL; addr = g_new0(SocketAddress, 1); if (strncmp(addrstr, "unix:", 5) == 0) { addr->type = SOCKET_ADDRESS_KIND_UNIX; addr->u.q_unix.data = g_new0(UnixSocketAddress, 1); addr->u.q_unix.data->path = g_strdup(addrstr + 5); if (websocket) { error_setg(errp, "UNIX sockets not supported with websock"); goto cleanup; } if (to) { error_setg(errp, "Port range not support with UNIX socket"); goto cleanup; } ret = 0; } else { const char *port; size_t hostlen; unsigned long long baseport = 0; InetSocketAddress *inet; port = strrchr(addrstr, ':'); if (!port) { if (websocket) { hostlen = 0; port = addrstr; } else { error_setg(errp, "no vnc port specified"); goto cleanup; } } else { hostlen = port - addrstr; port++; if (*port == '\0') { error_setg(errp, "vnc port cannot be empty"); goto cleanup; } } addr->type = SOCKET_ADDRESS_KIND_INET; inet = addr->u.inet.data = g_new0(InetSocketAddress, 1); if (addrstr[0] == '[' && addrstr[hostlen - 1] == ']') { inet->host = g_strndup(addrstr + 1, hostlen - 2); } else { inet->host = g_strndup(addrstr, hostlen); } /* plain VNC port is just an offset, for websocket * port is absolute */ if (websocket) { if (g_str_equal(addrstr, "") || g_str_equal(addrstr, "on")) { if (displaynum == -1) { error_setg(errp, "explicit websocket port is required"); goto cleanup; } inet->port = g_strdup_printf( "%d", displaynum + 5700); if (to) { inet->has_to = true; inet->to = to + 5700; } } else { inet->port = g_strdup(port); } } else { int offset = reverse ? 0 : 5900; if (parse_uint_full(port, &baseport, 10) < 0) { error_setg(errp, "can't convert to a number: %s", port); goto cleanup; } if (baseport > 65535 || baseport + offset > 65535) { error_setg(errp, "port %s out of range", port); goto cleanup; } inet->port = g_strdup_printf( "%d", (int)baseport + offset); if (to) { inet->has_to = true; inet->to = to + offset; } } inet->ipv4 = ipv4; inet->has_ipv4 = has_ipv4; inet->ipv6 = ipv6; inet->has_ipv6 = has_ipv6; ret = baseport; } *retaddr = addr; cleanup: if (ret < 0) { qapi_free_SocketAddress(addr); } return ret; }
20,190
0
static void sdhci_do_data_transfer(void *opaque) { SDHCIState *s = (SDHCIState *)opaque; SDHCI_GET_CLASS(s)->data_transfer(s); }
20,191
0
static void kbd_send_chars(void *opaque) { TextConsole *s = opaque; int len; uint8_t buf[16]; len = qemu_chr_can_read(s->chr); if (len > s->out_fifo.count) len = s->out_fifo.count; if (len > 0) { if (len > sizeof(buf)) len = sizeof(buf); qemu_fifo_read(&s->out_fifo, buf, len); qemu_chr_read(s->chr, buf, len); } /* characters are pending: we send them a bit later (XXX: horrible, should change char device API) */ if (s->out_fifo.count > 0) { qemu_mod_timer(s->kbd_timer, qemu_get_clock(rt_clock) + 1); } }
20,192
0
void bdrv_dirty_bitmap_deserialize_ones(BdrvDirtyBitmap *bitmap, uint64_t start, uint64_t count, bool finish) { hbitmap_deserialize_ones(bitmap->bitmap, start, count, finish); }
20,193
0
void slirp_input(Slirp *slirp, const uint8_t *pkt, int pkt_len) { struct mbuf *m; int proto; if (pkt_len < ETH_HLEN) return; proto = ntohs(*(uint16_t *)(pkt + 12)); switch(proto) { case ETH_P_ARP: arp_input(slirp, pkt, pkt_len); break; case ETH_P_IP: m = m_get(slirp); if (!m) return; /* Note: we add to align the IP header */ if (M_FREEROOM(m) < pkt_len + 2) { m_inc(m, pkt_len + 2); } m->m_len = pkt_len + 2; memcpy(m->m_data + 2, pkt, pkt_len); m->m_data += 2 + ETH_HLEN; m->m_len -= 2 + ETH_HLEN; ip_input(m); break; default: break; } }
20,194
0
struct omap_intr_handler_s *omap_inth_init(target_phys_addr_t base, unsigned long size, unsigned char nbanks, qemu_irq **pins, qemu_irq parent_irq, qemu_irq parent_fiq, omap_clk clk) { struct omap_intr_handler_s *s = (struct omap_intr_handler_s *) g_malloc0(sizeof(struct omap_intr_handler_s) + sizeof(struct omap_intr_handler_bank_s) * nbanks); s->parent_intr[0] = parent_irq; s->parent_intr[1] = parent_fiq; s->nbanks = nbanks; s->pins = qemu_allocate_irqs(omap_set_intr, s, nbanks * 32); if (pins) *pins = s->pins; memory_region_init_io(&s->mmio, &omap_inth_mem_ops, s, "omap-intc", size); memory_region_add_subregion(get_system_memory(), base, &s->mmio); omap_inth_reset(s); return s; }
20,195
0
static bool do_check_io_limits(BlockIOLimit *io_limits) { bool bps_flag; bool iops_flag; assert(io_limits); bps_flag = (io_limits->bps[BLOCK_IO_LIMIT_TOTAL] != 0) && ((io_limits->bps[BLOCK_IO_LIMIT_READ] != 0) || (io_limits->bps[BLOCK_IO_LIMIT_WRITE] != 0)); iops_flag = (io_limits->iops[BLOCK_IO_LIMIT_TOTAL] != 0) && ((io_limits->iops[BLOCK_IO_LIMIT_READ] != 0) || (io_limits->iops[BLOCK_IO_LIMIT_WRITE] != 0)); if (bps_flag || iops_flag) { return false; } return true; }
20,196
0
static int blkdebug_co_flush(BlockDriverState *bs) { BDRVBlkdebugState *s = bs->opaque; BlkdebugRule *rule = NULL; QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) { if (rule->options.inject.offset == -1) { break; } } if (rule && rule->options.inject.error) { return inject_error(bs, rule); } return bdrv_co_flush(bs->file->bs); }
20,198
0
static void rtas_quiesce(sPAPREnvironment *spapr, uint32_t token, uint32_t nargs, target_ulong args, uint32_t nret, target_ulong rets) { VIOsPAPRBus *bus = spapr->vio_bus; BusChild *kid; VIOsPAPRDevice *dev = NULL; if (nargs != 0) { rtas_st(rets, 0, -3); return; } QTAILQ_FOREACH(kid, &bus->bus.children, sibling) { dev = (VIOsPAPRDevice *)kid->child; spapr_vio_quiesce_one(dev); } rtas_st(rets, 0, 0); }
20,201
0
static int kvm_put_xsave(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct kvm_xsave* xsave = env->kvm_xsave_buf; uint16_t cwd, swd, twd; int i, r; if (!kvm_has_xsave()) { return kvm_put_fpu(cpu); } memset(xsave, 0, sizeof(struct kvm_xsave)); twd = 0; swd = env->fpus & ~(7 << 11); swd |= (env->fpstt & 7) << 11; cwd = env->fpuc; for (i = 0; i < 8; ++i) { twd |= (!env->fptags[i]) << i; } xsave->region[XSAVE_FCW_FSW] = (uint32_t)(swd << 16) + cwd; xsave->region[XSAVE_FTW_FOP] = (uint32_t)(env->fpop << 16) + twd; memcpy(&xsave->region[XSAVE_CWD_RIP], &env->fpip, sizeof(env->fpip)); memcpy(&xsave->region[XSAVE_CWD_RDP], &env->fpdp, sizeof(env->fpdp)); memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs, sizeof env->fpregs); memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs, sizeof env->xmm_regs); xsave->region[XSAVE_MXCSR] = env->mxcsr; *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv; memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs, sizeof env->ymmh_regs); memcpy(&xsave->region[XSAVE_BNDREGS], env->bnd_regs, sizeof env->bnd_regs); memcpy(&xsave->region[XSAVE_BNDCSR], &env->bndcs_regs, sizeof(env->bndcs_regs)); memcpy(&xsave->region[XSAVE_OPMASK], env->opmask_regs, sizeof env->opmask_regs); memcpy(&xsave->region[XSAVE_ZMM_Hi256], env->zmmh_regs, sizeof env->zmmh_regs); #ifdef TARGET_X86_64 memcpy(&xsave->region[XSAVE_Hi16_ZMM], env->hi16_zmm_regs, sizeof env->hi16_zmm_regs); #endif r = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave); return r; }
20,202
0
static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config) { VirtIOBlock *s = VIRTIO_BLK(vdev); struct virtio_blk_config blkcfg; memcpy(&blkcfg, config, sizeof(blkcfg)); aio_context_acquire(bdrv_get_aio_context(s->bs)); bdrv_set_enable_write_cache(s->bs, blkcfg.wce != 0); aio_context_release(bdrv_get_aio_context(s->bs)); }
20,203
0
static int ide_handle_rw_error(IDEState *s, int error, int op) { bool is_read = (op & IDE_RETRY_READ) != 0; BlockErrorAction action = bdrv_get_error_action(s->bs, is_read, error); if (action == BLOCK_ERROR_ACTION_STOP) { s->bus->dma->ops->set_unit(s->bus->dma, s->unit); s->bus->error_status = op; } else if (action == BLOCK_ERROR_ACTION_REPORT) { if (op & IDE_RETRY_DMA) { dma_buf_commit(s); ide_dma_error(s); } else { ide_rw_error(s); } } bdrv_error_action(s->bs, action, is_read, error); return action != BLOCK_ERROR_ACTION_IGNORE; }
20,205
0
static int sdl_init_out (HWVoiceOut *hw, audsettings_t *as) { SDLVoiceOut *sdl = (SDLVoiceOut *) hw; SDLAudioState *s = &glob_sdl; SDL_AudioSpec req, obt; int shift; int endianess; int err; audfmt_e effective_fmt; audsettings_t obt_as; shift <<= as->nchannels == 2; req.freq = as->freq; req.format = aud_to_sdlfmt (as->fmt, &shift); req.channels = as->nchannels; req.samples = conf.nb_samples; req.callback = sdl_callback; req.userdata = sdl; if (sdl_open (&req, &obt)) { return -1; } err = sdl_to_audfmt (obt.format, &effective_fmt, &endianess); if (err) { sdl_close (s); return -1; } obt_as.freq = obt.freq; obt_as.nchannels = obt.channels; obt_as.fmt = effective_fmt; obt_as.endianness = endianess; audio_pcm_init_info (&hw->info, &obt_as); hw->samples = obt.samples; s->initialized = 1; s->exit = 0; SDL_PauseAudio (0); return 0; }
20,206
0
void qemu_spice_init(void) { QemuOpts *opts = QTAILQ_FIRST(&qemu_spice_opts.head); const char *password, *str, *x509_dir, *addr, *x509_key_password = NULL, *x509_dh_file = NULL, *tls_ciphers = NULL; char *x509_key_file = NULL, *x509_cert_file = NULL, *x509_cacert_file = NULL; int port, tls_port, len, addr_flags; spice_image_compression_t compression; spice_wan_compression_t wan_compr; qemu_thread_get_self(&me); if (!opts) { return; } port = qemu_opt_get_number(opts, "port", 0); tls_port = qemu_opt_get_number(opts, "tls-port", 0); if (!port && !tls_port) { fprintf(stderr, "neither port nor tls-port specified for spice."); exit(1); } if (port < 0 || port > 65535) { fprintf(stderr, "spice port is out of range"); exit(1); } if (tls_port < 0 || tls_port > 65535) { fprintf(stderr, "spice tls-port is out of range"); exit(1); } password = qemu_opt_get(opts, "password"); if (tls_port) { x509_dir = qemu_opt_get(opts, "x509-dir"); if (NULL == x509_dir) { x509_dir = "."; } len = strlen(x509_dir) + 32; str = qemu_opt_get(opts, "x509-key-file"); if (str) { x509_key_file = g_strdup(str); } else { x509_key_file = g_malloc(len); snprintf(x509_key_file, len, "%s/%s", x509_dir, X509_SERVER_KEY_FILE); } str = qemu_opt_get(opts, "x509-cert-file"); if (str) { x509_cert_file = g_strdup(str); } else { x509_cert_file = g_malloc(len); snprintf(x509_cert_file, len, "%s/%s", x509_dir, X509_SERVER_CERT_FILE); } str = qemu_opt_get(opts, "x509-cacert-file"); if (str) { x509_cacert_file = g_strdup(str); } else { x509_cacert_file = g_malloc(len); snprintf(x509_cacert_file, len, "%s/%s", x509_dir, X509_CA_CERT_FILE); } x509_key_password = qemu_opt_get(opts, "x509-key-password"); x509_dh_file = qemu_opt_get(opts, "x509-dh-file"); tls_ciphers = qemu_opt_get(opts, "tls-ciphers"); } addr = qemu_opt_get(opts, "addr"); addr_flags = 0; if (qemu_opt_get_bool(opts, "ipv4", 0)) { addr_flags |= SPICE_ADDR_FLAG_IPV4_ONLY; } else if (qemu_opt_get_bool(opts, "ipv6", 0)) { addr_flags |= SPICE_ADDR_FLAG_IPV6_ONLY; } spice_server = spice_server_new(); spice_server_set_addr(spice_server, addr ? 
addr : "", addr_flags); if (port) { spice_server_set_port(spice_server, port); } if (tls_port) { spice_server_set_tls(spice_server, tls_port, x509_cacert_file, x509_cert_file, x509_key_file, x509_key_password, x509_dh_file, tls_ciphers); } if (password) { spice_server_set_ticket(spice_server, password, 0, 0, 0); } if (qemu_opt_get_bool(opts, "sasl", 0)) { #if SPICE_SERVER_VERSION >= 0x000900 /* 0.9.0 */ if (spice_server_set_sasl_appname(spice_server, "qemu") == -1 || spice_server_set_sasl(spice_server, 1) == -1) { fprintf(stderr, "spice: failed to enable sasl\n"); exit(1); } #else fprintf(stderr, "spice: sasl is not available (spice >= 0.9 required)\n"); exit(1); #endif } if (qemu_opt_get_bool(opts, "disable-ticketing", 0)) { auth = "none"; spice_server_set_noauth(spice_server); } #if SPICE_SERVER_VERSION >= 0x000801 if (qemu_opt_get_bool(opts, "disable-copy-paste", 0)) { spice_server_set_agent_copypaste(spice_server, false); } #endif compression = SPICE_IMAGE_COMPRESS_AUTO_GLZ; str = qemu_opt_get(opts, "image-compression"); if (str) { compression = parse_compression(str); } spice_server_set_image_compression(spice_server, compression); wan_compr = SPICE_WAN_COMPRESSION_AUTO; str = qemu_opt_get(opts, "jpeg-wan-compression"); if (str) { wan_compr = parse_wan_compression(str); } spice_server_set_jpeg_compression(spice_server, wan_compr); wan_compr = SPICE_WAN_COMPRESSION_AUTO; str = qemu_opt_get(opts, "zlib-glz-wan-compression"); if (str) { wan_compr = parse_wan_compression(str); } spice_server_set_zlib_glz_compression(spice_server, wan_compr); str = qemu_opt_get(opts, "streaming-video"); if (str) { int streaming_video = parse_stream_video(str); spice_server_set_streaming_video(spice_server, streaming_video); } spice_server_set_agent_mouse (spice_server, qemu_opt_get_bool(opts, "agent-mouse", 1)); spice_server_set_playback_compression (spice_server, qemu_opt_get_bool(opts, "playback-compression", 1)); qemu_opt_foreach(opts, add_channel, NULL, 0); if (0 != spice_server_init(spice_server, &core_interface)) { fprintf(stderr, "failed to initialize spice server"); exit(1); }; using_spice = 1; migration_state.notify = migration_state_notifier; add_migration_state_change_notifier(&migration_state); #ifdef SPICE_INTERFACE_MIGRATION spice_migrate.sin.base.sif = &migrate_interface.base; spice_migrate.connect_complete.cb = NULL; qemu_spice_add_interface(&spice_migrate.sin.base); #endif qemu_spice_input_init(); qemu_spice_audio_init(); g_free(x509_key_file); g_free(x509_cert_file); g_free(x509_cacert_file); }
20,207
0
eth_write(void *opaque, target_phys_addr_t addr, uint64_t val64, unsigned int size) { struct fs_eth *eth = opaque; uint32_t value = val64; addr >>= 2; switch (addr) { case RW_MA0_LO: case RW_MA0_HI: eth->regs[addr] = value; eth_update_ma(eth, 0); break; case RW_MA1_LO: case RW_MA1_HI: eth->regs[addr] = value; eth_update_ma(eth, 1); break; case RW_MGM_CTRL: /* Attach an MDIO/PHY abstraction. */ if (value & 2) eth->mdio_bus.mdio = value & 1; if (eth->mdio_bus.mdc != (value & 4)) { mdio_cycle(&eth->mdio_bus); eth_validate_duplex(eth); } eth->mdio_bus.mdc = !!(value & 4); eth->regs[addr] = value; break; case RW_REC_CTRL: eth->regs[addr] = value; eth_validate_duplex(eth); break; default: eth->regs[addr] = value; D(printf ("%s %x %x\n", __func__, addr, value)); break; } }
20,208
0
static void strongarm_gpio_handler_update(StrongARMGPIOInfo *s) { uint32_t level, diff; int bit; level = s->olevel & s->dir; for (diff = s->prev_level ^ level; diff; diff ^= 1 << bit) { bit = ffs(diff) - 1; qemu_set_irq(s->handler[bit], (level >> bit) & 1); } s->prev_level = level; }
20,209
0
void ff_vc1dsp_init_altivec(VC1DSPContext* dsp) { if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC)) return; dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec; dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec; dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec; dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec; }
20,210
0
static void cuda_receive_packet(CUDAState *s, const uint8_t *data, int len) { uint8_t obuf[16]; int ti, autopoll; switch(data[0]) { case CUDA_AUTOPOLL: autopoll = (data[1] != 0); if (autopoll != s->autopoll) { s->autopoll = autopoll; if (autopoll) { qemu_mod_timer(s->adb_poll_timer, qemu_get_clock(vm_clock) + (ticks_per_sec / CUDA_ADB_POLL_FREQ)); } else { qemu_del_timer(s->adb_poll_timer); } } obuf[0] = CUDA_PACKET; obuf[1] = data[1]; cuda_send_packet_to_host(s, obuf, 2); break; case CUDA_GET_TIME: case CUDA_SET_TIME: /* XXX: add time support ? */ ti = time(NULL) + RTC_OFFSET; obuf[0] = CUDA_PACKET; obuf[1] = 0; obuf[2] = 0; obuf[3] = ti >> 24; obuf[4] = ti >> 16; obuf[5] = ti >> 8; obuf[6] = ti; cuda_send_packet_to_host(s, obuf, 7); break; case CUDA_FILE_SERVER_FLAG: case CUDA_SET_DEVICE_LIST: case CUDA_SET_AUTO_RATE: case CUDA_SET_POWER_MESSAGES: obuf[0] = CUDA_PACKET; obuf[1] = 0; cuda_send_packet_to_host(s, obuf, 2); break; case CUDA_POWERDOWN: obuf[0] = CUDA_PACKET; obuf[1] = 0; cuda_send_packet_to_host(s, obuf, 2); qemu_system_shutdown_request(); break; case CUDA_RESET_SYSTEM: obuf[0] = CUDA_PACKET; obuf[1] = 0; cuda_send_packet_to_host(s, obuf, 2); qemu_system_reset_request(); break; default: break; } }
20,211
0
iscsi_aio_flush(BlockDriverState *bs, BlockDriverCompletionFunc *cb, void *opaque) { IscsiLun *iscsilun = bs->opaque; struct iscsi_context *iscsi = iscsilun->iscsi; IscsiAIOCB *acb; acb = qemu_aio_get(&iscsi_aiocb_info, bs, cb, opaque); acb->iscsilun = iscsilun; acb->canceled = 0; acb->bh = NULL; acb->status = -EINPROGRESS; acb->buf = NULL; acb->task = iscsi_synchronizecache10_task(iscsi, iscsilun->lun, 0, 0, 0, 0, iscsi_synccache10_cb, acb); if (acb->task == NULL) { error_report("iSCSI: Failed to send synchronizecache10 command. %s", iscsi_get_error(iscsi)); qemu_aio_release(acb); return NULL; } iscsi_set_events(iscsilun); return &acb->common; }
20,212
0
void qmp_blockdev_change_medium(bool has_device, const char *device, bool has_id, const char *id, const char *filename, bool has_format, const char *format, bool has_read_only, BlockdevChangeReadOnlyMode read_only, Error **errp) { BlockBackend *blk; BlockDriverState *medium_bs = NULL; int bdrv_flags; int rc; QDict *options = NULL; Error *err = NULL; blk = qmp_get_blk(has_device ? device : NULL, has_id ? id : NULL, errp); if (!blk) { goto fail; } if (blk_bs(blk)) { blk_update_root_state(blk); } bdrv_flags = blk_get_open_flags_from_root_state(blk); bdrv_flags &= ~(BDRV_O_TEMPORARY | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING | BDRV_O_PROTOCOL); if (!has_read_only) { read_only = BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN; } switch (read_only) { case BLOCKDEV_CHANGE_READ_ONLY_MODE_RETAIN: break; case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_ONLY: bdrv_flags &= ~BDRV_O_RDWR; break; case BLOCKDEV_CHANGE_READ_ONLY_MODE_READ_WRITE: bdrv_flags |= BDRV_O_RDWR; break; default: abort(); } if (has_format) { options = qdict_new(); qdict_put(options, "driver", qstring_from_str(format)); } medium_bs = bdrv_open(filename, NULL, options, bdrv_flags, errp); if (!medium_bs) { goto fail; } bdrv_add_key(medium_bs, NULL, &err); if (err) { error_propagate(errp, err); goto fail; } rc = do_open_tray(has_device ? device : NULL, has_id ? id : NULL, false, &err); if (rc && rc != -ENOSYS) { error_propagate(errp, err); goto fail; } error_free(err); err = NULL; qmp_x_blockdev_remove_medium(has_device, device, has_id, id, &err); if (err) { error_propagate(errp, err); goto fail; } qmp_blockdev_insert_anon_medium(blk, medium_bs, &err); if (err) { error_propagate(errp, err); goto fail; } blk_apply_root_state(blk, medium_bs); qmp_blockdev_close_tray(has_device, device, has_id, id, errp); fail: /* If the medium has been inserted, the device has its own reference, so * ours must be relinquished; and if it has not been inserted successfully, * the reference must be relinquished anyway */ bdrv_unref(medium_bs); }
20,215
0
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2) { CPUS390XState *env = &cpu->env; uint64_t offset, data; S390PCIBusDevice *pbdev; uint8_t len; uint32_t fh; uint8_t pcias; cpu_synchronize_state(CPU(cpu)); if (env->psw.mask & PSW_MASK_PSTATE) { program_interrupt(env, PGM_PRIVILEGED, 4); return 0; } if (r2 & 0x1) { program_interrupt(env, PGM_SPECIFICATION, 4); return 0; } fh = env->regs[r2] >> 32; pcias = (env->regs[r2] >> 16) & 0xf; len = env->regs[r2] & 0xf; offset = env->regs[r2 + 1]; pbdev = s390_pci_find_dev_by_fh(fh); if (!pbdev || !(pbdev->fh & FH_MASK_ENABLE)) { DPRINTF("pcistg no pci dev\n"); setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE); return 0; } if (pbdev->lgstg_blocked) { setcc(cpu, ZPCI_PCI_LS_ERR); s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED); return 0; } data = env->regs[r1]; if (pcias < 6) { if ((8 - (offset & 0x7)) < len) { program_interrupt(env, PGM_OPERAND, 4); return 0; } MemoryRegion *mr; if (trap_msix(pbdev, offset, pcias)) { offset = offset - pbdev->msix.table_offset; mr = &pbdev->pdev->msix_table_mmio; update_msix_table_msg_data(pbdev, offset, &data, len); } else { mr = pbdev->pdev->io_regions[pcias].memory; } memory_region_dispatch_write(mr, offset, data, len, MEMTXATTRS_UNSPECIFIED); } else if (pcias == 15) { if ((4 - (offset & 0x3)) < len) { program_interrupt(env, PGM_OPERAND, 4); return 0; } switch (len) { case 1: break; case 2: data = bswap16(data); break; case 4: data = bswap32(data); break; case 8: data = bswap64(data); break; default: program_interrupt(env, PGM_OPERAND, 4); return 0; } pci_host_config_write_common(pbdev->pdev, offset, pci_config_size(pbdev->pdev), data, len); } else { DPRINTF("pcistg invalid space\n"); setcc(cpu, ZPCI_PCI_LS_ERR); s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS); return 0; } setcc(cpu, ZPCI_PCI_LS_OK); return 0; }
20,216
0
static int kvm_put_msrs(CPUState *env, int level) { struct { struct kvm_msrs info; struct kvm_msr_entry entries[100]; } msr_data; struct kvm_msr_entry *msrs = msr_data.entries; int n = 0; kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs); kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp); kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip); if (kvm_has_msr_star(env)) kvm_msr_entry_set(&msrs[n++], MSR_STAR, env->star); if (kvm_has_msr_hsave_pa(env)) kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave); #ifdef TARGET_X86_64 if (lm_capable_kernel) { kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar); kvm_msr_entry_set(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase); kvm_msr_entry_set(&msrs[n++], MSR_FMASK, env->fmask); kvm_msr_entry_set(&msrs[n++], MSR_LSTAR, env->lstar); } #endif if (level == KVM_PUT_FULL_STATE) { /* * KVM is yet unable to synchronize TSC values of multiple VCPUs on * writeback. Until this is fixed, we only write the offset to SMP * guests after migration, desynchronizing the VCPUs, but avoiding * huge jump-backs that would occur without any writeback at all. */ if (smp_cpus == 1 || env->tsc != 0) { kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc); } kvm_msr_entry_set(&msrs[n++], MSR_KVM_SYSTEM_TIME, env->system_time_msr); kvm_msr_entry_set(&msrs[n++], MSR_KVM_WALL_CLOCK, env->wall_clock_msr); #ifdef KVM_CAP_ASYNC_PF kvm_msr_entry_set(&msrs[n++], MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr); #endif } #ifdef KVM_CAP_MCE if (env->mcg_cap) { int i; if (level == KVM_PUT_RESET_STATE) kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status); else if (level == KVM_PUT_FULL_STATE) { kvm_msr_entry_set(&msrs[n++], MSR_MCG_STATUS, env->mcg_status); kvm_msr_entry_set(&msrs[n++], MSR_MCG_CTL, env->mcg_ctl); for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) kvm_msr_entry_set(&msrs[n++], MSR_MC0_CTL + i, env->mce_banks[i]); } } #endif msr_data.info.nmsrs = n; return kvm_vcpu_ioctl(env, KVM_SET_MSRS, &msr_data); }
20,217
0
static void scsi_block_realize(SCSIDevice *dev, Error **errp) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); int sg_version; int rc; if (!s->qdev.conf.bs) { error_setg(errp, "drive property not set"); return; } /* check we are using a driver managing SG_IO (version 3 and after) */ rc = bdrv_ioctl(s->qdev.conf.bs, SG_GET_VERSION_NUM, &sg_version); if (rc < 0) { error_setg(errp, "cannot get SG_IO version number: %s. " "Is this a SCSI device?", strerror(-rc)); return; } if (sg_version < 30000) { error_setg(errp, "scsi generic interface too old"); return; } /* get device type from INQUIRY data */ rc = get_device_type(s); if (rc < 0) { error_setg(errp, "INQUIRY failed"); return; } /* Make a guess for the block size, we'll fix it when the guest sends. * READ CAPACITY. If they don't, they likely would assume these sizes * anyway. (TODO: check in /sys). */ if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) { s->qdev.blocksize = 2048; } else { s->qdev.blocksize = 512; } /* Makes the scsi-block device not removable by using HMP and QMP eject * command. */ s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS); scsi_realize(&s->qdev, errp); }
20,218
0
static void init_proc_460 (CPUPPCState *env) { /* Time base */ gen_tbl(env); gen_spr_BookE(env); gen_spr_440(env); spr_register(env, SPR_BOOKE_MCSR, "MCSR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_440_CCR1, "CCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_DCRIPR, "SPR_DCRIPR", &spr_read_generic, &spr_write_generic, &spr_read_generic, &spr_write_generic, 0x00000000); /* Memory management */ env->nb_tlb = 64; env->nb_ways = 1; env->id_tlbs = 0; /* XXX: TODO: allocate internal IRQ controller */ }
20,219
0
static int xen_pt_status_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg, uint32_t real_offset, uint32_t *data) { XenPTRegGroup *reg_grp_entry = NULL; XenPTReg *reg_entry = NULL; uint32_t reg_field = 0; /* find Header register group */ reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST); if (reg_grp_entry) { /* find Capabilities Pointer register */ reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST); if (reg_entry) { /* check Capabilities Pointer register */ if (reg_entry->data) { reg_field |= PCI_STATUS_CAP_LIST; } else { reg_field &= ~PCI_STATUS_CAP_LIST; } } else { xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*" " for Capabilities Pointer register." " (%s)\n", __func__); return -1; } } else { xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup" " for Header. (%s)\n", __func__); return -1; } *data = reg_field; return 0; }
20,220
0
static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f) { VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); CcwDevice *ccw_dev = CCW_DEVICE(d); SubchDev *s = ccw_dev->sch; VirtIODevice *vdev = virtio_ccw_get_vdev(s); subch_device_save(s, f); if (dev->indicators != NULL) { qemu_put_be32(f, dev->indicators->len); qemu_put_be64(f, dev->indicators->addr); } else { qemu_put_be32(f, 0); qemu_put_be64(f, 0UL); } if (dev->indicators2 != NULL) { qemu_put_be32(f, dev->indicators2->len); qemu_put_be64(f, dev->indicators2->addr); } else { qemu_put_be32(f, 0); qemu_put_be64(f, 0UL); } if (dev->summary_indicator != NULL) { qemu_put_be32(f, dev->summary_indicator->len); qemu_put_be64(f, dev->summary_indicator->addr); } else { qemu_put_be32(f, 0); qemu_put_be64(f, 0UL); } qemu_put_be16(f, vdev->config_vector); qemu_put_be64(f, dev->routes.adapter.ind_offset); qemu_put_byte(f, dev->thinint_isc); qemu_put_be32(f, dev->revision); }
20,221
0
static int xen_pt_register_regions(XenPCIPassthroughState *s) { int i = 0; XenHostPCIDevice *d = &s->real_device; /* Register PIO/MMIO BARs */ for (i = 0; i < PCI_ROM_SLOT; i++) { XenHostPCIIORegion *r = &d->io_regions[i]; uint8_t type; if (r->base_addr == 0 || r->size == 0) { continue; } s->bases[i].access.u = r->base_addr; if (r->type & XEN_HOST_PCI_REGION_TYPE_IO) { type = PCI_BASE_ADDRESS_SPACE_IO; } else { type = PCI_BASE_ADDRESS_SPACE_MEMORY; if (r->type & XEN_HOST_PCI_REGION_TYPE_PREFETCH) { type |= PCI_BASE_ADDRESS_MEM_PREFETCH; } if (r->type & XEN_HOST_PCI_REGION_TYPE_MEM_64) { type |= PCI_BASE_ADDRESS_MEM_TYPE_64; } } memory_region_init_io(&s->bar[i], OBJECT(s), &ops, &s->dev, "xen-pci-pt-bar", r->size); pci_register_bar(&s->dev, i, type, &s->bar[i]); XEN_PT_LOG(&s->dev, "IO region %i registered (size=0x%08"PRIx64 " base_addr=0x%08"PRIx64" type: %#x)\n", i, r->size, r->base_addr, type); } /* Register expansion ROM address */ if (d->rom.base_addr && d->rom.size) { uint32_t bar_data = 0; /* Re-set BAR reported by OS, otherwise ROM can't be read. */ if (xen_host_pci_get_long(d, PCI_ROM_ADDRESS, &bar_data)) { return 0; } if ((bar_data & PCI_ROM_ADDRESS_MASK) == 0) { bar_data |= d->rom.base_addr & PCI_ROM_ADDRESS_MASK; xen_host_pci_set_long(d, PCI_ROM_ADDRESS, bar_data); } s->bases[PCI_ROM_SLOT].access.maddr = d->rom.base_addr; memory_region_init_rom_device(&s->rom, OBJECT(s), NULL, NULL, "xen-pci-pt-rom", d->rom.size); pci_register_bar(&s->dev, PCI_ROM_SLOT, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->rom); XEN_PT_LOG(&s->dev, "Expansion ROM registered (size=0x%08"PRIx64 " base_addr=0x%08"PRIx64")\n", d->rom.size, d->rom.base_addr); } return 0; }
20,223
0
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared) { int i, ret; if (shared) { assert(pic->f.data[0]); pic->shared = 1; } else { assert(!pic->f.data[0]); if (alloc_frame_buffer(s, pic) < 0) return -1; s->linesize = pic->f.linesize[0]; s->uvlinesize = pic->f.linesize[1]; } if (!pic->qscale_table_buf) ret = alloc_picture_tables(s, pic); else ret = make_tables_writable(pic); if (ret < 0) goto fail; if (s->encoding) { pic->mb_var = (uint16_t*)pic->mb_var_buf->data; pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data; pic->mb_mean = pic->mb_mean_buf->data; } pic->mbskip_table = pic->mbskip_table_buf->data; pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1; pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1; if (pic->motion_val_buf[0]) { for (i = 0; i < 2; i++) { pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4; pic->ref_index[i] = pic->ref_index_buf[i]->data; } } return 0; fail: av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n"); ff_mpeg_unref_picture(s, pic); free_picture_tables(pic); return AVERROR(ENOMEM); }
20,224
0
PCIBus *pci_prep_init(qemu_irq *pic) { PREPPCIState *s; PCIDevice *d; int PPC_io_memory; s = qemu_mallocz(sizeof(PREPPCIState)); s->bus = pci_register_bus(NULL, "pci", prep_set_irq, prep_map_irq, pic, 0, 4); register_ioport_write(0xcf8, 4, 4, pci_prep_addr_writel, s); register_ioport_read(0xcf8, 4, 4, pci_prep_addr_readl, s); register_ioport_write(0xcfc, 4, 1, pci_host_data_writeb, s); register_ioport_write(0xcfc, 4, 2, pci_host_data_writew, s); register_ioport_write(0xcfc, 4, 4, pci_host_data_writel, s); register_ioport_read(0xcfc, 4, 1, pci_host_data_readb, s); register_ioport_read(0xcfc, 4, 2, pci_host_data_readw, s); register_ioport_read(0xcfc, 4, 4, pci_host_data_readl, s); PPC_io_memory = cpu_register_io_memory(PPC_PCIIO_read, PPC_PCIIO_write, s); cpu_register_physical_memory(0x80800000, 0x00400000, PPC_io_memory); /* PCI host bridge */ d = pci_register_device(s->bus, "PREP Host Bridge - Motorola Raven", sizeof(PCIDevice), 0, NULL, NULL); pci_config_set_vendor_id(d->config, PCI_VENDOR_ID_MOTOROLA); pci_config_set_device_id(d->config, PCI_DEVICE_ID_MOTOROLA_RAVEN); d->config[0x08] = 0x00; // revision pci_config_set_class(d->config, PCI_CLASS_BRIDGE_HOST); d->config[0x0C] = 0x08; // cache_line_size d->config[0x0D] = 0x10; // latency_timer d->config[PCI_HEADER_TYPE] = PCI_HEADER_TYPE_NORMAL; // header_type d->config[0x34] = 0x00; // capabilities_pointer return s->bus; }
20,225
0
static DisplaySurface *qemu_create_dummy_surface(void) { static const char msg[] = "This VM has no graphic display device."; DisplaySurface *surface = qemu_create_displaysurface(640, 480); pixman_color_t bg = color_table_rgb[0][COLOR_BLACK]; pixman_color_t fg = color_table_rgb[0][COLOR_WHITE]; pixman_image_t *glyph; int len, x, y, i; len = strlen(msg); x = (640/FONT_WIDTH - len) / 2; y = (480/FONT_HEIGHT - 1) / 2; for (i = 0; i < len; i++) { glyph = qemu_pixman_glyph_from_vgafont(FONT_HEIGHT, vgafont16, msg[i]); qemu_pixman_glyph_render(glyph, surface->image, &fg, &bg, x+i, y, FONT_WIDTH, FONT_HEIGHT); qemu_pixman_image_unref(glyph); } return surface; }
20,226
0
int pit_get_out(PITState *pit, int channel, int64_t current_time) { PITChannelState *s = &pit->channels[channel]; return pit_get_out1(s, current_time); }
20,227
0
void mirror_start(const char *job_id, BlockDriverState *bs, BlockDriverState *target, const char *replaces, int64_t speed, uint32_t granularity, int64_t buf_size, MirrorSyncMode mode, BlockMirrorBackingMode backing_mode, BlockdevOnError on_source_error, BlockdevOnError on_target_error, bool unmap, BlockCompletionFunc *cb, void *opaque, Error **errp) { bool is_none_mode; BlockDriverState *base; if (mode == MIRROR_SYNC_MODE_INCREMENTAL) { error_setg(errp, "Sync mode 'incremental' not supported"); return; } is_none_mode = mode == MIRROR_SYNC_MODE_NONE; base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL; mirror_start_job(job_id, bs, target, replaces, speed, granularity, buf_size, backing_mode, on_source_error, on_target_error, unmap, cb, opaque, errp, &mirror_job_driver, is_none_mode, base); }
20,228
0
static int nfs_file_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { NFSClient *client = bs->opaque; int64_t ret; QemuOpts *opts; Error *local_err = NULL; opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort); qemu_opts_absorb_qdict(opts, options, &local_err); if (error_is_set(&local_err)) { error_propagate(errp, local_err); return -EINVAL; } ret = nfs_client_open(client, qemu_opt_get(opts, "filename"), (flags & BDRV_O_RDWR) ? O_RDWR : O_RDONLY, errp); if (ret < 0) { return ret; } bs->total_sectors = ret; return 0; }
20,229
0
static void memory_map_init(void) { system_memory = g_malloc(sizeof(*system_memory)); memory_region_init(system_memory, "system", INT64_MAX); address_space_init(&address_space_memory, system_memory, "memory"); system_io = g_malloc(sizeof(*system_io)); memory_region_init(system_io, "io", 65536); address_space_init(&address_space_io, system_io, "I/O"); memory_listener_register(&core_memory_listener, &address_space_memory); memory_listener_register(&io_memory_listener, &address_space_io); memory_listener_register(&tcg_memory_listener, &address_space_memory); }
20,230
0
void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb) { int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0; int max_xfer_len = 0; int64_t sector_num = 0; if (mrb->num_reqs == 1) { submit_requests(blk, mrb, 0, 1, -1); mrb->num_reqs = 0; return; } max_xfer_len = blk_get_max_transfer_length(mrb->reqs[0]->dev->blk); max_xfer_len = MIN_NON_ZERO(max_xfer_len, BDRV_REQUEST_MAX_SECTORS); qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs), &multireq_compare); for (i = 0; i < mrb->num_reqs; i++) { VirtIOBlockReq *req = mrb->reqs[i]; if (num_reqs > 0) { /* * NOTE: We cannot merge the requests in below situations: * 1. requests are not sequential * 2. merge would exceed maximum number of IOVs * 3. merge would exceed maximum transfer length of backend device */ if (sector_num + nb_sectors != req->sector_num || niov > blk_get_max_iov(blk) - req->qiov.niov || req->qiov.size / BDRV_SECTOR_SIZE > max_xfer_len || nb_sectors > max_xfer_len - req->qiov.size / BDRV_SECTOR_SIZE) { submit_requests(blk, mrb, start, num_reqs, niov); num_reqs = 0; } } if (num_reqs == 0) { sector_num = req->sector_num; nb_sectors = niov = 0; start = i; } nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE; niov += req->qiov.niov; num_reqs++; } submit_requests(blk, mrb, start, num_reqs, niov); mrb->num_reqs = 0; }
20,231
0
MemoryRegion *iotlb_to_region(hwaddr index) { return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr; }
20,232
0
static void restore_native_fp_fxrstor(CPUState *env) { struct fpxstate *fp = &fpx1; int i, j, fptag; fp->fpuc = env->fpuc; fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; fptag = 0; for(i = 0; i < 8; i++) fptag |= (env->fptags[i] << i); fp->fptag = fptag ^ 0xff; j = env->fpstt; for(i = 0;i < 8; i++) { memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10); j = (j + 1) & 7; } if (env->cpuid_features & CPUID_SSE) { fp->mxcsr = env->mxcsr; /* XXX: check if DAZ is not available */ fp->mxcsr_mask = 0xffff; memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16); } asm volatile ("fxrstor %0" : "=m" (*fp)); }
20,234
0
static int qemu_lock_fcntl(int fd, int64_t start, int64_t len, int fl_type) { int ret; struct flock fl = { .l_whence = SEEK_SET, .l_start = start, .l_len = len, .l_type = fl_type, }; ret = fcntl(fd, QEMU_SETLK, &fl); return ret == -1 ? -errno : 0; }
20,235
0
static bool qemu_co_queue_do_restart(CoQueue *queue, bool single) { Coroutine *next; CoQueueNextData *data; if (QTAILQ_EMPTY(&queue->entries)) { return false; } data = g_slice_new(CoQueueNextData); data->bh = aio_bh_new(queue->ctx, qemu_co_queue_next_bh, data); QTAILQ_INIT(&data->entries); qemu_bh_schedule(data->bh); while ((next = QTAILQ_FIRST(&queue->entries)) != NULL) { QTAILQ_REMOVE(&queue->entries, next, co_queue_next); QTAILQ_INSERT_TAIL(&data->entries, next, co_queue_next); trace_qemu_co_queue_next(next); if (single) { break; } } return true; }
20,236
0
void bdrv_dirty_bitmap_deserialize_zeroes(BdrvDirtyBitmap *bitmap, uint64_t start, uint64_t count, bool finish) { hbitmap_deserialize_zeroes(bitmap->bitmap, start, count, finish); }
20,239
0
int ff_mp4_read_dec_config_descr(AVFormatContext *fc, AVStream *st, AVIOContext *pb) { enum AVCodecID codec_id; unsigned v; int len, tag; int ret; int object_type_id = avio_r8(pb); avio_r8(pb); /* stream type */ avio_rb24(pb); /* buffer size db */ v = avio_rb32(pb); // TODO: fix this with codecpar #if FF_API_LAVF_AVCTX FF_DISABLE_DEPRECATION_WARNINGS if (v < INT32_MAX) st->codec->rc_max_rate = v; FF_ENABLE_DEPRECATION_WARNINGS #endif st->codecpar->bit_rate = avio_rb32(pb); /* avg bitrate */ codec_id= ff_codec_get_id(ff_mp4_obj_type, object_type_id); if (codec_id) st->codecpar->codec_id = codec_id; av_log(fc, AV_LOG_TRACE, "esds object type id 0x%02x\n", object_type_id); len = ff_mp4_read_descr(fc, pb, &tag); if (tag == MP4DecSpecificDescrTag) { av_log(fc, AV_LOG_TRACE, "Specific MPEG-4 header len=%d\n", len); if (!len || (uint64_t)len > (1<<30)) return -1; av_free(st->codecpar->extradata); if ((ret = ff_get_extradata(fc, st->codecpar, pb, len)) < 0) return ret; if (st->codecpar->codec_id == AV_CODEC_ID_AAC) { MPEG4AudioConfig cfg = {0}; avpriv_mpeg4audio_get_config(&cfg, st->codecpar->extradata, st->codecpar->extradata_size * 8, 1); st->codecpar->channels = cfg.channels; if (cfg.object_type == 29 && cfg.sampling_index < 3) // old mp3on4 st->codecpar->sample_rate = avpriv_mpa_freq_tab[cfg.sampling_index]; else if (cfg.ext_sample_rate) st->codecpar->sample_rate = cfg.ext_sample_rate; else st->codecpar->sample_rate = cfg.sample_rate; av_log(fc, AV_LOG_TRACE, "mp4a config channels %d obj %d ext obj %d " "sample rate %d ext sample rate %d\n", st->codecpar->channels, cfg.object_type, cfg.ext_object_type, cfg.sample_rate, cfg.ext_sample_rate); if (!(st->codecpar->codec_id = ff_codec_get_id(mp4_audio_types, cfg.object_type))) st->codecpar->codec_id = AV_CODEC_ID_AAC; } } return 0; }
20,242
0
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
{
    ImgUtils imgutils = { &imgutils_class, log_offset, log_ctx };

    if ((int)w>0 && (int)h>0 && (w+128)*(uint64_t)(h+128) < INT_MAX/8)
        return 0;

    av_log(&imgutils, AV_LOG_ERROR, "Picture size %ux%u is invalid\n", w, h);
    return AVERROR(EINVAL);
}
20,244
0
void qemu_aio_unref(void *p)
{
    BlockAIOCB *acb = p;
    assert(acb->refcnt > 0);
    if (--acb->refcnt == 0) {
        g_free(acb);
    }
}
20,245
0
void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0) {
        env->fpscr |= 1 << FPSCR_VX;
    } else {
        env->fpscr &= ~(1 << FPSCR_VX);
    }
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    } else {
        env->fpscr &= ~(1 << FPSCR_FEX);
    }
    fpscr_set_rounding_mode(env);
}
20,246
0
static void gen_compute_branch1 (CPUState *env, DisasContext *ctx, uint32_t op,
                                 int32_t cc, int32_t offset)
{
    target_ulong btarget;
    const char *opn = "cp1 cond branch";
    TCGv t0 = tcg_temp_local_new(TCG_TYPE_TL);
    TCGv t1 = tcg_temp_local_new(TCG_TYPE_TL);

    if (cc != 0)
        check_insn(env, ctx, ISA_MIPS4 | ISA_MIPS32);

    btarget = ctx->pc + 4 + offset;

    switch (op) {
    case OPC_BC1F:
        {
            int l1 = gen_new_label();
            int l2 = gen_new_label();
            TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);

            get_fp_cond(r_tmp1);
            tcg_gen_ext_i32_tl(t0, r_tmp1);
            tcg_temp_free(r_tmp1);
            tcg_gen_not_tl(t0, t0);
            tcg_gen_movi_tl(t1, 0x1 << cc);
            tcg_gen_and_tl(t0, t0, t1);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
            tcg_gen_movi_tl(t0, 0);
            tcg_gen_br(l2);
            gen_set_label(l1);
            tcg_gen_movi_tl(t0, 1);
            gen_set_label(l2);
        }
        opn = "bc1f";
        goto not_likely;
    case OPC_BC1FL:
        {
            int l1 = gen_new_label();
            int l2 = gen_new_label();
            TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);

            get_fp_cond(r_tmp1);
            tcg_gen_ext_i32_tl(t0, r_tmp1);
            tcg_temp_free(r_tmp1);
            tcg_gen_not_tl(t0, t0);
            tcg_gen_movi_tl(t1, 0x1 << cc);
            tcg_gen_and_tl(t0, t0, t1);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
            tcg_gen_movi_tl(t0, 0);
            tcg_gen_br(l2);
            gen_set_label(l1);
            tcg_gen_movi_tl(t0, 1);
            gen_set_label(l2);
        }
        opn = "bc1fl";
        goto likely;
    case OPC_BC1T:
        {
            int l1 = gen_new_label();
            int l2 = gen_new_label();
            TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);

            get_fp_cond(r_tmp1);
            tcg_gen_ext_i32_tl(t0, r_tmp1);
            tcg_temp_free(r_tmp1);
            tcg_gen_movi_tl(t1, 0x1 << cc);
            tcg_gen_and_tl(t0, t0, t1);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
            tcg_gen_movi_tl(t0, 0);
            tcg_gen_br(l2);
            gen_set_label(l1);
            tcg_gen_movi_tl(t0, 1);
            gen_set_label(l2);
        }
        opn = "bc1t";
        goto not_likely;
    case OPC_BC1TL:
        {
            int l1 = gen_new_label();
            int l2 = gen_new_label();
            TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);

            get_fp_cond(r_tmp1);
            tcg_gen_ext_i32_tl(t0, r_tmp1);
            tcg_temp_free(r_tmp1);
            tcg_gen_movi_tl(t1, 0x1 << cc);
            tcg_gen_and_tl(t0, t0, t1);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
            tcg_gen_movi_tl(t0, 0);
            tcg_gen_br(l2);
            gen_set_label(l1);
            tcg_gen_movi_tl(t0, 1);
            gen_set_label(l2);
        }
        opn = "bc1tl";
    likely:
        ctx->hflags |= MIPS_HFLAG_BL;
        tcg_gen_trunc_tl_i32(bcond, t0);
        break;
    case OPC_BC1FANY2:
        {
            int l1 = gen_new_label();
            int l2 = gen_new_label();
            TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);

            get_fp_cond(r_tmp1);
            tcg_gen_ext_i32_tl(t0, r_tmp1);
            tcg_temp_free(r_tmp1);
            tcg_gen_not_tl(t0, t0);
            tcg_gen_movi_tl(t1, 0x3 << cc);
            tcg_gen_and_tl(t0, t0, t1);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
            tcg_gen_movi_tl(t0, 0);
            tcg_gen_br(l2);
            gen_set_label(l1);
            tcg_gen_movi_tl(t0, 1);
            gen_set_label(l2);
        }
        opn = "bc1any2f";
        goto not_likely;
    case OPC_BC1TANY2:
        {
            int l1 = gen_new_label();
            int l2 = gen_new_label();
            TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);

            get_fp_cond(r_tmp1);
            tcg_gen_ext_i32_tl(t0, r_tmp1);
            tcg_temp_free(r_tmp1);
            tcg_gen_movi_tl(t1, 0x3 << cc);
            tcg_gen_and_tl(t0, t0, t1);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
            tcg_gen_movi_tl(t0, 0);
            tcg_gen_br(l2);
            gen_set_label(l1);
            tcg_gen_movi_tl(t0, 1);
            gen_set_label(l2);
        }
        opn = "bc1any2t";
        goto not_likely;
    case OPC_BC1FANY4:
        {
            int l1 = gen_new_label();
            int l2 = gen_new_label();
            TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);

            get_fp_cond(r_tmp1);
            tcg_gen_ext_i32_tl(t0, r_tmp1);
            tcg_temp_free(r_tmp1);
            tcg_gen_not_tl(t0, t0);
            tcg_gen_movi_tl(t1, 0xf << cc);
            tcg_gen_and_tl(t0, t0, t1);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
            tcg_gen_movi_tl(t0, 0);
            tcg_gen_br(l2);
            gen_set_label(l1);
            tcg_gen_movi_tl(t0, 1);
            gen_set_label(l2);
        }
        opn = "bc1any4f";
        goto not_likely;
    case OPC_BC1TANY4:
        {
            int l1 = gen_new_label();
            int l2 = gen_new_label();
            TCGv r_tmp1 = tcg_temp_new(TCG_TYPE_I32);

            get_fp_cond(r_tmp1);
            tcg_gen_ext_i32_tl(t0, r_tmp1);
            tcg_temp_free(r_tmp1);
            tcg_gen_movi_tl(t1, 0xf << cc);
            tcg_gen_and_tl(t0, t0, t1);
            tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
            tcg_gen_movi_tl(t0, 0);
            tcg_gen_br(l2);
            gen_set_label(l1);
            tcg_gen_movi_tl(t0, 1);
            gen_set_label(l2);
        }
        opn = "bc1any4t";
    not_likely:
        ctx->hflags |= MIPS_HFLAG_BC;
        tcg_gen_trunc_tl_i32(bcond, t0);
        break;
    default:
        MIPS_INVAL(opn);
        generate_exception (ctx, EXCP_RI);
        goto out;
    }
    MIPS_DEBUG("%s: cond %02x target " TARGET_FMT_lx, opn,
               ctx->hflags, btarget);
    ctx->btarget = btarget;

 out:
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
20,247
0
void kvm_s390_crw_mchk(void)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_MCHK,
        .u.mchk.cr14 = 1 << 28,
        .u.mchk.mcic = build_channel_report_mcic(),
    };
    kvm_s390_floating_interrupt(&irq);
}
20,249
0
void timerlistgroup_init(QEMUTimerListGroup *tlg,
                         QEMUTimerListNotifyCB *cb, void *opaque)
{
    QEMUClockType type;
    for (type = 0; type < QEMU_CLOCK_MAX; type++) {
        tlg->tl[type] = timerlist_new(type, cb, opaque);
    }
}
20,250
0
static ssize_t block_crypto_init_func(QCryptoBlock *block,
                                      size_t headerlen,
                                      Error **errp,
                                      void *opaque)
{
    struct BlockCryptoCreateData *data = opaque;
    int ret;

    /* User provided size should reflect amount of space made
     * available to the guest, so we must take account of that
     * which will be used by the crypto header
     */
    data->size += headerlen;

    qemu_opt_set_number(data->opts, BLOCK_OPT_SIZE, data->size, &error_abort);
    ret = bdrv_create_file(data->filename, data->opts, errp);
    if (ret < 0) {
        return -1;
    }

    data->blk = blk_new_open(data->filename, NULL, NULL,
                             BDRV_O_RDWR | BDRV_O_PROTOCOL, errp);
    if (!data->blk) {
        return -1;
    }

    return 0;
}
20,251