label (int64, 0-1) | func1 (string, lengths 23-97k) | id (int64, 0-27.3k) |
---|---|---|
1 | static int assign_intx(AssignedDevice *dev) { AssignedIRQType new_type; PCIINTxRoute intx_route; bool intx_host_msi; int r; /* Interrupt PIN 0 means don't use INTx */ if (assigned_dev_pci_read_byte(&dev->dev, PCI_INTERRUPT_PIN) == 0) { pci_device_set_intx_routing_notifier(&dev->dev, NULL); return 0; } if (!check_irqchip_in_kernel()) { return -ENOTSUP; } pci_device_set_intx_routing_notifier(&dev->dev, assigned_dev_update_irq_routing); intx_route = pci_device_route_intx_to_irq(&dev->dev, dev->intpin); assert(intx_route.mode != PCI_INTX_INVERTED); if (!pci_intx_route_changed(&dev->intx_route, &intx_route)) { return 0; } switch (dev->assigned_irq_type) { case ASSIGNED_IRQ_INTX_HOST_INTX: case ASSIGNED_IRQ_INTX_HOST_MSI: intx_host_msi = dev->assigned_irq_type == ASSIGNED_IRQ_INTX_HOST_MSI; r = kvm_device_intx_deassign(kvm_state, dev->dev_id, intx_host_msi); break; case ASSIGNED_IRQ_MSI: r = kvm_device_msi_deassign(kvm_state, dev->dev_id); break; case ASSIGNED_IRQ_MSIX: r = kvm_device_msix_deassign(kvm_state, dev->dev_id); break; default: r = 0; break; } if (r) { perror("assign_intx: deassignment of previous interrupt failed"); } dev->assigned_irq_type = ASSIGNED_IRQ_NONE; if (intx_route.mode == PCI_INTX_DISABLED) { dev->intx_route = intx_route; return 0; } retry: if (dev->features & ASSIGNED_DEVICE_PREFER_MSI_MASK && dev->cap.available & ASSIGNED_DEVICE_CAP_MSI) { intx_host_msi = true; new_type = ASSIGNED_IRQ_INTX_HOST_MSI; } else { intx_host_msi = false; new_type = ASSIGNED_IRQ_INTX_HOST_INTX; } r = kvm_device_intx_assign(kvm_state, dev->dev_id, intx_host_msi, intx_route.irq); if (r < 0) { if (r == -EIO && !(dev->features & ASSIGNED_DEVICE_PREFER_MSI_MASK) && dev->cap.available & ASSIGNED_DEVICE_CAP_MSI) { /* Retry with host-side MSI. There might be an IRQ conflict and * either the kernel or the device doesn't support sharing. */ error_report("Host-side INTx sharing not supported, " "using MSI instead"); error_printf("Some devices do not work properly in this mode.\n"); dev->features |= ASSIGNED_DEVICE_PREFER_MSI_MASK; goto retry; } error_report("Failed to assign irq for \"%s\": %s", dev->dev.qdev.id, strerror(-r)); error_report("Perhaps you are assigning a device " "that shares an IRQ with another device?"); return r; } dev->intx_route = intx_route; dev->assigned_irq_type = new_type; return r; } | 16,445 |
1 | void cpu_register_physical_memory(target_phys_addr_t start_addr, unsigned long size, unsigned long phys_offset) { target_phys_addr_t addr, end_addr; PhysPageDesc *p; CPUState *env; unsigned long orig_size = size; void *subpage; end_addr = start_addr + (target_phys_addr_t)size; size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK; for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) { p = phys_page_find(addr >> TARGET_PAGE_BITS); if (p && p->phys_offset != IO_MEM_UNASSIGNED) { unsigned long orig_memory = p->phys_offset; target_phys_addr_t start_addr2, end_addr2; int need_subpage = 0; CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, need_subpage); if (need_subpage) { if (!(orig_memory & IO_MEM_SUBPAGE)) { subpage = subpage_init((addr & TARGET_PAGE_MASK), &p->phys_offset, orig_memory); } else { subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK) >> IO_MEM_SHIFT]; } subpage_register(subpage, start_addr2, end_addr2, phys_offset); } else { p->phys_offset = phys_offset; if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM || (phys_offset & IO_MEM_ROMD)) phys_offset += TARGET_PAGE_SIZE; } } else { p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1); p->phys_offset = phys_offset; if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM || (phys_offset & IO_MEM_ROMD)) phys_offset += TARGET_PAGE_SIZE; else { target_phys_addr_t start_addr2, end_addr2; int need_subpage = 0; CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, need_subpage); if (need_subpage) { subpage = subpage_init((addr & TARGET_PAGE_MASK), &p->phys_offset, IO_MEM_UNASSIGNED); subpage_register(subpage, start_addr2, end_addr2, phys_offset); } } } } /* since each CPU stores ram addresses in its TLB cache, we must reset the modified entries */ /* XXX: slow ! */ for(env = first_cpu; env != NULL; env = env->next_cpu) { tlb_flush(env, 1); } } | 16,446 |
1 | static int query_formats(AVFilterGraph *graph, AVClass *log_ctx) { int i, j, ret; int scaler_count = 0, resampler_count = 0; int count_queried = 0; /* successful calls to query_formats() */ int count_merged = 0; /* successful merge of formats lists */ int count_already_merged = 0; /* lists already merged */ int count_delayed = 0; /* lists that need to be merged later */ for (i = 0; i < graph->nb_filters; i++) { AVFilterContext *f = graph->filters[i]; if (formats_declared(f)) continue; if (f->filter->query_formats) ret = filter_query_formats(f); else ret = ff_default_query_formats(f); if (ret < 0 && ret != AVERROR(EAGAIN)) return ret; /* note: EAGAIN could indicate a partial success, not counted yet */ count_queried += ret >= 0; } /* go through and merge as many format lists as possible */ for (i = 0; i < graph->nb_filters; i++) { AVFilterContext *filter = graph->filters[i]; for (j = 0; j < filter->nb_inputs; j++) { AVFilterLink *link = filter->inputs[j]; int convert_needed = 0; if (!link) continue; if (link->in_formats != link->out_formats && link->in_formats && link->out_formats) if (!can_merge_formats(link->in_formats, link->out_formats, link->type, 0)) convert_needed = 1; if (link->type == AVMEDIA_TYPE_AUDIO) { if (link->in_samplerates != link->out_samplerates && link->in_samplerates && link->out_samplerates) if (!can_merge_formats(link->in_samplerates, link->out_samplerates, 0, 1)) convert_needed = 1; } #define MERGE_DISPATCH(field, statement) \ if (!(link->in_ ## field && link->out_ ## field)) { \ count_delayed++; \ } else if (link->in_ ## field == link->out_ ## field) { \ count_already_merged++; \ } else if (!convert_needed) { \ count_merged++; \ statement \ } if (link->type == AVMEDIA_TYPE_AUDIO) { MERGE_DISPATCH(channel_layouts, if (!ff_merge_channel_layouts(link->in_channel_layouts, link->out_channel_layouts)) convert_needed = 1; ) MERGE_DISPATCH(samplerates, if (!ff_merge_samplerates(link->in_samplerates, link->out_samplerates)) convert_needed = 1; ) } MERGE_DISPATCH(formats, if (!ff_merge_formats(link->in_formats, link->out_formats, link->type)) convert_needed = 1; ) #undef MERGE_DISPATCH if (convert_needed) { AVFilterContext *convert; AVFilter *filter; AVFilterLink *inlink, *outlink; char scale_args[256]; char inst_name[30]; /* couldn't merge format lists. auto-insert conversion filter */ switch (link->type) { case AVMEDIA_TYPE_VIDEO: if (!(filter = avfilter_get_by_name("scale"))) { av_log(log_ctx, AV_LOG_ERROR, "'scale' filter " "not present, cannot convert pixel formats.\n"); return AVERROR(EINVAL); } snprintf(inst_name, sizeof(inst_name), "auto-inserted scaler %d", scaler_count++); if ((ret = avfilter_graph_create_filter(&convert, filter, inst_name, graph->scale_sws_opts, NULL, graph)) < 0) return ret; break; case AVMEDIA_TYPE_AUDIO: if (!(filter = avfilter_get_by_name("aresample"))) { av_log(log_ctx, AV_LOG_ERROR, "'aresample' filter " "not present, cannot convert audio formats.\n"); return AVERROR(EINVAL); } snprintf(inst_name, sizeof(inst_name), "auto-inserted resampler %d", resampler_count++); scale_args[0] = '\0'; if (graph->aresample_swr_opts) snprintf(scale_args, sizeof(scale_args), "%s", graph->aresample_swr_opts); if ((ret = avfilter_graph_create_filter(&convert, filter, inst_name, graph->aresample_swr_opts, NULL, graph)) < 0) return ret; break; default: return AVERROR(EINVAL); } if ((ret = avfilter_insert_filter(link, convert, 0, 0)) < 0) return ret; filter_query_formats(convert); inlink = convert->inputs[0]; outlink = convert->outputs[0]; av_assert0( inlink-> in_formats->refcount > 0); av_assert0( inlink->out_formats->refcount > 0); av_assert0(outlink-> in_formats->refcount > 0); av_assert0(outlink->out_formats->refcount > 0); if (outlink->type == AVMEDIA_TYPE_AUDIO) { av_assert0( inlink-> in_samplerates->refcount > 0); av_assert0( inlink->out_samplerates->refcount > 0); av_assert0(outlink-> in_samplerates->refcount > 0); av_assert0(outlink->out_samplerates->refcount > 0); av_assert0( inlink-> in_channel_layouts->refcount > 0); av_assert0( inlink->out_channel_layouts->refcount > 0); av_assert0(outlink-> in_channel_layouts->refcount > 0); av_assert0(outlink->out_channel_layouts->refcount > 0); } if (!ff_merge_formats( inlink->in_formats, inlink->out_formats, inlink->type) || !ff_merge_formats(outlink->in_formats, outlink->out_formats, outlink->type)) ret = AVERROR(ENOSYS); if (inlink->type == AVMEDIA_TYPE_AUDIO && (!ff_merge_samplerates(inlink->in_samplerates, inlink->out_samplerates) || !ff_merge_channel_layouts(inlink->in_channel_layouts, inlink->out_channel_layouts))) ret = AVERROR(ENOSYS); if (outlink->type == AVMEDIA_TYPE_AUDIO && (!ff_merge_samplerates(outlink->in_samplerates, outlink->out_samplerates) || !ff_merge_channel_layouts(outlink->in_channel_layouts, outlink->out_channel_layouts))) ret = AVERROR(ENOSYS); if (ret < 0) { av_log(log_ctx, AV_LOG_ERROR, "Impossible to convert between the formats supported by the filter " "'%s' and the filter '%s'\n", link->src->name, link->dst->name); return ret; } } } } av_log(graph, AV_LOG_DEBUG, "query_formats: " "%d queried, %d merged, %d already done, %d delayed\n", count_queried, count_merged, count_already_merged, count_delayed); if (count_delayed) { AVBPrint bp; /* if count_queried > 0, one filter at least did set its formats, that will give additional information to its neighbour; if count_merged > 0, one pair of formats lists at least was merged, that will give additional information to all connected filters; in both cases, progress was made and a new round must be done */ if (count_queried || count_merged) return AVERROR(EAGAIN); av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC); for (i = 0; i < graph->nb_filters; i++) if (!formats_declared(graph->filters[i])) av_bprintf(&bp, "%s%s", bp.len ? ", " : "", graph->filters[i]->name); av_log(graph, AV_LOG_ERROR, "The following filters could not choose their formats: %s\n" "Consider inserting the (a)format filter near their input or " "output.\n", bp.str); return AVERROR(EIO); } return 0; } | 16,447 |
1 | void mpeg1_encode_init(MpegEncContext *s) { static int done=0; if(!done){ int f_code; int mv; done=1; for(f_code=1; f_code<=MAX_FCODE; f_code++){ for(mv=-MAX_MV; mv<=MAX_MV; mv++){ int len; if(mv==0) len= mbMotionVectorTable[0][1]; else{ int val, bit_size, range, code; bit_size = s->f_code - 1; range = 1 << bit_size; val=mv; if (val < 0) val = -val; val--; code = (val >> bit_size) + 1; if(code<17){ len= mbMotionVectorTable[code][1] + 1 + bit_size; }else{ len= mbMotionVectorTable[16][1] + 2 + bit_size; } } mv_penalty[f_code][mv+MAX_MV]= len; } } for(f_code=MAX_FCODE; f_code>0; f_code--){ for(mv=-(8<<f_code); mv<(8<<f_code); mv++){ fcode_tab[mv+MAX_MV]= f_code; } } } s->mv_penalty= mv_penalty; s->fcode_tab= fcode_tab; } | 16,448 |
0 | static int cook_decode_close(AVCodecContext *avctx) { int i; COOKContext *q = avctx->priv_data; av_log(NULL,AV_LOG_DEBUG, "Deallocating memory.\n"); /* Free allocated memory buffers. */ av_free(q->mlt_window); av_free(q->mlt_precos); av_free(q->mlt_presin); av_free(q->mlt_postcos); av_free(q->frame_reorder_index); av_free(q->frame_reorder_buffer); av_free(q->decoded_bytes_buffer); /* Free the transform. */ ff_fft_end(&q->fft_ctx); /* Free the VLC tables. */ for (i=0 ; i<13 ; i++) { free_vlc(&q->envelope_quant_index[i]); } for (i=0 ; i<7 ; i++) { free_vlc(&q->sqvh[i]); } if(q->nb_channels==2 && q->joint_stereo==1 ){ free_vlc(&q->ccpl); } av_log(NULL,AV_LOG_DEBUG,"Memory deallocated.\n"); return 0; } | 16,450 |
0 | static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){ int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames); int num, den, frame_size, i; av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index); /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE) return AVERROR(EINVAL);*/ /* duration field */ if (pkt->duration == 0) { compute_frame_duration(&num, &den, st, NULL, pkt); if (den && num) { pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num); } } if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0) pkt->pts= pkt->dts; //XXX/FIXME this is a temporary hack until all encoders output pts if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){ pkt->dts= // pkt->pts= st->cur_dts; pkt->pts= st->pts.val; } //calculate dts from pts if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){ st->pts_buffer[0]= pkt->pts; for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++) st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration; for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++) FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]); pkt->dts= st->pts_buffer[0]; } if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)){ av_log(s, AV_LOG_ERROR, "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n", st->index, st->cur_dts, pkt->dts); return AVERROR(EINVAL); } if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){ av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index); return AVERROR(EINVAL); } // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts); st->cur_dts= pkt->dts; st->pts.val= pkt->dts; /* update pts */ switch (st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: frame_size = get_audio_frame_size(st->codec, pkt->size); /* HACK/FIXME, we skip the initial 0 size packets as they are most likely equal to the encoder delay, but it would be better if we had the real timestamps from the encoder */ if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) { frac_add(&st->pts, (int64_t)st->time_base.den * frame_size); } break; case AVMEDIA_TYPE_VIDEO: frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num); break; default: break; } return 0; } | 16,451 |
1 | static void monitor_fdset_cleanup(MonFdset *mon_fdset) { MonFdsetFd *mon_fdset_fd; MonFdsetFd *mon_fdset_fd_next; QLIST_FOREACH_SAFE(mon_fdset_fd, &mon_fdset->fds, next, mon_fdset_fd_next) { if (mon_fdset_fd->removed) { close(mon_fdset_fd->fd); g_free(mon_fdset_fd->opaque); QLIST_REMOVE(mon_fdset_fd, next); g_free(mon_fdset_fd); } } if (QLIST_EMPTY(&mon_fdset->fds) && QLIST_EMPTY(&mon_fdset->dup_fds)) { QLIST_REMOVE(mon_fdset, next); g_free(mon_fdset); } } | 16,453 |
1 | int ff_h264_check_intra_pred_mode(H264Context *h, int mode){ MpegEncContext * const s = &h->s; static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1}; static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8}; if(mode > 6U) { av_log(h->s.avctx, AV_LOG_ERROR, "out of range intra chroma pred mode at %d %d\n", s->mb_x, s->mb_y); return -1; } if(!(h->top_samples_available&0x8000)){ mode= top[ mode ]; if(mode<0){ av_log(h->s.avctx, AV_LOG_ERROR, "top block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y); return -1; } } if((h->left_samples_available&0x8080) != 0x8080){ mode= left[ mode ]; if(h->left_samples_available&0x8080){ //mad cow disease mode, aka MBAFF + constrained_intra_pred mode= ALZHEIMER_DC_L0T_PRED8x8 + (!(h->left_samples_available&0x8000)) + 2*(mode == DC_128_PRED8x8); } if(mode<0){ av_log(h->s.avctx, AV_LOG_ERROR, "left block unavailable for requested intra mode at %d %d\n", s->mb_x, s->mb_y); return -1; } } return mode; } | 16,454 |
1 | static inline void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func * pix_op, int mx, int my) { const int lowres = s->avctx->lowres; const int op_index = FFMIN(lowres, 2); const int block_s = 8 >> lowres; const int s_mask = (2 << lowres) - 1; const int h_edge_pos = s->h_edge_pos >> lowres + 1; const int v_edge_pos = s->v_edge_pos >> lowres + 1; int emu = 0, src_x, src_y, offset, sx, sy; uint8_t *ptr; if (s->quarter_sample) { mx /= 2; my /= 2; } /* In case of 8X8, we construct a single chroma motion vector with a special rounding */ mx = ff_h263_round_chroma(mx); my = ff_h263_round_chroma(my); sx = mx & s_mask; sy = my & s_mask; src_x = s->mb_x * block_s + (mx >> lowres + 1); src_y = s->mb_y * block_s + (my >> lowres + 1); offset = src_y * s->uvlinesize + src_x; ptr = ref_picture[1] + offset; if (s->flags & CODEC_FLAG_EMU_EDGE) { if ((unsigned) src_x > h_edge_pos - (!!sx) - block_s || (unsigned) src_y > v_edge_pos - (!!sy) - block_s) { s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos); ptr = s->edge_emu_buffer; emu = 1; } } sx = (sx << 2) >> lowres; sy = (sy << 2) >> lowres; pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy); ptr = ref_picture[2] + offset; if (emu) { s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos); ptr = s->edge_emu_buffer; } pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy); } | 16,455 |
1 | static int mov_read_trun(MOVContext *c, AVIOContext *pb, MOVAtom atom) { MOVFragment *frag = &c->fragment; AVStream *st = NULL; MOVStreamContext *sc; MOVStts *ctts_data; uint64_t offset; int64_t dts; int data_offset = 0; unsigned entries, first_sample_flags = frag->flags; int flags, distance, i; for (i = 0; i < c->fc->nb_streams; i++) { if (c->fc->streams[i]->id == frag->track_id) { st = c->fc->streams[i]; break; } } if (!st) { av_log(c->fc, AV_LOG_ERROR, "could not find corresponding track id %u\n", frag->track_id); return AVERROR_INVALIDDATA; } sc = st->priv_data; if (sc->pseudo_stream_id+1 != frag->stsd_id && sc->pseudo_stream_id != -1) return 0; avio_r8(pb); /* version */ flags = avio_rb24(pb); entries = avio_rb32(pb); av_log(c->fc, AV_LOG_TRACE, "flags 0x%x entries %u\n", flags, entries); /* Always assume the presence of composition time offsets. * Without this assumption, for instance, we cannot deal with a track in fragmented movies that meet the following. * 1) in the initial movie, there are no samples. * 2) in the first movie fragment, there is only one sample without composition time offset. * 3) in the subsequent movie fragments, there are samples with composition time offset. */ if (!sc->ctts_count && sc->sample_count) { /* Complement ctts table if moov atom doesn't have ctts atom. */ ctts_data = av_fast_realloc(NULL, &sc->ctts_allocated_size, sizeof(*sc->ctts_data) * sc->sample_count); if (!ctts_data) return AVERROR(ENOMEM); /* Don't use a count greater than 1 here since it will leave a gap in * the ctts index which the code below relies on being sequential. */ sc->ctts_data = ctts_data; for (i = 0; i < sc->sample_count; i++) { sc->ctts_data[sc->ctts_count].count = 1; sc->ctts_data[sc->ctts_count].duration = 0; sc->ctts_count++; } } if ((uint64_t)entries+sc->ctts_count >= UINT_MAX/sizeof(*sc->ctts_data)) return AVERROR_INVALIDDATA; if (flags & MOV_TRUN_DATA_OFFSET) data_offset = avio_rb32(pb); if (flags & MOV_TRUN_FIRST_SAMPLE_FLAGS) first_sample_flags = avio_rb32(pb); dts = sc->track_end - sc->time_offset; offset = frag->base_data_offset + data_offset; distance = 0; av_log(c->fc, AV_LOG_TRACE, "first sample flags 0x%x\n", first_sample_flags); for (i = 0; i < entries && !pb->eof_reached; i++) { unsigned sample_size = frag->size; int sample_flags = i ? frag->flags : first_sample_flags; unsigned sample_duration = frag->duration; unsigned ctts_duration = 0; int keyframe = 0; int ctts_index = 0; int old_nb_index_entries = st->nb_index_entries; if (flags & MOV_TRUN_SAMPLE_DURATION) sample_duration = avio_rb32(pb); if (flags & MOV_TRUN_SAMPLE_SIZE) sample_size = avio_rb32(pb); if (flags & MOV_TRUN_SAMPLE_FLAGS) sample_flags = avio_rb32(pb); if (flags & MOV_TRUN_SAMPLE_CTS) ctts_duration = avio_rb32(pb); mov_update_dts_shift(sc, ctts_duration); if (frag->time != AV_NOPTS_VALUE) { if (c->use_mfra_for == FF_MOV_FLAG_MFRA_PTS) { int64_t pts = frag->time; av_log(c->fc, AV_LOG_DEBUG, "found frag time %"PRId64 " sc->dts_shift %d ctts.duration %d" " sc->time_offset %"PRId64" flags & MOV_TRUN_SAMPLE_CTS %d\n", pts, sc->dts_shift, ctts_duration, sc->time_offset, flags & MOV_TRUN_SAMPLE_CTS); dts = pts - sc->dts_shift; if (flags & MOV_TRUN_SAMPLE_CTS) { dts -= ctts_duration; } else { dts -= sc->time_offset; } av_log(c->fc, AV_LOG_DEBUG, "calculated into dts %"PRId64"\n", dts); } else { dts = frag->time - sc->time_offset; av_log(c->fc, AV_LOG_DEBUG, "found frag time %"PRId64 ", using it for dts\n", dts); } frag->time = AV_NOPTS_VALUE; } if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) keyframe = 1; else keyframe = !(sample_flags & (MOV_FRAG_SAMPLE_FLAG_IS_NON_SYNC | MOV_FRAG_SAMPLE_FLAG_DEPENDS_YES)); if (keyframe) distance = 0; ctts_index = av_add_index_entry(st, offset, dts, sample_size, distance, keyframe ? AVINDEX_KEYFRAME : 0); if (ctts_index >= 0 && old_nb_index_entries < st->nb_index_entries) { unsigned int size_needed = st->nb_index_entries * sizeof(*sc->ctts_data); unsigned int request_size = size_needed > sc->ctts_allocated_size ? FFMAX(size_needed, 2 * sc->ctts_allocated_size) : size_needed; ctts_data = av_fast_realloc(sc->ctts_data, &sc->ctts_allocated_size, request_size); if (!ctts_data) { av_freep(&sc->ctts_data); return AVERROR(ENOMEM); } sc->ctts_data = ctts_data; if (ctts_index != old_nb_index_entries) { memmove(sc->ctts_data + ctts_index + 1, sc->ctts_data + ctts_index, sizeof(*sc->ctts_data) * (sc->ctts_count - ctts_index)); if (ctts_index <= sc->current_sample) { // if we inserted a new item before the current sample, move the // counter ahead so it is still pointing to the same sample. sc->current_sample++; } } sc->ctts_data[ctts_index].count = 1; sc->ctts_data[ctts_index].duration = ctts_duration; sc->ctts_count++; } else { av_log(c->fc, AV_LOG_ERROR, "Failed to add index entry\n"); } av_log(c->fc, AV_LOG_TRACE, "AVIndex stream %d, sample %d, offset %"PRIx64", dts %"PRId64", " "size %u, distance %d, keyframe %d\n", st->index, ctts_index, offset, dts, sample_size, distance, keyframe); distance++; dts += sample_duration; offset += sample_size; sc->data_size += sample_size; sc->duration_for_fps += sample_duration; sc->nb_frames_for_fps ++; } if (pb->eof_reached) return AVERROR_EOF; frag->implicit_offset = offset; sc->track_end = dts + sc->time_offset; if (st->duration < sc->track_end) st->duration = sc->track_end; return 0; } | 16,456 |
1 | void qemu_cpu_kick(CPUState *cpu) { qemu_cond_broadcast(cpu->halt_cond); if (tcg_enabled()) { cpu_exit(cpu); /* Also ensure current RR cpu is kicked */ qemu_cpu_kick_rr_cpu(); } else { if (hax_enabled()) { /* * FIXME: race condition with the exit_request check in * hax_vcpu_hax_exec */ cpu->exit_request = 1; } qemu_cpu_kick_thread(cpu); } } | 16,457 |
1 | static void exec_accept_incoming_migration(void *opaque) { QEMUFile *f = opaque; qemu_set_fd_handler2(qemu_get_fd(f), NULL, NULL, NULL, NULL); process_incoming_migration(f); } | 16,458 |
1 | static int vnc_auth_sasl_check_access(VncState *vs) { const void *val; int err; int allow; err = sasl_getprop(vs->sasl.conn, SASL_USERNAME, &val); if (err != SASL_OK) { VNC_DEBUG("cannot query SASL username on connection %d (%s), denying access\n", err, sasl_errstring(err, NULL, NULL)); return -1; } if (val == NULL) { VNC_DEBUG("no client username was found, denying access\n"); return -1; } VNC_DEBUG("SASL client username %s\n", (const char *)val); vs->sasl.username = g_strdup((const char*)val); if (vs->vd->sasl.acl == NULL) { VNC_DEBUG("no ACL activated, allowing access\n"); return 0; } allow = qemu_acl_party_is_allowed(vs->vd->sasl.acl, vs->sasl.username); VNC_DEBUG("SASL client %s %s by ACL\n", vs->sasl.username, allow ? "allowed" : "denied"); return allow ? 0 : -1; } | 16,459 |
1 | static int enable_write_target(BDRVVVFATState *s) { BlockDriver *bdrv_qcow; QEMUOptionParameter *options; int ret; int size = sector2cluster(s, s->sector_count); s->used_clusters = calloc(size, 1); array_init(&(s->commits), sizeof(commit_t)); s->qcow_filename = g_malloc(1024); ret = get_tmp_filename(s->qcow_filename, 1024); if (ret < 0) { g_free(s->qcow_filename); s->qcow_filename = NULL; return ret; } bdrv_qcow = bdrv_find_format("qcow"); options = parse_option_parameters("", bdrv_qcow->create_options, NULL); set_option_parameter_int(options, BLOCK_OPT_SIZE, s->sector_count * 512); set_option_parameter(options, BLOCK_OPT_BACKING_FILE, "fat:"); if (bdrv_create(bdrv_qcow, s->qcow_filename, options) < 0) return -1; s->qcow = bdrv_new(""); if (s->qcow == NULL) { return -1; } ret = bdrv_open(s->qcow, s->qcow_filename, NULL, BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH, bdrv_qcow); if (ret < 0) { return ret; } #ifndef _WIN32 unlink(s->qcow_filename); #endif s->bs->backing_hd = calloc(sizeof(BlockDriverState), 1); s->bs->backing_hd->drv = &vvfat_write_target; s->bs->backing_hd->opaque = g_malloc(sizeof(void*)); *(void**)s->bs->backing_hd->opaque = s; return 0; } | 16,460 |
1 | void ide_atapi_cmd(IDEState *s) { uint8_t *buf = s->io_buffer; const struct AtapiCmd *cmd = &atapi_cmd_table[s->io_buffer[0]]; #ifdef DEBUG_IDE_ATAPI { int i; printf("ATAPI limit=0x%x packet:", s->lcyl | (s->hcyl << 8)); for(i = 0; i < ATAPI_PACKET_SIZE; i++) { printf(" %02x", buf[i]); } printf("\n"); } #endif /* * If there's a UNIT_ATTENTION condition pending, only command flagged with * ALLOW_UA are allowed to complete. with other commands getting a CHECK * condition response unless a higher priority status, defined by the drive * here, is pending. */ if (s->sense_key == UNIT_ATTENTION && !(cmd->flags & ALLOW_UA)) { ide_atapi_cmd_check_status(s); return; } /* * When a CD gets changed, we have to report an ejected state and * then a loaded state to guests so that they detect tray * open/close and media change events. Guests that do not use * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close * states rely on this behavior. */ if (!(cmd->flags & ALLOW_UA) && !s->tray_open && blk_is_inserted(s->blk) && s->cdrom_changed) { if (s->cdrom_changed == 1) { ide_atapi_cmd_error(s, NOT_READY, ASC_MEDIUM_NOT_PRESENT); s->cdrom_changed = 2; } else { ide_atapi_cmd_error(s, UNIT_ATTENTION, ASC_MEDIUM_MAY_HAVE_CHANGED); s->cdrom_changed = 0; } return; } /* Report a Not Ready condition if appropriate for the command */ if ((cmd->flags & CHECK_READY) && (!media_present(s) || !blk_is_inserted(s->blk))) { ide_atapi_cmd_error(s, NOT_READY, ASC_MEDIUM_NOT_PRESENT); return; } /* Nondata commands permit the byte_count_limit to be 0. * If this is a data-transferring PIO command and BCL is 0, * we abort at the /ATA/ level, not the ATAPI level. * See ATA8 ACS3 section 7.17.6.49 and 7.21.5 */ if (cmd->handler && !(cmd->flags & NONDATA)) { /* TODO: Check IDENTIFY data word 125 for default BCL (currently 0) */ if (!(atapi_byte_count_limit(s) || s->atapi_dma)) { /* TODO: Move abort back into core.c and make static inline again */ ide_abort_command(s); return; } } /* Execute the command */ if (cmd->handler) { cmd->handler(s, buf); return; } ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_ILLEGAL_OPCODE); } | 16,461 |
1 | static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp) { uint16_t nodenr; uint16List *cpus = NULL; if (node->has_nodeid) { nodenr = node->nodeid; } else { nodenr = nb_numa_nodes; } if (nodenr >= MAX_NODES) { error_setg(errp, "Max number of NUMA nodes reached: %" PRIu16 "", nodenr); return; } if (numa_info[nodenr].present) { error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr); return; } for (cpus = node->cpus; cpus; cpus = cpus->next) { if (cpus->value > MAX_CPUMASK_BITS) { error_setg(errp, "CPU number %" PRIu16 " is bigger than %d", cpus->value, MAX_CPUMASK_BITS); return; } bitmap_set(numa_info[nodenr].node_cpu, cpus->value, 1); } if (node->has_mem && node->has_memdev) { error_setg(errp, "qemu: cannot specify both mem= and memdev="); return; } if (have_memdevs == -1) { have_memdevs = node->has_memdev; } if (node->has_memdev != have_memdevs) { error_setg(errp, "qemu: memdev option must be specified for either " "all or no nodes"); return; } if (node->has_mem) { uint64_t mem_size = node->mem; const char *mem_str = qemu_opt_get(opts, "mem"); /* Fix up legacy suffix-less format */ if (g_ascii_isdigit(mem_str[strlen(mem_str) - 1])) { mem_size <<= 20; } numa_info[nodenr].node_mem = mem_size; } if (node->has_memdev) { Object *o; o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL); if (!o) { error_setg(errp, "memdev=%s is ambiguous", node->memdev); return; } object_ref(o); numa_info[nodenr].node_mem = object_property_get_int(o, "size", NULL); numa_info[nodenr].node_memdev = MEMORY_BACKEND(o); } numa_info[nodenr].present = true; max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1); } | 16,462 |
1 | static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr, MSIMessage *msg, IOHandler *handler) { VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev); VFIOMSIVector *vector; int ret; trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr); vector = &vdev->msi_vectors[nr]; if (!vector->use) { vector->vdev = vdev; vector->virq = -1; if (event_notifier_init(&vector->interrupt, 0)) { error_report("vfio: Error: event_notifier_init failed"); } vector->use = true; msix_vector_use(pdev, nr); } qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt), handler, NULL, vector); /* * Attempt to enable route through KVM irqchip, * default to userspace handling if unavailable. */ if (vector->virq >= 0) { if (!msg) { vfio_remove_kvm_msi_virq(vector); } else { vfio_update_kvm_msi_virq(vector, *msg, pdev); } } else { vfio_add_kvm_msi_virq(vdev, vector, nr, true); } /* * We don't want to have the host allocate all possible MSI vectors * for a device if they're not in use, so we shutdown and incrementally * increase them as needed. */ if (vdev->nr_vectors < nr + 1) { vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX); vdev->nr_vectors = nr + 1; ret = vfio_enable_vectors(vdev, true); if (ret) { error_report("vfio: failed to enable vectors, %d", ret); } } else { int argsz; struct vfio_irq_set *irq_set; int32_t *pfd; argsz = sizeof(*irq_set) + sizeof(*pfd); irq_set = g_malloc0(argsz); irq_set->argsz = argsz; irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER; irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX; irq_set->start = nr; irq_set->count = 1; pfd = (int32_t *)&irq_set->data; if (vector->virq >= 0) { *pfd = event_notifier_get_fd(&vector->kvm_interrupt); } else { *pfd = event_notifier_get_fd(&vector->interrupt); } ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set); g_free(irq_set); if (ret) { error_report("vfio: failed to modify vector, %d", ret); } } /* Disable PBA emulation when nothing more is pending. */ clear_bit(nr, vdev->msix->pending); if (find_first_bit(vdev->msix->pending, vdev->nr_vectors) == vdev->nr_vectors) { memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false); trace_vfio_msix_pba_disable(vdev->vbasedev.name); } return 0; } | 16,463 |
1 | void hmp_drive_mirror(Monitor *mon, const QDict *qdict) { const char *device = qdict_get_str(qdict, "device"); const char *filename = qdict_get_str(qdict, "target"); const char *format = qdict_get_try_str(qdict, "format"); int reuse = qdict_get_try_bool(qdict, "reuse", 0); int full = qdict_get_try_bool(qdict, "full", 0); enum NewImageMode mode; Error *errp = NULL; if (!filename) { error_set(&errp, QERR_MISSING_PARAMETER, "target"); hmp_handle_error(mon, &errp); return; } if (reuse) { mode = NEW_IMAGE_MODE_EXISTING; } else { mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; } qmp_drive_mirror(device, filename, !!format, format, full ? MIRROR_SYNC_MODE_FULL : MIRROR_SYNC_MODE_TOP, true, mode, false, 0, &errp); hmp_handle_error(mon, &errp); } | 16,466 |
1 | static int qemu_rdma_unregister_waiting(RDMAContext *rdma) { while (rdma->unregistrations[rdma->unregister_current]) { int ret; uint64_t wr_id = rdma->unregistrations[rdma->unregister_current]; uint64_t chunk = (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT; uint64_t index = (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT; RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]); RDMARegister reg = { .current_index = index }; RDMAControlHeader resp = { .type = RDMA_CONTROL_UNREGISTER_FINISHED, }; RDMAControlHeader head = { .len = sizeof(RDMARegister), .type = RDMA_CONTROL_UNREGISTER_REQUEST, .repeat = 1, }; DDPRINTF("Processing unregister for chunk: %" PRIu64 " at position %d\n", chunk, rdma->unregister_current); rdma->unregistrations[rdma->unregister_current] = 0; rdma->unregister_current++; if (rdma->unregister_current == RDMA_SIGNALED_SEND_MAX) { rdma->unregister_current = 0; } /* * Unregistration is speculative (because migration is single-threaded * and we cannot break the protocol's inifinband message ordering). * Thus, if the memory is currently being used for transmission, * then abort the attempt to unregister and try again * later the next time a completion is received for this memory. */ clear_bit(chunk, block->unregister_bitmap); if (test_bit(chunk, block->transit_bitmap)) { DDPRINTF("Cannot unregister inflight chunk: %" PRIu64 "\n", chunk); continue; } DDPRINTF("Sending unregister for chunk: %" PRIu64 "\n", chunk); ret = ibv_dereg_mr(block->pmr[chunk]); block->pmr[chunk] = NULL; block->remote_keys[chunk] = 0; if (ret != 0) { perror("unregistration chunk failed"); return -ret; } rdma->total_registrations--; reg.key.chunk = chunk; register_to_network(®); ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) ®, &resp, NULL, NULL); if (ret < 0) { return ret; } DDPRINTF("Unregister for chunk: %" PRIu64 " complete.\n", chunk); } return 0; } | 16,467 |
1 | static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s) { VirtIOBlockReq *req = virtio_blk_alloc_request(s); if (!virtqueue_pop(s->vq, req->elem)) { virtio_blk_free_request(req); return NULL; } return req; } | 16,469 |
1 | static void memory_region_oldmmio_read_accessor(MemoryRegion *mr, hwaddr addr, uint64_t *value, unsigned size, unsigned shift, uint64_t mask) { uint64_t tmp; tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr); trace_memory_region_ops_read(mr, addr, tmp, size); *value |= (tmp & mask) << shift; } | 16,470 |
0 | static int file_open_dir(URLContext *h) { #if HAVE_DIRENT_H FileContext *c = h->priv_data; c->dir = opendir(h->filename); if (!c->dir) return AVERROR(errno); return 0; #else return AVERROR(ENOSYS); #endif /* HAVE_DIRENT_H */ } | 16,471 |
0 | void h263_encode_picture_header(MpegEncContext * s, int picture_number) { int format; align_put_bits(&s->pb); /* Update the pointer to last GOB */ s->ptr_lastgob = pbBufPtr(&s->pb); s->gob_number = 0; put_bits(&s->pb, 22, 0x20); /* PSC */ put_bits(&s->pb, 8, (((int64_t)s->picture_number * 30 * s->avctx->frame_rate_base) / s->avctx->frame_rate) & 0xff); put_bits(&s->pb, 1, 1); /* marker */ put_bits(&s->pb, 1, 0); /* h263 id */ put_bits(&s->pb, 1, 0); /* split screen off */ put_bits(&s->pb, 1, 0); /* camera off */ put_bits(&s->pb, 1, 0); /* freeze picture release off */ format = h263_get_picture_format(s->width, s->height); if (!s->h263_plus) { /* H.263v1 */ put_bits(&s->pb, 3, format); put_bits(&s->pb, 1, (s->pict_type == P_TYPE)); /* By now UMV IS DISABLED ON H.263v1, since the restrictions of H.263v1 UMV implies to check the predicted MV after calculation of the current MB to see if we're on the limits */ put_bits(&s->pb, 1, 0); /* unrestricted motion vector: off */ put_bits(&s->pb, 1, 0); /* SAC: off */ put_bits(&s->pb, 1, s->obmc); /* advanced prediction mode */ put_bits(&s->pb, 1, 0); /* not PB frame */ put_bits(&s->pb, 5, s->qscale); put_bits(&s->pb, 1, 0); /* Continuous Presence Multipoint mode: off */ } else { /* H.263v2 */ /* H.263 Plus PTYPE */ put_bits(&s->pb, 3, 7); put_bits(&s->pb,3,1); /* Update Full Extended PTYPE */ if (format == 7) put_bits(&s->pb,3,6); /* Custom Source Format */ else put_bits(&s->pb, 3, format); put_bits(&s->pb,1,0); /* Custom PCF: off */ s->umvplus = s->unrestricted_mv; put_bits(&s->pb, 1, s->umvplus); /* Unrestricted Motion Vector */ put_bits(&s->pb,1,0); /* SAC: off */ put_bits(&s->pb,1,s->obmc); /* Advanced Prediction Mode */ put_bits(&s->pb,1,s->h263_aic); /* Advanced Intra Coding */ put_bits(&s->pb,1,0); /* Deblocking Filter: off */ put_bits(&s->pb,1,0); /* Slice Structured: off */ put_bits(&s->pb,1,0); /* Reference Picture Selection: off */ put_bits(&s->pb,1,0); /* Independent Segment Decoding: off */ put_bits(&s->pb,1,s->alt_inter_vlc); /* Alternative Inter VLC */ put_bits(&s->pb,1,0); /* Modified Quantization: off */ put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */ put_bits(&s->pb,3,0); /* Reserved */ put_bits(&s->pb, 3, s->pict_type == P_TYPE); put_bits(&s->pb,1,0); /* Reference Picture Resampling: off */ put_bits(&s->pb,1,0); /* Reduced-Resolution Update: off */ put_bits(&s->pb,1,s->no_rounding); /* Rounding Type */ put_bits(&s->pb,2,0); /* Reserved */ put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */ /* This should be here if PLUSPTYPE */ put_bits(&s->pb, 1, 0); /* Continuous Presence Multipoint mode: off */ if (format == 7) { /* Custom Picture Format (CPFMT) */ aspect_to_info(s, s->avctx->sample_aspect_ratio); put_bits(&s->pb,4,s->aspect_ratio_info); put_bits(&s->pb,9,(s->width >> 2) - 1); put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */ put_bits(&s->pb,9,(s->height >> 2)); if (s->aspect_ratio_info == FF_ASPECT_EXTENDED){ put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num); put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.den); } } /* Unlimited Unrestricted Motion Vectors Indicator (UUI) */ if (s->umvplus) // put_bits(&s->pb,1,1); /* Limited according tables of Annex D */ put_bits(&s->pb,2,1); /* unlimited */ put_bits(&s->pb, 5, s->qscale); } put_bits(&s->pb, 1, 0); /* no PEI */ if(s->h263_aic){ s->y_dc_scale_table= s->c_dc_scale_table= ff_aic_dc_scale_table; }else{ s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; } } | 16,472 |
0 | static int filter_frame(AVFilterLink *inlink, AVFrame *frame) { AVFilterContext *ctx = inlink->dst; APadContext *apad = ctx->priv; if (apad->whole_len) apad->whole_len -= frame->nb_samples; apad->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base); return ff_filter_frame(ctx->outputs[0], frame); } | 16,473 |
0 | static int seg_write_header(AVFormatContext *s) { SegmentContext *seg = s->priv_data; AVFormatContext *oc = NULL; AVDictionary *options = NULL; int ret; seg->segment_count = 0; if (!seg->write_header_trailer) seg->individual_header_trailer = 0; if (!!seg->time_str + !!seg->times_str + !!seg->frames_str > 1) { av_log(s, AV_LOG_ERROR, "segment_time, segment_times, and segment_frames options " "are mutually exclusive, select just one of them\n"); return AVERROR(EINVAL); } if (seg->times_str) { if ((ret = parse_times(s, &seg->times, &seg->nb_times, seg->times_str)) < 0) return ret; } else if (seg->frames_str) { if ((ret = parse_frames(s, &seg->frames, &seg->nb_frames, seg->frames_str)) < 0) return ret; } else { /* set default value if not specified */ if (!seg->time_str) seg->time_str = av_strdup("2"); if ((ret = av_parse_time(&seg->time, seg->time_str, 1)) < 0) { av_log(s, AV_LOG_ERROR, "Invalid time duration specification '%s' for segment_time option\n", seg->time_str); return ret; } } if (seg->format_options_str) { ret = av_dict_parse_string(&seg->format_options, seg->format_options_str, "=", ":", 0); if (ret < 0) { av_log(s, AV_LOG_ERROR, "Could not parse format options list '%s'\n", seg->format_options_str); goto fail; } } if (seg->list) { if (seg->list_type == LIST_TYPE_UNDEFINED) { if (av_match_ext(seg->list, "csv" )) seg->list_type = LIST_TYPE_CSV; else if (av_match_ext(seg->list, "ext" )) seg->list_type = LIST_TYPE_EXT; else if (av_match_ext(seg->list, "m3u8")) seg->list_type = LIST_TYPE_M3U8; else if (av_match_ext(seg->list, "ffcat,ffconcat")) seg->list_type = LIST_TYPE_FFCONCAT; else seg->list_type = LIST_TYPE_FLAT; } if ((ret = segment_list_open(s)) < 0) goto fail; } if (seg->list_type == LIST_TYPE_EXT) av_log(s, AV_LOG_WARNING, "'ext' list type option is deprecated in favor of 'csv'\n"); if ((ret = select_reference_stream(s)) < 0) goto fail; av_log(s, AV_LOG_VERBOSE, "Selected stream id:%d type:%s\n", seg->reference_stream_index, av_get_media_type_string(s->streams[seg->reference_stream_index]->codec->codec_type)); seg->oformat = av_guess_format(seg->format, s->filename, NULL); if (!seg->oformat) { ret = AVERROR_MUXER_NOT_FOUND; goto fail; } if (seg->oformat->flags & AVFMT_NOFILE) { av_log(s, AV_LOG_ERROR, "format %s not supported.\n", seg->oformat->name); ret = AVERROR(EINVAL); goto fail; } if ((ret = segment_mux_init(s)) < 0) goto fail; oc = seg->avf; if ((ret = set_segment_filename(s)) < 0) goto fail; if (seg->write_header_trailer) { if ((ret = avio_open2(&oc->pb, oc->filename, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL)) < 0) { av_log(s, AV_LOG_ERROR, "Failed to open segment '%s'\n", oc->filename); goto fail; } } else { if ((ret = open_null_ctx(&oc->pb)) < 0) goto fail; } av_dict_copy(&options, seg->format_options, 0); ret = avformat_write_header(oc, &options); if (av_dict_count(options)) { av_log(s, AV_LOG_ERROR, "Some of the provided format options in '%s' are not recognized\n", seg->format_options_str); } av_dict_free(&options); if (ret < 0) { avio_close(oc->pb); goto fail; } seg->segment_frame_count = 0; if (oc->avoid_negative_ts > 0 && s->avoid_negative_ts < 0) s->avoid_negative_ts = 1; if (!seg->write_header_trailer) { close_null_ctx(oc->pb); if ((ret = avio_open2(&oc->pb, oc->filename, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL)) < 0) goto fail; } fail: if (ret) { if (seg->list) avio_close(seg->list_pb); if (seg->avf) avformat_free_context(seg->avf); } return ret; } | 16,474 |
0 | static void free_duplicate_context(MpegEncContext *s) { if (s == NULL) return; av_freep(&s->edge_emu_buffer); av_freep(&s->me.scratchpad); s->me.temp = s->rd_scratchpad = s->b_scratchpad = s->obmc_scratchpad = NULL; av_freep(&s->dct_error_sum); av_freep(&s->me.map); av_freep(&s->me.score_map); av_freep(&s->blocks); av_freep(&s->ac_val_base); s->block = NULL; } | 16,475 |
0 | void spapr_tce_reset(sPAPRTCETable *tcet) { size_t table_size = (tcet->window_size >> SPAPR_TCE_PAGE_SHIFT) * sizeof(sPAPRTCE); tcet->bypass = false; memset(tcet->table, 0, table_size); } | 16,476 |
0 | void OPPROTO op_decq_ECX(void) { ECX--; } | 16,477 |
0 | aio_ctx_prepare(GSource *source, gint *timeout) { AioContext *ctx = (AioContext *) source; uint32_t wait = -1; aio_bh_update_timeout(ctx, &wait); if (wait != -1) { *timeout = MIN(*timeout, wait); return wait == 0; } return false; } | 16,478 |
0 | static void pci_unplug_disks(PCIBus *bus) { pci_for_each_device(bus, 0, unplug_disks, NULL); } | 16,479 |
0 | static void dec_div(DisasContext *dc) { unsigned int u; u = dc->imm & 2; LOG_DIS("div\n"); if (!(dc->env->pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK) && !((dc->env->pvr.regs[0] & PVR0_USE_DIV_MASK))) { tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP); t_gen_raise_exception(dc, EXCP_HW_EXCP); } if (u) gen_helper_divu(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]); else gen_helper_divs(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]); if (!dc->rd) tcg_gen_movi_tl(cpu_R[dc->rd], 0); } | 16,480 |
0 | static int nbd_negotiate_handle_export_name(NBDClient *client, uint32_t length, uint16_t myflags, bool no_zeroes, Error **errp) { char name[NBD_MAX_NAME_SIZE + 1]; char buf[8 + 4 + 124] = ""; size_t len; int ret; /* Client sends: [20 .. xx] export name (length bytes) */ trace_nbd_negotiate_handle_export_name(); if (length >= sizeof(name)) { error_setg(errp, "Bad length received"); return -EINVAL; } if (nbd_read(client->ioc, name, length, errp) < 0) { error_prepend(errp, "read failed: "); return -EINVAL; } name[length] = '\0'; trace_nbd_negotiate_handle_export_name_request(name); client->exp = nbd_export_find(name); if (!client->exp) { error_setg(errp, "export not found"); return -EINVAL; } trace_nbd_negotiate_new_style_size_flags(client->exp->size, client->exp->nbdflags | myflags); stq_be_p(buf, client->exp->size); stw_be_p(buf + 8, client->exp->nbdflags | myflags); len = no_zeroes ? 10 : sizeof(buf); ret = nbd_write(client->ioc, buf, len, errp); if (ret < 0) { error_prepend(errp, "write failed: "); return ret; } QTAILQ_INSERT_TAIL(&client->exp->clients, client, next); nbd_export_get(client->exp); return 0; } | 16,481 |
0 | void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)) { MapClient *client = qemu_malloc(sizeof(*client)); client->opaque = opaque; client->callback = callback; LIST_INSERT_HEAD(&map_client_list, client, link); return client; } | 16,482 |
0 | vnc_socket_ip_addr_string(QIOChannelSocket *ioc, bool local, Error **errp) { SocketAddress *addr; char *ret; if (local) { addr = qio_channel_socket_get_local_address(ioc, errp); } else { addr = qio_channel_socket_get_remote_address(ioc, errp); } if (!addr) { return NULL; } if (addr->type != SOCKET_ADDRESS_KIND_INET) { error_setg(errp, "Not an inet socket type"); return NULL; } ret = g_strdup_printf("%s;%s", addr->u.inet->host, addr->u.inet->port); qapi_free_SocketAddress(addr); return ret; } | 16,485 |
0 | static void RENAME(extract_even2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count) { dst0 += count; dst1 += count; src0 += 4*count; src1 += 4*count; count= - count; #ifdef PAVGB if(count <= -8) { count += 7; __asm__ volatile( "pcmpeqw %%mm7, %%mm7 \n\t" "psrlw $8, %%mm7 \n\t" "1: \n\t" "movq -28(%1, %0, 4), %%mm0 \n\t" "movq -20(%1, %0, 4), %%mm1 \n\t" "movq -12(%1, %0, 4), %%mm2 \n\t" "movq -4(%1, %0, 4), %%mm3 \n\t" PAVGB" -28(%2, %0, 4), %%mm0 \n\t" PAVGB" -20(%2, %0, 4), %%mm1 \n\t" PAVGB" -12(%2, %0, 4), %%mm2 \n\t" PAVGB" - 4(%2, %0, 4), %%mm3 \n\t" "pand %%mm7, %%mm0 \n\t" "pand %%mm7, %%mm1 \n\t" "pand %%mm7, %%mm2 \n\t" "pand %%mm7, %%mm3 \n\t" "packuswb %%mm1, %%mm0 \n\t" "packuswb %%mm3, %%mm2 \n\t" "movq %%mm0, %%mm1 \n\t" "movq %%mm2, %%mm3 \n\t" "psrlw $8, %%mm0 \n\t" "psrlw $8, %%mm2 \n\t" "pand %%mm7, %%mm1 \n\t" "pand %%mm7, %%mm3 \n\t" "packuswb %%mm2, %%mm0 \n\t" "packuswb %%mm3, %%mm1 \n\t" MOVNTQ" %%mm0,- 7(%4, %0) \n\t" MOVNTQ" %%mm1,- 7(%3, %0) \n\t" "add $8, %0 \n\t" " js 1b \n\t" : "+r"(count) : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1) ); count -= 7; } #endif while(count<0) { dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1; dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1; count++; } } | 16,486 |
0 | void r4k_helper_tlbp(CPUMIPSState *env) { r4k_tlb_t *tlb; target_ulong mask; target_ulong tag; target_ulong VPN; uint8_t ASID; int i; ASID = env->CP0_EntryHi & 0xFF; for (i = 0; i < env->tlb->nb_tlb; i++) { tlb = &env->tlb->mmu.r4k.tlb[i]; /* 1k pages are not supported. */ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); tag = env->CP0_EntryHi & ~mask; VPN = tlb->VPN & ~mask; #if defined(TARGET_MIPS64) tag &= env->SEGMask; #endif /* Check ASID, virtual page number & size */ if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) { /* TLB match */ env->CP0_Index = i; break; } } if (i == env->tlb->nb_tlb) { /* No match. Discard any shadow entries, if any of them match. */ for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) { tlb = &env->tlb->mmu.r4k.tlb[i]; /* 1k pages are not supported. */ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); tag = env->CP0_EntryHi & ~mask; VPN = tlb->VPN & ~mask; #if defined(TARGET_MIPS64) tag &= env->SEGMask; #endif /* Check ASID, virtual page number & size */ if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) { r4k_mips_tlb_flush_extra (env, i); break; } } env->CP0_Index |= 0x80000000; } } | 16,488 |
0 | static void cloop_refresh_limits(BlockDriverState *bs, Error **errp) { bs->request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O supported */ } | 16,489 |
0 | static void vmsvga_init(struct vmsvga_state_s *s, DisplayState *ds, uint8_t *vga_ram_base, unsigned long vga_ram_offset, int vga_ram_size) { s->ds = ds; s->vram = vga_ram_base; s->vram_size = vga_ram_size; s->vram_offset = vga_ram_offset; s->scratch_size = SVGA_SCRATCH_SIZE; s->scratch = (uint32_t *) qemu_malloc(s->scratch_size * 4); vmsvga_reset(s); s->console = graphic_console_init(ds, vmsvga_update_display, vmsvga_invalidate_display, vmsvga_screen_dump, vmsvga_text_update, s); #ifdef EMBED_STDVGA vga_common_init((VGAState *) s, ds, vga_ram_base, vga_ram_offset, vga_ram_size); vga_init((VGAState *) s); #endif } | 16,490 |
0 | static void simple_string(void) { int i; struct { const char *encoded; const char *decoded; } test_cases[] = { { "\"hello world\"", "hello world" }, { "\"the quick brown fox jumped over the fence\"", "the quick brown fox jumped over the fence" }, {} }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QString *str; obj = qobject_from_json(test_cases[i].encoded); g_assert(obj != NULL); g_assert(qobject_type(obj) == QTYPE_QSTRING); str = qobject_to_qstring(obj); g_assert(strcmp(qstring_get_str(str), test_cases[i].decoded) == 0); str = qobject_to_json(obj); g_assert(strcmp(qstring_get_str(str), test_cases[i].encoded) == 0); qobject_decref(obj); QDECREF(str); } } | 16,491 |
0 | static uint32_t lan9118_readw(void *opaque, target_phys_addr_t offset) { lan9118_state *s = (lan9118_state *)opaque; uint32_t val; if (s->read_word_prev_offset != (offset & ~0x3)) { /* New offset, reset word counter */ s->read_word_n = 0; s->read_word_prev_offset = offset & ~0x3; } s->read_word_n++; if (s->read_word_n == 1) { s->read_long = lan9118_readl(s, offset & ~3, 4); } else { s->read_word_n = 0; } if (offset & 2) { val = s->read_long >> 16; } else { val = s->read_long & 0xFFFF; } //DPRINTF("Readw reg 0x%02x, val 0x%x\n", (int)offset, val); return val; } | 16,492 |
0 | int32_t helper_fdtoi(CPUSPARCState *env, float64 src) { int32_t ret; clear_float_exceptions(env); ret = float64_to_int32_round_to_zero(src, &env->fp_status); check_ieee_exceptions(env); return ret; } | 16,493 |
0 | static void escaped_string(void) { int i; struct { const char *encoded; const char *decoded; int skip; } test_cases[] = { { "\"\\b\"", "\b" }, { "\"\\f\"", "\f" }, { "\"\\n\"", "\n" }, { "\"\\r\"", "\r" }, { "\"\\t\"", "\t" }, { "\"/\"", "/" }, { "\"\\/\"", "/", .skip = 1 }, { "\"\\\\\"", "\\" }, { "\"\\\"\"", "\"" }, { "\"hello world \\\"embedded string\\\"\"", "hello world \"embedded string\"" }, { "\"hello world\\nwith new line\"", "hello world\nwith new line" }, { "\"single byte utf-8 \\u0020\"", "single byte utf-8 ", .skip = 1 }, { "\"double byte utf-8 \\u00A2\"", "double byte utf-8 \xc2\xa2" }, { "\"triple byte utf-8 \\u20AC\"", "triple byte utf-8 \xe2\x82\xac" }, { "'\\b'", "\b", .skip = 1 }, { "'\\f'", "\f", .skip = 1 }, { "'\\n'", "\n", .skip = 1 }, { "'\\r'", "\r", .skip = 1 }, { "'\\t'", "\t", .skip = 1 }, { "'\\/'", "/", .skip = 1 }, { "'\\\\'", "\\", .skip = 1 }, {} }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QString *str; obj = qobject_from_json(test_cases[i].encoded); g_assert(obj != NULL); g_assert(qobject_type(obj) == QTYPE_QSTRING); str = qobject_to_qstring(obj); g_assert_cmpstr(qstring_get_str(str), ==, test_cases[i].decoded); if (test_cases[i].skip == 0) { str = qobject_to_json(obj); g_assert_cmpstr(qstring_get_str(str), ==, test_cases[i].encoded); qobject_decref(obj); } QDECREF(str); } } | 16,494 |
0 | static size_t v9fs_packunpack(void *addr, struct iovec *sg, int sg_count, size_t offset, size_t size, int pack) { int i = 0; size_t copied = 0; for (i = 0; size && i < sg_count; i++) { size_t len; if (offset >= sg[i].iov_len) { /* skip this sg */ offset -= sg[i].iov_len; continue; } else { len = MIN(sg[i].iov_len - offset, size); if (pack) { memcpy(sg[i].iov_base + offset, addr, len); } else { memcpy(addr, sg[i].iov_base + offset, len); } size -= len; copied += len; addr += len; if (size) { offset = 0; continue; } } } return copied; } | 16,495 |
0 | static void pci_bridge_write_config(PCIDevice *d, uint32_t address, uint32_t val, int len) { PCIBridge *s = (PCIBridge *)d; if (address == 0x19 || (address == 0x18 && len > 1)) { if (address == 0x19) s->bus->bus_num = val & 0xff; else s->bus->bus_num = (val >> 8) & 0xff; #if defined(DEBUG_PCI) printf ("pci-bridge: %s: Assigned bus %d\n", d->name, s->bus->bus_num); #endif } pci_default_write_config(d, address, val, len); } | 16,496 |
0 | static void decode_mb_i(AVSContext *h) { GetBitContext *gb = &h->s.gb; int block, pred_mode_uv; uint8_t top[18]; uint8_t left[18]; uint8_t *d; init_mb(h); /* get intra prediction modes from stream */ for(block=0;block<4;block++) { int nA,nB,predpred; int pos = scan3x3[block]; nA = h->pred_mode_Y[pos-1]; nB = h->pred_mode_Y[pos-3]; if((nA == NOT_AVAIL) || (nB == NOT_AVAIL)) predpred = 2; else predpred = FFMIN(nA,nB); if(get_bits1(gb)) h->pred_mode_Y[pos] = predpred; else { h->pred_mode_Y[pos] = get_bits(gb,2); if(h->pred_mode_Y[pos] >= predpred) h->pred_mode_Y[pos]++; } } pred_mode_uv = get_ue_golomb(gb); if(pred_mode_uv > 6) { av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra chroma pred mode\n"); pred_mode_uv = 0; } /* save pred modes before they get modified */ h->pred_mode_Y[3] = h->pred_mode_Y[5]; h->pred_mode_Y[6] = h->pred_mode_Y[8]; h->top_pred_Y[h->mbx*2+0] = h->pred_mode_Y[7]; h->top_pred_Y[h->mbx*2+1] = h->pred_mode_Y[8]; /* modify pred modes according to availability of neighbour samples */ if(!(h->flags & A_AVAIL)) { modify_pred(left_modifier_l, &h->pred_mode_Y[4] ); modify_pred(left_modifier_l, &h->pred_mode_Y[7] ); modify_pred(left_modifier_c, &pred_mode_uv ); } if(!(h->flags & B_AVAIL)) { modify_pred(top_modifier_l, &h->pred_mode_Y[4] ); modify_pred(top_modifier_l, &h->pred_mode_Y[5] ); modify_pred(top_modifier_c, &pred_mode_uv ); } /* get coded block pattern */ if(h->pic_type == FF_I_TYPE) h->cbp = cbp_tab[get_ue_golomb(gb)][0]; if(h->cbp && !h->qp_fixed) h->qp += get_se_golomb(gb); //qp_delta /* luma intra prediction interleaved with residual decode/transform/add */ for(block=0;block<4;block++) { d = h->cy + h->luma_scan[block]; load_intra_pred_luma(h, top, left, block); h->intra_pred_l[h->pred_mode_Y[scan3x3[block]]] (d, top, left, h->l_stride); if(h->cbp & (1<<block)) decode_residual_block(h,gb,intra_2dvlc,1,h->qp,d,h->l_stride); } /* chroma intra prediction */ /* extend borders by one pixel */ h->left_border_u[9] = h->left_border_u[8]; h->left_border_v[9] = h->left_border_v[8]; h->top_border_u[h->mbx*10+9] = h->top_border_u[h->mbx*10+8]; h->top_border_v[h->mbx*10+9] = h->top_border_v[h->mbx*10+8]; if(h->mbx && h->mby) { h->top_border_u[h->mbx*10] = h->left_border_u[0] = h->topleft_border_u; h->top_border_v[h->mbx*10] = h->left_border_v[0] = h->topleft_border_v; } else { h->left_border_u[0] = h->left_border_u[1]; h->left_border_v[0] = h->left_border_v[1]; h->top_border_u[h->mbx*10] = h->top_border_u[h->mbx*10+1]; h->top_border_v[h->mbx*10] = h->top_border_v[h->mbx*10+1]; } h->intra_pred_c[pred_mode_uv](h->cu, &h->top_border_u[h->mbx*10], h->left_border_u, h->c_stride); h->intra_pred_c[pred_mode_uv](h->cv, &h->top_border_v[h->mbx*10], h->left_border_v, h->c_stride); decode_residual_chroma(h); filter_mb(h,I_8X8); /* mark motion vectors as intra */ h->mv[MV_FWD_X0] = intra_mv; set_mvs(&h->mv[MV_FWD_X0], BLK_16X16); h->mv[MV_BWD_X0] = intra_mv; set_mvs(&h->mv[MV_BWD_X0], BLK_16X16); if(h->pic_type != FF_B_TYPE) *h->col_type = I_8X8; } | 16,497 |
0 | static void handle_buffered_io(void *opaque) { XenIOState *state = opaque; if (handle_buffered_iopage(state)) { timer_mod(state->buffered_io_timer, BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME)); } else { timer_del(state->buffered_io_timer); xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port); } } | 16,499 |
0 | static void gt64120_isd_mapping(GT64120State *s) { target_phys_addr_t start = s->regs[GT_ISD] << 21; target_phys_addr_t length = 0x1000; if (s->ISD_length) { memory_region_del_subregion(get_system_memory(), &s->ISD_mem); } check_reserved_space(&start, &length); length = 0x1000; /* Map new address */ DPRINTF("ISD: "TARGET_FMT_plx"@"TARGET_FMT_plx " -> "TARGET_FMT_plx"@"TARGET_FMT_plx"\n", s->ISD_length, s->ISD_start, length, start); s->ISD_start = start; s->ISD_length = length; memory_region_add_subregion(get_system_memory(), s->ISD_start, &s->ISD_mem); } | 16,502 |
0 | void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner, AcpiCpuHotplug *gpe_cpu, uint16_t base) { CPUState *cpu; CPU_FOREACH(cpu) { acpi_set_cpu_present_bit(gpe_cpu, cpu, &error_abort); } memory_region_init_io(&gpe_cpu->io, owner, &AcpiCpuHotplug_ops, gpe_cpu, "acpi-cpu-hotplug", ACPI_GPE_PROC_LEN); memory_region_add_subregion(parent, base, &gpe_cpu->io); gpe_cpu->device = owner; } | 16,504 |
0 | void helper_fcmp_eq_FT(CPUSH4State *env, float32 t0, float32 t1) { int relation; set_float_exception_flags(0, &env->fp_status); relation = float32_compare(t0, t1, &env->fp_status); if (unlikely(relation == float_relation_unordered)) { update_fpscr(env, GETPC()); } else { env->sr_t = (relation == float_relation_equal); } } | 16,505 |
0 | static size_t handle_aiocb_rw_linear(struct qemu_paiocb *aiocb, char *buf) { size_t offset = 0; size_t len; while (offset < aiocb->aio_nbytes) { if (aiocb->aio_type == QEMU_PAIO_WRITE) len = pwrite(aiocb->aio_fildes, (const char *)buf + offset, aiocb->aio_nbytes - offset, aiocb->aio_offset + offset); else len = pread(aiocb->aio_fildes, buf + offset, aiocb->aio_nbytes - offset, aiocb->aio_offset + offset); if (len == -1 && errno == EINTR) continue; else if (len == -1) { offset = -errno; break; } else if (len == 0) break; offset += len; } return offset; } | 16,506 |
0 | char *vnc_display_local_addr(const char *id) { VncDisplay *vs = vnc_display_find(id); SocketAddress *addr; char *ret; Error *err = NULL; assert(vs); addr = qio_channel_socket_get_local_address(vs->lsock, &err); if (!addr) { return NULL; } if (addr->type != SOCKET_ADDRESS_KIND_INET) { qapi_free_SocketAddress(addr); return NULL; } ret = g_strdup_printf("%s;%s", addr->u.inet->host, addr->u.inet->port); qapi_free_SocketAddress(addr); return ret; } | 16,509 |
0 | static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) { SCSIDiskReq *r = data->r; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint64_t sector_num; uint32_t nb_sectors; assert(r->req.aiocb == NULL); if (r->req.io_canceled) { scsi_req_cancel_complete(&r->req); goto done; } if (ret < 0) { if (scsi_handle_rw_error(r, -ret, false)) { goto done; } } if (data->count > 0) { sector_num = ldq_be_p(&data->inbuf[0]); nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; if (!check_lba_range(s, sector_num, nb_sectors)) { scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); goto done; } r->req.aiocb = blk_aio_discard(s->qdev.conf.blk, sector_num * (s->qdev.blocksize / 512), nb_sectors * (s->qdev.blocksize / 512), scsi_unmap_complete, data); data->count--; data->inbuf += 16; return; } scsi_req_complete(&r->req, GOOD); done: scsi_req_unref(&r->req); g_free(data); } | 16,512 |
0 | void do_savevm(Monitor *mon, const QDict *qdict) { DriveInfo *dinfo; BlockDriverState *bs, *bs1; QEMUSnapshotInfo sn1, *sn = &sn1, old_sn1, *old_sn = &old_sn1; int must_delete, ret; QEMUFile *f; int saved_vm_running; uint32_t vm_state_size; #ifdef _WIN32 struct _timeb tb; #else struct timeval tv; #endif const char *name = qdict_get_try_str(qdict, "name"); bs = get_bs_snapshots(); if (!bs) { monitor_printf(mon, "No block device can accept snapshots\n"); return; } /* ??? Should this occur after vm_stop? */ qemu_aio_flush(); saved_vm_running = vm_running; vm_stop(0); must_delete = 0; if (name) { ret = bdrv_snapshot_find(bs, old_sn, name); if (ret >= 0) { must_delete = 1; } } memset(sn, 0, sizeof(*sn)); if (must_delete) { pstrcpy(sn->name, sizeof(sn->name), old_sn->name); pstrcpy(sn->id_str, sizeof(sn->id_str), old_sn->id_str); } else { if (name) pstrcpy(sn->name, sizeof(sn->name), name); } /* fill auxiliary fields */ #ifdef _WIN32 _ftime(&tb); sn->date_sec = tb.time; sn->date_nsec = tb.millitm * 1000000; #else gettimeofday(&tv, NULL); sn->date_sec = tv.tv_sec; sn->date_nsec = tv.tv_usec * 1000; #endif sn->vm_clock_nsec = qemu_get_clock(vm_clock); /* save the VM state */ f = qemu_fopen_bdrv(bs, 1); if (!f) { monitor_printf(mon, "Could not open VM state file\n"); goto the_end; } ret = qemu_savevm_state(f); vm_state_size = qemu_ftell(f); qemu_fclose(f); if (ret < 0) { monitor_printf(mon, "Error %d while writing VM\n", ret); goto the_end; } /* create the snapshots */ TAILQ_FOREACH(dinfo, &drives, next) { bs1 = dinfo->bdrv; if (bdrv_has_snapshot(bs1)) { if (must_delete) { ret = bdrv_snapshot_delete(bs1, old_sn->id_str); if (ret < 0) { monitor_printf(mon, "Error while deleting snapshot on '%s'\n", bdrv_get_device_name(bs1)); } } /* Write VM state size only to the image that contains the state */ sn->vm_state_size = (bs == bs1 ? vm_state_size : 0); ret = bdrv_snapshot_create(bs1, sn); if (ret < 0) { monitor_printf(mon, "Error while creating snapshot on '%s'\n", bdrv_get_device_name(bs1)); } } } the_end: if (saved_vm_running) vm_start(); } | 16,513 |
0 | static int ast_write_packet(AVFormatContext *s, AVPacket *pkt) { AVIOContext *pb = s->pb; ASTMuxContext *ast = s->priv_data; AVCodecContext *enc = s->streams[0]->codec; int size = pkt->size / enc->channels; if (enc->frame_number == 1) ast->fbs = size; ffio_wfourcc(pb, "BLCK"); avio_wb32(pb, size); /* Block size */ /* padding */ avio_wb64(pb, 0); avio_wb64(pb, 0); avio_wb64(pb, 0); avio_write(pb, pkt->data, pkt->size); return 0; } | 16,514 |
0 | static bool object_create_initial(const char *type) { if (g_str_equal(type, "rng-egd")) { return false; } /* * return false for concrete netfilters since * they depend on netdevs already existing */ if (g_str_equal(type, "filter-buffer") || g_str_equal(type, "filter-dump") || g_str_equal(type, "filter-mirror") || g_str_equal(type, "filter-redirector") || g_str_equal(type, "colo-compare") || g_str_equal(type, "filter-rewriter")) { return false; } /* Memory allocation by backends needs to be done * after configure_accelerator() (due to the tcg_enabled() * checks at memory_region_init_*()). * * Also, allocation of large amounts of memory may delay * chardev initialization for too long, and trigger timeouts * on software that waits for a monitor socket to be created * (e.g. libvirt). */ if (g_str_has_prefix(type, "memory-backend-")) { return false; } return true; } | 16,516 |
0 | static int xio3130_upstream_initfn(PCIDevice *d) { PCIBridge* br = DO_UPCAST(PCIBridge, dev, d); PCIEPort *p = DO_UPCAST(PCIEPort, br, br); int rc; int tmp; rc = pci_bridge_initfn(d); if (rc < 0) { return rc; } pcie_port_init_reg(d); pci_config_set_vendor_id(d->config, PCI_VENDOR_ID_TI); pci_config_set_device_id(d->config, PCI_DEVICE_ID_TI_XIO3130U); d->config[PCI_REVISION_ID] = XIO3130_REVISION; rc = msi_init(d, XIO3130_MSI_OFFSET, XIO3130_MSI_NR_VECTOR, XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_64BIT, XIO3130_MSI_SUPPORTED_FLAGS & PCI_MSI_FLAGS_MASKBIT); if (rc < 0) { goto err_bridge; } rc = pci_bridge_ssvid_init(d, XIO3130_SSVID_OFFSET, XIO3130_SSVID_SVID, XIO3130_SSVID_SSID); if (rc < 0) { goto err_bridge; } rc = pcie_cap_init(d, XIO3130_EXP_OFFSET, PCI_EXP_TYPE_UPSTREAM, p->port); if (rc < 0) { goto err_msi; } pcie_cap_flr_init(d); pcie_cap_deverr_init(d); rc = pcie_aer_init(d, XIO3130_AER_OFFSET); if (rc < 0) { goto err; } return 0; err: pcie_cap_exit(d); err_msi: msi_uninit(d); err_bridge: tmp = pci_bridge_exitfn(d); assert(!tmp); return rc; } | 16,517 |
0 | void event_notifier_set_handler(EventNotifier *e, EventNotifierHandler *handler) { iohandler_init(); aio_set_event_notifier(iohandler_ctx, e, false, handler, NULL); } | 16,518 |
0 | static void nic_cleanup(NetClientState *nc) { dp8393xState *s = qemu_get_nic_opaque(nc); memory_region_del_subregion(s->address_space, &s->mmio); memory_region_destroy(&s->mmio); timer_del(s->watchdog); timer_free(s->watchdog); g_free(s); } | 16,519 |
0 | static void test_visitor_in_int_overflow(TestInputVisitorData *data, const void *unused) { int64_t res = 0; Error *err = NULL; Visitor *v; /* this will overflow a Qint/int64, so should be deserialized into * a QFloat/double field instead, leading to an error if we pass it * to visit_type_int. confirm this. */ v = visitor_input_test_init(data, "%f", DBL_MAX); visit_type_int(v, NULL, &res, &err); error_free_or_abort(&err); } | 16,520 |
0 | int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) { struct kvm_msi msi; KVMMSIRoute *route; if (s->direct_msi) { msi.address_lo = (uint32_t)msg.address; msi.address_hi = msg.address >> 32; msi.data = msg.data; msi.flags = 0; memset(msi.pad, 0, sizeof(msi.pad)); return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi); } route = kvm_lookup_msi_route(s, msg); if (!route) { int virq; virq = kvm_irqchip_get_virq(s); if (virq < 0) { return virq; } route = g_malloc(sizeof(KVMMSIRoute)); route->kroute.gsi = virq; route->kroute.type = KVM_IRQ_ROUTING_MSI; route->kroute.flags = 0; route->kroute.u.msi.address_lo = (uint32_t)msg.address; route->kroute.u.msi.address_hi = msg.address >> 32; route->kroute.u.msi.data = msg.data; kvm_add_routing_entry(s, &route->kroute); QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route, entry); } assert(route->kroute.type == KVM_IRQ_ROUTING_MSI); return kvm_set_irq(s, route->kroute.gsi, 1); } | 16,522 |
0 | int css_do_tsch(SubchDev *sch, IRB *target_irb) { SCSW *s = &sch->curr_status.scsw; PMCW *p = &sch->curr_status.pmcw; uint16_t stctl; uint16_t fctl; uint16_t actl; IRB irb; int ret; if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) { ret = 3; goto out; } stctl = s->ctrl & SCSW_CTRL_MASK_STCTL; fctl = s->ctrl & SCSW_CTRL_MASK_FCTL; actl = s->ctrl & SCSW_CTRL_MASK_ACTL; /* Prepare the irb for the guest. */ memset(&irb, 0, sizeof(IRB)); /* Copy scsw from current status. */ memcpy(&irb.scsw, s, sizeof(SCSW)); if (stctl & SCSW_STCTL_STATUS_PEND) { if (s->cstat & (SCSW_CSTAT_DATA_CHECK | SCSW_CSTAT_CHN_CTRL_CHK | SCSW_CSTAT_INTF_CTRL_CHK)) { irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF; irb.esw[0] = 0x04804000; } else { irb.esw[0] = 0x00800000; } /* If a unit check is pending, copy sense data. */ if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) && (p->chars & PMCW_CHARS_MASK_CSENSE)) { irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL; memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data)); irb.esw[1] = 0x01000000 | (sizeof(sch->sense_data) << 8); } } /* Store the irb to the guest. */ copy_irb_to_guest(target_irb, &irb, p); /* Clear conditions on subchannel, if applicable. */ if (stctl & SCSW_STCTL_STATUS_PEND) { s->ctrl &= ~SCSW_CTRL_MASK_STCTL; if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) || ((fctl & SCSW_FCTL_HALT_FUNC) && (actl & SCSW_ACTL_SUSP))) { s->ctrl &= ~SCSW_CTRL_MASK_FCTL; } if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) { s->flags &= ~SCSW_FLAGS_MASK_PNO; s->ctrl &= ~(SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_HALT_PEND | SCSW_ACTL_CLEAR_PEND | SCSW_ACTL_SUSP); } else { if ((actl & SCSW_ACTL_SUSP) && (fctl & SCSW_FCTL_START_FUNC)) { s->flags &= ~SCSW_FLAGS_MASK_PNO; if (fctl & SCSW_FCTL_HALT_FUNC) { s->ctrl &= ~(SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_HALT_PEND | SCSW_ACTL_CLEAR_PEND | SCSW_ACTL_SUSP); } else { s->ctrl &= ~SCSW_ACTL_RESUME_PEND; } } } /* Clear pending sense data. */ if (p->chars & PMCW_CHARS_MASK_CSENSE) { memset(sch->sense_data, 0 , sizeof(sch->sense_data)); } } ret = ((stctl & SCSW_STCTL_STATUS_PEND) == 0); out: return ret; } | 16,523 |
0 | static void blend_image_rgb_pm(AVFilterContext *ctx, AVFrame *dst, const AVFrame *src, int x, int y) { blend_image_packed_rgb(ctx, dst, src, 0, x, y, 1); } | 16,525 |
0 | static void cpudef_init(void) { #if defined(cpudef_setup) cpudef_setup(); /* parse cpu definitions in target config file */ #endif } | 16,526 |
0 | uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2) { CPU_DoubleU farg1, farg2; farg1.ll = arg1; farg2.ll = arg2; if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) && float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) { /* Magnitude subtraction of infinities */ farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI); } else { if (unlikely(float64_is_signaling_nan(farg1.d) || float64_is_signaling_nan(farg2.d))) { /* sNaN addition */ fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN); } farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status); } return farg1.ll; } | 16,528 |
0 | static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque) { CURLState *s = ((CURLState*)opaque); size_t realsize = size * nmemb; int i; DPRINTF("CURL: Just reading %zd bytes\n", realsize); if (!s || !s->orig_buf) goto read_end; if (s->buf_off >= s->buf_len) { /* buffer full, read nothing */ return 0; } realsize = MIN(realsize, s->buf_len - s->buf_off); memcpy(s->orig_buf + s->buf_off, ptr, realsize); s->buf_off += realsize; for(i=0; i<CURL_NUM_ACB; i++) { CURLAIOCB *acb = s->acb[i]; if (!acb) continue; if ((s->buf_off >= acb->end)) { qemu_iovec_from_buf(acb->qiov, 0, s->orig_buf + acb->start, acb->end - acb->start); acb->common.cb(acb->common.opaque, 0); qemu_aio_release(acb); s->acb[i] = NULL; } } read_end: return realsize; } | 16,529 |
0 | static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log) { uint64_t features = dev->acked_features; int r; if (enable_log) { features |= 0x1ULL << VHOST_F_LOG_ALL; } r = dev->vhost_ops->vhost_set_features(dev, features); if (r < 0) { VHOST_OPS_DEBUG("vhost_set_features failed"); } return r < 0 ? -errno : 0; } | 16,530 |
0 | static int ccid_card_init(DeviceState *qdev) { CCIDCardState *card = CCID_CARD(qdev); USBDevice *dev = USB_DEVICE(qdev->parent_bus->parent); USBCCIDState *s = USB_CCID_DEV(dev); int ret = 0; if (card->slot != 0) { error_report("Warning: usb-ccid supports one slot, can't add %d", card->slot); return -1; } if (s->card != NULL) { error_report("Warning: usb-ccid card already full, not adding"); return -1; } ret = ccid_card_initfn(card); if (ret == 0) { s->card = card; } return ret; } | 16,531 |
0 | uint32_t HELPER(lpdbr)(CPUS390XState *env, uint32_t f1, uint32_t f2) { float64 v1; float64 v2 = env->fregs[f2].d; v1 = float64_abs(v2); env->fregs[f1].d = v1; return set_cc_nz_f64(v1); } | 16,532 |
0 | static inline int get_segment(CPUState *env, mmu_ctx_t *ctx, target_ulong eaddr, int rw, int type) { target_phys_addr_t sdr, hash, mask, sdr_mask, htab_mask; target_ulong sr, vsid, vsid_mask, pgidx, page_mask; int ds, vsid_sh, sdr_sh, pr, target_page_bits; int ret, ret2; pr = msr_pr; #if defined(TARGET_PPC64) if (env->mmu_model & POWERPC_MMU_64) { ppc_slb_t *slb; LOG_MMU("Check SLBs\n"); slb = slb_lookup(env, eaddr); if (!slb) { return -5; } vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT; page_mask = ~SEGMENT_MASK_256M; target_page_bits = (slb->vsid & SLB_VSID_L) ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS; ctx->key = !!(pr ? (slb->vsid & SLB_VSID_KP) : (slb->vsid & SLB_VSID_KS)); ds = 0; ctx->nx = !!(slb->vsid & SLB_VSID_N); ctx->eaddr = eaddr; vsid_mask = 0x00003FFFFFFFFF80ULL; vsid_sh = 7; sdr_sh = 18; sdr_mask = 0x3FF80; } else #endif /* defined(TARGET_PPC64) */ { sr = env->sr[eaddr >> 28]; page_mask = 0x0FFFFFFF; ctx->key = (((sr & 0x20000000) && (pr != 0)) || ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; ds = sr & 0x80000000 ? 1 : 0; ctx->nx = sr & 0x10000000 ? 1 : 0; vsid = sr & 0x00FFFFFF; vsid_mask = 0x01FFFFC0; vsid_sh = 6; sdr_sh = 16; sdr_mask = 0xFFC0; target_page_bits = TARGET_PAGE_BITS; LOG_MMU("Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx " ir=%d dr=%d pr=%d %d t=%d\n", eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, (int)msr_dr, pr != 0 ? 1 : 0, rw, type); } LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", ctx->key, ds, ctx->nx, vsid); ret = -1; if (!ds) { /* Check if instruction fetch is allowed, if needed */ if (type != ACCESS_CODE || ctx->nx == 0) { /* Page address translation */ /* Primary table address */ sdr = env->sdr1; pgidx = (eaddr & page_mask) >> target_page_bits; #if defined(TARGET_PPC64) if (env->mmu_model & POWERPC_MMU_64) { htab_mask = 0x0FFFFFFF >> (28 - (sdr & 0x1F)); /* XXX: this is false for 1 TB segments */ hash = ((vsid ^ pgidx) << vsid_sh) & vsid_mask; } else #endif { htab_mask = sdr & 0x000001FF; hash = ((vsid ^ pgidx) << vsid_sh) & vsid_mask; } mask = (htab_mask << sdr_sh) | sdr_mask; LOG_MMU("sdr " TARGET_FMT_plx " sh %d hash " TARGET_FMT_plx " mask " TARGET_FMT_plx " " TARGET_FMT_lx "\n", sdr, sdr_sh, hash, mask, page_mask); ctx->pg_addr[0] = get_pgaddr(sdr, sdr_sh, hash, mask); /* Secondary table address */ hash = (~hash) & vsid_mask; LOG_MMU("sdr " TARGET_FMT_plx " sh %d hash " TARGET_FMT_plx " mask " TARGET_FMT_plx "\n", sdr, sdr_sh, hash, mask); ctx->pg_addr[1] = get_pgaddr(sdr, sdr_sh, hash, mask); #if defined(TARGET_PPC64) if (env->mmu_model & POWERPC_MMU_64) { /* Only 5 bits of the page index are used in the AVPN */ if (target_page_bits > 23) { ctx->ptem = (vsid << 12) | ((pgidx << (target_page_bits - 16)) & 0xF80); } else { ctx->ptem = (vsid << 12) | ((pgidx >> 4) & 0x0F80); } } else #endif { ctx->ptem = (vsid << 7) | (pgidx >> 10); } /* Initialize real address with an invalid value */ ctx->raddr = (target_phys_addr_t)-1ULL; if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_6xx || env->mmu_model == POWERPC_MMU_SOFT_74xx)) { /* Software TLB search */ ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type); } else { LOG_MMU("0 sdr1=" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " " "api=" TARGET_FMT_lx " hash=" TARGET_FMT_plx " pg_addr=" TARGET_FMT_plx "\n", sdr, vsid, pgidx, hash, ctx->pg_addr[0]); /* Primary table lookup */ ret = find_pte(env, ctx, 0, rw, type, target_page_bits); if (ret < 0) { /* Secondary table lookup */ if (eaddr != 0xEFFFFFFF) LOG_MMU("1 sdr1=" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " " "api=" TARGET_FMT_lx " hash=" TARGET_FMT_plx " pg_addr=" TARGET_FMT_plx "\n", sdr, vsid, pgidx, hash, ctx->pg_addr[1]); ret2 = find_pte(env, ctx, 1, rw, type, target_page_bits); if (ret2 != -1) ret = ret2; } } #if defined (DUMP_PAGE_TABLES) if (qemu_log_enabled()) { target_phys_addr_t curaddr; uint32_t a0, a1, a2, a3; qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx "\n", sdr, mask + 0x80); for (curaddr = sdr; curaddr < (sdr + mask + 0x80); curaddr += 16) { a0 = ldl_phys(curaddr); a1 = ldl_phys(curaddr + 4); a2 = ldl_phys(curaddr + 8); a3 = ldl_phys(curaddr + 12); if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n", curaddr, a0, a1, a2, a3); } } } #endif } else { LOG_MMU("No access allowed\n"); ret = -3; } } else { LOG_MMU("direct store...\n"); /* Direct-store segment : absolutely *BUGGY* for now */ switch (type) { case ACCESS_INT: /* Integer load/store : only access allowed */ break; case ACCESS_CODE: /* No code fetch is allowed in direct-store areas */ return -4; case ACCESS_FLOAT: /* Floating point load/store */ return -4; case ACCESS_RES: /* lwarx, ldarx or srwcx. */ return -4; case ACCESS_CACHE: /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */ /* Should make the instruction do no-op. * As it already do no-op, it's quite easy :-) */ ctx->raddr = eaddr; return 0; case ACCESS_EXT: /* eciwx or ecowx */ return -4; default: qemu_log("ERROR: instruction should not need " "address translation\n"); return -4; } if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) { ctx->raddr = eaddr; ret = 2; } else { ret = -2; } } return ret; } | 16,534
0 | int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab, struct virtio_gpu_ctrl_command *cmd, struct iovec **iov) { struct virtio_gpu_mem_entry *ents; size_t esize, s; int i; if (ab->nr_entries > 16384) { qemu_log_mask(LOG_GUEST_ERROR, "%s: nr_entries is too big (%d > 1024)\n", __func__, ab->nr_entries); return -1; } esize = sizeof(*ents) * ab->nr_entries; ents = g_malloc(esize); s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, sizeof(*ab), ents, esize); if (s != esize) { qemu_log_mask(LOG_GUEST_ERROR, "%s: command data size incorrect %zu vs %zu\n", __func__, s, esize); g_free(ents); return -1; } *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries); for (i = 0; i < ab->nr_entries; i++) { hwaddr len = ents[i].length; (*iov)[i].iov_len = ents[i].length; (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1); if (!(*iov)[i].iov_base || len != ents[i].length) { qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for" " resource %d element %d\n", __func__, ab->resource_id, i); virtio_gpu_cleanup_mapping_iov(*iov, i); g_free(ents); g_free(*iov); *iov = NULL; return -1; } } g_free(ents); return 0; } | 16,535 |
0 | static void l2cap_channel_close(struct l2cap_instance_s *l2cap, int cid, int source_cid) { struct l2cap_chan_s *ch = NULL; /* According to Volume 3, section 6.1.1, pg 1048 of BT Core V2.0, a * connection in CLOSED state still responds with a L2CAP_DisconnectRsp * message on an L2CAP_DisconnectReq event. */ if (unlikely(cid < L2CAP_CID_ALLOC)) { l2cap_command_reject_cid(l2cap, l2cap->last_id, L2CAP_REJ_CID_INVAL, cid, source_cid); return; } if (likely(cid >= L2CAP_CID_ALLOC && cid < L2CAP_CID_MAX)) ch = l2cap->cid[cid]; if (likely(ch)) { if (ch->remote_cid != source_cid) { fprintf(stderr, "%s: Ignoring a Disconnection Request with the " "invalid SCID %04x.\n", __func__, source_cid); return; } l2cap->cid[cid] = NULL; ch->params.close(ch->params.opaque); g_free(ch); } l2cap_disconnection_response(l2cap, cid, source_cid); } | 16,537 |
0 | void bdrv_dirty_iter_init(BdrvDirtyBitmap *bitmap, HBitmapIter *hbi) { hbitmap_iter_init(hbi, bitmap->bitmap, 0); } | 16,538 |
0 | static void lz_unpack(unsigned char *src, unsigned char *dest) { unsigned char *s; unsigned char *d; unsigned char queue[QUEUE_SIZE]; unsigned int qpos; unsigned int dataleft; unsigned int chainofs; unsigned int chainlen; unsigned int speclen; unsigned char tag; unsigned int i, j; s = src; d = dest; dataleft = LE_32(s); s += 4; memset(queue, QUEUE_SIZE, 0x20); if (LE_32(s) == 0x56781234) { s += 4; qpos = 0x111; speclen = 0xF + 3; } else { qpos = 0xFEE; speclen = 100; /* no speclen */ } while (dataleft > 0) { tag = *s++; if ((tag == 0xFF) && (dataleft > 8)) { for (i = 0; i < 8; i++) { queue[qpos++] = *d++ = *s++; qpos &= QUEUE_MASK; } dataleft -= 8; } else { for (i = 0; i < 8; i++) { if (dataleft == 0) break; if (tag & 0x01) { queue[qpos++] = *d++ = *s++; qpos &= QUEUE_MASK; dataleft--; } else { chainofs = *s++; chainofs |= ((*s & 0xF0) << 4); chainlen = (*s++ & 0x0F) + 3; if (chainlen == speclen) chainlen = *s++ + 0xF + 3; for (j = 0; j < chainlen; j++) { *d = queue[chainofs++ & QUEUE_MASK]; queue[qpos++] = *d++; qpos &= QUEUE_MASK; } dataleft -= chainlen; } tag >>= 1; } } } } | 16,539 |
0 | int qdev_prop_set_drive(DeviceState *dev, const char *name, BlockDriverState *value) { Error *err = NULL; const char *bdrv_name = value ? bdrv_get_device_name(value) : ""; object_property_set_str(OBJECT(dev), bdrv_name, name, &err); if (err) { qerror_report_err(err); error_free(err); return -1; } return 0; } | 16,540 |
0 | bool virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req) { VirtIOSCSICommon *vs = &s->parent_obj; SCSIDevice *d; int rc; rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size, sizeof(VirtIOSCSICmdResp) + vs->sense_size); if (rc < 0) { if (rc == -ENOTSUP) { virtio_scsi_fail_cmd_req(req); } else { virtio_scsi_bad_req(); } return false; } d = virtio_scsi_device_find(s, req->req.cmd.lun); if (!d) { req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; virtio_scsi_complete_cmd_req(req); return false; } if (s->dataplane_started && bdrv_get_aio_context(d->conf.bs) != s->ctx) { aio_context_acquire(s->ctx); bdrv_set_aio_context(d->conf.bs, s->ctx); aio_context_release(s->ctx); } req->sreq = scsi_req_new(d, req->req.cmd.tag, virtio_scsi_get_lun(req->req.cmd.lun), req->req.cdb, req); if (req->sreq->cmd.mode != SCSI_XFER_NONE && (req->sreq->cmd.mode != req->mode || req->sreq->cmd.xfer > req->qsgl.size)) { req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN; virtio_scsi_complete_cmd_req(req); return false; } scsi_req_ref(req->sreq); bdrv_io_plug(d->conf.bs); return true; } | 16,543 |
0 | static void dec_divu(DisasContext *dc) { int l1; LOG_DIS("divu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); if (!(dc->features & LM32_FEATURE_DIVIDE)) { qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n"); t_gen_illegal_insn(dc); return; } l1 = gen_new_label(); tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1); tcg_gen_movi_tl(cpu_pc, dc->pc); t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO); gen_set_label(l1); tcg_gen_divu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]); } | 16,544 |
0 | static void do_acpitable_option(const char *optarg) { if (acpi_table_add(optarg) < 0) { fprintf(stderr, "Wrong acpi table provided\n"); exit(1); } } | 16,545 |
0 | static void mv88w8618_audio_write(void *opaque, target_phys_addr_t offset, uint64_t value, unsigned size) { mv88w8618_audio_state *s = opaque; switch (offset) { case MP_AUDIO_PLAYBACK_MODE: if (value & MP_AUDIO_PLAYBACK_EN && !(s->playback_mode & MP_AUDIO_PLAYBACK_EN)) { s->status = 0; s->last_free = 0; s->play_pos = 0; } s->playback_mode = value; mv88w8618_audio_clock_update(s); break; case MP_AUDIO_CLOCK_DIV: s->clock_div = value; s->last_free = 0; s->play_pos = 0; mv88w8618_audio_clock_update(s); break; case MP_AUDIO_IRQ_STATUS: s->status &= ~value; break; case MP_AUDIO_IRQ_ENABLE: s->irq_enable = value; if (s->status & s->irq_enable) { qemu_irq_raise(s->irq); } break; case MP_AUDIO_TX_START_LO: s->phys_buf = (s->phys_buf & 0xFFFF0000) | (value & 0xFFFF); s->target_buffer = s->phys_buf; s->play_pos = 0; s->last_free = 0; break; case MP_AUDIO_TX_THRESHOLD: s->threshold = (value + 1) * 4; break; case MP_AUDIO_TX_START_HI: s->phys_buf = (s->phys_buf & 0xFFFF) | (value << 16); s->target_buffer = s->phys_buf; s->play_pos = 0; s->last_free = 0; break; } } | 16,546 |
0 | void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32) { CPUX86State *saved_env; saved_env = env; env = s; helper_frstor(ptr, data32); env = saved_env; } | 16,547 |
0 | static AHCIQState *ahci_boot_and_enable(void) { AHCIQState *ahci; ahci = ahci_boot(); ahci_pci_enable(ahci); ahci_hba_enable(ahci); return ahci; } | 16,548 |
0 | static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt, NetClientState *nc) { struct iovec fragment[NET_MAX_FRAG_SG_LIST]; size_t fragment_len = 0; bool more_frags = false; /* some pointers for shorter code */ void *l2_iov_base, *l3_iov_base; size_t l2_iov_len, l3_iov_len; int src_idx = NET_TX_PKT_PL_START_FRAG, dst_idx; size_t src_offset = 0; size_t fragment_offset = 0; l2_iov_base = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base; l2_iov_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len; l3_iov_base = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base; l3_iov_len = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len; /* Copy headers */ fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_base = l2_iov_base; fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_len = l2_iov_len; fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_base = l3_iov_base; fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_len = l3_iov_len; /* Put as much data as possible and send */ do { fragment_len = net_tx_pkt_fetch_fragment(pkt, &src_idx, &src_offset, fragment, &dst_idx); more_frags = (fragment_offset + fragment_len < pkt->payload_len); eth_setup_ip4_fragmentation(l2_iov_base, l2_iov_len, l3_iov_base, l3_iov_len, fragment_len, fragment_offset, more_frags); eth_fix_ip4_checksum(l3_iov_base, l3_iov_len); net_tx_pkt_sendv(pkt, nc, fragment, dst_idx); fragment_offset += fragment_len; } while (more_frags); return true; } | 16,549 |
0 | static int nvdec_mpeg12_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) { NVDECContext *ctx = avctx->internal->hwaccel_priv_data; void *tmp; tmp = av_fast_realloc(ctx->slice_offsets, &ctx->slice_offsets_allocated, (ctx->nb_slices + 1) * sizeof(*ctx->slice_offsets)); if (!tmp) return AVERROR(ENOMEM); ctx->slice_offsets = tmp; if (!ctx->bitstream) ctx->bitstream = (uint8_t*)buffer; ctx->slice_offsets[ctx->nb_slices] = buffer - ctx->bitstream; ctx->bitstream_len += size; ctx->nb_slices++; return 0; } | 16,550 |
0 | void stq_phys(target_phys_addr_t addr, uint64_t val) { val = tswap64(val); cpu_physical_memory_write(addr, &val, 8); } | 16,551 |
0 | static int do_load_save_vmstate(BDRVSheepdogState *s, uint8_t *data, int64_t pos, int size, int load) { bool create; int fd, ret = 0, remaining = size; unsigned int data_len; uint64_t vmstate_oid; uint32_t vdi_index; uint64_t offset; fd = connect_to_sdog(s->addr, s->port); if (fd < 0) { return fd; } while (remaining) { vdi_index = pos / SD_DATA_OBJ_SIZE; offset = pos % SD_DATA_OBJ_SIZE; data_len = MIN(remaining, SD_DATA_OBJ_SIZE - offset); vmstate_oid = vid_to_vmstate_oid(s->inode.vdi_id, vdi_index); create = (offset == 0); if (load) { ret = read_object(fd, (char *)data, vmstate_oid, s->inode.nr_copies, data_len, offset, s->cache_enabled); } else { ret = write_object(fd, (char *)data, vmstate_oid, s->inode.nr_copies, data_len, offset, create, s->cache_enabled); } if (ret < 0) { error_report("failed to save vmstate %s", strerror(errno)); goto cleanup; } pos += data_len; data += data_len; remaining -= data_len; } ret = size; cleanup: closesocket(fd); return ret; } | 16,552 |
0 | int ioinst_handle_stsch(CPUS390XState *env, uint64_t reg1, uint32_t ipb) { int cssid, ssid, schid, m; SubchDev *sch; uint64_t addr; int cc; SCHIB *schib; hwaddr len = sizeof(*schib); if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { program_interrupt(env, PGM_OPERAND, 2); return -EIO; } trace_ioinst_sch_id("stsch", cssid, ssid, schid); addr = decode_basedisp_s(env, ipb); if (addr & 3) { program_interrupt(env, PGM_SPECIFICATION, 2); return -EIO; } schib = s390_cpu_physical_memory_map(env, addr, &len, 1); if (!schib || len != sizeof(*schib)) { program_interrupt(env, PGM_ADDRESSING, 2); cc = -EIO; goto out; } sch = css_find_subch(m, cssid, ssid, schid); if (sch) { if (css_subch_visible(sch)) { css_do_stsch(sch, schib); cc = 0; } else { /* Indicate no more subchannels in this css/ss */ cc = 3; } } else { if (css_schid_final(m, cssid, ssid, schid)) { cc = 3; /* No more subchannels in this css/ss */ } else { /* Store an empty schib. */ memset(schib, 0, sizeof(*schib)); cc = 0; } } out: s390_cpu_physical_memory_unmap(env, schib, len, 1); return cc; } | 16,553 |
0 | int kvm_on_sigbus(int code, void *addr) { #if defined(KVM_CAP_MCE) if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) { uint64_t status; void *vaddr; ram_addr_t ram_addr; target_phys_addr_t paddr; /* Hope we are lucky for AO MCE */ vaddr = addr; if (qemu_ram_addr_from_host(vaddr, &ram_addr) || !kvm_physical_memory_addr_from_ram(first_cpu->kvm_state, ram_addr, &paddr)) { fprintf(stderr, "Hardware memory error for memory used by " "QEMU itself instead of guest system!: %p\n", addr); return 0; } status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S | 0xc0; kvm_inject_x86_mce(first_cpu, 9, status, MCG_STATUS_MCIP | MCG_STATUS_RIPV, paddr, (MCM_ADDR_PHYS << 6) | 0xc, ABORT_ON_ERROR); kvm_mce_broadcast_rest(first_cpu); } else #endif { if (code == BUS_MCEERR_AO) { return 0; } else if (code == BUS_MCEERR_AR) { hardware_memory_error(); } else { return 1; } } return 0; } | 16,554 |
0 | void bdrv_get_backing_filename(BlockDriverState *bs, char *filename, int filename_size) { pstrcpy(filename, filename_size, bs->backing_file); } | 16,555 |
0 | static int kvm_get_fpu(X86CPU *cpu) { CPUX86State *env = &cpu->env; struct kvm_fpu fpu; int i, ret; ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu); if (ret < 0) { return ret; } env->fpstt = (fpu.fsw >> 11) & 7; env->fpus = fpu.fsw; env->fpuc = fpu.fcw; env->fpop = fpu.last_opcode; env->fpip = fpu.last_ip; env->fpdp = fpu.last_dp; for (i = 0; i < 8; ++i) { env->fptags[i] = !((fpu.ftwx >> i) & 1); } memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs); memcpy(env->xmm_regs, fpu.xmm, sizeof env->xmm_regs); env->mxcsr = fpu.mxcsr; return 0; } | 16,556 |
0 | static int ehci_fill_queue(EHCIPacket *p) { EHCIQueue *q = p->queue; EHCIqtd qtd = p->qtd; uint32_t qtdaddr; for (;;) { if (NLPTR_TBIT(qtd.altnext) == 0) { break; } if (NLPTR_TBIT(qtd.next) != 0) { break; } qtdaddr = qtd.next; get_dwords(q->ehci, NLPTR_GET(qtdaddr), (uint32_t *) &qtd, sizeof(EHCIqtd) >> 2); ehci_trace_qtd(q, NLPTR_GET(qtdaddr), &qtd); if (!(qtd.token & QTD_TOKEN_ACTIVE)) { break; } p = ehci_alloc_packet(q); p->qtdaddr = qtdaddr; p->qtd = qtd; p->usb_status = ehci_execute(p, "queue"); if (p->usb_status == USB_RET_PROCERR) { break; } assert(p->usb_status == USB_RET_ASYNC); p->async = EHCI_ASYNC_INFLIGHT; } return p->usb_status; } | 16,557 |
0 | int64_t qemu_file_get_rate_limit(QEMUFile *f) { if (f->ops->get_rate_limit) return f->ops->get_rate_limit(f->opaque); return 0; } | 16,559 |
0 | static int qcow_read(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sectors) { BDRVQcowState *s = bs->opaque; int ret, index_in_cluster, n, n1; uint64_t cluster_offset; while (nb_sectors > 0) { n = nb_sectors; cluster_offset = qcow2_get_cluster_offset(bs, sector_num << 9, &n); index_in_cluster = sector_num & (s->cluster_sectors - 1); if (!cluster_offset) { if (bs->backing_hd) { /* read from the base image */ n1 = qcow2_backing_read1(bs->backing_hd, sector_num, buf, n); if (n1 > 0) { BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING); ret = bdrv_read(bs->backing_hd, sector_num, buf, n1); if (ret < 0) return -1; } } else { memset(buf, 0, 512 * n); } } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) { if (qcow2_decompress_cluster(bs, cluster_offset) < 0) return -1; memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n); } else { BLKDBG_EVENT(bs->file, BLKDBG_READ); ret = bdrv_pread(bs->file, cluster_offset + index_in_cluster * 512, buf, n * 512); if (ret != n * 512) return -1; if (s->crypt_method) { qcow2_encrypt_sectors(s, sector_num, buf, buf, n, 0, &s->aes_decrypt_key); } } nb_sectors -= n; sector_num += n; buf += n * 512; } return 0; } | 16,560 |
0 | yuv2rgb48_1_c_template(SwsContext *c, const int32_t *buf0, const int32_t *ubuf[2], const int32_t *vbuf[2], const int32_t *abuf0, uint16_t *dest, int dstW, int uvalpha, int y, enum AVPixelFormat target) { const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0]; int i; if (uvalpha < 2048) { for (i = 0; i < ((dstW + 1) >> 1); i++) { int Y1 = (buf0[i * 2] ) >> 2; int Y2 = (buf0[i * 2 + 1]) >> 2; int U = (ubuf0[i] + (-128 << 11)) >> 2; int V = (vbuf0[i] + (-128 << 11)) >> 2; int R, G, B; Y1 -= c->yuv2rgb_y_offset; Y2 -= c->yuv2rgb_y_offset; Y1 *= c->yuv2rgb_y_coeff; Y2 *= c->yuv2rgb_y_coeff; Y1 += 1 << 13; Y2 += 1 << 13; R = V * c->yuv2rgb_v2r_coeff; G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; B = U * c->yuv2rgb_u2b_coeff; output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); dest += 6; } } else { const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1]; for (i = 0; i < ((dstW + 1) >> 1); i++) { int Y1 = (buf0[i * 2] ) >> 2; int Y2 = (buf0[i * 2 + 1]) >> 2; int U = (ubuf0[i] + ubuf1[i] + (-128 << 12)) >> 3; int V = (vbuf0[i] + vbuf1[i] + (-128 << 12)) >> 3; int R, G, B; Y1 -= c->yuv2rgb_y_offset; Y2 -= c->yuv2rgb_y_offset; Y1 *= c->yuv2rgb_y_coeff; Y2 *= c->yuv2rgb_y_coeff; Y1 += 1 << 13; Y2 += 1 << 13; R = V * c->yuv2rgb_v2r_coeff; G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; B = U * c->yuv2rgb_u2b_coeff; output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); dest += 6; } } } | 16,561 |
0 | void ff_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref) { ff_start_frame(link->dst->outputs[0], picref); } | 16,562 |
0 | void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1) { int cssid, ssid, schid, m; SubchDev *sch; int ret = -ENODEV; int cc; if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) { program_interrupt(&cpu->env, PGM_OPERAND, 2); return; } trace_ioinst_sch_id("hsch", cssid, ssid, schid); sch = css_find_subch(m, cssid, ssid, schid); if (sch && css_subch_visible(sch)) { ret = css_do_hsch(sch); } switch (ret) { case -ENODEV: cc = 3; break; case -EBUSY: cc = 2; break; case 0: cc = 0; break; default: cc = 1; break; } setcc(cpu, cc); } | 16,563 |
0 | static uint64_t memory_region_dispatch_read1(MemoryRegion *mr, hwaddr addr, unsigned size) { uint64_t data = 0; if (!memory_region_access_valid(mr, addr, size, false)) { return -1U; /* FIXME: better signalling */ } if (!mr->ops->read) { return mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr); } /* FIXME: support unaligned access */ access_with_adjusted_size(addr, &data, size, mr->ops->impl.min_access_size, mr->ops->impl.max_access_size, memory_region_read_accessor, mr); return data; } | 16,564 |
0 | void virtio_queue_set_notification(VirtQueue *vq, int enable) { vq->notification = enable; if (virtio_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) { vring_set_avail_event(vq, vring_avail_idx(vq)); } else if (enable) { vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY); } else { vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY); } if (enable) { /* Expose avail event/used flags before caller checks the avail idx. */ smp_mb(); } } | 16,565 |
0 | static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask) { unsigned offset = vector * PCI_MSIX_ENTRY_SIZE; uint32_t *data = (uint32_t *)&dev->msix_table[offset + PCI_MSIX_ENTRY_DATA]; /* MSIs on Xen can be remapped into pirqs. In those cases, masking * and unmasking go through the PV evtchn path. */ if (xen_is_pirq_msi(*data)) { return false; } return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] & PCI_MSIX_ENTRY_CTRL_MASKBIT; } | 16,567 |
0 | static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset, int type) { LinuxAioState *s = laiocb->ctx; struct iocb *iocbs = &laiocb->iocb; QEMUIOVector *qiov = laiocb->qiov; switch (type) { case QEMU_AIO_WRITE: io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset); break; case QEMU_AIO_READ: io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset); break; /* Currently Linux kernel does not support other operations */ default: fprintf(stderr, "%s: invalid AIO request type 0x%x.\n", __func__, type); return -EIO; } io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e)); QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next); s->io_q.n++; if (!s->io_q.blocked && (!s->io_q.plugged || s->io_q.n >= MAX_QUEUED_IO)) { ioq_submit(s); } return 0; } | 16,568 |
0 | static void av_always_inline filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, const int16_t bS[7], int bsi, int qp, int intra ) { const int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8); int index_a = qp - qp_bd_offset + h->slice_alpha_c0_offset; int alpha = alpha_table[index_a]; int beta = beta_table[qp - qp_bd_offset + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 || !intra ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0*bsi]] + 1; tc[1] = tc0_table[index_a][bS[1*bsi]] + 1; tc[2] = tc0_table[index_a][bS[2*bsi]] + 1; tc[3] = tc0_table[index_a][bS[3*bsi]] + 1; h->h264dsp.h264_h_loop_filter_chroma_mbaff(pix, stride, alpha, beta, tc); } else { h->h264dsp.h264_h_loop_filter_chroma_mbaff_intra(pix, stride, alpha, beta); } } | 16,571 |
0 | static av_cold int encode_init(AVCodecContext* avc_context) { th_info t_info; th_comment t_comment; ogg_packet o_packet; unsigned int offset; TheoraContext *h = avc_context->priv_data; uint32_t gop_size = avc_context->gop_size; /* Set up the theora_info struct */ th_info_init(&t_info); t_info.frame_width = FFALIGN(avc_context->width, 16); t_info.frame_height = FFALIGN(avc_context->height, 16); t_info.pic_width = avc_context->width; t_info.pic_height = avc_context->height; t_info.pic_x = 0; t_info.pic_y = 0; /* Swap numerator and denominator as time_base in AVCodecContext gives the * time period between frames, but theora_info needs the framerate. */ t_info.fps_numerator = avc_context->time_base.den; t_info.fps_denominator = avc_context->time_base.num; if (avc_context->sample_aspect_ratio.num) { t_info.aspect_numerator = avc_context->sample_aspect_ratio.num; t_info.aspect_denominator = avc_context->sample_aspect_ratio.den; } else { t_info.aspect_numerator = 1; t_info.aspect_denominator = 1; } if (avc_context->color_primaries == AVCOL_PRI_BT470M) t_info.colorspace = TH_CS_ITU_REC_470M; else if (avc_context->color_primaries == AVCOL_PRI_BT470BG) t_info.colorspace = TH_CS_ITU_REC_470BG; else t_info.colorspace = TH_CS_UNSPECIFIED; if (avc_context->pix_fmt == AV_PIX_FMT_YUV420P) t_info.pixel_fmt = TH_PF_420; else if (avc_context->pix_fmt == AV_PIX_FMT_YUV422P) t_info.pixel_fmt = TH_PF_422; else if (avc_context->pix_fmt == AV_PIX_FMT_YUV444P) t_info.pixel_fmt = TH_PF_444; else { av_log(avc_context, AV_LOG_ERROR, "Unsupported pix_fmt\n"); return -1; } av_pix_fmt_get_chroma_sub_sample(avc_context->pix_fmt, &h->uv_hshift, &h->uv_vshift); if (avc_context->flags & CODEC_FLAG_QSCALE) { /* to be constant with the libvorbis implementation, clip global_quality to 0 - 10 Theora accepts a quality parameter p, which is: * 0 <= p <=63 * an int value */ t_info.quality = av_clipf(avc_context->global_quality / (float)FF_QP2LAMBDA, 0, 10) * 6.3; t_info.target_bitrate = 0; } else { t_info.target_bitrate = avc_context->bit_rate; t_info.quality = 0; } /* Now initialise libtheora */ h->t_state = th_encode_alloc(&t_info); if (!h->t_state) { av_log(avc_context, AV_LOG_ERROR, "theora_encode_init failed\n"); return -1; } h->keyframe_mask = (1 << t_info.keyframe_granule_shift) - 1; /* Clear up theora_info struct */ th_info_clear(&t_info); if (th_encode_ctl(h->t_state, TH_ENCCTL_SET_KEYFRAME_FREQUENCY_FORCE, &gop_size, sizeof(gop_size))) { av_log(avc_context, AV_LOG_ERROR, "Error setting GOP size\n"); return -1; } // need to enable 2 pass (via TH_ENCCTL_2PASS_) before encoding headers if (avc_context->flags & CODEC_FLAG_PASS1) { if (get_stats(avc_context, 0)) return -1; } else if (avc_context->flags & CODEC_FLAG_PASS2) { if (submit_stats(avc_context)) return -1; } /* Output first header packet consisting of theora header, comment, and tables. Each one is prefixed with a 16bit size, then they are concatenated together into libavcodec's extradata. */ offset = 0; /* Headers */ th_comment_init(&t_comment); while (th_encode_flushheader(h->t_state, &t_comment, &o_packet)) if (concatenate_packet(&offset, avc_context, &o_packet)) return -1; th_comment_clear(&t_comment); /* Set up the output AVFrame */ avc_context->coded_frame = av_frame_alloc(); if (!avc_context->coded_frame) return AVERROR(ENOMEM); return 0; } | 16,572 |