Dataset columns (name, type, value range):
    project      stringclasses   2 values (FFmpeg, qemu)
    commit_id    stringlengths   40 to 40
    target       int64           0 to 1
    func         stringlengths   26 to 142k
    idx          int64           0 to 27.3k
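Each record that follows lists these fields in order: project (the codebase, qemu or FFmpeg), commit_id (a 40-character commit hash), target (the 0/1 label), func (the C source of a single function), and idx (the row index). As a rough guide to the shape of a record, here is a minimal C sketch; the struct name, the helper function, and the idea that the dump has already been parsed into an array of such records are assumptions for illustration, not part of the dataset itself.

#include <stdio.h>
#include <string.h>

struct record {
    const char *project;       /* one of two values: "qemu" or "FFmpeg" */
    char        commit_id[41]; /* 40-character commit hash plus terminating NUL */
    int         target;        /* label: 0 or 1 */
    const char *func;          /* C source of one function, 26 to ~142k chars */
    long        idx;           /* row index, 0 to ~27.3k */
};

/* Print commit hash and idx of every record labelled 1 for a given project. */
static void list_flagged(const struct record *rows, size_t n, const char *project)
{
    size_t i;
    for (i = 0; i < n; i++) {
        if (rows[i].target == 1 && strcmp(rows[i].project, project) == 0)
            printf("%s %ld\n", rows[i].commit_id, rows[i].idx);
    }
}

Under these assumptions, list_flagged(rows, n, "qemu") would print the commit hash and idx of the qemu records below whose target is 1.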
qemu
2e14072f9e859272c7b94b8e189bd30bb4954aa1
1
static void core_rtas_register_types(void) { spapr_rtas_register(RTAS_DISPLAY_CHARACTER, "display-character", rtas_display_character); spapr_rtas_register(RTAS_GET_TIME_OF_DAY, "get-time-of-day", rtas_get_time_of_day); spapr_rtas_register(RTAS_SET_TIME_OF_DAY, "set-time-of-day", rtas_set_time_of_day); spapr_rtas_register(RTAS_POWER_OFF, "power-off", rtas_power_off); spapr_rtas_register(RTAS_SYSTEM_REBOOT, "system-reboot", rtas_system_reboot); spapr_rtas_register(RTAS_QUERY_CPU_STOPPED_STATE, "query-cpu-stopped-state", rtas_query_cpu_stopped_state); spapr_rtas_register(RTAS_START_CPU, "start-cpu", rtas_start_cpu); spapr_rtas_register(RTAS_STOP_SELF, "stop-self", rtas_stop_self); spapr_rtas_register(RTAS_IBM_GET_SYSTEM_PARAMETER, "ibm,get-system-parameter", rtas_ibm_get_system_parameter); spapr_rtas_register(RTAS_IBM_SET_SYSTEM_PARAMETER, "ibm,set-system-parameter", rtas_ibm_set_system_parameter); }
19,744
qemu
72700d7e733948fa7fbb735ccdf2209931c88476
1
static void spapr_phb_vfio_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); sPAPRPHBClass *spc = SPAPR_PCI_HOST_BRIDGE_CLASS(klass); dc->props = spapr_phb_vfio_properties; spc->finish_realize = spapr_phb_vfio_finish_realize; }
19,745
FFmpeg
7df3b426bbfbd7efd9a0f56393e3cc78413b0869
1
static void mxf_write_preface(AVFormatContext *s) { MXFContext *mxf = s->priv_data; AVIOContext *pb = s->pb; mxf_write_metadata_key(pb, 0x012f00); PRINT_KEY(s, "preface key", pb->buf_ptr - 16); klv_encode_ber_length(pb, 130 + 16 * mxf->essence_container_count); // write preface set uid mxf_write_local_tag(pb, 16, 0x3C0A); mxf_write_uuid(pb, Preface, 0); PRINT_KEY(s, "preface uid", pb->buf_ptr - 16); // last modified date mxf_write_local_tag(pb, 8, 0x3B02); avio_wb64(pb, mxf->timestamp); // write version mxf_write_local_tag(pb, 2, 0x3B05); avio_wb16(pb, 258); // v1.2 // write identification_refs mxf_write_local_tag(pb, 16 + 8, 0x3B06); mxf_write_refs_count(pb, 1); mxf_write_uuid(pb, Identification, 0); // write content_storage_refs mxf_write_local_tag(pb, 16, 0x3B03); mxf_write_uuid(pb, ContentStorage, 0); // operational pattern mxf_write_local_tag(pb, 16, 0x3B09); avio_write(pb, op1a_ul, 16); // write essence_container_refs mxf_write_local_tag(pb, 8 + 16 * mxf->essence_container_count, 0x3B0A); mxf_write_essence_container_refs(s); // write dm_scheme_refs mxf_write_local_tag(pb, 8, 0x3B0B); avio_wb64(pb, 0); }
19,746
qemu
ff1685a3338072be9a94ab8a8f529c12e4265224
1
static void add_machine_test_cases(void) { const char *arch = qtest_get_arch(); QDict *response, *minfo; QList *list; const QListEntry *p; QObject *qobj; QString *qstr; const char *mname, *path; qtest_start("-machine none"); response = qmp("{ 'execute': 'query-machines' }"); g_assert(response); list = qdict_get_qlist(response, "return"); g_assert(list); for (p = qlist_first(list); p; p = qlist_next(p)) { minfo = qobject_to_qdict(qlist_entry_obj(p)); g_assert(minfo); qobj = qdict_get(minfo, "name"); g_assert(qobj); qstr = qobject_to_qstring(qobj); g_assert(qstr); mname = qstring_get_str(qstr); if (!is_blacklisted(arch, mname)) { path = g_strdup_printf("qom/%s", mname); qtest_add_data_func(path, g_strdup(mname), test_machine); } } qtest_end(); QDECREF(response); }
19,747
FFmpeg
f19af812a32c1398d48c3550d11dbc6aafbb2bfc
1
static int adx_decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf0, int buf_size) { ADXContext *c = avctx->priv_data; short *samples = data; const uint8_t *buf = buf0; int rest = buf_size; if (!c->header_parsed) { int hdrsize = adx_decode_header(avctx,buf,rest); if (hdrsize==0) return -1; c->header_parsed = 1; buf += hdrsize; rest -= hdrsize; } if (c->in_temp) { int copysize = 18*avctx->channels - c->in_temp; memcpy(c->dec_temp+c->in_temp,buf,copysize); rest -= copysize; buf += copysize; if (avctx->channels==1) { adx_decode(samples,c->dec_temp,c->prev); samples += 32; } else { adx_decode_stereo(samples,c->dec_temp,c->prev); samples += 32*2; } } // if (avctx->channels==1) { while(rest>=18) { adx_decode(samples,buf,c->prev); rest-=18; buf+=18; samples+=32; } } else { while(rest>=18*2) { adx_decode_stereo(samples,buf,c->prev); rest-=18*2; buf+=18*2; samples+=32*2; } } // c->in_temp = rest; if (rest) { memcpy(c->dec_temp,buf,rest); buf+=rest; } *data_size = (uint8_t*)samples - (uint8_t*)data; // printf("%d:%d ",buf-buf0,*data_size); fflush(stdout); return buf-buf0; }
19,749
qemu
c527e0afcd7d719abc3a5ca5e4c8ac2fe48b999f
1
static void vmxnet3_process_tx_queue(VMXNET3State *s, int qidx) { struct Vmxnet3_TxDesc txd; uint32_t txd_idx; uint32_t data_len; hwaddr data_pa; for (;;) { if (!vmxnet3_pop_next_tx_descr(s, qidx, &txd, &txd_idx)) { break; } vmxnet3_dump_tx_descr(&txd); if (!s->skip_current_tx_pkt) { data_len = (txd.len > 0) ? txd.len : VMXNET3_MAX_TX_BUF_SIZE; data_pa = le64_to_cpu(txd.addr); if (!net_tx_pkt_add_raw_fragment(s->tx_pkt, data_pa, data_len)) { s->skip_current_tx_pkt = true; } } if (s->tx_sop) { vmxnet3_tx_retrieve_metadata(s, &txd); s->tx_sop = false; } if (txd.eop) { if (!s->skip_current_tx_pkt && net_tx_pkt_parse(s->tx_pkt)) { if (s->needs_vlan) { net_tx_pkt_setup_vlan_header(s->tx_pkt, s->tci); } vmxnet3_send_packet(s, qidx); } else { vmxnet3_on_tx_done_update_stats(s, qidx, VMXNET3_PKT_STATUS_ERROR); } vmxnet3_complete_packet(s, qidx, txd_idx); s->tx_sop = true; s->skip_current_tx_pkt = false; net_tx_pkt_reset(s->tx_pkt); } } }
19,750
FFmpeg
1918057c8a3bc37c27e476d16736fe8bc76afd34
0
static void spatial_compose53i(IDWTELEM *buffer, int width, int height, int stride){ dwt_compose_t cs; spatial_compose53i_init(&cs, buffer, height, stride); while(cs.y <= height) spatial_compose53i_dy(&cs, buffer, width, height, stride); }
19,753
FFmpeg
c854102da773fa898cc6dbc8ca474b1088ce5f12
0
static void assert_codec_experimental(AVCodecContext *c, int encoder) { const char *codec_string = encoder ? "encoder" : "decoder"; AVCodec *codec; if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL && c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad " "results.\nAdd '-strict experimental' if you want to use it.\n", codec_string, c->codec->name); codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id); if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL)) av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n", codec_string, codec->name); exit(1); } }
19,754
FFmpeg
256c0662ef4c7dc9fb03c95d96ba8833a1f54b13
0
static int vp3_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; Vp3DecodeContext *s = avctx->priv_data; GetBitContext gb; static int counter = 0; int i; init_get_bits(&gb, buf, buf_size * 8); if (s->theora && get_bits1(&gb)) { av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n"); return -1; } s->keyframe = !get_bits1(&gb); if (!s->theora) skip_bits(&gb, 1); for (i = 0; i < 3; i++) s->last_qps[i] = s->qps[i]; s->nqps=0; do{ s->qps[s->nqps++]= get_bits(&gb, 6); } while(s->theora >= 0x030200 && s->nqps<3 && get_bits1(&gb)); for (i = s->nqps; i < 3; i++) s->qps[i] = -1; if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n", s->keyframe?"key":"", counter, s->qps[0]); counter++; if (s->qps[0] != s->last_qps[0]) init_loop_filter(s); for (i = 0; i < s->nqps; i++) // reinit all dequantizers if the first one changed, because // the DC of the first quantizer must be used for all matrices if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0]) init_dequantizer(s, i); if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe) return buf_size; if (s->keyframe) { if (!s->theora) { skip_bits(&gb, 4); /* width code */ skip_bits(&gb, 4); /* height code */ if (s->version) { s->version = get_bits(&gb, 5); if (counter == 1) av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version); } } if (s->version || s->theora) { if (get_bits1(&gb)) av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n"); skip_bits(&gb, 2); /* reserved? */ } if (s->last_frame.data[0] == s->golden_frame.data[0]) { if (s->golden_frame.data[0]) avctx->release_buffer(avctx, &s->golden_frame); s->last_frame= s->golden_frame; /* ensure that we catch any access to this released frame */ } else { if (s->golden_frame.data[0]) avctx->release_buffer(avctx, &s->golden_frame); if (s->last_frame.data[0]) avctx->release_buffer(avctx, &s->last_frame); } s->golden_frame.reference = 3; if(avctx->get_buffer(avctx, &s->golden_frame) < 0) { av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n"); return -1; } /* golden frame is also the current frame */ s->current_frame= s->golden_frame; /* time to figure out pixel addresses? 
*/ if (!s->pixel_addresses_initialized) { vp3_calculate_pixel_addresses(s); s->pixel_addresses_initialized = 1; } } else { /* allocate a new current frame */ s->current_frame.reference = 3; if (!s->pixel_addresses_initialized) { av_log(s->avctx, AV_LOG_ERROR, "vp3: first frame not a keyframe\n"); return -1; } if(avctx->get_buffer(avctx, &s->current_frame) < 0) { av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n"); return -1; } } s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame s->current_frame.qstride= 0; init_frame(s, &gb); if (unpack_superblocks(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n"); return -1; } if (unpack_modes(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n"); return -1; } if (unpack_vectors(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n"); return -1; } if (unpack_block_qpis(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n"); return -1; } if (unpack_dct_coeffs(s, &gb)){ av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n"); return -1; } for (i = 0; i < s->macroblock_height; i++) render_slice(s, i); apply_loop_filter(s); *data_size=sizeof(AVFrame); *(AVFrame*)data= s->current_frame; /* release the last frame, if it is allocated and if it is not the * golden frame */ if ((s->last_frame.data[0]) && (s->last_frame.data[0] != s->golden_frame.data[0])) avctx->release_buffer(avctx, &s->last_frame); /* shuffle frames (last = current) */ s->last_frame= s->current_frame; s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */ return buf_size; }
19,755
FFmpeg
d6604b29ef544793479d7fb4e05ef6622bb3e534
0
static av_cold int pnm_encode_close(AVCodecContext *avctx) { av_frame_free(&avctx->coded_frame); return 0; }
19,756
FFmpeg
3992526b3c43278945d00fac6e2ba5cb8f810ef3
0
void ff_vc1dsp_init(DSPContext* dsp, AVCodecContext *avctx) { dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_c; dsp->vc1_inv_trans_4x8 = vc1_inv_trans_4x8_c; dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_c; dsp->vc1_inv_trans_4x4 = vc1_inv_trans_4x4_c; dsp->vc1_h_overlap = vc1_h_overlap_c; dsp->vc1_v_overlap = vc1_v_overlap_c; dsp->vc1_loop_filter = vc1_loop_filter; dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_c; dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_c; dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_c; dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_c; dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_c; dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_c; dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_c; dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_c; dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_c; dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_c; dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_c; dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_c; dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_c; dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_c; dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_c; dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_c; dsp->avg_vc1_mspel_pixels_tab[ 0] = ff_avg_vc1_mspel_mc00_c; dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_c; dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_c; dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_c; dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_c; dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_c; dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_c; dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_c; dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_c; dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_c; dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_c; dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_c; dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_c; dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_c; dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_c; dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_c; }
19,757
qemu
12d4536f7d911b6d87a766ad7300482ea663cea2
1
void cpu_stop_current(void) { }
19,760
qemu
1ee24514aed34760fb2863d98bea3a1b705d9c9f
1
static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data, unsigned size) { switch (offset) { case 0xc: n->bar.intms |= data & 0xffffffff; n->bar.intmc = n->bar.intms; break; case 0x10: n->bar.intms &= ~(data & 0xffffffff); n->bar.intmc = n->bar.intms; break; case 0x14: /* Windows first sends data, then sends enable bit */ if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) && !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc)) { n->bar.cc = data; } if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) { n->bar.cc = data; if (nvme_start_ctrl(n)) { n->bar.csts = NVME_CSTS_FAILED; } else { n->bar.csts = NVME_CSTS_READY; } } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) { nvme_clear_ctrl(n); n->bar.csts &= ~NVME_CSTS_READY; } if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) { nvme_clear_ctrl(n); n->bar.cc = data; n->bar.csts |= NVME_CSTS_SHST_COMPLETE; } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) { n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE; n->bar.cc = data; } break; case 0x24: n->bar.aqa = data & 0xffffffff; break; case 0x28: n->bar.asq = data; break; case 0x2c: n->bar.asq |= data << 32; break; case 0x30: n->bar.acq = data; break; case 0x34: n->bar.acq |= data << 32; break; default: break; } }
19,761
qemu
9b2fadda3e0196ffd485adde4fe9cdd6fae35300
1
static void gen_wrtee(DisasContext *ctx) { #if defined(CONFIG_USER_ONLY) gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); #else TCGv t0; if (unlikely(ctx->pr)) { gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; } t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE)); tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE)); tcg_gen_or_tl(cpu_msr, cpu_msr, t0); tcg_temp_free(t0); /* Stop translation to have a chance to raise an exception * if we just set msr_ee to 1 */ gen_stop_exception(ctx); #endif }
19,762
FFmpeg
5537c92f84db5f10a853c0b974bc5223252114f4
0
int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt, AVFrame *pic_arg, int *got_packet) { MpegEncContext *s = avctx->priv_data; int i, stuffing_count, ret; int context_count = s->slice_context_count; s->picture_in_gop_number++; if (load_input_picture(s, pic_arg) < 0) return -1; if (select_input_picture(s) < 0) { return -1; } /* output? */ if (s->new_picture.f.data[0]) { if ((ret = ff_alloc_packet2(avctx, pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0) return ret; if (s->mb_info) { s->mb_info_ptr = av_packet_new_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_width*s->mb_height*12); s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0; } for (i = 0; i < context_count; i++) { int start_y = s->thread_context[i]->start_mb_y; int end_y = s->thread_context[i]-> end_mb_y; int h = s->mb_height; uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h); uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h); init_put_bits(&s->thread_context[i]->pb, start, end - start); } s->pict_type = s->new_picture.f.pict_type; //emms_c(); ff_MPV_frame_start(s, avctx); vbv_retry: if (encode_picture(s, s->picture_number) < 0) return -1; avctx->header_bits = s->header_bits; avctx->mv_bits = s->mv_bits; avctx->misc_bits = s->misc_bits; avctx->i_tex_bits = s->i_tex_bits; avctx->p_tex_bits = s->p_tex_bits; avctx->i_count = s->i_count; // FIXME f/b_count in avctx avctx->p_count = s->mb_num - s->i_count - s->skip_count; avctx->skip_count = s->skip_count; ff_MPV_frame_end(s); if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG) ff_mjpeg_encode_picture_trailer(s); if (avctx->rc_buffer_size) { RateControlContext *rcc = &s->rc_context; int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use; if (put_bits_count(&s->pb) > max_size && s->lambda < s->avctx->lmax) { s->next_lambda = FFMAX(s->lambda + 1, s->lambda * (s->qscale + 1) / s->qscale); if (s->adaptive_quant) { int i; for (i = 0; i < s->mb_height * s->mb_stride; i++) s->lambda_table[i] = FFMAX(s->lambda_table[i] + 1, s->lambda_table[i] * (s->qscale + 1) / s->qscale); } s->mb_skipped = 0; // done in MPV_frame_start() // done in encode_picture() so we must undo it if (s->pict_type == AV_PICTURE_TYPE_P) { if (s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4) s->no_rounding ^= 1; } if (s->pict_type != AV_PICTURE_TYPE_B) { s->time_base = s->last_time_base; s->last_non_b_time = s->time - s->pp_time; } for (i = 0; i < context_count; i++) { PutBitContext *pb = &s->thread_context[i]->pb; init_put_bits(pb, pb->buf, pb->buf_end - pb->buf); } goto vbv_retry; } assert(s->avctx->rc_max_rate); } if (s->flags & CODEC_FLAG_PASS1) ff_write_pass1_stats(s); for (i = 0; i < 4; i++) { s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i]; avctx->error[i] += s->current_picture_ptr->f.error[i]; } if (s->flags & CODEC_FLAG_PASS1) assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits + avctx->i_tex_bits + avctx->p_tex_bits == put_bits_count(&s->pb)); flush_put_bits(&s->pb); s->frame_bits = put_bits_count(&s->pb); stuffing_count = ff_vbv_update(s, s->frame_bits); s->stuffing_bits = 8*stuffing_count; if (stuffing_count) { if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < stuffing_count + 50) { av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n"); return -1; } switch (s->codec_id) { case AV_CODEC_ID_MPEG1VIDEO: case AV_CODEC_ID_MPEG2VIDEO: while (stuffing_count--) { put_bits(&s->pb, 8, 0); } break; case AV_CODEC_ID_MPEG4: put_bits(&s->pb, 16, 0); 
put_bits(&s->pb, 16, 0x1C3); stuffing_count -= 4; while (stuffing_count--) { put_bits(&s->pb, 8, 0xFF); } break; default: av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n"); } flush_put_bits(&s->pb); s->frame_bits = put_bits_count(&s->pb); } /* update mpeg1/2 vbv_delay for CBR */ if (s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && s->out_format == FMT_MPEG1 && 90000LL * (avctx->rc_buffer_size - 1) <= s->avctx->rc_max_rate * 0xFFFFLL) { int vbv_delay, min_delay; double inbits = s->avctx->rc_max_rate * av_q2d(s->avctx->time_base); int minbits = s->frame_bits - 8 * (s->vbv_delay_ptr - s->pb.buf - 1); double bits = s->rc_context.buffer_index + minbits - inbits; if (bits < 0) av_log(s->avctx, AV_LOG_ERROR, "Internal error, negative bits\n"); assert(s->repeat_first_field == 0); vbv_delay = bits * 90000 / s->avctx->rc_max_rate; min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) / s->avctx->rc_max_rate; vbv_delay = FFMAX(vbv_delay, min_delay); av_assert0(vbv_delay < 0xFFFF); s->vbv_delay_ptr[0] &= 0xF8; s->vbv_delay_ptr[0] |= vbv_delay >> 13; s->vbv_delay_ptr[1] = vbv_delay >> 5; s->vbv_delay_ptr[2] &= 0x07; s->vbv_delay_ptr[2] |= vbv_delay << 3; avctx->vbv_delay = vbv_delay * 300; } s->total_bits += s->frame_bits; avctx->frame_bits = s->frame_bits; pkt->pts = s->current_picture.f.pts; if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) { if (!s->current_picture.f.coded_picture_number) pkt->dts = pkt->pts - s->dts_delta; else pkt->dts = s->reordered_pts; s->reordered_pts = pkt->pts; } else pkt->dts = pkt->pts; if (s->current_picture.f.key_frame) pkt->flags |= AV_PKT_FLAG_KEY; if (s->mb_info) av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size); } else { s->frame_bits = 0; } assert((s->frame_bits & 7) == 0); pkt->size = s->frame_bits / 8; *got_packet = !!pkt->size; return 0; }
19,763
qemu
6f2d8978728c48ca46f5c01835438508aace5c64
1
static always_inline target_ulong MASK (uint32_t start, uint32_t end) { target_ulong ret; #if defined(TARGET_PPC64) if (likely(start == 0)) { ret = (uint64_t)(-1ULL) << (63 - end); } else if (likely(end == 63)) { ret = (uint64_t)(-1ULL) >> start; } #else if (likely(start == 0)) { ret = (uint32_t)(-1ULL) << (31 - end); } else if (likely(end == 31)) { ret = (uint32_t)(-1ULL) >> start; } #endif else { ret = (((target_ulong)(-1ULL)) >> (start)) ^ (((target_ulong)(-1ULL) >> (end)) >> 1); if (unlikely(start > end)) return ~ret; } return ret; }
19,764
qemu
fedf0d35aafc4f1f1e5f6dbc80cb23ae1ae49f0b
1
void sdl_display_init(DisplayState *ds, int full_screen, int no_frame) { int flags; uint8_t data = 0; const SDL_VideoInfo *vi; char *filename; #if defined(__APPLE__) /* always use generic keymaps */ if (!keyboard_layout) keyboard_layout = "en-us"; #endif if(keyboard_layout) { kbd_layout = init_keyboard_layout(name2keysym, keyboard_layout); if (!kbd_layout) exit(1); } if (no_frame) gui_noframe = 1; if (!full_screen) { setenv("SDL_VIDEO_ALLOW_SCREENSAVER", "1", 0); } #ifdef __linux__ /* on Linux, SDL may use fbcon|directfb|svgalib when run without * accessible $DISPLAY to open X11 window. This is often the case * when qemu is run using sudo. But in this case, and when actually * run in X11 environment, SDL fights with X11 for the video card, * making current display unavailable, often until reboot. * So make x11 the default SDL video driver if this variable is unset. * This is a bit hackish but saves us from bigger problem. * Maybe it's a good idea to fix this in SDL instead. */ setenv("SDL_VIDEODRIVER", "x11", 0); #endif /* Enable normal up/down events for Caps-Lock and Num-Lock keys. * This requires SDL >= 1.2.14. */ setenv("SDL_DISABLE_LOCK_KEYS", "1", 1); flags = SDL_INIT_VIDEO | SDL_INIT_NOPARACHUTE; if (SDL_Init (flags)) { fprintf(stderr, "Could not initialize SDL(%s) - exiting\n", SDL_GetError()); exit(1); } vi = SDL_GetVideoInfo(); host_format = *(vi->vfmt); /* Load a 32x32x4 image. White pixels are transparent. */ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "qemu-icon.bmp"); if (filename) { SDL_Surface *image = SDL_LoadBMP(filename); if (image) { uint32_t colorkey = SDL_MapRGB(image->format, 255, 255, 255); SDL_SetColorKey(image, SDL_SRCCOLORKEY, colorkey); SDL_WM_SetIcon(image, NULL); } g_free(filename); } if (full_screen) { gui_fullscreen = 1; sdl_grab_start(); } dcl = g_malloc0(sizeof(DisplayChangeListener)); dcl->ops = &dcl_ops; register_displaychangelistener(dcl); mouse_mode_notifier.notify = sdl_mouse_mode_change; qemu_add_mouse_mode_change_notifier(&mouse_mode_notifier); sdl_update_caption(); SDL_EnableKeyRepeat(250, 50); gui_grab = 0; sdl_cursor_hidden = SDL_CreateCursor(&data, &data, 8, 1, 0, 0); sdl_cursor_normal = SDL_GetCursor(); atexit(sdl_cleanup); }
19,765
FFmpeg
0ef8f03133a0bd83c74200a8cf30982c0f574016
1
static void apply_tns(INTFLOAT coef[1024], TemporalNoiseShaping *tns, IndividualChannelStream *ics, int decode) { const int mmm = FFMIN(ics->tns_max_bands, ics->max_sfb); int w, filt, m, i; int bottom, top, order, start, end, size, inc; INTFLOAT lpc[TNS_MAX_ORDER]; INTFLOAT tmp[TNS_MAX_ORDER+1]; for (w = 0; w < ics->num_windows; w++) { bottom = ics->num_swb; for (filt = 0; filt < tns->n_filt[w]; filt++) { top = bottom; bottom = FFMAX(0, top - tns->length[w][filt]); order = tns->order[w][filt]; if (order == 0) continue; // tns_decode_coef AAC_RENAME(compute_lpc_coefs)(tns->coef[w][filt], order, lpc, 0, 0, 0); start = ics->swb_offset[FFMIN(bottom, mmm)]; end = ics->swb_offset[FFMIN( top, mmm)]; if ((size = end - start) <= 0) continue; if (tns->direction[w][filt]) { inc = -1; start = end - 1; } else { inc = 1; } start += w * 128; if (decode) { // ar filter for (m = 0; m < size; m++, start += inc) for (i = 1; i <= FFMIN(m, order); i++) coef[start] -= AAC_MUL26(coef[start - i * inc], lpc[i - 1]); } else { // ma filter for (m = 0; m < size; m++, start += inc) { tmp[0] = coef[start]; for (i = 1; i <= FFMIN(m, order); i++) coef[start] += AAC_MUL26(tmp[i], lpc[i - 1]); for (i = order; i > 0; i--) tmp[i] = tmp[i - 1]; } } } } }
19,767
qemu
792b45b142e6b901e1de20886bc3369211582b8c
1
static void checkpoint(void) { assert(((mapping_t*)array_get(&(vvv->mapping), 0))->end == 2); check1(vvv); check2(vvv); assert(!vvv->current_mapping || vvv->current_fd || (vvv->current_mapping->mode & MODE_DIRECTORY)); #if 0 if (((direntry_t*)vvv->directory.pointer)[1].attributes != 0xf) fprintf(stderr, "Nonono!\n"); mapping_t* mapping; direntry_t* direntry; assert(vvv->mapping.size >= vvv->mapping.item_size * vvv->mapping.next); assert(vvv->directory.size >= vvv->directory.item_size * vvv->directory.next); if (vvv->mapping.next<47) return; assert((mapping = array_get(&(vvv->mapping), 47))); assert(mapping->dir_index < vvv->directory.next); direntry = array_get(&(vvv->directory), mapping->dir_index); assert(!memcmp(direntry->name, "USB H ", 11) || direntry->name[0]==0); #endif return; /* avoid compiler warnings: */ hexdump(NULL, 100); remove_mapping(vvv, NULL); print_mapping(NULL); print_direntry(NULL); }
19,768
FFmpeg
2f014567cfd63e58156f60666f1a61ba147276ab
1
static const uint8_t *avc_mp4_find_startcode(const uint8_t *start, const uint8_t *end, int nal_length_size) { int res = 0; if (end - start < nal_length_size) return NULL; while (nal_length_size--) res = (res << 8) | *start++; if (start + res > end || res < 0 || start + res < start) return NULL; return start + res; }
19,769
FFmpeg
ac4b32df71bd932838043a4838b86d11e169707f
1
av_cold void ff_vp8dsp_init_neon(VP8DSPContext *dsp) { dsp->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_neon; dsp->vp8_idct_add = ff_vp8_idct_add_neon; dsp->vp8_idct_dc_add = ff_vp8_idct_dc_add_neon; dsp->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_neon; dsp->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_neon; dsp->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16_neon; dsp->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16_neon; dsp->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_neon; dsp->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_neon; dsp->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_neon; dsp->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_neon; dsp->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_neon; dsp->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_neon; dsp->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter16_simple_neon; dsp->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter16_simple_neon; dsp->put_vp8_epel_pixels_tab[0][0][0] = ff_put_vp8_pixels16_neon; dsp->put_vp8_epel_pixels_tab[0][0][2] = ff_put_vp8_epel16_h6_neon; dsp->put_vp8_epel_pixels_tab[0][2][0] = ff_put_vp8_epel16_v6_neon; dsp->put_vp8_epel_pixels_tab[0][2][2] = ff_put_vp8_epel16_h6v6_neon; dsp->put_vp8_epel_pixels_tab[1][0][0] = ff_put_vp8_pixels8_neon; dsp->put_vp8_epel_pixels_tab[1][0][1] = ff_put_vp8_epel8_h4_neon; dsp->put_vp8_epel_pixels_tab[1][0][2] = ff_put_vp8_epel8_h6_neon; dsp->put_vp8_epel_pixels_tab[1][1][0] = ff_put_vp8_epel8_v4_neon; dsp->put_vp8_epel_pixels_tab[1][1][1] = ff_put_vp8_epel8_h4v4_neon; dsp->put_vp8_epel_pixels_tab[1][1][2] = ff_put_vp8_epel8_h6v4_neon; dsp->put_vp8_epel_pixels_tab[1][2][0] = ff_put_vp8_epel8_v6_neon; dsp->put_vp8_epel_pixels_tab[1][2][1] = ff_put_vp8_epel8_h4v6_neon; dsp->put_vp8_epel_pixels_tab[1][2][2] = ff_put_vp8_epel8_h6v6_neon; dsp->put_vp8_epel_pixels_tab[2][0][1] = ff_put_vp8_epel4_h4_neon; dsp->put_vp8_epel_pixels_tab[2][0][2] = ff_put_vp8_epel4_h6_neon; dsp->put_vp8_epel_pixels_tab[2][1][0] = ff_put_vp8_epel4_v4_neon; dsp->put_vp8_epel_pixels_tab[2][1][1] = ff_put_vp8_epel4_h4v4_neon; dsp->put_vp8_epel_pixels_tab[2][1][2] = ff_put_vp8_epel4_h6v4_neon; dsp->put_vp8_epel_pixels_tab[2][2][0] = ff_put_vp8_epel4_v6_neon; dsp->put_vp8_epel_pixels_tab[2][2][1] = ff_put_vp8_epel4_h4v6_neon; dsp->put_vp8_epel_pixels_tab[2][2][2] = ff_put_vp8_epel4_h6v6_neon; dsp->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_neon; dsp->put_vp8_bilinear_pixels_tab[0][0][1] = ff_put_vp8_bilin16_h_neon; dsp->put_vp8_bilinear_pixels_tab[0][0][2] = ff_put_vp8_bilin16_h_neon; dsp->put_vp8_bilinear_pixels_tab[0][1][0] = ff_put_vp8_bilin16_v_neon; dsp->put_vp8_bilinear_pixels_tab[0][1][1] = ff_put_vp8_bilin16_hv_neon; dsp->put_vp8_bilinear_pixels_tab[0][1][2] = ff_put_vp8_bilin16_hv_neon; dsp->put_vp8_bilinear_pixels_tab[0][2][0] = ff_put_vp8_bilin16_v_neon; dsp->put_vp8_bilinear_pixels_tab[0][2][1] = ff_put_vp8_bilin16_hv_neon; dsp->put_vp8_bilinear_pixels_tab[0][2][2] = ff_put_vp8_bilin16_hv_neon; dsp->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_neon; dsp->put_vp8_bilinear_pixels_tab[1][0][1] = ff_put_vp8_bilin8_h_neon; dsp->put_vp8_bilinear_pixels_tab[1][0][2] = ff_put_vp8_bilin8_h_neon; dsp->put_vp8_bilinear_pixels_tab[1][1][0] = ff_put_vp8_bilin8_v_neon; dsp->put_vp8_bilinear_pixels_tab[1][1][1] = ff_put_vp8_bilin8_hv_neon; dsp->put_vp8_bilinear_pixels_tab[1][1][2] = ff_put_vp8_bilin8_hv_neon; dsp->put_vp8_bilinear_pixels_tab[1][2][0] = ff_put_vp8_bilin8_v_neon; dsp->put_vp8_bilinear_pixels_tab[1][2][1] = ff_put_vp8_bilin8_hv_neon; 
dsp->put_vp8_bilinear_pixels_tab[1][2][2] = ff_put_vp8_bilin8_hv_neon; dsp->put_vp8_bilinear_pixels_tab[2][0][1] = ff_put_vp8_bilin4_h_neon; dsp->put_vp8_bilinear_pixels_tab[2][0][2] = ff_put_vp8_bilin4_h_neon; dsp->put_vp8_bilinear_pixels_tab[2][1][0] = ff_put_vp8_bilin4_v_neon; dsp->put_vp8_bilinear_pixels_tab[2][1][1] = ff_put_vp8_bilin4_hv_neon; dsp->put_vp8_bilinear_pixels_tab[2][1][2] = ff_put_vp8_bilin4_hv_neon; dsp->put_vp8_bilinear_pixels_tab[2][2][0] = ff_put_vp8_bilin4_v_neon; dsp->put_vp8_bilinear_pixels_tab[2][2][1] = ff_put_vp8_bilin4_hv_neon; dsp->put_vp8_bilinear_pixels_tab[2][2][2] = ff_put_vp8_bilin4_hv_neon; }
19,770
qemu
b2f95feec5e4d546b932848dd421ec3361e8ef77
1
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int count, BdrvRequestFlags flags) { BlockDriver *drv = bs->drv; QEMUIOVector qiov; struct iovec iov = {0}; int ret = 0; bool need_flush = false; int head = 0; int tail = 0; int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX); int alignment = MAX(bs->bl.pwrite_zeroes_alignment, bs->bl.request_alignment); assert(alignment % bs->bl.request_alignment == 0); head = offset % alignment; tail = (offset + count) % alignment; max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment); assert(max_write_zeroes >= bs->bl.request_alignment); while (count > 0 && !ret) { int num = count; /* Align request. Block drivers can expect the "bulk" of the request * to be aligned, and that unaligned requests do not cross cluster * boundaries. */ if (head) { /* Make a small request up to the first aligned sector. */ num = MIN(count, alignment - head); head = 0; } else if (tail && num > alignment) { /* Shorten the request to the last aligned sector. */ num -= tail; } /* limit request size */ if (num > max_write_zeroes) { num = max_write_zeroes; } ret = -ENOTSUP; /* First try the efficient write zeroes operation */ if (drv->bdrv_co_pwrite_zeroes) { ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num, flags & bs->supported_zero_flags); if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) && !(bs->supported_zero_flags & BDRV_REQ_FUA)) { need_flush = true; } } else { assert(!bs->supported_zero_flags); } if (ret == -ENOTSUP) { /* Fall back to bounce buffer if write zeroes is unsupported */ int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_WRITE_ZEROES_BOUNCE_BUFFER); BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE; if ((flags & BDRV_REQ_FUA) && !(bs->supported_write_flags & BDRV_REQ_FUA)) { /* No need for bdrv_driver_pwrite() to do a fallback * flush on each chunk; use just one at the end */ write_flags &= ~BDRV_REQ_FUA; need_flush = true; } num = MIN(num, max_transfer); iov.iov_len = num; if (iov.iov_base == NULL) { iov.iov_base = qemu_try_blockalign(bs, num); if (iov.iov_base == NULL) { ret = -ENOMEM; goto fail; } memset(iov.iov_base, 0, num); } qemu_iovec_init_external(&qiov, &iov, 1); ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags); /* Keep bounce buffer around if it is big enough for all * all future requests. */ if (num < max_transfer) { qemu_vfree(iov.iov_base); iov.iov_base = NULL; } } offset += num; count -= num; } fail: if (ret == 0 && need_flush) { ret = bdrv_co_flush(bs); } qemu_vfree(iov.iov_base); return ret; }
19,771
qemu
1789f4e37c78d408dfa61655dfd8b397554152f9
1
static void virtio_ccw_notify(DeviceState *d, uint16_t vector) { VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d); SubchDev *sch = dev->sch; uint64_t indicators; if (vector >= 128) { return; } if (vector < VIRTIO_CCW_QUEUE_MAX) { if (!dev->indicators) { return; } if (sch->thinint_active) { /* * In the adapter interrupt case, indicators points to a * memory area that may be (way) larger than 64 bit and * ind_bit indicates the start of the indicators in a big * endian notation. */ uint64_t ind_bit = dev->routes.adapter.ind_offset; virtio_set_ind_atomic(sch, dev->indicators->addr + (ind_bit + vector) / 8, 0x80 >> ((ind_bit + vector) % 8)); if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr, 0x01)) { css_adapter_interrupt(dev->thinint_isc); } } else { indicators = address_space_ldq(&address_space_memory, dev->indicators->addr, MEMTXATTRS_UNSPECIFIED, NULL); indicators |= 1ULL << vector; address_space_stq(&address_space_memory, dev->indicators->addr, indicators, MEMTXATTRS_UNSPECIFIED, NULL); css_conditional_io_interrupt(sch); } } else { if (!dev->indicators2) { return; } vector = 0; indicators = address_space_ldq(&address_space_memory, dev->indicators2->addr, MEMTXATTRS_UNSPECIFIED, NULL); indicators |= 1ULL << vector; address_space_stq(&address_space_memory, dev->indicators2->addr, indicators, MEMTXATTRS_UNSPECIFIED, NULL); css_conditional_io_interrupt(sch); } }
19,772
FFmpeg
52268def10f33e36ca1190906c8235f4e91fcf24
1
static int thp_probe(AVProbeData *p) { /* check file header */ if (AV_RL32(p->buf) == MKTAG('T', 'H', 'P', '\0')) return AVPROBE_SCORE_MAX; else return 0; }
19,773
FFmpeg
b648b246f07a4b041dcefd7309af407c1b74862a
1
static int parse_source_parameters(AVDiracSeqHeader *dsh, GetBitContext *gb, void *log_ctx) { AVRational frame_rate = { 0, 0 }; unsigned luma_depth = 8, luma_offset = 16; int idx; int chroma_x_shift, chroma_y_shift; /* [DIRAC_STD] 10.3.2 Frame size. frame_size(video_params) */ /* [DIRAC_STD] custom_dimensions_flag */ if (get_bits1(gb)) { dsh->width = svq3_get_ue_golomb(gb); /* [DIRAC_STD] FRAME_WIDTH */ dsh->height = svq3_get_ue_golomb(gb); /* [DIRAC_STD] FRAME_HEIGHT */ } /* [DIRAC_STD] 10.3.3 Chroma Sampling Format. * chroma_sampling_format(video_params) */ /* [DIRAC_STD] custom_chroma_format_flag */ if (get_bits1(gb)) /* [DIRAC_STD] CHROMA_FORMAT_INDEX */ dsh->chroma_format = svq3_get_ue_golomb(gb); if (dsh->chroma_format > 2U) { if (log_ctx) av_log(log_ctx, AV_LOG_ERROR, "Unknown chroma format %d\n", dsh->chroma_format); } /* [DIRAC_STD] 10.3.4 Scan Format. scan_format(video_params) */ /* [DIRAC_STD] custom_scan_format_flag */ if (get_bits1(gb)) /* [DIRAC_STD] SOURCE_SAMPLING */ dsh->interlaced = svq3_get_ue_golomb(gb); if (dsh->interlaced > 1U) /* [DIRAC_STD] 10.3.5 Frame Rate. frame_rate(video_params) */ if (get_bits1(gb)) { /* [DIRAC_STD] custom_frame_rate_flag */ dsh->frame_rate_index = svq3_get_ue_golomb(gb); if (dsh->frame_rate_index > 10U) if (!dsh->frame_rate_index) { /* [DIRAC_STD] FRAME_RATE_NUMER */ frame_rate.num = svq3_get_ue_golomb(gb); /* [DIRAC_STD] FRAME_RATE_DENOM */ frame_rate.den = svq3_get_ue_golomb(gb); } } /* [DIRAC_STD] preset_frame_rate(video_params, index) */ if (dsh->frame_rate_index > 0) { if (dsh->frame_rate_index <= 8) frame_rate = ff_mpeg12_frame_rate_tab[dsh->frame_rate_index]; else /* [DIRAC_STD] Table 10.3 values 9-10 */ frame_rate = dirac_frame_rate[dsh->frame_rate_index - 9]; } dsh->framerate = frame_rate; /* [DIRAC_STD] 10.3.6 Pixel Aspect Ratio. * pixel_aspect_ratio(video_params) */ if (get_bits1(gb)) { /* [DIRAC_STD] custom_pixel_aspect_ratio_flag */ /* [DIRAC_STD] index */ dsh->aspect_ratio_index = svq3_get_ue_golomb(gb); if (dsh->aspect_ratio_index > 6U) if (!dsh->aspect_ratio_index) { dsh->sample_aspect_ratio.num = svq3_get_ue_golomb(gb); dsh->sample_aspect_ratio.den = svq3_get_ue_golomb(gb); } } /* [DIRAC_STD] Take value from Table 10.4 Available preset pixel * aspect ratio values */ if (dsh->aspect_ratio_index > 0) dsh->sample_aspect_ratio = dirac_preset_aspect_ratios[dsh->aspect_ratio_index - 1]; /* [DIRAC_STD] 10.3.7 Clean area. clean_area(video_params) */ if (get_bits1(gb)) { /* [DIRAC_STD] custom_clean_area_flag */ /* [DIRAC_STD] CLEAN_WIDTH */ dsh->clean_width = svq3_get_ue_golomb(gb); /* [DIRAC_STD] CLEAN_HEIGHT */ dsh->clean_height = svq3_get_ue_golomb(gb); /* [DIRAC_STD] CLEAN_LEFT_OFFSET */ dsh->clean_left_offset = svq3_get_ue_golomb(gb); /* [DIRAC_STD] CLEAN_RIGHT_OFFSET */ dsh->clean_right_offset = svq3_get_ue_golomb(gb); } /* [DIRAC_STD] 10.3.8 Signal range. signal_range(video_params) * WARNING: Some adaptation seems to be done using the * AVCOL_RANGE_MPEG/JPEG values */ if (get_bits1(gb)) { /* [DIRAC_STD] custom_signal_range_flag */ /* [DIRAC_STD] index */ dsh->pixel_range_index = svq3_get_ue_golomb(gb); if (dsh->pixel_range_index > 4U) /* This assumes either fullrange or MPEG levels only */ if (!dsh->pixel_range_index) { luma_offset = svq3_get_ue_golomb(gb); luma_depth = av_log2(svq3_get_ue_golomb(gb)) + 1; svq3_get_ue_golomb(gb); /* chroma offset */ svq3_get_ue_golomb(gb); /* chroma excursion */ dsh->color_range = luma_offset ? 
AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG; } } /* [DIRAC_STD] Table 10.5 * Available signal range presets <--> pixel_range_presets */ if (dsh->pixel_range_index > 0) { idx = dsh->pixel_range_index - 1; luma_depth = pixel_range_presets[idx].bitdepth; dsh->color_range = pixel_range_presets[idx].color_range; } dsh->bit_depth = luma_depth; dsh->pix_fmt = dirac_pix_fmt[dsh->chroma_format][dsh->pixel_range_index-2]; avcodec_get_chroma_sub_sample(dsh->pix_fmt, &chroma_x_shift, &chroma_y_shift); if ((dsh->width % (1<<chroma_x_shift)) || (dsh->height % (1<<chroma_y_shift))) { if (log_ctx) av_log(log_ctx, AV_LOG_ERROR, "Dimensions must be an integer multiple of the chroma subsampling\n"); } /* [DIRAC_STD] 10.3.9 Colour specification. colour_spec(video_params) */ if (get_bits1(gb)) { /* [DIRAC_STD] custom_colour_spec_flag */ /* [DIRAC_STD] index */ idx = dsh->color_spec_index = svq3_get_ue_golomb(gb); if (dsh->color_spec_index > 4U) dsh->color_primaries = dirac_color_presets[idx].color_primaries; dsh->colorspace = dirac_color_presets[idx].colorspace; dsh->color_trc = dirac_color_presets[idx].color_trc; if (!dsh->color_spec_index) { /* [DIRAC_STD] 10.3.9.1 Colour primaries */ if (get_bits1(gb)) { idx = svq3_get_ue_golomb(gb); if (idx < 3U) dsh->color_primaries = dirac_primaries[idx]; } /* [DIRAC_STD] 10.3.9.2 Colour matrix */ if (get_bits1(gb)) { idx = svq3_get_ue_golomb(gb); if (!idx) dsh->colorspace = AVCOL_SPC_BT709; else if (idx == 1) dsh->colorspace = AVCOL_SPC_BT470BG; } /* [DIRAC_STD] 10.3.9.3 Transfer function */ if (get_bits1(gb) && !svq3_get_ue_golomb(gb)) dsh->color_trc = AVCOL_TRC_BT709; } } else { idx = dsh->color_spec_index; dsh->color_primaries = dirac_color_presets[idx].color_primaries; dsh->colorspace = dirac_color_presets[idx].colorspace; dsh->color_trc = dirac_color_presets[idx].color_trc; } return 0; }
19,774
FFmpeg
1aa708988ac131cf7d5c8bd59aca256a7c974df9
1
static int handle_packets(MpegTSContext *ts, int nb_packets) { AVFormatContext *s = ts->stream; uint8_t packet[TS_PACKET_SIZE]; int packet_num, ret = 0; if (avio_tell(s->pb) != ts->last_pos) { int i; av_dlog(ts->stream, "Skipping after seek\n"); /* seek detected, flush pes buffer */ for (i = 0; i < NB_PID_MAX; i++) { if (ts->pids[i]) { if (ts->pids[i]->type == MPEGTS_PES) { PESContext *pes = ts->pids[i]->u.pes_filter.opaque; av_freep(&pes->buffer); pes->data_index = 0; pes->state = MPEGTS_SKIP; /* skip until pes header */ } ts->pids[i]->last_cc = -1; } } } ts->stop_parse = 0; packet_num = 0; for(;;) { if (ts->stop_parse>0) break; packet_num++; if (nb_packets != 0 && packet_num >= nb_packets) break; ret = read_packet(s, packet, ts->raw_packet_size); if (ret != 0) break; ret = handle_packet(ts, packet); if (ret != 0) break; } ts->last_pos = avio_tell(s->pb); return ret; }
19,775
FFmpeg
1b325ce91ab78e2edd676cd8a099d04dd90a202c
0
static int v4l2_read_header(AVFormatContext *s1) { struct video_data *s = s1->priv_data; AVStream *st; int res = 0; uint32_t desired_format; enum AVCodecID codec_id = AV_CODEC_ID_NONE; enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE; st = avformat_new_stream(s1, NULL); if (!st) return AVERROR(ENOMEM); s->fd = device_open(s1); if (s->fd < 0) return s->fd; if (s->list_format) { list_formats(s1, s->fd, s->list_format); return AVERROR_EXIT; } avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */ if (s->pixel_format) { AVCodec *codec = avcodec_find_decoder_by_name(s->pixel_format); if (codec) s1->video_codec_id = codec->id; pix_fmt = av_get_pix_fmt(s->pixel_format); if (pix_fmt == AV_PIX_FMT_NONE && !codec) { av_log(s1, AV_LOG_ERROR, "No such input format: %s.\n", s->pixel_format); return AVERROR(EINVAL); } } if (!s->width && !s->height) { struct v4l2_format fmt; av_log(s1, AV_LOG_VERBOSE, "Querying the device for the current frame size\n"); fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; if (v4l2_ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) { av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n", strerror(errno)); return AVERROR(errno); } s->width = fmt.fmt.pix.width; s->height = fmt.fmt.pix.height; av_log(s1, AV_LOG_VERBOSE, "Setting frame size to %dx%d\n", s->width, s->height); } desired_format = device_try_init(s1, pix_fmt, &s->width, &s->height, &codec_id); /* If no pixel_format was specified, the codec_id was not known up * until now. Set video_codec_id in the context, as codec_id will * not be available outside this function */ if (codec_id != AV_CODEC_ID_NONE && s1->video_codec_id == AV_CODEC_ID_NONE) s1->video_codec_id = codec_id; if (desired_format == 0) { av_log(s1, AV_LOG_ERROR, "Cannot find a proper format for " "codec_id %d, pix_fmt %d.\n", s1->video_codec_id, pix_fmt); v4l2_close(s->fd); return AVERROR(EIO); } if ((res = av_image_check_size(s->width, s->height, 0, s1)) < 0) return res; s->frame_format = desired_format; if ((res = v4l2_set_parameters(s1)) < 0) return res; st->codec->pix_fmt = fmt_v4l2ff(desired_format, codec_id); s->frame_size = avpicture_get_size(st->codec->pix_fmt, s->width, s->height); if ((res = mmap_init(s1)) || (res = mmap_start(s1)) < 0) { v4l2_close(s->fd); return res; } s->top_field_first = first_field(s->fd); st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = codec_id; if (codec_id == AV_CODEC_ID_RAWVIDEO) st->codec->codec_tag = avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt); if (desired_format == V4L2_PIX_FMT_YVU420) st->codec->codec_tag = MKTAG('Y', 'V', '1', '2'); st->codec->width = s->width; st->codec->height = s->height; st->codec->bit_rate = s->frame_size * av_q2d(st->avg_frame_rate) * 8; return 0; }
19,776
qemu
651060aba79dc9d0cc77ac3921948ea78dba7409
1
static uint32_t ppc_hash64_pte_size_decode(uint64_t pte1, uint32_t slb_pshift) { switch (slb_pshift) { case 12: return 12; case 16: if ((pte1 & 0xf000) == 0x1000) { return 16; } return 0; case 24: if ((pte1 & 0xff000) == 0) { return 24; } return 0; } return 0; }
19,777
qemu
133e9b228df16d11de01529c217417e78d1d9370
1
static int pci_unregister_device(DeviceState *dev) { PCIDevice *pci_dev = PCI_DEVICE(dev); PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev); pci_unregister_io_regions(pci_dev); pci_del_option_rom(pci_dev); if (pc->exit) { pc->exit(pci_dev); } do_pci_unregister_device(pci_dev); return 0; }
19,778
qemu
882b3b97697affb36ca3d174f42f846232008979
1
bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid) { SubchSet *set; uint8_t real_cssid; real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid; if (real_cssid > MAX_CSSID || ssid > MAX_SSID || !channel_subsys.css[real_cssid] || !channel_subsys.css[real_cssid]->sch_set[ssid]) { return true; } set = channel_subsys.css[real_cssid]->sch_set[ssid]; return schid > find_last_bit(set->schids_used, (MAX_SCHID + 1) / sizeof(unsigned long)); }
19,779
FFmpeg
af9ec3dd1d9e90ec8134b01074b7beb01a1beb1a
1
AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret) { AVProbeData lpd = *pd; AVInputFormat *fmt1 = NULL, *fmt; int score, nodat = 0, score_max=0; if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) { int id3len = ff_id3v2_tag_len(lpd.buf); if (lpd.buf_size > id3len + 16) { lpd.buf += id3len; lpd.buf_size -= id3len; }else nodat = 1; } fmt = NULL; while ((fmt1 = av_iformat_next(fmt1))) { if (!is_opened == !(fmt1->flags & AVFMT_NOFILE)) continue; score = 0; if (fmt1->read_probe) { score = fmt1->read_probe(&lpd); if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions)) score = FFMAX(score, nodat ? AVPROBE_SCORE_MAX/4-1 : 1); } else if (fmt1->extensions) { if (av_match_ext(lpd.filename, fmt1->extensions)) { score = 50; } } if (score > score_max) { score_max = score; fmt = fmt1; }else if (score == score_max) fmt = NULL; } *score_ret= score_max; return fmt; }
19,780
qemu
d9bce9d99f4656ae0b0127f7472db9067b8f84ab
1
void do_addco (void) { T2 = T0; T0 += T1; if (likely(T0 >= T2)) { xer_ca = 0; } else { xer_ca = 1; } if (likely(!((T2 ^ T1 ^ (-1)) & (T2 ^ T0) & (1 << 31)))) { xer_ov = 0; } else { xer_so = 1; xer_ov = 1; } }
19,781
FFmpeg
48ddd8ddec3587453dffcfaa4130698d99228937
1
static av_cold int movie_common_init(AVFilterContext *ctx) { MovieContext *movie = ctx->priv; AVInputFormat *iformat = NULL; int64_t timestamp; int nb_streams = 1, ret, i; char default_streams[16], *stream_specs, *spec, *cursor; char name[16]; AVStream *st; if (!movie->file_name) { av_log(ctx, AV_LOG_ERROR, "No filename provided!\n"); return AVERROR(EINVAL); } movie->seek_point = movie->seek_point_d * 1000000 + 0.5; stream_specs = movie->stream_specs; if (!stream_specs) { snprintf(default_streams, sizeof(default_streams), "d%c%d", !strcmp(ctx->filter->name, "amovie") ? 'a' : 'v', movie->stream_index); stream_specs = default_streams; } for (cursor = stream_specs; *cursor; cursor++) if (*cursor == '+') nb_streams++; if (movie->loop_count != 1 && nb_streams != 1) { av_log(ctx, AV_LOG_ERROR, "Loop with several streams is currently unsupported\n"); return AVERROR_PATCHWELCOME; } av_register_all(); // Try to find the movie format (container) iformat = movie->format_name ? av_find_input_format(movie->format_name) : NULL; movie->format_ctx = NULL; if ((ret = avformat_open_input(&movie->format_ctx, movie->file_name, iformat, NULL)) < 0) { av_log(ctx, AV_LOG_ERROR, "Failed to avformat_open_input '%s'\n", movie->file_name); return ret; } if ((ret = avformat_find_stream_info(movie->format_ctx, NULL)) < 0) av_log(ctx, AV_LOG_WARNING, "Failed to find stream info\n"); // if seeking requested, we execute it if (movie->seek_point > 0) { timestamp = movie->seek_point; // add the stream start time, should it exist if (movie->format_ctx->start_time != AV_NOPTS_VALUE) { if (timestamp > 0 && movie->format_ctx->start_time > INT64_MAX - timestamp) { av_log(ctx, AV_LOG_ERROR, "%s: seek value overflow with start_time:%"PRId64" seek_point:%"PRId64"\n", movie->file_name, movie->format_ctx->start_time, movie->seek_point); return AVERROR(EINVAL); } timestamp += movie->format_ctx->start_time; } if ((ret = av_seek_frame(movie->format_ctx, -1, timestamp, AVSEEK_FLAG_BACKWARD)) < 0) { av_log(ctx, AV_LOG_ERROR, "%s: could not seek to position %"PRId64"\n", movie->file_name, timestamp); return ret; } } for (i = 0; i < movie->format_ctx->nb_streams; i++) movie->format_ctx->streams[i]->discard = AVDISCARD_ALL; movie->st = av_calloc(nb_streams, sizeof(*movie->st)); if (!movie->st) return AVERROR(ENOMEM); for (i = 0; i < nb_streams; i++) { spec = av_strtok(stream_specs, "+", &cursor); if (!spec) return AVERROR_BUG; stream_specs = NULL; /* for next strtok */ st = find_stream(ctx, movie->format_ctx, spec); if (!st) return AVERROR(EINVAL); st->discard = AVDISCARD_DEFAULT; movie->st[i].st = st; movie->max_stream_index = FFMAX(movie->max_stream_index, st->index); movie->st[i].discontinuity_threshold = av_rescale_q(movie->discontinuity_threshold, AV_TIME_BASE_Q, st->time_base); } if (av_strtok(NULL, "+", &cursor)) return AVERROR_BUG; movie->out_index = av_calloc(movie->max_stream_index + 1, sizeof(*movie->out_index)); if (!movie->out_index) return AVERROR(ENOMEM); for (i = 0; i <= movie->max_stream_index; i++) movie->out_index[i] = -1; for (i = 0; i < nb_streams; i++) { AVFilterPad pad = { 0 }; movie->out_index[movie->st[i].st->index] = i; snprintf(name, sizeof(name), "out%d", i); pad.type = movie->st[i].st->codecpar->codec_type; pad.name = av_strdup(name); if (!pad.name) return AVERROR(ENOMEM); pad.config_props = movie_config_output_props; pad.request_frame = movie_request_frame; ff_insert_outpad(ctx, i, &pad); if ( movie->st[i].st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && !movie->st[i].st->codecpar->channel_layout) { ret = 
guess_channel_layout(&movie->st[i], i, ctx); if (ret < 0) return ret; } ret = open_stream(ctx, &movie->st[i]); if (ret < 0) return ret; } av_log(ctx, AV_LOG_VERBOSE, "seek_point:%"PRIi64" format_name:%s file_name:%s stream_index:%d\n", movie->seek_point, movie->format_name, movie->file_name, movie->stream_index); return 0; }
19,783
FFmpeg
088eca28164c8cd3b72b0c3d3f9e3fe5ee5cb28f
1
static inline int available_samples(AVFrame *out) { int bytes_per_sample = av_get_bytes_per_sample(out->format); int samples = out->linesize[0] / bytes_per_sample; if (av_sample_fmt_is_planar(out->format)) { return samples; } else { int channels = av_get_channel_layout_nb_channels(out->channel_layout); return samples / channels; } }
19,784
qemu
f3a06403b82c7f036564e4caf18b52ce6885fcfb
1
GACommandState *ga_command_state_new(void) { GACommandState *cs = g_malloc0(sizeof(GACommandState)); cs->groups = NULL; return cs; }
19,786
FFmpeg
2884688bd51a808ccda3c0e13367619cd79e0579
1
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, const AVFrame *reference) { int i, mb_x, mb_y; uint8_t *data[MAX_COMPONENTS]; const uint8_t *reference_data[MAX_COMPONENTS]; int linesize[MAX_COMPONENTS]; GetBitContext mb_bitmask_gb; int bytes_per_pixel = 1 + (s->bits > 8); if (mb_bitmask) init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height); s->restart_count = 0; for (i = 0; i < nb_components; i++) { int c = s->comp_index[i]; data[c] = s->picture_ptr->data[c]; reference_data[c] = reference ? reference->data[c] : NULL; linesize[c] = s->linesize[c]; s->coefs_finished[c] |= 1; } for (mb_y = 0; mb_y < s->mb_height; mb_y++) { for (mb_x = 0; mb_x < s->mb_width; mb_x++) { const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb); if (s->restart_interval && !s->restart_count) s->restart_count = s->restart_interval; if (get_bits_left(&s->gb) < 0) { av_log(s->avctx, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb)); return AVERROR_INVALIDDATA; } for (i = 0; i < nb_components; i++) { uint8_t *ptr; int n, h, v, x, y, c, j; int block_offset; n = s->nb_blocks[i]; c = s->comp_index[i]; h = s->h_scount[i]; v = s->v_scount[i]; x = 0; y = 0; for (j = 0; j < n; j++) { block_offset = (((linesize[c] * (v * mb_y + y) * 8) + (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres); if (s->interlaced && s->bottom_field) block_offset += linesize[c] >> 1; ptr = data[c] + block_offset; if (!s->progressive) { if (copy_mb) mjpeg_copy_block(s, ptr, reference_data[c] + block_offset, linesize[c], s->avctx->lowres); else { s->dsp.clear_block(s->block); if (decode_block(s, s->block, i, s->dc_index[i], s->ac_index[i], s->quant_matrixes[s->quant_sindex[i]]) < 0) { av_log(s->avctx, AV_LOG_ERROR, "error y=%d x=%d\n", mb_y, mb_x); return AVERROR_INVALIDDATA; } s->dsp.idct_put(ptr, linesize[c], s->block); if (s->bits & 7) shift_output(s, ptr, linesize[c]); } } else { int block_idx = s->block_stride[c] * (v * mb_y + y) + (h * mb_x + x); int16_t *block = s->blocks[c][block_idx]; if (Ah) block[0] += get_bits1(&s->gb) * s->quant_matrixes[s->quant_sindex[i]][0] << Al; else if (decode_dc_progressive(s, block, i, s->dc_index[i], s->quant_matrixes[s->quant_sindex[i]], Al) < 0) { av_log(s->avctx, AV_LOG_ERROR, "error y=%d x=%d\n", mb_y, mb_x); return AVERROR_INVALIDDATA; } } av_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x); av_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n", mb_x, mb_y, x, y, c, s->bottom_field, (v * mb_y + y) * 8, (h * mb_x + x) * 8); if (++x == h) { x = 0; y++; } } } handle_rstn(s, nb_components); } } return 0; }
19,787
FFmpeg
3ab9a2a5577d445252724af4067d2a7c8a378efa
1
static int rv40_v_loop_filter_strength(uint8_t *src, int stride, int beta, int beta2, int edge, int *p1, int *q1) { return rv40_loop_filter_strength(src, 1, stride, beta, beta2, edge, p1, q1); }
19,789
qemu
709a340d679d95a0c6cbb9b5f654498f04345b50
1
static void build_trampolines(TCGContext *s) { static void * const qemu_ld_helpers[16] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LESW] = helper_le_ldsw_mmu, [MO_LEUL] = helper_le_ldul_mmu, [MO_LEQ] = helper_le_ldq_mmu, [MO_BEUW] = helper_be_lduw_mmu, [MO_BESW] = helper_be_ldsw_mmu, [MO_BEUL] = helper_be_ldul_mmu, [MO_BEQ] = helper_be_ldq_mmu, }; static void * const qemu_st_helpers[16] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, [MO_LEQ] = helper_le_stq_mmu, [MO_BEUW] = helper_be_stw_mmu, [MO_BEUL] = helper_be_stl_mmu, [MO_BEQ] = helper_be_stq_mmu, }; int i; TCGReg ra; for (i = 0; i < 16; ++i) { if (qemu_ld_helpers[i] == NULL) { continue; } /* May as well align the trampoline. */ while ((uintptr_t)s->code_ptr & 15) { tcg_out_nop(s); } qemu_ld_trampoline[i] = s->code_ptr; if (SPARC64 || TARGET_LONG_BITS == 32) { ra = TCG_REG_O3; } else { /* Install the high part of the address. */ tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX); ra = TCG_REG_O4; } /* Set the retaddr operand. */ tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7); /* Set the env operand. */ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0); /* Tail call. */ tcg_out_call_nodelay(s, qemu_ld_helpers[i]); tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra); } for (i = 0; i < 16; ++i) { if (qemu_st_helpers[i] == NULL) { continue; } /* May as well align the trampoline. */ while ((uintptr_t)s->code_ptr & 15) { tcg_out_nop(s); } qemu_st_trampoline[i] = s->code_ptr; if (SPARC64) { emit_extend(s, TCG_REG_O2, i); ra = TCG_REG_O4; } else { ra = TCG_REG_O1; if (TARGET_LONG_BITS == 64) { /* Install the high part of the address. */ tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX); ra += 2; } else { ra += 1; } if ((i & MO_SIZE) == MO_64) { /* Install the high part of the data. */ tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX); ra += 2; } else { ra += 1; } /* Skip the oi argument. */ ra += 1; } /* Set the retaddr operand. */ if (ra >= TCG_REG_O6) { tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK, TCG_TARGET_CALL_STACK_OFFSET); ra = TCG_REG_G1; } tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7); /* Set the env operand. */ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0); /* Tail call. */ tcg_out_call_nodelay(s, qemu_st_helpers[i]); tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra); } }
19,790
qemu
11c89769dc3e638ef72915d97058411ddf79b64b
1
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset, int *num, uint64_t *host_offset, QCowL2Meta **m) { BDRVQcowState *s = bs->opaque; uint64_t start, remaining; uint64_t cluster_offset; uint64_t cur_bytes; int ret; trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *num); assert((offset & ~BDRV_SECTOR_MASK) == 0); again: start = offset; remaining = *num << BDRV_SECTOR_BITS; cluster_offset = 0; *host_offset = 0; cur_bytes = 0; *m = NULL; while (true) { if (!*host_offset) { *host_offset = start_of_cluster(s, cluster_offset); } assert(remaining >= cur_bytes); start += cur_bytes; remaining -= cur_bytes; cluster_offset += cur_bytes; if (remaining == 0) { break; } cur_bytes = remaining; /* * Now start gathering as many contiguous clusters as possible: * * 1. Check for overlaps with in-flight allocations * * a) Overlap not in the first cluster -> shorten this request and * let the caller handle the rest in its next loop iteration. * * b) Real overlaps of two requests. Yield and restart the search * for contiguous clusters (the situation could have changed * while we were sleeping) * * c) TODO: Request starts in the same cluster as the in-flight * allocation ends. Shorten the COW of the in-fight allocation, * set cluster_offset to write to the same cluster and set up * the right synchronisation between the in-flight request and * the new one. */ ret = handle_dependencies(bs, start, &cur_bytes, m); if (ret == -EAGAIN) { /* Currently handle_dependencies() doesn't yield if we already had * an allocation. If it did, we would have to clean up the L2Meta * structs before starting over. */ assert(*m == NULL); goto again; } else if (ret < 0) { return ret; } else if (cur_bytes == 0) { break; } else { /* handle_dependencies() may have decreased cur_bytes (shortened * the allocations below) so that the next dependency is processed * correctly during the next loop iteration. */ } /* * 2. Count contiguous COPIED clusters. */ ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m); if (ret < 0) { return ret; } else if (ret) { continue; } else if (cur_bytes == 0) { break; } /* * 3. If the request still hasn't completed, allocate new clusters, * considering any cluster_offset of steps 1c or 2. */ ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m); if (ret < 0) { return ret; } else if (ret) { continue; } else { assert(cur_bytes == 0); break; } } *num -= remaining >> BDRV_SECTOR_BITS; assert(*num > 0); assert(*host_offset != 0); return 0; }
19,792
FFmpeg
3689ec3d28d76b7a67a5d3838870dfd25cd2daad
1
static void RENAME(postProcess)(const uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height, const QP_STORE_T QPs[], int QPStride, int isColor, PPContext *c2) { DECLARE_ALIGNED(8, PPContext, c)= *c2; //copy to stack for faster access int x,y; #ifdef COMPILE_TIME_MODE const int mode= COMPILE_TIME_MODE; #else const int mode= isColor ? c.ppMode.chromMode : c.ppMode.lumMode; #endif int black=0, white=255; // blackest black and whitest white in the picture int QPCorrecture= 256*256; int copyAhead; #if HAVE_MMX_INLINE int i; #endif const int qpHShift= isColor ? 4-c.hChromaSubSample : 4; const int qpVShift= isColor ? 4-c.vChromaSubSample : 4; //FIXME remove uint64_t * const yHistogram= c.yHistogram; uint8_t * const tempSrc= srcStride > 0 ? c.tempSrc : c.tempSrc - 23*srcStride; uint8_t * const tempDst= dstStride > 0 ? c.tempDst : c.tempDst - 23*dstStride; //const int mbWidth= isColor ? (width+7)>>3 : (width+15)>>4; #if HAVE_MMX_INLINE for(i=0; i<57; i++){ int offset= ((i*c.ppMode.baseDcDiff)>>8) + 1; int threshold= offset*2 + 1; c.mmxDcOffset[i]= 0x7F - offset; c.mmxDcThreshold[i]= 0x7F - threshold; c.mmxDcOffset[i]*= 0x0101010101010101LL; c.mmxDcThreshold[i]*= 0x0101010101010101LL; } #endif if(mode & CUBIC_IPOL_DEINT_FILTER) copyAhead=16; else if( (mode & LINEAR_BLEND_DEINT_FILTER) || (mode & FFMPEG_DEINT_FILTER) || (mode & LOWPASS5_DEINT_FILTER)) copyAhead=14; else if( (mode & V_DEBLOCK) || (mode & LINEAR_IPOL_DEINT_FILTER) || (mode & MEDIAN_DEINT_FILTER) || (mode & V_A_DEBLOCK)) copyAhead=13; else if(mode & V_X1_FILTER) copyAhead=11; // else if(mode & V_RK1_FILTER) copyAhead=10; else if(mode & DERING) copyAhead=9; else copyAhead=8; copyAhead-= 8; if(!isColor){ uint64_t sum= 0; int i; uint64_t maxClipped; uint64_t clipped; double scale; c.frameNum++; // first frame is fscked so we ignore it if(c.frameNum == 1) yHistogram[0]= width*height/64*15/256; for(i=0; i<256; i++){ sum+= yHistogram[i]; } /* We always get a completely black picture first. */ maxClipped= (uint64_t)(sum * c.ppMode.maxClippedThreshold); clipped= sum; for(black=255; black>0; black--){ if(clipped < maxClipped) break; clipped-= yHistogram[black]; } clipped= sum; for(white=0; white<256; white++){ if(clipped < maxClipped) break; clipped-= yHistogram[white]; } scale= (double)(c.ppMode.maxAllowedY - c.ppMode.minAllowedY) / (double)(white-black); #if HAVE_MMXEXT_INLINE c.packedYScale= (uint16_t)(scale*256.0 + 0.5); c.packedYOffset= (((black*c.packedYScale)>>8) - c.ppMode.minAllowedY) & 0xFFFF; #else c.packedYScale= (uint16_t)(scale*1024.0 + 0.5); c.packedYOffset= (black - c.ppMode.minAllowedY) & 0xFFFF; #endif c.packedYOffset|= c.packedYOffset<<32; c.packedYOffset|= c.packedYOffset<<16; c.packedYScale|= c.packedYScale<<32; c.packedYScale|= c.packedYScale<<16; if(mode & LEVEL_FIX) QPCorrecture= (int)(scale*256*256 + 0.5); else QPCorrecture= 256*256; }else{ c.packedYScale= 0x0100010001000100LL; c.packedYOffset= 0; QPCorrecture= 256*256; } /* copy & deinterlace first row of blocks */ y=-BLOCK_SIZE; { const uint8_t *srcBlock= &(src[y*srcStride]); uint8_t *dstBlock= tempDst + dstStride; // From this point on it is guaranteed that we can read and write 16 lines downward // finish 1 block before the next otherwise we might have a problem // with the L1 Cache of the P4 ... 
or only a few blocks at a time or soemthing for(x=0; x<width; x+=BLOCK_SIZE){ #if HAVE_MMXEXT_INLINE /* prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32); prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32); prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32); prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32); */ __asm__( "mov %4, %%"REG_a" \n\t" "shr $2, %%"REG_a" \n\t" "and $6, %%"REG_a" \n\t" "add %5, %%"REG_a" \n\t" "mov %%"REG_a", %%"REG_d" \n\t" "imul %1, %%"REG_a" \n\t" "imul %3, %%"REG_d" \n\t" "prefetchnta 32(%%"REG_a", %0) \n\t" "prefetcht0 32(%%"REG_d", %2) \n\t" "add %1, %%"REG_a" \n\t" "add %3, %%"REG_d" \n\t" "prefetchnta 32(%%"REG_a", %0) \n\t" "prefetcht0 32(%%"REG_d", %2) \n\t" :: "r" (srcBlock), "r" ((x86_reg)srcStride), "r" (dstBlock), "r" ((x86_reg)dstStride), "g" ((x86_reg)x), "g" ((x86_reg)copyAhead) : "%"REG_a, "%"REG_d ); #elif HAVE_AMD3DNOW_INLINE //FIXME check if this is faster on an 3dnow chip or if it is faster without the prefetch or ... /* prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32); prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32); prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32); prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32); */ #endif RENAME(blockCopy)(dstBlock + dstStride*8, dstStride, srcBlock + srcStride*8, srcStride, mode & LEVEL_FIX, &c.packedYOffset); RENAME(duplicate)(dstBlock + dstStride*8, dstStride); if(mode & LINEAR_IPOL_DEINT_FILTER) RENAME(deInterlaceInterpolateLinear)(dstBlock, dstStride); else if(mode & LINEAR_BLEND_DEINT_FILTER) RENAME(deInterlaceBlendLinear)(dstBlock, dstStride, c.deintTemp + x); else if(mode & MEDIAN_DEINT_FILTER) RENAME(deInterlaceMedian)(dstBlock, dstStride); else if(mode & CUBIC_IPOL_DEINT_FILTER) RENAME(deInterlaceInterpolateCubic)(dstBlock, dstStride); else if(mode & FFMPEG_DEINT_FILTER) RENAME(deInterlaceFF)(dstBlock, dstStride, c.deintTemp + x); else if(mode & LOWPASS5_DEINT_FILTER) RENAME(deInterlaceL5)(dstBlock, dstStride, c.deintTemp + x, c.deintTemp + width + x); /* else if(mode & CUBIC_BLEND_DEINT_FILTER) RENAME(deInterlaceBlendCubic)(dstBlock, dstStride); */ dstBlock+=8; srcBlock+=8; } if(width==FFABS(dstStride)) linecpy(dst, tempDst + 9*dstStride, copyAhead, dstStride); else{ int i; for(i=0; i<copyAhead; i++){ memcpy(dst + i*dstStride, tempDst + (9+i)*dstStride, width); } } } for(y=0; y<height; y+=BLOCK_SIZE){ //1% speedup if these are here instead of the inner loop const uint8_t *srcBlock= &(src[y*srcStride]); uint8_t *dstBlock= &(dst[y*dstStride]); #if HAVE_MMX_INLINE uint8_t *tempBlock1= c.tempBlocks; uint8_t *tempBlock2= c.tempBlocks + 8; #endif const int8_t *QPptr= &QPs[(y>>qpVShift)*QPStride]; int8_t *nonBQPptr= &c.nonBQPTable[(y>>qpVShift)*FFABS(QPStride)]; int QP=0; /* can we mess with a 8x16 block from srcBlock/dstBlock downwards and 1 line upwards if not than use a temporary buffer */ if(y+15 >= height){ int i; /* copy from line (copyAhead) to (copyAhead+7) of src, these will be copied with blockcopy to dst later */ linecpy(tempSrc + srcStride*copyAhead, srcBlock + srcStride*copyAhead, FFMAX(height-y-copyAhead, 0), srcStride); /* duplicate last line of src to fill the void up to line (copyAhead+7) */ for(i=FFMAX(height-y, 8); i<copyAhead+8; i++) memcpy(tempSrc + srcStride*i, src + srcStride*(height-1), FFABS(srcStride)); /* copy up to (copyAhead+1) lines of dst (line -1 to (copyAhead-1))*/ linecpy(tempDst, dstBlock - dstStride, FFMIN(height-y+1, copyAhead+1), dstStride); /* duplicate last line of dst to fill the void up to line (copyAhead) */ 
for(i=height-y+1; i<=copyAhead; i++) memcpy(tempDst + dstStride*i, dst + dstStride*(height-1), FFABS(dstStride)); dstBlock= tempDst + dstStride; srcBlock= tempSrc; } // From this point on it is guaranteed that we can read and write 16 lines downward // finish 1 block before the next otherwise we might have a problem // with the L1 Cache of the P4 ... or only a few blocks at a time or soemthing for(x=0; x<width; x+=BLOCK_SIZE){ const int stride= dstStride; #if HAVE_MMX_INLINE uint8_t *tmpXchg; #endif if(isColor){ QP= QPptr[x>>qpHShift]; c.nonBQP= nonBQPptr[x>>qpHShift]; }else{ QP= QPptr[x>>4]; QP= (QP* QPCorrecture + 256*128)>>16; c.nonBQP= nonBQPptr[x>>4]; c.nonBQP= (c.nonBQP* QPCorrecture + 256*128)>>16; yHistogram[ srcBlock[srcStride*12 + 4] ]++; } c.QP= QP; #if HAVE_MMX_INLINE __asm__ volatile( "movd %1, %%mm7 \n\t" "packuswb %%mm7, %%mm7 \n\t" // 0, 0, 0, QP, 0, 0, 0, QP "packuswb %%mm7, %%mm7 \n\t" // 0,QP, 0, QP, 0,QP, 0, QP "packuswb %%mm7, %%mm7 \n\t" // QP,..., QP "movq %%mm7, %0 \n\t" : "=m" (c.pQPb) : "r" (QP) ); #endif #if HAVE_MMXEXT_INLINE /* prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32); prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32); prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32); prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32); */ __asm__( "mov %4, %%"REG_a" \n\t" "shr $2, %%"REG_a" \n\t" "and $6, %%"REG_a" \n\t" "add %5, %%"REG_a" \n\t" "mov %%"REG_a", %%"REG_d" \n\t" "imul %1, %%"REG_a" \n\t" "imul %3, %%"REG_d" \n\t" "prefetchnta 32(%%"REG_a", %0) \n\t" "prefetcht0 32(%%"REG_d", %2) \n\t" "add %1, %%"REG_a" \n\t" "add %3, %%"REG_d" \n\t" "prefetchnta 32(%%"REG_a", %0) \n\t" "prefetcht0 32(%%"REG_d", %2) \n\t" :: "r" (srcBlock), "r" ((x86_reg)srcStride), "r" (dstBlock), "r" ((x86_reg)dstStride), "g" ((x86_reg)x), "g" ((x86_reg)copyAhead) : "%"REG_a, "%"REG_d ); #elif HAVE_AMD3DNOW_INLINE //FIXME check if this is faster on an 3dnow chip or if it is faster without the prefetch or ... 
/* prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32); prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32); prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32); prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32); */ #endif RENAME(blockCopy)(dstBlock + dstStride*copyAhead, dstStride, srcBlock + srcStride*copyAhead, srcStride, mode & LEVEL_FIX, &c.packedYOffset); if(mode & LINEAR_IPOL_DEINT_FILTER) RENAME(deInterlaceInterpolateLinear)(dstBlock, dstStride); else if(mode & LINEAR_BLEND_DEINT_FILTER) RENAME(deInterlaceBlendLinear)(dstBlock, dstStride, c.deintTemp + x); else if(mode & MEDIAN_DEINT_FILTER) RENAME(deInterlaceMedian)(dstBlock, dstStride); else if(mode & CUBIC_IPOL_DEINT_FILTER) RENAME(deInterlaceInterpolateCubic)(dstBlock, dstStride); else if(mode & FFMPEG_DEINT_FILTER) RENAME(deInterlaceFF)(dstBlock, dstStride, c.deintTemp + x); else if(mode & LOWPASS5_DEINT_FILTER) RENAME(deInterlaceL5)(dstBlock, dstStride, c.deintTemp + x, c.deintTemp + width + x); /* else if(mode & CUBIC_BLEND_DEINT_FILTER) RENAME(deInterlaceBlendCubic)(dstBlock, dstStride); */ /* only deblock if we have 2 blocks */ if(y + 8 < height){ if(mode & V_X1_FILTER) RENAME(vertX1Filter)(dstBlock, stride, &c); else if(mode & V_DEBLOCK){ const int t= RENAME(vertClassify)(dstBlock, stride, &c); if(t==1) RENAME(doVertLowPass)(dstBlock, stride, &c); else if(t==2) RENAME(doVertDefFilter)(dstBlock, stride, &c); }else if(mode & V_A_DEBLOCK){ RENAME(do_a_deblock)(dstBlock, stride, 1, &c); } } #if HAVE_MMX_INLINE RENAME(transpose1)(tempBlock1, tempBlock2, dstBlock, dstStride); #endif /* check if we have a previous block to deblock it with dstBlock */ if(x - 8 >= 0){ #if HAVE_MMX_INLINE if(mode & H_X1_FILTER) RENAME(vertX1Filter)(tempBlock1, 16, &c); else if(mode & H_DEBLOCK){ //START_TIMER const int t= RENAME(vertClassify)(tempBlock1, 16, &c); //STOP_TIMER("dc & minmax") if(t==1) RENAME(doVertLowPass)(tempBlock1, 16, &c); else if(t==2) RENAME(doVertDefFilter)(tempBlock1, 16, &c); }else if(mode & H_A_DEBLOCK){ RENAME(do_a_deblock)(tempBlock1, 16, 1, &c); } RENAME(transpose2)(dstBlock-4, dstStride, tempBlock1 + 4*16); #else if(mode & H_X1_FILTER) horizX1Filter(dstBlock-4, stride, QP); else if(mode & H_DEBLOCK){ #if HAVE_ALTIVEC DECLARE_ALIGNED(16, unsigned char, tempBlock)[272]; int t; transpose_16x8_char_toPackedAlign_altivec(tempBlock, dstBlock - (4 + 1), stride); t = vertClassify_altivec(tempBlock-48, 16, &c); if(t==1) { doVertLowPass_altivec(tempBlock-48, 16, &c); transpose_8x16_char_fromPackedAlign_altivec(dstBlock - (4 + 1), tempBlock, stride); } else if(t==2) { doVertDefFilter_altivec(tempBlock-48, 16, &c); transpose_8x16_char_fromPackedAlign_altivec(dstBlock - (4 + 1), tempBlock, stride); } #else const int t= RENAME(horizClassify)(dstBlock-4, stride, &c); if(t==1) RENAME(doHorizLowPass)(dstBlock-4, stride, &c); else if(t==2) RENAME(doHorizDefFilter)(dstBlock-4, stride, &c); #endif }else if(mode & H_A_DEBLOCK){ RENAME(do_a_deblock)(dstBlock-8, 1, stride, &c); } #endif //HAVE_MMX_INLINE if(mode & DERING){ //FIXME filter first line if(y>0) RENAME(dering)(dstBlock - stride - 8, stride, &c); } if(mode & TEMP_NOISE_FILTER) { RENAME(tempNoiseReducer)(dstBlock-8, stride, c.tempBlurred[isColor] + y*dstStride + x, c.tempBlurredPast[isColor] + (y>>3)*256 + (x>>3) + 256, c.ppMode.maxTmpNoise); } } dstBlock+=8; srcBlock+=8; #if HAVE_MMX_INLINE tmpXchg= tempBlock1; tempBlock1= tempBlock2; tempBlock2 = tmpXchg; #endif } if(mode & DERING){ if(y > 0) RENAME(dering)(dstBlock - dstStride - 8, dstStride, &c); } if((mode & 
TEMP_NOISE_FILTER)){ RENAME(tempNoiseReducer)(dstBlock-8, dstStride, c.tempBlurred[isColor] + y*dstStride + x, c.tempBlurredPast[isColor] + (y>>3)*256 + (x>>3) + 256, c.ppMode.maxTmpNoise); } /* did we use a tmp buffer for the last lines*/ if(y+15 >= height){ uint8_t *dstBlock= &(dst[y*dstStride]); if(width==FFABS(dstStride)) linecpy(dstBlock, tempDst + dstStride, height-y, dstStride); else{ int i; for(i=0; i<height-y; i++){ memcpy(dstBlock + i*dstStride, tempDst + (i+1)*dstStride, width); } } } /* for(x=0; x<width; x+=32){ volatile int i; i+= dstBlock[x + 7*dstStride] + dstBlock[x + 8*dstStride] + dstBlock[x + 9*dstStride] + dstBlock[x +10*dstStride] + dstBlock[x +11*dstStride] + dstBlock[x +12*dstStride]; + dstBlock[x +13*dstStride] + dstBlock[x +14*dstStride] + dstBlock[x +15*dstStride]; }*/ } #if HAVE_AMD3DNOW_INLINE __asm__ volatile("femms"); #elif HAVE_MMX_INLINE __asm__ volatile("emms"); #endif #ifdef DEBUG_BRIGHTNESS if(!isColor){ int max=1; int i; for(i=0; i<256; i++) if(yHistogram[i] > max) max=yHistogram[i]; for(i=1; i<256; i++){ int x; int start=yHistogram[i-1]/(max/256+1); int end=yHistogram[i]/(max/256+1); int inc= end > start ? 1 : -1; for(x=start; x!=end+inc; x+=inc) dst[ i*dstStride + x]+=128; } for(i=0; i<100; i+=2){ dst[ (white)*dstStride + i]+=128; dst[ (black)*dstStride + i]+=128; } } #endif *c2= c; //copy local context back }
19,793
qemu
aedbe19297907143f17b733a7ff0e0534377bed1
1
static int qemu_shutdown_requested(void) { return atomic_xchg(&shutdown_requested, 0); }
19,794
qemu
60fe637bf0e4d7989e21e50f52526444765c63b4
1
static int socket_close(void *opaque) { QEMUFileSocket *s = opaque; closesocket(s->fd); g_free(s); return 0; }
19,795
qemu
ea375f9ab8c76686dca0af8cb4f87a4eb569cad3
1
static int cpu_pre_load(void *opaque) { CPUState *env = opaque; cpu_synchronize_state(env); return 0; }
19,796
FFmpeg
5430839144c6da0160e8e0cfb0c8db01de432e94
1
static int cmv_process_header(CmvContext *s, const uint8_t *buf, const uint8_t *buf_end) { int pal_start, pal_count, i, ret, fps; if(buf_end - buf < 16) { av_log(s->avctx, AV_LOG_WARNING, "truncated header\n"); return AVERROR_INVALIDDATA; } s->width = AV_RL16(&buf[4]); s->height = AV_RL16(&buf[6]); ret = ff_set_dimensions(s->avctx, s->width, s->height); if (ret < 0) return ret; fps = AV_RL16(&buf[10]); if (fps > 0) s->avctx->time_base = (AVRational){ 1, fps }; pal_start = AV_RL16(&buf[12]); pal_count = AV_RL16(&buf[14]); buf += 16; for (i=pal_start; i<pal_start+pal_count && i<AVPALETTE_COUNT && buf_end - buf >= 3; i++) { s->palette[i] = AV_RB24(buf); buf += 3; } return 0; }
19,797
qemu
b4db54132ffeadafa9516cc553ba9548e42d42ad
1
static target_ulong h_register_process_table(PowerPCCPU *cpu, sPAPRMachineState *spapr, target_ulong opcode, target_ulong *args) { qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n", opcode, " (H_REGISTER_PROC_TBL)"); return H_FUNCTION; }
19,798
qemu
66a08cbe6ad1aebec8eecf58b3ba042e19dd1649
1
static void uhci_async_cancel_all(UHCIState *s) { UHCIQueue *queue, *nq; QTAILQ_FOREACH_SAFE(queue, &s->queues, next, nq) { uhci_queue_free(queue); } }
19,799
qemu
5b1ded224f46d56053f419cf24c1335b6dde40ee
1
static uint64_t get_migration_pass(void) { QDict *rsp, *rsp_return, *rsp_ram; uint64_t result; rsp = return_or_event(qmp("{ 'execute': 'query-migrate' }")); rsp_return = qdict_get_qdict(rsp, "return"); if (!qdict_haskey(rsp_return, "ram")) { /* Still in setup */ result = 0; } else { rsp_ram = qdict_get_qdict(rsp_return, "ram"); result = qdict_get_try_int(rsp_ram, "dirty-sync-count", 0); QDECREF(rsp); } return result; }
19,800
qemu
c919297379e9980c2bcc4d2053addbc1fd6d762b
1
static int img_compare(int argc, char **argv) { const char *fmt1 = NULL, *fmt2 = NULL, *cache, *filename1, *filename2; BlockBackend *blk1, *blk2; BlockDriverState *bs1, *bs2; int64_t total_sectors1, total_sectors2; uint8_t *buf1 = NULL, *buf2 = NULL; int pnum1, pnum2; int allocated1, allocated2; int ret = 0; /* return value - 0 Ident, 1 Different, >1 Error */ bool progress = false, quiet = false, strict = false; int flags; bool writethrough; int64_t total_sectors; int64_t sector_num = 0; int64_t nb_sectors; int c, pnum; uint64_t progress_base; bool image_opts = false; cache = BDRV_DEFAULT_CACHE; for (;;) { static const struct option long_options[] = { {"help", no_argument, 0, 'h'}, {"object", required_argument, 0, OPTION_OBJECT}, {"image-opts", no_argument, 0, OPTION_IMAGE_OPTS}, {0, 0, 0, 0} }; c = getopt_long(argc, argv, "hf:F:T:pqs", long_options, NULL); if (c == -1) { break; } switch (c) { case '?': case 'h': help(); break; case 'f': fmt1 = optarg; break; case 'F': fmt2 = optarg; break; case 'T': cache = optarg; break; case 'p': progress = true; break; case 'q': quiet = true; break; case 's': strict = true; break; case OPTION_OBJECT: { QemuOpts *opts; opts = qemu_opts_parse_noisily(&qemu_object_opts, optarg, true); if (!opts) { ret = 2; goto out4; } } break; case OPTION_IMAGE_OPTS: image_opts = true; break; } } /* Progress is not shown in Quiet mode */ if (quiet) { progress = false; } if (optind != argc - 2) { error_exit("Expecting two image file names"); } filename1 = argv[optind++]; filename2 = argv[optind++]; if (qemu_opts_foreach(&qemu_object_opts, user_creatable_add_opts_foreach, NULL, NULL)) { ret = 2; goto out4; } /* Initialize before goto out */ qemu_progress_init(progress, 2.0); flags = 0; ret = bdrv_parse_cache_mode(cache, &flags, &writethrough); if (ret < 0) { error_report("Invalid source cache option: %s", cache); ret = 2; goto out3; } blk1 = img_open(image_opts, filename1, fmt1, flags, writethrough, quiet); if (!blk1) { ret = 2; goto out3; } blk2 = img_open(image_opts, filename2, fmt2, flags, writethrough, quiet); if (!blk2) { ret = 2; goto out2; } bs1 = blk_bs(blk1); bs2 = blk_bs(blk2); buf1 = blk_blockalign(blk1, IO_BUF_SIZE); buf2 = blk_blockalign(blk2, IO_BUF_SIZE); total_sectors1 = blk_nb_sectors(blk1); if (total_sectors1 < 0) { error_report("Can't get size of %s: %s", filename1, strerror(-total_sectors1)); ret = 4; goto out; } total_sectors2 = blk_nb_sectors(blk2); if (total_sectors2 < 0) { error_report("Can't get size of %s: %s", filename2, strerror(-total_sectors2)); ret = 4; goto out; } total_sectors = MIN(total_sectors1, total_sectors2); progress_base = MAX(total_sectors1, total_sectors2); qemu_progress_print(0, 100); if (strict && total_sectors1 != total_sectors2) { ret = 1; qprintf(quiet, "Strict mode: Image size mismatch!\n"); goto out; } for (;;) { int64_t status1, status2; BlockDriverState *file; nb_sectors = sectors_to_process(total_sectors, sector_num); if (nb_sectors <= 0) { break; } status1 = bdrv_get_block_status_above(bs1, NULL, sector_num, total_sectors1 - sector_num, &pnum1, &file); if (status1 < 0) { ret = 3; error_report("Sector allocation test failed for %s", filename1); goto out; } allocated1 = status1 & BDRV_BLOCK_ALLOCATED; status2 = bdrv_get_block_status_above(bs2, NULL, sector_num, total_sectors2 - sector_num, &pnum2, &file); if (status2 < 0) { ret = 3; error_report("Sector allocation test failed for %s", filename2); goto out; } allocated2 = status2 & BDRV_BLOCK_ALLOCATED; if (pnum1) { nb_sectors = MIN(nb_sectors, pnum1); } if (pnum2) { 
nb_sectors = MIN(nb_sectors, pnum2); } if (strict) { if ((status1 & ~BDRV_BLOCK_OFFSET_MASK) != (status2 & ~BDRV_BLOCK_OFFSET_MASK)) { ret = 1; qprintf(quiet, "Strict mode: Offset %" PRId64 " block status mismatch!\n", sectors_to_bytes(sector_num)); goto out; } } if ((status1 & BDRV_BLOCK_ZERO) && (status2 & BDRV_BLOCK_ZERO)) { nb_sectors = MIN(pnum1, pnum2); } else if (allocated1 == allocated2) { if (allocated1) { ret = blk_pread(blk1, sector_num << BDRV_SECTOR_BITS, buf1, nb_sectors << BDRV_SECTOR_BITS); if (ret < 0) { error_report("Error while reading offset %" PRId64 " of %s:" " %s", sectors_to_bytes(sector_num), filename1, strerror(-ret)); ret = 4; goto out; } ret = blk_pread(blk2, sector_num << BDRV_SECTOR_BITS, buf2, nb_sectors << BDRV_SECTOR_BITS); if (ret < 0) { error_report("Error while reading offset %" PRId64 " of %s: %s", sectors_to_bytes(sector_num), filename2, strerror(-ret)); ret = 4; goto out; } ret = compare_sectors(buf1, buf2, nb_sectors, &pnum); if (ret || pnum != nb_sectors) { qprintf(quiet, "Content mismatch at offset %" PRId64 "!\n", sectors_to_bytes( ret ? sector_num : sector_num + pnum)); ret = 1; goto out; } } } else { if (allocated1) { ret = check_empty_sectors(blk1, sector_num, nb_sectors, filename1, buf1, quiet); } else { ret = check_empty_sectors(blk2, sector_num, nb_sectors, filename2, buf1, quiet); } if (ret) { if (ret < 0) { error_report("Error while reading offset %" PRId64 ": %s", sectors_to_bytes(sector_num), strerror(-ret)); ret = 4; } goto out; } } sector_num += nb_sectors; qemu_progress_print(((float) nb_sectors / progress_base)*100, 100); } if (total_sectors1 != total_sectors2) { BlockBackend *blk_over; int64_t total_sectors_over; const char *filename_over; qprintf(quiet, "Warning: Image size mismatch!\n"); if (total_sectors1 > total_sectors2) { total_sectors_over = total_sectors1; blk_over = blk1; filename_over = filename1; } else { total_sectors_over = total_sectors2; blk_over = blk2; filename_over = filename2; } for (;;) { nb_sectors = sectors_to_process(total_sectors_over, sector_num); if (nb_sectors <= 0) { break; } ret = bdrv_is_allocated_above(blk_bs(blk_over), NULL, sector_num, nb_sectors, &pnum); if (ret < 0) { ret = 3; error_report("Sector allocation test failed for %s", filename_over); goto out; } nb_sectors = pnum; if (ret) { ret = check_empty_sectors(blk_over, sector_num, nb_sectors, filename_over, buf1, quiet); if (ret) { if (ret < 0) { error_report("Error while reading offset %" PRId64 " of %s: %s", sectors_to_bytes(sector_num), filename_over, strerror(-ret)); ret = 4; } goto out; } } sector_num += nb_sectors; qemu_progress_print(((float) nb_sectors / progress_base)*100, 100); } } qprintf(quiet, "Images are identical.\n"); ret = 0; out: qemu_vfree(buf1); qemu_vfree(buf2); blk_unref(blk2); out2: blk_unref(blk1); out3: qemu_progress_end(); out4: return ret; }
19,801
qemu
6c352ca9b4ee3e1e286ea9e8434bd8e69ac7d0d8
1
static void vmxnet3_io_bar0_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { VMXNET3State *s = opaque; if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_TXPROD, VMXNET3_DEVICE_MAX_TX_QUEUES, VMXNET3_REG_ALIGN)) { int tx_queue_idx = VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_TXPROD, VMXNET3_REG_ALIGN); assert(tx_queue_idx <= s->txq_num); vmxnet3_process_tx_queue(s, tx_queue_idx); return; } if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_IMR, VMXNET3_MAX_INTRS, VMXNET3_REG_ALIGN)) { int l = VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_IMR, VMXNET3_REG_ALIGN); VMW_CBPRN("Interrupt mask for line %d written: 0x%" PRIx64, l, val); vmxnet3_on_interrupt_mask_changed(s, l, val); return; } if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD, VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN) || VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD2, VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN)) { return; } VMW_WRPRN("BAR0 unknown write [%" PRIx64 "] = %" PRIx64 ", size %d", (uint64_t) addr, val, size); }
19,802
FFmpeg
5ab1efb9d0dc65e748a0291b67915e35578b302e
1
static av_cold int hevc_decode_free(AVCodecContext *avctx) { HEVCContext *s = avctx->priv_data; HEVCLocalContext *lc = s->HEVClc; int i; pic_arrays_free(s); av_freep(&lc->edge_emu_buffer); av_freep(&s->md5_ctx); for(i=0; i < s->nals_allocated; i++) { av_freep(&s->skipped_bytes_pos_nal[i]); } av_freep(&s->skipped_bytes_pos_size_nal); av_freep(&s->skipped_bytes_nal); av_freep(&s->skipped_bytes_pos_nal); av_freep(&s->cabac_state); av_frame_free(&s->tmp_frame); av_frame_free(&s->output_frame); for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) { ff_hevc_unref_frame(s, &s->DPB[i], ~0); av_frame_free(&s->DPB[i].frame); } for (i = 0; i < FF_ARRAY_ELEMS(s->vps_list); i++) av_freep(&s->vps_list[i]); for (i = 0; i < FF_ARRAY_ELEMS(s->sps_list); i++) av_buffer_unref(&s->sps_list[i]); for (i = 0; i < FF_ARRAY_ELEMS(s->pps_list); i++) av_buffer_unref(&s->pps_list[i]); av_freep(&s->sh.entry_point_offset); av_freep(&s->sh.offset); av_freep(&s->sh.size); for (i = 1; i < s->threads_number; i++) { lc = s->HEVClcList[i]; if (lc) { av_freep(&lc->edge_emu_buffer); av_freep(&s->HEVClcList[i]); av_freep(&s->sList[i]); } } av_freep(&s->HEVClcList[0]); for (i = 0; i < s->nals_allocated; i++) av_freep(&s->nals[i].rbsp_buffer); av_freep(&s->nals); s->nals_allocated = 0; return 0; }
19,803
FFmpeg
3cb0bec6870cf0bb7879f7bfd4119ef39a02a464
1
static void handle_child_exit(int sig) { pid_t pid; int status; while ((pid = waitpid(-1, &status, WNOHANG)) > 0) { FFServerStream *feed; for (feed = config.first_feed; feed; feed = feed->next) { if (feed->pid == pid) { int uptime = time(0) - feed->pid_start; feed->pid = 0; fprintf(stderr, "%s: Pid %d exited with status %d after %d seconds\n", feed->filename, pid, status, uptime); if (uptime < 30) /* Turn off any more restarts */ feed->child_argv = 0; } } } need_to_start_children = 1; }
19,804
qemu
1534ee93cc6be992c05577886b24bd44c37ecff6
1
static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data, ram_addr_t current_addr, RAMBlock *block, ram_addr_t offset, int cont, bool last_stage) { int encoded_len = 0, bytes_sent = -1; uint8_t *prev_cached_page; if (!cache_is_cached(XBZRLE.cache, current_addr)) { if (!last_stage) { if (cache_insert(XBZRLE.cache, current_addr, current_data) == -1) { return -1; } } acct_info.xbzrle_cache_miss++; return -1; } prev_cached_page = get_cached_data(XBZRLE.cache, current_addr); /* save current buffer into memory */ memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE); /* XBZRLE encoding (if there is no overflow) */ encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE, XBZRLE.encoded_buf, TARGET_PAGE_SIZE); if (encoded_len == 0) { DPRINTF("Skipping unmodified page\n"); return 0; } else if (encoded_len == -1) { DPRINTF("Overflow\n"); acct_info.xbzrle_overflows++; /* update data in the cache */ memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE); return -1; } /* we need to update the data in the cache, in order to get the same data */ if (!last_stage) { memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE); } /* Send XBZRLE based compressed page */ bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE); qemu_put_byte(f, ENCODING_FLAG_XBZRLE); qemu_put_be16(f, encoded_len); qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len); bytes_sent += encoded_len + 1 + 2; acct_info.xbzrle_pages++; acct_info.xbzrle_bytes += bytes_sent; return bytes_sent; }
19,805
qemu
09b9418c6d085a0728372aa760ebd10128a020b1
1
static void do_info_registers(Monitor *mon) { CPUState *env; env = mon_get_cpu(); if (!env) return; #ifdef TARGET_I386 cpu_dump_state(env, (FILE *)mon, monitor_fprintf, X86_DUMP_FPU); #else cpu_dump_state(env, (FILE *)mon, monitor_fprintf, 0); #endif }
19,806
FFmpeg
bc7e128a6e8e2a79d0ff7cab5e8a799b3ea042ea
1
static int wsaud_read_packet(AVFormatContext *s, AVPacket *pkt) { AVIOContext *pb = s->pb; unsigned char preamble[AUD_CHUNK_PREAMBLE_SIZE]; unsigned int chunk_size; int ret = 0; AVStream *st = s->streams[0]; if (avio_read(pb, preamble, AUD_CHUNK_PREAMBLE_SIZE) != AUD_CHUNK_PREAMBLE_SIZE) return AVERROR(EIO); /* validate the chunk */ if (AV_RL32(&preamble[4]) != AUD_CHUNK_SIGNATURE) return AVERROR_INVALIDDATA; chunk_size = AV_RL16(&preamble[0]); if (st->codecpar->codec_id == AV_CODEC_ID_WESTWOOD_SND1) { /* For Westwood SND1 audio we need to add the output size and input size to the start of the packet to match what is in VQA. Specifically, this is needed to signal when a packet should be decoding as raw 8-bit pcm or variable-size ADPCM. */ int out_size = AV_RL16(&preamble[2]); if ((ret = av_new_packet(pkt, chunk_size + 4)) < 0) return ret; if ((ret = avio_read(pb, &pkt->data[4], chunk_size)) != chunk_size) return ret < 0 ? ret : AVERROR(EIO); AV_WL16(&pkt->data[0], out_size); AV_WL16(&pkt->data[2], chunk_size); pkt->duration = out_size; } else { ret = av_get_packet(pb, pkt, chunk_size); if (ret != chunk_size) return AVERROR(EIO); /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */ pkt->duration = (chunk_size * 2) / st->codecpar->channels; } pkt->stream_index = st->index; return ret; }
19,807
qemu
1f51470d044852592922f91000e741c381582cdc
1
static int qemu_chr_open_win(QemuOpts *opts, CharDriverState **_chr) { const char *filename = qemu_opt_get(opts, "path"); CharDriverState *chr; WinCharState *s; chr = g_malloc0(sizeof(CharDriverState)); s = g_malloc0(sizeof(WinCharState)); chr->opaque = s; chr->chr_write = win_chr_write; chr->chr_close = win_chr_close; if (win_chr_init(chr, filename) < 0) { g_free(s); g_free(chr); return -EIO; } qemu_chr_generic_open(chr); *_chr = chr; return 0; }
19,810
FFmpeg
97cda76a69afe59dae6168f8bf13a9170343d380
1
SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat, int dstW, int dstH, enum PixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param) { SwsContext *c; int i; int usesVFilter, usesHFilter; int unscaled; int srcRange, dstRange; SwsFilter dummyFilter= {NULL, NULL, NULL, NULL}; #if ARCH_X86 if (flags & SWS_CPU_CAPS_MMX) __asm__ volatile("emms\n\t"::: "memory"); #endif #if !CONFIG_RUNTIME_CPUDETECT //ensure that the flags match the compiled variant if cpudetect is off flags &= ~(SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2|SWS_CPU_CAPS_3DNOW|SWS_CPU_CAPS_ALTIVEC|SWS_CPU_CAPS_BFIN); flags |= ff_hardcodedcpuflags(); #endif /* CONFIG_RUNTIME_CPUDETECT */ if (!rgb15to16) sws_rgb2rgb_init(flags); unscaled = (srcW == dstW && srcH == dstH); srcRange = handle_jpeg(&srcFormat); dstRange = handle_jpeg(&dstFormat); if (!isSupportedIn(srcFormat)) { av_log(NULL, AV_LOG_ERROR, "swScaler: %s is not supported as input pixel format\n", sws_format_name(srcFormat)); return NULL; } if (!isSupportedOut(dstFormat)) { av_log(NULL, AV_LOG_ERROR, "swScaler: %s is not supported as output pixel format\n", sws_format_name(dstFormat)); return NULL; } i= flags & ( SWS_POINT |SWS_AREA |SWS_BILINEAR |SWS_FAST_BILINEAR |SWS_BICUBIC |SWS_X |SWS_GAUSS |SWS_LANCZOS |SWS_SINC |SWS_SPLINE |SWS_BICUBLIN); if(!i || (i & (i-1))) { av_log(NULL, AV_LOG_ERROR, "swScaler: Exactly one scaler algorithm must be chosen\n"); return NULL; } /* sanity check */ if (srcW<4 || srcH<1 || dstW<8 || dstH<1) { //FIXME check if these are enough and try to lowwer them after fixing the relevant parts of the code av_log(NULL, AV_LOG_ERROR, "swScaler: %dx%d -> %dx%d is invalid scaling dimension\n", srcW, srcH, dstW, dstH); return NULL; } if(srcW > VOFW || dstW > VOFW) { av_log(NULL, AV_LOG_ERROR, "swScaler: Compile-time maximum width is "AV_STRINGIFY(VOFW)" change VOF/VOFW and recompile\n"); return NULL; } if (!dstFilter) dstFilter= &dummyFilter; if (!srcFilter) srcFilter= &dummyFilter; FF_ALLOCZ_OR_GOTO(NULL, c, sizeof(SwsContext), fail); c->av_class = &sws_context_class; c->srcW= srcW; c->srcH= srcH; c->dstW= dstW; c->dstH= dstH; c->lumXInc= ((srcW<<16) + (dstW>>1))/dstW; c->lumYInc= ((srcH<<16) + (dstH>>1))/dstH; c->flags= flags; c->dstFormat= dstFormat; c->srcFormat= srcFormat; c->dstFormatBpp = av_get_bits_per_pixel(&av_pix_fmt_descriptors[dstFormat]); c->srcFormatBpp = av_get_bits_per_pixel(&av_pix_fmt_descriptors[srcFormat]); c->vRounder= 4* 0x0001000100010001ULL; usesVFilter = (srcFilter->lumV && srcFilter->lumV->length>1) || (srcFilter->chrV && srcFilter->chrV->length>1) || (dstFilter->lumV && dstFilter->lumV->length>1) || (dstFilter->chrV && dstFilter->chrV->length>1); usesHFilter = (srcFilter->lumH && srcFilter->lumH->length>1) || (srcFilter->chrH && srcFilter->chrH->length>1) || (dstFilter->lumH && dstFilter->lumH->length>1) || (dstFilter->chrH && dstFilter->chrH->length>1); getSubSampleFactors(&c->chrSrcHSubSample, &c->chrSrcVSubSample, srcFormat); getSubSampleFactors(&c->chrDstHSubSample, &c->chrDstVSubSample, dstFormat); // reuse chroma for 2 pixels RGB/BGR unless user wants full chroma interpolation if (isAnyRGB(dstFormat) && !(flags&SWS_FULL_CHR_H_INT)) c->chrDstHSubSample=1; // drop some chroma lines if the user wants it c->vChrDrop= (flags&SWS_SRC_V_CHR_DROP_MASK)>>SWS_SRC_V_CHR_DROP_SHIFT; c->chrSrcVSubSample+= c->vChrDrop; // drop every other pixel for chroma calculation unless user wants full chroma if (isAnyRGB(srcFormat) && !(flags&SWS_FULL_CHR_H_INP) && srcFormat!=PIX_FMT_RGB8 
&& srcFormat!=PIX_FMT_BGR8 && srcFormat!=PIX_FMT_RGB4 && srcFormat!=PIX_FMT_BGR4 && srcFormat!=PIX_FMT_RGB4_BYTE && srcFormat!=PIX_FMT_BGR4_BYTE && ((dstW>>c->chrDstHSubSample) <= (srcW>>1) || (flags&(SWS_FAST_BILINEAR|SWS_POINT)))) c->chrSrcHSubSample=1; if (param) { c->param[0] = param[0]; c->param[1] = param[1]; } else { c->param[0] = c->param[1] = SWS_PARAM_DEFAULT; } // Note the -((-x)>>y) is so that we always round toward +inf. c->chrSrcW= -((-srcW) >> c->chrSrcHSubSample); c->chrSrcH= -((-srcH) >> c->chrSrcVSubSample); c->chrDstW= -((-dstW) >> c->chrDstHSubSample); c->chrDstH= -((-dstH) >> c->chrDstVSubSample); sws_setColorspaceDetails(c, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT], srcRange, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT] /* FIXME*/, dstRange, 0, 1<<16, 1<<16); /* unscaled special cases */ if (unscaled && !usesHFilter && !usesVFilter && (srcRange == dstRange || isAnyRGB(dstFormat))) { ff_get_unscaled_swscale(c); if (c->swScale) { if (flags&SWS_PRINT_INFO) av_log(c, AV_LOG_INFO, "using unscaled %s -> %s special converter\n", sws_format_name(srcFormat), sws_format_name(dstFormat)); return c; } } if (flags & SWS_CPU_CAPS_MMX2) { c->canMMX2BeUsed= (dstW >=srcW && (dstW&31)==0 && (srcW&15)==0) ? 1 : 0; if (!c->canMMX2BeUsed && dstW >=srcW && (srcW&15)==0 && (flags&SWS_FAST_BILINEAR)) { if (flags&SWS_PRINT_INFO) av_log(c, AV_LOG_INFO, "output width is not a multiple of 32 -> no MMX2 scaler\n"); } if (usesHFilter) c->canMMX2BeUsed=0; } else c->canMMX2BeUsed=0; c->chrXInc= ((c->chrSrcW<<16) + (c->chrDstW>>1))/c->chrDstW; c->chrYInc= ((c->chrSrcH<<16) + (c->chrDstH>>1))/c->chrDstH; // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst // but only for the FAST_BILINEAR mode otherwise do correct scaling // n-2 is the last chrominance sample available // this is not perfect, but no one should notice the difference, the more correct variant // would be like the vertical one, but that would require some special code for the // first and last pixel if (flags&SWS_FAST_BILINEAR) { if (c->canMMX2BeUsed) { c->lumXInc+= 20; c->chrXInc+= 20; } //we don't use the x86 asm scaler if MMX is available else if (flags & SWS_CPU_CAPS_MMX) { c->lumXInc = ((srcW-2)<<16)/(dstW-2) - 20; c->chrXInc = ((c->chrSrcW-2)<<16)/(c->chrDstW-2) - 20; } } /* precalculate horizontal scaler filter coefficients */ { #if ARCH_X86 && (HAVE_MMX2 || CONFIG_RUNTIME_CPUDETECT) && CONFIG_GPL // can't downscale !!! 
if (c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR)) { c->lumMmx2FilterCodeSize = initMMX2HScaler( dstW, c->lumXInc, NULL, NULL, NULL, 8); c->chrMmx2FilterCodeSize = initMMX2HScaler(c->chrDstW, c->chrXInc, NULL, NULL, NULL, 4); #ifdef MAP_ANONYMOUS c->lumMmx2FilterCode = mmap(NULL, c->lumMmx2FilterCodeSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); c->chrMmx2FilterCode = mmap(NULL, c->chrMmx2FilterCodeSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); #elif HAVE_VIRTUALALLOC c->lumMmx2FilterCode = VirtualAlloc(NULL, c->lumMmx2FilterCodeSize, MEM_COMMIT, PAGE_EXECUTE_READWRITE); c->chrMmx2FilterCode = VirtualAlloc(NULL, c->chrMmx2FilterCodeSize, MEM_COMMIT, PAGE_EXECUTE_READWRITE); #else c->lumMmx2FilterCode = av_malloc(c->lumMmx2FilterCodeSize); c->chrMmx2FilterCode = av_malloc(c->chrMmx2FilterCodeSize); #endif FF_ALLOCZ_OR_GOTO(c, c->hLumFilter , (dstW /8+8)*sizeof(int16_t), fail); FF_ALLOCZ_OR_GOTO(c, c->hChrFilter , (c->chrDstW /4+8)*sizeof(int16_t), fail); FF_ALLOCZ_OR_GOTO(c, c->hLumFilterPos, (dstW /2/8+8)*sizeof(int32_t), fail); FF_ALLOCZ_OR_GOTO(c, c->hChrFilterPos, (c->chrDstW/2/4+8)*sizeof(int32_t), fail); initMMX2HScaler( dstW, c->lumXInc, c->lumMmx2FilterCode, c->hLumFilter, c->hLumFilterPos, 8); initMMX2HScaler(c->chrDstW, c->chrXInc, c->chrMmx2FilterCode, c->hChrFilter, c->hChrFilterPos, 4); #ifdef MAP_ANONYMOUS mprotect(c->lumMmx2FilterCode, c->lumMmx2FilterCodeSize, PROT_EXEC | PROT_READ); mprotect(c->chrMmx2FilterCode, c->chrMmx2FilterCodeSize, PROT_EXEC | PROT_READ); #endif } else #endif /* ARCH_X86 && (HAVE_MMX2 || CONFIG_RUNTIME_CPUDETECT) && CONFIG_GPL */ { const int filterAlign= (flags & SWS_CPU_CAPS_MMX) ? 4 : (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 : 1; if (initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc, srcW , dstW, filterAlign, 1<<14, (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags, srcFilter->lumH, dstFilter->lumH, c->param) < 0) if (initFilter(&c->hChrFilter, &c->hChrFilterPos, &c->hChrFilterSize, c->chrXInc, c->chrSrcW, c->chrDstW, filterAlign, 1<<14, (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags, srcFilter->chrH, dstFilter->chrH, c->param) < 0) } } // initialize horizontal stuff /* precalculate vertical scaler filter coefficients */ { const int filterAlign= (flags & SWS_CPU_CAPS_MMX) && (flags & SWS_ACCURATE_RND) ? 2 : (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 : 1; if (initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc, srcH , dstH, filterAlign, (1<<12), (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags, srcFilter->lumV, dstFilter->lumV, c->param) < 0) if (initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc, c->chrSrcH, c->chrDstH, filterAlign, (1<<12), (flags&SWS_BICUBLIN) ? 
(flags|SWS_BILINEAR) : flags, srcFilter->chrV, dstFilter->chrV, c->param) < 0) #if ARCH_PPC && (HAVE_ALTIVEC || CONFIG_RUNTIME_CPUDETECT) FF_ALLOC_OR_GOTO(c, c->vYCoeffsBank, sizeof (vector signed short)*c->vLumFilterSize*c->dstH, fail); FF_ALLOC_OR_GOTO(c, c->vCCoeffsBank, sizeof (vector signed short)*c->vChrFilterSize*c->chrDstH, fail); for (i=0;i<c->vLumFilterSize*c->dstH;i++) { int j; short *p = (short *)&c->vYCoeffsBank[i]; for (j=0;j<8;j++) p[j] = c->vLumFilter[i]; } for (i=0;i<c->vChrFilterSize*c->chrDstH;i++) { int j; short *p = (short *)&c->vCCoeffsBank[i]; for (j=0;j<8;j++) p[j] = c->vChrFilter[i]; } #endif } // calculate buffer sizes so that they won't run out while handling these damn slices c->vLumBufSize= c->vLumFilterSize; c->vChrBufSize= c->vChrFilterSize; for (i=0; i<dstH; i++) { int chrI= i*c->chrDstH / dstH; int nextSlice= FFMAX(c->vLumFilterPos[i ] + c->vLumFilterSize - 1, ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<c->chrSrcVSubSample)); nextSlice>>= c->chrSrcVSubSample; nextSlice<<= c->chrSrcVSubSample; if (c->vLumFilterPos[i ] + c->vLumBufSize < nextSlice) c->vLumBufSize= nextSlice - c->vLumFilterPos[i]; if (c->vChrFilterPos[chrI] + c->vChrBufSize < (nextSlice>>c->chrSrcVSubSample)) c->vChrBufSize= (nextSlice>>c->chrSrcVSubSample) - c->vChrFilterPos[chrI]; } // allocate pixbufs (we use dynamic allocation because otherwise we would need to // allocate several megabytes to handle all possible cases) FF_ALLOC_OR_GOTO(c, c->lumPixBuf, c->vLumBufSize*2*sizeof(int16_t*), fail); FF_ALLOC_OR_GOTO(c, c->chrPixBuf, c->vChrBufSize*2*sizeof(int16_t*), fail); if (CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat) && isALPHA(c->dstFormat)) FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf, c->vLumBufSize*2*sizeof(int16_t*), fail); //Note we need at least one pixel more at the end because of the MMX code (just in case someone wanna replace the 4000/8000) /* align at 16 bytes for AltiVec */ for (i=0; i<c->vLumBufSize; i++) { FF_ALLOCZ_OR_GOTO(c, c->lumPixBuf[i+c->vLumBufSize], VOF+1, fail); c->lumPixBuf[i] = c->lumPixBuf[i+c->vLumBufSize]; } for (i=0; i<c->vChrBufSize; i++) { FF_ALLOC_OR_GOTO(c, c->chrPixBuf[i+c->vChrBufSize], (VOF+1)*2, fail); c->chrPixBuf[i] = c->chrPixBuf[i+c->vChrBufSize]; } if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) for (i=0; i<c->vLumBufSize; i++) { FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf[i+c->vLumBufSize], VOF+1, fail); c->alpPixBuf[i] = c->alpPixBuf[i+c->vLumBufSize]; } //try to avoid drawing green stuff between the right end and the stride end for (i=0; i<c->vChrBufSize; i++) memset(c->chrPixBuf[i], 64, (VOF+1)*2); assert(2*VOFW == VOF); assert(c->chrDstH <= dstH); if (flags&SWS_PRINT_INFO) { if (flags&SWS_FAST_BILINEAR) av_log(c, AV_LOG_INFO, "FAST_BILINEAR scaler, "); else if (flags&SWS_BILINEAR) av_log(c, AV_LOG_INFO, "BILINEAR scaler, "); else if (flags&SWS_BICUBIC) av_log(c, AV_LOG_INFO, "BICUBIC scaler, "); else if (flags&SWS_X) av_log(c, AV_LOG_INFO, "Experimental scaler, "); else if (flags&SWS_POINT) av_log(c, AV_LOG_INFO, "Nearest Neighbor / POINT scaler, "); else if (flags&SWS_AREA) av_log(c, AV_LOG_INFO, "Area Averaging scaler, "); else if (flags&SWS_BICUBLIN) av_log(c, AV_LOG_INFO, "luma BICUBIC / chroma BILINEAR scaler, "); else if (flags&SWS_GAUSS) av_log(c, AV_LOG_INFO, "Gaussian scaler, "); else if (flags&SWS_SINC) av_log(c, AV_LOG_INFO, "Sinc scaler, "); else if (flags&SWS_LANCZOS) av_log(c, AV_LOG_INFO, "Lanczos scaler, "); else if (flags&SWS_SPLINE) av_log(c, AV_LOG_INFO, "Bicubic spline scaler, "); else av_log(c, AV_LOG_INFO, "ehh flags invalid?! 
"); av_log(c, AV_LOG_INFO, "from %s to %s%s ", sws_format_name(srcFormat), #ifdef DITHER1XBPP dstFormat == PIX_FMT_BGR555 || dstFormat == PIX_FMT_BGR565 ? "dithered " : "", #else "", #endif sws_format_name(dstFormat)); if (flags & SWS_CPU_CAPS_MMX2) av_log(c, AV_LOG_INFO, "using MMX2\n"); else if (flags & SWS_CPU_CAPS_3DNOW) av_log(c, AV_LOG_INFO, "using 3DNOW\n"); else if (flags & SWS_CPU_CAPS_MMX) av_log(c, AV_LOG_INFO, "using MMX\n"); else if (flags & SWS_CPU_CAPS_ALTIVEC) av_log(c, AV_LOG_INFO, "using AltiVec\n"); else av_log(c, AV_LOG_INFO, "using C\n"); if (flags & SWS_CPU_CAPS_MMX) { if (c->canMMX2BeUsed && (flags&SWS_FAST_BILINEAR)) av_log(c, AV_LOG_VERBOSE, "using FAST_BILINEAR MMX2 scaler for horizontal scaling\n"); else { if (c->hLumFilterSize==4) av_log(c, AV_LOG_VERBOSE, "using 4-tap MMX scaler for horizontal luminance scaling\n"); else if (c->hLumFilterSize==8) av_log(c, AV_LOG_VERBOSE, "using 8-tap MMX scaler for horizontal luminance scaling\n"); else av_log(c, AV_LOG_VERBOSE, "using n-tap MMX scaler for horizontal luminance scaling\n"); if (c->hChrFilterSize==4) av_log(c, AV_LOG_VERBOSE, "using 4-tap MMX scaler for horizontal chrominance scaling\n"); else if (c->hChrFilterSize==8) av_log(c, AV_LOG_VERBOSE, "using 8-tap MMX scaler for horizontal chrominance scaling\n"); else av_log(c, AV_LOG_VERBOSE, "using n-tap MMX scaler for horizontal chrominance scaling\n"); } } else { #if ARCH_X86 av_log(c, AV_LOG_VERBOSE, "using x86 asm scaler for horizontal scaling\n"); #else if (flags & SWS_FAST_BILINEAR) av_log(c, AV_LOG_VERBOSE, "using FAST_BILINEAR C scaler for horizontal scaling\n"); else av_log(c, AV_LOG_VERBOSE, "using C scaler for horizontal scaling\n"); #endif } if (isPlanarYUV(dstFormat)) { if (c->vLumFilterSize==1) av_log(c, AV_LOG_VERBOSE, "using 1-tap %s \"scaler\" for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"); else av_log(c, AV_LOG_VERBOSE, "using n-tap %s scaler for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"); } else { if (c->vLumFilterSize==1 && c->vChrFilterSize==2) av_log(c, AV_LOG_VERBOSE, "using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n" " 2-tap scaler for vertical chrominance scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"); else if (c->vLumFilterSize==2 && c->vChrFilterSize==2) av_log(c, AV_LOG_VERBOSE, "using 2-tap linear %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"); else av_log(c, AV_LOG_VERBOSE, "using n-tap %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"); } if (dstFormat==PIX_FMT_BGR24) av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR24 converter\n", (flags & SWS_CPU_CAPS_MMX2) ? "MMX2" : ((flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C")); else if (dstFormat==PIX_FMT_RGB32) av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR32 converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"); else if (dstFormat==PIX_FMT_BGR565) av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR16 converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"); else if (dstFormat==PIX_FMT_BGR555) av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR15 converter\n", (flags & SWS_CPU_CAPS_MMX) ? 
"MMX" : "C"); av_log(c, AV_LOG_VERBOSE, "%dx%d -> %dx%d\n", srcW, srcH, dstW, dstH); av_log(c, AV_LOG_DEBUG, "lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n", c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc); av_log(c, AV_LOG_DEBUG, "chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n", c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH, c->chrXInc, c->chrYInc); } c->swScale= ff_getSwsFunc(c); return c; fail: sws_freeContext(c); return NULL; }
19,811
FFmpeg
301cc4f37050ed5c08aec8de6d4e22ede2ce9a9f
1
static const AVOption *av_set_number(void *obj, const char *name, double num, int den, int64_t intnum){ const AVOption *o= av_find_opt(obj, name, NULL, 0, 0); void *dst; if(!o || o->offset<=0) return NULL; if(o->max*den < num*intnum || o->min*den > num*intnum) { av_log(NULL, AV_LOG_ERROR, "Value %lf for parameter '%s' out of range\n", num, name); return NULL; } dst= ((uint8_t*)obj) + o->offset; switch(o->type){ case FF_OPT_TYPE_FLAGS: case FF_OPT_TYPE_INT: *(int *)dst= llrint(num/den)*intnum; break; case FF_OPT_TYPE_INT64: *(int64_t *)dst= llrint(num/den)*intnum; break; case FF_OPT_TYPE_FLOAT: *(float *)dst= num*intnum/den; break; case FF_OPT_TYPE_DOUBLE:*(double *)dst= num*intnum/den; break; case FF_OPT_TYPE_RATIONAL: if((int)num == num) *(AVRational*)dst= (AVRational){num*intnum, den}; else *(AVRational*)dst= av_d2q(num*intnum/den, 1<<24); break; default: return NULL; } return o; }
19,812
FFmpeg
1a3598aae768465a8efc8475b6df5a8261bc62fc
1
static void jpeg2000_flush(Jpeg2000DecoderContext *s) { if (*s->buf == 0xff) s->buf++; s->bit_index = 8; s->buf++; }
19,813
qemu
0188fadb7fe460d8c4c743372b1f7b25773e183e
1
static void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUMBState *env) { struct target_signal_frame *frame; abi_ulong frame_addr; int err = 0; int i; frame_addr = get_sigframe(ka, env, sizeof *frame); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) goto badframe; /* Save the mask. */ __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask); if (err) goto badframe; for(i = 1; i < TARGET_NSIG_WORDS; i++) { if (__put_user(set->sig[i], &frame->extramask[i - 1])) goto badframe; } setup_sigcontext(&frame->uc.tuc_mcontext, env); /* Set up to return from userspace. If provided, use a stub already in userspace. */ /* minus 8 is offset to cater for "rtsd r15,8" offset */ if (ka->sa_flags & TARGET_SA_RESTORER) { env->regs[15] = ((unsigned long)ka->sa_restorer)-8; } else { uint32_t t; /* Note, these encodings are _big endian_! */ /* addi r12, r0, __NR_sigreturn */ t = 0x31800000UL | TARGET_NR_sigreturn; __put_user(t, frame->tramp + 0); /* brki r14, 0x8 */ t = 0xb9cc0008UL; __put_user(t, frame->tramp + 1); /* Return from sighandler will jump to the tramp. Negative 8 offset because return is rtsd r15, 8 */ env->regs[15] = ((unsigned long)frame->tramp) - 8; } if (err) goto badframe; /* Set up registers for signal handler */ env->regs[1] = frame_addr; /* Signal handler args: */ env->regs[5] = sig; /* Arg 0: signum */ env->regs[6] = 0; /* arg 1: sigcontext */ env->regs[7] = frame_addr += offsetof(typeof(*frame), uc); /* Offset of 4 to handle microblaze rtid r14, 0 */ env->sregs[SR_PC] = (unsigned long)ka->_sa_handler; unlock_user_struct(frame, frame_addr, 1); return; badframe: unlock_user_struct(frame, frame_addr, 1); force_sig(TARGET_SIGSEGV); }
19,814
qemu
e305a16510afa74eec20390479e349402e55ef4c
1
static void i8257_realize(DeviceState *dev, Error **errp) { ISADevice *isa = ISA_DEVICE(dev); I8257State *d = I8257(dev); int i; memory_region_init_io(&d->channel_io, NULL, &channel_io_ops, d, "dma-chan", 8 << d->dshift); memory_region_add_subregion(isa_address_space_io(isa), d->base, &d->channel_io); isa_register_portio_list(isa, d->page_base, page_portio_list, d, "dma-page"); if (d->pageh_base >= 0) { isa_register_portio_list(isa, d->pageh_base, pageh_portio_list, d, "dma-pageh"); } memory_region_init_io(&d->cont_io, OBJECT(isa), &cont_io_ops, d, "dma-cont", 8 << d->dshift); memory_region_add_subregion(isa_address_space_io(isa), d->base + (8 << d->dshift), &d->cont_io); for (i = 0; i < ARRAY_SIZE(d->regs); ++i) { d->regs[i].transfer_handler = i8257_phony_handler; } d->dma_bh = qemu_bh_new(i8257_dma_run, d); }
19,815
FFmpeg
ba4ffc2b48832c7ca95ac6e48f8c4f23aa4ad3a6
1
static void flush_dpb(AVCodecContext *avctx){ H264Context *h= avctx->priv_data; int i; for(i=0; i<16; i++) h->delayed_pic[i]= NULL; h->delayed_output_pic= NULL; idr(h); }
19,816
qemu
0e180d9c8a7429c55d23d2e7855f1e490a063aaa
1
void pcie_aer_root_init(PCIDevice *dev) { uint16_t pos = dev->exp.aer_cap; pci_set_long(dev->wmask + pos + PCI_ERR_ROOT_COMMAND, PCI_ERR_ROOT_CMD_EN_MASK); pci_set_long(dev->w1cmask + pos + PCI_ERR_ROOT_STATUS, PCI_ERR_ROOT_STATUS_REPORT_MASK); }
19,817
FFmpeg
d59591fb02c29b41e5b8d611160971a4493394c2
1
static void mpegvideo_extract_headers(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t *buf, int buf_size) { ParseContext1 *pc = s->priv_data; const uint8_t *buf_end; const uint8_t *buf_start= buf; uint32_t start_code; int frame_rate_index, ext_type, bytes_left; int frame_rate_ext_n, frame_rate_ext_d; int picture_structure, top_field_first, repeat_first_field, progressive_frame; int horiz_size_ext, vert_size_ext, bit_rate_ext; //FIXME replace the crap with get_bits() s->repeat_pict = 0; buf_end = buf + buf_size; while (buf < buf_end) { start_code= -1; buf= ff_find_start_code(buf, buf_end, &start_code); bytes_left = buf_end - buf; switch(start_code) { case PICTURE_START_CODE: if (bytes_left >= 2) { s->pict_type = (buf[1] >> 3) & 7; } break; case SEQ_START_CODE: if (bytes_left >= 7) { pc->width = (buf[0] << 4) | (buf[1] >> 4); pc->height = ((buf[1] & 0x0f) << 8) | buf[2]; avcodec_set_dimensions(avctx, pc->width, pc->height); frame_rate_index = buf[3] & 0xf; pc->frame_rate.den = avctx->time_base.den = ff_frame_rate_tab[frame_rate_index].num; pc->frame_rate.num = avctx->time_base.num = ff_frame_rate_tab[frame_rate_index].den; avctx->bit_rate = ((buf[4]<<10) | (buf[5]<<2) | (buf[6]>>6))*400; avctx->codec_id = CODEC_ID_MPEG1VIDEO; avctx->sub_id = 1; } break; case EXT_START_CODE: if (bytes_left >= 1) { ext_type = (buf[0] >> 4); switch(ext_type) { case 0x1: /* sequence extension */ if (bytes_left >= 6) { horiz_size_ext = ((buf[1] & 1) << 1) | (buf[2] >> 7); vert_size_ext = (buf[2] >> 5) & 3; bit_rate_ext = ((buf[2] & 0x1F)<<7) | (buf[3]>>1); frame_rate_ext_n = (buf[5] >> 5) & 3; frame_rate_ext_d = (buf[5] & 0x1f); pc->progressive_sequence = buf[1] & (1 << 3); avctx->has_b_frames= !(buf[5] >> 7); pc->width |=(horiz_size_ext << 12); pc->height |=( vert_size_ext << 12); avctx->bit_rate += (bit_rate_ext << 18) * 400; avcodec_set_dimensions(avctx, pc->width, pc->height); avctx->time_base.den = pc->frame_rate.den * (frame_rate_ext_n + 1); avctx->time_base.num = pc->frame_rate.num * (frame_rate_ext_d + 1); avctx->codec_id = CODEC_ID_MPEG2VIDEO; avctx->sub_id = 2; /* forces MPEG2 */ } break; case 0x8: /* picture coding extension */ if (bytes_left >= 5) { picture_structure = buf[2]&3; top_field_first = buf[3] & (1 << 7); repeat_first_field = buf[3] & (1 << 1); progressive_frame = buf[4] & (1 << 7); /* check if we must repeat the frame */ if (repeat_first_field) { if (pc->progressive_sequence) { if (top_field_first) s->repeat_pict = 4; else s->repeat_pict = 2; } else if (progressive_frame) { s->repeat_pict = 1; } } } break; } } break; case -1: goto the_end; default: /* we stop parsing when we encounter a slice. It ensures that this function takes a negligible amount of time */ if (start_code >= SLICE_MIN_START_CODE && start_code <= SLICE_MAX_START_CODE) goto the_end; break; } } the_end: ; }
19,818
FFmpeg
009f829dde811af654af7110326aea3a72c05d5e
1
static inline void RENAME(yuv2yuyv422_2)(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *ubuf0, const uint16_t *ubuf1, const uint16_t *vbuf0, const uint16_t *vbuf1, const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y) { x86_reg uv_off = c->uv_off << 1; //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :( __asm__ volatile( "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t" "mov %4, %%"REG_b" \n\t" "push %%"REG_BP" \n\t" YSCALEYUV2PACKED(%%REGBP, %5, %6) WRITEYUY2(%%REGb, 8280(%5), %%REGBP) "pop %%"REG_BP" \n\t" "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t" :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest), "a" (&c->redDither), "m"(uv_off) ); }
19,819
FFmpeg
285c015f1077a1d357dacdccc22ab02fd4aefe0a
1
static int dc1394_read_header(AVFormatContext *c) { dc1394_data* dc1394 = c->priv_data; dc1394camera_list_t *list; int res, i; const struct dc1394_frame_format *fmt = NULL; const struct dc1394_frame_rate *fps = NULL; if (dc1394_read_common(c, &fmt, &fps) != 0) return -1; /* Now let us prep the hardware. */ dc1394->d = dc1394_new(); if (dc1394_camera_enumerate(dc1394->d, &list) != DC1394_SUCCESS || !list) { av_log(c, AV_LOG_ERROR, "Unable to look for an IIDC camera.\n"); goto out; } if (list->num == 0) { av_log(c, AV_LOG_ERROR, "No cameras found.\n"); goto out; } /* FIXME: To select a specific camera I need to search in list its guid */ dc1394->camera = dc1394_camera_new (dc1394->d, list->ids[0].guid); if (list->num > 1) { av_log(c, AV_LOG_INFO, "Working with the first camera found\n"); } /* Freeing list of cameras */ dc1394_camera_free_list (list); /* Select MAX Speed possible from the cam */ if (dc1394->camera->bmode_capable>0) { dc1394_video_set_operation_mode(dc1394->camera, DC1394_OPERATION_MODE_1394B); i = DC1394_ISO_SPEED_800; } else { i = DC1394_ISO_SPEED_400; } for (res = DC1394_FAILURE; i >= DC1394_ISO_SPEED_MIN && res != DC1394_SUCCESS; i--) { res=dc1394_video_set_iso_speed(dc1394->camera, i); } if (res != DC1394_SUCCESS) { av_log(c, AV_LOG_ERROR, "Couldn't set ISO Speed\n"); goto out_camera; } if (dc1394_video_set_mode(dc1394->camera, fmt->frame_size_id) != DC1394_SUCCESS) { av_log(c, AV_LOG_ERROR, "Couldn't set video format\n"); goto out_camera; } if (dc1394_video_set_framerate(dc1394->camera,fps->frame_rate_id) != DC1394_SUCCESS) { av_log(c, AV_LOG_ERROR, "Couldn't set framerate %d \n",fps->frame_rate); goto out_camera; } if (dc1394_capture_setup(dc1394->camera, 10, DC1394_CAPTURE_FLAGS_DEFAULT)!=DC1394_SUCCESS) { av_log(c, AV_LOG_ERROR, "Cannot setup camera \n"); goto out_camera; } if (dc1394_video_set_transmission(dc1394->camera, DC1394_ON) !=DC1394_SUCCESS) { av_log(c, AV_LOG_ERROR, "Cannot start capture\n"); goto out_camera; } return 0; out_camera: dc1394_capture_stop(dc1394->camera); dc1394_video_set_transmission(dc1394->camera, DC1394_OFF); dc1394_camera_free (dc1394->camera); out: dc1394_free(dc1394->d); return -1; }
19,820
qemu
2a8e7499093cd33a607ebd7c1cd591169aa68a3e
1
static void error_mem_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { abort(); }
19,821
qemu
f8ed85ac992c48814d916d5df4d44f9a971c5de4
1
static void spitz_common_init(MachineState *machine, enum spitz_model_e model, int arm_id) { PXA2xxState *mpu; DeviceState *scp0, *scp1 = NULL; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *rom = g_new(MemoryRegion, 1); const char *cpu_model = machine->cpu_model; if (!cpu_model) cpu_model = (model == terrier) ? "pxa270-c5" : "pxa270-c0"; /* Setup CPU & memory */ mpu = pxa270_init(address_space_mem, spitz_binfo.ram_size, cpu_model); sl_flash_register(mpu, (model == spitz) ? FLASH_128M : FLASH_1024M); memory_region_init_ram(rom, NULL, "spitz.rom", SPITZ_ROM, &error_abort); vmstate_register_ram_global(rom); memory_region_set_readonly(rom, true); memory_region_add_subregion(address_space_mem, 0, rom); /* Setup peripherals */ spitz_keyboard_register(mpu); spitz_ssp_attach(mpu); scp0 = sysbus_create_simple("scoop", 0x10800000, NULL); if (model != akita) { scp1 = sysbus_create_simple("scoop", 0x08800040, NULL); } spitz_scoop_gpio_setup(mpu, scp0, scp1); spitz_gpio_setup(mpu, (model == akita) ? 1 : 2); spitz_i2c_setup(mpu); if (model == akita) spitz_akita_i2c_setup(mpu); if (model == terrier) /* A 6.0 GB microdrive is permanently sitting in CF slot 1. */ spitz_microdrive_attach(mpu, 1); else if (model != akita) /* A 4.0 GB microdrive is permanently sitting in CF slot 0. */ spitz_microdrive_attach(mpu, 0); spitz_binfo.kernel_filename = machine->kernel_filename; spitz_binfo.kernel_cmdline = machine->kernel_cmdline; spitz_binfo.initrd_filename = machine->initrd_filename; spitz_binfo.board_id = arm_id; arm_load_kernel(mpu->cpu, &spitz_binfo); sl_bootparam_write(SL_PXA_PARAM_BASE); }
19,822
qemu
60fe637bf0e4d7989e21e50f52526444765c63b4
1
static void blk_mig_read_cb(void *opaque, int ret) { BlkMigBlock *blk = opaque; blk_mig_lock(); blk->ret = ret; QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry); bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0); block_mig_state.submitted--; block_mig_state.read_done++; assert(block_mig_state.submitted >= 0); blk_mig_unlock(); }
19,824
FFmpeg
56706ac0d5723cb549fec2602e798ab1bf6004cd
1
static int libopenjpeg_copy_packed16(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image) { int compno; int x; int y; int *image_line; int frame_index; const int numcomps = image->numcomps; uint16_t *frame_ptr = (uint16_t*)frame->data[0]; for (compno = 0; compno < numcomps; ++compno) { if (image->comps[compno].w > frame->linesize[0] / numcomps) { av_log(avctx, AV_LOG_ERROR, "Error: frame's linesize is too small for the image\n"); return 0; } } for (compno = 0; compno < numcomps; ++compno) { for (y = 0; y < avctx->height; ++y) { image_line = image->comps[compno].data + y * image->comps[compno].w; frame_index = y * (frame->linesize[0] / 2) + compno; for (x = 0; x < avctx->width; ++x) { image_line[x] = frame_ptr[frame_index]; frame_index += numcomps; } for (; x < image->comps[compno].w; ++x) { image_line[x] = image_line[x - 1]; } } for (; y < image->comps[compno].h; ++y) { image_line = image->comps[compno].data + y * image->comps[compno].w; for (x = 0; x < image->comps[compno].w; ++x) { image_line[x] = image_line[x - image->comps[compno].w]; } } } return 1; }
19,826
qemu
3d100d0fa9ee4826425ea1c2a120a0f661d8e739
1
static void i82801b11_bridge_class_init(ObjectClass *klass, void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); k->is_bridge = 1; k->vendor_id = PCI_VENDOR_ID_INTEL; k->device_id = PCI_DEVICE_ID_INTEL_82801BA_11; k->revision = ICH9_D2P_A2_REVISION; k->init = i82801b11_bridge_initfn; k->config_write = pci_bridge_write_config; set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); }
19,828
qemu
378df4b23753a11be650af7664ca76bc75cb9f01
1
static void tcg_handle_interrupt(CPUArchState *env, int mask) { CPUState *cpu = ENV_GET_CPU(env); int old_mask; old_mask = env->interrupt_request; env->interrupt_request |= mask; /* * If called from iothread context, wake the target cpu in * case its halted. */ if (!qemu_cpu_is_self(cpu)) { qemu_cpu_kick(cpu); return; } if (use_icount) { env->icount_decr.u16.high = 0xffff; if (!can_do_io(env) && (mask & ~old_mask) != 0) { cpu_abort(env, "Raised interrupt while not in I/O function"); } } else { cpu_unlink_tb(cpu); } }
19,829
FFmpeg
fc06ee6ee377cc3b512dff8f02057e26311bc4da
1
static int mm_decode_inter(MmContext * s, int half_horiz, int half_vert) { int data_off = bytestream2_get_le16(&s->gb), y; GetByteContext data_ptr; if (bytestream2_get_bytes_left(&s->gb) < data_off) return AVERROR_INVALIDDATA; bytestream2_init(&data_ptr, s->gb.buffer + data_off, bytestream2_get_bytes_left(&s->gb) - data_off); while (s->gb.buffer < data_ptr.buffer_start) { int i, j; int length = bytestream2_get_byte(&s->gb); int x = bytestream2_get_byte(&s->gb) + ((length & 0x80) << 1); length &= 0x7F; if (length==0) { y += x; continue; } if (y + half_vert >= s->avctx->height) return 0; for(i=0; i<length; i++) { int replace_array = bytestream2_get_byte(&s->gb); for(j=0; j<8; j++) { int replace = (replace_array >> (7-j)) & 1; if (replace) { int color = bytestream2_get_byte(&data_ptr); s->frame.data[0][y*s->frame.linesize[0] + x] = color; if (half_horiz) s->frame.data[0][y*s->frame.linesize[0] + x + 1] = color; if (half_vert) { s->frame.data[0][(y+1)*s->frame.linesize[0] + x] = color; if (half_horiz) s->frame.data[0][(y+1)*s->frame.linesize[0] + x + 1] = color; } } x += 1 + half_horiz; } } y += 1 + half_vert; } return 0; }
19,830
qemu
6265eb26a375179f193f792e4f0d49036d2cf052
1
int parse_host_src_port(struct sockaddr_in *haddr, struct sockaddr_in *saddr, const char *input_str) { char *str = strdup(input_str); char *host_str = str; char *src_str; const char *src_str2; char *ptr; /* * Chop off any extra arguments at the end of the string which * would start with a comma, then fill in the src port information * if it was provided else use the "any address" and "any port". */ if ((ptr = strchr(str,','))) *ptr = '\0'; if ((src_str = strchr(input_str,'@'))) { *src_str = '\0'; src_str++; } if (parse_host_port(haddr, host_str) < 0) goto fail; src_str2 = src_str; if (!src_str || *src_str == '\0') src_str2 = ":0"; if (parse_host_port(saddr, src_str2) < 0) goto fail; free(str); return(0); fail: free(str); return -1; }
19,831
FFmpeg
5255acc6beb61ef30f43bc2c746b0b487815f76d
1
static av_cold int decode_end(AVCodecContext *avctx) { ALSDecContext *ctx = avctx->priv_data; av_freep(&ctx->sconf.chan_pos); ff_bgmc_end(&ctx->bgmc_lut, &ctx->bgmc_lut_status); av_freep(&ctx->const_block); av_freep(&ctx->shift_lsbs); av_freep(&ctx->opt_order); av_freep(&ctx->store_prev_samples); av_freep(&ctx->use_ltp); av_freep(&ctx->ltp_lag); av_freep(&ctx->ltp_gain); av_freep(&ctx->ltp_gain_buffer); av_freep(&ctx->quant_cof); av_freep(&ctx->lpc_cof); av_freep(&ctx->quant_cof_buffer); av_freep(&ctx->lpc_cof_buffer); av_freep(&ctx->lpc_cof_reversed_buffer); av_freep(&ctx->prev_raw_samples); av_freep(&ctx->raw_samples); av_freep(&ctx->raw_buffer); av_freep(&ctx->chan_data); av_freep(&ctx->chan_data_buffer); av_freep(&ctx->reverted_channels); return 0; }
19,832
FFmpeg
0ecca7a49f8e254c12a3a1de048d738bfbb614c6
1
static int xan_decode_init(AVCodecContext *avctx) { XanContext *s = avctx->priv_data; int i; s->avctx = avctx; if ((avctx->codec->id == CODEC_ID_XAN_WC3) && (s->avctx->palctrl == NULL)) { av_log(avctx, AV_LOG_ERROR, " WC3 Xan video: palette expected.\n"); return -1; } avctx->pix_fmt = PIX_FMT_PAL8; avctx->has_b_frames = 0; dsputil_init(&s->dsp, avctx); /* initialize the RGB -> YUV tables */ for (i = 0; i < 256; i++) { y_r_table[i] = Y_R * i; y_g_table[i] = Y_G * i; y_b_table[i] = Y_B * i; u_r_table[i] = U_R * i; u_g_table[i] = U_G * i; u_b_table[i] = U_B * i; v_r_table[i] = V_R * i; v_g_table[i] = V_G * i; v_b_table[i] = V_B * i; } s->buffer1 = av_malloc(avctx->width * avctx->height); s->buffer2 = av_malloc(avctx->width * avctx->height); if (!s->buffer1 || !s->buffer2) return -1; return 0; }
19,836
qemu
5b185639c5740998de403415c749ac98e13418fd
1
void HELPER(cksm)(uint32_t r1, uint32_t r2) { uint64_t src = get_address_31fix(r2); uint64_t src_len = env->regs[(r2 + 1) & 15]; uint64_t cksm = 0; while (src_len >= 4) { cksm += ldl(src); cksm = cksm_overflow(cksm); /* move to next word */ src_len -= 4; src += 4; } switch (src_len) { case 0: break; case 1: cksm += ldub(src); cksm = cksm_overflow(cksm); break; case 2: cksm += lduw(src); cksm = cksm_overflow(cksm); break; case 3: /* XXX check if this really is correct */ cksm += lduw(src) << 8; cksm += ldub(src + 2); cksm = cksm_overflow(cksm); break; } /* indicate we've processed everything */ env->regs[(r2 + 1) & 15] = 0; /* store result */ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (uint32_t)cksm; }
19,837
qemu
3bc36a401e0f33e63a4d2c58b646ddf78efb567c
1
static USBDevice *usb_net_init(USBBus *bus, const char *cmdline) { Error *local_err = NULL; USBDevice *dev; QemuOpts *opts; int idx; opts = qemu_opts_parse(qemu_find_opts("net"), cmdline, 0); if (!opts) { return NULL; } qemu_opt_set(opts, "type", "nic"); qemu_opt_set(opts, "model", "usb"); idx = net_client_init(opts, 0, &local_err); if (local_err) { error_report("%s", error_get_pretty(local_err)); error_free(local_err); return NULL; } dev = usb_create(bus, "usb-net"); qdev_set_nic_properties(&dev->qdev, &nd_table[idx]); qdev_init_nofail(&dev->qdev); return dev; }
19,838
qemu
83cc6f8c2f134ccff1a41ed86bbe3bc305e0c334
1
static void gen_spr_book3s_pmu_sup(CPUPPCState *env) { spr_register(env, SPR_POWER_MMCR0, "MMCR0", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_MMCR1, "MMCR1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_MMCRA, "MMCRA", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_PMC1, "PMC1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_PMC2, "PMC2", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_PMC3, "PMC3", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_PMC4, "PMC4", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_PMC5, "PMC5", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_PMC6, "PMC6", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_SIAR, "SIAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); spr_register(env, SPR_POWER_SDAR, "SDAR", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); }
19,839
qemu
60fe637bf0e4d7989e21e50f52526444765c63b4
1
size_t qsb_set_length(QEMUSizedBuffer *qsb, size_t new_len) { if (new_len <= qsb->size) { qsb->used = new_len; } else { qsb->used = qsb->size; } return qsb->used; }
19,840
FFmpeg
e924967fd5ec240cf97022f054cb02a0bc7101d9
1
static void process_tns_coeffs(TemporalNoiseShaping *tns, double *coef_raw, int *order_p, int w, int filt) { int i, j, order = *order_p; int *idx = tns->coef_idx[w][filt]; float *lpc = tns->coef[w][filt]; float temp[TNS_MAX_ORDER] = {0.0f}, out[TNS_MAX_ORDER] = {0.0f}; if (!order) return; /* Not what the specs say, but it's better */ for (i = 0; i < order; i++) { idx[i] = quant_array_idx(coef_raw[i], tns_tmp2_map_0_4, 16); lpc[i] = tns_tmp2_map_0_4[idx[i]]; } /* Trim any coeff less than 0.1f from the end */ for (i = order-1; i > -1; i--) { lpc[i] = (fabs(lpc[i]) > 0.1f) ? lpc[i] : 0.0f; if (lpc[i] != 0.0 ) { order = i; break; } } /* Step up procedure, convert to LPC coeffs */ out[0] = 1.0f; for (i = 1; i <= order; i++) { for (j = 1; j < i; j++) { temp[j] = out[j] + lpc[i]*out[i-j]; } for (j = 1; j <= i; j++) { out[j] = temp[j]; } out[i] = lpc[i-1]; } *order_p = order; memcpy(lpc, out, TNS_MAX_ORDER*sizeof(float)); }
19,841
FFmpeg
2391e46430fa2af28542124dbcfc935c0a5ce82c
0
int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir) { int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample; int my, off, i, mvs; if (s->picture_structure != PICT_FRAME || s->mcsel) goto unhandled; switch (s->mv_type) { case MV_TYPE_16X16: mvs = 1; break; case MV_TYPE_16X8: mvs = 2; break; case MV_TYPE_8X8: mvs = 4; break; default: goto unhandled; } for (i = 0; i < mvs; i++) { my = s->mv[dir][i][1]<<qpel_shift; my_max = FFMAX(my_max, my); my_min = FFMIN(my_min, my); } off = (FFMAX(-my_min, my_max) + 63) >> 6; return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1); unhandled: return s->mb_height-1; }
19,842
qemu
ead7a57df37d2187813a121308213f41591bd811
1
static int ssd0323_load(QEMUFile *f, void *opaque, int version_id) { SSISlave *ss = SSI_SLAVE(opaque); ssd0323_state *s = (ssd0323_state *)opaque; int i; if (version_id != 1) return -EINVAL; s->cmd_len = qemu_get_be32(f); s->cmd = qemu_get_be32(f); for (i = 0; i < 8; i++) s->cmd_data[i] = qemu_get_be32(f); s->row = qemu_get_be32(f); s->row_start = qemu_get_be32(f); s->row_end = qemu_get_be32(f); s->col = qemu_get_be32(f); s->col_start = qemu_get_be32(f); s->col_end = qemu_get_be32(f); s->redraw = qemu_get_be32(f); s->remap = qemu_get_be32(f); s->mode = qemu_get_be32(f); qemu_get_buffer(f, s->framebuffer, sizeof(s->framebuffer)); ss->cs = qemu_get_be32(f); return 0; }
19,843
qemu
18674b26788a9e47f1157170234e32ece2044367
1
static int s390_ipl_init(SysBusDevice *dev) { S390IPLState *ipl = S390_IPL(dev); int kernel_size; if (!ipl->kernel) { int bios_size; char *bios_filename; /* Load zipl bootloader */ if (bios_name == NULL) { bios_name = ipl->firmware; } bios_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (bios_filename == NULL) { hw_error("could not find stage1 bootloader\n"); } bios_size = load_elf(bios_filename, NULL, NULL, &ipl->start_addr, NULL, NULL, 1, ELF_MACHINE, 0); if (bios_size == -1) { bios_size = load_image_targphys(bios_filename, ZIPL_IMAGE_START, 4096); ipl->start_addr = ZIPL_IMAGE_START; if (bios_size > 4096) { hw_error("stage1 bootloader is > 4k\n"); } } g_free(bios_filename); if (bios_size == -1) { hw_error("could not load bootloader '%s'\n", bios_name); } return 0; } else { uint64_t pentry = KERN_IMAGE_START; kernel_size = load_elf(ipl->kernel, NULL, NULL, &pentry, NULL, NULL, 1, ELF_MACHINE, 0); if (kernel_size == -1) { kernel_size = load_image_targphys(ipl->kernel, 0, ram_size); } if (kernel_size == -1) { fprintf(stderr, "could not load kernel '%s'\n", ipl->kernel); return -1; } /* * Is it a Linux kernel (starting at 0x10000)? If yes, we fill in the * kernel parameters here as well. Note: For old kernels (up to 3.2) * we can not rely on the ELF entry point - it was 0x800 (the SALIPL * loader) and it won't work. For this case we force it to 0x10000, too. */ if (pentry == KERN_IMAGE_START || pentry == 0x800) { ipl->start_addr = KERN_IMAGE_START; /* Overwrite parameters in the kernel image, which are "rom" */ strcpy(rom_ptr(KERN_PARM_AREA), ipl->cmdline); } else { ipl->start_addr = pentry; } } if (ipl->initrd) { ram_addr_t initrd_offset; int initrd_size; initrd_offset = INITRD_START; while (kernel_size + 0x100000 > initrd_offset) { initrd_offset += 0x100000; } initrd_size = load_image_targphys(ipl->initrd, initrd_offset, ram_size - initrd_offset); if (initrd_size == -1) { fprintf(stderr, "qemu: could not load initrd '%s'\n", ipl->initrd); exit(1); } /* we have to overwrite values in the kernel image, which are "rom" */ stq_p(rom_ptr(INITRD_PARM_START), initrd_offset); stq_p(rom_ptr(INITRD_PARM_SIZE), initrd_size); } return 0; }
19,844
qemu
efec3dd631d94160288392721a5f9c39e50fb2bc
1
static void grackle_pci_class_init(ObjectClass *klass, void *data) { PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); k->init = grackle_pci_host_init; k->vendor_id = PCI_VENDOR_ID_MOTOROLA; k->device_id = PCI_DEVICE_ID_MOTOROLA_MPC106; k->revision = 0x00; k->class_id = PCI_CLASS_BRIDGE_HOST; dc->no_user = 1; }
19,846
qemu
e4a426e75ef35e4d8db4f0e242d67055e1cde973
1
int main(int argc, char *argv[]) { g_test_init(&argc, &argv, NULL); qtest_add_func("qmp/protocol", test_qmp_protocol); return g_test_run(); }
19,847
qemu
83d768b5640946b7da55ce8335509df297e2c7cd
1
void virtio_irq(VirtQueue *vq) { trace_virtio_irq(vq); virtio_set_isr(vq->vdev, 0x1); virtio_notify_vector(vq->vdev, vq->vector); }
19,848
qemu
d96391c1ffeb30a0afa695c86579517c69d9a889
1
target_ulong helper_rdhwr_xnp(CPUMIPSState *env) { check_hwrena(env, 5); return (env->CP0_Config5 >> CP0C5_XNP) & 1; }
19,849
FFmpeg
465e1dadbef7596a3eb87089a66bb4ecdc26d3c4
0
unsigned long get_checksum(ByteIOContext *s){ s->checksum= s->update_checksum(s->checksum, s->checksum_ptr, s->buf_ptr - s->checksum_ptr); s->checksum_ptr= NULL; return s->checksum; }
19,850
FFmpeg
26a7d6a301b9b6c67153c87d42db145cdc0e57cf
0
static int decode_rle_bpp2(AVCodecContext *avctx, AVFrame *p, GetByteContext *gbc) { int offset = avctx->width; uint8_t *outdata = p->data[0]; int i, j; for (i = 0; i < avctx->height; i++) { int size, left, code, pix; uint8_t *out = outdata; int pos = 0; /* size of packed line */ size = left = bytestream2_get_be16(gbc); if (bytestream2_get_bytes_left(gbc) < size) return AVERROR_INVALIDDATA; /* decode line */ while (left > 0) { code = bytestream2_get_byte(gbc); if (code & 0x80 ) { /* run */ pix = bytestream2_get_byte(gbc); for (j = 0; j < 257 - code; j++) { if (pos < offset) out[pos++] = (pix & 0xC0) >> 6; if (pos < offset) out[pos++] = (pix & 0x30) >> 4; if (pos < offset) out[pos++] = (pix & 0x0C) >> 2; if (pos < offset) out[pos++] = (pix & 0x03); } left -= 2; } else { /* copy */ for (j = 0; j < code + 1; j++) { pix = bytestream2_get_byte(gbc); if (pos < offset) out[pos++] = (pix & 0xC0) >> 6; if (pos < offset) out[pos++] = (pix & 0x30) >> 4; if (pos < offset) out[pos++] = (pix & 0x0C) >> 2; if (pos < offset) out[pos++] = (pix & 0x03); } left -= 1 + (code + 1); } } outdata += p->linesize[0]; } return 0; }
19,851
FFmpeg
185142a5ea93ef723f70a3ea43797f6c8827eb79
0
void ff_fmt_convert_init_x86(FmtConvertContext *c, AVCodecContext *avctx) { int mm_flags = av_get_cpu_flags(); if (mm_flags & AV_CPU_FLAG_MMX) { #if HAVE_YASM c->float_interleave = float_interleave_mmx; if(mm_flags & AV_CPU_FLAG_3DNOW){ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->float_to_int16 = ff_float_to_int16_3dnow; c->float_to_int16_interleave = float_to_int16_interleave_3dnow; } } if(mm_flags & AV_CPU_FLAG_3DNOWEXT){ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->float_to_int16_interleave = float_to_int16_interleave_3dn2; } } #endif if(mm_flags & AV_CPU_FLAG_SSE){ c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse; #if HAVE_YASM c->float_to_int16 = ff_float_to_int16_sse; c->float_to_int16_interleave = float_to_int16_interleave_sse; c->float_interleave = float_interleave_sse; #endif } if(mm_flags & AV_CPU_FLAG_SSE2){ c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2; #if HAVE_YASM c->float_to_int16 = ff_float_to_int16_sse2; c->float_to_int16_interleave = float_to_int16_interleave_sse2; #endif } } }
19,852
qemu
6152e2ae4344ec8c849393da3f76f2263cc55766
1
static KVMSlot *kvm_lookup_slot(KVMState *s, target_phys_addr_t start_addr) { int i; for (i = 0; i < ARRAY_SIZE(s->slots); i++) { KVMSlot *mem = &s->slots[i]; if (start_addr >= mem->start_addr && start_addr < (mem->start_addr + mem->memory_size)) return mem; } return NULL; }
19,853
qemu
9b7b85d26006af61b69dbabe2354d73a8c67cc6c
1
static inline void tcg_out_qemu_st(TCGContext *s, int cond, const TCGArg *args, int opc) { int addr_reg, data_reg, data_reg2; #ifdef CONFIG_SOFTMMU int mem_index, s_bits; # if TARGET_LONG_BITS == 64 int addr_reg2; # endif uint32_t *label_ptr; #endif data_reg = *args++; if (opc == 3) data_reg2 = *args++; else data_reg2 = 0; /* surpress warning */ addr_reg = *args++; #if TARGET_LONG_BITS == 64 addr_reg2 = *args++; #endif #ifdef CONFIG_SOFTMMU mem_index = *args; s_bits = opc & 3; /* Should generate something like the following: * shr r8, addr_reg, #TARGET_PAGE_BITS * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8 * add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS */ tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS)); tcg_out_dat_imm(s, COND_AL, ARITH_AND, 0, 8, CPU_TLB_SIZE - 1); tcg_out_dat_reg(s, COND_AL, ARITH_ADD, 0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS)); /* In the * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))] * below, the offset is likely to exceed 12 bits if mem_index != 0 and * not exceed otherwise, so use an * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table) * before. */ if (mem_index) tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0, (mem_index << (TLB_SHIFT & 1)) | ((16 - (TLB_SHIFT >> 1)) << 8)); tcg_out_ld32_12(s, COND_AL, 1, 0, offsetof(CPUState, tlb_table[0][0].addr_write)); tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS)); /* Check alignment. */ if (s_bits) tcg_out_dat_imm(s, COND_EQ, ARITH_TST, 0, addr_reg, (1 << s_bits) - 1); # if TARGET_LONG_BITS == 64 /* XXX: possibly we could use a block data load or writeback in * the first access. */ tcg_out_ld32_12(s, COND_EQ, 1, 0, offsetof(CPUState, tlb_table[0][0].addr_write) + 4); tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, 1, addr_reg2, SHIFT_IMM_LSL(0)); # endif tcg_out_ld32_12(s, COND_EQ, 1, 0, offsetof(CPUState, tlb_table[0][0].addend)); switch (opc) { case 0: tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, 1); break; case 0 | 4: tcg_out_st8s_r(s, COND_EQ, data_reg, addr_reg, 1); break; case 1: tcg_out_st16u_r(s, COND_EQ, data_reg, addr_reg, 1); break; case 1 | 4: tcg_out_st16s_r(s, COND_EQ, data_reg, addr_reg, 1); break; case 2: default: tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, 1); break; case 3: tcg_out_st32_rwb(s, COND_EQ, data_reg, 1, addr_reg); tcg_out_st32_12(s, COND_EQ, data_reg2, 1, 4); break; } label_ptr = (void *) s->code_ptr; tcg_out_b(s, COND_EQ, 8); /* TODO: move this code to where the constants pool will be */ if (addr_reg) tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, addr_reg, SHIFT_IMM_LSL(0)); # if TARGET_LONG_BITS == 32 switch (opc) { case 0: tcg_out_dat_imm(s, cond, ARITH_AND, 1, data_reg, 0xff); tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index); break; case 1: tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, data_reg, SHIFT_IMM_LSL(16)); tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, 1, SHIFT_IMM_LSR(16)); tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index); break; case 2: if (data_reg != 1) tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, data_reg, SHIFT_IMM_LSL(0)); tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index); break; case 3: if (data_reg != 1) tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, data_reg, SHIFT_IMM_LSL(0)); if (data_reg2 != 2) tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, data_reg2, SHIFT_IMM_LSL(0)); tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index); break; } # else if (addr_reg2 != 1) tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, addr_reg2, SHIFT_IMM_LSL(0)); switch (opc) { case 0: 
tcg_out_dat_imm(s, cond, ARITH_AND, 2, data_reg, 0xff); tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index); break; case 1: tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, data_reg, SHIFT_IMM_LSL(16)); tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, 2, SHIFT_IMM_LSR(16)); tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index); break; case 2: if (data_reg != 2) tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, data_reg, SHIFT_IMM_LSL(0)); tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index); break; case 3: tcg_out_dat_imm(s, cond, ARITH_MOV, 8, 0, mem_index); tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */ if (data_reg != 2) tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, data_reg, SHIFT_IMM_LSL(0)); if (data_reg2 != 3) tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, data_reg2, SHIFT_IMM_LSL(0)); break; } # endif # ifdef SAVE_LR tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0)); # endif tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] - (tcg_target_long) s->code_ptr); # if TARGET_LONG_BITS == 64 if (opc == 3) tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 0x10); # endif # ifdef SAVE_LR tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0)); # endif *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2; #else switch (opc) { case 0: tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0); break; case 0 | 4: tcg_out_st8s_8(s, COND_AL, data_reg, addr_reg, 0); break; case 1: tcg_out_st16u_8(s, COND_AL, data_reg, addr_reg, 0); break; case 1 | 4: tcg_out_st16s_8(s, COND_AL, data_reg, addr_reg, 0); break; case 2: default: tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0); break; case 3: /* TODO: use block store - * check that data_reg2 > data_reg or the other way */ tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0); tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4); break; } #endif }
19,854
FFmpeg
24fee95321c1463360ba7042d026dae021854360
1
static int update_offset(RTMPContext *rt, int size) { int old_flv_size; // generate packet header and put data into buffer for FLV demuxer if (rt->flv_off < rt->flv_size) { // There is old unread data in the buffer, thus append at the end old_flv_size = rt->flv_size; rt->flv_size += size + 15; } else { // All data has been read, write the new data at the start of the buffer old_flv_size = 0; rt->flv_size = size + 15; rt->flv_off = 0; } return old_flv_size; }
19,855
qemu
b4ba67d9a702507793c2724e56f98e9b0f7be02b
1
void qpci_io_writew(QPCIDevice *dev, void *data, uint16_t value) { uintptr_t addr = (uintptr_t)data; if (addr < QPCI_PIO_LIMIT) { dev->bus->pio_writew(dev->bus, addr, value); } else { value = cpu_to_le16(value); dev->bus->memwrite(dev->bus, addr, &value, sizeof(value)); } }
19,856
FFmpeg
2278ecc434d390bccd32a083a12ab964a6b7b0ce
0
static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; const uint8_t *buf_end; int buf_size = avpkt->size; VmdAudioContext *s = avctx->priv_data; int block_type, silent_chunks, audio_chunks; int ret; uint8_t *output_samples_u8; int16_t *output_samples_s16; if (buf_size < 16) { av_log(avctx, AV_LOG_WARNING, "skipping small junk packet\n"); *got_frame_ptr = 0; return buf_size; } block_type = buf[6]; if (block_type < BLOCK_TYPE_AUDIO || block_type > BLOCK_TYPE_SILENCE) { av_log(avctx, AV_LOG_ERROR, "unknown block type: %d\n", block_type); return AVERROR(EINVAL); } buf += 16; buf_size -= 16; /* get number of silent chunks */ silent_chunks = 0; if (block_type == BLOCK_TYPE_INITIAL) { uint32_t flags; if (buf_size < 4) { av_log(avctx, AV_LOG_ERROR, "packet is too small\n"); return AVERROR(EINVAL); } flags = AV_RB32(buf); silent_chunks = av_popcount(flags); buf += 4; buf_size -= 4; } else if (block_type == BLOCK_TYPE_SILENCE) { silent_chunks = 1; buf_size = 0; // should already be zero but set it just to be sure } /* ensure output buffer is large enough */ audio_chunks = buf_size / s->chunk_size; /* get output buffer */ s->frame.nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) / avctx->channels; if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } output_samples_u8 = s->frame.data[0]; output_samples_s16 = (int16_t *)s->frame.data[0]; /* decode silent chunks */ if (silent_chunks > 0) { int silent_size = avctx->block_align * silent_chunks; if (s->out_bps == 2) { memset(output_samples_s16, 0x00, silent_size * 2); output_samples_s16 += silent_size; } else { memset(output_samples_u8, 0x80, silent_size); output_samples_u8 += silent_size; } } /* decode audio chunks */ if (audio_chunks > 0) { buf_end = buf + buf_size; while (buf < buf_end) { if (s->out_bps == 2) { decode_audio_s16(output_samples_s16, buf, s->chunk_size, avctx->channels); output_samples_s16 += avctx->block_align; } else { memcpy(output_samples_u8, buf, s->chunk_size); output_samples_u8 += avctx->block_align; } buf += s->chunk_size; } } *got_frame_ptr = 1; *(AVFrame *)data = s->frame; return avpkt->size; }
19,857
FFmpeg
51cbabc79241dc9089d98965079fe1da4742d966
0
static void search_for_quantizers_twoloop(AVCodecContext *avctx, AACEncContext *s, SingleChannelElement *sce, const float lambda) { int start = 0, i, w, w2, g; int destbits = avctx->bit_rate * 1024.0 / avctx->sample_rate / avctx->channels; float dists[128], uplims[128]; int fflag, minscaler; int its = 0; int allz = 0; float minthr = INFINITY; //XXX: some heuristic to determine initial quantizers will reduce search time memset(dists, 0, sizeof(dists)); //determine zero bands and upper limits for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { for (g = 0; g < sce->ics.num_swb; g++) { int nz = 0; float uplim = 0.0f; for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { FFPsyBand *band = &s->psy.psy_bands[s->cur_channel*PSY_MAX_BANDS+(w+w2)*16+g]; uplim += band->threshold; if (band->energy <= band->threshold || band->threshold == 0.0f) { sce->zeroes[(w+w2)*16+g] = 1; continue; } nz = 1; } uplims[w*16+g] = uplim *512; sce->zeroes[w*16+g] = !nz; if (nz) minthr = FFMIN(minthr, uplim); allz = FFMAX(allz, nz); } } for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { for (g = 0; g < sce->ics.num_swb; g++) { if (sce->zeroes[w*16+g]) { sce->sf_idx[w*16+g] = SCALE_ONE_POS; continue; } sce->sf_idx[w*16+g] = SCALE_ONE_POS + FFMIN(log2(uplims[w*16+g]/minthr)*4,59); } } if (!allz) return; abs_pow34_v(s->scoefs, sce->coeffs, 1024); //perform two-loop search //outer loop - improve quality do { int tbits, qstep; minscaler = sce->sf_idx[0]; //inner loop - quantize spectrum to fit into given number of bits qstep = its ? 1 : 32; do { int prev = -1; tbits = 0; fflag = 0; for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { start = w*128; for (g = 0; g < sce->ics.num_swb; g++) { const float *coefs = sce->coeffs + start; const float *scaled = s->scoefs + start; int bits = 0; int cb; float mindist = INFINITY; int minbits = 0; if (sce->zeroes[w*16+g] || sce->sf_idx[w*16+g] >= 218) { start += sce->ics.swb_sizes[g]; continue; } minscaler = FFMIN(minscaler, sce->sf_idx[w*16+g]); for (cb = 0; cb <= ESC_BT; cb++) { float dist = 0.0f; int bb = 0; for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) { int b; dist += quantize_band_cost(s, coefs + w2*128, scaled + w2*128, sce->ics.swb_sizes[g], sce->sf_idx[w*16+g], cb, lambda, INFINITY, &b); bb += b; } if (dist < mindist) { mindist = dist; minbits = bb; } } dists[w*16+g] = (mindist - minbits) / lambda; bits = minbits; if (prev != -1) { bits += ff_aac_scalefactor_bits[sce->sf_idx[w*16+g] - prev + SCALE_DIFF_ZERO]; } tbits += bits; start += sce->ics.swb_sizes[g]; prev = sce->sf_idx[w*16+g]; } } if (tbits > destbits) { for (i = 0; i < 128; i++) if (sce->sf_idx[i] < 218 - qstep) sce->sf_idx[i] += qstep; } else { for (i = 0; i < 128; i++) if (sce->sf_idx[i] > 60 - qstep) sce->sf_idx[i] -= qstep; } qstep >>= 1; if (!qstep && tbits > destbits*1.02) qstep = 1; if (sce->sf_idx[0] >= 217) break; } while (qstep); fflag = 0; minscaler = av_clip(minscaler, 60, 255 - SCALE_MAX_DIFF); for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { start = w*128; for (g = 0; g < sce->ics.num_swb; g++) { int prevsc = sce->sf_idx[w*16+g]; if (dists[w*16+g] > uplims[w*16+g] && sce->sf_idx[w*16+g] > 60) sce->sf_idx[w*16+g]--; sce->sf_idx[w*16+g] = av_clip(sce->sf_idx[w*16+g], minscaler, minscaler + SCALE_MAX_DIFF); sce->sf_idx[w*16+g] = FFMIN(sce->sf_idx[w*16+g], 219); if (sce->sf_idx[w*16+g] != prevsc) fflag = 1; } } its++; } while (fflag && its < 10); }
19,858
qemu
bc5008a832f95aae86efce844382e64d54da2146
1
static struct pathelem *new_entry(const char *root, struct pathelem *parent, const char *name) { struct pathelem *new = malloc(sizeof(*new)); new->name = strdup(name); new->pathname = g_strdup_printf("%s/%s", root, name); new->num_entries = 0; return new; }
19,859
qemu
8786db7cb96f8ce5c75c6e1e074319c9dca8d356
1
static void address_space_update_ioeventfds(AddressSpace *as) { FlatRange *fr; unsigned ioeventfd_nb = 0; MemoryRegionIoeventfd *ioeventfds = NULL; AddrRange tmp; unsigned i; FOR_EACH_FLAT_RANGE(fr, &as->current_map) { for (i = 0; i < fr->mr->ioeventfd_nb; ++i) { tmp = addrrange_shift(fr->mr->ioeventfds[i].addr, int128_sub(fr->addr.start, int128_make64(fr->offset_in_region))); if (addrrange_intersects(fr->addr, tmp)) { ++ioeventfd_nb; ioeventfds = g_realloc(ioeventfds, ioeventfd_nb * sizeof(*ioeventfds)); ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i]; ioeventfds[ioeventfd_nb-1].addr = tmp; } } } address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb, as->ioeventfds, as->ioeventfd_nb); g_free(as->ioeventfds); as->ioeventfds = ioeventfds; as->ioeventfd_nb = ioeventfd_nb; }
19,860
qemu
77cb0f5aafc8e6d0c6d3c339f381c9b7921648e0
1
static void adb_kbd_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); ADBDeviceClass *adc = ADB_DEVICE_CLASS(oc); ADBKeyboardClass *akc = ADB_KEYBOARD_CLASS(oc); akc->parent_realize = dc->realize; dc->realize = adb_kbd_realizefn; set_bit(DEVICE_CATEGORY_INPUT, dc->categories); adc->devreq = adb_kbd_request; dc->reset = adb_kbd_reset; dc->vmsd = &vmstate_adb_kbd; }
19,861
qemu
4e60a250d395ef0d04eb8b6489cc5f7615a8909b
1
is_vlan_packet(E1000State *s, const uint8_t *buf) { return (be16_to_cpup((uint16_t *)(buf + 12)) == le16_to_cpup((uint16_t *)(s->mac_reg + VET))); }
19,862