Columns:
  project    string, 2 distinct values (FFmpeg, qemu)
  commit_id  string, length 40
  target     int64, 0 or 1
  func       string, length 26 to 142k
  idx        int64, 0 to 27.3k
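Each row below carries one C function (`func`) taken from the named project at the given commit, together with its integer label (`target`) and row index (`idx`). As a minimal sketch of how such rows could be consumed, assuming they are exported as JSON Lines with exactly the column names above (the file name `records.jsonl` and the helper `load_records` are illustrative, not part of the dataset):

```python
import json
from collections import Counter

def load_records(path):
    """Yield one dict per non-empty line; each dict is assumed to carry
    the columns listed above: project, commit_id, target, func, idx."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)

if __name__ == "__main__":
    # Count functions per (project, target) pair, e.g. to check label balance.
    counts = Counter()
    for rec in load_records("records.jsonl"):  # hypothetical export path
        counts[(rec["project"], rec["target"])] += 1
    for (project, target), n in sorted(counts.items()):
        print(f"{project} target={target}: {n} functions")
```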
project: FFmpeg | commit_id: 3019b4f6480a5d8c38e0e32ef75dabe6e0f3ae98 | target: 0
static int crystalhd_receive_frame(AVCodecContext *avctx, AVFrame *frame) { BC_STATUS bc_ret; BC_DTS_STATUS decoder_status = { 0, }; CopyRet rec_ret; CHDContext *priv = avctx->priv_data; HANDLE dev = priv->dev; int got_frame = 0; av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: receive_frame\n"); bc_ret = DtsGetDriverStatus(dev, &decoder_status); if (bc_ret != BC_STS_SUCCESS) { av_log(avctx, AV_LOG_ERROR, "CrystalHD: GetDriverStatus failed\n"); return -1; } if (decoder_status.ReadyListCount == 0) { av_log(avctx, AV_LOG_INFO, "CrystalHD: Insufficient frames ready. Returning\n"); return AVERROR(EAGAIN); } rec_ret = receive_frame(avctx, frame, &got_frame); if (rec_ret == RET_ERROR) { return -1; } else if (got_frame == 0) { return AVERROR(EAGAIN); } else { return 0; } }
idx: 12084
project: FFmpeg | commit_id: 7d3baebe408cb7377dbb6fa1a7fd285e8e366440 | target: 1
av_cold int ff_opus_parse_extradata(AVCodecContext *avctx, OpusContext *s) { static const uint8_t default_channel_map[2] = { 0, 1 }; int (*channel_reorder)(int, int) = channel_reorder_unknown; const uint8_t *extradata, *channel_map; int extradata_size; int version, channels, map_type, streams, stereo_streams, i, j; uint64_t layout; if (!avctx->extradata) { if (avctx->channels > 2) { av_log(avctx, AV_LOG_ERROR, "Multichannel configuration without extradata.\n"); return AVERROR(EINVAL); } extradata = opus_default_extradata; extradata_size = sizeof(opus_default_extradata); } else { extradata = avctx->extradata; extradata_size = avctx->extradata_size; } if (extradata_size < 19) { av_log(avctx, AV_LOG_ERROR, "Invalid extradata size: %d\n", extradata_size); return AVERROR_INVALIDDATA; } version = extradata[8]; if (version > 15) { avpriv_request_sample(avctx, "Extradata version %d", version); return AVERROR_PATCHWELCOME; } avctx->delay = AV_RL16(extradata + 10); channels = avctx->extradata ? extradata[9] : (avctx->channels == 1) ? 1 : 2; if (!channels) { av_log(avctx, AV_LOG_ERROR, "Zero channel count specified in the extradata\n"); return AVERROR_INVALIDDATA; } s->gain_i = AV_RL16(extradata + 16); if (s->gain_i) s->gain = ff_exp10(s->gain_i / (20.0 * 256)); map_type = extradata[18]; if (!map_type) { if (channels > 2) { av_log(avctx, AV_LOG_ERROR, "Channel mapping 0 is only specified for up to 2 channels\n"); return AVERROR_INVALIDDATA; } layout = (channels == 1) ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO; streams = 1; stereo_streams = channels - 1; channel_map = default_channel_map; } else if (map_type == 1 || map_type == 2 || map_type == 255) { if (extradata_size < 21 + channels) { av_log(avctx, AV_LOG_ERROR, "Invalid extradata size: %d\n", extradata_size); return AVERROR_INVALIDDATA; } streams = extradata[19]; stereo_streams = extradata[20]; if (!streams || stereo_streams > streams || streams + stereo_streams > 255) { av_log(avctx, AV_LOG_ERROR, "Invalid stream/stereo stream count: %d/%d\n", streams, stereo_streams); return AVERROR_INVALIDDATA; } if (map_type == 1) { if (channels > 8) { av_log(avctx, AV_LOG_ERROR, "Channel mapping 1 is only specified for up to 8 channels\n"); return AVERROR_INVALIDDATA; } layout = ff_vorbis_channel_layouts[channels - 1]; channel_reorder = channel_reorder_vorbis; } else if (map_type == 2) { int ambisonic_order = ff_sqrt(channels) - 1; if (channels != (ambisonic_order + 1) * (ambisonic_order + 1)) { av_log(avctx, AV_LOG_ERROR, "Channel mapping 2 is only specified for channel counts" " which can be written as (n + 1)^2 for nonnegative integer n\n"); return AVERROR_INVALIDDATA; } layout = 0; } else layout = 0; channel_map = extradata + 21; } else { avpriv_request_sample(avctx, "Mapping type %d", map_type); return AVERROR_PATCHWELCOME; } s->channel_maps = av_mallocz_array(channels, sizeof(*s->channel_maps)); if (!s->channel_maps) return AVERROR(ENOMEM); for (i = 0; i < channels; i++) { ChannelMap *map = &s->channel_maps[i]; uint8_t idx = channel_map[channel_reorder(channels, i)]; if (idx == 255) { map->silence = 1; continue; } else if (idx >= streams + stereo_streams) { av_log(avctx, AV_LOG_ERROR, "Invalid channel map for output channel %d: %d\n", i, idx); return AVERROR_INVALIDDATA; } /* check that we did not see this index yet */ map->copy = 0; for (j = 0; j < i; j++) if (channel_map[channel_reorder(channels, j)] == idx) { map->copy = 1; map->copy_idx = j; break; } if (idx < 2 * stereo_streams) { map->stream_idx = idx / 2; map->channel_idx = idx & 1; } else { 
map->stream_idx = idx - stereo_streams; map->channel_idx = 0; } } avctx->channels = channels; avctx->channel_layout = layout; s->nb_streams = streams; s->nb_stereo_streams = stereo_streams; return 0; }
idx: 12085
project: qemu | commit_id: 34e1c27bc3094ffe484d9855e07ad104bddf579f | target: 1
uint32 float32_to_uint32_round_to_zero( float32 a STATUS_PARAM ) { int64_t v; uint32 res; v = float32_to_int64_round_to_zero(a STATUS_VAR); if (v < 0) { res = 0; float_raise( float_flag_invalid STATUS_VAR); } else if (v > 0xffffffff) { res = 0xffffffff; float_raise( float_flag_invalid STATUS_VAR); } else { res = v; } return res; }
idx: 12087
project: qemu | commit_id: 0d07fe47d4986271a21ed4ff5237275ff55dd93f | target: 1
static inline abi_long do_msgrcv(int msqid, abi_long msgp, unsigned int msgsz, abi_long msgtyp, int msgflg) { struct target_msgbuf *target_mb; char *target_mtext; struct msgbuf *host_mb; abi_long ret = 0; if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0)) return -TARGET_EFAULT; host_mb = malloc(msgsz+sizeof(long)); ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg)); if (ret > 0) { abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong); target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0); if (!target_mtext) { ret = -TARGET_EFAULT; goto end; } memcpy(target_mb->mtext, host_mb->mtext, ret); unlock_user(target_mtext, target_mtext_addr, ret); } target_mb->mtype = tswapal(host_mb->mtype); free(host_mb); end: if (target_mb) unlock_user_struct(target_mb, msgp, 1); return ret; }
idx: 12088
project: qemu | commit_id: 016d2e1dfa21b64a524d3629fdd317d4c25bc3b8 | target: 1
restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc, int *pd0) { int err = 0; int temp; __get_user(env->aregs[7], &sc->sc_usp); __get_user(env->dregs[1], &sc->sc_d1); __get_user(env->aregs[0], &sc->sc_a0); __get_user(env->aregs[1], &sc->sc_a1); __get_user(env->pc, &sc->sc_pc); __get_user(temp, &sc->sc_sr); env->sr = (env->sr & 0xff00) | (temp & 0xff); *pd0 = tswapl(sc->sc_d0); return err; }
idx: 12089
project: qemu | commit_id: 56ad3e54dad6cdcee8668d170df161d89581846f | target: 1
static ssize_t mp_pacl_getxattr(FsContext *ctx, const char *path, const char *name, void *value, size_t size) { char *buffer; ssize_t ret; buffer = rpath(ctx, path); ret = lgetxattr(buffer, MAP_ACL_ACCESS, value, size); g_free(buffer); return ret; }
idx: 12090
project: qemu | commit_id: 231bb267644ee3a9ebfd9c7f42d5d41610194b45 | target: 1
int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info) { BDRVQcowState *s = bs->opaque; QCowSnapshot *new_snapshot_list = NULL; QCowSnapshot *old_snapshot_list = NULL; QCowSnapshot sn1, *sn = &sn1; int i, ret; uint64_t *l1_table = NULL; int64_t l1_table_offset; memset(sn, 0, sizeof(*sn)); /* Generate an ID if it wasn't passed */ if (sn_info->id_str[0] == '\0') { find_new_snapshot_id(bs, sn_info->id_str, sizeof(sn_info->id_str)); } /* Check that the ID is unique */ if (find_snapshot_by_id_and_name(bs, sn_info->id_str, NULL) >= 0) { return -EEXIST; } /* Populate sn with passed data */ sn->id_str = g_strdup(sn_info->id_str); sn->name = g_strdup(sn_info->name); sn->disk_size = bs->total_sectors * BDRV_SECTOR_SIZE; sn->vm_state_size = sn_info->vm_state_size; sn->date_sec = sn_info->date_sec; sn->date_nsec = sn_info->date_nsec; sn->vm_clock_nsec = sn_info->vm_clock_nsec; /* Allocate the L1 table of the snapshot and copy the current one there. */ l1_table_offset = qcow2_alloc_clusters(bs, s->l1_size * sizeof(uint64_t)); if (l1_table_offset < 0) { ret = l1_table_offset; goto fail; } sn->l1_table_offset = l1_table_offset; sn->l1_size = s->l1_size; l1_table = g_malloc(s->l1_size * sizeof(uint64_t)); for(i = 0; i < s->l1_size; i++) { l1_table[i] = cpu_to_be64(s->l1_table[i]); } ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT, sn->l1_table_offset, s->l1_size * sizeof(uint64_t)); if (ret < 0) { goto fail; } ret = bdrv_pwrite(bs->file, sn->l1_table_offset, l1_table, s->l1_size * sizeof(uint64_t)); if (ret < 0) { goto fail; } g_free(l1_table); l1_table = NULL; /* * Increase the refcounts of all clusters and make sure everything is * stable on disk before updating the snapshot table to contain a pointer * to the new L1 table. */ ret = qcow2_update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 1); if (ret < 0) { goto fail; } /* Append the new snapshot to the snapshot list */ new_snapshot_list = g_malloc((s->nb_snapshots + 1) * sizeof(QCowSnapshot)); if (s->snapshots) { memcpy(new_snapshot_list, s->snapshots, s->nb_snapshots * sizeof(QCowSnapshot)); old_snapshot_list = s->snapshots; } s->snapshots = new_snapshot_list; s->snapshots[s->nb_snapshots++] = *sn; ret = qcow2_write_snapshots(bs); if (ret < 0) { g_free(s->snapshots); s->snapshots = old_snapshot_list; s->nb_snapshots--; goto fail; } g_free(old_snapshot_list); /* The VM state isn't needed any more in the active L1 table; in fact, it * hurts by causing expensive COW for the next snapshot. */ qcow2_discard_clusters(bs, qcow2_vm_state_offset(s), align_offset(sn->vm_state_size, s->cluster_size) >> BDRV_SECTOR_BITS, QCOW2_DISCARD_NEVER); #ifdef DEBUG_ALLOC { BdrvCheckResult result = {0}; qcow2_check_refcounts(bs, &result, 0); } #endif return 0; fail: g_free(sn->id_str); g_free(sn->name); g_free(l1_table); return ret; }
idx: 12091
project: qemu | commit_id: 84a3a53cf61ef691478bd91afa455c801696053c | target: 1
static int omap_i2c_init(SysBusDevice *sbd) { DeviceState *dev = DEVICE(sbd); OMAPI2CState *s = OMAP_I2C(dev); if (!s->fclk) { hw_error("omap_i2c: fclk not connected\n"); } if (s->revision >= OMAP2_INTR_REV && !s->iclk) { /* Note that OMAP1 doesn't have a separate interface clock */ hw_error("omap_i2c: iclk not connected\n"); } sysbus_init_irq(sbd, &s->irq); sysbus_init_irq(sbd, &s->drq[0]); sysbus_init_irq(sbd, &s->drq[1]); memory_region_init_io(&s->iomem, OBJECT(s), &omap_i2c_ops, s, "omap.i2c", (s->revision < OMAP2_INTR_REV) ? 0x800 : 0x1000); sysbus_init_mmio(sbd, &s->iomem); s->bus = i2c_init_bus(dev, NULL); return 0; }
idx: 12092
project: qemu | commit_id: 80eecda8e5d09c442c24307f340840a5b70ea3b9 | target: 1
static int is_rndis(USBNetState *s) { return s->dev.config->bConfigurationValue == DEV_RNDIS_CONFIG_VALUE; }
idx: 12096
project: FFmpeg | commit_id: 437ad467c250f8b96dc3456997906866d748f6ba | target: 0
static int do_packet_auto_bsf(AVFormatContext *s, AVPacket *pkt) { AVStream *st = s->streams[pkt->stream_index]; int i, ret; if (!(s->flags & AVFMT_FLAG_AUTO_BSF)) return 1; if (s->oformat->check_bitstream) { if (!st->internal->bitstream_checked) { if ((ret = s->oformat->check_bitstream(s, pkt)) < 0) return ret; else if (ret == 1) st->internal->bitstream_checked = 1; } } #if FF_API_LAVF_MERGE_SD FF_DISABLE_DEPRECATION_WARNINGS if (st->internal->nb_bsfcs) { ret = av_packet_split_side_data(pkt); if (ret < 0) av_log(s, AV_LOG_WARNING, "Failed to split side data before bitstream filter\n"); } FF_ENABLE_DEPRECATION_WARNINGS #endif for (i = 0; i < st->internal->nb_bsfcs; i++) { AVBSFContext *ctx = st->internal->bsfcs[i]; if (i > 0) { AVBSFContext* prev_ctx = st->internal->bsfcs[i - 1]; if (prev_ctx->par_out->extradata_size != ctx->par_in->extradata_size) { if ((ret = avcodec_parameters_copy(ctx->par_in, prev_ctx->par_out)) < 0) return ret; } } // TODO: when any bitstream filter requires flushing at EOF, we'll need to // flush each stream's BSF chain on write_trailer. if ((ret = av_bsf_send_packet(ctx, pkt)) < 0) { av_log(ctx, AV_LOG_ERROR, "Failed to send packet to filter %s for stream %d\n", ctx->filter->name, pkt->stream_index); return ret; } // TODO: when any automatically-added bitstream filter is generating multiple // output packets for a single input one, we'll need to call this in a loop // and write each output packet. if ((ret = av_bsf_receive_packet(ctx, pkt)) < 0) { if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) return 0; av_log(ctx, AV_LOG_ERROR, "Failed to send packet to filter %s for stream %d\n", ctx->filter->name, pkt->stream_index); return ret; } if (i == st->internal->nb_bsfcs - 1) { if (ctx->par_out->extradata_size != st->codecpar->extradata_size) { if ((ret = avcodec_parameters_copy(st->codecpar, ctx->par_out)) < 0) return ret; } } } return 1; }
idx: 12097
project: FFmpeg | commit_id: 7e8fe4be5fb4c98aa3c6a4ed3cec999f4e3cc3aa | target: 0
AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name) { AVFilterContext *ret; if (!filter) return NULL; ret = av_mallocz(sizeof(AVFilterContext)); if (!ret) return NULL; ret->av_class = &avfilter_class; ret->filter = filter; ret->name = inst_name ? av_strdup(inst_name) : NULL; if (filter->priv_size) { ret->priv = av_mallocz(filter->priv_size); if (!ret->priv) goto err; } if (filter->priv_class) { *(const AVClass**)ret->priv = filter->priv_class; av_opt_set_defaults(ret->priv); } ret->nb_inputs = pad_count(filter->inputs); if (ret->nb_inputs ) { ret->input_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_inputs); if (!ret->input_pads) goto err; memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs); ret->inputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_inputs); if (!ret->inputs) goto err; } ret->nb_outputs = pad_count(filter->outputs); if (ret->nb_outputs) { ret->output_pads = av_malloc(sizeof(AVFilterPad) * ret->nb_outputs); if (!ret->output_pads) goto err; memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs); ret->outputs = av_mallocz(sizeof(AVFilterLink*) * ret->nb_outputs); if (!ret->outputs) goto err; } #if FF_API_FOO_COUNT ret->output_count = ret->nb_outputs; ret->input_count = ret->nb_inputs; #endif return ret; err: av_freep(&ret->inputs); av_freep(&ret->input_pads); ret->nb_inputs = 0; av_freep(&ret->outputs); av_freep(&ret->output_pads); ret->nb_outputs = 0; av_freep(&ret->priv); av_free(ret); return NULL; }
idx: 12100
project: FFmpeg | commit_id: 70d54392f5015b9c6594fcae558f59f952501e3b | target: 0
static int dv_decode_video_segment(AVCodecContext *avctx, void *arg) { DVVideoContext *s = avctx->priv_data; DVwork_chunk *work_chunk = arg; int quant, dc, dct_mode, class1, j; int mb_index, mb_x, mb_y, last_index; int y_stride, linesize; DCTELEM *block, *block1; int c_offset; uint8_t *y_ptr; const uint8_t *buf_ptr; PutBitContext pb, vs_pb; GetBitContext gb; BlockInfo mb_data[5 * DV_MAX_BPM], *mb, *mb1; LOCAL_ALIGNED_16(DCTELEM, sblock, [5*DV_MAX_BPM], [64]); LOCAL_ALIGNED_16(uint8_t, mb_bit_buffer, [ 80 + FF_INPUT_BUFFER_PADDING_SIZE]); /* allow some slack */ LOCAL_ALIGNED_16(uint8_t, vs_bit_buffer, [5*80 + FF_INPUT_BUFFER_PADDING_SIZE]); /* allow some slack */ const int log2_blocksize = 3; int is_field_mode[5]; assert((((int)mb_bit_buffer) & 7) == 0); assert((((int)vs_bit_buffer) & 7) == 0); memset(sblock, 0, 5*DV_MAX_BPM*sizeof(*sblock)); /* pass 1: read DC and AC coefficients in blocks */ buf_ptr = &s->buf[work_chunk->buf_offset*80]; block1 = &sblock[0][0]; mb1 = mb_data; init_put_bits(&vs_pb, vs_bit_buffer, 5 * 80); for (mb_index = 0; mb_index < 5; mb_index++, mb1 += s->sys->bpm, block1 += s->sys->bpm * 64) { /* skip header */ quant = buf_ptr[3] & 0x0f; buf_ptr += 4; init_put_bits(&pb, mb_bit_buffer, 80); mb = mb1; block = block1; is_field_mode[mb_index] = 0; for (j = 0; j < s->sys->bpm; j++) { last_index = s->sys->block_sizes[j]; init_get_bits(&gb, buf_ptr, last_index); /* get the DC */ dc = get_sbits(&gb, 9); dct_mode = get_bits1(&gb); class1 = get_bits(&gb, 2); if (DV_PROFILE_IS_HD(s->sys)) { mb->idct_put = s->idct_put[0]; mb->scan_table = s->dv_zigzag[0]; mb->factor_table = &s->sys->idct_factor[(j >= 4)*4*16*64 + class1*16*64 + quant*64]; is_field_mode[mb_index] |= !j && dct_mode; } else { mb->idct_put = s->idct_put[dct_mode && log2_blocksize == 3]; mb->scan_table = s->dv_zigzag[dct_mode]; mb->factor_table = &s->sys->idct_factor[(class1 == 3)*2*22*64 + dct_mode*22*64 + (quant + ff_dv_quant_offset[class1])*64]; } dc = dc << 2; /* convert to unsigned because 128 is not added in the standard IDCT */ dc += 1024; block[0] = dc; buf_ptr += last_index >> 3; mb->pos = 0; mb->partial_bit_count = 0; av_dlog(avctx, "MB block: %d, %d ", mb_index, j); dv_decode_ac(&gb, mb, block); /* write the remaining bits in a new buffer only if the block is finished */ if (mb->pos >= 64) bit_copy(&pb, &gb); block += 64; mb++; } /* pass 2: we can do it just after */ av_dlog(avctx, "***pass 2 size=%d MB#=%d\n", put_bits_count(&pb), mb_index); block = block1; mb = mb1; init_get_bits(&gb, mb_bit_buffer, put_bits_count(&pb)); put_bits32(&pb, 0); // padding must be zeroed flush_put_bits(&pb); for (j = 0; j < s->sys->bpm; j++, block += 64, mb++) { if (mb->pos < 64 && get_bits_left(&gb) > 0) { dv_decode_ac(&gb, mb, block); /* if still not finished, no need to parse other blocks */ if (mb->pos < 64) break; } } /* all blocks are finished, so the extra bytes can be used at the video segment level */ if (j >= s->sys->bpm) bit_copy(&vs_pb, &gb); } /* we need a pass over the whole video segment */ av_dlog(avctx, "***pass 3 size=%d\n", put_bits_count(&vs_pb)); block = &sblock[0][0]; mb = mb_data; init_get_bits(&gb, vs_bit_buffer, put_bits_count(&vs_pb)); put_bits32(&vs_pb, 0); // padding must be zeroed flush_put_bits(&vs_pb); for (mb_index = 0; mb_index < 5; mb_index++) { for (j = 0; j < s->sys->bpm; j++) { if (mb->pos < 64) { av_dlog(avctx, "start %d:%d\n", mb_index, j); dv_decode_ac(&gb, mb, block); } if (mb->pos >= 64 && mb->pos < 127) av_log(avctx, AV_LOG_ERROR, "AC EOB marker is absent pos=%d\n", mb->pos); block += 
64; mb++; } } /* compute idct and place blocks */ block = &sblock[0][0]; mb = mb_data; for (mb_index = 0; mb_index < 5; mb_index++) { dv_calculate_mb_xy(s, work_chunk, mb_index, &mb_x, &mb_y); /* idct_put'ting luminance */ if ((s->sys->pix_fmt == PIX_FMT_YUV420P) || (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) || (s->sys->height >= 720 && mb_y != 134)) { y_stride = (s->picture.linesize[0] << ((!is_field_mode[mb_index]) * log2_blocksize)); } else { y_stride = (2 << log2_blocksize); } y_ptr = s->picture.data[0] + ((mb_y * s->picture.linesize[0] + mb_x) << log2_blocksize); linesize = s->picture.linesize[0] << is_field_mode[mb_index]; mb[0] .idct_put(y_ptr , linesize, block + 0*64); if (s->sys->video_stype == 4) { /* SD 422 */ mb[2].idct_put(y_ptr + (1 << log2_blocksize) , linesize, block + 2*64); } else { mb[1].idct_put(y_ptr + (1 << log2_blocksize) , linesize, block + 1*64); mb[2].idct_put(y_ptr + y_stride, linesize, block + 2*64); mb[3].idct_put(y_ptr + (1 << log2_blocksize) + y_stride, linesize, block + 3*64); } mb += 4; block += 4*64; /* idct_put'ting chrominance */ c_offset = (((mb_y >> (s->sys->pix_fmt == PIX_FMT_YUV420P)) * s->picture.linesize[1] + (mb_x >> ((s->sys->pix_fmt == PIX_FMT_YUV411P) ? 2 : 1))) << log2_blocksize); for (j = 2; j; j--) { uint8_t *c_ptr = s->picture.data[j] + c_offset; if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) { uint64_t aligned_pixels[64/8]; uint8_t *pixels = (uint8_t*)aligned_pixels; uint8_t *c_ptr1, *ptr1; int x, y; mb->idct_put(pixels, 8, block); for (y = 0; y < (1 << log2_blocksize); y++, c_ptr += s->picture.linesize[j], pixels += 8) { ptr1 = pixels + (1 << (log2_blocksize - 1)); c_ptr1 = c_ptr + (s->picture.linesize[j] << log2_blocksize); for (x = 0; x < (1 << (log2_blocksize - 1)); x++) { c_ptr[x] = pixels[x]; c_ptr1[x] = ptr1[x]; } } block += 64; mb++; } else { y_stride = (mb_y == 134) ? (1 << log2_blocksize) : s->picture.linesize[j] << ((!is_field_mode[mb_index]) * log2_blocksize); linesize = s->picture.linesize[j] << is_field_mode[mb_index]; (mb++)-> idct_put(c_ptr , linesize, block); block += 64; if (s->sys->bpm == 8) { (mb++)->idct_put(c_ptr + y_stride, linesize, block); block += 64; } } } } return 0; }
idx: 12102
project: FFmpeg | commit_id: cd2f69cdd63d3ca5c2388d9ce6737b3e46bb19b2 | target: 0
static void compute_default_clut(AVSubtitleRect *rect, int w, int h) { uint8_t list[256] = {0}; uint8_t list_inv[256]; int counttab[256] = {0}; int count, i, x, y; #define V(x,y) rect->data[0][(x) + (y)*rect->linesize[0]] for (y = 0; y<h; y++) { for (x = 0; x<w; x++) { int v = V(x,y) + 1; int vl = x ? V(x-1,y) + 1 : 0; int vr = x+1<w ? V(x+1,y) + 1 : 0; int vt = y ? V(x,y-1) + 1 : 0; int vb = y+1<h ? V(x,y+1) + 1 : 0; counttab[v-1] += !!((v!=vl) + (v!=vr) + (v!=vt) + (v!=vb)); } } #define L(x,y) list[ rect->data[0][(x) + (y)*rect->linesize[0]] ] for (i = 0; i<256; i++) { int scoretab[256] = {0}; int bestscore = 0; int bestv = 0; for (y = 0; y<h; y++) { for (x = 0; x<w; x++) { int v = rect->data[0][x + y*rect->linesize[0]]; int l_m = list[v]; int l_l = x ? L(x-1, y) : 1; int l_r = x+1<w ? L(x+1, y) : 1; int l_t = y ? L(x, y-1) : 1; int l_b = y+1<h ? L(x, y+1) : 1; int score; if (l_m) continue; scoretab[v] += l_l + l_r + l_t + l_b; score = 1024LL*scoretab[v] / counttab[v]; if (score > bestscore) { bestscore = score; bestv = v; } } } if (!bestscore) break; list [ bestv ] = 1; list_inv[ i ] = bestv; } count = FFMAX(i - 1, 1); for (i--; i>=0; i--) { int v = i*255/count; AV_WN32(rect->data[1] + 4*list_inv[i], RGBA(v/2,v,v/2,v)); } }
idx: 12103
project: FFmpeg | commit_id: fd09cd08c0ad059ee41ccafc6836a285c1b35c45 | target: 0
static int seqvideo_decode(SeqVideoContext *seq, const unsigned char *data, int data_size) { const unsigned char *data_end = data + data_size; GetBitContext gb; int flags, i, j, x, y, op; unsigned char c[3]; unsigned char *dst; uint32_t *palette; flags = *data++; if (flags & 1) { palette = (uint32_t *)seq->frame.data[1]; if (data_end - data < 256 * 3) return AVERROR_INVALIDDATA; for (i = 0; i < 256; i++) { for (j = 0; j < 3; j++, data++) c[j] = (*data << 2) | (*data >> 4); palette[i] = AV_RB24(c); } seq->frame.palette_has_changed = 1; } if (flags & 2) { if (data_end - data < 128) return AVERROR_INVALIDDATA; init_get_bits(&gb, data, 128 * 8); data += 128; for (y = 0; y < 128; y += 8) for (x = 0; x < 256; x += 8) { dst = &seq->frame.data[0][y * seq->frame.linesize[0] + x]; op = get_bits(&gb, 2); switch (op) { case 1: data = seq_decode_op1(seq, data, data_end, dst); break; case 2: data = seq_decode_op2(seq, data, data_end, dst); break; case 3: data = seq_decode_op3(seq, data, data_end, dst); break; } if (!data) return AVERROR_INVALIDDATA; } } return 0; }
idx: 12104
project: qemu | commit_id: b7fb49f0c709a406f79372be397367ff2550373b | target: 0
int dpy_set_ui_info(QemuConsole *con, QemuUIInfo *info) { assert(con != NULL); con->ui_info = *info; if (!con->hw_ops->ui_info) { return -1; } /* * Typically we get a flood of these as the user resizes the window. * Wait until the dust has settled (one second without updates), then * go notify the guest. */ timer_mod(con->ui_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1000); return 0; }
idx: 12107
project: qemu | commit_id: 4be746345f13e99e468c60acbd3a355e8183e3ce | target: 0
static void dma_aio_cancel(BlockAIOCB *acb) { DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common); trace_dma_aio_cancel(dbs); if (dbs->acb) { bdrv_aio_cancel_async(dbs->acb); } }
idx: 12108
project: qemu | commit_id: 40fda982f2e887f7d5cc36b8a7e3b5a07a1e6704 | target: 0
static void kvmppc_host_cpu_initfn(Object *obj) { assert(kvm_enabled()); }
idx: 12110
project: qemu | commit_id: b6ce27a593ab39ac28baebc3045901925046bebd | target: 0
static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy) { static const MemoryRegionOps common_ops = { .read = virtio_pci_common_read, .write = virtio_pci_common_write, .impl = { .min_access_size = 1, .max_access_size = 4, }, .endianness = DEVICE_LITTLE_ENDIAN, }; static const MemoryRegionOps isr_ops = { .read = virtio_pci_isr_read, .write = virtio_pci_isr_write, .impl = { .min_access_size = 1, .max_access_size = 4, }, .endianness = DEVICE_LITTLE_ENDIAN, }; static const MemoryRegionOps device_ops = { .read = virtio_pci_device_read, .write = virtio_pci_device_write, .impl = { .min_access_size = 1, .max_access_size = 4, }, .endianness = DEVICE_LITTLE_ENDIAN, }; static const MemoryRegionOps notify_ops = { .read = virtio_pci_notify_read, .write = virtio_pci_notify_write, .impl = { .min_access_size = 1, .max_access_size = 4, }, .endianness = DEVICE_LITTLE_ENDIAN, }; memory_region_init_io(&proxy->common.mr, OBJECT(proxy), &common_ops, proxy, "virtio-pci-common", 0x1000); proxy->common.offset = 0x0; proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG; memory_region_init_io(&proxy->isr.mr, OBJECT(proxy), &isr_ops, proxy, "virtio-pci-isr", 0x1000); proxy->isr.offset = 0x1000; proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG; memory_region_init_io(&proxy->device.mr, OBJECT(proxy), &device_ops, virtio_bus_get_device(&proxy->bus), "virtio-pci-device", 0x1000); proxy->device.offset = 0x2000; proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG; memory_region_init_io(&proxy->notify.mr, OBJECT(proxy), &notify_ops, virtio_bus_get_device(&proxy->bus), "virtio-pci-notify", QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * VIRTIO_QUEUE_MAX); proxy->notify.offset = 0x3000; proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG; }
idx: 12112
project: qemu | commit_id: 3dc6f8693694a649a9c83f1e2746565b47683923 | target: 0
static void coroutine_fn v9fs_flush(void *opaque) { ssize_t err; int16_t tag; size_t offset = 7; V9fsPDU *cancel_pdu = NULL; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; err = pdu_unmarshal(pdu, offset, "w", &tag); if (err < 0) { pdu_complete(pdu, err); return; } trace_v9fs_flush(pdu->tag, pdu->id, tag); if (pdu->tag == tag) { error_report("Warning: the guest sent a self-referencing 9P flush request"); } else { QLIST_FOREACH(cancel_pdu, &s->active_list, next) { if (cancel_pdu->tag == tag) { break; } } } if (cancel_pdu) { cancel_pdu->cancelled = 1; /* * Wait for pdu to complete. */ qemu_co_queue_wait(&cancel_pdu->complete, NULL); if (!qemu_co_queue_next(&cancel_pdu->complete)) { cancel_pdu->cancelled = 0; pdu_free(cancel_pdu); } } pdu_complete(pdu, 7); }
idx: 12113
project: qemu | commit_id: 49bd1458da8909434eb83c5cda472c63ff6a529c | target: 0
static PCIBus *pci_get_bus_devfn(int *devfnp, const char *devaddr) { int dom, bus; unsigned slot; if (!devaddr) { *devfnp = -1; return pci_find_bus(0); } if (pci_parse_devaddr(devaddr, &dom, &bus, &slot) < 0) { return NULL; } *devfnp = slot << 3; return pci_find_bus(bus); }
idx: 12114
project: qemu | commit_id: 7e7e2ebc942da8285931ceabf12823e165dced8b | target: 0
static void vnc_connect(VncDisplay *vd, int csock) { VncState *vs = qemu_mallocz(sizeof(VncState)); int i; vs->csock = csock; vs->lossy_rect = qemu_mallocz(VNC_STAT_ROWS * sizeof (*vs->lossy_rect)); for (i = 0; i < VNC_STAT_ROWS; ++i) { vs->lossy_rect[i] = qemu_mallocz(VNC_STAT_COLS * sizeof (uint8_t)); } VNC_DEBUG("New client on socket %d\n", csock); dcl->idle = 0; socket_set_nonblock(vs->csock); qemu_set_fd_handler2(vs->csock, NULL, vnc_client_read, NULL, vs); vnc_client_cache_addr(vs); vnc_qmp_event(vs, QEVENT_VNC_CONNECTED); vs->vd = vd; vs->ds = vd->ds; vs->last_x = -1; vs->last_y = -1; vs->as.freq = 44100; vs->as.nchannels = 2; vs->as.fmt = AUD_FMT_S16; vs->as.endianness = 0; #ifdef CONFIG_VNC_THREAD qemu_mutex_init(&vs->output_mutex); #endif QTAILQ_INSERT_HEAD(&vd->clients, vs, next); vga_hw_update(); vnc_write(vs, "RFB 003.008\n", 12); vnc_flush(vs); vnc_read_when(vs, protocol_version, 12); reset_keys(vs); if (vs->vd->lock_key_sync) vs->led = qemu_add_led_event_handler(kbd_leds, vs); vs->mouse_mode_notifier.notify = check_pointer_type_change; qemu_add_mouse_mode_change_notifier(&vs->mouse_mode_notifier); vnc_init_timer(vd); /* vs might be free()ed here */ }
idx: 12115
project: qemu | commit_id: 13f12430d48b62e2304e0e5a7c607279af68b98a | target: 0
qcrypto_tls_session_new(QCryptoTLSCreds *creds, const char *hostname, const char *aclname, QCryptoTLSCredsEndpoint endpoint, Error **errp) { QCryptoTLSSession *session; int ret; session = g_new0(QCryptoTLSSession, 1); trace_qcrypto_tls_session_new( session, creds, hostname ? hostname : "<none>", aclname ? aclname : "<none>", endpoint); if (hostname) { session->hostname = g_strdup(hostname); } if (aclname) { session->aclname = g_strdup(aclname); } session->creds = creds; object_ref(OBJECT(creds)); if (creds->endpoint != endpoint) { error_setg(errp, "Credentials endpoint doesn't match session"); goto error; } if (endpoint == QCRYPTO_TLS_CREDS_ENDPOINT_SERVER) { ret = gnutls_init(&session->handle, GNUTLS_SERVER); } else { ret = gnutls_init(&session->handle, GNUTLS_CLIENT); } if (ret < 0) { error_setg(errp, "Cannot initialize TLS session: %s", gnutls_strerror(ret)); goto error; } if (object_dynamic_cast(OBJECT(creds), TYPE_QCRYPTO_TLS_CREDS_ANON)) { QCryptoTLSCredsAnon *acreds = QCRYPTO_TLS_CREDS_ANON(creds); ret = gnutls_priority_set_direct(session->handle, "NORMAL:+ANON-DH", NULL); if (ret < 0) { error_setg(errp, "Unable to set TLS session priority: %s", gnutls_strerror(ret)); goto error; } if (creds->endpoint == QCRYPTO_TLS_CREDS_ENDPOINT_SERVER) { ret = gnutls_credentials_set(session->handle, GNUTLS_CRD_ANON, acreds->data.server); } else { ret = gnutls_credentials_set(session->handle, GNUTLS_CRD_ANON, acreds->data.client); } if (ret < 0) { error_setg(errp, "Cannot set session credentials: %s", gnutls_strerror(ret)); goto error; } } else if (object_dynamic_cast(OBJECT(creds), TYPE_QCRYPTO_TLS_CREDS_X509)) { QCryptoTLSCredsX509 *tcreds = QCRYPTO_TLS_CREDS_X509(creds); ret = gnutls_set_default_priority(session->handle); if (ret < 0) { error_setg(errp, "Cannot set default TLS session priority: %s", gnutls_strerror(ret)); goto error; } ret = gnutls_credentials_set(session->handle, GNUTLS_CRD_CERTIFICATE, tcreds->data); if (ret < 0) { error_setg(errp, "Cannot set session credentials: %s", gnutls_strerror(ret)); goto error; } if (creds->endpoint == QCRYPTO_TLS_CREDS_ENDPOINT_SERVER) { /* This requests, but does not enforce a client cert. * The cert checking code later does enforcement */ gnutls_certificate_server_set_request(session->handle, GNUTLS_CERT_REQUEST); } } else { error_setg(errp, "Unsupported TLS credentials type %s", object_get_typename(OBJECT(creds))); goto error; } gnutls_transport_set_ptr(session->handle, session); gnutls_transport_set_push_function(session->handle, qcrypto_tls_session_push); gnutls_transport_set_pull_function(session->handle, qcrypto_tls_session_pull); return session; error: qcrypto_tls_session_free(session); return NULL; }
idx: 12116
project: qemu | commit_id: 1828be316f6637d43dd4c4f5f32925b17fb8107f | target: 0
static void tcg_cpu_exec(void) { int ret = 0; if (next_cpu == NULL) next_cpu = first_cpu; for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) { CPUState *env = cur_cpu = next_cpu; if (timer_alarm_pending) { timer_alarm_pending = 0; break; } if (cpu_can_run(env)) ret = qemu_cpu_exec(env); else if (env->stop) break; if (ret == EXCP_DEBUG) { gdb_set_stop_cpu(env); debug_requested = 1; break; } } }
idx: 12118
project: qemu | commit_id: 7df9381b7aa56c897e344f3bfe43bf5848bbd3e0 | target: 0
static int vfio_base_device_init(VFIODevice *vbasedev) { VFIOGroup *group; VFIODevice *vbasedev_iter; char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name; ssize_t len; struct stat st; int groupid; int ret; /* name must be set prior to the call */ if (!vbasedev->name || strchr(vbasedev->name, '/')) { return -EINVAL; } /* Check that the host device exists */ g_snprintf(path, sizeof(path), "/sys/bus/platform/devices/%s/", vbasedev->name); if (stat(path, &st) < 0) { error_report("vfio: error: no such host device: %s", path); return -errno; } g_strlcat(path, "iommu_group", sizeof(path)); len = readlink(path, iommu_group_path, sizeof(iommu_group_path)); if (len < 0 || len >= sizeof(iommu_group_path)) { error_report("vfio: error no iommu_group for device"); return len < 0 ? -errno : -ENAMETOOLONG; } iommu_group_path[len] = 0; group_name = basename(iommu_group_path); if (sscanf(group_name, "%d", &groupid) != 1) { error_report("vfio: error reading %s: %m", path); return -errno; } trace_vfio_platform_base_device_init(vbasedev->name, groupid); group = vfio_get_group(groupid, &address_space_memory); if (!group) { error_report("vfio: failed to get group %d", groupid); return -ENOENT; } g_snprintf(path, sizeof(path), "%s", vbasedev->name); QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) { error_report("vfio: error: device %s is already attached", path); vfio_put_group(group); return -EBUSY; } } ret = vfio_get_device(group, path, vbasedev); if (ret) { error_report("vfio: failed to get device %s", path); vfio_put_group(group); return ret; } ret = vfio_populate_device(vbasedev); if (ret) { error_report("vfio: failed to populate device %s", path); vfio_put_group(group); } return ret; }
idx: 12119
project: qemu | commit_id: c11e80e299e57c64934c164b231fa0d4279db445 | target: 0
void Y8950UpdateOne(FM_OPL *OPL, INT16 *buffer, int length) { int i; int data; OPLSAMPLE *buf = buffer; UINT32 amsCnt = OPL->amsCnt; UINT32 vibCnt = OPL->vibCnt; UINT8 rythm = OPL->rythm&0x20; OPL_CH *CH,*R_CH; YM_DELTAT *DELTAT = OPL->deltat; /* setup DELTA-T unit */ YM_DELTAT_DECODE_PRESET(DELTAT); if( (void *)OPL != cur_chip ){ cur_chip = (void *)OPL; /* channel pointers */ S_CH = OPL->P_CH; E_CH = &S_CH[9]; /* rythm slot */ SLOT7_1 = &S_CH[7].SLOT[SLOT1]; SLOT7_2 = &S_CH[7].SLOT[SLOT2]; SLOT8_1 = &S_CH[8].SLOT[SLOT1]; SLOT8_2 = &S_CH[8].SLOT[SLOT2]; /* LFO state */ amsIncr = OPL->amsIncr; vibIncr = OPL->vibIncr; ams_table = OPL->ams_table; vib_table = OPL->vib_table; } R_CH = rythm ? &S_CH[6] : E_CH; for( i=0; i < length ; i++ ) { /* channel A channel B channel C */ /* LFO */ ams = ams_table[(amsCnt+=amsIncr)>>AMS_SHIFT]; vib = vib_table[(vibCnt+=vibIncr)>>VIB_SHIFT]; outd[0] = 0; /* deltaT ADPCM */ if( DELTAT->portstate ) YM_DELTAT_ADPCM_CALC(DELTAT); /* FM part */ for(CH=S_CH ; CH < R_CH ; CH++) OPL_CALC_CH(CH); /* Rythn part */ if(rythm) OPL_CALC_RH(S_CH); /* limit check */ data = Limit( outd[0] , OPL_MAXOUT, OPL_MINOUT ); /* store to sound buffer */ buf[i] = data >> OPL_OUTSB; } OPL->amsCnt = amsCnt; OPL->vibCnt = vibCnt; /* deltaT START flag */ if( !DELTAT->portstate ) OPL->status &= 0xfe; }
idx: 12120
project: qemu | commit_id: a1e985833cde3208b0f57c4c7e640b60fbc6c54d | target: 0
static const ppc_def_t *ppc_find_by_pvr (uint32_t pvr) { int i; for (i = 0; i < ARRAY_SIZE(ppc_defs); i++) { /* If we have an exact match, we're done */ if (pvr == ppc_defs[i].pvr) { return &ppc_defs[i]; } } return NULL; }
idx: 12122
project: qemu | commit_id: eb5d4f5329df83ea15244b47f7fbca21adaae41b | target: 0
static int slirp_state_load(QEMUFile *f, void *opaque, int version_id) { Slirp *slirp = opaque; struct ex_list *ex_ptr; while (qemu_get_byte(f)) { int ret; struct socket *so = socreate(slirp); if (!so) return -ENOMEM; ret = vmstate_load_state(f, &vmstate_slirp_socket, so, version_id); if (ret < 0) return ret; if ((so->so_faddr.s_addr & slirp->vnetwork_mask.s_addr) != slirp->vnetwork_addr.s_addr) { return -EINVAL; } for (ex_ptr = slirp->exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next) { if (ex_ptr->ex_pty == 3 && so->so_faddr.s_addr == ex_ptr->ex_addr.s_addr && so->so_fport == ex_ptr->ex_fport) { break; } } if (!ex_ptr) return -EINVAL; so->extra = (void *)ex_ptr->ex_exec; } if (version_id >= 2) { slirp->ip_id = qemu_get_be16(f); } if (version_id >= 3) { slirp_bootp_load(f, slirp); } return 0; }
idx: 12124
project: qemu | commit_id: 629bd516fda67c95ba1c7d1393bacb9e68ea0712 | target: 0
int get_segment32(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, int rw, int type) { hwaddr hash; target_ulong vsid; int ds, pr, target_page_bits; int ret, ret2; target_ulong sr, pgidx; pr = msr_pr; ctx->eaddr = eaddr; sr = env->sr[eaddr >> 28]; ctx->key = (((sr & 0x20000000) && (pr != 0)) || ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; ds = sr & 0x80000000 ? 1 : 0; ctx->nx = sr & 0x10000000 ? 1 : 0; vsid = sr & 0x00FFFFFF; target_page_bits = TARGET_PAGE_BITS; LOG_MMU("Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx " ir=%d dr=%d pr=%d %d t=%d\n", eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, (int)msr_dr, pr != 0 ? 1 : 0, rw, type); pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; hash = vsid ^ pgidx; ctx->ptem = (vsid << 7) | (pgidx >> 10); LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", ctx->key, ds, ctx->nx, vsid); ret = -1; if (!ds) { /* Check if instruction fetch is allowed, if needed */ if (type != ACCESS_CODE || ctx->nx == 0) { /* Page address translation */ LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx " hash " TARGET_FMT_plx "\n", env->htab_base, env->htab_mask, hash); ctx->hash[0] = hash; ctx->hash[1] = ~hash; /* Initialize real address with an invalid value */ ctx->raddr = (hwaddr)-1ULL; LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx " hash=" TARGET_FMT_plx "\n", env->htab_base, env->htab_mask, vsid, ctx->ptem, ctx->hash[0]); /* Primary table lookup */ ret = find_pte32(env, ctx, 0, rw, type, target_page_bits); if (ret < 0) { /* Secondary table lookup */ LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx " hash=" TARGET_FMT_plx "\n", env->htab_base, env->htab_mask, vsid, ctx->ptem, ctx->hash[1]); ret2 = find_pte32(env, ctx, 1, rw, type, target_page_bits); if (ret2 != -1) { ret = ret2; } } #if defined(DUMP_PAGE_TABLES) if (qemu_log_enabled()) { hwaddr curaddr; uint32_t a0, a1, a2, a3; qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx "\n", sdr, mask + 0x80); for (curaddr = sdr; curaddr < (sdr + mask + 0x80); curaddr += 16) { a0 = ldl_phys(curaddr); a1 = ldl_phys(curaddr + 4); a2 = ldl_phys(curaddr + 8); a3 = ldl_phys(curaddr + 12); if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n", curaddr, a0, a1, a2, a3); } } } #endif } else { LOG_MMU("No access allowed\n"); ret = -3; } } else { target_ulong sr; LOG_MMU("direct store...\n"); /* Direct-store segment : absolutely *BUGGY* for now */ /* Direct-store implies a 32-bit MMU. * Check the Segment Register's bus unit ID (BUID). */ sr = env->sr[eaddr >> 28]; if ((sr & 0x1FF00000) >> 20 == 0x07f) { /* Memory-forced I/O controller interface access */ /* If T=1 and BUID=x'07F', the 601 performs a memory access * to SR[28-31] LA[4-31], bypassing all protection mechanisms. */ ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return 0; } switch (type) { case ACCESS_INT: /* Integer load/store : only access allowed */ break; case ACCESS_CODE: /* No code fetch is allowed in direct-store areas */ return -4; case ACCESS_FLOAT: /* Floating point load/store */ return -4; case ACCESS_RES: /* lwarx, ldarx or srwcx. */ return -4; case ACCESS_CACHE: /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */ /* Should make the instruction do no-op. 
* As it already do no-op, it's quite easy :-) */ ctx->raddr = eaddr; return 0; case ACCESS_EXT: /* eciwx or ecowx */ return -4; default: qemu_log("ERROR: instruction should not need " "address translation\n"); return -4; } if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) { ctx->raddr = eaddr; ret = 2; } else { ret = -2; } } return ret; }
idx: 12125
project: qemu | commit_id: a8170e5e97ad17ca169c64ba87ae2f53850dab4c | target: 0
static void bonito_cop_writel(void *opaque, target_phys_addr_t addr, uint64_t val, unsigned size) { PCIBonitoState *s = opaque; ((uint32_t *)(&s->boncop))[addr/sizeof(uint32_t)] = val & 0xffffffff; }
idx: 12126
project: qemu | commit_id: 213189ab65d83ecd9072f27c80a15dcb91b6bdbf | target: 0
static void scsi_dma_restart_cb(void *opaque, int running, int reason) { SCSIDeviceState *s = opaque; SCSIRequest *r = s->requests; if (!running) return; while (r) { if (r->status & SCSI_REQ_STATUS_RETRY) { r->status &= ~SCSI_REQ_STATUS_RETRY; scsi_write_request(r); } r = r->next; } }
idx: 12127
project: FFmpeg | commit_id: fef2147b7a689b80d716c3edb9d4a18904865275 | target: 0
int ff_eac3_parse_header(AC3DecodeContext *s) { int i, blk, ch; int ac3_exponent_strategy, parse_aht_info, parse_spx_atten_data; int parse_transient_proc_info; int num_cpl_blocks; GetBitContext *gbc = &s->gbc; /* An E-AC-3 stream can have multiple independent streams which the application can select from. each independent stream can also contain dependent streams which are used to add or replace channels. */ if (s->frame_type == EAC3_FRAME_TYPE_DEPENDENT) { avpriv_request_sample(s->avctx, "Dependent substream decoding"); return AAC_AC3_PARSE_ERROR_FRAME_TYPE; } else if (s->frame_type == EAC3_FRAME_TYPE_RESERVED) { av_log(s->avctx, AV_LOG_ERROR, "Reserved frame type\n"); return AAC_AC3_PARSE_ERROR_FRAME_TYPE; } /* The substream id indicates which substream this frame belongs to. each independent stream has its own substream id, and the dependent streams associated to an independent stream have matching substream id's. */ if (s->substreamid) { /* only decode substream with id=0. skip any additional substreams. */ avpriv_request_sample(s->avctx, "Additional substreams"); return AAC_AC3_PARSE_ERROR_FRAME_TYPE; } if (s->bit_alloc_params.sr_code == EAC3_SR_CODE_REDUCED) { /* The E-AC-3 specification does not tell how to handle reduced sample rates in bit allocation. The best assumption would be that it is handled like AC-3 DolbyNet, but we cannot be sure until we have a sample which utilizes this feature. */ avpriv_request_sample(s->avctx, "Reduced sampling rate"); return AVERROR_PATCHWELCOME; } skip_bits(gbc, 5); // skip bitstream id /* volume control params */ for (i = 0; i < (s->channel_mode ? 1 : 2); i++) { skip_bits(gbc, 5); // skip dialog normalization if (get_bits1(gbc)) { skip_bits(gbc, 8); // skip compression gain word } } /* dependent stream channel map */ if (s->frame_type == EAC3_FRAME_TYPE_DEPENDENT) { if (get_bits1(gbc)) { skip_bits(gbc, 16); // skip custom channel map } } /* mixing metadata */ if (get_bits1(gbc)) { /* center and surround mix levels */ if (s->channel_mode > AC3_CHMODE_STEREO) { s->preferred_downmix = get_bits(gbc, 2); if (s->channel_mode & 1) { /* if three front channels exist */ s->center_mix_level_ltrt = get_bits(gbc, 3); s->center_mix_level = get_bits(gbc, 3); } if (s->channel_mode & 4) { /* if a surround channel exists */ s->surround_mix_level_ltrt = av_clip(get_bits(gbc, 3), 3, 7); s->surround_mix_level = av_clip(get_bits(gbc, 3), 3, 7); } } /* lfe mix level */ if (s->lfe_on && (s->lfe_mix_level_exists = get_bits1(gbc))) { s->lfe_mix_level = get_bits(gbc, 5); } /* info for mixing with other streams and substreams */ if (s->frame_type == EAC3_FRAME_TYPE_INDEPENDENT) { for (i = 0; i < (s->channel_mode ? 1 : 2); i++) { // TODO: apply program scale factor if (get_bits1(gbc)) { skip_bits(gbc, 6); // skip program scale factor } } if (get_bits1(gbc)) { skip_bits(gbc, 6); // skip external program scale factor } /* skip mixing parameter data */ switch(get_bits(gbc, 2)) { case 1: skip_bits(gbc, 5); break; case 2: skip_bits(gbc, 12); break; case 3: { int mix_data_size = (get_bits(gbc, 5) + 2) << 3; skip_bits_long(gbc, mix_data_size); break; } } /* skip pan information for mono or dual mono source */ if (s->channel_mode < AC3_CHMODE_STEREO) { for (i = 0; i < (s->channel_mode ? 
1 : 2); i++) { if (get_bits1(gbc)) { /* note: this is not in the ATSC A/52B specification reference: ETSI TS 102 366 V1.1.1 section: E.1.3.1.25 */ skip_bits(gbc, 8); // skip pan mean direction index skip_bits(gbc, 6); // skip reserved paninfo bits } } } /* skip mixing configuration information */ if (get_bits1(gbc)) { for (blk = 0; blk < s->num_blocks; blk++) { if (s->num_blocks == 1 || get_bits1(gbc)) { skip_bits(gbc, 5); } } } } } /* informational metadata */ if (get_bits1(gbc)) { s->bitstream_mode = get_bits(gbc, 3); skip_bits(gbc, 2); // skip copyright bit and original bitstream bit if (s->channel_mode == AC3_CHMODE_STEREO) { s->dolby_surround_mode = get_bits(gbc, 2); s->dolby_headphone_mode = get_bits(gbc, 2); } if (s->channel_mode >= AC3_CHMODE_2F2R) { s->dolby_surround_ex_mode = get_bits(gbc, 2); } for (i = 0; i < (s->channel_mode ? 1 : 2); i++) { if (get_bits1(gbc)) { skip_bits(gbc, 8); // skip mix level, room type, and A/D converter type } } if (s->bit_alloc_params.sr_code != EAC3_SR_CODE_REDUCED) { skip_bits1(gbc); // skip source sample rate code } } /* converter synchronization flag If frames are less than six blocks, this bit should be turned on once every 6 blocks to indicate the start of a frame set. reference: RFC 4598, Section 2.1.3 Frame Sets */ if (s->frame_type == EAC3_FRAME_TYPE_INDEPENDENT && s->num_blocks != 6) { skip_bits1(gbc); // skip converter synchronization flag } /* original frame size code if this stream was converted from AC-3 */ if (s->frame_type == EAC3_FRAME_TYPE_AC3_CONVERT && (s->num_blocks == 6 || get_bits1(gbc))) { skip_bits(gbc, 6); // skip frame size code } /* additional bitstream info */ if (get_bits1(gbc)) { int addbsil = get_bits(gbc, 6); for (i = 0; i < addbsil + 1; i++) { skip_bits(gbc, 8); // skip additional bit stream info } } /* audio frame syntax flags, strategy data, and per-frame data */ if (s->num_blocks == 6) { ac3_exponent_strategy = get_bits1(gbc); parse_aht_info = get_bits1(gbc); } else { /* less than 6 blocks, so use AC-3-style exponent strategy syntax, and do not use AHT */ ac3_exponent_strategy = 1; parse_aht_info = 0; } s->snr_offset_strategy = get_bits(gbc, 2); parse_transient_proc_info = get_bits1(gbc); s->block_switch_syntax = get_bits1(gbc); if (!s->block_switch_syntax) memset(s->block_switch, 0, sizeof(s->block_switch)); s->dither_flag_syntax = get_bits1(gbc); if (!s->dither_flag_syntax) { for (ch = 1; ch <= s->fbw_channels; ch++) s->dither_flag[ch] = 1; } s->dither_flag[CPL_CH] = s->dither_flag[s->lfe_ch] = 0; s->bit_allocation_syntax = get_bits1(gbc); if (!s->bit_allocation_syntax) { /* set default bit allocation parameters */ s->bit_alloc_params.slow_decay = ff_ac3_slow_decay_tab[2]; s->bit_alloc_params.fast_decay = ff_ac3_fast_decay_tab[1]; s->bit_alloc_params.slow_gain = ff_ac3_slow_gain_tab [1]; s->bit_alloc_params.db_per_bit = ff_ac3_db_per_bit_tab[2]; s->bit_alloc_params.floor = ff_ac3_floor_tab [7]; } s->fast_gain_syntax = get_bits1(gbc); s->dba_syntax = get_bits1(gbc); s->skip_syntax = get_bits1(gbc); parse_spx_atten_data = get_bits1(gbc); /* coupling strategy occurrence and coupling use per block */ num_cpl_blocks = 0; if (s->channel_mode > 1) { for (blk = 0; blk < s->num_blocks; blk++) { s->cpl_strategy_exists[blk] = (!blk || get_bits1(gbc)); if (s->cpl_strategy_exists[blk]) { s->cpl_in_use[blk] = get_bits1(gbc); } else { s->cpl_in_use[blk] = s->cpl_in_use[blk-1]; } num_cpl_blocks += s->cpl_in_use[blk]; } } else { memset(s->cpl_in_use, 0, sizeof(s->cpl_in_use)); } /* exponent strategy data */ if 
(ac3_exponent_strategy) { /* AC-3-style exponent strategy syntax */ for (blk = 0; blk < s->num_blocks; blk++) { for (ch = !s->cpl_in_use[blk]; ch <= s->fbw_channels; ch++) { s->exp_strategy[blk][ch] = get_bits(gbc, 2); } } } else { /* LUT-based exponent strategy syntax */ for (ch = !((s->channel_mode > 1) && num_cpl_blocks); ch <= s->fbw_channels; ch++) { int frmchexpstr = get_bits(gbc, 5); for (blk = 0; blk < 6; blk++) { s->exp_strategy[blk][ch] = ff_eac3_frm_expstr[frmchexpstr][blk]; } } } /* LFE exponent strategy */ if (s->lfe_on) { for (blk = 0; blk < s->num_blocks; blk++) { s->exp_strategy[blk][s->lfe_ch] = get_bits1(gbc); } } /* original exponent strategies if this stream was converted from AC-3 */ if (s->frame_type == EAC3_FRAME_TYPE_INDEPENDENT && (s->num_blocks == 6 || get_bits1(gbc))) { skip_bits(gbc, 5 * s->fbw_channels); // skip converter channel exponent strategy } /* determine which channels use AHT */ if (parse_aht_info) { /* For AHT to be used, all non-zero blocks must reuse exponents from the first block. Furthermore, for AHT to be used in the coupling channel, all blocks must use coupling and use the same coupling strategy. */ s->channel_uses_aht[CPL_CH]=0; for (ch = (num_cpl_blocks != 6); ch <= s->channels; ch++) { int use_aht = 1; for (blk = 1; blk < 6; blk++) { if ((s->exp_strategy[blk][ch] != EXP_REUSE) || (!ch && s->cpl_strategy_exists[blk])) { use_aht = 0; break; } } s->channel_uses_aht[ch] = use_aht && get_bits1(gbc); } } else { memset(s->channel_uses_aht, 0, sizeof(s->channel_uses_aht)); } /* per-frame SNR offset */ if (!s->snr_offset_strategy) { int csnroffst = (get_bits(gbc, 6) - 15) << 4; int snroffst = (csnroffst + get_bits(gbc, 4)) << 2; for (ch = 0; ch <= s->channels; ch++) s->snr_offset[ch] = snroffst; } /* transient pre-noise processing data */ if (parse_transient_proc_info) { for (ch = 1; ch <= s->fbw_channels; ch++) { if (get_bits1(gbc)) { // channel in transient processing skip_bits(gbc, 10); // skip transient processing location skip_bits(gbc, 8); // skip transient processing length } } } /* spectral extension attenuation data */ for (ch = 1; ch <= s->fbw_channels; ch++) { if (parse_spx_atten_data && get_bits1(gbc)) { s->spx_atten_code[ch] = get_bits(gbc, 5); } else { s->spx_atten_code[ch] = -1; } } /* block start information */ if (s->num_blocks > 1 && get_bits1(gbc)) { /* reference: Section E2.3.2.27 nblkstrtbits = (numblks - 1) * (4 + ceiling(log2(words_per_frame))) The spec does not say what this data is or what it's used for. It is likely the offset of each block within the frame. */ int block_start_bits = (s->num_blocks-1) * (4 + av_log2(s->frame_size-2)); skip_bits_long(gbc, block_start_bits); avpriv_request_sample(s->avctx, "Block start info"); } /* syntax state initialization */ for (ch = 1; ch <= s->fbw_channels; ch++) { s->first_spx_coords[ch] = 1; s->first_cpl_coords[ch] = 1; } s->first_cpl_leak = 1; return 0; }
idx: 12128
project: qemu | commit_id: a8170e5e97ad17ca169c64ba87ae2f53850dab4c | target: 0
qemu_irq *mcf_intc_init(MemoryRegion *sysmem, target_phys_addr_t base, CPUM68KState *env) { mcf_intc_state *s; s = g_malloc0(sizeof(mcf_intc_state)); s->env = env; mcf_intc_reset(s); memory_region_init_io(&s->iomem, &mcf_intc_ops, s, "mcf", 0x100); memory_region_add_subregion(sysmem, base, &s->iomem); return qemu_allocate_irqs(mcf_intc_set_irq, s, 64); }
idx: 12129
project: qemu | commit_id: 4be746345f13e99e468c60acbd3a355e8183e3ce | target: 0
SDState *sd_init(BlockDriverState *bs, bool is_spi) { SDState *sd; if (bs && bdrv_is_read_only(bs)) { fprintf(stderr, "sd_init: Cannot use read-only drive\n"); return NULL; } sd = (SDState *) g_malloc0(sizeof(SDState)); sd->buf = qemu_blockalign(bs, 512); sd->spi = is_spi; sd->enable = true; sd_reset(sd, bs); if (sd->bdrv) { bdrv_attach_dev_nofail(sd->bdrv, sd); bdrv_set_dev_ops(sd->bdrv, &sd_block_ops, sd); } vmstate_register(NULL, -1, &sd_vmstate, sd); return sd; }
idx: 12131
project: qemu | commit_id: 57407ea44cc0a3d630b9b89a2be011f1955ce5c1 | target: 0
static void isa_ne2000_cleanup(NetClientState *nc) { NE2000State *s = qemu_get_nic_opaque(nc); s->nic = NULL; }
idx: 12135
project: qemu | commit_id: 20674449453924ac808a82619456f68bb14df007 | target: 0
static int configure_accelerator(MachineClass *mc) { const char *p; char buf[10]; int i, ret; bool accel_initialised = false; bool init_failed = false; p = qemu_opt_get(qemu_get_machine_opts(), "accel"); if (p == NULL) { /* Use the default "accelerator", tcg */ p = "tcg"; } while (!accel_initialised && *p != '\0') { if (*p == ':') { p++; } p = get_opt_name(buf, sizeof (buf), p, ':'); for (i = 0; i < ARRAY_SIZE(accel_list); i++) { if (strcmp(accel_list[i].opt_name, buf) == 0) { if (!accel_list[i].available()) { printf("%s not supported for this target\n", accel_list[i].name); break; } *(accel_list[i].allowed) = true; ret = accel_list[i].init(mc); if (ret < 0) { init_failed = true; fprintf(stderr, "failed to initialize %s: %s\n", accel_list[i].name, strerror(-ret)); *(accel_list[i].allowed) = false; } else { accel_initialised = true; } break; } } if (i == ARRAY_SIZE(accel_list)) { fprintf(stderr, "\"%s\" accelerator does not exist.\n", buf); } } if (!accel_initialised) { if (!init_failed) { fprintf(stderr, "No accelerator found!\n"); } exit(1); } if (init_failed) { fprintf(stderr, "Back to %s accelerator.\n", accel_list[i].name); } return !accel_initialised; }
idx: 12136
project: qemu | commit_id: 4ffb17f5c3244e405198ae285ffbb20a62e0d4b3 | target: 0
VirtIODevice *virtio_net_init(DeviceState *dev) { VirtIONet *n; static int virtio_net_id; n = (VirtIONet *)virtio_common_init("virtio-net", VIRTIO_ID_NET, sizeof(struct virtio_net_config), sizeof(VirtIONet)); n->vdev.get_config = virtio_net_get_config; n->vdev.set_config = virtio_net_set_config; n->vdev.get_features = virtio_net_get_features; n->vdev.set_features = virtio_net_set_features; n->vdev.bad_features = virtio_net_bad_features; n->vdev.reset = virtio_net_reset; n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx); n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx); n->ctrl_vq = virtio_add_queue(&n->vdev, 16, virtio_net_handle_ctrl); qdev_get_macaddr(dev, n->mac); n->status = VIRTIO_NET_S_LINK_UP; n->vc = qdev_get_vlan_client(dev, virtio_net_can_receive, virtio_net_receive, NULL, virtio_net_cleanup, n); n->vc->link_status_changed = virtio_net_set_link_status; qemu_format_nic_info_str(n->vc, n->mac); n->tx_timer = qemu_new_timer(vm_clock, virtio_net_tx_timer, n); n->tx_timer_active = 0; n->mergeable_rx_bufs = 0; n->promisc = 1; /* for compatibility */ n->mac_table.macs = qemu_mallocz(MAC_TABLE_ENTRIES * ETH_ALEN); n->vlans = qemu_mallocz(MAX_VLAN >> 3); register_savevm("virtio-net", virtio_net_id++, VIRTIO_NET_VM_VERSION, virtio_net_save, virtio_net_load, n); return &n->vdev; }
idx: 12138
project: qemu | commit_id: 922f893e57da24bc80db3e79bea56485d1c111fa | target: 0
static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis, int slot) { AHCIDevice *ad = &s->dev[port]; IDEState *ide_state = &ad->port.ifs[0]; NCQFrame *ncq_fis = (NCQFrame*)cmd_fis; uint8_t tag = ncq_fis->tag >> 3; NCQTransferState *ncq_tfs = &ad->ncq_tfs[tag]; size_t size; if (ncq_tfs->used) { /* error - already in use */ fprintf(stderr, "%s: tag %d already used\n", __FUNCTION__, tag); return; } ncq_tfs->used = 1; ncq_tfs->drive = ad; ncq_tfs->slot = slot; ncq_tfs->cmd = ncq_fis->command; ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) | ((uint64_t)ncq_fis->lba4 << 32) | ((uint64_t)ncq_fis->lba3 << 24) | ((uint64_t)ncq_fis->lba2 << 16) | ((uint64_t)ncq_fis->lba1 << 8) | (uint64_t)ncq_fis->lba0; ncq_tfs->tag = tag; /* Sanity-check the NCQ packet */ if (tag != slot) { DPRINTF(port, "Warn: NCQ slot (%d) did not match the given tag (%d)\n", slot, tag); } if (ncq_fis->aux0 || ncq_fis->aux1 || ncq_fis->aux2 || ncq_fis->aux3) { DPRINTF(port, "Warn: Attempt to use NCQ auxiliary fields.\n"); } if (ncq_fis->prio || ncq_fis->icc) { DPRINTF(port, "Warn: Unsupported attempt to use PRIO/ICC fields\n"); } if (ncq_fis->fua & NCQ_FIS_FUA_MASK) { DPRINTF(port, "Warn: Unsupported attempt to use Force Unit Access\n"); } if (ncq_fis->tag & NCQ_FIS_RARC_MASK) { DPRINTF(port, "Warn: Unsupported attempt to use Rebuild Assist\n"); } ncq_tfs->sector_count = ((uint16_t)ncq_fis->sector_count_high << 8) | ncq_fis->sector_count_low; size = ncq_tfs->sector_count * 512; ahci_populate_sglist(ad, &ncq_tfs->sglist, size, 0); if (ncq_tfs->sglist.size < size) { error_report("ahci: PRDT length for NCQ command (0x%zx) " "is smaller than the requested size (0x%zx)", ncq_tfs->sglist.size, size); qemu_sglist_destroy(&ncq_tfs->sglist); ncq_err(ncq_tfs); ahci_trigger_irq(ad->hba, ad, PORT_IRQ_OVERFLOW); return; } else if (ncq_tfs->sglist.size != size) { DPRINTF(port, "Warn: PRDTL (0x%zx)" " does not match requested size (0x%zx)", ncq_tfs->sglist.size, size); } DPRINTF(port, "NCQ transfer LBA from %"PRId64" to %"PRId64", " "drive max %"PRId64"\n", ncq_tfs->lba, ncq_tfs->lba + ncq_tfs->sector_count - 1, ide_state->nb_sectors - 1); switch (ncq_tfs->cmd) { case READ_FPDMA_QUEUED: DPRINTF(port, "NCQ reading %d sectors from LBA %"PRId64", " "tag %d\n", ncq_tfs->sector_count, ncq_tfs->lba, ncq_tfs->tag); DPRINTF(port, "tag %d aio read %"PRId64"\n", ncq_tfs->tag, ncq_tfs->lba); dma_acct_start(ide_state->blk, &ncq_tfs->acct, &ncq_tfs->sglist, BLOCK_ACCT_READ); ncq_tfs->aiocb = dma_blk_read(ide_state->blk, &ncq_tfs->sglist, ncq_tfs->lba, ncq_cb, ncq_tfs); break; case WRITE_FPDMA_QUEUED: DPRINTF(port, "NCQ writing %d sectors to LBA %"PRId64", tag %d\n", ncq_tfs->sector_count, ncq_tfs->lba, ncq_tfs->tag); DPRINTF(port, "tag %d aio write %"PRId64"\n", ncq_tfs->tag, ncq_tfs->lba); dma_acct_start(ide_state->blk, &ncq_tfs->acct, &ncq_tfs->sglist, BLOCK_ACCT_WRITE); ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist, ncq_tfs->lba, ncq_cb, ncq_tfs); break; default: if (is_ncq(cmd_fis[2])) { DPRINTF(port, "error: unsupported NCQ command (0x%02x) received\n", cmd_fis[2]); } else { DPRINTF(port, "error: tried to process non-NCQ command as NCQ\n"); } qemu_sglist_destroy(&ncq_tfs->sglist); ncq_err(ncq_tfs); } }
idx: 12140
project: qemu | commit_id: 149f54b53b7666a3facd45e86eece60ce7d3b114 | target: 0
static inline uint32_t lduw_phys_internal(hwaddr addr, enum device_endian endian) { uint8_t *ptr; uint64_t val; MemoryRegionSection *section; section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS); if (!(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr))) { /* I/O case */ addr = memory_region_section_addr(section, addr); val = io_mem_read(section->mr, addr, 2); #if defined(TARGET_WORDS_BIGENDIAN) if (endian == DEVICE_LITTLE_ENDIAN) { val = bswap16(val); } #else if (endian == DEVICE_BIG_ENDIAN) { val = bswap16(val); } #endif } else { /* RAM case */ ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) + memory_region_section_addr(section, addr)); switch (endian) { case DEVICE_LITTLE_ENDIAN: val = lduw_le_p(ptr); break; case DEVICE_BIG_ENDIAN: val = lduw_be_p(ptr); break; default: val = lduw_p(ptr); break; } } return val; }
12,141
FFmpeg
a304def1dca50d63bf2a39651f84792980db3508
0
static void decode_block(BinkAudioContext *s, short *out, int use_dct) { int ch, i, j, k; float q, quant[25]; int width, coeff; GetBitContext *gb = &s->gb; if (use_dct) skip_bits(gb, 2); for (ch = 0; ch < s->channels; ch++) { FFTSample *coeffs = s->coeffs_ptr[ch]; q = 0.0f; coeffs[0] = get_float(gb) * s->root; coeffs[1] = get_float(gb) * s->root; for (i = 0; i < s->num_bands; i++) { /* constant is result of 0.066399999/log10(M_E) */ int value = get_bits(gb, 8); quant[i] = expf(FFMIN(value, 95) * 0.15289164787221953823f) * s->root; } // find band (k) for (k = 0; s->bands[k] < 1; k++) { q = quant[k]; } // parse coefficients i = 2; while (i < s->frame_len) { if (get_bits1(gb)) { j = i + rle_length_tab[get_bits(gb, 4)] * 8; } else { j = i + 8; } j = FFMIN(j, s->frame_len); width = get_bits(gb, 4); if (width == 0) { memset(coeffs + i, 0, (j - i) * sizeof(*coeffs)); i = j; while (s->bands[k] < i) q = quant[k++]; } else { while (i < j) { if (s->bands[k] == i) q = quant[k++]; coeff = get_bits(gb, width); if (coeff) { if (get_bits1(gb)) coeffs[i] = -q * coeff; else coeffs[i] = q * coeff; } else { coeffs[i] = 0.0f; } i++; } } } if (CONFIG_BINKAUDIO_DCT_DECODER && use_dct) { coeffs[0] /= 0.5; ff_dct_calc (&s->trans.dct, coeffs); s->dsp.vector_fmul_scalar(coeffs, coeffs, s->frame_len / 2, s->frame_len); } else if (CONFIG_BINKAUDIO_RDFT_DECODER) ff_rdft_calc(&s->trans.rdft, coeffs); } s->fmt_conv.float_to_int16_interleave(out, (const float **)s->coeffs_ptr, s->frame_len, s->channels); if (!s->first) { int count = s->overlap_len * s->channels; int shift = av_log2(count); for (i = 0; i < count; i++) { out[i] = (s->previous[i] * (count - i) + out[i] * i) >> shift; } } memcpy(s->previous, out + s->block_size, s->overlap_len * s->channels * sizeof(*out)); s->first = 0; }
12,143
qemu
0ae18ceeaaa2c1749e742c4b112f6c3bf0896408
0
static void sun4c_hw_init(const struct sun4c_hwdef *hwdef, ram_addr_t RAM_size, const char *boot_device, DisplayState *ds, const char *kernel_filename, const char *kernel_cmdline, const char *initrd_filename, const char *cpu_model) { CPUState *env; unsigned int i; void *iommu, *espdma, *ledma, *main_esp, *nvram; qemu_irq *cpu_irqs, *slavio_irq, *espdma_irq, *ledma_irq; qemu_irq *esp_reset, *le_reset; qemu_irq *fdc_tc; ram_addr_t ram_offset, prom_offset, tcx_offset; unsigned long kernel_size; int ret; char buf[1024]; BlockDriverState *fd[MAX_FD]; int drive_index; void *fw_cfg; /* init CPU */ if (!cpu_model) cpu_model = hwdef->default_cpu_model; env = cpu_init(cpu_model); if (!env) { fprintf(stderr, "qemu: Unable to find Sparc CPU definition\n"); exit(1); } cpu_sparc_set_id(env, 0); qemu_register_reset(main_cpu_reset, env); cpu_irqs = qemu_allocate_irqs(cpu_set_irq, env, MAX_PILS); env->prom_addr = hwdef->slavio_base; /* allocate RAM */ if ((uint64_t)RAM_size > hwdef->max_mem) { fprintf(stderr, "qemu: Too much memory for this machine: %d, maximum %d\n", (unsigned int)(RAM_size / (1024 * 1024)), (unsigned int)(hwdef->max_mem / (1024 * 1024))); exit(1); } ram_offset = qemu_ram_alloc(RAM_size); cpu_register_physical_memory(0, RAM_size, ram_offset); /* load boot prom */ prom_offset = qemu_ram_alloc(PROM_SIZE_MAX); cpu_register_physical_memory(hwdef->slavio_base, (PROM_SIZE_MAX + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK, prom_offset | IO_MEM_ROM); if (bios_name == NULL) bios_name = PROM_FILENAME; snprintf(buf, sizeof(buf), "%s/%s", bios_dir, bios_name); ret = load_elf(buf, hwdef->slavio_base - PROM_VADDR, NULL, NULL, NULL); if (ret < 0 || ret > PROM_SIZE_MAX) ret = load_image_targphys(buf, hwdef->slavio_base, PROM_SIZE_MAX); if (ret < 0 || ret > PROM_SIZE_MAX) { fprintf(stderr, "qemu: could not load prom '%s'\n", buf); exit(1); } /* set up devices */ slavio_intctl = sun4c_intctl_init(hwdef->intctl_base, &slavio_irq, cpu_irqs); iommu = iommu_init(hwdef->iommu_base, hwdef->iommu_version, slavio_irq[hwdef->me_irq]); espdma = sparc32_dma_init(hwdef->dma_base, slavio_irq[hwdef->esp_irq], iommu, &espdma_irq, &esp_reset); ledma = sparc32_dma_init(hwdef->dma_base + 16ULL, slavio_irq[hwdef->le_irq], iommu, &ledma_irq, &le_reset); if (graphic_depth != 8 && graphic_depth != 24) { fprintf(stderr, "qemu: Unsupported depth: %d\n", graphic_depth); exit (1); } tcx_offset = qemu_ram_alloc(hwdef->vram_size); tcx_init(ds, hwdef->tcx_base, phys_ram_base + tcx_offset, tcx_offset, hwdef->vram_size, graphic_width, graphic_height, graphic_depth); if (nd_table[0].model == NULL) nd_table[0].model = "lance"; if (strcmp(nd_table[0].model, "lance") == 0) { lance_init(&nd_table[0], hwdef->le_base, ledma, *ledma_irq, le_reset); } else if (strcmp(nd_table[0].model, "?") == 0) { fprintf(stderr, "qemu: Supported NICs: lance\n"); exit (1); } else { fprintf(stderr, "qemu: Unsupported NIC: %s\n", nd_table[0].model); exit (1); } nvram = m48t59_init(slavio_irq[0], hwdef->nvram_base, 0, hwdef->nvram_size, 2); slavio_serial_ms_kbd_init(hwdef->ms_kb_base, slavio_irq[hwdef->ms_kb_irq], nographic, ESCC_CLOCK, 1); // Slavio TTYA (base+4, Linux ttyS0) is the first Qemu serial device // Slavio TTYB (base+0, Linux ttyS1) is the second Qemu serial device escc_init(hwdef->serial_base, slavio_irq[hwdef->ser_irq], serial_hds[0], serial_hds[1], ESCC_CLOCK, 1); slavio_misc = slavio_misc_init(0, 0, hwdef->aux1_base, 0, slavio_irq[hwdef->me_irq], NULL, &fdc_tc); if (hwdef->fd_base != (target_phys_addr_t)-1) { /* there is zero or one floppy drive */ 
memset(fd, 0, sizeof(fd)); drive_index = drive_get_index(IF_FLOPPY, 0, 0); if (drive_index != -1) fd[0] = drives_table[drive_index].bdrv; sun4m_fdctrl_init(slavio_irq[hwdef->fd_irq], hwdef->fd_base, fd, fdc_tc); } if (drive_get_max_bus(IF_SCSI) > 0) { fprintf(stderr, "qemu: too many SCSI bus\n"); exit(1); } main_esp = esp_init(hwdef->esp_base, 2, espdma_memory_read, espdma_memory_write, espdma, *espdma_irq, esp_reset); for (i = 0; i < ESP_MAX_DEVS; i++) { drive_index = drive_get_index(IF_SCSI, 0, i); if (drive_index == -1) continue; esp_scsi_attach(main_esp, drives_table[drive_index].bdrv, i); } kernel_size = sun4m_load_kernel(kernel_filename, initrd_filename, RAM_size); nvram_init(nvram, (uint8_t *)&nd_table[0].macaddr, kernel_cmdline, boot_device, RAM_size, kernel_size, graphic_width, graphic_height, graphic_depth, hwdef->nvram_machine_id, "Sun4c"); fw_cfg = fw_cfg_init(0, 0, CFG_ADDR, CFG_ADDR + 2); fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1); fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id); }
12,144
qemu
bec1631100323fac0900aea71043d5c4e22fc2fa
0
static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index) { TCGLabel *l = &s->labels[label_index]; if (l->has_value) { tcg_out_goto(s, cond, l->u.value_ptr); } else { tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 0); tcg_out_b_noaddr(s, cond); } }
12,146
qemu
10ee2aaa417d8d8978cdb2bbed55ebb152df5f6b
0
static uint32_t nabm_readl (void *opaque, uint32_t addr) { PCIAC97LinkState *d = opaque; AC97LinkState *s = &d->ac97; AC97BusMasterRegs *r = NULL; uint32_t index = addr - s->base[1]; uint32_t val = ~0U; switch (index) { case PI_BDBAR: case PO_BDBAR: case MC_BDBAR: r = &s->bm_regs[GET_BM (index)]; val = r->bdbar; dolog ("BMADDR[%d] -> %#x\n", GET_BM (index), val); break; case PI_CIV: case PO_CIV: case MC_CIV: r = &s->bm_regs[GET_BM (index)]; val = r->civ | (r->lvi << 8) | (r->sr << 16); dolog ("CIV LVI SR[%d] -> %#x, %#x, %#x\n", GET_BM (index), r->civ, r->lvi, r->sr); break; case PI_PICB: case PO_PICB: case MC_PICB: r = &s->bm_regs[GET_BM (index)]; val = r->picb | (r->piv << 16) | (r->cr << 24); dolog ("PICB PIV CR[%d] -> %#x %#x %#x %#x\n", GET_BM (index), val, r->picb, r->piv, r->cr); break; case GLOB_CNT: val = s->glob_cnt; dolog ("glob_cnt -> %#x\n", val); break; case GLOB_STA: val = s->glob_sta | GS_S0CR; dolog ("glob_sta -> %#x\n", val); break; default: dolog ("U nabm readl %#x -> %#x\n", addr, val); break; } return val; }
12,147
qemu
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
0
static uint32_t dp8393x_readl(void *opaque, target_phys_addr_t addr) { uint32_t v; v = dp8393x_readw(opaque, addr); v |= dp8393x_readw(opaque, addr + 2) << 16; return v; }
12,149
qemu
08cb175a24d642a40e41db2fef2892b0a1ab504e
0
static int
qcrypto_tls_creds_x509_sanity_check(QCryptoTLSCredsX509 *creds, bool isServer, const char *cacertFile, const char *certFile, Error **errp) { gnutls_x509_crt_t cert = NULL; gnutls_x509_crt_t cacerts[MAX_CERTS]; size_t ncacerts = 0; size_t i; int ret = -1; memset(cacerts, 0, sizeof(cacerts)); if (access(certFile, R_OK) == 0) { cert = qcrypto_tls_creds_load_cert(creds, certFile, isServer, errp); if (!cert) { goto cleanup; } } if (access(cacertFile, R_OK) == 0) { if (qcrypto_tls_creds_load_ca_cert_list(creds, cacertFile, cacerts, MAX_CERTS, &ncacerts, errp) < 0) { goto cleanup; } } if (cert && qcrypto_tls_creds_check_cert(creds, cert, certFile, isServer, false, errp) < 0) { goto cleanup; } for (i = 0; i < ncacerts; i++) { if (qcrypto_tls_creds_check_cert(creds, cacerts[i], cacertFile, isServer, true, errp) < 0) { goto cleanup; } } if (cert && ncacerts && qcrypto_tls_creds_check_cert_pair(cert, certFile, cacerts, ncacerts, cacertFile, isServer, errp) < 0) { goto cleanup; } ret = 0; cleanup: if (cert) { gnutls_x509_crt_deinit(cert); } for (i = 0; i < ncacerts; i++) { gnutls_x509_crt_deinit(cacerts[i]); } return ret; }
12,150
qemu
c4d9d19645a484298a67e9021060bc7c2b081d0f
0
static void qemu_aio_wait_all(void) { while (qemu_aio_wait()) { /* Do nothing */ } }
12,151
qemu
42a268c241183877192c376d03bd9b6d527407c7
0
static inline void gen_jcc1(DisasContext *s, int b, int l1) { CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]); gen_update_cc_op(s); if (cc.mask != -1) { tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask); cc.reg = cpu_T[0]; } set_cc_op(s, CC_OP_DYNAMIC); if (cc.use_reg2) { tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1); } else { tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1); } }
12,152
qemu
ccf1d14a1e37abe1f0da162c00a8941963b47a4c
0
static void rtl8139_do_receive(void *opaque, const uint8_t *buf, int size, int do_interrupt) { RTL8139State *s = opaque; uint32_t packet_header = 0; uint8_t buf1[60]; static const uint8_t broadcast_macaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; DEBUG_PRINT((">>> RTL8139: received len=%d\n", size)); /* test if board clock is stopped */ if (!s->clock_enabled) { DEBUG_PRINT(("RTL8139: stopped ==========================\n")); return; } /* first check if receiver is enabled */ if (!rtl8139_receiver_enabled(s)) { DEBUG_PRINT(("RTL8139: receiver disabled ================\n")); return; } /* XXX: check this */ if (s->RxConfig & AcceptAllPhys) { /* promiscuous: receive all */ DEBUG_PRINT((">>> RTL8139: packet received in promiscuous mode\n")); } else { if (!memcmp(buf, broadcast_macaddr, 6)) { /* broadcast address */ if (!(s->RxConfig & AcceptBroadcast)) { DEBUG_PRINT((">>> RTL8139: broadcast packet rejected\n")); /* update tally counter */ ++s->tally_counters.RxERR; return; } packet_header |= RxBroadcast; DEBUG_PRINT((">>> RTL8139: broadcast packet received\n")); /* update tally counter */ ++s->tally_counters.RxOkBrd; } else if (buf[0] & 0x01) { /* multicast */ if (!(s->RxConfig & AcceptMulticast)) { DEBUG_PRINT((">>> RTL8139: multicast packet rejected\n")); /* update tally counter */ ++s->tally_counters.RxERR; return; } int mcast_idx = compute_mcast_idx(buf); if (!(s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7)))) { DEBUG_PRINT((">>> RTL8139: multicast address mismatch\n")); /* update tally counter */ ++s->tally_counters.RxERR; return; } packet_header |= RxMulticast; DEBUG_PRINT((">>> RTL8139: multicast packet received\n")); /* update tally counter */ ++s->tally_counters.RxOkMul; } else if (s->phys[0] == buf[0] && s->phys[1] == buf[1] && s->phys[2] == buf[2] && s->phys[3] == buf[3] && s->phys[4] == buf[4] && s->phys[5] == buf[5]) { /* match */ if (!(s->RxConfig & AcceptMyPhys)) { DEBUG_PRINT((">>> RTL8139: rejecting physical address matching packet\n")); /* update tally counter */ ++s->tally_counters.RxERR; return; } packet_header |= RxPhysical; DEBUG_PRINT((">>> RTL8139: physical address matching packet received\n")); /* update tally counter */ ++s->tally_counters.RxOkPhy; } else { DEBUG_PRINT((">>> RTL8139: unknown packet\n")); /* update tally counter */ ++s->tally_counters.RxERR; return; } } /* if too small buffer, then expand it */ if (size < MIN_BUF_SIZE) { memcpy(buf1, buf, size); memset(buf1 + size, 0, MIN_BUF_SIZE - size); buf = buf1; size = MIN_BUF_SIZE; } if (rtl8139_cp_receiver_enabled(s)) { DEBUG_PRINT(("RTL8139: in C+ Rx mode ================\n")); /* begin C+ receiver mode */ /* w0 ownership flag */ #define CP_RX_OWN (1<<31) /* w0 end of ring flag */ #define CP_RX_EOR (1<<30) /* w0 bits 0...12 : buffer size */ #define CP_RX_BUFFER_SIZE_MASK ((1<<13) - 1) /* w1 tag available flag */ #define CP_RX_TAVA (1<<16) /* w1 bits 0...15 : VLAN tag */ #define CP_RX_VLAN_TAG_MASK ((1<<16) - 1) /* w2 low 32bit of Rx buffer ptr */ /* w3 high 32bit of Rx buffer ptr */ int descriptor = s->currCPlusRxDesc; target_phys_addr_t cplus_rx_ring_desc; cplus_rx_ring_desc = rtl8139_addr64(s->RxRingAddrLO, s->RxRingAddrHI); cplus_rx_ring_desc += 16 * descriptor; DEBUG_PRINT(("RTL8139: +++ C+ mode reading RX descriptor %d from host memory at %08x %08x = %016" PRIx64 "\n", descriptor, s->RxRingAddrHI, s->RxRingAddrLO, (uint64_t)cplus_rx_ring_desc)); uint32_t val, rxdw0,rxdw1,rxbufLO,rxbufHI; cpu_physical_memory_read(cplus_rx_ring_desc, (uint8_t *)&val, 4); rxdw0 = le32_to_cpu(val); 
cpu_physical_memory_read(cplus_rx_ring_desc+4, (uint8_t *)&val, 4); rxdw1 = le32_to_cpu(val); cpu_physical_memory_read(cplus_rx_ring_desc+8, (uint8_t *)&val, 4); rxbufLO = le32_to_cpu(val); cpu_physical_memory_read(cplus_rx_ring_desc+12, (uint8_t *)&val, 4); rxbufHI = le32_to_cpu(val); DEBUG_PRINT(("RTL8139: +++ C+ mode RX descriptor %d %08x %08x %08x %08x\n", descriptor, rxdw0, rxdw1, rxbufLO, rxbufHI)); if (!(rxdw0 & CP_RX_OWN)) { DEBUG_PRINT(("RTL8139: C+ Rx mode : descriptor %d is owned by host\n", descriptor)); s->IntrStatus |= RxOverflow; ++s->RxMissed; /* update tally counter */ ++s->tally_counters.RxERR; ++s->tally_counters.MissPkt; rtl8139_update_irq(s); return; } uint32_t rx_space = rxdw0 & CP_RX_BUFFER_SIZE_MASK; /* TODO: scatter the packet over available receive ring descriptors space */ if (size+4 > rx_space) { DEBUG_PRINT(("RTL8139: C+ Rx mode : descriptor %d size %d received %d + 4\n", descriptor, rx_space, size)); s->IntrStatus |= RxOverflow; ++s->RxMissed; /* update tally counter */ ++s->tally_counters.RxERR; ++s->tally_counters.MissPkt; rtl8139_update_irq(s); return; } target_phys_addr_t rx_addr = rtl8139_addr64(rxbufLO, rxbufHI); /* receive/copy to target memory */ cpu_physical_memory_write( rx_addr, buf, size ); if (s->CpCmd & CPlusRxChkSum) { /* do some packet checksumming */ } /* write checksum */ #if defined (RTL8139_CALCULATE_RXCRC) val = cpu_to_le32(crc32(~0, buf, size)); #else val = 0; #endif cpu_physical_memory_write( rx_addr+size, (uint8_t *)&val, 4); /* first segment of received packet flag */ #define CP_RX_STATUS_FS (1<<29) /* last segment of received packet flag */ #define CP_RX_STATUS_LS (1<<28) /* multicast packet flag */ #define CP_RX_STATUS_MAR (1<<26) /* physical-matching packet flag */ #define CP_RX_STATUS_PAM (1<<25) /* broadcast packet flag */ #define CP_RX_STATUS_BAR (1<<24) /* runt packet flag */ #define CP_RX_STATUS_RUNT (1<<19) /* crc error flag */ #define CP_RX_STATUS_CRC (1<<18) /* IP checksum error flag */ #define CP_RX_STATUS_IPF (1<<15) /* UDP checksum error flag */ #define CP_RX_STATUS_UDPF (1<<14) /* TCP checksum error flag */ #define CP_RX_STATUS_TCPF (1<<13) /* transfer ownership to target */ rxdw0 &= ~CP_RX_OWN; /* set first segment bit */ rxdw0 |= CP_RX_STATUS_FS; /* set last segment bit */ rxdw0 |= CP_RX_STATUS_LS; /* set received packet type flags */ if (packet_header & RxBroadcast) rxdw0 |= CP_RX_STATUS_BAR; if (packet_header & RxMulticast) rxdw0 |= CP_RX_STATUS_MAR; if (packet_header & RxPhysical) rxdw0 |= CP_RX_STATUS_PAM; /* set received size */ rxdw0 &= ~CP_RX_BUFFER_SIZE_MASK; rxdw0 |= (size+4); /* reset VLAN tag flag */ rxdw1 &= ~CP_RX_TAVA; /* update ring data */ val = cpu_to_le32(rxdw0); cpu_physical_memory_write(cplus_rx_ring_desc, (uint8_t *)&val, 4); val = cpu_to_le32(rxdw1); cpu_physical_memory_write(cplus_rx_ring_desc+4, (uint8_t *)&val, 4); /* update tally counter */ ++s->tally_counters.RxOk; /* seek to next Rx descriptor */ if (rxdw0 & CP_RX_EOR) { s->currCPlusRxDesc = 0; } else { ++s->currCPlusRxDesc; } DEBUG_PRINT(("RTL8139: done C+ Rx mode ----------------\n")); } else { DEBUG_PRINT(("RTL8139: in ring Rx mode ================\n")); /* begin ring receiver mode */ int avail = MOD2(s->RxBufferSize + s->RxBufPtr - s->RxBufAddr, s->RxBufferSize); /* if receiver buffer is empty then avail == 0 */ if (avail != 0 && size + 8 >= avail) { DEBUG_PRINT(("rx overflow: rx buffer length %d head 0x%04x read 0x%04x === available 0x%04x need 0x%04x\n", s->RxBufferSize, s->RxBufAddr, s->RxBufPtr, avail, size + 8)); s->IntrStatus |= 
RxOverflow; ++s->RxMissed; rtl8139_update_irq(s); return; } packet_header |= RxStatusOK; packet_header |= (((size+4) << 16) & 0xffff0000); /* write header */ uint32_t val = cpu_to_le32(packet_header); rtl8139_write_buffer(s, (uint8_t *)&val, 4); rtl8139_write_buffer(s, buf, size); /* write checksum */ #if defined (RTL8139_CALCULATE_RXCRC) val = cpu_to_le32(crc32(~0, buf, size)); #else val = 0; #endif rtl8139_write_buffer(s, (uint8_t *)&val, 4); /* correct buffer write pointer */ s->RxBufAddr = MOD2((s->RxBufAddr + 3) & ~0x3, s->RxBufferSize); /* now we can signal we have received something */ DEBUG_PRINT((" received: rx buffer length %d head 0x%04x read 0x%04x\n", s->RxBufferSize, s->RxBufAddr, s->RxBufPtr)); } s->IntrStatus |= RxOK; if (do_interrupt) { rtl8139_update_irq(s); } }
12,153
FFmpeg
22b37f5d3200cfe4c15eded883663cf0612093c1
0
static int img_read_packet(AVFormatContext *s1, AVPacket *pkt) { VideoData *s = s1->priv_data; char filename[1024]; int ret; ByteIOContext f1, *f; if (!s->is_pipe) { if (get_frame_filename(filename, sizeof(filename), s->path, s->img_number) < 0) return -EIO; f = &f1; if (url_fopen(f, filename, URL_RDONLY) < 0) return -EIO; } else { f = &s1->pb; if (url_feof(f)) return -EIO; } av_new_packet(pkt, s->img_size); pkt->stream_index = 0; s->ptr = pkt->data; ret = av_read_image(f, filename, s->img_fmt, read_packet_alloc_cb, s); if (!s->is_pipe) { url_fclose(f); } if (ret < 0) { av_free_packet(pkt); return -EIO; /* signal EOF */ } else { pkt->pts = av_rescale((int64_t)s->img_number * s1->streams[0]->codec.frame_rate_base, s1->pts_den, s1->streams[0]->codec.frame_rate) / s1->pts_num; s->img_number++; return 0; } }
12,154
qemu
0931304788ecac6c7871f570c7ac8407b54e30c6
1
int qemu_chr_fe_write_all(CharDriverState *s, const uint8_t *buf, int len) { int offset = 0; int res; qemu_mutex_lock(&s->chr_write_lock); while (offset < len) { do { res = s->chr_write(s, buf + offset, len - offset); if (res == -1 && errno == EAGAIN) { g_usleep(100); } } while (res == -1 && errno == EAGAIN); if (res <= 0) { break; } offset += res; } qemu_mutex_unlock(&s->chr_write_lock); if (res < 0) { return res; } return offset; }
12,155
qemu
0b8b8753e4d94901627b3e86431230f2319215c4
1
static void blkreplay_bh_cb(void *opaque) { Request *req = opaque; qemu_coroutine_enter(req->co, NULL); qemu_bh_delete(req->bh); g_free(req); }
12,156
FFmpeg
6706a2986c48e3f20f1274b24345e6555d8f0f48
1
static void store_slice_c(uint8_t *dst, const uint16_t *src, int dst_linesize, int src_linesize, int width, int height, int log2_scale, const uint8_t dither[8][8]) { int y, x; #define STORE(pos) do { \ temp = ((src[x + y*src_linesize + pos] << log2_scale) + d[pos]) >> 6; \ if (temp & 0x100) \ temp = ~(temp >> 31); \ dst[x + y*dst_linesize + pos] = temp; \ } while (0) for (y = 0; y < height; y++) { const uint8_t *d = dither[y]; for (x = 0; x < width; x += 8) { int temp; STORE(0); STORE(1); STORE(2); STORE(3); STORE(4); STORE(5); STORE(6); STORE(7); } } }
12,157
qemu
a9fc37f6bc3f2ab90585cb16493da9f6dcfbfbcf
1
static QObject *qobject_input_get_object(QObjectInputVisitor *qiv, const char *name, bool consume, Error **errp) { StackObject *tos; QObject *qobj; QObject *ret; if (QSLIST_EMPTY(&qiv->stack)) { /* Starting at root, name is ignored. */ assert(qiv->root); return qiv->root; } /* We are in a container; find the next element. */ tos = QSLIST_FIRST(&qiv->stack); qobj = tos->obj; assert(qobj); if (qobject_type(qobj) == QTYPE_QDICT) { assert(name); ret = qdict_get(qobject_to_qdict(qobj), name); if (tos->h && consume && ret) { bool removed = g_hash_table_remove(tos->h, name); assert(removed); } if (!ret) { error_setg(errp, QERR_MISSING_PARAMETER, name); } } else { assert(qobject_type(qobj) == QTYPE_QLIST); assert(!name); ret = qlist_entry_obj(tos->entry); assert(ret); if (consume) { tos->entry = qlist_next(tos->entry); } } return ret; }
12,158
qemu
47d4be12c3997343e436c6cca89aefbbbeb70863
1
static void test_qemu_strtoll_full_empty(void) { const char *str = ""; int64_t res = 999; int err; err = qemu_strtoll(str, NULL, 0, &res); g_assert_cmpint(err, ==, 0); g_assert_cmpint(res, ==, 0); }
12,159
qemu
38c4d0aea3e1264c86e282d99560330adf2b6e25
1
static int64_t try_fiemap(BlockDriverState *bs, off_t start, off_t *data, off_t *hole, int nb_sectors, int *pnum) { #ifdef CONFIG_FIEMAP BDRVRawState *s = bs->opaque; int64_t ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | start; struct { struct fiemap fm; struct fiemap_extent fe; } f; if (s->skip_fiemap) { return -ENOTSUP; } f.fm.fm_start = start; f.fm.fm_length = (int64_t)nb_sectors * BDRV_SECTOR_SIZE; f.fm.fm_flags = 0; f.fm.fm_extent_count = 1; f.fm.fm_reserved = 0; if (ioctl(s->fd, FS_IOC_FIEMAP, &f) == -1) { s->skip_fiemap = true; return -errno; } if (f.fm.fm_mapped_extents == 0) { /* No extents found, data is beyond f.fm.fm_start + f.fm.fm_length. * f.fm.fm_start + f.fm.fm_length must be clamped to the file size! */ off_t length = lseek(s->fd, 0, SEEK_END); *hole = f.fm.fm_start; *data = MIN(f.fm.fm_start + f.fm.fm_length, length); } else { *data = f.fe.fe_logical; *hole = f.fe.fe_logical + f.fe.fe_length; if (f.fe.fe_flags & FIEMAP_EXTENT_UNWRITTEN) { ret |= BDRV_BLOCK_ZERO; } } return ret; #else return -ENOTSUP; #endif }
12,160
qemu
60ff4e63e2ea4738f114cbaf1f17e6e0184fc09c
1
static void calxeda_init(MachineState *machine, enum cxmachines machine_id) { ram_addr_t ram_size = machine->ram_size; const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; DeviceState *dev = NULL; SysBusDevice *busdev; qemu_irq pic[128]; int n; qemu_irq cpu_irq[4]; MemoryRegion *sysram; MemoryRegion *dram; MemoryRegion *sysmem; char *sysboot_filename; if (!cpu_model) { switch (machine_id) { case CALXEDA_HIGHBANK: cpu_model = "cortex-a9"; break; case CALXEDA_MIDWAY: cpu_model = "cortex-a15"; break; } } for (n = 0; n < smp_cpus; n++) { ObjectClass *oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model); Object *cpuobj; ARMCPU *cpu; Error *err = NULL; if (!oc) { error_report("Unable to find CPU definition"); exit(1); } cpuobj = object_new(object_class_get_name(oc)); cpu = ARM_CPU(cpuobj); /* By default A9 and A15 CPUs have EL3 enabled. This board does not * currently support EL3 so the CPU EL3 property is disabled before * realization. */ if (object_property_find(cpuobj, "has_el3", NULL)) { object_property_set_bool(cpuobj, false, "has_el3", &err); if (err) { error_report_err(err); exit(1); } } if (object_property_find(cpuobj, "reset-cbar", NULL)) { object_property_set_int(cpuobj, MPCORE_PERIPHBASE, "reset-cbar", &error_abort); } object_property_set_bool(cpuobj, true, "realized", &err); if (err) { error_report_err(err); exit(1); } cpu_irq[n] = qdev_get_gpio_in(DEVICE(cpu), ARM_CPU_IRQ); } sysmem = get_system_memory(); dram = g_new(MemoryRegion, 1); memory_region_init_ram(dram, NULL, "highbank.dram", ram_size, &error_abort); /* SDRAM at address zero. */ memory_region_add_subregion(sysmem, 0, dram); sysram = g_new(MemoryRegion, 1); memory_region_init_ram(sysram, NULL, "highbank.sysram", 0x8000, &error_abort); memory_region_add_subregion(sysmem, 0xfff88000, sysram); if (bios_name != NULL) { sysboot_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (sysboot_filename != NULL) { uint32_t filesize = get_image_size(sysboot_filename); if (load_image_targphys("sysram.bin", 0xfff88000, filesize) < 0) { hw_error("Unable to load %s\n", bios_name); } g_free(sysboot_filename); } else { hw_error("Unable to find %s\n", bios_name); } } switch (machine_id) { case CALXEDA_HIGHBANK: dev = qdev_create(NULL, "l2x0"); qdev_init_nofail(dev); busdev = SYS_BUS_DEVICE(dev); sysbus_mmio_map(busdev, 0, 0xfff12000); dev = qdev_create(NULL, "a9mpcore_priv"); break; case CALXEDA_MIDWAY: dev = qdev_create(NULL, "a15mpcore_priv"); break; } qdev_prop_set_uint32(dev, "num-cpu", smp_cpus); qdev_prop_set_uint32(dev, "num-irq", NIRQ_GIC); qdev_init_nofail(dev); busdev = SYS_BUS_DEVICE(dev); sysbus_mmio_map(busdev, 0, MPCORE_PERIPHBASE); for (n = 0; n < smp_cpus; n++) { sysbus_connect_irq(busdev, n, cpu_irq[n]); } for (n = 0; n < 128; n++) { pic[n] = qdev_get_gpio_in(dev, n); } dev = qdev_create(NULL, "sp804"); qdev_prop_set_uint32(dev, "freq0", 150000000); qdev_prop_set_uint32(dev, "freq1", 150000000); qdev_init_nofail(dev); busdev = SYS_BUS_DEVICE(dev); sysbus_mmio_map(busdev, 0, 0xfff34000); sysbus_connect_irq(busdev, 0, pic[18]); sysbus_create_simple("pl011", 0xfff36000, pic[20]); dev = qdev_create(NULL, "highbank-regs"); qdev_init_nofail(dev); busdev = SYS_BUS_DEVICE(dev); sysbus_mmio_map(busdev, 0, 0xfff3c000); sysbus_create_simple("pl061", 0xfff30000, pic[14]); sysbus_create_simple("pl061", 0xfff31000, pic[15]); sysbus_create_simple("pl061", 0xfff32000, pic[16]); 
sysbus_create_simple("pl061", 0xfff33000, pic[17]); sysbus_create_simple("pl031", 0xfff35000, pic[19]); sysbus_create_simple("pl022", 0xfff39000, pic[23]); sysbus_create_simple("sysbus-ahci", 0xffe08000, pic[83]); if (nd_table[0].used) { qemu_check_nic_model(&nd_table[0], "xgmac"); dev = qdev_create(NULL, "xgmac"); qdev_set_nic_properties(dev, &nd_table[0]); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xfff50000); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[77]); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, pic[78]); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 2, pic[79]); qemu_check_nic_model(&nd_table[1], "xgmac"); dev = qdev_create(NULL, "xgmac"); qdev_set_nic_properties(dev, &nd_table[1]); qdev_init_nofail(dev); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xfff51000); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[80]); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, pic[81]); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 2, pic[82]); } highbank_binfo.ram_size = ram_size; highbank_binfo.kernel_filename = kernel_filename; highbank_binfo.kernel_cmdline = kernel_cmdline; highbank_binfo.initrd_filename = initrd_filename; /* highbank requires a dtb in order to boot, and the dtb will override * the board ID. The following value is ignored, so set it to -1 to be * clear that the value is meaningless. */ highbank_binfo.board_id = -1; highbank_binfo.nb_cpus = smp_cpus; highbank_binfo.loader_start = 0; highbank_binfo.write_secondary_boot = hb_write_secondary; highbank_binfo.secondary_cpu_reset_hook = hb_reset_secondary; arm_load_kernel(ARM_CPU(first_cpu), &highbank_binfo); }
12,161
qemu
0a852417564bc59441dc09509beacf7b49bc1e57
1
bool trace_init_backends(void)
{
#ifdef CONFIG_TRACE_SIMPLE
    if (!st_init()) {
        fprintf(stderr, "failed to initialize simple tracing backend.\n");
        return false;
    }
#endif

#ifdef CONFIG_TRACE_FTRACE
    if (!ftrace_init()) {
        fprintf(stderr, "failed to initialize ftrace backend.\n");
        return false;
    }
#endif

    return true;
}
12,162
qemu
ac369a77967d5dd984a5430505eaf24a380af1c0
1
static inline void acpi_build_tables_cleanup(AcpiBuildTables *tables, bool mfre) { void *linker_data = bios_linker_loader_cleanup(tables->linker); if (mfre) { g_free(linker_data); } g_array_free(tables->rsdp, mfre); g_array_free(tables->table_data, mfre); g_array_free(tables->tcpalog, mfre); }
12,163
qemu
3604a76fea6ff37738d4a8f596be38407be74a83
1
static void dec_mul(DisasContext *dc) { if (dc->format == OP_FMT_RI) { LOG_DIS("muli r%d, r%d, %d\n", dc->r0, dc->r1, sign_extend(dc->imm16, 16)); } else { LOG_DIS("mul r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); } if (!(dc->env->features & LM32_FEATURE_MULTIPLY)) { cpu_abort(dc->env, "hardware multiplier is not available\n"); } if (dc->format == OP_FMT_RI) { tcg_gen_muli_tl(cpu_R[dc->r1], cpu_R[dc->r0], sign_extend(dc->imm16, 16)); } else { tcg_gen_mul_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]); } }
12,164
qemu
c572f23a3e7180dbeab5e86583e43ea2afed6271
1
static void v9fs_clunk(void *opaque) { int err; int32_t fid; size_t offset = 7; V9fsFidState *fidp; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; pdu_unmarshal(pdu, offset, "d", &fid); fidp = clunk_fid(s, fid); if (fidp == NULL) { err = -ENOENT; goto out_nofid; } /* * Bump the ref so that put_fid will * free the fid. */ fidp->ref++; err = offset; put_fid(pdu, fidp); out_nofid: complete_pdu(s, pdu, err); }
12,165
FFmpeg
7bf3f380466eeff24916fd6218aca13e414c6240
1
static int cbs_mpeg2_read_unit(CodedBitstreamContext *ctx, CodedBitstreamUnit *unit) { BitstreamContext bc; int err; err = bitstream_init(&bc, unit->data, 8 * unit->data_size); if (err < 0) return err; if (MPEG2_START_IS_SLICE(unit->type)) { MPEG2RawSlice *slice; int pos, len; slice = av_mallocz(sizeof(*slice)); if (!slice) return AVERROR(ENOMEM); err = cbs_mpeg2_read_slice_header(ctx, &bc, &slice->header); if (err < 0) { av_free(slice); return err; } pos = bitstream_tell(&bc); len = unit->data_size; slice->data_size = len - pos / 8; slice->data = av_malloc(slice->data_size); if (!slice->data) { av_free(slice); return AVERROR(ENOMEM); } memcpy(slice->data, unit->data + pos / 8, slice->data_size); slice->data_bit_start = pos % 8; unit->content = slice; } else { switch (unit->type) { #define START(start_code, type, func) \ case start_code: \ { \ type *header; \ header = av_mallocz(sizeof(*header)); \ if (!header) \ return AVERROR(ENOMEM); \ err = cbs_mpeg2_read_ ## func(ctx, &bc, header); \ if (err < 0) { \ av_free(header); \ return err; \ } \ unit->content = header; \ } \ break; START(0x00, MPEG2RawPictureHeader, picture_header); START(0xb2, MPEG2RawUserData, user_data); START(0xb3, MPEG2RawSequenceHeader, sequence_header); START(0xb5, MPEG2RawExtensionData, extension_data); START(0xb8, MPEG2RawGroupOfPicturesHeader, group_of_pictures_header); #undef START default: av_log(ctx->log_ctx, AV_LOG_ERROR, "Unknown start code %02x.\n", unit->type); return AVERROR_INVALIDDATA; } } return 0; }
12,167
FFmpeg
4c160fa23996c05efcd952ccfac2359311d8a1bc
1
static int scan_for_extensions(AVCodecContext *avctx) { DCAContext *s = avctx->priv_data; int core_ss_end, ret; core_ss_end = FFMIN(s->frame_size, s->dca_buffer_size) * 8; /* only scan for extensions if ext_descr was unknown or indicated a * supported XCh extension */ if (s->core_ext_mask < 0 || s->core_ext_mask & DCA_EXT_XCH) { /* if ext_descr was unknown, clear s->core_ext_mask so that the * extensions scan can fill it up */ s->core_ext_mask = FFMAX(s->core_ext_mask, 0); /* extensions start at 32-bit boundaries into bitstream */ skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31); while (core_ss_end - get_bits_count(&s->gb) >= 32) { uint32_t bits = get_bits_long(&s->gb, 32); int i; switch (bits) { case DCA_SYNCWORD_XCH: { int ext_amode, xch_fsize; s->xch_base_channel = s->prim_channels; /* validate sync word using XCHFSIZE field */ xch_fsize = show_bits(&s->gb, 10); if ((s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + xch_fsize) && (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + xch_fsize + 1)) continue; /* skip length-to-end-of-frame field for the moment */ skip_bits(&s->gb, 10); s->core_ext_mask |= DCA_EXT_XCH; /* extension amode(number of channels in extension) should be 1 */ /* AFAIK XCh is not used for more channels */ if ((ext_amode = get_bits(&s->gb, 4)) != 1) { av_log(avctx, AV_LOG_ERROR, "XCh extension amode %d not supported!\n", ext_amode); continue; } /* much like core primary audio coding header */ dca_parse_audio_coding_header(s, s->xch_base_channel); for (i = 0; i < (s->sample_blocks / 8); i++) if ((ret = dca_decode_block(s, s->xch_base_channel, i))) { av_log(avctx, AV_LOG_ERROR, "error decoding XCh extension\n"); continue; } s->xch_present = 1; break; } case DCA_SYNCWORD_XXCH: /* XXCh: extended channels */ /* usually found either in core or HD part in DTS-HD HRA streams, * but not in DTS-ES which contains XCh extensions instead */ s->core_ext_mask |= DCA_EXT_XXCH; break; case 0x1d95f262: { int fsize96 = show_bits(&s->gb, 12) + 1; if (s->frame_size != (get_bits_count(&s->gb) >> 3) - 4 + fsize96) continue; av_log(avctx, AV_LOG_DEBUG, "X96 extension found at %d bits\n", get_bits_count(&s->gb)); skip_bits(&s->gb, 12); av_log(avctx, AV_LOG_DEBUG, "FSIZE96 = %d bytes\n", fsize96); av_log(avctx, AV_LOG_DEBUG, "REVNO = %d\n", get_bits(&s->gb, 4)); s->core_ext_mask |= DCA_EXT_X96; break; } } skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31); } } else { /* no supported extensions, skip the rest of the core substream */ skip_bits_long(&s->gb, core_ss_end - get_bits_count(&s->gb)); } if (s->core_ext_mask & DCA_EXT_X96) s->profile = FF_PROFILE_DTS_96_24; else if (s->core_ext_mask & (DCA_EXT_XCH | DCA_EXT_XXCH)) s->profile = FF_PROFILE_DTS_ES; /* check for ExSS (HD part) */ if (s->dca_buffer_size - s->frame_size > 32 && get_bits_long(&s->gb, 32) == DCA_SYNCWORD_SUBSTREAM) ff_dca_exss_parse_header(s); return ret; }
12,168
qemu
01f7cecf0037997cb0e58ec0d56bf9b5a6f7cb2a
1
void
udp_input(register struct mbuf *m, int iphlen) { Slirp *slirp = m->slirp; register struct ip *ip; register struct udphdr *uh; int len; struct ip save_ip; struct socket *so; DEBUG_CALL("udp_input"); DEBUG_ARG("m = %lx", (long)m); DEBUG_ARG("iphlen = %d", iphlen); /* * Strip IP options, if any; should skip this, * make available to user, and use on returned packets, * but we don't yet have a way to check the checksum * with options still present. */ if(iphlen > sizeof(struct ip)) { ip_stripoptions(m, (struct mbuf *)0); iphlen = sizeof(struct ip); } /* * Get IP and UDP header together in first mbuf. */ ip = mtod(m, struct ip *); uh = (struct udphdr *)((caddr_t)ip + iphlen); /* * Make mbuf data length reflect UDP length. * If not enough data to reflect UDP length, drop. */ len = ntohs((uint16_t)uh->uh_ulen); if (ip->ip_len != len) { if (len > ip->ip_len) { goto bad; } m_adj(m, len - ip->ip_len); ip->ip_len = len; } /* * Save a copy of the IP header in case we want restore it * for sending an ICMP error message in response. */ save_ip = *ip; save_ip.ip_len+= iphlen; /* tcp_input subtracts this */ /* * Checksum extended UDP header and data. */ if (uh->uh_sum) { memset(&((struct ipovly *)ip)->ih_mbuf, 0, sizeof(struct mbuf_ptr)); ((struct ipovly *)ip)->ih_x1 = 0; ((struct ipovly *)ip)->ih_len = uh->uh_ulen; if(cksum(m, len + sizeof(struct ip))) { goto bad; } } /* * handle DHCP/BOOTP */ if (ntohs(uh->uh_dport) == BOOTP_SERVER && (ip->ip_dst.s_addr == slirp->vhost_addr.s_addr || ip->ip_dst.s_addr == 0xffffffff)) { bootp_input(m); goto bad; } /* * handle TFTP */ if (ntohs(uh->uh_dport) == TFTP_SERVER && ip->ip_dst.s_addr == slirp->vhost_addr.s_addr) { tftp_input(m); goto bad; } if (slirp->restricted) { goto bad; } /* * Locate pcb for datagram. */ so = slirp->udp_last_so; if (so->so_lport != uh->uh_sport || so->so_laddr.s_addr != ip->ip_src.s_addr) { struct socket *tmp; for (tmp = slirp->udb.so_next; tmp != &slirp->udb; tmp = tmp->so_next) { if (tmp->so_lport == uh->uh_sport && tmp->so_laddr.s_addr == ip->ip_src.s_addr) { so = tmp; break; } } if (tmp == &slirp->udb) { so = NULL; } else { slirp->udp_last_so = so; } } if (so == NULL) { /* * If there's no socket for this packet, * create one */ so = socreate(slirp); if (!so) { goto bad; } if(udp_attach(so) == -1) { DEBUG_MISC((dfd," udp_attach errno = %d-%s\n", errno,strerror(errno))); sofree(so); goto bad; } /* * Setup fields */ so->so_laddr = ip->ip_src; so->so_lport = uh->uh_sport; if ((so->so_iptos = udp_tos(so)) == 0) so->so_iptos = ip->ip_tos; /* * XXXXX Here, check if it's in udpexec_list, * and if it is, do the fork_exec() etc. */ } so->so_faddr = ip->ip_dst; /* XXX */ so->so_fport = uh->uh_dport; /* XXX */ iphlen += sizeof(struct udphdr); m->m_len -= iphlen; m->m_data += iphlen; /* * Now we sendto() the packet. */ if(sosendto(so,m) == -1) { m->m_len += iphlen; m->m_data -= iphlen; *ip=save_ip; DEBUG_MISC((dfd,"udp tx errno = %d-%s\n",errno,strerror(errno))); icmp_error(m, ICMP_UNREACH,ICMP_UNREACH_NET, 0,strerror(errno)); } m_free(so->so_m); /* used for ICMP if error on sorecvfrom */ /* restore the orig mbuf packet */ m->m_len += iphlen; m->m_data -= iphlen; *ip=save_ip; so->so_m=m; /* ICMP backup */ return; bad: m_free(m); }
12,169
qemu
5e6b701aba8689a336297dda047bf760ffc05291
1
static int con_init(struct XenDevice *xendev) { struct XenConsole *con = container_of(xendev, struct XenConsole, xendev); char *type, *dom; /* setup */ dom = xs_get_domain_path(xenstore, con->xendev.dom); snprintf(con->console, sizeof(con->console), "%s/console", dom); free(dom); type = xenstore_read_str(con->console, "type"); if (!type || strcmp(type, "ioemu") != 0) { xen_be_printf(xendev, 1, "not for me (type=%s)\n", type); return -1; } if (!serial_hds[con->xendev.dev]) xen_be_printf(xendev, 1, "WARNING: serial line %d not configured\n", con->xendev.dev); else con->chr = serial_hds[con->xendev.dev]; return 0; }
12,170
FFmpeg
01ecb7172b684f1c4b3e748f95c5a9a494ca36ec
1
static av_unused FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx, const int16_t *audio, const int16_t *la, int channel, int prev_type) { int i, j; int br = ctx->avctx->bit_rate / ctx->avctx->channels; int attack_ratio = br <= 16000 ? 18 : 10; AacPsyContext *pctx = (AacPsyContext*) ctx->model_priv_data; AacPsyChannel *pch = &pctx->ch[channel]; uint8_t grouping = 0; int next_type = pch->next_window_seq; FFPsyWindowInfo wi = { { 0 } }; if (la) { float s[8], v; int switch_to_eight = 0; float sum = 0.0, sum2 = 0.0; int attack_n = 0; int stay_short = 0; for (i = 0; i < 8; i++) { for (j = 0; j < 128; j++) { v = iir_filter(la[i*128+j], pch->iir_state); sum += v*v; } s[i] = sum; sum2 += sum; } for (i = 0; i < 8; i++) { if (s[i] > pch->win_energy * attack_ratio) { attack_n = i + 1; switch_to_eight = 1; break; } } pch->win_energy = pch->win_energy*7/8 + sum2/64; wi.window_type[1] = prev_type; switch (prev_type) { case ONLY_LONG_SEQUENCE: wi.window_type[0] = switch_to_eight ? LONG_START_SEQUENCE : ONLY_LONG_SEQUENCE; next_type = switch_to_eight ? EIGHT_SHORT_SEQUENCE : ONLY_LONG_SEQUENCE; break; case LONG_START_SEQUENCE: wi.window_type[0] = EIGHT_SHORT_SEQUENCE; grouping = pch->next_grouping; next_type = switch_to_eight ? EIGHT_SHORT_SEQUENCE : LONG_STOP_SEQUENCE; break; case LONG_STOP_SEQUENCE: wi.window_type[0] = switch_to_eight ? LONG_START_SEQUENCE : ONLY_LONG_SEQUENCE; next_type = switch_to_eight ? EIGHT_SHORT_SEQUENCE : ONLY_LONG_SEQUENCE; break; case EIGHT_SHORT_SEQUENCE: stay_short = next_type == EIGHT_SHORT_SEQUENCE || switch_to_eight; wi.window_type[0] = stay_short ? EIGHT_SHORT_SEQUENCE : LONG_STOP_SEQUENCE; grouping = next_type == EIGHT_SHORT_SEQUENCE ? pch->next_grouping : 0; next_type = switch_to_eight ? EIGHT_SHORT_SEQUENCE : LONG_STOP_SEQUENCE; break; } pch->next_grouping = window_grouping[attack_n]; pch->next_window_seq = next_type; } else { for (i = 0; i < 3; i++) wi.window_type[i] = prev_type; grouping = (prev_type == EIGHT_SHORT_SEQUENCE) ? window_grouping[0] : 0; } wi.window_shape = 1; if (wi.window_type[0] != EIGHT_SHORT_SEQUENCE) { wi.num_windows = 1; wi.grouping[0] = 1; } else { int lastgrp = 0; wi.num_windows = 8; for (i = 0; i < 8; i++) { if (!((grouping >> i) & 1)) lastgrp = i; wi.grouping[lastgrp]++; } } return wi; }
12,171
FFmpeg
2ef0f392711445e173a56b2c073dedb021ae3783
1
static int rac_get_model_sym(RangeCoder *c, Model *m) { int prob, prob2, helper, val; int end, end2; prob = 0; prob2 = c->range; c->range >>= MODEL_SCALE; val = 0; end = m->num_syms >> 1; end2 = m->num_syms; do { helper = m->freqs[end] * c->range; if (helper <= c->low) { val = end; prob = helper; } else { end2 = end; prob2 = helper; } end = (end2 + val) >> 1; } while (end != val); c->low -= prob; c->range = prob2 - prob; if (c->range < RAC_BOTTOM) rac_normalise(c); model_update(m, val); return val; }
12,172
FFmpeg
af09be4f4b2f87e71a3396f54c24a166092ec8e3
1
static int dvbsub_display_end_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size, AVSubtitle *sub) { DVBSubContext *ctx = avctx->priv_data; DVBSubDisplayDefinition *display_def = ctx->display_definition; DVBSubRegion *region; DVBSubRegionDisplay *display; AVSubtitleRect *rect; DVBSubCLUT *clut; uint32_t *clut_table; int i; int offset_x=0, offset_y=0; sub->end_display_time = ctx->time_out * 1000; if (display_def) { offset_x = display_def->x; offset_y = display_def->y; } sub->num_rects = ctx->display_list_size; if (sub->num_rects > 0){ sub->rects = av_mallocz(sizeof(*sub->rects) * sub->num_rects); for(i=0; i<sub->num_rects; i++) sub->rects[i] = av_mallocz(sizeof(*sub->rects[i])); i = 0; for (display = ctx->display_list; display; display = display->next) { region = get_region(ctx, display->region_id); if (!region) continue; if (!region->dirty) continue; rect = sub->rects[i]; rect->x = display->x_pos + offset_x; rect->y = display->y_pos + offset_y; rect->w = region->width; rect->h = region->height; rect->nb_colors = (1 << region->depth); rect->type = SUBTITLE_BITMAP; rect->pict.linesize[0] = region->width; clut = get_clut(ctx, region->clut); if (!clut) clut = &default_clut; switch (region->depth) { case 2: clut_table = clut->clut4; break; case 8: clut_table = clut->clut256; break; case 4: default: clut_table = clut->clut16; break; } rect->pict.data[1] = av_mallocz(AVPALETTE_SIZE); memcpy(rect->pict.data[1], clut_table, (1 << region->depth) * sizeof(uint32_t)); rect->pict.data[0] = av_malloc(region->buf_size); memcpy(rect->pict.data[0], region->pbuf, region->buf_size); i++; } sub->num_rects = i; } #ifdef DEBUG save_display_set(ctx); #endif return 1; }
12,173
qemu
967b75230b9720ea2b3ae49f38f8287026125f9f
1
static void pnv_chip_realize(DeviceState *dev, Error **errp)
{
    PnvChip *chip = PNV_CHIP(dev);
    Error *error = NULL;
    PnvChipClass *pcc = PNV_CHIP_GET_CLASS(chip);
    char *typename = pnv_core_typename(pcc->cpu_model);
    size_t typesize = object_type_get_instance_size(typename);
    int i, core_hwid;

    if (!object_class_by_name(typename)) {
        error_setg(errp, "Unable to find PowerNV CPU Core '%s'", typename);
        return;
    }

    /* Cores */
    pnv_chip_core_sanitize(chip, &error);

    chip->cores = g_malloc0(typesize * chip->nr_cores);

    for (i = 0, core_hwid = 0; (core_hwid < sizeof(chip->cores_mask) * 8)
             && (i < chip->nr_cores); core_hwid++) {
        char core_name[32];
        void *pnv_core = chip->cores + i * typesize;

        if (!(chip->cores_mask & (1ull << core_hwid))) {
            continue;
        }

        object_initialize(pnv_core, typesize, typename);
        snprintf(core_name, sizeof(core_name), "core[%d]", core_hwid);
        object_property_add_child(OBJECT(chip), core_name, OBJECT(pnv_core),
                                  &error_fatal);
        object_property_set_int(OBJECT(pnv_core), smp_threads, "nr-threads",
                                &error_fatal);
        object_property_set_int(OBJECT(pnv_core), core_hwid,
                                CPU_CORE_PROP_CORE_ID, &error_fatal);
        object_property_set_int(OBJECT(pnv_core),
                                pcc->core_pir(chip, core_hwid),
                                "pir", &error_fatal);
        object_property_set_bool(OBJECT(pnv_core), true, "realized",
                                 &error_fatal);
        object_unref(OBJECT(pnv_core));
        i++;
    }
    g_free(typename);
}
12,174
qemu
393c13b940be8f2e5b126cd9f442c12e7ecb4cac
1
static void bt_l2cap_sdp_close_ch(void *opaque) { struct bt_l2cap_sdp_state_s *sdp = opaque; int i; for (i = 0; i < sdp->services; i ++) { g_free(sdp->service_list[i].attribute_list->pair); g_free(sdp->service_list[i].attribute_list); g_free(sdp->service_list[i].uuid); } g_free(sdp->service_list); g_free(sdp); }
12,175
qemu
e0e2d644096c79a71099b176d08f465f6803a8b1
1
static uint16_t vring_used_idx(VirtQueue *vq) { VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches); hwaddr pa = offsetof(VRingUsed, idx); return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa); }
12,176
FFmpeg
4691a77db4672026d62d524fd292fb17db6514b4
1
static av_always_inline void hl_decode_mb_internal(H264Context *h, int simple){ MpegEncContext * const s = &h->s; const int mb_x= s->mb_x; const int mb_y= s->mb_y; const int mb_xy= mb_x + mb_y*s->mb_stride; const int mb_type= s->current_picture.mb_type[mb_xy]; uint8_t *dest_y, *dest_cb, *dest_cr; int linesize, uvlinesize /*dct_offset*/; int i; int *block_offset = &h->block_offset[0]; const unsigned int bottom = mb_y & 1; const int transform_bypass = (s->qscale == 0 && h->sps.transform_bypass), is_h264 = (simple || s->codec_id == CODEC_ID_H264); void (*idct_add)(uint8_t *dst, DCTELEM *block, int stride); void (*idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride); dest_y = s->current_picture.data[0] + (mb_y * 16* s->linesize ) + mb_x * 16; dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8; dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8; s->dsp.prefetch(dest_y + (s->mb_x&3)*4*s->linesize + 64, s->linesize, 4); s->dsp.prefetch(dest_cb + (s->mb_x&7)*s->uvlinesize + 64, dest_cr - dest_cb, 2); if (!simple && MB_FIELD) { linesize = h->mb_linesize = s->linesize * 2; uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2; block_offset = &h->block_offset[24]; if(mb_y&1){ //FIXME move out of this func? dest_y -= s->linesize*15; dest_cb-= s->uvlinesize*7; dest_cr-= s->uvlinesize*7; } if(FRAME_MBAFF) { int list; for(list=0; list<h->list_count; list++){ if(!USES_LIST(mb_type, list)) continue; if(IS_16X16(mb_type)){ int8_t *ref = &h->ref_cache[list][scan8[0]]; fill_rectangle(ref, 4, 4, 8, 16+*ref^(s->mb_y&1), 1); }else{ for(i=0; i<16; i+=4){ //FIXME can refs be smaller than 8x8 when !direct_8x8_inference ? int ref = h->ref_cache[list][scan8[i]]; if(ref >= 0) fill_rectangle(&h->ref_cache[list][scan8[i]], 2, 2, 8, 16+ref^(s->mb_y&1), 1); } } } } } else { linesize = h->mb_linesize = s->linesize; uvlinesize = h->mb_uvlinesize = s->uvlinesize; // dct_offset = s->linesize * 16; } if(transform_bypass){ idct_dc_add = idct_add = IS_8x8DCT(mb_type) ? s->dsp.add_pixels8 : s->dsp.add_pixels4; }else if(IS_8x8DCT(mb_type)){ idct_dc_add = s->dsp.h264_idct8_dc_add; idct_add = s->dsp.h264_idct8_add; }else{ idct_dc_add = s->dsp.h264_idct_dc_add; idct_add = s->dsp.h264_idct_add; } if(!simple && FRAME_MBAFF && h->deblocking_filter && IS_INTRA(mb_type) && (!bottom || !IS_INTRA(s->current_picture.mb_type[mb_xy-s->mb_stride]))){ int mbt_y = mb_y&~1; uint8_t *top_y = s->current_picture.data[0] + (mbt_y * 16* s->linesize ) + mb_x * 16; uint8_t *top_cb = s->current_picture.data[1] + (mbt_y * 8 * s->uvlinesize) + mb_x * 8; uint8_t *top_cr = s->current_picture.data[2] + (mbt_y * 8 * s->uvlinesize) + mb_x * 8; xchg_pair_border(h, top_y, top_cb, top_cr, s->linesize, s->uvlinesize, 1); } if (!simple && IS_INTRA_PCM(mb_type)) { unsigned int x, y; // The pixels are stored in h->mb array in the same order as levels, // copy them in output in the correct order. 
for(i=0; i<16; i++) { for (y=0; y<4; y++) { for (x=0; x<4; x++) { *(dest_y + block_offset[i] + y*linesize + x) = h->mb[i*16+y*4+x]; } } } for(i=16; i<16+4; i++) { for (y=0; y<4; y++) { for (x=0; x<4; x++) { *(dest_cb + block_offset[i] + y*uvlinesize + x) = h->mb[i*16+y*4+x]; } } } for(i=20; i<20+4; i++) { for (y=0; y<4; y++) { for (x=0; x<4; x++) { *(dest_cr + block_offset[i] + y*uvlinesize + x) = h->mb[i*16+y*4+x]; } } } } else { if(IS_INTRA(mb_type)){ if(h->deblocking_filter && (simple || !FRAME_MBAFF)) xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 1, simple); if(simple || !(s->flags&CODEC_FLAG_GRAY)){ h->pred8x8[ h->chroma_pred_mode ](dest_cb, uvlinesize); h->pred8x8[ h->chroma_pred_mode ](dest_cr, uvlinesize); } if(IS_INTRA4x4(mb_type)){ if(simple || !s->encoding){ if(IS_8x8DCT(mb_type)){ for(i=0; i<16; i+=4){ uint8_t * const ptr= dest_y + block_offset[i]; const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ]; const int nnz = h->non_zero_count_cache[ scan8[i] ]; h->pred8x8l[ dir ](ptr, (h->topleft_samples_available<<i)&0x8000, (h->topright_samples_available<<i)&0x4000, linesize); if(nnz){ if(nnz == 1 && h->mb[i*16]) idct_dc_add(ptr, h->mb + i*16, linesize); else idct_add(ptr, h->mb + i*16, linesize); } } }else for(i=0; i<16; i++){ uint8_t * const ptr= dest_y + block_offset[i]; uint8_t *topright; const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ]; int nnz, tr; if(dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED){ const int topright_avail= (h->topright_samples_available<<i)&0x8000; assert(mb_y || linesize <= block_offset[i]); if(!topright_avail){ tr= ptr[3 - linesize]*0x01010101; topright= (uint8_t*) &tr; }else topright= ptr + 4 - linesize; }else topright= NULL; h->pred4x4[ dir ](ptr, topright, linesize); nnz = h->non_zero_count_cache[ scan8[i] ]; if(nnz){ if(is_h264){ if(nnz == 1 && h->mb[i*16]) idct_dc_add(ptr, h->mb + i*16, linesize); else idct_add(ptr, h->mb + i*16, linesize); }else svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, 0); } } } }else{ h->pred16x16[ h->intra16x16_pred_mode ](dest_y , linesize); if(is_h264){ if(!transform_bypass) h264_luma_dc_dequant_idct_c(h->mb, s->qscale, h->dequant4_coeff[IS_INTRA(mb_type) ? 0:3][s->qscale][0]); }else svq3_luma_dc_dequant_idct_c(h->mb, s->qscale); } if(h->deblocking_filter && (simple || !FRAME_MBAFF)) xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0, simple); }else if(is_h264){ hl_motion(h, dest_y, dest_cb, dest_cr, s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab, s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab, s->dsp.weight_h264_pixels_tab, s->dsp.biweight_h264_pixels_tab); } if(!IS_INTRA4x4(mb_type)){ if(is_h264){ if(IS_INTRA16x16(mb_type)){ for(i=0; i<16; i++){ if(h->non_zero_count_cache[ scan8[i] ]) idct_add(dest_y + block_offset[i], h->mb + i*16, linesize); else if(h->mb[i*16]) idct_dc_add(dest_y + block_offset[i], h->mb + i*16, linesize); } }else{ const int di = IS_8x8DCT(mb_type) ? 4 : 1; for(i=0; i<16; i+=di){ int nnz = h->non_zero_count_cache[ scan8[i] ]; if(nnz){ if(nnz==1 && h->mb[i*16]) idct_dc_add(dest_y + block_offset[i], h->mb + i*16, linesize); else idct_add(dest_y + block_offset[i], h->mb + i*16, linesize); } } } }else{ for(i=0; i<16; i++){ if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ //FIXME benchmark weird rule, & below uint8_t * const ptr= dest_y + block_offset[i]; svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, IS_INTRA(mb_type) ? 
1 : 0); } } } } if(simple || !(s->flags&CODEC_FLAG_GRAY)){ uint8_t *dest[2] = {dest_cb, dest_cr}; if(transform_bypass){ idct_add = idct_dc_add = s->dsp.add_pixels4; }else{ idct_add = s->dsp.h264_idct_add; idct_dc_add = s->dsp.h264_idct_dc_add; chroma_dc_dequant_idct_c(h->mb + 16*16, h->chroma_qp, h->dequant4_coeff[IS_INTRA(mb_type) ? 1:4][h->chroma_qp][0]); chroma_dc_dequant_idct_c(h->mb + 16*16+4*16, h->chroma_qp, h->dequant4_coeff[IS_INTRA(mb_type) ? 2:5][h->chroma_qp][0]); } if(is_h264){ for(i=16; i<16+8; i++){ if(h->non_zero_count_cache[ scan8[i] ]) idct_add(dest[(i&4)>>2] + block_offset[i], h->mb + i*16, uvlinesize); else if(h->mb[i*16]) idct_dc_add(dest[(i&4)>>2] + block_offset[i], h->mb + i*16, uvlinesize); } }else{ for(i=16; i<16+8; i++){ if(h->non_zero_count_cache[ scan8[i] ] || h->mb[i*16]){ uint8_t * const ptr= dest[(i&4)>>2] + block_offset[i]; svq3_add_idct_c(ptr, h->mb + i*16, uvlinesize, chroma_qp[s->qscale + 12] - 12, 2); } } } } } if(h->deblocking_filter) { if (!simple && FRAME_MBAFF) { //FIXME try deblocking one mb at a time? // the reduction in load/storing mvs and such might outweigh the extra backup/xchg_border const int mb_y = s->mb_y - 1; uint8_t *pair_dest_y, *pair_dest_cb, *pair_dest_cr; const int mb_xy= mb_x + mb_y*s->mb_stride; const int mb_type_top = s->current_picture.mb_type[mb_xy]; const int mb_type_bottom= s->current_picture.mb_type[mb_xy+s->mb_stride]; if (!bottom) return; pair_dest_y = s->current_picture.data[0] + (mb_y * 16* s->linesize ) + mb_x * 16; pair_dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8; pair_dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8; if(IS_INTRA(mb_type_top | mb_type_bottom)) xchg_pair_border(h, pair_dest_y, pair_dest_cb, pair_dest_cr, s->linesize, s->uvlinesize, 0); backup_pair_border(h, pair_dest_y, pair_dest_cb, pair_dest_cr, s->linesize, s->uvlinesize); // deblock a pair // top s->mb_y--; tprintf(h->s.avctx, "call mbaff filter_mb mb_x:%d mb_y:%d pair_dest_y = %p, dest_y = %p\n", mb_x, mb_y, pair_dest_y, dest_y); fill_caches(h, mb_type_top, 1); //FIXME don't fill stuff which isn't used by filter_mb h->chroma_qp = get_chroma_qp(h, s->current_picture.qscale_table[mb_xy]); filter_mb(h, mb_x, mb_y, pair_dest_y, pair_dest_cb, pair_dest_cr, linesize, uvlinesize); // bottom s->mb_y++; tprintf(h->s.avctx, "call mbaff filter_mb\n"); fill_caches(h, mb_type_bottom, 1); //FIXME don't fill stuff which isn't used by filter_mb h->chroma_qp = get_chroma_qp(h, s->current_picture.qscale_table[mb_xy+s->mb_stride]); filter_mb(h, mb_x, mb_y+1, dest_y, dest_cb, dest_cr, linesize, uvlinesize); } else { tprintf(h->s.avctx, "call filter_mb\n"); backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, simple); fill_caches(h, mb_type, 1); //FIXME don't fill stuff which isn't used by filter_mb filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize); } } }
12,177
qemu
8ccccff9dd7ba24c7a78861172e8dc6b07f1c392
1
static void spapr_rtc_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = spapr_rtc_realize; dc->vmsd = &vmstate_spapr_rtc; spapr_rtas_register(RTAS_GET_TIME_OF_DAY, "get-time-of-day", rtas_get_time_of_day); spapr_rtas_register(RTAS_SET_TIME_OF_DAY, "set-time-of-day", rtas_set_time_of_day); }
12,178
FFmpeg
84c202cc37024bd78261e4222e46631ea73c48dd
1
static int read_len_table(uint8_t *dst, GetBitContext *gb){ int i, val, repeat; for(i=0; i<256;){ repeat= get_bits(gb, 3); val = get_bits(gb, 5); if(repeat==0) repeat= get_bits(gb, 8); //printf("%d %d\n", val, repeat); if(i+repeat > 256) { av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n"); return -1; } while (repeat--) dst[i++] = val; } return 0; }
12,179
qemu
857d4f46c31d2f4d57d2f0fad9dfb584262bf9b9
1
static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs, int64_t sector_num, int nb_sectors, BdrvRequestFlags flags) { BlockDriver *drv = bs->drv; QEMUIOVector qiov; struct iovec iov = {0}; int ret = 0; int max_write_zeroes = bs->bl.max_write_zeroes ? bs->bl.max_write_zeroes : MAX_WRITE_ZEROES_DEFAULT; while (nb_sectors > 0 && !ret) { int num = nb_sectors; /* Align request. Block drivers can expect the "bulk" of the request * to be aligned. */ if (bs->bl.write_zeroes_alignment && num > bs->bl.write_zeroes_alignment) { if (sector_num % bs->bl.write_zeroes_alignment != 0) { /* Make a small request up to the first aligned sector. */ num = bs->bl.write_zeroes_alignment; num -= sector_num % bs->bl.write_zeroes_alignment; } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) { /* Shorten the request to the last aligned sector. num cannot * underflow because num > bs->bl.write_zeroes_alignment. */ num -= (sector_num + num) % bs->bl.write_zeroes_alignment; } } /* limit request size */ if (num > max_write_zeroes) { num = max_write_zeroes; } ret = -ENOTSUP; /* First try the efficient write zeroes operation */ if (drv->bdrv_co_write_zeroes) { ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags); } if (ret == -ENOTSUP) { /* Fall back to bounce buffer if write zeroes is unsupported */ iov.iov_len = num * BDRV_SECTOR_SIZE; if (iov.iov_base == NULL) { iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE); memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE); } qemu_iovec_init_external(&qiov, &iov, 1); ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov); /* Keep bounce buffer around if it is big enough for all * all future requests. */ if (num < max_write_zeroes) { qemu_vfree(iov.iov_base); iov.iov_base = NULL; } } sector_num += num; nb_sectors -= num; } qemu_vfree(iov.iov_base); return ret; }
12,180
FFmpeg
357f2316a08478a4442e8051978c7b161e10281c
1
void ff_ivi_inverse_haar_4x4(const int32_t *in, int16_t *out, ptrdiff_t pitch, const uint8_t *flags) { int i, shift, sp1, sp2; const int32_t *src; int32_t *dst; int tmp[16]; int t0, t1, t2, t3, t4; /* apply the InvHaar4 to all columns */ #define COMPENSATE(x) (x) src = in; dst = tmp; for (i = 0; i < 4; i++) { if (flags[i]) { /* pre-scaling */ shift = !(i & 2); sp1 = src[0] << shift; sp2 = src[4] << shift; INV_HAAR4( sp1, sp2, src[8], src[12], dst[0], dst[4], dst[8], dst[12], t0, t1, t2, t3, t4); } else dst[0] = dst[4] = dst[8] = dst[12] = 0; src++; dst++; } #undef COMPENSATE /* apply the InvHaar8 to all rows */ #define COMPENSATE(x) (x) src = tmp; for (i = 0; i < 4; i++) { if (!src[0] && !src[1] && !src[2] && !src[3]) { memset(out, 0, 4 * sizeof(out[0])); } else { INV_HAAR4(src[0], src[1], src[2], src[3], out[0], out[1], out[2], out[3], t0, t1, t2, t3, t4); } src += 4; out += pitch; } #undef COMPENSATE }
12,181
FFmpeg
1e5cfad57e88d168f50794e1523abfa477ad9aed
1
static int submit_packet(PerThreadContext *p, AVPacket *avpkt) { FrameThreadContext *fctx = p->parent; PerThreadContext *prev_thread = fctx->prev_thread; const AVCodec *codec = p->avctx->codec; if (!avpkt->size && !(codec->capabilities & AV_CODEC_CAP_DELAY)) return 0; pthread_mutex_lock(&p->mutex); release_delayed_buffers(p); if (prev_thread) { int err; if (prev_thread->state == STATE_SETTING_UP) { pthread_mutex_lock(&prev_thread->progress_mutex); while (prev_thread->state == STATE_SETTING_UP) pthread_cond_wait(&prev_thread->progress_cond, &prev_thread->progress_mutex); pthread_mutex_unlock(&prev_thread->progress_mutex); } err = update_context_from_thread(p->avctx, prev_thread->avctx, 0); if (err) { pthread_mutex_unlock(&p->mutex); return err; } } av_packet_unref(&p->avpkt); av_packet_ref(&p->avpkt, avpkt); p->state = STATE_SETTING_UP; pthread_cond_signal(&p->input_cond); pthread_mutex_unlock(&p->mutex); /* * If the client doesn't have a thread-safe get_buffer(), * then decoding threads call back to the main thread, * and it calls back to the client here. */ if (!p->avctx->thread_safe_callbacks && ( p->avctx->get_format != avcodec_default_get_format || p->avctx->get_buffer2 != avcodec_default_get_buffer2)) { while (p->state != STATE_SETUP_FINISHED && p->state != STATE_INPUT_READY) { int call_done = 1; pthread_mutex_lock(&p->progress_mutex); while (p->state == STATE_SETTING_UP) pthread_cond_wait(&p->progress_cond, &p->progress_mutex); switch (p->state) { case STATE_GET_BUFFER: p->result = ff_get_buffer(p->avctx, p->requested_frame, p->requested_flags); break; case STATE_GET_FORMAT: p->result_format = ff_get_format(p->avctx, p->available_formats); break; default: call_done = 0; break; } if (call_done) { p->state = STATE_SETTING_UP; pthread_cond_signal(&p->progress_cond); } pthread_mutex_unlock(&p->progress_mutex); } } fctx->prev_thread = p; fctx->next_decoding++; return 0; }
12,183
FFmpeg
6665938ca8b7ad8b7ec77c23e611bb8224e88a90
0
static int ty_read_packet(AVFormatContext *s, AVPacket *pkt) { TYDemuxContext *ty = s->priv_data; AVIOContext *pb = s->pb; TyRecHdr *rec; int64_t rec_size = 0; int ret = 0; if (avio_feof(pb)) return AVERROR_EOF; while (ret <= 0) { if (ty->first_chunk || ty->cur_rec >= ty->num_recs) { if (get_chunk(s) < 0 || ty->num_recs == 0) return AVERROR_EOF; } rec = &ty->rec_hdrs[ty->cur_rec]; rec_size = rec->rec_size; ty->cur_rec++; if (rec_size <= 0) continue; if (ty->cur_chunk_pos + rec->rec_size > CHUNK_SIZE) return AVERROR_INVALIDDATA; if (avio_feof(pb)) return AVERROR_EOF; switch (rec->rec_type) { case VIDEO_ID: ret = demux_video(s, rec, pkt); break; case AUDIO_ID: ret = demux_audio(s, rec, pkt); break; default: ff_dlog(s, "Invalid record type 0x%02x\n", rec->rec_type); case 0x01: case 0x02: case 0x03: /* TiVo data services */ case 0x05: /* unknown, but seen regularly */ ty->cur_chunk_pos += rec->rec_size; break; } } return 0; }
12,184
FFmpeg
96d0494123a05fb78a0fd3f03b0b5aaefc170b1c
0
static int lag_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; LagarithContext *l = avctx->priv_data; AVFrame *const p = &l->picture; uint8_t frametype = 0; uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9; int offs[4]; uint8_t *srcs[4], *dst; int i, j, planes = 3; AVFrame *picture = data; if (p->data[0]) avctx->release_buffer(avctx, p); p->reference = 0; p->key_frame = 1; frametype = buf[0]; offset_gu = AV_RL32(buf + 1); offset_bv = AV_RL32(buf + 5); switch (frametype) { case FRAME_SOLID_RGBA: avctx->pix_fmt = PIX_FMT_RGB32; if (avctx->get_buffer(avctx, p) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } dst = p->data[0]; for (j = 0; j < avctx->height; j++) { for (i = 0; i < avctx->width; i++) AV_WN32(dst + i * 4, offset_gu); dst += p->linesize[0]; } break; case FRAME_ARITH_RGBA: avctx->pix_fmt = PIX_FMT_RGB32; planes = 4; offset_ry += 4; offs[3] = AV_RL32(buf + 9); case FRAME_ARITH_RGB24: if (frametype == FRAME_ARITH_RGB24) avctx->pix_fmt = PIX_FMT_RGB24; if (avctx->get_buffer(avctx, p) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } offs[0] = offset_bv; offs[1] = offset_gu; offs[2] = offset_ry; if (!l->rgb_planes) { l->rgb_stride = FFALIGN(avctx->width, 16); l->rgb_planes = av_malloc(l->rgb_stride * avctx->height * planes + 16); if (!l->rgb_planes) { av_log(avctx, AV_LOG_ERROR, "cannot allocate temporary buffer\n"); return AVERROR(ENOMEM); } } for (i = 0; i < planes; i++) srcs[i] = l->rgb_planes + (i + 1) * l->rgb_stride * avctx->height - l->rgb_stride; for (i = 0; i < planes; i++) lag_decode_arith_plane(l, srcs[i], avctx->width, avctx->height, -l->rgb_stride, buf + offs[i], buf_size); dst = p->data[0]; for (i = 0; i < planes; i++) srcs[i] = l->rgb_planes + i * l->rgb_stride * avctx->height; for (j = 0; j < avctx->height; j++) { for (i = 0; i < avctx->width; i++) { uint8_t r, g, b, a; r = srcs[0][i]; g = srcs[1][i]; b = srcs[2][i]; r += g; b += g; if (frametype == FRAME_ARITH_RGBA) { a = srcs[3][i]; AV_WN32(dst + i * 4, MKBETAG(a, r, g, b)); } else { dst[i * 3 + 0] = r; dst[i * 3 + 1] = g; dst[i * 3 + 2] = b; } } dst += p->linesize[0]; for (i = 0; i < planes; i++) srcs[i] += l->rgb_stride; } break; case FRAME_ARITH_YV12: avctx->pix_fmt = PIX_FMT_YUV420P; if (avctx->get_buffer(avctx, p) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height, p->linesize[0], buf + offset_ry, buf_size); lag_decode_arith_plane(l, p->data[2], avctx->width / 2, avctx->height / 2, p->linesize[2], buf + offset_gu, buf_size); lag_decode_arith_plane(l, p->data[1], avctx->width / 2, avctx->height / 2, p->linesize[1], buf + offset_bv, buf_size); break; default: av_log(avctx, AV_LOG_ERROR, "Unsupported Lagarith frame type: %#x\n", frametype); return -1; } *picture = *p; *data_size = sizeof(AVFrame); return buf_size; }
12,185
FFmpeg
470de55aa17cb933a21f7e4c4015202eaba7277f
0
static inline int check_for_slice(AVSContext *h) {
    GetBitContext *gb = &h->s.gb;
    int align;

    if(h->mbx)
        return 0;
    align = (-get_bits_count(gb)) & 7;
    /* check for stuffing byte */
    if(!align && (show_bits(gb,8) == 0x80))
        get_bits(gb,8);
    if((show_bits_long(gb,24+align) & 0xFFFFFF) == 0x000001) {
        skip_bits_long(gb,24+align);
        h->stc = get_bits(gb,8);
        decode_slice_header(h,gb);
        return 1;
    }
    return 0;
}
12,186
FFmpeg
c51c08e0e70c186971385bdbb225f69edd4e3375
0
static int decode_display_orientation(H264Context *h)
{
    h->sei_display_orientation_present = !get_bits1(&h->gb);

    if (h->sei_display_orientation_present) {
        h->sei_hflip = get_bits1(&h->gb); // hor_flip
        h->sei_vflip = get_bits1(&h->gb); // ver_flip

        h->sei_anticlockwise_rotation = get_bits(&h->gb, 16);
        get_ue_golomb(&h->gb); // display_orientation_repetition_period
        skip_bits1(&h->gb);    // display_orientation_extension_flag
    }

    return 0;
}
12,187
FFmpeg
b12d92efd6c0d48665383a9baecc13e7ebbd8a22
1
static int init_image(TiffContext *s) { int i, ret; uint32_t *pal; switch (s->bpp * 10 + s->bppcount) { case 11: if (!s->palette_is_set) { s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK; break; } case 21: case 41: case 81: s->avctx->pix_fmt = AV_PIX_FMT_PAL8; break; case 243: s->avctx->pix_fmt = AV_PIX_FMT_RGB24; break; case 161: s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE; break; case 162: s->avctx->pix_fmt = AV_PIX_FMT_GRAY8A; break; case 324: s->avctx->pix_fmt = AV_PIX_FMT_RGBA; break; case 483: s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE; break; case 644: s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE; break; default: av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, bppcount=%d)\n", s->bpp, s->bppcount); return AVERROR_INVALIDDATA; } if (s->width != s->avctx->width || s->height != s->avctx->height) { if ((ret = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0) return ret; avcodec_set_dimensions(s->avctx, s->width, s->height); } if (s->picture.data[0]) s->avctx->release_buffer(s->avctx, &s->picture); if ((ret = s->avctx->get_buffer(s->avctx, &s->picture)) < 0) { av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) { if (s->palette_is_set) { memcpy(s->picture.data[1], s->palette, sizeof(s->palette)); } else { /* make default grayscale pal */ pal = (uint32_t *) s->picture.data[1]; for (i = 0; i < 1<<s->bpp; i++) pal[i] = 0xFF << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101; } } return 0; }
12,188
qemu
f9530c32420fff941b7bc8bb5d90310eecab5a96
1
static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1) { CPUS390XState *env = &cpu->env; const uint8_t r1 = ipa1 >> 4; const uint8_t r3 = ipa1 & 0x0f; int ret; uint8_t order; uint64_t *status_reg; uint64_t param; S390CPU *dst_cpu = NULL; cpu_synchronize_state(CPU(cpu)); /* get order code */ order = decode_basedisp_rs(env, run->s390_sieic.ipb, NULL) & SIGP_ORDER_MASK; status_reg = &env->regs[r1]; param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1]; if (qemu_mutex_trylock(&qemu_sigp_mutex)) { ret = SIGP_CC_BUSY; goto out; } switch (order) { case SIGP_SET_ARCH: ret = sigp_set_architecture(cpu, param, status_reg); break; default: /* all other sigp orders target a single vcpu */ dst_cpu = s390_cpu_addr2state(env->regs[r3]); ret = handle_sigp_single_dst(dst_cpu, order, param, status_reg); } trace_kvm_sigp_finished(order, CPU(cpu)->cpu_index, dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret); if (ret >= 0) { setcc(cpu, ret); return 0; } return ret; }
12,189
FFmpeg
719dbe86ea0e85b3b89f492c69e10bb0e733bcbb
0
static int h261_decode_gob_header(H261Context *h)
{
    unsigned int val;
    MpegEncContext *const s = &h->s;

    if (!h->gob_start_code_skipped) {
        /* Check for GOB Start Code */
        val = show_bits(&s->gb, 15);
        if (val)
            return -1;

        /* We have a GBSC */
        skip_bits(&s->gb, 16);
    }

    h->gob_start_code_skipped = 0;

    h->gob_number = get_bits(&s->gb, 4); /* GN */
    s->qscale = get_bits(&s->gb, 5);     /* GQUANT */

    /* Check if gob_number is valid */
    if (s->mb_height == 18) { // CIF
        if ((h->gob_number <= 0) || (h->gob_number > 12))
            return -1;
    } else { // QCIF
        if ((h->gob_number != 1) && (h->gob_number != 3) &&
            (h->gob_number != 5))
            return -1;
    }

    /* GEI */
    while (get_bits1(&s->gb) != 0)
        skip_bits(&s->gb, 8);

    if (s->qscale == 0) {
        av_log(s->avctx, AV_LOG_ERROR, "qscale has forbidden 0 value\n");
        if (s->avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
            return -1;
    }

    /* For the first transmitted macroblock in a GOB, MBA is the absolute
     * address. For subsequent macroblocks, MBA is the difference between
     * the absolute addresses of the macroblock and the last transmitted
     * macroblock. */
    h->current_mba = 0;
    h->mba_diff = 0;

    return 0;
}
12,191
FFmpeg
38a4be3fa7a7bb83f0a553577427e916a7bda390
0
static int has_decode_delay_been_guessed(AVStream *st)
{
    return st->codec->codec_id != CODEC_ID_H264 ||
           st->codec_info_nb_frames >= 6 + st->codec->has_b_frames;
}
12,193
FFmpeg
e403e4bdbea08af0c4a068eb560b577d1b64cf7a
1
static int64_t scene_sad8(FrameRateContext *s,
                          uint8_t *p1, int p1_linesize,
                          uint8_t* p2, int p2_linesize,
                          int height)
{
    int64_t sad;
    int x, y;
    for (sad = y = 0; y < height; y += 8) {
        for (x = 0; x < p1_linesize; x += 8) {
            sad += s->sad(p1 + y * p1_linesize + x, p1_linesize,
                          p2 + y * p2_linesize + x, p2_linesize);
        }
    }
    emms_c();
    return sad;
}
12,195
qemu
5df6a1855b62dc653515d919e48c5b6f00c48f32
1
e1000_link_up(E1000State *s)
{
    s->mac_reg[STATUS] |= E1000_STATUS_LU;
    s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
}
12,196
qemu
f3c7d0389fe8a2792fd4c1cf151b885de03c8f62
1
static void omap_gp_timer_clk_setup(struct omap_gp_timer_s *timer)
{
    omap_clk_adduser(timer->clk,
                     qemu_allocate_irqs(omap_gp_timer_clk_update, timer, 1)[0]);
    timer->rate = omap_clk_getrate(timer->clk);
}
12,197
FFmpeg
f78cd0c243b9149c7f604ecf1006d78e344aa6ca
1
static inline void FUNC(idctRowCondDC)(DCTELEM *row) { int a0, a1, a2, a3, b0, b1, b2, b3; #if HAVE_FAST_64BIT #define ROW0_MASK (0xffffLL << 48 * HAVE_BIGENDIAN) if (((((uint64_t *)row)[0] & ~ROW0_MASK) | ((uint64_t *)row)[1]) == 0) { uint64_t temp = (row[0] << DC_SHIFT) & 0xffff; temp += temp << 16; temp += temp << 32; ((uint64_t *)row)[0] = temp; ((uint64_t *)row)[1] = temp; return; } #else if (!(((uint32_t*)row)[1] | ((uint32_t*)row)[2] | ((uint32_t*)row)[3] | row[1])) { uint32_t temp = (row[0] << DC_SHIFT) & 0xffff; temp += temp << 16; ((uint32_t*)row)[0]=((uint32_t*)row)[1] = ((uint32_t*)row)[2]=((uint32_t*)row)[3] = temp; return; } #endif a0 = (W4 * row[0]) + (1 << (ROW_SHIFT - 1)); a1 = a0; a2 = a0; a3 = a0; a0 += W2 * row[2]; a1 += W6 * row[2]; a2 -= W6 * row[2]; a3 -= W2 * row[2]; b0 = MUL(W1, row[1]); MAC(b0, W3, row[3]); b1 = MUL(W3, row[1]); MAC(b1, -W7, row[3]); b2 = MUL(W5, row[1]); MAC(b2, -W1, row[3]); b3 = MUL(W7, row[1]); MAC(b3, -W5, row[3]); if (AV_RN64A(row + 4)) { a0 += W4*row[4] + W6*row[6]; a1 += - W4*row[4] - W2*row[6]; a2 += - W4*row[4] + W2*row[6]; a3 += W4*row[4] - W6*row[6]; MAC(b0, W5, row[5]); MAC(b0, W7, row[7]); MAC(b1, -W1, row[5]); MAC(b1, -W5, row[7]); MAC(b2, W7, row[5]); MAC(b2, W3, row[7]); MAC(b3, W3, row[5]); MAC(b3, -W1, row[7]); } row[0] = (a0 + b0) >> ROW_SHIFT; row[7] = (a0 - b0) >> ROW_SHIFT; row[1] = (a1 + b1) >> ROW_SHIFT; row[6] = (a1 - b1) >> ROW_SHIFT; row[2] = (a2 + b2) >> ROW_SHIFT; row[5] = (a2 - b2) >> ROW_SHIFT; row[3] = (a3 + b3) >> ROW_SHIFT; row[4] = (a3 - b3) >> ROW_SHIFT; }
12,198
qemu
ad739706bbadee49f164b4b7f4c7f5454ddf83cd
1
Object *user_creatable_add_type(const char *type, const char *id, const QDict *qdict, Visitor *v, Error **errp) { Object *obj; ObjectClass *klass; const QDictEntry *e; Error *local_err = NULL; klass = object_class_by_name(type); if (!klass) { error_setg(errp, "invalid object type: %s", type); return NULL; } if (!object_class_dynamic_cast(klass, TYPE_USER_CREATABLE)) { error_setg(errp, "object type '%s' isn't supported by object-add", type); return NULL; } if (object_class_is_abstract(klass)) { error_setg(errp, "object type '%s' is abstract", type); return NULL; } obj = object_new(type); if (qdict) { for (e = qdict_first(qdict); e; e = qdict_next(qdict, e)) { object_property_set(obj, v, e->key, &local_err); if (local_err) { goto out; } } } object_property_add_child(object_get_objects_root(), id, obj, &local_err); if (local_err) { goto out; } user_creatable_complete(obj, &local_err); if (local_err) { object_property_del(object_get_objects_root(), id, &error_abort); goto out; } out: if (local_err) { error_propagate(errp, local_err); object_unref(obj); return NULL; } return obj; }
12,199
FFmpeg
8696f254444c2ec24daa570f26feadbd3df911e4
1
static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n)
{
    if(avctx->slice_count)
        return avctx->slice_offset[n];
    else
        return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
}
12,201
FFmpeg
9c8922acadb5187c274250d6cde653b7bad2559e
1
static int tls_read(URLContext *h, uint8_t *buf, int size)
{
    TLSContext *c = h->priv_data;
    size_t processed = 0;
    int ret = SSLRead(c->ssl_context, buf, size, &processed);
    ret = map_ssl_error(ret, processed);
    if (ret > 0)
        return ret;
    if (ret == 0)
        return AVERROR_EOF;
    return print_tls_error(h, ret);
}
12,202
qemu
689ed13e73bdb5a5ca3366524475e3065fae854a
1
static void test_io_channel_tls(const void *opaque) { struct QIOChannelTLSTestData *data = (struct QIOChannelTLSTestData *)opaque; QCryptoTLSCreds *clientCreds; QCryptoTLSCreds *serverCreds; QIOChannelTLS *clientChanTLS; QIOChannelTLS *serverChanTLS; QIOChannelSocket *clientChanSock; QIOChannelSocket *serverChanSock; qemu_acl *acl; const char * const *wildcards; int channel[2]; struct QIOChannelTLSHandshakeData clientHandshake = { false, false }; struct QIOChannelTLSHandshakeData serverHandshake = { false, false }; Error *err = NULL; QIOChannelTest *test; GMainContext *mainloop; /* We'll use this for our fake client-server connection */ g_assert(socketpair(AF_UNIX, SOCK_STREAM, 0, channel) == 0); #define CLIENT_CERT_DIR "tests/test-io-channel-tls-client/" #define SERVER_CERT_DIR "tests/test-io-channel-tls-server/" mkdir(CLIENT_CERT_DIR, 0700); mkdir(SERVER_CERT_DIR, 0700); unlink(SERVER_CERT_DIR QCRYPTO_TLS_CREDS_X509_CA_CERT); unlink(SERVER_CERT_DIR QCRYPTO_TLS_CREDS_X509_SERVER_CERT); unlink(SERVER_CERT_DIR QCRYPTO_TLS_CREDS_X509_SERVER_KEY); unlink(CLIENT_CERT_DIR QCRYPTO_TLS_CREDS_X509_CA_CERT); unlink(CLIENT_CERT_DIR QCRYPTO_TLS_CREDS_X509_CLIENT_CERT); unlink(CLIENT_CERT_DIR QCRYPTO_TLS_CREDS_X509_CLIENT_KEY); g_assert(link(data->servercacrt, SERVER_CERT_DIR QCRYPTO_TLS_CREDS_X509_CA_CERT) == 0); g_assert(link(data->servercrt, SERVER_CERT_DIR QCRYPTO_TLS_CREDS_X509_SERVER_CERT) == 0); g_assert(link(KEYFILE, SERVER_CERT_DIR QCRYPTO_TLS_CREDS_X509_SERVER_KEY) == 0); g_assert(link(data->clientcacrt, CLIENT_CERT_DIR QCRYPTO_TLS_CREDS_X509_CA_CERT) == 0); g_assert(link(data->clientcrt, CLIENT_CERT_DIR QCRYPTO_TLS_CREDS_X509_CLIENT_CERT) == 0); g_assert(link(KEYFILE, CLIENT_CERT_DIR QCRYPTO_TLS_CREDS_X509_CLIENT_KEY) == 0); clientCreds = test_tls_creds_create( QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT, CLIENT_CERT_DIR, &err); g_assert(clientCreds != NULL); serverCreds = test_tls_creds_create( QCRYPTO_TLS_CREDS_ENDPOINT_SERVER, SERVER_CERT_DIR, &err); g_assert(serverCreds != NULL); acl = qemu_acl_init("channeltlsacl"); qemu_acl_reset(acl); wildcards = data->wildcards; while (wildcards && *wildcards) { qemu_acl_append(acl, 0, *wildcards); wildcards++; } clientChanSock = qio_channel_socket_new_fd( channel[0], &err); g_assert(clientChanSock != NULL); serverChanSock = qio_channel_socket_new_fd( channel[1], &err); g_assert(serverChanSock != NULL); /* * We have an evil loop to do the handshake in a single * thread, so we need these non-blocking to avoid deadlock * of ourselves */ qio_channel_set_blocking(QIO_CHANNEL(clientChanSock), false, NULL); qio_channel_set_blocking(QIO_CHANNEL(serverChanSock), false, NULL); /* Now the real part of the test, setup the sessions */ clientChanTLS = qio_channel_tls_new_client( QIO_CHANNEL(clientChanSock), clientCreds, data->hostname, &err); g_assert(clientChanTLS != NULL); serverChanTLS = qio_channel_tls_new_server( QIO_CHANNEL(serverChanSock), serverCreds, "channeltlsacl", &err); g_assert(serverChanTLS != NULL); qio_channel_tls_handshake(clientChanTLS, test_tls_handshake_done, &clientHandshake, NULL); qio_channel_tls_handshake(serverChanTLS, test_tls_handshake_done, &serverHandshake, NULL); /* * Finally we loop around & around doing handshake on each * session until we get an error, or the handshake completes. 
* This relies on the socketpair being nonblocking to avoid * deadlocking ourselves upon handshake */ mainloop = g_main_context_default(); do { g_main_context_iteration(mainloop, TRUE); } while (!clientHandshake.finished && !serverHandshake.finished); g_assert(clientHandshake.failed == data->expectClientFail); g_assert(serverHandshake.failed == data->expectServerFail); test = qio_channel_test_new(); qio_channel_test_run_threads(test, false, QIO_CHANNEL(clientChanTLS), QIO_CHANNEL(serverChanTLS)); qio_channel_test_validate(test); test = qio_channel_test_new(); qio_channel_test_run_threads(test, true, QIO_CHANNEL(clientChanTLS), QIO_CHANNEL(serverChanTLS)); qio_channel_test_validate(test); unlink(SERVER_CERT_DIR QCRYPTO_TLS_CREDS_X509_CA_CERT); unlink(SERVER_CERT_DIR QCRYPTO_TLS_CREDS_X509_SERVER_CERT); unlink(SERVER_CERT_DIR QCRYPTO_TLS_CREDS_X509_SERVER_KEY); unlink(CLIENT_CERT_DIR QCRYPTO_TLS_CREDS_X509_CA_CERT); unlink(CLIENT_CERT_DIR QCRYPTO_TLS_CREDS_X509_CLIENT_CERT); unlink(CLIENT_CERT_DIR QCRYPTO_TLS_CREDS_X509_CLIENT_KEY); rmdir(CLIENT_CERT_DIR); rmdir(SERVER_CERT_DIR); object_unparent(OBJECT(serverCreds)); object_unparent(OBJECT(clientCreds)); object_unref(OBJECT(serverChanTLS)); object_unref(OBJECT(clientChanTLS)); object_unref(OBJECT(serverChanSock)); object_unref(OBJECT(clientChanSock)); close(channel[0]); close(channel[1]); }
12,204
FFmpeg
4df8bdeef3c8042659147daafad34bd76ad09096
1
int ff_wma_run_level_decode(AVCodecContext* avctx, GetBitContext* gb, VLC *vlc, const uint16_t *level_table, const uint16_t *run_table, int version, WMACoef *ptr, int offset, int num_coefs, int block_len, int frame_len_bits, int coef_nb_bits) { int code, run, level, sign; WMACoef* eptr = ptr + num_coefs; ptr += offset; for(;;) { code = get_vlc2(gb, vlc->table, VLCBITS, VLCMAX); if (code < 0) return -1; if (code == 1) { /* EOB */ break; } else if (code == 0) { /* escape */ if (!version) { level = get_bits(gb, coef_nb_bits); /* NOTE: this is rather suboptimal. reading block_len_bits would be better */ run = get_bits(gb, frame_len_bits); } else { level = ff_wma_get_large_val(gb); /** escape decode */ if (get_bits1(gb)) { if (get_bits1(gb)) { if (get_bits1(gb)) { av_log(avctx,AV_LOG_ERROR, "broken escape sequence\n"); return -1; } else run = get_bits(gb, frame_len_bits) + 4; } else run = get_bits(gb, 2) + 1; } else run = 0; } } else { /* normal code */ run = run_table[code]; level = level_table[code]; } sign = get_bits1(gb); if (!sign) level = -level; ptr += run; if (ptr >= eptr) { av_log(NULL, AV_LOG_ERROR, "overflow in spectral RLE, ignoring\n"); break; } *ptr++ = level; /* NOTE: EOB can be omitted */ if (ptr >= eptr) break; } return 0; }
12,206
qemu
a89d89d3e65800fa4a8e00de7af0ea8272bef779
1
static int qemu_rbd_snap_remove(BlockDriverState *bs,
                                const char *snapshot_name)
{
    BDRVRBDState *s = bs->opaque;
    int r;

    r = rbd_snap_remove(s->image, snapshot_name);
    return r;
}
12,207
FFmpeg
2cbe6bac0337939f023bd1c37a9c455e6d535f3a
1
static void blend_frames_c(BLEND_FUNC_PARAMS)
{
    int line, pixel;

    for (line = 0; line < height; line++) {
        for (pixel = 0; pixel < width; pixel++) {
            // integer version of (src1 * factor1) + (src2 * factor2) + 0.5
            // 0.5 is for rounding
            // 128 is the integer representation of 0.5 << 8
            dst[pixel] = ((src1[pixel] * factor1) + (src2[pixel] * factor2) + 128) >> 8;
        }
        src1 += src1_linesize;
        src2 += src2_linesize;
        dst += dst_linesize;
    }
}
12,208
qemu
40ff6d7e8dceca227e7f8a3e8e0d58b2c66d19b4
1
int tcp_start_incoming_migration(const char *host_port)
{
    struct sockaddr_in addr;
    int val;
    int s;

    if (parse_host_port(&addr, host_port) < 0) {
        fprintf(stderr, "invalid host/port combination: %s\n", host_port);
        return -EINVAL;
    }

    s = socket(PF_INET, SOCK_STREAM, 0);
    if (s == -1)
        return -socket_error();

    val = 1;
    setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (const char *)&val, sizeof(val));

    if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) == -1)
        goto err;
    if (listen(s, 1) == -1)
        goto err;

    qemu_set_fd_handler2(s, NULL, tcp_accept_incoming_migration, NULL,
                         (void *)(unsigned long)s);

    return 0;

err:
    close(s);
    return -socket_error();
}
12,209
qemu
cb45de6798956975c4b13a6233f7a00d2239b61a
1
opts_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp)
{
    OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
    const QemuOpt *opt;
    int64_t val;
    char *endptr;

    opt = lookup_scalar(ov, name, errp);
    if (!opt) {
        return;
    }

    val = strtosz_suffix(opt->str ? opt->str : "", &endptr,
                         STRTOSZ_DEFSUFFIX_B);
    if (val != -1 && *endptr == '\0') {
        *obj = val;
        processed(ov, name);
        return;
    }

    error_set(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
              "a size value representible as a non-negative int64");
}
12,210
qemu
000cacf6f9dce7d71f88aadf7e9b3688eaa3ab69
1
static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r) { int b1, op1_offset, op2_offset, is_xmm, val, ot; int modrm, mod, rm, reg, reg_addr, offset_addr; void *sse_op2; b &= 0xff; if (s->prefix & PREFIX_DATA) b1 = 1; else if (s->prefix & PREFIX_REPZ) b1 = 2; else if (s->prefix & PREFIX_REPNZ) b1 = 3; else b1 = 0; sse_op2 = sse_op_table1[b][b1]; if (!sse_op2) goto illegal_op; if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) { is_xmm = 1; } else { if (b1 == 0) { /* MMX case */ is_xmm = 0; } else { is_xmm = 1; } } /* simple MMX/SSE operation */ if (s->flags & HF_TS_MASK) { gen_exception(s, EXCP07_PREX, pc_start - s->cs_base); return; } if (s->flags & HF_EM_MASK) { illegal_op: gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base); return; } if (is_xmm && !(s->flags & HF_OSFXSR_MASK)) if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA)) goto illegal_op; if (b == 0x0e) { if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) goto illegal_op; /* femms */ tcg_gen_helper_0_0(helper_emms); return; } if (b == 0x77) { /* emms */ tcg_gen_helper_0_0(helper_emms); return; } /* prepare MMX state (XXX: optimize by storing fptt and fptags in the static cpu state) */ if (!is_xmm) { tcg_gen_helper_0_0(helper_enter_mmx); } modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7); if (is_xmm) reg |= rex_r; mod = (modrm >> 6) & 3; if (sse_op2 == SSE_SPECIAL) { b |= (b1 << 8); switch(b) { case 0x0e7: /* movntq */ if (mod == 3) goto illegal_op; gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx)); break; case 0x1e7: /* movntdq */ case 0x02b: /* movntps */ case 0x12b: /* movntps */ case 0x3f0: /* lddqu */ if (mod == 3) goto illegal_op; gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); break; case 0x6e: /* movd mm, ea */ #ifdef TARGET_X86_64 if (s->dflag == 2) { gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0); tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); } else #endif { gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); tcg_gen_helper_0_2(helper_movl_mm_T0_mmx, cpu_ptr0, cpu_T[0]); } break; case 0x16e: /* movd xmm, ea */ #ifdef TARGET_X86_64 if (s->dflag == 2) { gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[reg])); tcg_gen_helper_0_2(helper_movq_mm_T0_xmm, cpu_ptr0, cpu_T[0]); } else #endif { gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[reg])); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); tcg_gen_helper_0_2(helper_movl_mm_T0_xmm, cpu_ptr0, cpu_tmp2_i32); } break; case 0x6f: /* movq mm, ea */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx)); } else { rm = (modrm & 7); tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx)); tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); } break; case 0x010: /* movups */ case 0x110: /* movupd */ case 0x028: /* movaps */ case 0x128: /* movapd */ case 0x16f: /* movdqa xmm, ea */ case 0x26f: /* movdqu xmm, ea */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]), offsetof(CPUX86State,xmm_regs[rm])); } 
break; case 0x210: /* movss xmm, ea */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); gen_op_movl_T0_0(); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1))); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)), offsetof(CPUX86State,xmm_regs[rm].XMM_L(0))); } break; case 0x310: /* movsd xmm, ea */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); gen_op_movl_T0_0(); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); } break; case 0x012: /* movlps */ case 0x112: /* movlpd */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); } else { /* movhlps */ rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1))); } break; case 0x212: /* movsldup */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)), offsetof(CPUX86State,xmm_regs[rm].XMM_L(0))); gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)), offsetof(CPUX86State,xmm_regs[rm].XMM_L(2))); } gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)), offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)), offsetof(CPUX86State,xmm_regs[reg].XMM_L(2))); break; case 0x312: /* movddup */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); } gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)), offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); break; case 0x016: /* movhps */ case 0x116: /* movhpd */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1))); } else { /* movlhps */ rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)), offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); } break; case 0x216: /* movshdup */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)), offsetof(CPUX86State,xmm_regs[rm].XMM_L(1))); gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)), offsetof(CPUX86State,xmm_regs[rm].XMM_L(3))); } gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)), offsetof(CPUX86State,xmm_regs[reg].XMM_L(1))); gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)), offsetof(CPUX86State,xmm_regs[reg].XMM_L(3))); break; case 0x7e: 
/* movd ea, mm */ #ifdef TARGET_X86_64 if (s->dflag == 2) { tcg_gen_ld_i64(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx)); gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1); } else #endif { tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0))); gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1); } break; case 0x17e: /* movd ea, xmm */ #ifdef TARGET_X86_64 if (s->dflag == 2) { tcg_gen_ld_i64(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1); } else #endif { tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1); } break; case 0x27e: /* movq xmm, ea */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); } gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1))); break; case 0x7f: /* movq ea, mm */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx)); } else { rm = (modrm & 7); gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx), offsetof(CPUX86State,fpregs[reg].mmx)); } break; case 0x011: /* movups */ case 0x111: /* movupd */ case 0x029: /* movaps */ case 0x129: /* movapd */ case 0x17f: /* movdqa ea, xmm */ case 0x27f: /* movdqu ea, xmm */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg])); } else { rm = (modrm & 7) | REX_B(s); gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]), offsetof(CPUX86State,xmm_regs[reg])); } break; case 0x211: /* movss ea, xmm */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); gen_op_st_T0_A0(OT_LONG + s->mem_index); } else { rm = (modrm & 7) | REX_B(s); gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)), offsetof(CPUX86State,xmm_regs[reg].XMM_L(0))); } break; case 0x311: /* movsd ea, xmm */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)), offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); } break; case 0x013: /* movlps */ case 0x113: /* movlpd */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); } else { goto illegal_op; } break; case 0x017: /* movhps */ case 0x117: /* movhpd */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1))); } else { goto illegal_op; } break; case 0x71: /* shift mm, im */ case 0x72: case 0x73: case 0x171: /* shift xmm, im */ case 0x172: case 0x173: val = ldub_code(s->pc++); if (is_xmm) { gen_op_movl_T0_im(val); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0))); gen_op_movl_T0_0(); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1))); op1_offset = offsetof(CPUX86State,xmm_t0); } else { gen_op_movl_T0_im(val); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0))); gen_op_movl_T0_0(); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1))); 
op1_offset = offsetof(CPUX86State,mmx_t0); } sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1]; if (!sse_op2) goto illegal_op; if (is_xmm) { rm = (modrm & 7) | REX_B(s); op2_offset = offsetof(CPUX86State,xmm_regs[rm]); } else { rm = (modrm & 7); op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset); tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1); break; case 0x050: /* movmskps */ rm = (modrm & 7) | REX_B(s); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm])); tcg_gen_helper_1_1(helper_movmskps, cpu_tmp2_i32, cpu_ptr0); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_mov_reg_T0(OT_LONG, reg); break; case 0x150: /* movmskpd */ rm = (modrm & 7) | REX_B(s); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm])); tcg_gen_helper_1_1(helper_movmskpd, cpu_tmp2_i32, cpu_ptr0); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); gen_op_mov_reg_T0(OT_LONG, reg); break; case 0x02a: /* cvtpi2ps */ case 0x12a: /* cvtpi2pd */ tcg_gen_helper_0_0(helper_enter_mmx); if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); op2_offset = offsetof(CPUX86State,mmx_t0); gen_ldq_env_A0(s->mem_index, op2_offset); } else { rm = (modrm & 7); op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } op1_offset = offsetof(CPUX86State,xmm_regs[reg]); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); switch(b >> 8) { case 0x0: tcg_gen_helper_0_2(helper_cvtpi2ps, cpu_ptr0, cpu_ptr1); break; default: case 0x1: tcg_gen_helper_0_2(helper_cvtpi2pd, cpu_ptr0, cpu_ptr1); break; } break; case 0x22a: /* cvtsi2ss */ case 0x32a: /* cvtsi2sd */ ot = (s->dflag == 2) ? OT_QUAD : OT_LONG; gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); op1_offset = offsetof(CPUX86State,xmm_regs[reg]); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)]; if (ot == OT_LONG) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_tmp2_i32); } else { tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_T[0]); } break; case 0x02c: /* cvttps2pi */ case 0x12c: /* cvttpd2pi */ case 0x02d: /* cvtps2pi */ case 0x12d: /* cvtpd2pi */ tcg_gen_helper_0_0(helper_enter_mmx); if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); op2_offset = offsetof(CPUX86State,xmm_t0); gen_ldo_env_A0(s->mem_index, op2_offset); } else { rm = (modrm & 7) | REX_B(s); op2_offset = offsetof(CPUX86State,xmm_regs[rm]); } op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); switch(b) { case 0x02c: tcg_gen_helper_0_2(helper_cvttps2pi, cpu_ptr0, cpu_ptr1); break; case 0x12c: tcg_gen_helper_0_2(helper_cvttpd2pi, cpu_ptr0, cpu_ptr1); break; case 0x02d: tcg_gen_helper_0_2(helper_cvtps2pi, cpu_ptr0, cpu_ptr1); break; case 0x12d: tcg_gen_helper_0_2(helper_cvtpd2pi, cpu_ptr0, cpu_ptr1); break; } break; case 0x22c: /* cvttss2si */ case 0x32c: /* cvttsd2si */ case 0x22d: /* cvtss2si */ case 0x32d: /* cvtsd2si */ ot = (s->dflag == 2) ? 
OT_QUAD : OT_LONG; if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); if ((b >> 8) & 1) { gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0))); } else { gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0))); } op2_offset = offsetof(CPUX86State,xmm_t0); } else { rm = (modrm & 7) | REX_B(s); op2_offset = offsetof(CPUX86State,xmm_regs[rm]); } sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 + (b & 1) * 4]; tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset); if (ot == OT_LONG) { tcg_gen_helper_1_1(sse_op2, cpu_tmp2_i32, cpu_ptr0); tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); } else { tcg_gen_helper_1_1(sse_op2, cpu_T[0], cpu_ptr0); } gen_op_mov_reg_T0(ot, reg); break; case 0xc4: /* pinsrw */ case 0x1c4: s->rip_offset = 1; gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); val = ldub_code(s->pc++); if (b1) { val &= 7; tcg_gen_st16_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_W(val))); } else { val &= 3; tcg_gen_st16_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val))); } break; case 0xc5: /* pextrw */ case 0x1c5: if (mod != 3) goto illegal_op; ot = (s->dflag == 2) ? OT_QUAD : OT_LONG; val = ldub_code(s->pc++); if (b1) { val &= 7; rm = (modrm & 7) | REX_B(s); tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[rm].XMM_W(val))); } else { val &= 3; rm = (modrm & 7); tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val))); } reg = ((modrm >> 3) & 7) | rex_r; gen_op_mov_reg_T0(ot, reg); break; case 0x1d6: /* movq ea, xmm */ if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); } else { rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)), offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0))); gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1))); } break; case 0x2d6: /* movq2dq */ tcg_gen_helper_0_0(helper_enter_mmx); rm = (modrm & 7); gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)), offsetof(CPUX86State,fpregs[rm].mmx)); gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1))); break; case 0x3d6: /* movdq2q */ tcg_gen_helper_0_0(helper_enter_mmx); rm = (modrm & 7) | REX_B(s); gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx), offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0))); break; case 0xd7: /* pmovmskb */ case 0x1d7: if (mod != 3) goto illegal_op; if (b1) { rm = (modrm & 7) | REX_B(s); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm])); tcg_gen_helper_1_1(helper_pmovmskb_xmm, cpu_tmp2_i32, cpu_ptr0); } else { rm = (modrm & 7); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx)); tcg_gen_helper_1_1(helper_pmovmskb_mmx, cpu_tmp2_i32, cpu_ptr0); } tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32); reg = ((modrm >> 3) & 7) | rex_r; gen_op_mov_reg_T0(OT_LONG, reg); break; case 0x038: case 0x138: b = modrm; modrm = ldub_code(s->pc++); rm = modrm & 7; reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; if (s->prefix & PREFIX_REPNZ) goto crc32; sse_op2 = sse_op_table6[b].op[b1]; if (!sse_op2) goto illegal_op; if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask)) goto illegal_op; if (b1) { op1_offset = offsetof(CPUX86State,xmm_regs[reg]); if (mod == 3) { op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]); } else { op2_offset = offsetof(CPUX86State,xmm_t0); gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); switch (b) { case 0x20: case 0x30: /* 
pmovsxbw, pmovzxbw */ case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */ case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */ gen_ldq_env_A0(s->mem_index, op2_offset + offsetof(XMMReg, XMM_Q(0))); break; case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */ case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */ tcg_gen_qemu_ld32u(cpu_tmp2_i32, cpu_A0, (s->mem_index >> 2) - 1); tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset + offsetof(XMMReg, XMM_L(0))); break; case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */ tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0, (s->mem_index >> 2) - 1); tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset + offsetof(XMMReg, XMM_W(0))); break; case 0x2a: /* movntqda */ gen_ldo_env_A0(s->mem_index, op1_offset); return; default: gen_ldo_env_A0(s->mem_index, op2_offset); } } } else { op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); if (mod == 3) { op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } else { op2_offset = offsetof(CPUX86State,mmx_t0); gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_ldq_env_A0(s->mem_index, op2_offset); } } if (sse_op2 == SSE_SPECIAL) goto illegal_op; tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1); if (b == 0x17) s->cc_op = CC_OP_EFLAGS; break; case 0x338: /* crc32 */ crc32: b = modrm; modrm = ldub_code(s->pc++); reg = ((modrm >> 3) & 7) | rex_r; if (b != 0xf0 && b != 0xf1) goto illegal_op; if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) goto illegal_op; if (b == 0xf0) ot = OT_BYTE; else if (b == 0xf1 && s->dflag != 2) if (s->prefix & PREFIX_DATA) ot = OT_WORD; else ot = OT_LONG; else ot = OT_QUAD; gen_op_mov_TN_reg(OT_LONG, 0, reg); tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]); gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0); tcg_gen_helper_1_3(helper_crc32, cpu_T[0], cpu_tmp2_i32, cpu_T[0], tcg_const_i32(8 << ot)); ot = (s->dflag == 2) ? OT_QUAD : OT_LONG; gen_op_mov_reg_T0(ot, reg); break; case 0x03a: case 0x13a: b = modrm; modrm = ldub_code(s->pc++); rm = modrm & 7; reg = ((modrm >> 3) & 7) | rex_r; mod = (modrm >> 6) & 3; sse_op2 = sse_op_table7[b].op[b1]; if (!sse_op2) goto illegal_op; if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask)) goto illegal_op; if (sse_op2 == SSE_SPECIAL) { ot = (s->dflag == 2) ? 
OT_QUAD : OT_LONG; rm = (modrm & 7) | REX_B(s); if (mod != 3) gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); reg = ((modrm >> 3) & 7) | rex_r; val = ldub_code(s->pc++); switch (b) { case 0x14: /* pextrb */ tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, xmm_regs[reg].XMM_B(val & 15))); if (mod == 3) gen_op_mov_reg_T0(ot, rm); else tcg_gen_qemu_st8(cpu_T[0], cpu_A0, (s->mem_index >> 2) - 1); break; case 0x15: /* pextrw */ tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, xmm_regs[reg].XMM_W(val & 7))); if (mod == 3) gen_op_mov_reg_T0(ot, rm); else tcg_gen_qemu_st16(cpu_T[0], cpu_A0, (s->mem_index >> 2) - 1); break; case 0x16: if (ot == OT_LONG) { /* pextrd */ tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, xmm_regs[reg].XMM_L(val & 3))); if (mod == 3) gen_op_mov_reg_v(ot, rm, cpu_tmp2_i32); else tcg_gen_qemu_st32(cpu_tmp2_i32, cpu_A0, (s->mem_index >> 2) - 1); } else { /* pextrq */ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offsetof(CPUX86State, xmm_regs[reg].XMM_Q(val & 1))); if (mod == 3) gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64); else tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); } break; case 0x17: /* extractps */ tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, xmm_regs[reg].XMM_L(val & 3))); if (mod == 3) gen_op_mov_reg_T0(ot, rm); else tcg_gen_qemu_st32(cpu_T[0], cpu_A0, (s->mem_index >> 2) - 1); break; case 0x20: /* pinsrb */ if (mod == 3) gen_op_mov_TN_reg(OT_LONG, 0, rm); else tcg_gen_qemu_ld8u(cpu_T[0], cpu_A0, (s->mem_index >> 2) - 1); tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, xmm_regs[reg].XMM_B(val & 15))); break; case 0x21: /* insertps */ if (mod == 3) tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State,xmm_regs[rm] .XMM_L((val >> 6) & 3))); else tcg_gen_qemu_ld32u(cpu_tmp2_i32, cpu_A0, (s->mem_index >> 2) - 1); tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State,xmm_regs[reg] .XMM_L((val >> 4) & 3))); if ((val >> 0) & 1) tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/), cpu_env, offsetof(CPUX86State, xmm_regs[reg].XMM_L(0))); if ((val >> 1) & 1) tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/), cpu_env, offsetof(CPUX86State, xmm_regs[reg].XMM_L(1))); if ((val >> 2) & 1) tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/), cpu_env, offsetof(CPUX86State, xmm_regs[reg].XMM_L(2))); if ((val >> 3) & 1) tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/), cpu_env, offsetof(CPUX86State, xmm_regs[reg].XMM_L(3))); break; case 0x22: if (ot == OT_LONG) { /* pinsrd */ if (mod == 3) gen_op_mov_v_reg(ot, cpu_tmp2_i32, rm); else tcg_gen_qemu_ld32u(cpu_tmp2_i32, cpu_A0, (s->mem_index >> 2) - 1); tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, xmm_regs[reg].XMM_L(val & 3))); } else { /* pinsrq */ if (mod == 3) gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm); else tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, (s->mem_index >> 2) - 1); tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offsetof(CPUX86State, xmm_regs[reg].XMM_Q(val & 1))); } break; } return; } if (b1) { op1_offset = offsetof(CPUX86State,xmm_regs[reg]); if (mod == 3) { op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]); } else { op2_offset = offsetof(CPUX86State,xmm_t0); gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_ldo_env_A0(s->mem_index, op2_offset); } } else { op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); if (mod == 3) { op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } else { op2_offset = offsetof(CPUX86State,mmx_t0); gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); gen_ldq_env_A0(s->mem_index, op2_offset); } } val = ldub_code(s->pc++); if 
((b & 0xfc) == 0x60) { /* pcmpXstrX */ s->cc_op = CC_OP_EFLAGS; if (s->dflag == 2) /* The helper must use entire 64-bit gp registers */ val |= 1 << 8; } tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); tcg_gen_helper_0_3(sse_op2, cpu_ptr0, cpu_ptr1, tcg_const_i32(val)); break; default: goto illegal_op; } } else { /* generic MMX or SSE operation */ switch(b) { case 0x70: /* pshufx insn */ case 0xc6: /* pshufx insn */ case 0xc2: /* compare insns */ s->rip_offset = 1; break; default: break; } if (is_xmm) { op1_offset = offsetof(CPUX86State,xmm_regs[reg]); if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); op2_offset = offsetof(CPUX86State,xmm_t0); if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) || b == 0xc2)) { /* specific case for SSE single instructions */ if (b1 == 2) { /* 32 bit access */ gen_op_ld_T0_A0(OT_LONG + s->mem_index); tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0))); } else { /* 64 bit access */ gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0))); } } else { gen_ldo_env_A0(s->mem_index, op2_offset); } } else { rm = (modrm & 7) | REX_B(s); op2_offset = offsetof(CPUX86State,xmm_regs[rm]); } } else { op1_offset = offsetof(CPUX86State,fpregs[reg].mmx); if (mod != 3) { gen_lea_modrm(s, modrm, &reg_addr, &offset_addr); op2_offset = offsetof(CPUX86State,mmx_t0); gen_ldq_env_A0(s->mem_index, op2_offset); } else { rm = (modrm & 7); op2_offset = offsetof(CPUX86State,fpregs[rm].mmx); } } switch(b) { case 0x0f: /* 3DNow! data insns */ if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) goto illegal_op; val = ldub_code(s->pc++); sse_op2 = sse_op_table5[val]; if (!sse_op2) goto illegal_op; tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1); break; case 0x70: /* pshufx insn */ case 0xc6: /* pshufx insn */ val = ldub_code(s->pc++); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); tcg_gen_helper_0_3(sse_op2, cpu_ptr0, cpu_ptr1, tcg_const_i32(val)); break; case 0xc2: /* compare insns */ val = ldub_code(s->pc++); if (val >= 8) goto illegal_op; sse_op2 = sse_op_table4[val][b1]; tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1); break; case 0xf7: /* maskmov : we must prepare A0 */ if (mod != 3) goto illegal_op; #ifdef TARGET_X86_64 if (s->aflag == 2) { gen_op_movq_A0_reg(R_EDI); } else #endif { gen_op_movl_A0_reg(R_EDI); if (s->aflag == 0) gen_op_andl_A0_ffff(); } gen_add_A0_ds_seg(s); tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); tcg_gen_helper_0_3(sse_op2, cpu_ptr0, cpu_ptr1, cpu_A0); break; default: tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset); tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset); tcg_gen_helper_0_2(sse_op2, cpu_ptr0, cpu_ptr1); break; } if (b == 0x2e || b == 0x2f) { s->cc_op = CC_OP_EFLAGS; } } }
12,213
FFmpeg
c23acbaed40101c677dfcfbbfe0d2c230a8e8f44
1
static void ff_jref_idct1_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;

    dest[0] = cm[dest[0] + ((block[0] + 4)>>3)];
}
12,214
FFmpeg
2254b559cbcfc0418135f09add37c0a5866b1981
1
static void hScale_altivec_real(SwsContext *c, int16_t *dst, int dstW, const uint8_t *src, const int16_t *filter, const int16_t *filterPos, int filterSize) { register int i; DECLARE_ALIGNED(16, int, tempo)[4]; if (filterSize % 4) { for (i = 0; i < dstW; i++) { register int j; register int srcPos = filterPos[i]; register int val = 0; for (j = 0; j < filterSize; j++) val += ((int)src[srcPos + j]) * filter[filterSize * i + j]; dst[i] = FFMIN(val >> 7, (1 << 15) - 1); } } else switch (filterSize) { case 4: for (i = 0; i < dstW; i++) { register int srcPos = filterPos[i]; vector unsigned char src_v0 = vec_ld(srcPos, src); vector unsigned char src_v1, src_vF; vector signed short src_v, filter_v; vector signed int val_vEven, val_s; if ((((uintptr_t)src + srcPos) % 16) > 12) { src_v1 = vec_ld(srcPos + 16, src); } src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src)); src_v = // vec_unpackh sign-extends... (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); // now put our elements in the even slots src_v = vec_mergeh(src_v, (vector signed short)vzero); filter_v = vec_ld(i << 3, filter); // The 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2). // The neat trick: We only care for half the elements, // high or low depending on (i<<3)%16 (it's 0 or 8 here), // and we're going to use vec_mule, so we choose // carefully how to "unpack" the elements into the even slots. if ((i << 3) % 16) filter_v = vec_mergel(filter_v, (vector signed short)vzero); else filter_v = vec_mergeh(filter_v, (vector signed short)vzero); val_vEven = vec_mule(src_v, filter_v); val_s = vec_sums(val_vEven, vzero); vec_st(val_s, 0, tempo); dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1); } break; case 8: for (i = 0; i < dstW; i++) { register int srcPos = filterPos[i]; vector unsigned char src_v0 = vec_ld(srcPos, src); vector unsigned char src_v1, src_vF; vector signed short src_v, filter_v; vector signed int val_v, val_s; if ((((uintptr_t)src + srcPos) % 16) > 8) { src_v1 = vec_ld(srcPos + 16, src); } src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src)); src_v = // vec_unpackh sign-extends... (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); filter_v = vec_ld(i << 4, filter); // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2) val_v = vec_msums(src_v, filter_v, (vector signed int)vzero); val_s = vec_sums(val_v, vzero); vec_st(val_s, 0, tempo); dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1); } break; case 16: for (i = 0; i < dstW; i++) { register int srcPos = filterPos[i]; vector unsigned char src_v0 = vec_ld(srcPos, src); vector unsigned char src_v1 = vec_ld(srcPos + 16, src); vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src)); vector signed short src_vA = // vec_unpackh sign-extends... (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); vector signed short src_vB = // vec_unpackh sign-extends... 
(vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF)); vector signed short filter_v0 = vec_ld(i << 5, filter); vector signed short filter_v1 = vec_ld((i << 5) + 16, filter); // the 5 above are 4 (filterSize == 16) + 1 (sizeof(short) == 2) vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero); vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc); vector signed int val_s = vec_sums(val_v, vzero); vec_st(val_s, 0, tempo); dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1); } break; default: for (i = 0; i < dstW; i++) { register int j; register int srcPos = filterPos[i]; vector signed int val_s, val_v = (vector signed int)vzero; vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter); vector unsigned char permF = vec_lvsl((i * 2 * filterSize), filter); vector unsigned char src_v0 = vec_ld(srcPos, src); vector unsigned char permS = vec_lvsl(srcPos, src); for (j = 0; j < filterSize - 15; j += 16) { vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src); vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS); vector signed short src_vA = // vec_unpackh sign-extends... (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); vector signed short src_vB = // vec_unpackh sign-extends... (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF)); vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter); vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter); vector signed short filter_v0 = vec_perm(filter_v0R, filter_v1R, permF); vector signed short filter_v1 = vec_perm(filter_v1R, filter_v2R, permF); vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v); val_v = vec_msums(src_vB, filter_v1, val_acc); filter_v0R = filter_v2R; src_v0 = src_v1; } if (j < filterSize - 7) { // loading src_v0 is useless, it's already done above // vector unsigned char src_v0 = vec_ld(srcPos + j, src); vector unsigned char src_v1, src_vF; vector signed short src_v, filter_v1R, filter_v; if ((((uintptr_t)src + srcPos) % 16) > 8) { src_v1 = vec_ld(srcPos + j + 16, src); } src_vF = vec_perm(src_v0, src_v1, permS); src_v = // vec_unpackh sign-extends... (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF)); // loading filter_v0R is useless, it's already done above // vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter); filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter); filter_v = vec_perm(filter_v0R, filter_v1R, permF); val_v = vec_msums(src_v, filter_v, val_v); } val_s = vec_sums(val_v, vzero); vec_st(val_s, 0, tempo); dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1); } } }
12,217
FFmpeg
29b0d94b43ac960cb442049a5d737a3386ff0337
1
static int dfa_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { DfaContext *s = avctx->priv_data; const uint8_t *buf = avpkt->data; const uint8_t *buf_end = avpkt->data + avpkt->size; const uint8_t *tmp_buf; uint32_t chunk_type, chunk_size; uint8_t *dst; int ret; int i, pal_elems; if (s->pic.data[0]) avctx->release_buffer(avctx, &s->pic); if ((ret = avctx->get_buffer(avctx, &s->pic))) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } while (buf < buf_end) { chunk_size = AV_RL32(buf + 4); chunk_type = AV_RL32(buf + 8); buf += 12; if (buf_end - buf < chunk_size) { av_log(avctx, AV_LOG_ERROR, "Chunk size is too big (%d bytes)\n", chunk_size); return -1; } if (!chunk_type) break; if (chunk_type == 1) { pal_elems = FFMIN(chunk_size / 3, 256); tmp_buf = buf; for (i = 0; i < pal_elems; i++) { s->pal[i] = bytestream_get_be24(&tmp_buf) << 2; s->pal[i] |= (s->pal[i] >> 6) & 0x333; } s->pic.palette_has_changed = 1; } else if (chunk_type <= 9) { if (decoder[chunk_type - 2](s->frame_buf, avctx->width, avctx->height, buf, buf + chunk_size)) { av_log(avctx, AV_LOG_ERROR, "Error decoding %s chunk\n", chunk_name[chunk_type - 2]); return -1; } } else { av_log(avctx, AV_LOG_WARNING, "Ignoring unknown chunk type %d\n", chunk_type); } buf += chunk_size; } buf = s->frame_buf; dst = s->pic.data[0]; for (i = 0; i < avctx->height; i++) { memcpy(dst, buf, avctx->width); dst += s->pic.linesize[0]; buf += avctx->width; } memcpy(s->pic.data[1], s->pal, sizeof(s->pal)); *data_size = sizeof(AVFrame); *(AVFrame*)data = s->pic; return avpkt->size; }
12,218