label (int64, 0-1) | func1 (string, lengths 23-97k) | id (int64, 0-27.3k) |
---|---|---|
1 | static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt) { LavfiContext *lavfi = avctx->priv_data; double min_pts = DBL_MAX; int stream_idx, min_pts_sink_idx = 0; AVFilterBufferRef *ref; AVPicture pict; int ret, i; int size = 0; /* iterate through all the graph sinks. Select the sink with the * minimum PTS */ for (i = 0; i < avctx->nb_streams; i++) { AVRational tb = lavfi->sinks[i]->inputs[0]->time_base; double d; int ret; if (lavfi->sink_eof[i]) continue; ret = av_buffersink_get_buffer_ref(lavfi->sinks[i], &ref, AV_BUFFERSINK_FLAG_PEEK); if (ret == AVERROR_EOF) { av_dlog(avctx, "EOF sink_idx:%d\n", i); lavfi->sink_eof[i] = 1; continue; } else if (ret < 0) return ret; d = av_rescale_q(ref->pts, tb, AV_TIME_BASE_Q); av_dlog(avctx, "sink_idx:%d time:%f\n", i, d); if (d < min_pts) { min_pts = d; min_pts_sink_idx = i; } } if (min_pts == DBL_MAX) return AVERROR_EOF; av_dlog(avctx, "min_pts_sink_idx:%i\n", min_pts_sink_idx); av_buffersink_get_buffer_ref(lavfi->sinks[min_pts_sink_idx], &ref, 0); stream_idx = lavfi->sink_stream_map[min_pts_sink_idx]; if (ref->video) { size = avpicture_get_size(ref->format, ref->video->w, ref->video->h); if ((ret = av_new_packet(pkt, size)) < 0) return ret; memcpy(pict.data, ref->data, 4*sizeof(ref->data[0])); memcpy(pict.linesize, ref->linesize, 4*sizeof(ref->linesize[0])); avpicture_layout(&pict, ref->format, ref->video->w, ref->video->h, pkt->data, size); } else if (ref->audio) { size = ref->audio->nb_samples * av_get_bytes_per_sample(ref->format) * av_get_channel_layout_nb_channels(ref->audio->channel_layout); if ((ret = av_new_packet(pkt, size)) < 0) return ret; memcpy(pkt->data, ref->data[0], size); } if (ref->metadata) { uint8_t *metadata; AVDictionaryEntry *e = NULL; AVBPrint meta_buf; av_bprint_init(&meta_buf, 0, AV_BPRINT_SIZE_UNLIMITED); while ((e = av_dict_get(ref->metadata, "", e, AV_DICT_IGNORE_SUFFIX))) { av_bprintf(&meta_buf, "%s", e->key); av_bprint_chars(&meta_buf, '\0', 1); av_bprintf(&meta_buf, "%s", e->value); av_bprint_chars(&meta_buf, '\0', 1); } if (!av_bprint_is_complete(&meta_buf) || !(metadata = av_packet_new_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, meta_buf.len))) { av_bprint_finalize(&meta_buf, NULL); return AVERROR(ENOMEM); } memcpy(metadata, meta_buf.str, meta_buf.len); av_bprint_finalize(&meta_buf, NULL); } pkt->stream_index = stream_idx; pkt->pts = ref->pts; pkt->pos = ref->pos; pkt->size = size; avfilter_unref_buffer(ref); return size; } | 24,240 |
1 | static inline void qtrle_decode_2n4bpp(QtrleContext *s, int stream_ptr, int row_ptr, int lines_to_change, int bpp) { int rle_code, i; int pixel_ptr; int row_inc = s->frame.linesize[0]; unsigned char pi[16]; /* 16 palette indices */ unsigned char *rgb = s->frame.data[0]; int pixel_limit = s->frame.linesize[0] * s->avctx->height; int num_pixels = (bpp == 4) ? 8 : 16; while (lines_to_change--) { CHECK_STREAM_PTR(2); pixel_ptr = row_ptr + (num_pixels * (s->buf[stream_ptr++] - 1)); while ((rle_code = (signed char)s->buf[stream_ptr++]) != -1) { if (rle_code == 0) { /* there's another skip code in the stream */ CHECK_STREAM_PTR(1); pixel_ptr += (num_pixels * (s->buf[stream_ptr++] - 1)); } else if (rle_code < 0) { /* decode the run length code */ rle_code = -rle_code; /* get the next 4 bytes from the stream, treat them as palette * indexes, and output them rle_code times */ CHECK_STREAM_PTR(4); for (i = num_pixels-1; i >= 0; i--) { pi[num_pixels-1-i] = (s->buf[stream_ptr] >> ((i*bpp) & 0x07)) & ((1<<bpp)-1); stream_ptr+= ((i & ((num_pixels>>2)-1)) == 0); } CHECK_PIXEL_PTR(rle_code * num_pixels); while (rle_code--) { for (i = 0; i < num_pixels; i++) rgb[pixel_ptr++] = pi[i]; } } else { /* copy the same pixel directly to output 4 times */ rle_code *= 4; CHECK_STREAM_PTR(rle_code); CHECK_PIXEL_PTR(rle_code*(num_pixels>>2)); while (rle_code--) { if(bpp == 4) { rgb[pixel_ptr++] = ((s->buf[stream_ptr]) >> 4) & 0x0f; rgb[pixel_ptr++] = (s->buf[stream_ptr++]) & 0x0f; } else { rgb[pixel_ptr++] = ((s->buf[stream_ptr]) >> 6) & 0x03; rgb[pixel_ptr++] = ((s->buf[stream_ptr]) >> 4) & 0x03; rgb[pixel_ptr++] = ((s->buf[stream_ptr]) >> 2) & 0x03; rgb[pixel_ptr++] = (s->buf[stream_ptr++]) & 0x03; } } } } row_ptr += row_inc; } } | 24,241 |
1 | static void display_picref(AVFilterBufferRef *picref, AVRational time_base) { int x, y; uint8_t *p0, *p; int64_t delay; if (picref->pts != AV_NOPTS_VALUE) { if (last_pts != AV_NOPTS_VALUE) { /* sleep roughly the right amount of time; * usleep is in microseconds, just like AV_TIME_BASE. */ delay = av_rescale_q(picref->pts - last_pts, time_base, AV_TIME_BASE_Q); if (delay > 0 && delay < 1000000) usleep(delay); } last_pts = picref->pts; } /* Trivial ASCII grayscale display. */ p0 = picref->data[0]; puts("\033c"); for (y = 0; y < picref->video->h; y++) { p = p0; for (x = 0; x < picref->video->w; x++) putchar(" .-+#"[*(p++) / 52]); putchar('\n'); p0 += picref->linesize[0]; } fflush(stdout); } | 24,242 |
1 | static int colo_packet_compare_udp(Packet *spkt, Packet *ppkt) { int ret; int network_header_length = ppkt->ip->ip_hl * 4; trace_colo_compare_main("compare udp"); /* * Because of ppkt and spkt are both in the same connection, * The ppkt's src ip, dst ip, src port, dst port, ip_proto all are * same with spkt. In addition, IP header's Identification is a random * field, we can handle it in IP fragmentation function later. * COLO just concern the response net packet payload from primary guest * and secondary guest are same or not, So we ignored all IP header include * other field like TOS,TTL,IP Checksum. we only need to compare * the ip payload here. */ ret = colo_packet_compare_common(ppkt, spkt, network_header_length + ETH_HLEN); if (ret) { trace_colo_compare_udp_miscompare("primary pkt size", ppkt->size); trace_colo_compare_udp_miscompare("Secondary pkt size", spkt->size); if (trace_event_get_state(TRACE_COLO_COMPARE_MISCOMPARE)) { qemu_hexdump((char *)ppkt->data, stderr, "colo-compare pri pkt", ppkt->size); qemu_hexdump((char *)spkt->data, stderr, "colo-compare sec pkt", spkt->size); } } return ret; } | 24,243 |
1 | void vga_mem_writeb(VGACommonState *s, hwaddr addr, uint32_t val) { int memory_map_mode, plane, write_mode, b, func_select, mask; uint32_t write_mask, bit_mask, set_mask; #ifdef DEBUG_VGA_MEM printf("vga: [0x" TARGET_FMT_plx "] = 0x%02x\n", addr, val); #endif /* convert to VGA memory offset */ memory_map_mode = (s->gr[VGA_GFX_MISC] >> 2) & 3; addr &= 0x1ffff; switch(memory_map_mode) { case 0: break; case 1: if (addr >= 0x10000) return; addr += s->bank_offset; break; case 2: addr -= 0x10000; if (addr >= 0x8000) return; break; default: case 3: addr -= 0x18000; if (addr >= 0x8000) return; break; } if (s->sr[VGA_SEQ_MEMORY_MODE] & VGA_SR04_CHN_4M) { /* chain 4 mode : simplest access */ plane = addr & 3; mask = (1 << plane); if (s->sr[VGA_SEQ_PLANE_WRITE] & mask) { assert(addr < s->vram_size); s->vram_ptr[addr] = val; #ifdef DEBUG_VGA_MEM printf("vga: chain4: [0x" TARGET_FMT_plx "]\n", addr); #endif s->plane_updated |= mask; /* only used to detect font change */ memory_region_set_dirty(&s->vram, addr, 1); } } else if (s->gr[VGA_GFX_MODE] & 0x10) { /* odd/even mode (aka text mode mapping) */ plane = (s->gr[VGA_GFX_PLANE_READ] & 2) | (addr & 1); mask = (1 << plane); if (s->sr[VGA_SEQ_PLANE_WRITE] & mask) { addr = ((addr & ~1) << 1) | plane; if (addr >= s->vram_size) { return; } s->vram_ptr[addr] = val; #ifdef DEBUG_VGA_MEM printf("vga: odd/even: [0x" TARGET_FMT_plx "]\n", addr); #endif s->plane_updated |= mask; /* only used to detect font change */ memory_region_set_dirty(&s->vram, addr, 1); } } else { /* standard VGA latched access */ write_mode = s->gr[VGA_GFX_MODE] & 3; switch(write_mode) { default: case 0: /* rotate */ b = s->gr[VGA_GFX_DATA_ROTATE] & 7; val = ((val >> b) | (val << (8 - b))) & 0xff; val |= val << 8; val |= val << 16; /* apply set/reset mask */ set_mask = mask16[s->gr[VGA_GFX_SR_ENABLE]]; val = (val & ~set_mask) | (mask16[s->gr[VGA_GFX_SR_VALUE]] & set_mask); bit_mask = s->gr[VGA_GFX_BIT_MASK]; break; case 1: val = s->latch; goto do_write; case 2: val = mask16[val & 0x0f]; bit_mask = s->gr[VGA_GFX_BIT_MASK]; break; case 3: /* rotate */ b = s->gr[VGA_GFX_DATA_ROTATE] & 7; val = (val >> b) | (val << (8 - b)); bit_mask = s->gr[VGA_GFX_BIT_MASK] & val; val = mask16[s->gr[VGA_GFX_SR_VALUE]]; break; } /* apply logical operation */ func_select = s->gr[VGA_GFX_DATA_ROTATE] >> 3; switch(func_select) { case 0: default: /* nothing to do */ break; case 1: /* and */ val &= s->latch; break; case 2: /* or */ val |= s->latch; break; case 3: /* xor */ val ^= s->latch; break; } /* apply bit mask */ bit_mask |= bit_mask << 8; bit_mask |= bit_mask << 16; val = (val & bit_mask) | (s->latch & ~bit_mask); do_write: /* mask data according to sr[2] */ mask = s->sr[VGA_SEQ_PLANE_WRITE]; s->plane_updated |= mask; /* only used to detect font change */ write_mask = mask16[mask]; if (addr * sizeof(uint32_t) >= s->vram_size) { return; } ((uint32_t *)s->vram_ptr)[addr] = (((uint32_t *)s->vram_ptr)[addr] & ~write_mask) | (val & write_mask); #ifdef DEBUG_VGA_MEM printf("vga: latch: [0x" TARGET_FMT_plx "] mask=0x%08x val=0x%08x\n", addr * 4, write_mask, val); #endif memory_region_set_dirty(&s->vram, addr << 2, sizeof(uint32_t)); } } | 24,245 |
1 | static int read_channel_params(MLPDecodeContext *m, unsigned int substr, GetBitContext *gbp, unsigned int ch) { SubStream *s = &m->substream[substr]; ChannelParams *cp = &s->channel_params[ch]; FilterParams *fir = &cp->filter_params[FIR]; FilterParams *iir = &cp->filter_params[IIR]; int ret; if (s->param_presence_flags & PARAM_FIR) if (get_bits1(gbp)) if ((ret = read_filter_params(m, gbp, substr, ch, FIR)) < 0) return ret; if (s->param_presence_flags & PARAM_IIR) if (get_bits1(gbp)) if ((ret = read_filter_params(m, gbp, substr, ch, IIR)) < 0) return ret; if (fir->order + iir->order > 8) { av_log(m->avctx, AV_LOG_ERROR, "Total filter orders too high.\n"); return AVERROR_INVALIDDATA; } if (fir->order && iir->order && fir->shift != iir->shift) { av_log(m->avctx, AV_LOG_ERROR, "FIR and IIR filters must use the same precision.\n"); return AVERROR_INVALIDDATA; } /* The FIR and IIR filters must have the same precision. * To simplify the filtering code, only the precision of the * FIR filter is considered. If only the IIR filter is employed, * the FIR filter precision is set to that of the IIR filter, so * that the filtering code can use it. */ if (!fir->order && iir->order) fir->shift = iir->shift; if (s->param_presence_flags & PARAM_HUFFOFFSET) if (get_bits1(gbp)) cp->huff_offset = get_sbits(gbp, 15); cp->codebook = get_bits(gbp, 2); cp->huff_lsbs = get_bits(gbp, 5); if (cp->huff_lsbs > 24) { av_log(m->avctx, AV_LOG_ERROR, "Invalid huff_lsbs.\n"); cp->huff_lsbs = 0; return AVERROR_INVALIDDATA; } cp->sign_huff_offset = calculate_sign_huff(m, substr, ch); return 0; } | 24,246 |
1 | static int wsvqa_read_packet(AVFormatContext *s, AVPacket *pkt) { WsVqaDemuxContext *wsvqa = s->priv_data; AVIOContext *pb = s->pb; int ret = -1; unsigned char preamble[VQA_PREAMBLE_SIZE]; unsigned int chunk_type; unsigned int chunk_size; int skip_byte; while (avio_read(pb, preamble, VQA_PREAMBLE_SIZE) == VQA_PREAMBLE_SIZE) { chunk_type = AV_RB32(&preamble[0]); chunk_size = AV_RB32(&preamble[4]); skip_byte = chunk_size & 0x01; if ((chunk_type == SND0_TAG) || (chunk_type == SND1_TAG) || (chunk_type == SND2_TAG) || (chunk_type == VQFR_TAG)) { ret= av_get_packet(pb, pkt, chunk_size); if (ret<0) return AVERROR(EIO); switch (chunk_type) { case SND0_TAG: case SND1_TAG: case SND2_TAG: if (wsvqa->audio_stream_index == -1) { AVStream *st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); wsvqa->audio_stream_index = st->index; if (!wsvqa->sample_rate) wsvqa->sample_rate = 22050; if (!wsvqa->channels) wsvqa->channels = 1; if (!wsvqa->bps) wsvqa->bps = 8; st->codec->sample_rate = wsvqa->sample_rate; st->codec->bits_per_coded_sample = wsvqa->bps; st->codec->channels = wsvqa->channels; st->codec->codec_type = AVMEDIA_TYPE_AUDIO; avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate); switch (chunk_type) { case SND0_TAG: if (wsvqa->bps == 16) st->codec->codec_id = AV_CODEC_ID_PCM_S16LE; else st->codec->codec_id = AV_CODEC_ID_PCM_U8; break; case SND1_TAG: st->codec->codec_id = AV_CODEC_ID_WESTWOOD_SND1; break; case SND2_TAG: st->codec->codec_id = AV_CODEC_ID_ADPCM_IMA_WS; st->codec->extradata_size = 2; st->codec->extradata = av_mallocz(2 + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); AV_WL16(st->codec->extradata, wsvqa->version); break; } } pkt->stream_index = wsvqa->audio_stream_index; switch (chunk_type) { case SND1_TAG: /* unpacked size is stored in header */ pkt->duration = AV_RL16(pkt->data) / wsvqa->channels; break; case SND2_TAG: /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */ pkt->duration = (chunk_size * 2) / wsvqa->channels; break; } break; case VQFR_TAG: pkt->stream_index = wsvqa->video_stream_index; pkt->duration = 1; break; } /* stay on 16-bit alignment */ if (skip_byte) avio_skip(pb, 1); return ret; } else { switch(chunk_type){ case CMDS_TAG: break; default: av_log(s, AV_LOG_INFO, "Skipping unknown chunk 0x%08X\n", chunk_type); } avio_skip(pb, chunk_size + skip_byte); } } return ret; } | 24,247 |
0 | static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int index, const char *filt_name, const char *args, void *log_ctx) { AVFilter *filt; char inst_name[30]; char tmp_args[256]; int ret; snprintf(inst_name, sizeof(inst_name), "Parsed_%s_%d", filt_name, index); filt = avfilter_get_by_name(filt_name); if (!filt) { av_log(log_ctx, AV_LOG_ERROR, "No such filter: '%s'\n", filt_name); return AVERROR(EINVAL); } *filt_ctx = avfilter_graph_alloc_filter(ctx, filt, inst_name); if (!*filt_ctx) { av_log(log_ctx, AV_LOG_ERROR, "Error creating filter '%s'\n", filt_name); return AVERROR(ENOMEM); } if (!strcmp(filt_name, "scale") && args && !strstr(args, "flags") && ctx->scale_sws_opts) { snprintf(tmp_args, sizeof(tmp_args), "%s:%s", args, ctx->scale_sws_opts); args = tmp_args; } ret = avfilter_init_str(*filt_ctx, args); if (ret < 0) { av_log(log_ctx, AV_LOG_ERROR, "Error initializing filter '%s'", filt_name); if (args) av_log(log_ctx, AV_LOG_ERROR, " with args '%s'", args); av_log(log_ctx, AV_LOG_ERROR, "\n"); return ret; } return 0; } | 24,248 |
0 | static void flush_packet(AVFormatContext *ctx, int stream_index, int64_t pts, int64_t dts, int64_t scr) { MpegMuxContext *s = ctx->priv_data; StreamInfo *stream = ctx->streams[stream_index]->priv_data; uint8_t *buf_ptr; int size, payload_size, startcode, id, stuffing_size, i, header_len; int packet_size; uint8_t buffer[128]; int zero_trail_bytes = 0; int pad_packet_bytes = 0; id = stream->id; #if 0 printf("packet ID=%2x PTS=%0.3f\n", id, pts / 90000.0); #endif buf_ptr = buffer; if (((s->packet_number % s->pack_header_freq) == 0)) { /* output pack and systems header if needed */ size = put_pack_header(ctx, buf_ptr, scr); buf_ptr += size; if (s->is_vcd) { /* there is exactly one system header for each stream in a VCD MPEG, One in the very first video packet and one in the very first audio packet (see VCD standard p. IV-7 and IV-8).*/ if (stream->packet_number==0) { size = put_system_header(ctx, buf_ptr, id); buf_ptr += size; } } else { if ((s->packet_number % s->system_header_freq) == 0) { size = put_system_header(ctx, buf_ptr, 0); buf_ptr += size; } } } size = buf_ptr - buffer; put_buffer(&ctx->pb, buffer, size); packet_size = s->packet_size - size; if (s->is_vcd && id == AUDIO_ID) /* The VCD standard demands that 20 zero bytes follow each audio pack (see standard p. IV-8).*/ zero_trail_bytes += 20; if (s->is_vcd && stream->packet_number==0) { /* the first pack of each stream contains only the pack header, the system header and lots of padding (see VCD standard p. IV-6). In the case of an audio pack, 20 zero bytes are also added at the end.*/ pad_packet_bytes = packet_size - zero_trail_bytes; } packet_size -= pad_packet_bytes + zero_trail_bytes; if (packet_size > 0) { /* packet header size */ packet_size -= 6; /* packet header */ if (s->is_mpeg2) { header_len = 3; } else { header_len = 0; } if (pts != AV_NOPTS_VALUE) { if (dts != pts) header_len += 5 + 5; else header_len += 5; } else { if (!s->is_mpeg2) header_len++; } payload_size = packet_size - header_len; if (id < 0xc0) { startcode = PRIVATE_STREAM_1; payload_size -= 4; if (id >= 0xa0) payload_size -= 3; } else { startcode = 0x100 + id; } stuffing_size = payload_size - stream->buffer_ptr; if (stuffing_size < 0) stuffing_size = 0; put_be32(&ctx->pb, startcode); put_be16(&ctx->pb, packet_size); if (!s->is_mpeg2) for(i=0;i<stuffing_size;i++) put_byte(&ctx->pb, 0xff); if (s->is_mpeg2) { put_byte(&ctx->pb, 0x80); /* mpeg2 id */ if (pts != AV_NOPTS_VALUE) { if (dts != pts) { put_byte(&ctx->pb, 0xc0); /* flags */ put_byte(&ctx->pb, header_len - 3 + stuffing_size); put_timestamp(&ctx->pb, 0x03, pts); put_timestamp(&ctx->pb, 0x01, dts); } else { put_byte(&ctx->pb, 0x80); /* flags */ put_byte(&ctx->pb, header_len - 3 + stuffing_size); put_timestamp(&ctx->pb, 0x02, pts); } } else { put_byte(&ctx->pb, 0x00); /* flags */ put_byte(&ctx->pb, header_len - 3 + stuffing_size); } } else { if (pts != AV_NOPTS_VALUE) { if (dts != pts) { put_timestamp(&ctx->pb, 0x03, pts); put_timestamp(&ctx->pb, 0x01, dts); } else { put_timestamp(&ctx->pb, 0x02, pts); } } else { put_byte(&ctx->pb, 0x0f); } } if (startcode == PRIVATE_STREAM_1) { put_byte(&ctx->pb, id); if (id >= 0xa0) { /* LPCM (XXX: check nb_frames) */ put_byte(&ctx->pb, 7); put_be16(&ctx->pb, 4); /* skip 3 header bytes */ put_byte(&ctx->pb, stream->lpcm_header[0]); put_byte(&ctx->pb, stream->lpcm_header[1]); put_byte(&ctx->pb, stream->lpcm_header[2]); } else { /* AC3 */ put_byte(&ctx->pb, stream->nb_frames); put_be16(&ctx->pb, stream->frame_start_offset); } } if (s->is_mpeg2) 
for(i=0;i<stuffing_size;i++) put_byte(&ctx->pb, 0xff); /* output data */ put_buffer(&ctx->pb, stream->buffer, payload_size - stuffing_size); } if (pad_packet_bytes > 0) put_padding_packet(ctx,&ctx->pb, pad_packet_bytes); for(i=0;i<zero_trail_bytes;i++) put_byte(&ctx->pb, 0x00); put_flush_packet(&ctx->pb); s->packet_number++; stream->packet_number++; stream->nb_frames = 0; stream->frame_start_offset = 0; } | 24,249 |
0 | void ff_xvmc_init_block(MpegEncContext *s) { struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f->data[2]; assert(render && render->xvmc_id == AV_XVMC_ID); s->block = (int16_t (*)[64])(render->data_blocks + render->next_free_data_block_num * 64); } | 24,250 |
0 | static inline void set_p_mv_tables(MpegEncContext * s, int mx, int my) { const int xy= s->mb_x + 1 + (s->mb_y + 1)*(s->mb_width + 2); s->p_mv_table[xy][0] = mx; s->p_mv_table[xy][1] = my; /* has allready been set to the 4 MV if 4MV is done */ if(!(s->flags&CODEC_FLAG_4MV)){ int mot_xy= s->block_index[0]; s->motion_val[mot_xy ][0]= mx; s->motion_val[mot_xy ][1]= my; s->motion_val[mot_xy+1][0]= mx; s->motion_val[mot_xy+1][1]= my; mot_xy += s->block_wrap[0]; s->motion_val[mot_xy ][0]= mx; s->motion_val[mot_xy ][1]= my; s->motion_val[mot_xy+1][0]= mx; s->motion_val[mot_xy+1][1]= my; } } | 24,252 |
0 | static int hds_flush(AVFormatContext *s, OutputStream *os, int final, int64_t end_ts) { HDSContext *c = s->priv_data; int i, ret = 0; char target_filename[1024]; int index = s->streams[os->first_stream]->id; if (!os->packets_written) return 0; avio_flush(os->ctx->pb); os->packets_written = 0; close_file(os); snprintf(target_filename, sizeof(target_filename), "%s/stream%dSeg1-Frag%d", s->filename, index, os->fragment_index); ret = ff_rename(os->temp_filename, target_filename); if (ret < 0) return ret; add_fragment(os, target_filename, os->frag_start_ts, end_ts - os->frag_start_ts); if (!final) { ret = init_file(s, os, end_ts); if (ret < 0) return ret; } if (c->window_size || (final && c->remove_at_exit)) { int remove = os->nb_fragments - c->window_size - c->extra_window_size; if (final && c->remove_at_exit) remove = os->nb_fragments; if (remove > 0) { for (i = 0; i < remove; i++) { unlink(os->fragments[i]->file); av_free(os->fragments[i]); } os->nb_fragments -= remove; memmove(os->fragments, os->fragments + remove, os->nb_fragments * sizeof(*os->fragments)); } } if (ret >= 0) ret = write_abst(s, os, final); return ret; } | 24,253 |
1 | build_dsdt(GArray *table_data, GArray *linker, AcpiPmInfo *pm, AcpiMiscInfo *misc, PcPciInfo *pci, MachineState *machine) { CrsRangeEntry *entry; Aml *dsdt, *sb_scope, *scope, *dev, *method, *field, *pkg, *crs; GPtrArray *mem_ranges = g_ptr_array_new_with_free_func(crs_range_free); GPtrArray *io_ranges = g_ptr_array_new_with_free_func(crs_range_free); PCMachineState *pcms = PC_MACHINE(machine); uint32_t nr_mem = machine->ram_slots; int root_bus_limit = 0xFF; PCIBus *bus = NULL; int i; dsdt = init_aml_allocator(); /* Reserve space for header */ acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader)); build_dbg_aml(dsdt); if (misc->is_piix4) { sb_scope = aml_scope("_SB"); dev = aml_device("PCI0"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03"))); aml_append(dev, aml_name_decl("_ADR", aml_int(0))); aml_append(dev, aml_name_decl("_UID", aml_int(1))); aml_append(sb_scope, dev); aml_append(dsdt, sb_scope); build_hpet_aml(dsdt); build_piix4_pm(dsdt); build_piix4_isa_bridge(dsdt); build_isa_devices_aml(dsdt); build_piix4_pci_hotplug(dsdt); build_piix4_pci0_int(dsdt); } else { sb_scope = aml_scope("_SB"); aml_append(sb_scope, aml_operation_region("PCST", AML_SYSTEM_IO, aml_int(0xae00), 0x0c)); aml_append(sb_scope, aml_operation_region("PCSB", AML_SYSTEM_IO, aml_int(0xae0c), 0x01)); field = aml_field("PCSB", AML_ANY_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS); aml_append(field, aml_named_field("PCIB", 8)); aml_append(sb_scope, field); aml_append(dsdt, sb_scope); sb_scope = aml_scope("_SB"); dev = aml_device("PCI0"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08"))); aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03"))); aml_append(dev, aml_name_decl("_ADR", aml_int(0))); aml_append(dev, aml_name_decl("_UID", aml_int(1))); aml_append(dev, aml_name_decl("SUPP", aml_int(0))); aml_append(dev, aml_name_decl("CTRL", aml_int(0))); aml_append(dev, build_q35_osc_method()); aml_append(sb_scope, dev); aml_append(dsdt, sb_scope); build_hpet_aml(dsdt); build_q35_isa_bridge(dsdt); build_isa_devices_aml(dsdt); build_q35_pci0_int(dsdt); } build_cpu_hotplug_aml(dsdt); build_memory_hotplug_aml(dsdt, nr_mem, pm->mem_hp_io_base, pm->mem_hp_io_len); scope = aml_scope("_GPE"); { aml_append(scope, aml_name_decl("_HID", aml_string("ACPI0006"))); aml_append(scope, aml_method("_L00", 0, AML_NOTSERIALIZED)); if (misc->is_piix4) { method = aml_method("_E01", 0, AML_NOTSERIALIZED); aml_append(method, aml_acquire(aml_name("\\_SB.PCI0.BLCK"), 0xFFFF)); aml_append(method, aml_call0("\\_SB.PCI0.PCNT")); aml_append(method, aml_release(aml_name("\\_SB.PCI0.BLCK"))); aml_append(scope, method); } else { aml_append(scope, aml_method("_L01", 0, AML_NOTSERIALIZED)); } method = aml_method("_E02", 0, AML_NOTSERIALIZED); aml_append(method, aml_call0("\\_SB." 
CPU_SCAN_METHOD)); aml_append(scope, method); method = aml_method("_E03", 0, AML_NOTSERIALIZED); aml_append(method, aml_call0(MEMORY_HOTPLUG_HANDLER_PATH)); aml_append(scope, method); aml_append(scope, aml_method("_L04", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L05", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L06", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L07", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L08", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L09", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L0A", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L0B", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L0C", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L0D", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L0E", 0, AML_NOTSERIALIZED)); aml_append(scope, aml_method("_L0F", 0, AML_NOTSERIALIZED)); } aml_append(dsdt, scope); bus = PC_MACHINE(machine)->bus; if (bus) { QLIST_FOREACH(bus, &bus->child, sibling) { uint8_t bus_num = pci_bus_num(bus); uint8_t numa_node = pci_bus_numa_node(bus); /* look only for expander root buses */ if (!pci_bus_is_root(bus)) { continue; } if (bus_num < root_bus_limit) { root_bus_limit = bus_num - 1; } scope = aml_scope("\\_SB"); dev = aml_device("PC%.02X", bus_num); aml_append(dev, aml_name_decl("_UID", aml_int(bus_num))); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03"))); aml_append(dev, aml_name_decl("_BBN", aml_int(bus_num))); if (numa_node != NUMA_NODE_UNASSIGNED) { aml_append(dev, aml_name_decl("_PXM", aml_int(numa_node))); } aml_append(dev, build_prt(false)); crs = build_crs(PCI_HOST_BRIDGE(BUS(bus)->parent), io_ranges, mem_ranges); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); aml_append(dsdt, scope); } } scope = aml_scope("\\_SB.PCI0"); /* build PCI0._CRS */ crs = aml_resource_template(); aml_append(crs, aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, 0x0000, 0x0, root_bus_limit, 0x0000, root_bus_limit + 1)); aml_append(crs, aml_io(AML_DECODE16, 0x0CF8, 0x0CF8, 0x01, 0x08)); aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0x0000, 0x0000, 0x0CF7, 0x0000, 0x0CF8)); crs_replace_with_free_ranges(io_ranges, 0x0D00, 0xFFFF); for (i = 0; i < io_ranges->len; i++) { entry = g_ptr_array_index(io_ranges, i); aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, AML_ENTIRE_RANGE, 0x0000, entry->base, entry->limit, 0x0000, entry->limit - entry->base + 1)); } aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, 0, 0x000A0000, 0x000BFFFF, 0, 0x00020000)); crs_replace_with_free_ranges(mem_ranges, pci->w32.begin, pci->w32.end - 1); for (i = 0; i < mem_ranges->len; i++) { entry = g_ptr_array_index(mem_ranges, i); aml_append(crs, aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_NON_CACHEABLE, AML_READ_WRITE, 0, entry->base, entry->limit, 0, entry->limit - entry->base + 1)); } if (pci->w64.begin) { aml_append(crs, aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, AML_CACHEABLE, AML_READ_WRITE, 0, pci->w64.begin, pci->w64.end - 1, 0, pci->w64.end - pci->w64.begin)); } if (misc->tpm_version != TPM_VERSION_UNSPEC) { aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE, TPM_TIS_ADDR_SIZE, AML_READ_WRITE)); } aml_append(scope, aml_name_decl("_CRS", crs)); /* reserve GPE0 block resources */ dev = aml_device("GPE0"); aml_append(dev, aml_name_decl("_HID", 
aml_string("PNP0A06"))); aml_append(dev, aml_name_decl("_UID", aml_string("GPE0 resources"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->gpe0_blk, pm->gpe0_blk, 1, pm->gpe0_blk_len) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); g_ptr_array_free(io_ranges, true); g_ptr_array_free(mem_ranges, true); /* reserve PCIHP resources */ if (pm->pcihp_io_len) { dev = aml_device("PHPR"); aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06"))); aml_append(dev, aml_name_decl("_UID", aml_string("PCI Hotplug resources"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, pm->pcihp_io_base, pm->pcihp_io_base, 1, pm->pcihp_io_len) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); } aml_append(dsdt, scope); /* create S3_ / S4_ / S5_ packages if necessary */ scope = aml_scope("\\"); if (!pm->s3_disabled) { pkg = aml_package(4); aml_append(pkg, aml_int(1)); /* PM1a_CNT.SLP_TYP */ aml_append(pkg, aml_int(1)); /* PM1b_CNT.SLP_TYP, FIXME: not impl. */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl("_S3", pkg)); } if (!pm->s4_disabled) { pkg = aml_package(4); aml_append(pkg, aml_int(pm->s4_val)); /* PM1a_CNT.SLP_TYP */ /* PM1b_CNT.SLP_TYP, FIXME: not impl. */ aml_append(pkg, aml_int(pm->s4_val)); aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl("_S4", pkg)); } pkg = aml_package(4); aml_append(pkg, aml_int(0)); /* PM1a_CNT.SLP_TYP */ aml_append(pkg, aml_int(0)); /* PM1b_CNT.SLP_TYP not impl. */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(pkg, aml_int(0)); /* reserved */ aml_append(scope, aml_name_decl("_S5", pkg)); aml_append(dsdt, scope); /* create fw_cfg node, unconditionally */ { /* when using port i/o, the 8-bit data register *always* overlaps * with half of the 16-bit control register. Hence, the total size * of the i/o region used is FW_CFG_CTL_SIZE; when using DMA, the * DMA control register is located at FW_CFG_DMA_IO_BASE + 4 */ uint8_t io_size = object_property_get_bool(OBJECT(pcms->fw_cfg), "dma_enabled", NULL) ? 
ROUND_UP(FW_CFG_CTL_SIZE, 4) + sizeof(dma_addr_t) : FW_CFG_CTL_SIZE; scope = aml_scope("\\_SB.PCI0"); dev = aml_device("FWCF"); aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, FW_CFG_IO_BASE, FW_CFG_IO_BASE, 0x01, io_size) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); aml_append(dsdt, scope); } if (misc->applesmc_io_base) { scope = aml_scope("\\_SB.PCI0.ISA"); dev = aml_device("SMC"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("APP0001"))); /* device present, functioning, decoding, not shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, misc->applesmc_io_base, misc->applesmc_io_base, 0x01, APPLESMC_MAX_DATA_LENGTH) ); aml_append(crs, aml_irq_no_flags(6)); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); aml_append(dsdt, scope); } if (misc->pvpanic_port) { scope = aml_scope("\\_SB.PCI0.ISA"); dev = aml_device("PEVT"); aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0001"))); crs = aml_resource_template(); aml_append(crs, aml_io(AML_DECODE16, misc->pvpanic_port, misc->pvpanic_port, 1, 1) ); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(dev, aml_operation_region("PEOR", AML_SYSTEM_IO, aml_int(misc->pvpanic_port), 1)); field = aml_field("PEOR", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); aml_append(field, aml_named_field("PEPT", 8)); aml_append(dev, field); /* device present, functioning, decoding, shown in UI */ aml_append(dev, aml_name_decl("_STA", aml_int(0xF))); method = aml_method("RDPT", 0, AML_NOTSERIALIZED); aml_append(method, aml_store(aml_name("PEPT"), aml_local(0))); aml_append(method, aml_return(aml_local(0))); aml_append(dev, method); method = aml_method("WRPT", 1, AML_NOTSERIALIZED); aml_append(method, aml_store(aml_arg(0), aml_name("PEPT"))); aml_append(dev, method); aml_append(scope, dev); aml_append(dsdt, scope); } sb_scope = aml_scope("\\_SB"); { build_processor_devices(sb_scope, machine, pm); build_memory_devices(sb_scope, nr_mem, pm->mem_hp_io_base, pm->mem_hp_io_len); { Object *pci_host; PCIBus *bus = NULL; pci_host = acpi_get_i386_pci_host(); if (pci_host) { bus = PCI_HOST_BRIDGE(pci_host)->bus; } if (bus) { Aml *scope = aml_scope("PCI0"); /* Scan all PCI buses. Generate tables to support hotplug. */ build_append_pci_bus_devices(scope, bus, pm->pcihp_bridge_en); if (misc->tpm_version != TPM_VERSION_UNSPEC) { dev = aml_device("ISA.TPM"); aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0C31"))); aml_append(dev, aml_name_decl("_STA", aml_int(0xF))); crs = aml_resource_template(); aml_append(crs, aml_memory32_fixed(TPM_TIS_ADDR_BASE, TPM_TIS_ADDR_SIZE, AML_READ_WRITE)); aml_append(crs, aml_irq_no_flags(TPM_TIS_IRQ)); aml_append(dev, aml_name_decl("_CRS", crs)); aml_append(scope, dev); } aml_append(sb_scope, scope); } } aml_append(dsdt, sb_scope); } /* copy AML table into ACPI tables blob and patch header there */ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); build_header(linker, table_data, (void *)(table_data->data + table_data->len - dsdt->buf->len), "DSDT", dsdt->buf->len, 1, NULL, NULL); free_aml_allocator(); } | 24,254 |
1 | static void init_input_filter(FilterGraph *fg, AVFilterInOut *in) { InputStream *ist; enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx); int i; // TODO: support other filter types if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) { av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported " "currently.\n"); exit_program(1); } if (in->name) { AVFormatContext *s; AVStream *st = NULL; char *p; int file_idx = strtol(in->name, &p, 0); if (file_idx < 0 || file_idx >= nb_input_files) { av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtegraph description %s.\n", file_idx, fg->graph_desc); exit_program(1); } s = input_files[file_idx]->ctx; for (i = 0; i < s->nb_streams; i++) { if (s->streams[i]->codec->codec_type != type) continue; if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) { st = s->streams[i]; break; } } if (!st) { av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s " "matches no streams.\n", p, fg->graph_desc); exit_program(1); } ist = input_streams[input_files[file_idx]->ist_index + st->index]; } else { /* find the first unused stream of corresponding type */ for (i = 0; i < nb_input_streams; i++) { ist = input_streams[i]; if (ist->st->codec->codec_type == type && ist->discard) break; } if (i == nb_input_streams) { av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for " "unlabeled input pad %d on filter %s", in->pad_idx, in->filter_ctx->name); exit_program(1); } } ist->discard = 0; ist->decoding_needed = 1; ist->st->discard = AVDISCARD_NONE; fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs, fg->nb_inputs + 1); if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0])))) exit_program(1); fg->inputs[fg->nb_inputs - 1]->ist = ist; fg->inputs[fg->nb_inputs - 1]->graph = fg; ist->filters = grow_array(ist->filters, sizeof(*ist->filters), &ist->nb_filters, ist->nb_filters + 1); ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1]; } | 24,255 |
1 | static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, int width) { #ifdef HAVE_MMX asm volatile( "movq "MANGLE(bm01010101)", %%mm2\n\t" "mov %0, %%"REG_a" \n\t" "1: \n\t" "movq (%1, %%"REG_a",2), %%mm0 \n\t" "movq 8(%1, %%"REG_a",2), %%mm1 \n\t" "pand %%mm2, %%mm0 \n\t" "pand %%mm2, %%mm1 \n\t" "packuswb %%mm1, %%mm0 \n\t" "movq %%mm0, (%2, %%"REG_a") \n\t" "add $8, %%"REG_a" \n\t" " js 1b \n\t" : : "g" ((long)-width), "r" (src+width*2), "r" (dst+width) : "%"REG_a ); #else int i; for(i=0; i<width; i++) dst[i]= src[2*i]; #endif } | 24,256 |
1 | static void print_pte(Monitor *mon, uint32_t addr, uint32_t pte, uint32_t mask) { monitor_printf(mon, "%08x: %08x %c%c%c%c%c%c%c%c\n", addr, pte & mask, pte & PG_GLOBAL_MASK ? 'G' : '-', pte & PG_PSE_MASK ? 'P' : '-', pte & PG_DIRTY_MASK ? 'D' : '-', pte & PG_ACCESSED_MASK ? 'A' : '-', pte & PG_PCD_MASK ? 'C' : '-', pte & PG_PWT_MASK ? 'T' : '-', pte & PG_USER_MASK ? 'U' : '-', pte & PG_RW_MASK ? 'W' : '-'); } | 24,257 |
1 | av_cold void ff_msmpeg4_encode_init(MpegEncContext *s) { static int init_done=0; int i; common_init(s); if(s->msmpeg4_version>=4){ s->min_qcoeff= -255; s->max_qcoeff= 255; } if (!init_done) { /* init various encoding tables */ init_done = 1; init_mv_table(&mv_tables[0]); init_mv_table(&mv_tables[1]); for(i=0;i<NB_RL_TABLES;i++) init_rl(&rl_table[i], static_rl_table_store[i]); for(i=0; i<NB_RL_TABLES; i++){ int level; for(level=0; level<=MAX_LEVEL; level++){ int run; for(run=0; run<=MAX_RUN; run++){ int last; for(last=0; last<2; last++){ rl_length[i][level][run][last]= get_size_of_code(s, &rl_table[ i], last, run, level, 0); } } } } } } | 24,258 |
1 | av_cold void ff_init_range_decoder(RangeCoder *c, const uint8_t *buf, int buf_size) { /* cast to avoid compiler warning */ ff_init_range_encoder(c, (uint8_t *)buf, buf_size); c->low = AV_RB16(c->bytestream); c->bytestream += 2; } | 24,259 |
1 | static void gen_load_exclusive(DisasContext *s, int rt, int rt2, TCGv addr, int size) { TCGv tmp; switch (size) { case 0: tmp = gen_ld8u(addr, IS_USER(s)); break; case 1: tmp = gen_ld16u(addr, IS_USER(s)); break; case 2: case 3: tmp = gen_ld32(addr, IS_USER(s)); break; default: abort(); } tcg_gen_mov_i32(cpu_exclusive_val, tmp); store_reg(s, rt, tmp); if (size == 3) { TCGv tmp2 = new_tmp(); tcg_gen_addi_i32(tmp2, addr, 4); tmp = gen_ld32(tmp2, IS_USER(s)); dead_tmp(tmp2); tcg_gen_mov_i32(cpu_exclusive_high, tmp); store_reg(s, rt2, tmp); } tcg_gen_mov_i32(cpu_exclusive_addr, addr); } | 24,260 |
1 | static int vcr1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; VCR1Context *const a = avctx->priv_data; AVFrame *const p = data; const uint8_t *bytestream = buf; int i, x, y, ret; if ((ret = ff_get_buffer(avctx, p, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } p->pict_type = AV_PICTURE_TYPE_I; p->key_frame = 1; if (buf_size < 32) goto packet_small; for (i = 0; i < 16; i++) { a->delta[i] = *bytestream++; bytestream++; buf_size--; } for (y = 0; y < avctx->height; y++) { int offset; uint8_t *luma = &p->data[0][y * p->linesize[0]]; if ((y & 3) == 0) { uint8_t *cb = &p->data[1][(y >> 2) * p->linesize[1]]; uint8_t *cr = &p->data[2][(y >> 2) * p->linesize[2]]; if (buf_size < 4 + avctx->width) goto packet_small; for (i = 0; i < 4; i++) a->offset[i] = *bytestream++; buf_size -= 4; offset = a->offset[0] - a->delta[bytestream[2] & 0xF]; for (x = 0; x < avctx->width; x += 4) { luma[0] = offset += a->delta[bytestream[2] & 0xF]; luma[1] = offset += a->delta[bytestream[2] >> 4]; luma[2] = offset += a->delta[bytestream[0] & 0xF]; luma[3] = offset += a->delta[bytestream[0] >> 4]; luma += 4; *cb++ = bytestream[3]; *cr++ = bytestream[1]; bytestream += 4; } } else { if (buf_size < avctx->width / 2) goto packet_small; offset = a->offset[y & 3] - a->delta[bytestream[2] & 0xF]; for (x = 0; x < avctx->width; x += 8) { luma[0] = offset += a->delta[bytestream[2] & 0xF]; luma[1] = offset += a->delta[bytestream[2] >> 4]; luma[2] = offset += a->delta[bytestream[3] & 0xF]; luma[3] = offset += a->delta[bytestream[3] >> 4]; luma[4] = offset += a->delta[bytestream[0] & 0xF]; luma[5] = offset += a->delta[bytestream[0] >> 4]; luma[6] = offset += a->delta[bytestream[1] & 0xF]; luma[7] = offset += a->delta[bytestream[1] >> 4]; luma += 8; bytestream += 4; } } } *got_frame = 1; return buf_size; packet_small: av_log(avctx, AV_LOG_ERROR, "Input packet too small.\n"); return AVERROR_INVALIDDATA; } | 24,261 |
1 | static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args) { int c; switch (opc) { case INDEX_op_exit_tb: tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]); tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) | INSN_IMM13(8)); tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) | INSN_RS2(TCG_REG_G0)); break; case INDEX_op_goto_tb: if (s->tb_jmp_offset) { /* direct jump method */ tcg_out_sethi(s, TCG_REG_I5, args[0] & 0xffffe000); tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) | INSN_IMM13((args[0] & 0x1fff))); s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf; } else { /* indirect jump method */ tcg_out_ld_ptr(s, TCG_REG_I5, (tcg_target_long)(s->tb_next + args[0])); tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) | INSN_RS2(TCG_REG_G0)); } tcg_out_nop(s); s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf; break; case INDEX_op_call: if (const_args[0]) tcg_out32(s, CALL | ((((tcg_target_ulong)args[0] - (tcg_target_ulong)s->code_ptr) >> 2) & 0x3fffffff)); else { tcg_out_ld_ptr(s, TCG_REG_I5, (tcg_target_long)(s->tb_next + args[0])); tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_I5) | INSN_RS2(TCG_REG_G0)); } /* delay slot */ tcg_out_nop(s); break; case INDEX_op_jmp: case INDEX_op_br: tcg_out_branch_i32(s, COND_A, args[0]); tcg_out_nop(s); break; case INDEX_op_movi_i32: tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]); break; #if TCG_TARGET_REG_BITS == 64 #define OP_32_64(x) \ glue(glue(case INDEX_op_, x), _i32): \ glue(glue(case INDEX_op_, x), _i64) #else #define OP_32_64(x) \ glue(glue(case INDEX_op_, x), _i32) #endif OP_32_64(ld8u): tcg_out_ldst(s, args[0], args[1], args[2], LDUB); break; OP_32_64(ld8s): tcg_out_ldst(s, args[0], args[1], args[2], LDSB); break; OP_32_64(ld16u): tcg_out_ldst(s, args[0], args[1], args[2], LDUH); break; OP_32_64(ld16s): tcg_out_ldst(s, args[0], args[1], args[2], LDSH); break; case INDEX_op_ld_i32: #if TCG_TARGET_REG_BITS == 64 case INDEX_op_ld32u_i64: #endif tcg_out_ldst(s, args[0], args[1], args[2], LDUW); break; OP_32_64(st8): tcg_out_ldst(s, args[0], args[1], args[2], STB); break; OP_32_64(st16): tcg_out_ldst(s, args[0], args[1], args[2], STH); break; case INDEX_op_st_i32: #if TCG_TARGET_REG_BITS == 64 case INDEX_op_st32_i64: #endif tcg_out_ldst(s, args[0], args[1], args[2], STW); break; OP_32_64(add): c = ARITH_ADD; goto gen_arith; OP_32_64(sub): c = ARITH_SUB; goto gen_arith; OP_32_64(and): c = ARITH_AND; goto gen_arith; OP_32_64(andc): c = ARITH_ANDN; goto gen_arith; OP_32_64(or): c = ARITH_OR; goto gen_arith; OP_32_64(orc): c = ARITH_ORN; goto gen_arith; OP_32_64(xor): c = ARITH_XOR; goto gen_arith; case INDEX_op_shl_i32: c = SHIFT_SLL; goto gen_arith; case INDEX_op_shr_i32: c = SHIFT_SRL; goto gen_arith; case INDEX_op_sar_i32: c = SHIFT_SRA; goto gen_arith; case INDEX_op_mul_i32: c = ARITH_UMUL; goto gen_arith; OP_32_64(neg): c = ARITH_SUB; goto gen_arith1; OP_32_64(not): c = ARITH_ORN; goto gen_arith1; case INDEX_op_div_i32: tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0); break; case INDEX_op_divu_i32: tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1); break; case INDEX_op_rem_i32: case INDEX_op_remu_i32: tcg_out_div32(s, TCG_REG_I5, args[1], args[2], const_args[2], opc == INDEX_op_remu_i32); tcg_out_arithc(s, TCG_REG_I5, TCG_REG_I5, args[2], const_args[2], ARITH_UMUL); tcg_out_arith(s, args[0], args[1], TCG_REG_I5, ARITH_SUB); break; case INDEX_op_brcond_i32: tcg_out_brcond_i32(s, args[2], 
args[0], args[1], const_args[1], args[3]); break; case INDEX_op_setcond_i32: tcg_out_setcond_i32(s, args[3], args[0], args[1], args[2], const_args[2]); break; #if TCG_TARGET_REG_BITS == 32 case INDEX_op_brcond2_i32: tcg_out_brcond2_i32(s, args[4], args[0], args[1], args[2], const_args[2], args[3], const_args[3], args[5]); break; case INDEX_op_setcond2_i32: tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2], args[3], const_args[3], args[4], const_args[4]); break; case INDEX_op_add2_i32: tcg_out_arithc(s, args[0], args[2], args[4], const_args[4], ARITH_ADDCC); tcg_out_arithc(s, args[1], args[3], args[5], const_args[5], ARITH_ADDX); break; case INDEX_op_sub2_i32: tcg_out_arithc(s, args[0], args[2], args[4], const_args[4], ARITH_SUBCC); tcg_out_arithc(s, args[1], args[3], args[5], const_args[5], ARITH_SUBX); break; case INDEX_op_mulu2_i32: tcg_out_arithc(s, args[0], args[2], args[3], const_args[3], ARITH_UMUL); tcg_out_rdy(s, args[1]); break; #endif case INDEX_op_qemu_ld8u: tcg_out_qemu_ld(s, args, 0); break; case INDEX_op_qemu_ld8s: tcg_out_qemu_ld(s, args, 0 | 4); break; case INDEX_op_qemu_ld16u: tcg_out_qemu_ld(s, args, 1); break; case INDEX_op_qemu_ld16s: tcg_out_qemu_ld(s, args, 1 | 4); break; case INDEX_op_qemu_ld32: #if TCG_TARGET_REG_BITS == 64 case INDEX_op_qemu_ld32u: #endif tcg_out_qemu_ld(s, args, 2); break; #if TCG_TARGET_REG_BITS == 64 case INDEX_op_qemu_ld32s: tcg_out_qemu_ld(s, args, 2 | 4); break; #endif case INDEX_op_qemu_ld64: tcg_out_qemu_ld(s, args, 3); break; case INDEX_op_qemu_st8: tcg_out_qemu_st(s, args, 0); break; case INDEX_op_qemu_st16: tcg_out_qemu_st(s, args, 1); break; case INDEX_op_qemu_st32: tcg_out_qemu_st(s, args, 2); break; case INDEX_op_qemu_st64: tcg_out_qemu_st(s, args, 3); break; #if TCG_TARGET_REG_BITS == 64 case INDEX_op_movi_i64: tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]); break; case INDEX_op_ld32s_i64: tcg_out_ldst(s, args[0], args[1], args[2], LDSW); break; case INDEX_op_ld_i64: tcg_out_ldst(s, args[0], args[1], args[2], LDX); break; case INDEX_op_st_i64: tcg_out_ldst(s, args[0], args[1], args[2], STX); break; case INDEX_op_shl_i64: c = SHIFT_SLLX; goto gen_arith; case INDEX_op_shr_i64: c = SHIFT_SRLX; goto gen_arith; case INDEX_op_sar_i64: c = SHIFT_SRAX; goto gen_arith; case INDEX_op_mul_i64: c = ARITH_MULX; goto gen_arith; case INDEX_op_div_i64: c = ARITH_SDIVX; goto gen_arith; case INDEX_op_divu_i64: c = ARITH_UDIVX; goto gen_arith; case INDEX_op_rem_i64: case INDEX_op_remu_i64: tcg_out_arithc(s, TCG_REG_I5, args[1], args[2], const_args[2], opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX); tcg_out_arithc(s, TCG_REG_I5, TCG_REG_I5, args[2], const_args[2], ARITH_MULX); tcg_out_arith(s, args[0], args[1], TCG_REG_I5, ARITH_SUB); break; case INDEX_op_ext32s_i64: if (const_args[1]) { tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]); } else { tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA); } break; case INDEX_op_ext32u_i64: if (const_args[1]) { tcg_out_movi_imm32(s, args[0], args[1]); } else { tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL); } break; case INDEX_op_brcond_i64: tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1], args[3]); break; case INDEX_op_setcond_i64: tcg_out_setcond_i64(s, args[3], args[0], args[1], args[2], const_args[2]); break; #endif gen_arith: tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c); break; gen_arith1: tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c); break; default: fprintf(stderr, "unknown opcode 0x%x\n", opc); tcg_abort(); } } | 24,262 |
1 | static void sbr_hf_inverse_filter(SBRDSPContext *dsp, int (*alpha0)[2], int (*alpha1)[2], const int X_low[32][40][2], int k0) { int k; int shift, round; for (k = 0; k < k0; k++) { SoftFloat phi[3][2][2]; SoftFloat a00, a01, a10, a11; SoftFloat dk; dsp->autocorrelate(X_low[k], phi); dk = av_sub_sf(av_mul_sf(phi[2][1][0], phi[1][0][0]), av_mul_sf(av_add_sf(av_mul_sf(phi[1][1][0], phi[1][1][0]), av_mul_sf(phi[1][1][1], phi[1][1][1])), FLOAT_0999999)); if (!dk.mant) { a10 = FLOAT_0; a11 = FLOAT_0; } else { SoftFloat temp_real, temp_im; temp_real = av_sub_sf(av_sub_sf(av_mul_sf(phi[0][0][0], phi[1][1][0]), av_mul_sf(phi[0][0][1], phi[1][1][1])), av_mul_sf(phi[0][1][0], phi[1][0][0])); temp_im = av_sub_sf(av_add_sf(av_mul_sf(phi[0][0][0], phi[1][1][1]), av_mul_sf(phi[0][0][1], phi[1][1][0])), av_mul_sf(phi[0][1][1], phi[1][0][0])); a10 = av_div_sf(temp_real, dk); a11 = av_div_sf(temp_im, dk); } if (!phi[1][0][0].mant) { a00 = FLOAT_0; a01 = FLOAT_0; } else { SoftFloat temp_real, temp_im; temp_real = av_add_sf(phi[0][0][0], av_add_sf(av_mul_sf(a10, phi[1][1][0]), av_mul_sf(a11, phi[1][1][1]))); temp_im = av_add_sf(phi[0][0][1], av_sub_sf(av_mul_sf(a11, phi[1][1][0]), av_mul_sf(a10, phi[1][1][1]))); temp_real.mant = -temp_real.mant; temp_im.mant = -temp_im.mant; a00 = av_div_sf(temp_real, phi[1][0][0]); a01 = av_div_sf(temp_im, phi[1][0][0]); } shift = a00.exp; if (shift >= 3) alpha0[k][0] = 0x7fffffff; else if (shift <= -30) alpha0[k][0] = 0; else { a00.mant *= 2; shift = 2-shift; if (shift == 0) alpha0[k][0] = a00.mant; else { round = 1 << (shift-1); alpha0[k][0] = (a00.mant + round) >> shift; } } shift = a01.exp; if (shift >= 3) alpha0[k][1] = 0x7fffffff; else if (shift <= -30) alpha0[k][1] = 0; else { a01.mant *= 2; shift = 2-shift; if (shift == 0) alpha0[k][1] = a01.mant; else { round = 1 << (shift-1); alpha0[k][1] = (a01.mant + round) >> shift; } } shift = a10.exp; if (shift >= 3) alpha1[k][0] = 0x7fffffff; else if (shift <= -30) alpha1[k][0] = 0; else { a10.mant *= 2; shift = 2-shift; if (shift == 0) alpha1[k][0] = a10.mant; else { round = 1 << (shift-1); alpha1[k][0] = (a10.mant + round) >> shift; } } shift = a11.exp; if (shift >= 3) alpha1[k][1] = 0x7fffffff; else if (shift <= -30) alpha1[k][1] = 0; else { a11.mant *= 2; shift = 2-shift; if (shift == 0) alpha1[k][1] = a11.mant; else { round = 1 << (shift-1); alpha1[k][1] = (a11.mant + round) >> shift; } } shift = (int)(((int64_t)(alpha1[k][0]>>1) * (alpha1[k][0]>>1) + \ (int64_t)(alpha1[k][1]>>1) * (alpha1[k][1]>>1) + \ 0x40000000) >> 31); if (shift >= 0x20000000){ alpha1[k][0] = 0; alpha1[k][1] = 0; alpha0[k][0] = 0; alpha0[k][1] = 0; } shift = (int)(((int64_t)(alpha0[k][0]>>1) * (alpha0[k][0]>>1) + \ (int64_t)(alpha0[k][1]>>1) * (alpha0[k][1]>>1) + \ 0x40000000) >> 31); if (shift >= 0x20000000){ alpha1[k][0] = 0; alpha1[k][1] = 0; alpha0[k][0] = 0; alpha0[k][1] = 0; } } } | 24,263 |
1 | void nbd_client_session_close(NbdClientSession *client) { if (!client->bs) { return; } nbd_teardown_connection(client); client->bs = NULL; } | 24,264 |
1 | static void v9fs_rename(void *opaque) { int32_t fid; ssize_t err = 0; size_t offset = 7; V9fsString name; int32_t newdirfid; V9fsFidState *fidp; V9fsPDU *pdu = opaque; V9fsState *s = pdu->s; v9fs_string_init(&name); err = pdu_unmarshal(pdu, offset, "dds", &fid, &newdirfid, &name); if (err < 0) { goto out_nofid; } fidp = get_fid(pdu, fid); if (fidp == NULL) { err = -ENOENT; goto out_nofid; } BUG_ON(fidp->fid_type != P9_FID_NONE); /* if fs driver is not path based, return EOPNOTSUPP */ if (!(pdu->s->ctx.export_flags & V9FS_PATHNAME_FSCONTEXT)) { err = -EOPNOTSUPP; goto out; } v9fs_path_write_lock(s); err = v9fs_complete_rename(pdu, fidp, newdirfid, &name); v9fs_path_unlock(s); if (!err) { err = offset; } out: put_fid(pdu, fidp); out_nofid: pdu_complete(pdu, err); v9fs_string_free(&name); } | 24,265 |
0 | static int open_in(HLSContext *c, AVIOContext **in, const char *url) { AVDictionary *tmp = NULL; int ret; av_dict_copy(&tmp, c->avio_opts, 0); ret = avio_open2(in, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp); av_dict_free(&tmp); return ret; } | 24,266 |
0 | static int mov_write_trak_tag(AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track, AVStream *st) { int64_t pos = avio_tell(pb); avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, "trak"); mov_write_tkhd_tag(pb, track, st); if (supports_edts(mov)) mov_write_edts_tag(pb, track); // PSP Movies and several other cases require edts box if (track->tref_tag) mov_write_tref_tag(pb, track); mov_write_mdia_tag(pb, track); if (track->mode == MODE_PSP) mov_write_uuid_tag_psp(pb, track); // PSP Movies require this uuid box if (track->tag == MKTAG('r','t','p',' ')) mov_write_udta_sdp(pb, track); if (track->mode == MODE_MOV) { if (track->enc->codec_type == AVMEDIA_TYPE_VIDEO) { double sample_aspect_ratio = av_q2d(st->sample_aspect_ratio); if (st->sample_aspect_ratio.num && 1.0 != sample_aspect_ratio) { mov_write_tapt_tag(pb, track); } } if (is_clcp_track(track)) { mov_write_tapt_tag(pb, track); } } return update_size(pb, pos); } | 24,267 |
0 | static int write_extradata(FFV1Context *f) { RangeCoder *const c = &f->c; uint8_t state[CONTEXT_SIZE]; int i, j, k; uint8_t state2[32][CONTEXT_SIZE]; unsigned v; memset(state2, 128, sizeof(state2)); memset(state, 128, sizeof(state)); f->avctx->extradata_size = 10000 + 4 + (11 * 11 * 5 * 5 * 5 + 11 * 11 * 11) * 32; f->avctx->extradata = av_malloc(f->avctx->extradata_size); ff_init_range_encoder(c, f->avctx->extradata, f->avctx->extradata_size); ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8); put_symbol(c, state, f->version, 0); if (f->version > 2) { if (f->version == 3) f->minor_version = 2; put_symbol(c, state, f->minor_version, 0); } put_symbol(c, state, f->ac, 0); if (f->ac > 1) for (i = 1; i < 256; i++) put_symbol(c, state, f->state_transition[i] - c->one_state[i], 1); put_symbol(c, state, f->colorspace, 0); // YUV cs type put_symbol(c, state, f->bits_per_raw_sample, 0); put_rac(c, state, f->chroma_planes); put_symbol(c, state, f->chroma_h_shift, 0); put_symbol(c, state, f->chroma_v_shift, 0); put_rac(c, state, f->transparency); put_symbol(c, state, f->num_h_slices - 1, 0); put_symbol(c, state, f->num_v_slices - 1, 0); put_symbol(c, state, f->quant_table_count, 0); for (i = 0; i < f->quant_table_count; i++) write_quant_tables(c, f->quant_tables[i]); for (i = 0; i < f->quant_table_count; i++) { for (j = 0; j < f->context_count[i] * CONTEXT_SIZE; j++) if (f->initial_states[i] && f->initial_states[i][0][j] != 128) break; if (j < f->context_count[i] * CONTEXT_SIZE) { put_rac(c, state, 1); for (j = 0; j < f->context_count[i]; j++) for (k = 0; k < CONTEXT_SIZE; k++) { int pred = j ? f->initial_states[i][j - 1][k] : 128; put_symbol(c, state2[k], (int8_t)(f->initial_states[i][j][k] - pred), 1); } } else { put_rac(c, state, 0); } } if (f->version > 2) { put_symbol(c, state, f->ec, 0); } f->avctx->extradata_size = ff_rac_terminate(c); v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0, f->avctx->extradata, f->avctx->extradata_size); AV_WL32(f->avctx->extradata + f->avctx->extradata_size, v); f->avctx->extradata_size += 4; return 0; } | 24,268 |
0 | static int mpeg4_decode_gop_header(MpegEncContext * s, GetBitContext *gb){ int hours, minutes, seconds; if(!show_bits(gb, 18)){ av_log(s->avctx, AV_LOG_WARNING, "GOP header invalid\n"); return -1; } hours= get_bits(gb, 5); minutes= get_bits(gb, 6); skip_bits1(gb); seconds= get_bits(gb, 6); s->time_base= seconds + 60*(minutes + 60*hours); skip_bits1(gb); skip_bits1(gb); return 0; } | 24,270 |
1 | static void init_vlcs(ASV1Context *a){ static int done = 0; if (!done) { done = 1; init_vlc(&ccp_vlc, VLC_BITS, 17, &ccp_tab[0][1], 2, 1, &ccp_tab[0][0], 2, 1); init_vlc(&dc_ccp_vlc, VLC_BITS, 8, &dc_ccp_tab[0][1], 2, 1, &dc_ccp_tab[0][0], 2, 1); init_vlc(&ac_ccp_vlc, VLC_BITS, 16, &ac_ccp_tab[0][1], 2, 1, &ac_ccp_tab[0][0], 2, 1); init_vlc(&level_vlc, VLC_BITS, 7, &level_tab[0][1], 2, 1, &level_tab[0][0], 2, 1); init_vlc(&asv2_level_vlc, ASV2_LEVEL_VLC_BITS, 63, &asv2_level_tab[0][1], 2, 1, &asv2_level_tab[0][0], 2, 1); } } | 24,271 |
1 | static int applehttp_close(URLContext *h) { AppleHTTPContext *s = h->priv_data; free_segment_list(s); free_variant_list(s); ffurl_close(s->seg_hd); av_free(s); return 0; } | 24,272 |
1 | int swri_dither_init(SwrContext *s, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt) { int i; double scale = 0; if (s->dither.method > SWR_DITHER_TRIANGULAR_HIGHPASS && s->dither.method <= SWR_DITHER_NS) return AVERROR(EINVAL); out_fmt = av_get_packed_sample_fmt(out_fmt); in_fmt = av_get_packed_sample_fmt( in_fmt); if(in_fmt == AV_SAMPLE_FMT_FLT || in_fmt == AV_SAMPLE_FMT_DBL){ if(out_fmt == AV_SAMPLE_FMT_S32) scale = 1.0/(1L<<31); if(out_fmt == AV_SAMPLE_FMT_S16) scale = 1.0/(1L<<15); if(out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1.0/(1L<< 7); } if(in_fmt == AV_SAMPLE_FMT_S32 && out_fmt == AV_SAMPLE_FMT_S16) scale = 1L<<16; if(in_fmt == AV_SAMPLE_FMT_S32 && out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1L<<24; if(in_fmt == AV_SAMPLE_FMT_S16 && out_fmt == AV_SAMPLE_FMT_U8 ) scale = 1L<<8; scale *= s->dither.scale; s->dither.ns_pos = 0; s->dither.noise_scale= scale; s->dither.ns_scale = scale; s->dither.ns_scale_1 = 1/scale; memset(s->dither.ns_errors, 0, sizeof(s->dither.ns_errors)); for (i=0; filters[i].coefs; i++) { const filter_t *f = &filters[i]; if (fabs(s->out_sample_rate - f->rate) / f->rate <= .05 && f->name == s->dither.method) { int j; s->dither.ns_taps = f->len; for (j=0; j<f->len; j++) s->dither.ns_coeffs[j] = f->coefs[j]; s->dither.ns_scale_1 *= 1 - exp(f->gain_cB * M_LN10 * 0.005) * 2 / (1<<(8*av_get_bytes_per_sample(out_fmt))); break; } } if (!filters[i].coefs && s->dither.method > SWR_DITHER_NS) { av_log(s, AV_LOG_WARNING, "Requested noise shaping dither not available at this sampling rate, using triangular hp dither\n"); s->dither.method = SWR_DITHER_TRIANGULAR_HIGHPASS; } av_assert0(!s->preout.count); s->dither.noise = s->preout; s->dither.temp = s->preout; if (s->dither.method > SWR_DITHER_NS) { s->dither.noise.bps = 4; s->dither.noise.fmt = AV_SAMPLE_FMT_FLTP; s->dither.noise_scale = 1; } return 0; } | 24,273 |
1 | int qemu_pixman_get_type(int rshift, int gshift, int bshift) { int type = PIXMAN_TYPE_OTHER; if (rshift > gshift && gshift > bshift) { if (bshift == 0) { type = PIXMAN_TYPE_ARGB; } else { type = PIXMAN_TYPE_RGBA; } } else if (rshift < gshift && gshift < bshift) { if (rshift == 0) { type = PIXMAN_TYPE_ABGR; } else { type = PIXMAN_TYPE_BGRA; } } return type; } | 24,274 |
1 | static int prom_init1(SysBusDevice *dev) { PROMState *s = OPENPROM(dev); memory_region_init_ram(&s->prom, OBJECT(s), "sun4m.prom", PROM_SIZE_MAX, &error_abort); vmstate_register_ram_global(&s->prom); memory_region_set_readonly(&s->prom, true); sysbus_init_mmio(dev, &s->prom); return 0; } | 24,275 |
1 | void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb) { if (likely(xer_bc != 0)) { if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) || (reg < rb && (reg + xer_bc) > rb))) { helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_LSWX); } else { helper_lsw(env, addr, xer_bc, reg); } } } | 24,276 |
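The register-overlap test in the helper_lswx row above is compact; a small standalone sketch of the same condition (ignoring GPR wrap-around and using plain parameters in place of xer_bc) may make it easier to follow.

#include <stdbool.h>
#include <stdio.h>

/* Sketch: a string load into registers [reg, reg + count) is invalid when it
   would overwrite the address registers ra (if ra != 0) or rb, matching the
   condition in the row above. Wrap-around past r31 is not modelled here. */
static bool lswx_operands_invalid(unsigned reg, unsigned count,
                                  unsigned ra, unsigned rb)
{
    return (ra != 0 && reg < ra && reg + count > ra) ||
           (reg < rb && reg + count > rb);
}

int main(void)
{
    printf("%d\n", lswx_operands_invalid(3, 4, 5, 10)); /* 1: r5 lies in r3..r6 */
    printf("%d\n", lswx_operands_invalid(3, 2, 5, 10)); /* 0: r3..r4 avoids both */
    return 0;
}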
0 | static void h264_v_loop_filter_luma_intra_c(uint8_t *pix, int stride, int alpha, int beta) { h264_loop_filter_luma_intra_c(pix, stride, 1, alpha, beta); } | 24,277 |
1 | mips_mipssim_init(MachineState *machine) { ram_addr_t ram_size = machine->ram_size; const char *cpu_model = machine->cpu_model; const char *kernel_filename = machine->kernel_filename; const char *kernel_cmdline = machine->kernel_cmdline; const char *initrd_filename = machine->initrd_filename; char *filename; MemoryRegion *address_space_mem = get_system_memory(); MemoryRegion *isa = g_new(MemoryRegion, 1); MemoryRegion *ram = g_new(MemoryRegion, 1); MemoryRegion *bios = g_new(MemoryRegion, 1); MIPSCPU *cpu; CPUMIPSState *env; ResetData *reset_info; int bios_size; /* Init CPUs. */ if (cpu_model == NULL) { #ifdef TARGET_MIPS64 cpu_model = "5Kf"; #else cpu_model = "24Kf"; #endif } cpu = cpu_mips_init(cpu_model); if (cpu == NULL) { fprintf(stderr, "Unable to find CPU definition\n"); exit(1); } env = &cpu->env; reset_info = g_malloc0(sizeof(ResetData)); reset_info->cpu = cpu; reset_info->vector = env->active_tc.PC; qemu_register_reset(main_cpu_reset, reset_info); /* Allocate RAM. */ memory_region_allocate_system_memory(ram, NULL, "mips_mipssim.ram", ram_size); memory_region_init_ram(bios, NULL, "mips_mipssim.bios", BIOS_SIZE, &error_abort); vmstate_register_ram_global(bios); memory_region_set_readonly(bios, true); memory_region_add_subregion(address_space_mem, 0, ram); /* Map the BIOS / boot exception handler. */ memory_region_add_subregion(address_space_mem, 0x1fc00000LL, bios); /* Load a BIOS / boot exception handler image. */ if (bios_name == NULL) bios_name = BIOS_FILENAME; filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); if (filename) { bios_size = load_image_targphys(filename, 0x1fc00000LL, BIOS_SIZE); g_free(filename); } else { bios_size = -1; } if ((bios_size < 0 || bios_size > BIOS_SIZE) && !kernel_filename && !qtest_enabled()) { /* Bail out if we have neither a kernel image nor boot vector code. */ error_report("Could not load MIPS bios '%s', and no " "-kernel argument was specified", bios_name); exit(1); } else { /* We have a boot vector start address. */ env->active_tc.PC = (target_long)(int32_t)0xbfc00000; } if (kernel_filename) { loaderparams.ram_size = ram_size; loaderparams.kernel_filename = kernel_filename; loaderparams.kernel_cmdline = kernel_cmdline; loaderparams.initrd_filename = initrd_filename; reset_info->vector = load_kernel(); } /* Init CPU internal devices. */ cpu_mips_irq_init_cpu(env); cpu_mips_clock_init(env); /* Register 64 KB of ISA IO space at 0x1fd00000. */ memory_region_init_alias(isa, NULL, "isa_mmio", get_system_io(), 0, 0x00010000); memory_region_add_subregion(get_system_memory(), 0x1fd00000, isa); /* A single 16450 sits at offset 0x3f8. It is attached to MIPS CPU INT2, which is interrupt 4. */ if (serial_hds[0]) serial_init(0x3f8, env->irq[4], 115200, serial_hds[0], get_system_io()); if (nd_table[0].used) /* MIPSnet uses the MIPS CPU INT0, which is interrupt 2. */ mipsnet_init(0x4200, env->irq[2], &nd_table[0]); } | 24,278 |
1 | SDState *sd_init(BlockDriverState *bs, bool is_spi) { SDState *sd; if (bdrv_is_read_only(bs)) { fprintf(stderr, "sd_init: Cannot use read-only drive\n"); return NULL; } sd = (SDState *) g_malloc0(sizeof(SDState)); sd->buf = qemu_blockalign(bs, 512); sd->spi = is_spi; sd->enable = true; sd_reset(sd, bs); if (sd->bdrv) { bdrv_attach_dev_nofail(sd->bdrv, sd); bdrv_set_dev_ops(sd->bdrv, &sd_block_ops, sd); } vmstate_register(NULL, -1, &sd_vmstate, sd); return sd; } | 24,279 |
1 | static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) { BDRVQcow2State *s = bs->opaque; int offset_in_cluster, n1; int ret; unsigned int cur_bytes; /* number of bytes in current iteration */ uint64_t cluster_offset = 0; uint64_t bytes_done = 0; QEMUIOVector hd_qiov; uint8_t *cluster_data = NULL; qemu_iovec_init(&hd_qiov, qiov->niov); qemu_co_mutex_lock(&s->lock); while (bytes != 0) { /* prepare next request */ cur_bytes = MIN(bytes, INT_MAX); if (s->crypto) { cur_bytes = MIN(cur_bytes, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); } ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset); if (ret < 0) { goto fail; } offset_in_cluster = offset_into_cluster(s, offset); qemu_iovec_reset(&hd_qiov); qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); switch (ret) { case QCOW2_CLUSTER_UNALLOCATED: if (bs->backing) { /* read from the base image */ n1 = qcow2_backing_read1(bs->backing->bs, &hd_qiov, offset, cur_bytes); if (n1 > 0) { QEMUIOVector local_qiov; qemu_iovec_init(&local_qiov, hd_qiov.niov); qemu_iovec_concat(&local_qiov, &hd_qiov, 0, n1); BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); qemu_co_mutex_unlock(&s->lock); ret = bdrv_co_preadv(bs->backing, offset, n1, &local_qiov, 0); qemu_co_mutex_lock(&s->lock); qemu_iovec_destroy(&local_qiov); if (ret < 0) { goto fail; } } } else { /* Note: in this case, no need to wait */ qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); } break; case QCOW2_CLUSTER_ZERO_PLAIN: case QCOW2_CLUSTER_ZERO_ALLOC: qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); break; case QCOW2_CLUSTER_COMPRESSED: /* add AIO support for compressed blocks ? */ ret = qcow2_decompress_cluster(bs, cluster_offset); if (ret < 0) { goto fail; } qemu_iovec_from_buf(&hd_qiov, 0, s->cluster_cache + offset_in_cluster, cur_bytes); break; case QCOW2_CLUSTER_NORMAL: if ((cluster_offset & 511) != 0) { ret = -EIO; goto fail; } if (bs->encrypted) { assert(s->crypto); /* * For encrypted images, read everything into a temporary * contiguous buffer on which the AES functions can work. */ if (!cluster_data) { cluster_data = qemu_try_blockalign(bs->file->bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); if (cluster_data == NULL) { ret = -ENOMEM; goto fail; } } assert(cur_bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); qemu_iovec_reset(&hd_qiov); qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes); } BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); qemu_co_mutex_unlock(&s->lock); ret = bdrv_co_preadv(bs->file, cluster_offset + offset_in_cluster, cur_bytes, &hd_qiov, 0); qemu_co_mutex_lock(&s->lock); if (ret < 0) { goto fail; } if (bs->encrypted) { assert(s->crypto); assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0); if (qcrypto_block_decrypt(s->crypto, (s->crypt_physical_offset ? cluster_offset + offset_in_cluster : offset), cluster_data, cur_bytes, NULL) < 0) { ret = -EIO; goto fail; } qemu_iovec_from_buf(qiov, bytes_done, cluster_data, cur_bytes); } break; default: g_assert_not_reached(); ret = -EIO; goto fail; } bytes -= cur_bytes; offset += cur_bytes; bytes_done += cur_bytes; } ret = 0; fail: qemu_co_mutex_unlock(&s->lock); qemu_iovec_destroy(&hd_qiov); qemu_vfree(cluster_data); return ret; } | 24,281 |
1 | static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) { TCGv sr_ov = tcg_temp_new(); TCGv t0 = tcg_temp_new(); tcg_gen_setcondi_tl(TCG_COND_EQ, sr_ov, srcb, 0); /* The result of divide-by-zero is undefined. Supress the host-side exception by dividing by 1. */ tcg_gen_or_tl(t0, srcb, sr_ov); tcg_gen_div_tl(dest, srca, t0); tcg_temp_free(t0); tcg_gen_deposit_tl(cpu_sr, cpu_sr, sr_ov, ctz32(SR_OV), 1); gen_ove_ov(dc, sr_ov); tcg_temp_free(sr_ov); } | 24,282 |
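The gen_div row above suppresses the host's divide-by-zero trap with an OR trick rather than a branch; below is a plain-C stand-in for those TCG ops, given only to illustrate the arithmetic (the INT_MIN / -1 overflow case is left aside).

#include <stdint.h>
#include <stdio.h>

/* Sketch: when the divisor is zero the guest result is architecturally
   undefined, so dividing by (b | (b == 0)) == 1 avoids a host trap while
   leaving every non-zero divisor unchanged. Plain C, not TCG ops. */
static int32_t safe_div(int32_t a, int32_t b)
{
    int32_t divisor = b | (b == 0);
    return a / divisor;
}

int main(void)
{
    printf("%d %d\n", safe_div(10, 2), safe_div(10, 0)); /* 5 10 */
    return 0;
}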
0 | static inline void decode_subblock3(DCTELEM *dst, int code, const int is_block2, GetBitContext *gb, VLC *vlc, int q_dc, int q_ac1, int q_ac2) { int coeffs[4]; coeffs[0] = modulo_three_table[code][0]; coeffs[1] = modulo_three_table[code][1]; coeffs[2] = modulo_three_table[code][2]; coeffs[3] = modulo_three_table[code][3]; decode_coeff(dst , coeffs[0], 3, gb, vlc, q_dc); if(is_block2){ decode_coeff(dst+8, coeffs[1], 2, gb, vlc, q_ac1); decode_coeff(dst+1, coeffs[2], 2, gb, vlc, q_ac1); }else{ decode_coeff(dst+1, coeffs[1], 2, gb, vlc, q_ac1); decode_coeff(dst+8, coeffs[2], 2, gb, vlc, q_ac1); } decode_coeff(dst+9, coeffs[3], 2, gb, vlc, q_ac2); } | 24,284 |
0 | static int mm_probe(AVProbeData *p) { /* the first chunk is always the header */ if (p->buf_size < MM_PREAMBLE_SIZE) return 0; if (AV_RL16(&p->buf[0]) != MM_TYPE_HEADER) return 0; if (AV_RL32(&p->buf[2]) != MM_HEADER_LEN_V && AV_RL32(&p->buf[2]) != MM_HEADER_LEN_AV) return 0; /* only return half certainty since this check is a bit sketchy */ return AVPROBE_SCORE_MAX / 2; } | 24,286 |
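The mm_probe row above leans on FFmpeg's AV_RL16/AV_RL32 unaligned little-endian readers; a byte-wise sketch of equivalent helpers follows for reference (the names are local, not the FFmpeg macros).

#include <stdint.h>
#include <stdio.h>

/* Sketch: assemble little-endian 16/32-bit values byte by byte, which is
   alignment-safe and endian-independent, like AV_RL16/AV_RL32 above. */
static uint16_t rl16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (uint16_t)p[1] << 8);
}

static uint32_t rl32(const uint8_t *p)
{
    return p[0] | (uint32_t)p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
    const uint8_t buf[] = { 0x34, 0x12, 0x78, 0x56, 0x34, 0x12 };
    printf("%04x %08x\n", rl16(buf), rl32(buf + 2)); /* 1234 12345678 */
    return 0;
}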
0 | static int tcp_write_packet(AVFormatContext *s, RTSPStream *rtsp_st) { RTSPState *rt = s->priv_data; AVFormatContext *rtpctx = rtsp_st->transport_priv; uint8_t *buf, *ptr; int size; uint8_t *interleave_header, *interleaved_packet; size = avio_close_dyn_buf(rtpctx->pb, &buf); ptr = buf; while (size > 4) { uint32_t packet_len = AV_RB32(ptr); int id; /* The interleaving header is exactly 4 bytes, which happens to be * the same size as the packet length header from * url_open_dyn_packet_buf. So by writing the interleaving header * over these bytes, we get a consecutive interleaved packet * that can be written in one call. */ interleaved_packet = interleave_header = ptr; ptr += 4; size -= 4; if (packet_len > size || packet_len < 2) break; if (ptr[1] >= RTCP_SR && ptr[1] <= RTCP_APP) id = rtsp_st->interleaved_max; /* RTCP */ else id = rtsp_st->interleaved_min; /* RTP */ interleave_header[0] = '$'; interleave_header[1] = id; AV_WB16(interleave_header + 2, packet_len); url_write(rt->rtsp_hd_out, interleaved_packet, 4 + packet_len); ptr += packet_len; size -= packet_len; } av_free(buf); url_open_dyn_packet_buf(&rtpctx->pb, RTSP_TCP_MAX_PACKET_SIZE); return 0; } | 24,287 |
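The 4-byte interleaving header written over the dynamic-buffer length field in the row above has a fixed layout; the sketch below builds one from scratch ('$', channel id, big-endian 16-bit length), using a local helper rather than AV_WB16.

#include <stdint.h>
#include <stdio.h>

/* Sketch: RTP-over-RTSP interleave header as written above:
   byte 0 = '$', byte 1 = channel id, bytes 2..3 = packet length (big endian). */
static void write_interleave_header(uint8_t hdr[4], uint8_t channel, uint16_t len)
{
    hdr[0] = '$';
    hdr[1] = channel;
    hdr[2] = (uint8_t)(len >> 8);
    hdr[3] = (uint8_t)(len & 0xff);
}

int main(void)
{
    uint8_t hdr[4];
    write_interleave_header(hdr, 1, 1400);
    printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]); /* 24 01 05 78 */
    return 0;
}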
0 | static int find_and_decode_index(NUTContext *nut){ AVFormatContext *s= nut->avf; ByteIOContext *bc = s->pb; uint64_t tmp, end; int i, j, syncpoint_count; int64_t filesize= url_fsize(bc); int64_t *syncpoints; int8_t *has_keyframe; url_fseek(bc, filesize-12, SEEK_SET); url_fseek(bc, filesize-get_be64(bc), SEEK_SET); if(get_be64(bc) != INDEX_STARTCODE){ av_log(s, AV_LOG_ERROR, "no index at the end\n"); return -1; } end= get_packetheader(nut, bc, 1, INDEX_STARTCODE); end += url_ftell(bc); ff_get_v(bc); //max_pts GET_V(syncpoint_count, tmp < INT_MAX/8 && tmp > 0) syncpoints= av_malloc(sizeof(int64_t)*syncpoint_count); has_keyframe= av_malloc(sizeof(int8_t)*(syncpoint_count+1)); for(i=0; i<syncpoint_count; i++){ GET_V(syncpoints[i], tmp>0) if(i) syncpoints[i] += syncpoints[i-1]; } for(i=0; i<s->nb_streams; i++){ int64_t last_pts= -1; for(j=0; j<syncpoint_count;){ uint64_t x= ff_get_v(bc); int type= x&1; int n= j; x>>=1; if(type){ int flag= x&1; x>>=1; if(n+x >= syncpoint_count + 1){ av_log(s, AV_LOG_ERROR, "index overflow A\n"); return -1; } while(x--) has_keyframe[n++]= flag; has_keyframe[n++]= !flag; }else{ while(x != 1){ if(n>=syncpoint_count + 1){ av_log(s, AV_LOG_ERROR, "index overflow B\n"); return -1; } has_keyframe[n++]= x&1; x>>=1; } } if(has_keyframe[0]){ av_log(s, AV_LOG_ERROR, "keyframe before first syncpoint in index\n"); return -1; } assert(n<=syncpoint_count+1); for(; j<n; j++){ if(has_keyframe[j]){ uint64_t B, A= ff_get_v(bc); if(!A){ A= ff_get_v(bc); B= ff_get_v(bc); //eor_pts[j][i] = last_pts + A + B }else B= 0; av_add_index_entry( s->streams[i], 16*syncpoints[j-1], last_pts + A, 0, 0, AVINDEX_KEYFRAME); last_pts += A + B; } } } } if(skip_reserved(bc, end) || get_checksum(bc)){ av_log(s, AV_LOG_ERROR, "index checksum mismatch\n"); return -1; } return 0; } | 24,288 |
1 | static int dump_ppc_insns (CPUPPCState *env) { opc_handler_t **table, *handler; uint8_t opc1, opc2, opc3; printf("Instructions set:\n"); /* opc1 is 6 bits long */ for (opc1 = 0x00; opc1 < 0x40; opc1++) { table = env->opcodes; handler = table[opc1]; if (is_indirect_opcode(handler)) { /* opc2 is 5 bits long */ for (opc2 = 0; opc2 < 0x20; opc2++) { table = env->opcodes; handler = env->opcodes[opc1]; table = ind_table(handler); handler = table[opc2]; if (is_indirect_opcode(handler)) { table = ind_table(handler); /* opc3 is 5 bits long */ for (opc3 = 0; opc3 < 0x20; opc3++) { handler = table[opc3]; if (handler->handler != &gen_invalid) { printf("INSN: %02x %02x %02x (%02d %04d) : %s\n", opc1, opc2, opc3, opc1, (opc3 << 5) | opc2, handler->oname); } } } else { if (handler->handler != &gen_invalid) { printf("INSN: %02x %02x -- (%02d %04d) : %s\n", opc1, opc2, opc1, opc2, handler->oname); } } } } else { if (handler->handler != &gen_invalid) { printf("INSN: %02x -- -- (%02d ----) : %s\n", opc1, opc1, handler->oname); } } } } | 24,293 |
1 | static int get_packet_size(const uint8_t *buf, int size) { int score, fec_score, dvhs_score; if (size < (TS_FEC_PACKET_SIZE * 5 + 1)) return AVERROR_INVALIDDATA; score = analyze(buf, size, TS_PACKET_SIZE, NULL); dvhs_score = analyze(buf, size, TS_DVHS_PACKET_SIZE, NULL); fec_score = analyze(buf, size, TS_FEC_PACKET_SIZE, NULL); av_dlog(NULL, "score: %d, dvhs_score: %d, fec_score: %d \n", score, dvhs_score, fec_score); if (score > fec_score && score > dvhs_score) return TS_PACKET_SIZE; else if (dvhs_score > score && dvhs_score > fec_score) return TS_DVHS_PACKET_SIZE; else if (score < fec_score && dvhs_score < fec_score) return TS_FEC_PACKET_SIZE; else return AVERROR_INVALIDDATA; } | 24,295 |
1 | static void matroska_execute_seekhead(MatroskaDemuxContext *matroska) { EbmlList *seekhead_list = &matroska->seekhead; MatroskaSeekhead *seekhead = seekhead_list->elem; int64_t before_pos = avio_tell(matroska->ctx->pb); int i; // we should not do any seeking in the streaming case if (!matroska->ctx->pb->seekable || (matroska->ctx->flags & AVFMT_FLAG_IGNIDX)) return; for (i = 0; i < seekhead_list->nb_elem; i++) { if (seekhead[i].pos <= before_pos) continue; // defer cues parsing until we actually need cue data. if (seekhead[i].id == MATROSKA_ID_CUES) { matroska->cues_parsing_deferred = 1; continue; } if (matroska_parse_seekhead_entry(matroska, i) < 0) break; } } | 24,296 |
1 | static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr, target_ulong msrm, int keep_msrh) { CPUState *cs = CPU(ppc_env_get_cpu(env)); #if defined(TARGET_PPC64) if (msr_is_64bit(env, msr)) { nip = (uint64_t)nip; msr &= (uint64_t)msrm; } else { nip = (uint32_t)nip; msr = (uint32_t)(msr & msrm); if (keep_msrh) { msr |= env->msr & ~((uint64_t)0xFFFFFFFF); } } #else nip = (uint32_t)nip; msr &= (uint32_t)msrm; #endif /* XXX: beware: this is false if VLE is supported */ env->nip = nip & ~((target_ulong)0x00000003); hreg_store_msr(env, msr, 1); #if defined(DEBUG_OP) cpu_dump_rfi(env->nip, env->msr); #endif /* No need to raise an exception here, * as rfi is always the last insn of a TB */ cs->interrupt_request |= CPU_INTERRUPT_EXITTB; } | 24,297 |
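The do_rfi row above masks the return address before restoring the MSR; a one-line sketch of that masking (valid for classic 4-byte PowerPC instructions, not VLE, as the code's own comment notes) follows.

#include <stdint.h>
#include <stdio.h>

/* Sketch: clear the two low bits of the rfi target so execution resumes on a
   4-byte instruction boundary, as the masking above does for non-VLE cores. */
static uint64_t align_nip(uint64_t nip)
{
    return nip & ~(uint64_t)0x3;
}

int main(void)
{
    printf("0x%llx\n", (unsigned long long)align_nip(0x1003)); /* 0x1000 */
    return 0;
}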
1 | static void mmio_interface_realize(DeviceState *dev, Error **errp) { MMIOInterface *s = MMIO_INTERFACE(dev); DPRINTF("realize from 0x%" PRIX64 " to 0x%" PRIX64 " map host pointer" " %p\n", s->start, s->end, s->host_ptr); if (!s->host_ptr) { error_setg(errp, "host_ptr property must be set"); } if (!s->subregion) { error_setg(errp, "subregion property must be set"); } memory_region_init_ram_ptr(&s->ram_mem, OBJECT(s), "ram", s->end - s->start + 1, s->host_ptr); memory_region_set_readonly(&s->ram_mem, s->ro); memory_region_add_subregion(s->subregion, s->start, &s->ram_mem); } | 24,298 |
1 | static void load_cursor(VmncContext *c, const uint8_t *src) { int i, j, p; const int bpp = c->bpp2; uint8_t *dst8 = c->curbits; uint16_t *dst16 = (uint16_t *)c->curbits; uint32_t *dst32 = (uint32_t *)c->curbits; for (j = 0; j < c->cur_h; j++) { for (i = 0; i < c->cur_w; i++) { p = vmnc_get_pixel(src, bpp, c->bigendian); src += bpp; if (bpp == 1) *dst8++ = p; if (bpp == 2) *dst16++ = p; if (bpp == 4) *dst32++ = p; } } dst8 = c->curmask; dst16 = (uint16_t*)c->curmask; dst32 = (uint32_t*)c->curmask; for (j = 0; j < c->cur_h; j++) { for (i = 0; i < c->cur_w; i++) { p = vmnc_get_pixel(src, bpp, c->bigendian); src += bpp; if (bpp == 1) *dst8++ = p; if (bpp == 2) *dst16++ = p; if (bpp == 4) *dst32++ = p; } } } | 24,299 |
1 | PCIDevice *pci_ne2000_init(PCIBus *bus, NICInfo *nd, int devfn) { PCINE2000State *d; NE2000State *s; uint8_t *pci_conf; d = (PCINE2000State *)pci_register_device(bus, "NE2000", sizeof(PCINE2000State), devfn, NULL, NULL); pci_conf = d->dev.config; pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_REALTEK); pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_REALTEK_8029); pci_config_set_class(pci_conf, PCI_CLASS_NETWORK_ETHERNET); pci_conf[0x0e] = 0x00; // header_type pci_conf[0x3d] = 1; // interrupt pin 0 pci_register_io_region(&d->dev, 0, 0x100, PCI_ADDRESS_SPACE_IO, ne2000_map); s = &d->ne2000; s->irq = d->dev.irq[0]; s->pci_dev = (PCIDevice *)d; memcpy(s->macaddr, nd->macaddr, 6); ne2000_reset(s); s->vc = qemu_new_vlan_client(nd->vlan, nd->model, nd->name, ne2000_receive, ne2000_can_receive, s); qemu_format_nic_info_str(s->vc, s->macaddr); register_savevm("ne2000", -1, 3, ne2000_save, ne2000_load, s); return (PCIDevice *)d; } | 24,300 |
0 | av_cold void ff_dsputil_init(DSPContext* c, AVCodecContext *avctx) { int i, j; ff_check_alignment(); #if CONFIG_ENCODERS if (avctx->bits_per_raw_sample == 10) { c->fdct = ff_jpeg_fdct_islow_10; c->fdct248 = ff_fdct248_islow_10; } else { if(avctx->dct_algo==FF_DCT_FASTINT) { c->fdct = ff_fdct_ifast; c->fdct248 = ff_fdct_ifast248; } else if(avctx->dct_algo==FF_DCT_FAAN) { c->fdct = ff_faandct; c->fdct248 = ff_faandct248; } else { c->fdct = ff_jpeg_fdct_islow_8; //slow/accurate/default c->fdct248 = ff_fdct248_islow_8; } } #endif //CONFIG_ENCODERS if (avctx->bits_per_raw_sample == 10) { c->idct_put = ff_simple_idct_put_10; c->idct_add = ff_simple_idct_add_10; c->idct = ff_simple_idct_10; c->idct_permutation_type = FF_NO_IDCT_PERM; } else { if(avctx->idct_algo==FF_IDCT_INT){ c->idct_put= ff_jref_idct_put; c->idct_add= ff_jref_idct_add; c->idct = ff_j_rev_dct; c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM; }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER ) && avctx->idct_algo==FF_IDCT_VP3){ c->idct_put= ff_vp3_idct_put_c; c->idct_add= ff_vp3_idct_add_c; c->idct = ff_vp3_idct_c; c->idct_permutation_type= FF_NO_IDCT_PERM; }else if(avctx->idct_algo==FF_IDCT_WMV2){ c->idct_put= ff_wmv2_idct_put_c; c->idct_add= ff_wmv2_idct_add_c; c->idct = ff_wmv2_idct_c; c->idct_permutation_type= FF_NO_IDCT_PERM; }else if(avctx->idct_algo==FF_IDCT_FAAN){ c->idct_put= ff_faanidct_put; c->idct_add= ff_faanidct_add; c->idct = ff_faanidct; c->idct_permutation_type= FF_NO_IDCT_PERM; }else if(CONFIG_EATGQ_DECODER && avctx->idct_algo==FF_IDCT_EA) { c->idct_put= ff_ea_idct_put_c; c->idct_permutation_type= FF_NO_IDCT_PERM; }else{ //accurate/default c->idct_put = ff_simple_idct_put_8; c->idct_add = ff_simple_idct_add_8; c->idct = ff_simple_idct_8; c->idct_permutation_type= FF_NO_IDCT_PERM; } } c->diff_pixels = diff_pixels_c; c->put_pixels_clamped = ff_put_pixels_clamped_c; c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_c; c->add_pixels_clamped = ff_add_pixels_clamped_c; c->sum_abs_dctelem = sum_abs_dctelem_c; c->gmc1 = gmc1_c; c->gmc = ff_gmc_c; c->pix_sum = pix_sum_c; c->pix_norm1 = pix_norm1_c; c->fill_block_tab[0] = fill_block16_c; c->fill_block_tab[1] = fill_block8_c; /* TODO [0] 16 [1] 8 */ c->pix_abs[0][0] = pix_abs16_c; c->pix_abs[0][1] = pix_abs16_x2_c; c->pix_abs[0][2] = pix_abs16_y2_c; c->pix_abs[0][3] = pix_abs16_xy2_c; c->pix_abs[1][0] = pix_abs8_c; c->pix_abs[1][1] = pix_abs8_x2_c; c->pix_abs[1][2] = pix_abs8_y2_c; c->pix_abs[1][3] = pix_abs8_xy2_c; c->put_tpel_pixels_tab[ 0] = put_tpel_pixels_mc00_c; c->put_tpel_pixels_tab[ 1] = put_tpel_pixels_mc10_c; c->put_tpel_pixels_tab[ 2] = put_tpel_pixels_mc20_c; c->put_tpel_pixels_tab[ 4] = put_tpel_pixels_mc01_c; c->put_tpel_pixels_tab[ 5] = put_tpel_pixels_mc11_c; c->put_tpel_pixels_tab[ 6] = put_tpel_pixels_mc21_c; c->put_tpel_pixels_tab[ 8] = put_tpel_pixels_mc02_c; c->put_tpel_pixels_tab[ 9] = put_tpel_pixels_mc12_c; c->put_tpel_pixels_tab[10] = put_tpel_pixels_mc22_c; c->avg_tpel_pixels_tab[ 0] = avg_tpel_pixels_mc00_c; c->avg_tpel_pixels_tab[ 1] = avg_tpel_pixels_mc10_c; c->avg_tpel_pixels_tab[ 2] = avg_tpel_pixels_mc20_c; c->avg_tpel_pixels_tab[ 4] = avg_tpel_pixels_mc01_c; c->avg_tpel_pixels_tab[ 5] = avg_tpel_pixels_mc11_c; c->avg_tpel_pixels_tab[ 6] = avg_tpel_pixels_mc21_c; c->avg_tpel_pixels_tab[ 8] = avg_tpel_pixels_mc02_c; c->avg_tpel_pixels_tab[ 9] = avg_tpel_pixels_mc12_c; c->avg_tpel_pixels_tab[10] = avg_tpel_pixels_mc22_c; #define dspfunc(PFX, IDX, NUM) \ c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## 
_mc00_c; \ c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_c; \ c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_c; \ c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_c; \ c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_c; \ c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_c; \ c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_c; \ c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_c; \ c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_c; \ c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_c; \ c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \ c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \ c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \ c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \ c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \ c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c dspfunc(put_qpel, 0, 16); dspfunc(put_no_rnd_qpel, 0, 16); dspfunc(avg_qpel, 0, 16); /* dspfunc(avg_no_rnd_qpel, 0, 16); */ dspfunc(put_qpel, 1, 8); dspfunc(put_no_rnd_qpel, 1, 8); dspfunc(avg_qpel, 1, 8); /* dspfunc(avg_no_rnd_qpel, 1, 8); */ #undef dspfunc #if CONFIG_MLP_DECODER || CONFIG_TRUEHD_DECODER ff_mlp_init(c, avctx); #endif #if CONFIG_WMV2_DECODER || CONFIG_VC1_DECODER ff_intrax8dsp_init(c,avctx); #endif c->put_mspel_pixels_tab[0]= ff_put_pixels8x8_c; c->put_mspel_pixels_tab[1]= put_mspel8_mc10_c; c->put_mspel_pixels_tab[2]= put_mspel8_mc20_c; c->put_mspel_pixels_tab[3]= put_mspel8_mc30_c; c->put_mspel_pixels_tab[4]= put_mspel8_mc02_c; c->put_mspel_pixels_tab[5]= put_mspel8_mc12_c; c->put_mspel_pixels_tab[6]= put_mspel8_mc22_c; c->put_mspel_pixels_tab[7]= put_mspel8_mc32_c; #define SET_CMP_FUNC(name) \ c->name[0]= name ## 16_c;\ c->name[1]= name ## 8x8_c; SET_CMP_FUNC(hadamard8_diff) c->hadamard8_diff[4]= hadamard8_intra16_c; c->hadamard8_diff[5]= hadamard8_intra8x8_c; SET_CMP_FUNC(dct_sad) SET_CMP_FUNC(dct_max) #if CONFIG_GPL SET_CMP_FUNC(dct264_sad) #endif c->sad[0]= pix_abs16_c; c->sad[1]= pix_abs8_c; c->sse[0]= sse16_c; c->sse[1]= sse8_c; c->sse[2]= sse4_c; SET_CMP_FUNC(quant_psnr) SET_CMP_FUNC(rd) SET_CMP_FUNC(bit) c->vsad[0]= vsad16_c; c->vsad[4]= vsad_intra16_c; c->vsad[5]= vsad_intra8_c; c->vsse[0]= vsse16_c; c->vsse[4]= vsse_intra16_c; c->vsse[5]= vsse_intra8_c; c->nsse[0]= nsse16_c; c->nsse[1]= nsse8_c; #if CONFIG_DWT ff_dsputil_init_dwt(c); #endif c->ssd_int8_vs_int16 = ssd_int8_vs_int16_c; c->add_bytes= add_bytes_c; c->diff_bytes= diff_bytes_c; c->add_hfyu_median_prediction= add_hfyu_median_prediction_c; c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_c; c->add_hfyu_left_prediction = add_hfyu_left_prediction_c; c->add_hfyu_left_prediction_bgr32 = add_hfyu_left_prediction_bgr32_c; c->bswap_buf= bswap_buf; c->bswap16_buf = bswap16_buf; if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) { c->h263_h_loop_filter= h263_h_loop_filter_c; c->h263_v_loop_filter= h263_v_loop_filter_c; } if (CONFIG_VP3_DECODER) { c->vp3_h_loop_filter= ff_vp3_h_loop_filter_c; c->vp3_v_loop_filter= ff_vp3_v_loop_filter_c; c->vp3_idct_dc_add= ff_vp3_idct_dc_add_c; } c->h261_loop_filter= h261_loop_filter_c; c->try_8x8basis= try_8x8basis_c; c->add_8x8basis= add_8x8basis_c; #if CONFIG_VORBIS_DECODER c->vorbis_inverse_coupling = ff_vorbis_inverse_coupling; #endif #if CONFIG_AC3_DECODER c->ac3_downmix = ff_ac3_downmix_c; #endif c->vector_fmul_reverse = vector_fmul_reverse_c; c->vector_fmul_add = vector_fmul_add_c; c->vector_fmul_window = vector_fmul_window_c; c->vector_clipf = vector_clipf_c; c->scalarproduct_int16 = 
scalarproduct_int16_c; c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_c; c->apply_window_int16 = apply_window_int16_c; c->vector_clip_int32 = vector_clip_int32_c; c->scalarproduct_float = scalarproduct_float_c; c->butterflies_float = butterflies_float_c; c->butterflies_float_interleave = butterflies_float_interleave_c; c->vector_fmul_scalar = vector_fmul_scalar_c; c->shrink[0]= av_image_copy_plane; c->shrink[1]= ff_shrink22; c->shrink[2]= ff_shrink44; c->shrink[3]= ff_shrink88; c->prefetch= just_return; memset(c->put_2tap_qpel_pixels_tab, 0, sizeof(c->put_2tap_qpel_pixels_tab)); memset(c->avg_2tap_qpel_pixels_tab, 0, sizeof(c->avg_2tap_qpel_pixels_tab)); #undef FUNC #undef FUNCC #define FUNC(f, depth) f ## _ ## depth #define FUNCC(f, depth) f ## _ ## depth ## _c #define dspfunc1(PFX, IDX, NUM, depth)\ c->PFX ## _pixels_tab[IDX][0] = FUNCC(PFX ## _pixels ## NUM , depth);\ c->PFX ## _pixels_tab[IDX][1] = FUNCC(PFX ## _pixels ## NUM ## _x2 , depth);\ c->PFX ## _pixels_tab[IDX][2] = FUNCC(PFX ## _pixels ## NUM ## _y2 , depth);\ c->PFX ## _pixels_tab[IDX][3] = FUNCC(PFX ## _pixels ## NUM ## _xy2, depth) #define dspfunc2(PFX, IDX, NUM, depth)\ c->PFX ## _pixels_tab[IDX][ 0] = FUNCC(PFX ## NUM ## _mc00, depth);\ c->PFX ## _pixels_tab[IDX][ 1] = FUNCC(PFX ## NUM ## _mc10, depth);\ c->PFX ## _pixels_tab[IDX][ 2] = FUNCC(PFX ## NUM ## _mc20, depth);\ c->PFX ## _pixels_tab[IDX][ 3] = FUNCC(PFX ## NUM ## _mc30, depth);\ c->PFX ## _pixels_tab[IDX][ 4] = FUNCC(PFX ## NUM ## _mc01, depth);\ c->PFX ## _pixels_tab[IDX][ 5] = FUNCC(PFX ## NUM ## _mc11, depth);\ c->PFX ## _pixels_tab[IDX][ 6] = FUNCC(PFX ## NUM ## _mc21, depth);\ c->PFX ## _pixels_tab[IDX][ 7] = FUNCC(PFX ## NUM ## _mc31, depth);\ c->PFX ## _pixels_tab[IDX][ 8] = FUNCC(PFX ## NUM ## _mc02, depth);\ c->PFX ## _pixels_tab[IDX][ 9] = FUNCC(PFX ## NUM ## _mc12, depth);\ c->PFX ## _pixels_tab[IDX][10] = FUNCC(PFX ## NUM ## _mc22, depth);\ c->PFX ## _pixels_tab[IDX][11] = FUNCC(PFX ## NUM ## _mc32, depth);\ c->PFX ## _pixels_tab[IDX][12] = FUNCC(PFX ## NUM ## _mc03, depth);\ c->PFX ## _pixels_tab[IDX][13] = FUNCC(PFX ## NUM ## _mc13, depth);\ c->PFX ## _pixels_tab[IDX][14] = FUNCC(PFX ## NUM ## _mc23, depth);\ c->PFX ## _pixels_tab[IDX][15] = FUNCC(PFX ## NUM ## _mc33, depth) #define BIT_DEPTH_FUNCS(depth, dct)\ c->get_pixels = FUNCC(get_pixels ## dct , depth);\ c->draw_edges = FUNCC(draw_edges , depth);\ c->emulated_edge_mc = FUNC (ff_emulated_edge_mc , depth);\ c->clear_block = FUNCC(clear_block ## dct , depth);\ c->clear_blocks = FUNCC(clear_blocks ## dct , depth);\ c->add_pixels8 = FUNCC(add_pixels8 ## dct , depth);\ c->add_pixels4 = FUNCC(add_pixels4 ## dct , depth);\ c->put_no_rnd_pixels_l2[0] = FUNCC(put_no_rnd_pixels16_l2, depth);\ c->put_no_rnd_pixels_l2[1] = FUNCC(put_no_rnd_pixels8_l2 , depth);\ \ c->put_h264_chroma_pixels_tab[0] = FUNCC(put_h264_chroma_mc8 , depth);\ c->put_h264_chroma_pixels_tab[1] = FUNCC(put_h264_chroma_mc4 , depth);\ c->put_h264_chroma_pixels_tab[2] = FUNCC(put_h264_chroma_mc2 , depth);\ c->avg_h264_chroma_pixels_tab[0] = FUNCC(avg_h264_chroma_mc8 , depth);\ c->avg_h264_chroma_pixels_tab[1] = FUNCC(avg_h264_chroma_mc4 , depth);\ c->avg_h264_chroma_pixels_tab[2] = FUNCC(avg_h264_chroma_mc2 , depth);\ \ dspfunc1(put , 0, 16, depth);\ dspfunc1(put , 1, 8, depth);\ dspfunc1(put , 2, 4, depth);\ dspfunc1(put , 3, 2, depth);\ dspfunc1(put_no_rnd, 0, 16, depth);\ dspfunc1(put_no_rnd, 1, 8, depth);\ dspfunc1(avg , 0, 16, depth);\ dspfunc1(avg , 1, 8, depth);\ dspfunc1(avg , 2, 4, depth);\ dspfunc1(avg , 3, 
2, depth);\ dspfunc1(avg_no_rnd, 0, 16, depth);\ dspfunc1(avg_no_rnd, 1, 8, depth);\ \ dspfunc2(put_h264_qpel, 0, 16, depth);\ dspfunc2(put_h264_qpel, 1, 8, depth);\ dspfunc2(put_h264_qpel, 2, 4, depth);\ dspfunc2(put_h264_qpel, 3, 2, depth);\ dspfunc2(avg_h264_qpel, 0, 16, depth);\ dspfunc2(avg_h264_qpel, 1, 8, depth);\ dspfunc2(avg_h264_qpel, 2, 4, depth); switch (avctx->bits_per_raw_sample) { case 9: if (c->dct_bits == 32) { BIT_DEPTH_FUNCS(9, _32); } else { BIT_DEPTH_FUNCS(9, _16); } break; case 10: if (c->dct_bits == 32) { BIT_DEPTH_FUNCS(10, _32); } else { BIT_DEPTH_FUNCS(10, _16); } break; default: BIT_DEPTH_FUNCS(8, _16); break; } if (HAVE_MMX) ff_dsputil_init_mmx (c, avctx); if (ARCH_ARM) ff_dsputil_init_arm (c, avctx); if (HAVE_VIS) ff_dsputil_init_vis (c, avctx); if (ARCH_ALPHA) ff_dsputil_init_alpha (c, avctx); if (ARCH_PPC) ff_dsputil_init_ppc (c, avctx); if (HAVE_MMI) ff_dsputil_init_mmi (c, avctx); if (ARCH_SH4) ff_dsputil_init_sh4 (c, avctx); if (ARCH_BFIN) ff_dsputil_init_bfin (c, avctx); for (i = 0; i < 4; i++) { for (j = 0; j < 16; j++) { if(!c->put_2tap_qpel_pixels_tab[i][j]) c->put_2tap_qpel_pixels_tab[i][j] = c->put_h264_qpel_pixels_tab[i][j]; if(!c->avg_2tap_qpel_pixels_tab[i][j]) c->avg_2tap_qpel_pixels_tab[i][j] = c->avg_h264_qpel_pixels_tab[i][j]; } } ff_init_scantable_permutation(c->idct_permutation, c->idct_permutation_type); } | 24,301 |
0 | static int dash_flush(AVFormatContext *s, int final, int stream) { DASHContext *c = s->priv_data; int i, ret = 0; const char *proto = avio_find_protocol_name(s->filename); int use_rename = proto && !strcmp(proto, "file"); int cur_flush_segment_index = 0; if (stream >= 0) cur_flush_segment_index = c->streams[stream].segment_index; for (i = 0; i < s->nb_streams; i++) { OutputStream *os = &c->streams[i]; AVStream *st = s->streams[i]; char filename[1024] = "", full_path[1024], temp_path[1024]; int range_length, index_length = 0; if (!os->packets_written) continue; // Flush the single stream that got a keyframe right now. // Flush all audio streams as well, in sync with video keyframes, // but not the other video streams. if (stream >= 0 && i != stream) { if (s->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) continue; // Make sure we don't flush audio streams multiple times, when // all video streams are flushed one at a time. if (c->has_video && os->segment_index > cur_flush_segment_index) continue; } if (!os->init_range_length) { flush_init_segment(s, os); } if (!c->single_file) { ff_dash_fill_tmpl_params(filename, sizeof(filename), c->media_seg_name, i, os->segment_index, os->bit_rate, os->start_pts); snprintf(full_path, sizeof(full_path), "%s%s", c->dirname, filename); snprintf(temp_path, sizeof(temp_path), use_rename ? "%s.tmp" : "%s", full_path); ret = s->io_open(s, &os->out, temp_path, AVIO_FLAG_WRITE, NULL); if (ret < 0) break; if (!strcmp(os->format_name, "mp4")) write_styp(os->ctx->pb); } else { snprintf(full_path, sizeof(full_path), "%s%s", c->dirname, os->initfile); } ret = flush_dynbuf(os, &range_length); if (ret < 0) break; os->packets_written = 0; if (c->single_file) { find_index_range(s, full_path, os->pos, &index_length); } else { ff_format_io_close(s, &os->out); if (use_rename) { ret = avpriv_io_move(temp_path, full_path); if (ret < 0) break; } } if (!os->bit_rate) { // calculate average bitrate of first segment int64_t bitrate = (int64_t) range_length * 8 * AV_TIME_BASE / av_rescale_q(os->max_pts - os->start_pts, st->time_base, AV_TIME_BASE_Q); if (bitrate >= 0) { os->bit_rate = bitrate; snprintf(os->bandwidth_str, sizeof(os->bandwidth_str), " bandwidth=\"%d\"", os->bit_rate); } } add_segment(os, filename, os->start_pts, os->max_pts - os->start_pts, os->pos, range_length, index_length); av_log(s, AV_LOG_VERBOSE, "Representation %d media segment %d written to: %s\n", i, os->segment_index, full_path); os->pos += range_length; } if (c->window_size || (final && c->remove_at_exit)) { for (i = 0; i < s->nb_streams; i++) { OutputStream *os = &c->streams[i]; int j; int remove = os->nb_segments - c->window_size - c->extra_window_size; if (final && c->remove_at_exit) remove = os->nb_segments; if (remove > 0) { for (j = 0; j < remove; j++) { char filename[1024]; snprintf(filename, sizeof(filename), "%s%s", c->dirname, os->segments[j]->file); unlink(filename); av_free(os->segments[j]); } os->nb_segments -= remove; memmove(os->segments, os->segments + remove, os->nb_segments * sizeof(*os->segments)); } } } if (ret >= 0) ret = write_manifest(s, final); return ret; } | 24,302 |
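The first-segment bitrate estimate in the dash_flush row above is a straightforward rescale; the standalone sketch below shows the same arithmetic with the duration already expressed in microseconds (AV_TIME_BASE units), skipping av_rescale_q.

#include <stdint.h>
#include <stdio.h>

/* Sketch: average bitrate of a segment = bytes * 8 / duration-in-seconds;
   here the duration is in microseconds, matching AV_TIME_BASE = 1000000. */
static int64_t segment_bitrate(int64_t bytes, int64_t duration_us)
{
    return duration_us > 0 ? bytes * 8 * 1000000 / duration_us : 0;
}

int main(void)
{
    /* 1 MiB written over 2 seconds is roughly 4.2 Mb/s */
    printf("%lld\n", (long long)segment_bitrate(1 << 20, 2000000)); /* 4194304 */
    return 0;
}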
0 | static int stream_set_speed(BlockJob *job, int64_t value) { StreamBlockJob *s = container_of(job, StreamBlockJob, common); if (value < 0) { return -EINVAL; } ratelimit_set_speed(&s->limit, value / BDRV_SECTOR_SIZE); return 0; } | 24,303 |
0 | static void cirrus_update_memory_access(CirrusVGAState *s) { unsigned mode; if ((s->sr[0x17] & 0x44) == 0x44) { goto generic_io; } else if (s->cirrus_srcptr != s->cirrus_srcptr_end) { goto generic_io; } else { if ((s->gr[0x0B] & 0x14) == 0x14) { goto generic_io; } else if (s->gr[0x0B] & 0x02) { goto generic_io; } mode = s->gr[0x05] & 0x7; if (mode < 4 || mode > 5 || ((s->gr[0x0B] & 0x4) == 0)) { map_linear_vram(s); s->cirrus_linear_write[0] = cirrus_linear_mem_writeb; s->cirrus_linear_write[1] = cirrus_linear_mem_writew; s->cirrus_linear_write[2] = cirrus_linear_mem_writel; } else { generic_io: unmap_linear_vram(s); s->cirrus_linear_write[0] = cirrus_linear_writeb; s->cirrus_linear_write[1] = cirrus_linear_writew; s->cirrus_linear_write[2] = cirrus_linear_writel; } } } | 24,304 |
0 | void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h) { POWERPC_TBL_DECLARE(altivec_put_pixels16_xy2_num, 1); #ifdef ALTIVEC_USE_REFERENCE_C_CODE int j; POWERPC_TBL_START_COUNT(altivec_put_pixels16_xy2_num, 1); for (j = 0; j < 4; j++) { int i; const uint32_t a = (((const struct unaligned_32 *) (pixels))->l); const uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l); uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL; uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); uint32_t l1, h1; pixels += line_size; for (i = 0; i < h; i += 2) { uint32_t a = (((const struct unaligned_32 *) (pixels))->l); uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l); l1 = (a & 0x03030303UL) + (b & 0x03030303UL); h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); *((uint32_t *) block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL); pixels += line_size; block += line_size; a = (((const struct unaligned_32 *) (pixels))->l); b = (((const struct unaligned_32 *) (pixels + 1))->l); l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL; h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); *((uint32_t *) block) = h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL); pixels += line_size; block += line_size; } pixels += 4 - line_size * (h + 1); block += 4 - line_size * h; } POWERPC_TBL_STOP_COUNT(altivec_put_pixels16_xy2_num, 1); #else /* ALTIVEC_USE_REFERENCE_C_CODE */ register int i; register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4; register vector unsigned char blockv, temp1, temp2; register vector unsigned short pixelssum1, pixelssum2, temp3, pixelssum3, pixelssum4, temp4; register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0); register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2); POWERPC_TBL_START_COUNT(altivec_put_pixels16_xy2_num, 1); temp1 = vec_ld(0, pixels); temp2 = vec_ld(16, pixels); pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels)); if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) { pixelsv2 = temp2; } else { pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels)); } pixelsv3 = vec_mergel(vczero, pixelsv1); pixelsv4 = vec_mergel(vczero, pixelsv2); pixelsv1 = vec_mergeh(vczero, pixelsv1); pixelsv2 = vec_mergeh(vczero, pixelsv2); pixelssum3 = vec_add((vector unsigned short)pixelsv3, (vector unsigned short)pixelsv4); pixelssum3 = vec_add(pixelssum3, vctwo); pixelssum1 = vec_add((vector unsigned short)pixelsv1, (vector unsigned short)pixelsv2); pixelssum1 = vec_add(pixelssum1, vctwo); for (i = 0; i < h ; i++) { blockv = vec_ld(0, block); temp1 = vec_ld(line_size, pixels); temp2 = vec_ld(line_size + 16, pixels); pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels)); if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) { pixelsv2 = temp2; } else { pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels)); } pixelsv3 = vec_mergel(vczero, pixelsv1); pixelsv4 = vec_mergel(vczero, pixelsv2); pixelsv1 = vec_mergeh(vczero, pixelsv1); pixelsv2 = vec_mergeh(vczero, pixelsv2); pixelssum4 = vec_add((vector unsigned short)pixelsv3, (vector unsigned short)pixelsv4); pixelssum2 = vec_add((vector unsigned short)pixelsv1, (vector unsigned short)pixelsv2); temp4 = vec_add(pixelssum3, pixelssum4); temp4 = vec_sra(temp4, vctwo); temp3 = vec_add(pixelssum1, pixelssum2); temp3 = vec_sra(temp3, vctwo); pixelssum3 = vec_add(pixelssum4, vctwo); pixelssum1 = vec_add(pixelssum2, vctwo); 
blockv = vec_packsu(temp3, temp4); vec_st(blockv, 0, block); block += line_size; pixels += line_size; } POWERPC_TBL_STOP_COUNT(altivec_put_pixels16_xy2_num, 1); #endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } | 24,305 |
0 | static void omap_lpg_update(struct omap_lpg_s *s) { int64_t on, period = 1, ticks = 1000; static const int per[8] = { 1, 2, 4, 8, 12, 16, 20, 24 }; if (~s->control & (1 << 6)) /* LPGRES */ on = 0; else if (s->control & (1 << 7)) /* PERM_ON */ on = period; else { period = muldiv64(ticks, per[s->control & 7], /* PERCTRL */ 256 / 32); on = (s->clk && s->power) ? muldiv64(ticks, per[(s->control >> 3) & 7], 256) : 0; /* ONCTRL */ } timer_del(s->tm); if (on == period && s->on < s->period) printf("%s: LED is on\n", __FUNCTION__); else if (on == 0 && s->on) printf("%s: LED is off\n", __FUNCTION__); else if (on && (on != s->on || period != s->period)) { s->cycle = 0; s->on = on; s->period = period; omap_lpg_tick(s); return; } s->on = on; s->period = period; } | 24,306 |
0 | START_TEST(vararg_number) { QObject *obj; QInt *qint; QFloat *qfloat; int value = 0x2342; int64_t value64 = 0x2342342343LL; double valuef = 2.323423423; obj = qobject_from_jsonf("%d", value); fail_unless(obj != NULL); fail_unless(qobject_type(obj) == QTYPE_QINT); qint = qobject_to_qint(obj); fail_unless(qint_get_int(qint) == value); QDECREF(qint); obj = qobject_from_jsonf("%" PRId64, value64); fail_unless(obj != NULL); fail_unless(qobject_type(obj) == QTYPE_QINT); qint = qobject_to_qint(obj); fail_unless(qint_get_int(qint) == value64); QDECREF(qint); obj = qobject_from_jsonf("%f", valuef); fail_unless(obj != NULL); fail_unless(qobject_type(obj) == QTYPE_QFLOAT); qfloat = qobject_to_qfloat(obj); fail_unless(qfloat_get_double(qfloat) == valuef); QDECREF(qfloat); } | 24,307 |
0 | static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run, uint16_t ipbh0) { CPUS390XState *env = &cpu->env; uint32_t sccb; uint64_t code; int r = 0; cpu_synchronize_state(CPU(cpu)); if (env->psw.mask & PSW_MASK_PSTATE) { enter_pgmcheck(cpu, PGM_PRIVILEGED); return 0; } sccb = env->regs[ipbh0 & 0xf]; code = env->regs[(ipbh0 & 0xf0) >> 4]; r = sclp_service_call(sccb, code); if (r < 0) { enter_pgmcheck(cpu, -r); } setcc(cpu, r); return 0; } | 24,310 |
0 | static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result) { return qcow2_check_refcounts(bs, result); } | 24,313 |
0 | static void qemu_tcg_init_vcpu(CPUState *cpu) { char thread_name[VCPU_THREAD_NAME_SIZE]; static QemuCond *tcg_halt_cond; static QemuThread *tcg_cpu_thread; tcg_cpu_address_space_init(cpu, cpu->as); /* share a single thread for all cpus with TCG */ if (!tcg_cpu_thread) { cpu->thread = g_malloc0(sizeof(QemuThread)); cpu->halt_cond = g_malloc0(sizeof(QemuCond)); qemu_cond_init(cpu->halt_cond); tcg_halt_cond = cpu->halt_cond; snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG", cpu->cpu_index); qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn, cpu, QEMU_THREAD_JOINABLE); #ifdef _WIN32 cpu->hThread = qemu_thread_get_handle(cpu->thread); #endif while (!cpu->created) { qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex); } tcg_cpu_thread = cpu->thread; } else { cpu->thread = tcg_cpu_thread; cpu->halt_cond = tcg_halt_cond; } } | 24,314 |
0 | AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title) { AVChapter *chapter = NULL; int i; for(i=0; i<s->nb_chapters; i++) if(s->chapters[i]->id == id) chapter = s->chapters[i]; if(!chapter){ chapter= av_mallocz(sizeof(AVChapter)); if(!chapter) return NULL; dynarray_add(&s->chapters, &s->nb_chapters, chapter); } if(chapter->title) av_free(chapter->title); chapter->title = av_strdup(title); chapter->id = id; chapter->time_base= time_base; chapter->start = start; chapter->end = end; return chapter; } | 24,316 |
0 | int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes) { QEMUIOVector qiov; struct iovec iov = { .iov_base = (void *)buf, .iov_len = bytes, }; int ret; if (bytes < 0) { return -EINVAL; } qemu_iovec_init_external(&qiov, &iov, 1); ret = bdrv_prwv_co(bs, offset, &qiov, false, 0); if (ret < 0) { return ret; } return bytes; } | 24,317 |
0 | static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel) { const char *rn = "invalid"; if (sel != 0) check_insn(ctx, ISA_MIPS64); if (use_icount) gen_io_start(); switch (reg) { case 0: switch (sel) { case 0: gen_helper_mtc0_index(cpu_env, arg); rn = "Index"; break; case 1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_mvpcontrol(cpu_env, arg); rn = "MVPControl"; break; case 2: CP0_CHECK(ctx->insn_flags & ASE_MT); /* ignored */ rn = "MVPConf0"; break; case 3: CP0_CHECK(ctx->insn_flags & ASE_MT); /* ignored */ rn = "MVPConf1"; break; default: goto cp0_unimplemented; } break; case 1: switch (sel) { case 0: /* ignored */ rn = "Random"; break; case 1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpecontrol(cpu_env, arg); rn = "VPEControl"; break; case 2: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpeconf0(cpu_env, arg); rn = "VPEConf0"; break; case 3: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpeconf1(cpu_env, arg); rn = "VPEConf1"; break; case 4: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_yqmask(cpu_env, arg); rn = "YQMask"; break; case 5: CP0_CHECK(ctx->insn_flags & ASE_MT); tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_VPESchedule)); rn = "VPESchedule"; break; case 6: CP0_CHECK(ctx->insn_flags & ASE_MT); tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_VPEScheFBack)); rn = "VPEScheFBack"; break; case 7: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_vpeopt(cpu_env, arg); rn = "VPEOpt"; break; default: goto cp0_unimplemented; } break; case 2: switch (sel) { case 0: gen_helper_dmtc0_entrylo0(cpu_env, arg); rn = "EntryLo0"; break; case 1: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcstatus(cpu_env, arg); rn = "TCStatus"; break; case 2: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcbind(cpu_env, arg); rn = "TCBind"; break; case 3: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcrestart(cpu_env, arg); rn = "TCRestart"; break; case 4: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tchalt(cpu_env, arg); rn = "TCHalt"; break; case 5: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tccontext(cpu_env, arg); rn = "TCContext"; break; case 6: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcschedule(cpu_env, arg); rn = "TCSchedule"; break; case 7: CP0_CHECK(ctx->insn_flags & ASE_MT); gen_helper_mtc0_tcschefback(cpu_env, arg); rn = "TCScheFBack"; break; default: goto cp0_unimplemented; } break; case 3: switch (sel) { case 0: gen_helper_dmtc0_entrylo1(cpu_env, arg); rn = "EntryLo1"; break; default: goto cp0_unimplemented; } break; case 4: switch (sel) { case 0: gen_helper_mtc0_context(cpu_env, arg); rn = "Context"; break; case 1: // gen_helper_mtc0_contextconfig(cpu_env, arg); /* SmartMIPS ASE */ rn = "ContextConfig"; goto cp0_unimplemented; // break; case 2: CP0_CHECK(ctx->ulri); tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); rn = "UserLocal"; break; default: goto cp0_unimplemented; } break; case 5: switch (sel) { case 0: gen_helper_mtc0_pagemask(cpu_env, arg); rn = "PageMask"; break; case 1: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_pagegrain(cpu_env, arg); rn = "PageGrain"; break; default: goto cp0_unimplemented; } break; case 6: switch (sel) { case 0: gen_helper_mtc0_wired(cpu_env, arg); rn = "Wired"; break; case 1: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf0(cpu_env, arg); rn = "SRSConf0"; break; case 2: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf1(cpu_env, arg); rn = "SRSConf1"; break; case 3: check_insn(ctx, 
ISA_MIPS32R2); gen_helper_mtc0_srsconf2(cpu_env, arg); rn = "SRSConf2"; break; case 4: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf3(cpu_env, arg); rn = "SRSConf3"; break; case 5: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsconf4(cpu_env, arg); rn = "SRSConf4"; break; default: goto cp0_unimplemented; } break; case 7: switch (sel) { case 0: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_hwrena(cpu_env, arg); ctx->bstate = BS_STOP; rn = "HWREna"; break; default: goto cp0_unimplemented; } break; case 8: switch (sel) { case 0: /* ignored */ rn = "BadVAddr"; break; case 1: /* ignored */ rn = "BadInstr"; break; case 2: /* ignored */ rn = "BadInstrP"; break; default: goto cp0_unimplemented; } break; case 9: switch (sel) { case 0: gen_helper_mtc0_count(cpu_env, arg); rn = "Count"; break; /* 6,7 are implementation dependent */ default: goto cp0_unimplemented; } /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; break; case 10: switch (sel) { case 0: gen_helper_mtc0_entryhi(cpu_env, arg); rn = "EntryHi"; break; default: goto cp0_unimplemented; } break; case 11: switch (sel) { case 0: gen_helper_mtc0_compare(cpu_env, arg); rn = "Compare"; break; /* 6,7 are implementation dependent */ default: goto cp0_unimplemented; } /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; break; case 12: switch (sel) { case 0: save_cpu_state(ctx, 1); gen_helper_mtc0_status(cpu_env, arg); /* BS_STOP isn't good enough here, hflags may have changed. */ gen_save_pc(ctx->pc + 4); ctx->bstate = BS_EXCP; rn = "Status"; break; case 1: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_intctl(cpu_env, arg); /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = "IntCtl"; break; case 2: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_srsctl(cpu_env, arg); /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = "SRSCtl"; break; case 3: check_insn(ctx, ISA_MIPS32R2); gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_SRSMap)); /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = "SRSMap"; break; default: goto cp0_unimplemented; } break; case 13: switch (sel) { case 0: save_cpu_state(ctx, 1); /* Mark as an IO operation because we may trigger a software interrupt. 
*/ if (use_icount) { gen_io_start(); } gen_helper_mtc0_cause(cpu_env, arg); if (use_icount) { gen_io_end(); } /* Stop translation as we may have triggered an intetrupt */ ctx->bstate = BS_STOP; rn = "Cause"; break; default: goto cp0_unimplemented; } break; case 14: switch (sel) { case 0: tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC)); rn = "EPC"; break; default: goto cp0_unimplemented; } break; case 15: switch (sel) { case 0: /* ignored */ rn = "PRid"; break; case 1: check_insn(ctx, ISA_MIPS32R2); gen_helper_mtc0_ebase(cpu_env, arg); rn = "EBase"; break; default: goto cp0_unimplemented; } break; case 16: switch (sel) { case 0: gen_helper_mtc0_config0(cpu_env, arg); rn = "Config"; /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; break; case 1: /* ignored, read only */ rn = "Config1"; break; case 2: gen_helper_mtc0_config2(cpu_env, arg); rn = "Config2"; /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; break; case 3: /* ignored */ rn = "Config3"; break; case 4: /* currently ignored */ rn = "Config4"; break; case 5: gen_helper_mtc0_config5(cpu_env, arg); rn = "Config5"; /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; break; /* 6,7 are implementation dependent */ default: rn = "Invalid config selector"; goto cp0_unimplemented; } break; case 17: switch (sel) { case 0: gen_helper_mtc0_lladdr(cpu_env, arg); rn = "LLAddr"; break; default: goto cp0_unimplemented; } break; case 18: switch (sel) { case 0 ... 7: gen_helper_0e1i(mtc0_watchlo, arg, sel); rn = "WatchLo"; break; default: goto cp0_unimplemented; } break; case 19: switch (sel) { case 0 ... 7: gen_helper_0e1i(mtc0_watchhi, arg, sel); rn = "WatchHi"; break; default: goto cp0_unimplemented; } break; case 20: switch (sel) { case 0: check_insn(ctx, ISA_MIPS3); gen_helper_mtc0_xcontext(cpu_env, arg); rn = "XContext"; break; default: goto cp0_unimplemented; } break; case 21: /* Officially reserved, but sel 0 is used for R1x000 framemask */ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6)); switch (sel) { case 0: gen_helper_mtc0_framemask(cpu_env, arg); rn = "Framemask"; break; default: goto cp0_unimplemented; } break; case 22: /* ignored */ rn = "Diagnostic"; /* implementation dependent */ break; case 23: switch (sel) { case 0: gen_helper_mtc0_debug(cpu_env, arg); /* EJTAG support */ /* BS_STOP isn't good enough here, hflags may have changed. 
*/ gen_save_pc(ctx->pc + 4); ctx->bstate = BS_EXCP; rn = "Debug"; break; case 1: // gen_helper_mtc0_tracecontrol(cpu_env, arg); /* PDtrace support */ /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = "TraceControl"; // break; case 2: // gen_helper_mtc0_tracecontrol2(cpu_env, arg); /* PDtrace support */ /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = "TraceControl2"; // break; case 3: // gen_helper_mtc0_usertracedata(cpu_env, arg); /* PDtrace support */ /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = "UserTraceData"; // break; case 4: // gen_helper_mtc0_tracebpc(cpu_env, arg); /* PDtrace support */ /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; rn = "TraceBPC"; // break; default: goto cp0_unimplemented; } break; case 24: switch (sel) { case 0: /* EJTAG support */ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC)); rn = "DEPC"; break; default: goto cp0_unimplemented; } break; case 25: switch (sel) { case 0: gen_helper_mtc0_performance0(cpu_env, arg); rn = "Performance0"; break; case 1: // gen_helper_mtc0_performance1(cpu_env, arg); rn = "Performance1"; // break; case 2: // gen_helper_mtc0_performance2(cpu_env, arg); rn = "Performance2"; // break; case 3: // gen_helper_mtc0_performance3(cpu_env, arg); rn = "Performance3"; // break; case 4: // gen_helper_mtc0_performance4(cpu_env, arg); rn = "Performance4"; // break; case 5: // gen_helper_mtc0_performance5(cpu_env, arg); rn = "Performance5"; // break; case 6: // gen_helper_mtc0_performance6(cpu_env, arg); rn = "Performance6"; // break; case 7: // gen_helper_mtc0_performance7(cpu_env, arg); rn = "Performance7"; // break; default: goto cp0_unimplemented; } break; case 26: /* ignored */ rn = "ECC"; break; case 27: switch (sel) { case 0 ... 3: /* ignored */ rn = "CacheErr"; break; default: goto cp0_unimplemented; } break; case 28: switch (sel) { case 0: case 2: case 4: case 6: gen_helper_mtc0_taglo(cpu_env, arg); rn = "TagLo"; break; case 1: case 3: case 5: case 7: gen_helper_mtc0_datalo(cpu_env, arg); rn = "DataLo"; break; default: goto cp0_unimplemented; } break; case 29: switch (sel) { case 0: case 2: case 4: case 6: gen_helper_mtc0_taghi(cpu_env, arg); rn = "TagHi"; break; case 1: case 3: case 5: case 7: gen_helper_mtc0_datahi(cpu_env, arg); rn = "DataHi"; break; default: rn = "invalid sel"; goto cp0_unimplemented; } break; case 30: switch (sel) { case 0: tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC)); rn = "ErrorEPC"; break; default: goto cp0_unimplemented; } break; case 31: switch (sel) { case 0: /* EJTAG support */ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_DESAVE)); rn = "DESAVE"; break; case 2 ... 7: CP0_CHECK(ctx->kscrexist & (1 << sel)); tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_KScratch[sel-2])); rn = "KScratch"; break; default: goto cp0_unimplemented; } /* Stop translation as we may have switched the execution mode */ ctx->bstate = BS_STOP; break; default: goto cp0_unimplemented; } (void)rn; /* avoid a compiler warning */ LOG_DISAS("dmtc0 %s (reg %d sel %d)\n", rn, reg, sel); /* For simplicity assume that all writes can cause interrupts. */ if (use_icount) { gen_io_end(); ctx->bstate = BS_STOP; } return; cp0_unimplemented: LOG_DISAS("dmtc0 %s (reg %d sel %d)\n", rn, reg, sel); } | 24,319 |
0 | void pc_basic_device_init(ISABus *isa_bus, qemu_irq *gsi, ISADevice **rtc_state, bool create_fdctrl, bool no_vmport, uint32 hpet_irqs) { int i; DriveInfo *fd[MAX_FD]; DeviceState *hpet = NULL; int pit_isa_irq = 0; qemu_irq pit_alt_irq = NULL; qemu_irq rtc_irq = NULL; qemu_irq *a20_line; ISADevice *i8042, *port92, *vmmouse, *pit = NULL; qemu_irq *cpu_exit_irq; MemoryRegion *ioport80_io = g_new(MemoryRegion, 1); MemoryRegion *ioportF0_io = g_new(MemoryRegion, 1); memory_region_init_io(ioport80_io, NULL, &ioport80_io_ops, NULL, "ioport80", 1); memory_region_add_subregion(isa_bus->address_space_io, 0x80, ioport80_io); memory_region_init_io(ioportF0_io, NULL, &ioportF0_io_ops, NULL, "ioportF0", 1); memory_region_add_subregion(isa_bus->address_space_io, 0xf0, ioportF0_io); /* * Check if an HPET shall be created. * * Without KVM_CAP_PIT_STATE2, we cannot switch off the in-kernel PIT * when the HPET wants to take over. Thus we have to disable the latter. */ if (!no_hpet && (!kvm_irqchip_in_kernel() || kvm_has_pit_state2())) { /* In order to set property, here not using sysbus_try_create_simple */ hpet = qdev_try_create(NULL, TYPE_HPET); if (hpet) { /* For pc-piix-*, hpet's intcap is always IRQ2. For pc-q35-1.7 * and earlier, use IRQ2 for compat. Otherwise, use IRQ16~23, * IRQ8 and IRQ2. */ uint8_t compat = object_property_get_int(OBJECT(hpet), HPET_INTCAP, NULL); if (!compat) { qdev_prop_set_uint32(hpet, HPET_INTCAP, hpet_irqs); } qdev_init_nofail(hpet); sysbus_mmio_map(SYS_BUS_DEVICE(hpet), 0, HPET_BASE); for (i = 0; i < GSI_NUM_PINS; i++) { sysbus_connect_irq(SYS_BUS_DEVICE(hpet), i, gsi[i]); } pit_isa_irq = -1; pit_alt_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_PIT_INT); rtc_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_RTC_INT); } } *rtc_state = rtc_init(isa_bus, 2000, rtc_irq); qemu_register_boot_set(pc_boot_set, *rtc_state); if (!xen_enabled()) { if (kvm_irqchip_in_kernel()) { pit = kvm_pit_init(isa_bus, 0x40); } else { pit = pit_init(isa_bus, 0x40, pit_isa_irq, pit_alt_irq); } if (hpet) { /* connect PIT to output control line of the HPET */ qdev_connect_gpio_out(hpet, 0, qdev_get_gpio_in(DEVICE(pit), 0)); } pcspk_init(isa_bus, pit); } serial_hds_isa_init(isa_bus, MAX_SERIAL_PORTS); parallel_hds_isa_init(isa_bus, MAX_PARALLEL_PORTS); a20_line = qemu_allocate_irqs(handle_a20_line_change, first_cpu, 2); i8042 = isa_create_simple(isa_bus, "i8042"); i8042_setup_a20_line(i8042, &a20_line[0]); if (!no_vmport) { vmport_init(isa_bus); vmmouse = isa_try_create(isa_bus, "vmmouse"); } else { vmmouse = NULL; } if (vmmouse) { DeviceState *dev = DEVICE(vmmouse); qdev_prop_set_ptr(dev, "ps2_mouse", i8042); qdev_init_nofail(dev); } port92 = isa_create_simple(isa_bus, "port92"); port92_init(port92, &a20_line[1]); cpu_exit_irq = qemu_allocate_irqs(cpu_request_exit, NULL, 1); DMA_init(0, cpu_exit_irq); for(i = 0; i < MAX_FD; i++) { fd[i] = drive_get(IF_FLOPPY, 0, i); create_fdctrl |= !!fd[i]; } if (create_fdctrl) { fdctrl_init_isa(isa_bus, fd); } } | 24,321 |
0 | static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename) { URI *uri; QueryParams *qp = NULL; bool is_unix = false; int ret = 0; uri = uri_parse(filename); if (!uri) { return -EINVAL; } /* transport */ if (!strcmp(uri->scheme, "gluster")) { gconf->transport = g_strdup("tcp"); } else if (!strcmp(uri->scheme, "gluster+tcp")) { gconf->transport = g_strdup("tcp"); } else if (!strcmp(uri->scheme, "gluster+unix")) { gconf->transport = g_strdup("unix"); is_unix = true; } else if (!strcmp(uri->scheme, "gluster+rdma")) { gconf->transport = g_strdup("rdma"); } else { ret = -EINVAL; goto out; } ret = parse_volume_options(gconf, uri->path); if (ret < 0) { goto out; } qp = query_params_parse(uri->query); if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) { ret = -EINVAL; goto out; } if (is_unix) { if (uri->server || uri->port) { ret = -EINVAL; goto out; } if (strcmp(qp->p[0].name, "socket")) { ret = -EINVAL; goto out; } gconf->server = g_strdup(qp->p[0].value); } else { gconf->server = g_strdup(uri->server); gconf->port = uri->port; } out: if (qp) { query_params_free(qp); } uri_free(uri); return ret; } | 24,322 |
0 | static void omap_prcm_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { struct omap_prcm_s *s = (struct omap_prcm_s *) opaque; if (size != 4) { omap_badwidth_write32(opaque, addr, value); return; } switch (addr) { case 0x000: /* PRCM_REVISION */ case 0x054: /* PRCM_VOLTST */ case 0x084: /* PRCM_CLKCFG_STATUS */ case 0x1e4: /* PM_PWSTST_MPU */ case 0x220: /* CM_IDLEST1_CORE */ case 0x224: /* CM_IDLEST2_CORE */ case 0x22c: /* CM_IDLEST4_CORE */ case 0x2c8: /* PM_WKDEP_CORE */ case 0x2e4: /* PM_PWSTST_CORE */ case 0x320: /* CM_IDLEST_GFX */ case 0x3e4: /* PM_PWSTST_GFX */ case 0x420: /* CM_IDLEST_WKUP */ case 0x520: /* CM_IDLEST_CKGEN */ case 0x820: /* CM_IDLEST_DSP */ case 0x8e4: /* PM_PWSTST_DSP */ OMAP_RO_REG(addr); return; case 0x010: /* PRCM_SYSCONFIG */ s->sysconfig = value & 1; break; case 0x018: /* PRCM_IRQSTATUS_MPU */ s->irqst[0] &= ~value; omap_prcm_int_update(s, 0); break; case 0x01c: /* PRCM_IRQENABLE_MPU */ s->irqen[0] = value & 0x3f; omap_prcm_int_update(s, 0); break; case 0x050: /* PRCM_VOLTCTRL */ s->voltctrl = value & 0xf1c3; break; case 0x060: /* PRCM_CLKSRC_CTRL */ s->clksrc[0] = value & 0xdb; /* TODO update clocks */ break; case 0x070: /* PRCM_CLKOUT_CTRL */ s->clkout[0] = value & 0xbbbb; /* TODO update clocks */ break; case 0x078: /* PRCM_CLKEMUL_CTRL */ s->clkemul[0] = value & 1; /* TODO update clocks */ break; case 0x080: /* PRCM_CLKCFG_CTRL */ break; case 0x090: /* PRCM_VOLTSETUP */ s->setuptime[0] = value & 0xffff; break; case 0x094: /* PRCM_CLKSSETUP */ s->setuptime[1] = value & 0xffff; break; case 0x098: /* PRCM_POLCTRL */ s->clkpol[0] = value & 0x701; break; case 0x0b0: /* GENERAL_PURPOSE1 */ case 0x0b4: /* GENERAL_PURPOSE2 */ case 0x0b8: /* GENERAL_PURPOSE3 */ case 0x0bc: /* GENERAL_PURPOSE4 */ case 0x0c0: /* GENERAL_PURPOSE5 */ case 0x0c4: /* GENERAL_PURPOSE6 */ case 0x0c8: /* GENERAL_PURPOSE7 */ case 0x0cc: /* GENERAL_PURPOSE8 */ case 0x0d0: /* GENERAL_PURPOSE9 */ case 0x0d4: /* GENERAL_PURPOSE10 */ case 0x0d8: /* GENERAL_PURPOSE11 */ case 0x0dc: /* GENERAL_PURPOSE12 */ case 0x0e0: /* GENERAL_PURPOSE13 */ case 0x0e4: /* GENERAL_PURPOSE14 */ case 0x0e8: /* GENERAL_PURPOSE15 */ case 0x0ec: /* GENERAL_PURPOSE16 */ case 0x0f0: /* GENERAL_PURPOSE17 */ case 0x0f4: /* GENERAL_PURPOSE18 */ case 0x0f8: /* GENERAL_PURPOSE19 */ case 0x0fc: /* GENERAL_PURPOSE20 */ s->scratch[(addr - 0xb0) >> 2] = value; break; case 0x140: /* CM_CLKSEL_MPU */ s->clksel[0] = value & 0x1f; /* TODO update clocks */ break; case 0x148: /* CM_CLKSTCTRL_MPU */ s->clkctrl[0] = value & 0x1f; break; case 0x158: /* RM_RSTST_MPU */ s->rst[0] &= ~value; break; case 0x1c8: /* PM_WKDEP_MPU */ s->wkup[0] = value & 0x15; break; case 0x1d4: /* PM_EVGENCTRL_MPU */ s->ev = value & 0x1f; break; case 0x1d8: /* PM_EVEGENONTIM_MPU */ s->evtime[0] = value; break; case 0x1dc: /* PM_EVEGENOFFTIM_MPU */ s->evtime[1] = value; break; case 0x1e0: /* PM_PWSTCTRL_MPU */ s->power[0] = value & 0xc0f; break; case 0x200: /* CM_FCLKEN1_CORE */ s->clken[0] = value & 0xbfffffff; /* TODO update clocks */ /* The EN_EAC bit only gets/puts func_96m_clk. */ break; case 0x204: /* CM_FCLKEN2_CORE */ s->clken[1] = value & 0x00000007; /* TODO update clocks */ break; case 0x210: /* CM_ICLKEN1_CORE */ s->clken[2] = value & 0xfffffff9; /* TODO update clocks */ /* The EN_EAC bit only gets/puts core_l4_iclk. 
*/ break; case 0x214: /* CM_ICLKEN2_CORE */ s->clken[3] = value & 0x00000007; /* TODO update clocks */ break; case 0x21c: /* CM_ICLKEN4_CORE */ s->clken[4] = value & 0x0000001f; /* TODO update clocks */ break; case 0x230: /* CM_AUTOIDLE1_CORE */ s->clkidle[0] = value & 0xfffffff9; /* TODO update clocks */ break; case 0x234: /* CM_AUTOIDLE2_CORE */ s->clkidle[1] = value & 0x00000007; /* TODO update clocks */ break; case 0x238: /* CM_AUTOIDLE3_CORE */ s->clkidle[2] = value & 0x00000007; /* TODO update clocks */ break; case 0x23c: /* CM_AUTOIDLE4_CORE */ s->clkidle[3] = value & 0x0000001f; /* TODO update clocks */ break; case 0x240: /* CM_CLKSEL1_CORE */ s->clksel[1] = value & 0x0fffbf7f; /* TODO update clocks */ break; case 0x244: /* CM_CLKSEL2_CORE */ s->clksel[2] = value & 0x00fffffc; /* TODO update clocks */ break; case 0x248: /* CM_CLKSTCTRL_CORE */ s->clkctrl[1] = value & 0x7; break; case 0x2a0: /* PM_WKEN1_CORE */ s->wken[0] = value & 0x04667ff8; break; case 0x2a4: /* PM_WKEN2_CORE */ s->wken[1] = value & 0x00000005; break; case 0x2b0: /* PM_WKST1_CORE */ s->wkst[0] &= ~value; break; case 0x2b4: /* PM_WKST2_CORE */ s->wkst[1] &= ~value; break; case 0x2e0: /* PM_PWSTCTRL_CORE */ s->power[1] = (value & 0x00fc3f) | (1 << 2); break; case 0x300: /* CM_FCLKEN_GFX */ s->clken[5] = value & 6; /* TODO update clocks */ break; case 0x310: /* CM_ICLKEN_GFX */ s->clken[6] = value & 1; /* TODO update clocks */ break; case 0x340: /* CM_CLKSEL_GFX */ s->clksel[3] = value & 7; /* TODO update clocks */ break; case 0x348: /* CM_CLKSTCTRL_GFX */ s->clkctrl[2] = value & 1; break; case 0x350: /* RM_RSTCTRL_GFX */ s->rstctrl[0] = value & 1; /* TODO: reset */ break; case 0x358: /* RM_RSTST_GFX */ s->rst[1] &= ~value; break; case 0x3c8: /* PM_WKDEP_GFX */ s->wkup[1] = value & 0x13; break; case 0x3e0: /* PM_PWSTCTRL_GFX */ s->power[2] = (value & 0x00c0f) | (3 << 2); break; case 0x400: /* CM_FCLKEN_WKUP */ s->clken[7] = value & 0xd; /* TODO update clocks */ break; case 0x410: /* CM_ICLKEN_WKUP */ s->clken[8] = value & 0x3f; /* TODO update clocks */ break; case 0x430: /* CM_AUTOIDLE_WKUP */ s->clkidle[4] = value & 0x0000003f; /* TODO update clocks */ break; case 0x440: /* CM_CLKSEL_WKUP */ s->clksel[4] = value & 3; /* TODO update clocks */ break; case 0x450: /* RM_RSTCTRL_WKUP */ /* TODO: reset */ if (value & 2) qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); break; case 0x454: /* RM_RSTTIME_WKUP */ s->rsttime_wkup = value & 0x1fff; break; case 0x458: /* RM_RSTST_WKUP */ s->rst[2] &= ~value; break; case 0x4a0: /* PM_WKEN_WKUP */ s->wken[2] = value & 0x00000005; break; case 0x4b0: /* PM_WKST_WKUP */ s->wkst[2] &= ~value; break; case 0x500: /* CM_CLKEN_PLL */ if (value & 0xffffff30) fprintf(stderr, "%s: write 0s in CM_CLKEN_PLL for " "future compatibility\n", __FUNCTION__); if ((s->clken[9] ^ value) & 0xcc) { s->clken[9] &= ~0xcc; s->clken[9] |= value & 0xcc; omap_prcm_apll_update(s); } if ((s->clken[9] ^ value) & 3) { s->clken[9] &= ~3; s->clken[9] |= value & 3; omap_prcm_dpll_update(s); } break; case 0x530: /* CM_AUTOIDLE_PLL */ s->clkidle[5] = value & 0x000000cf; /* TODO update clocks */ break; case 0x540: /* CM_CLKSEL1_PLL */ if (value & 0xfc4000d7) fprintf(stderr, "%s: write 0s in CM_CLKSEL1_PLL for " "future compatibility\n", __FUNCTION__); if ((s->clksel[5] ^ value) & 0x003fff00) { s->clksel[5] = value & 0x03bfff28; omap_prcm_dpll_update(s); } /* TODO update the other clocks */ s->clksel[5] = value & 0x03bfff28; break; case 0x544: /* CM_CLKSEL2_PLL */ if (value & ~3) fprintf(stderr, "%s: write 0s in 
CM_CLKSEL2_PLL[31:2] for " "future compatibility\n", __FUNCTION__); if (s->clksel[6] != (value & 3)) { s->clksel[6] = value & 3; omap_prcm_dpll_update(s); } break; case 0x800: /* CM_FCLKEN_DSP */ s->clken[10] = value & 0x501; /* TODO update clocks */ break; case 0x810: /* CM_ICLKEN_DSP */ s->clken[11] = value & 0x2; /* TODO update clocks */ break; case 0x830: /* CM_AUTOIDLE_DSP */ s->clkidle[6] = value & 0x2; /* TODO update clocks */ break; case 0x840: /* CM_CLKSEL_DSP */ s->clksel[7] = value & 0x3fff; /* TODO update clocks */ break; case 0x848: /* CM_CLKSTCTRL_DSP */ s->clkctrl[3] = value & 0x101; break; case 0x850: /* RM_RSTCTRL_DSP */ /* TODO: reset */ break; case 0x858: /* RM_RSTST_DSP */ s->rst[3] &= ~value; break; case 0x8c8: /* PM_WKDEP_DSP */ s->wkup[2] = value & 0x13; break; case 0x8e0: /* PM_PWSTCTRL_DSP */ s->power[3] = (value & 0x03017) | (3 << 2); break; case 0x8f0: /* PRCM_IRQSTATUS_DSP */ s->irqst[1] &= ~value; omap_prcm_int_update(s, 1); break; case 0x8f4: /* PRCM_IRQENABLE_DSP */ s->irqen[1] = value & 0x7; omap_prcm_int_update(s, 1); break; case 0x8f8: /* PRCM_IRQSTATUS_IVA */ s->irqst[2] &= ~value; omap_prcm_int_update(s, 2); break; case 0x8fc: /* PRCM_IRQENABLE_IVA */ s->irqen[2] = value & 0x7; omap_prcm_int_update(s, 2); break; default: OMAP_BAD_REG(addr); return; } } | 24,323 |
0 | int kvm_arch_init(KVMState *s, int smp_cpus) { int ret; struct utsname utsname; uname(&utsname); lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0; /* create vm86 tss. KVM uses vm86 mode to emulate 16-bit code * directly. In order to use vm86 mode, a TSS is needed. Since this * must be part of guest physical memory, we need to allocate it. Older * versions of KVM just assumed that it would be at the end of physical * memory but that doesn't work with more than 4GB of memory. We simply * refuse to work with those older versions of KVM. */ ret = kvm_check_extension(s, KVM_CAP_SET_TSS_ADDR); if (ret <= 0) { fprintf(stderr, "kvm does not support KVM_CAP_SET_TSS_ADDR\n"); return ret; } /* this address is 3 pages before the bios, and the bios should present * as unavaible memory. FIXME, need to ensure the e820 map deals with * this? */ /* * Tell fw_cfg to notify the BIOS to reserve the range. */ if (e820_add_entry(0xfffbc000, 0x4000, E820_RESERVED) < 0) { perror("e820_add_entry() table is full"); exit(1); } ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, 0xfffbd000); if (ret < 0) { return ret; } return kvm_init_identity_map_page(s); } | 24,324 |
0 | static void bootp_reply(Slirp *slirp, const struct bootp_t *bp) { BOOTPClient *bc = NULL; struct mbuf *m; struct bootp_t *rbp; struct sockaddr_in saddr, daddr; struct in_addr preq_addr; int dhcp_msg_type, val; uint8_t *q; uint8_t client_ethaddr[ETH_ALEN]; /* extract exact DHCP msg type */ dhcp_decode(bp, &dhcp_msg_type, &preq_addr); DPRINTF("bootp packet op=%d msgtype=%d", bp->bp_op, dhcp_msg_type); if (preq_addr.s_addr != htonl(0L)) DPRINTF(" req_addr=%08" PRIx32 "\n", ntohl(preq_addr.s_addr)); else DPRINTF("\n"); if (dhcp_msg_type == 0) dhcp_msg_type = DHCPREQUEST; /* Force reply for old BOOTP clients */ if (dhcp_msg_type != DHCPDISCOVER && dhcp_msg_type != DHCPREQUEST) return; /* Get client's hardware address from bootp request */ memcpy(client_ethaddr, bp->bp_hwaddr, ETH_ALEN); m = m_get(slirp); if (!m) { return; } m->m_data += IF_MAXLINKHDR; rbp = (struct bootp_t *)m->m_data; m->m_data += sizeof(struct udpiphdr); memset(rbp, 0, sizeof(struct bootp_t)); if (dhcp_msg_type == DHCPDISCOVER) { if (preq_addr.s_addr != htonl(0L)) { bc = request_addr(slirp, &preq_addr, client_ethaddr); if (bc) { daddr.sin_addr = preq_addr; } } if (!bc) { new_addr: bc = get_new_addr(slirp, &daddr.sin_addr, client_ethaddr); if (!bc) { DPRINTF("no address left\n"); return; } } memcpy(bc->macaddr, client_ethaddr, ETH_ALEN); } else if (preq_addr.s_addr != htonl(0L)) { bc = request_addr(slirp, &preq_addr, client_ethaddr); if (bc) { daddr.sin_addr = preq_addr; memcpy(bc->macaddr, client_ethaddr, ETH_ALEN); } else { /* DHCPNAKs should be sent to broadcast */ daddr.sin_addr.s_addr = 0xffffffff; } } else { bc = find_addr(slirp, &daddr.sin_addr, bp->bp_hwaddr); if (!bc) { /* if never assigned, behaves as if it was already assigned (windows fix because it remembers its address) */ goto new_addr; } } /* Update ARP table for this IP address */ arp_table_add(slirp, daddr.sin_addr.s_addr, client_ethaddr); saddr.sin_addr = slirp->vhost_addr; saddr.sin_port = htons(BOOTP_SERVER); daddr.sin_port = htons(BOOTP_CLIENT); rbp->bp_op = BOOTP_REPLY; rbp->bp_xid = bp->bp_xid; rbp->bp_htype = 1; rbp->bp_hlen = 6; memcpy(rbp->bp_hwaddr, bp->bp_hwaddr, ETH_ALEN); rbp->bp_yiaddr = daddr.sin_addr; /* Client IP address */ rbp->bp_siaddr = saddr.sin_addr; /* Server IP address */ q = rbp->bp_vend; memcpy(q, rfc1533_cookie, 4); q += 4; if (bc) { DPRINTF("%s addr=%08" PRIx32 "\n", (dhcp_msg_type == DHCPDISCOVER) ? 
"offered" : "ack'ed", ntohl(daddr.sin_addr.s_addr)); if (dhcp_msg_type == DHCPDISCOVER) { *q++ = RFC2132_MSG_TYPE; *q++ = 1; *q++ = DHCPOFFER; } else /* DHCPREQUEST */ { *q++ = RFC2132_MSG_TYPE; *q++ = 1; *q++ = DHCPACK; } if (slirp->bootp_filename) snprintf((char *)rbp->bp_file, sizeof(rbp->bp_file), "%s", slirp->bootp_filename); *q++ = RFC2132_SRV_ID; *q++ = 4; memcpy(q, &saddr.sin_addr, 4); q += 4; *q++ = RFC1533_NETMASK; *q++ = 4; memcpy(q, &slirp->vnetwork_mask, 4); q += 4; if (!slirp->restricted) { *q++ = RFC1533_GATEWAY; *q++ = 4; memcpy(q, &saddr.sin_addr, 4); q += 4; *q++ = RFC1533_DNS; *q++ = 4; memcpy(q, &slirp->vnameserver_addr, 4); q += 4; } *q++ = RFC2132_LEASE_TIME; *q++ = 4; val = htonl(LEASE_TIME); memcpy(q, &val, 4); q += 4; if (*slirp->client_hostname) { val = strlen(slirp->client_hostname); *q++ = RFC1533_HOSTNAME; *q++ = val; memcpy(q, slirp->client_hostname, val); q += val; } if (slirp->vdnssearch) { size_t spaceleft = sizeof(rbp->bp_vend) - (q - rbp->bp_vend); val = slirp->vdnssearch_len; if (val + 1 > spaceleft) { g_warning("DHCP packet size exceeded, " "omitting domain-search option."); } else { memcpy(q, slirp->vdnssearch, val); q += val; } } } else { static const char nak_msg[] = "requested address not available"; DPRINTF("nak'ed addr=%08" PRIx32 "\n", ntohl(preq_addr.s_addr)); *q++ = RFC2132_MSG_TYPE; *q++ = 1; *q++ = DHCPNAK; *q++ = RFC2132_MESSAGE; *q++ = sizeof(nak_msg) - 1; memcpy(q, nak_msg, sizeof(nak_msg) - 1); q += sizeof(nak_msg) - 1; } *q = RFC1533_END; daddr.sin_addr.s_addr = 0xffffffffu; m->m_len = sizeof(struct bootp_t) - sizeof(struct ip) - sizeof(struct udphdr); udp_output2(NULL, m, &saddr, &daddr, IPTOS_LOWDELAY); } | 24,325 |
1 | static void hmp_logfile(Monitor *mon, const QDict *qdict) { qemu_set_log_filename(qdict_get_str(qdict, "filename")); } | 24,327 |
1 | iscsi_abort_task_cb(struct iscsi_context *iscsi, int status, void *command_data, void *private_data) { IscsiAIOCB *acb = (IscsiAIOCB *)private_data; scsi_free_scsi_task(acb->task); acb->task = NULL; } | 24,329 |
1 | int bdrv_all_delete_snapshot(const char *name, BlockDriverState **first_bad_bs, Error **err) { int ret = 0; BlockDriverState *bs; BdrvNextIterator *it = NULL; QEMUSnapshotInfo sn1, *snapshot = &sn1; while (ret == 0 && (it = bdrv_next(it, &bs))) { AioContext *ctx = bdrv_get_aio_context(bs); aio_context_acquire(ctx); if (bdrv_can_snapshot(bs) && bdrv_snapshot_find(bs, snapshot, name) >= 0) { ret = bdrv_snapshot_delete_by_id_or_name(bs, name, err); } aio_context_release(ctx); } *first_bad_bs = bs; return ret; } | 24,330 |
0 | int ff_hevc_split_packet(HEVCContext *s, HEVCPacket *pkt, const uint8_t *buf, int length, AVCodecContext *avctx, int is_nalff, int nal_length_size) { int consumed, ret = 0; pkt->nb_nals = 0; while (length >= 4) { HEVCNAL *nal; int extract_length = 0; if (is_nalff) { int i; for (i = 0; i < nal_length_size; i++) extract_length = (extract_length << 8) | buf[i]; buf += nal_length_size; length -= nal_length_size; if (extract_length > length) { av_log(avctx, AV_LOG_ERROR, "Invalid NAL unit size.\n"); return AVERROR_INVALIDDATA; } } else { /* search start code */ while (buf[0] != 0 || buf[1] != 0 || buf[2] != 1) { ++buf; --length; if (length < 4) { if (pkt->nb_nals > 0) { // No more start codes: we discarded some irrelevant // bytes at the end of the packet. return 0; } else { av_log(avctx, AV_LOG_ERROR, "No start code is found.\n"); return AVERROR_INVALIDDATA; } } } buf += 3; length -= 3; extract_length = length; } if (pkt->nals_allocated < pkt->nb_nals + 1) { int new_size = pkt->nals_allocated + 1; void *tmp = av_realloc_array(pkt->nals, new_size, sizeof(*pkt->nals)); if (!tmp) return AVERROR(ENOMEM); pkt->nals = tmp; memset(pkt->nals + pkt->nals_allocated, 0, (new_size - pkt->nals_allocated) * sizeof(*pkt->nals)); nal = &pkt->nals[pkt->nb_nals]; nal->skipped_bytes_pos_size = 1024; // initial buffer size nal->skipped_bytes_pos = av_malloc_array(nal->skipped_bytes_pos_size, sizeof(*nal->skipped_bytes_pos)); if (!nal->skipped_bytes_pos) return AVERROR(ENOMEM); pkt->nals_allocated = new_size; } nal = &pkt->nals[pkt->nb_nals]; consumed = ff_hevc_extract_rbsp(s, buf, extract_length, nal); if (consumed < 0) return consumed; pkt->nb_nals++; ret = init_get_bits8(&nal->gb, nal->data, nal->size); if (ret < 0) return ret; ret = hls_nal_unit(nal, avctx); if (ret <= 0) { if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Invalid NAL unit %d, skipping.\n", nal->type); } pkt->nb_nals--; } buf += consumed; length -= consumed; } return 0; } | 24,332 |
1 | static int nvme_start_ctrl(NvmeCtrl *n) { uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12; uint32_t page_size = 1 << page_bits; if (n->cq[0] || n->sq[0] || !n->bar.asq || !n->bar.acq || n->bar.asq & (page_size - 1) || n->bar.acq & (page_size - 1) || NVME_CC_MPS(n->bar.cc) < NVME_CAP_MPSMIN(n->bar.cap) || NVME_CC_MPS(n->bar.cc) > NVME_CAP_MPSMAX(n->bar.cap) || NVME_CC_IOCQES(n->bar.cc) < NVME_CTRL_CQES_MIN(n->id_ctrl.cqes) || NVME_CC_IOCQES(n->bar.cc) > NVME_CTRL_CQES_MAX(n->id_ctrl.cqes) || NVME_CC_IOSQES(n->bar.cc) < NVME_CTRL_SQES_MIN(n->id_ctrl.sqes) || NVME_CC_IOSQES(n->bar.cc) > NVME_CTRL_SQES_MAX(n->id_ctrl.sqes) || !NVME_AQA_ASQS(n->bar.aqa) || !NVME_AQA_ACQS(n->bar.aqa)) { return -1; } n->page_bits = page_bits; n->page_size = page_size; n->max_prp_ents = n->page_size / sizeof(uint64_t); n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc); n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc); nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0, NVME_AQA_ACQS(n->bar.aqa) + 1, 1); nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0, NVME_AQA_ASQS(n->bar.aqa) + 1); return 0; } | 24,333 |
1 | static int mxf_decrypt_triplet(AVFormatContext *s, AVPacket *pkt, KLVPacket *klv) { static const uint8_t checkv[16] = {0x43, 0x48, 0x55, 0x4b, 0x43, 0x48, 0x55, 0x4b, 0x43, 0x48, 0x55, 0x4b, 0x43, 0x48, 0x55, 0x4b}; MXFContext *mxf = s->priv_data; AVIOContext *pb = s->pb; int64_t end = avio_tell(pb) + klv->length; uint64_t size; uint64_t orig_size; uint64_t plaintext_size; uint8_t ivec[16]; uint8_t tmpbuf[16]; int index; if (!mxf->aesc && s->key && s->keylen == 16) { mxf->aesc = av_malloc(av_aes_size); if (!mxf->aesc) return -1; av_aes_init(mxf->aesc, s->key, 128, 1); } // crypto context avio_skip(pb, klv_decode_ber_length(pb)); // plaintext offset klv_decode_ber_length(pb); plaintext_size = avio_rb64(pb); // source klv key klv_decode_ber_length(pb); avio_read(pb, klv->key, 16); if (!IS_KLV_KEY(klv, mxf_essence_element_key)) return -1; index = mxf_get_stream_index(s, klv); if (index < 0) return -1; // source size klv_decode_ber_length(pb); orig_size = avio_rb64(pb); if (orig_size < plaintext_size) return -1; // enc. code size = klv_decode_ber_length(pb); if (size < 32 || size - 32 < orig_size) return -1; avio_read(pb, ivec, 16); avio_read(pb, tmpbuf, 16); if (mxf->aesc) av_aes_crypt(mxf->aesc, tmpbuf, tmpbuf, 1, ivec, 1); if (memcmp(tmpbuf, checkv, 16)) av_log(s, AV_LOG_ERROR, "probably incorrect decryption key\n"); size -= 32; av_get_packet(pb, pkt, size); size -= plaintext_size; if (mxf->aesc) av_aes_crypt(mxf->aesc, &pkt->data[plaintext_size], &pkt->data[plaintext_size], size >> 4, ivec, 1); pkt->size = orig_size; pkt->stream_index = index; avio_skip(pb, end - avio_tell(pb)); return 0; } | 24,335 |
1 | void qemu_aio_wait_end(void) { } | 24,337 |
1 | void avfilter_register_all(void) { static int initialized; if (initialized) return; initialized = 1; REGISTER_FILTER (ACONVERT, aconvert, af); REGISTER_FILTER (AFIFO, afifo, af); REGISTER_FILTER (AFORMAT, aformat, af); REGISTER_FILTER (AMERGE, amerge, af); REGISTER_FILTER (AMIX, amix, af); REGISTER_FILTER (ANULL, anull, af); REGISTER_FILTER (ARESAMPLE, aresample, af); REGISTER_FILTER (ASETNSAMPLES, asetnsamples, af); REGISTER_FILTER (ASETPTS, asetpts, af); REGISTER_FILTER (ASETTB, asettb, af); REGISTER_FILTER (ASHOWINFO, ashowinfo, af); REGISTER_FILTER (ASPLIT, asplit, af); REGISTER_FILTER (ASTREAMSYNC, astreamsync, af); REGISTER_FILTER (ASYNCTS, asyncts, af); REGISTER_FILTER (ATEMPO, atempo, af); REGISTER_FILTER (CHANNELMAP, channelmap, af); REGISTER_FILTER (CHANNELSPLIT,channelsplit,af); REGISTER_FILTER (EARWAX, earwax, af); REGISTER_FILTER (JOIN, join, af); REGISTER_FILTER (PAN, pan, af); REGISTER_FILTER (SILENCEDETECT, silencedetect, af); REGISTER_FILTER (VOLUME, volume, af); REGISTER_FILTER (VOLUMEDETECT,volumedetect,af); REGISTER_FILTER (RESAMPLE, resample, af); REGISTER_FILTER (AEVALSRC, aevalsrc, asrc); REGISTER_FILTER (ANULLSRC, anullsrc, asrc); REGISTER_FILTER (FLITE, flite, asrc); REGISTER_FILTER (ABUFFERSINK, abuffersink, asink); REGISTER_FILTER (ANULLSINK, anullsink, asink); REGISTER_FILTER (ALPHAEXTRACT, alphaextract, vf); REGISTER_FILTER (ALPHAMERGE, alphamerge, vf); REGISTER_FILTER (ASS, ass, vf); REGISTER_FILTER (BBOX, bbox, vf); REGISTER_FILTER (BLACKDETECT, blackdetect, vf); REGISTER_FILTER (BLACKFRAME, blackframe, vf); REGISTER_FILTER (BOXBLUR, boxblur, vf); REGISTER_FILTER (COLORMATRIX, colormatrix, vf); REGISTER_FILTER (COPY, copy, vf); REGISTER_FILTER (CROP, crop, vf); REGISTER_FILTER (CROPDETECT, cropdetect, vf); REGISTER_FILTER (DECIMATE, decimate, vf); REGISTER_FILTER (DELOGO, delogo, vf); REGISTER_FILTER (DESHAKE, deshake, vf); REGISTER_FILTER (DRAWBOX, drawbox, vf); REGISTER_FILTER (DRAWTEXT, drawtext, vf); REGISTER_FILTER (EDGEDETECT, edgedetect, vf); REGISTER_FILTER (FADE, fade, vf); REGISTER_FILTER (FIELDORDER, fieldorder, vf); REGISTER_FILTER (FIFO, fifo, vf); REGISTER_FILTER (FORMAT, format, vf); REGISTER_FILTER (FPS, fps, vf); REGISTER_FILTER (FRAMESTEP, framestep, vf); REGISTER_FILTER (FREI0R, frei0r, vf); REGISTER_FILTER (GRADFUN, gradfun, vf); REGISTER_FILTER (HFLIP, hflip, vf); REGISTER_FILTER (HQDN3D, hqdn3d, vf); REGISTER_FILTER (HUE, hue, vf); REGISTER_FILTER (IDET, idet, vf); REGISTER_FILTER (LUT, lut, vf); REGISTER_FILTER (LUTRGB, lutrgb, vf); REGISTER_FILTER (LUTYUV, lutyuv, vf); REGISTER_FILTER (MP, mp, vf); REGISTER_FILTER (NEGATE, negate, vf); REGISTER_FILTER (NOFORMAT, noformat, vf); REGISTER_FILTER (NULL, null, vf); REGISTER_FILTER (OCV, ocv, vf); REGISTER_FILTER (OVERLAY, overlay, vf); REGISTER_FILTER (PAD, pad, vf); REGISTER_FILTER (PIXDESCTEST, pixdesctest, vf); REGISTER_FILTER (REMOVELOGO, removelogo, vf); REGISTER_FILTER (SCALE, scale, vf); REGISTER_FILTER (SELECT, select, vf); REGISTER_FILTER (SETDAR, setdar, vf); REGISTER_FILTER (SETFIELD, setfield, vf); REGISTER_FILTER (SETPTS, setpts, vf); REGISTER_FILTER (SETSAR, setsar, vf); REGISTER_FILTER (SETTB, settb, vf); REGISTER_FILTER (SHOWINFO, showinfo, vf); REGISTER_FILTER (SLICIFY, slicify, vf); REGISTER_FILTER (SMARTBLUR, smartblur, vf); REGISTER_FILTER (SPLIT, split, vf); REGISTER_FILTER (SUPER2XSAI, super2xsai, vf); REGISTER_FILTER (SWAPUV, swapuv, vf); REGISTER_FILTER (THUMBNAIL, thumbnail, vf); REGISTER_FILTER (TILE, tile, vf); REGISTER_FILTER (TINTERLACE, tinterlace, vf); 
REGISTER_FILTER (TRANSPOSE, transpose, vf); REGISTER_FILTER (UNSHARP, unsharp, vf); REGISTER_FILTER (VFLIP, vflip, vf); REGISTER_FILTER (YADIF, yadif, vf); REGISTER_FILTER (CELLAUTO, cellauto, vsrc); REGISTER_FILTER (COLOR, color, vsrc); REGISTER_FILTER (FREI0R, frei0r_src, vsrc); REGISTER_FILTER (LIFE, life, vsrc); REGISTER_FILTER (MANDELBROT, mandelbrot, vsrc); REGISTER_FILTER (MPTESTSRC, mptestsrc, vsrc); REGISTER_FILTER (NULLSRC, nullsrc, vsrc); REGISTER_FILTER (RGBTESTSRC, rgbtestsrc, vsrc); REGISTER_FILTER (SMPTEBARS, smptebars, vsrc); REGISTER_FILTER (TESTSRC, testsrc, vsrc); REGISTER_FILTER (BUFFERSINK, buffersink, vsink); REGISTER_FILTER (FFBUFFERSINK,ffbuffersink,vsink); REGISTER_FILTER (NULLSINK, nullsink, vsink); /* multimedia filters */ REGISTER_FILTER (CONCAT, concat, avf); REGISTER_FILTER (SHOWSPECTRUM,showspectrum,avf); REGISTER_FILTER (SHOWWAVES, showwaves, avf); /* multimedia sources */ REGISTER_FILTER (AMOVIE, amovie, avsrc); REGISTER_FILTER (MOVIE, movie, avsrc); /* those filters are part of public or internal API => registered * unconditionally */ { extern AVFilter avfilter_vsrc_buffer; avfilter_register(&avfilter_vsrc_buffer); } { extern AVFilter avfilter_asrc_abuffer; avfilter_register(&avfilter_asrc_abuffer); } { extern AVFilter avfilter_vsink_buffer; avfilter_register(&avfilter_vsink_buffer); } { extern AVFilter avfilter_asink_abuffer; avfilter_register(&avfilter_asink_abuffer); } } | 24,338 |
0 | void show_banner(void) { fprintf(stderr, "%s version " FFMPEG_VERSION ", Copyright (c) %d-%d the FFmpeg developers\n", program_name, program_birth_year, this_year); fprintf(stderr, " built on %s %s with %s %s\n", __DATE__, __TIME__, CC_TYPE, CC_VERSION); fprintf(stderr, " configuration: " FFMPEG_CONFIGURATION "\n"); print_all_libs_info(stderr, INDENT|SHOW_CONFIG); print_all_libs_info(stderr, INDENT|SHOW_VERSION); } | 24,341 |
0 | static int decode_delta_block (bit_buffer_t *bitbuf, uint8_t *current, uint8_t *previous, int pitch, svq1_pmv_t *motion, int x, int y) { uint32_t bit_cache; uint32_t block_type; int result = 0; /* get block type */ bit_cache = get_bit_cache (bitbuf); bit_cache >>= (32 - 3); block_type = block_type_table[bit_cache].value; skip_bits(bitbuf,block_type_table[bit_cache].length); /* reset motion vectors */ if (block_type == SVQ1_BLOCK_SKIP || block_type == SVQ1_BLOCK_INTRA) { motion[0].x = 0; motion[0].y = 0; motion[(x / 8) + 2].x = 0; motion[(x / 8) + 2].y = 0; motion[(x / 8) + 3].x = 0; motion[(x / 8) + 3].y = 0; } switch (block_type) { case SVQ1_BLOCK_SKIP: skip_block (current, previous, pitch, x, y); break; case SVQ1_BLOCK_INTER: result = motion_inter_block (bitbuf, current, previous, pitch, motion, x, y); if (result != 0) { #ifdef DEBUG_SVQ1 printf("Error in motion_inter_block %i\n",result); #endif break; } result = decode_svq1_block (bitbuf, current, pitch, 0); break; case SVQ1_BLOCK_INTER_4V: result = motion_inter_4v_block (bitbuf, current, previous, pitch, motion, x, y); if (result != 0) { #ifdef DEBUG_SVQ1 printf("Error in motion_inter_4v_block %i\n",result); #endif break; } result = decode_svq1_block (bitbuf, current, pitch, 0); break; case SVQ1_BLOCK_INTRA: result = decode_svq1_block (bitbuf, current, pitch, 1); break; } return result; } | 24,342 |
1 | static int pci_ich9_uninit(PCIDevice *dev) { struct AHCIPCIState *d; d = DO_UPCAST(struct AHCIPCIState, card, dev); if (msi_enabled(dev)) { msi_uninit(dev); } qemu_unregister_reset(ahci_reset, d); ahci_uninit(&d->ahci); return 0; } | 24,343 |
0 | void powerpc_display_perf_report(void) { int i; #ifndef POWERPC_PERF_USE_PMC fprintf(stderr, "PowerPC performance report\n Values are from the Time Base register, and represent 4 bus cycles.\n"); #else /* POWERPC_PERF_USE_PMC */ fprintf(stderr, "PowerPC performance report\n Values are from the PMC registers, and represent whatever the registers are set to record.\n"); #endif /* POWERPC_PERF_USE_PMC */ for(i = 0 ; i < powerpc_perf_total ; i++) { if (perfdata[i][powerpc_data_num] != (unsigned long long)0) fprintf(stderr, " Function \"%s\" (pmc1):\n\tmin: %llu\n\tmax: %llu\n\tavg: %1.2lf (%llu)\n", perfname[i], perfdata[i][powerpc_data_min], perfdata[i][powerpc_data_max], (double)perfdata[i][powerpc_data_sum] / (double)perfdata[i][powerpc_data_num], perfdata[i][powerpc_data_num]); #ifdef POWERPC_PERF_USE_PMC if (perfdata_pmc2[i][powerpc_data_num] != (unsigned long long)0) fprintf(stderr, " Function \"%s\" (pmc2):\n\tmin: %llu\n\tmax: %llu\n\tavg: %1.2lf (%llu)\n", perfname[i], perfdata_pmc2[i][powerpc_data_min], perfdata_pmc2[i][powerpc_data_max], (double)perfdata_pmc2[i][powerpc_data_sum] / (double)perfdata_pmc2[i][powerpc_data_num], perfdata_pmc2[i][powerpc_data_num]); if (perfdata_pmc3[i][powerpc_data_num] != (unsigned long long)0) fprintf(stderr, " Function \"%s\" (pmc3):\n\tmin: %llu\n\tmax: %llu\n\tavg: %1.2lf (%llu)\n", perfname[i], perfdata_pmc3[i][powerpc_data_min], perfdata_pmc3[i][powerpc_data_max], (double)perfdata_pmc3[i][powerpc_data_sum] / (double)perfdata_pmc3[i][powerpc_data_num], perfdata_pmc3[i][powerpc_data_num]); #endif } } | 24,344 |
1 | void ccw_dstream_init(CcwDataStream *cds, CCW1 const *ccw, ORB const *orb) { /* * We don't support MIDA (an optional facility) yet and we * catch this earlier. Just for expressing the precondition. */ g_assert(!(orb->ctrl1 & ORB_CTRL1_MASK_MIDAW)); cds->flags = (orb->ctrl0 & ORB_CTRL0_MASK_I2K ? CDS_F_I2K : 0) | (orb->ctrl0 & ORB_CTRL0_MASK_C64 ? CDS_F_C64 : 0) | (ccw->flags & CCW_FLAG_IDA ? CDS_F_IDA : 0); cds->count = ccw->count; cds->cda_orig = ccw->cda; ccw_dstream_rewind(cds); if (!(cds->flags & CDS_F_IDA)) { cds->op_handler = ccw_dstream_rw_noflags; } else { assert(false); } } | 24,345 |
1 | static int msrle_decode_pal4(AVCodecContext *avctx, AVPicture *pic, const uint8_t *data, int data_size) { int stream_ptr = 0; unsigned char rle_code; unsigned char extra_byte, odd_pixel; unsigned char stream_byte; int pixel_ptr = 0; int row_dec = pic->linesize[0]; int row_ptr = (avctx->height - 1) * row_dec; int frame_size = row_dec * avctx->height; int i; while (row_ptr >= 0) { FETCH_NEXT_STREAM_BYTE(); rle_code = stream_byte; if (rle_code == 0) { /* fetch the next byte to see how to handle escape code */ FETCH_NEXT_STREAM_BYTE(); if (stream_byte == 0) { /* line is done, goto the next one */ row_ptr -= row_dec; pixel_ptr = 0; } else if (stream_byte == 1) { /* decode is done */ return 0; } else if (stream_byte == 2) { /* reposition frame decode coordinates */ FETCH_NEXT_STREAM_BYTE(); pixel_ptr += stream_byte; FETCH_NEXT_STREAM_BYTE(); row_ptr -= stream_byte * row_dec; } else { // copy pixels from encoded stream odd_pixel = stream_byte & 1; rle_code = (stream_byte + 1) / 2; extra_byte = rle_code & 0x01; if (row_ptr + pixel_ptr + stream_byte > frame_size) { av_log(avctx, AV_LOG_ERROR, " MS RLE: frame ptr just went out of bounds (1)\n"); return -1; } for (i = 0; i < rle_code; i++) { if (pixel_ptr >= avctx->width) break; FETCH_NEXT_STREAM_BYTE(); pic->data[0][row_ptr + pixel_ptr] = stream_byte >> 4; pixel_ptr++; if (i + 1 == rle_code && odd_pixel) break; if (pixel_ptr >= avctx->width) break; pic->data[0][row_ptr + pixel_ptr] = stream_byte & 0x0F; pixel_ptr++; } // if the RLE code is odd, skip a byte in the stream if (extra_byte) stream_ptr++; } } else { // decode a run of data if (row_ptr + pixel_ptr + stream_byte > frame_size) { av_log(avctx, AV_LOG_ERROR, " MS RLE: frame ptr just went out of bounds (1)\n"); return -1; } FETCH_NEXT_STREAM_BYTE(); for (i = 0; i < rle_code; i++) { if (pixel_ptr >= avctx->width) break; if ((i & 1) == 0) pic->data[0][row_ptr + pixel_ptr] = stream_byte >> 4; else pic->data[0][row_ptr + pixel_ptr] = stream_byte & 0x0F; pixel_ptr++; } } } /* one last sanity check on the way out */ if (stream_ptr < data_size) { av_log(avctx, AV_LOG_ERROR, " MS RLE: ended frame decode with bytes left over (%d < %d)\n", stream_ptr, data_size); return -1; } return 0; } | 24,346 |
1 | static int vnc_update_client(VncState *vs, int has_dirty, bool sync) { vs->has_dirty += has_dirty; if (vs->need_update && vs->ioc != NULL) { VncDisplay *vd = vs->vd; VncJob *job; int y; int height, width; int n = 0; if (vs->output.offset && !vs->audio_cap && !vs->force_update) /* kernel send buffers are full -> drop frames to throttle */ return 0; if (!vs->has_dirty && !vs->audio_cap && !vs->force_update) return 0; /* * Send screen updates to the vnc client using the server * surface and server dirty map. guest surface updates * happening in parallel don't disturb us, the next pass will * send them to the client. */ job = vnc_job_new(vs); height = pixman_image_get_height(vd->server); width = pixman_image_get_width(vd->server); y = 0; for (;;) { int x, h; unsigned long x2; unsigned long offset = find_next_bit((unsigned long *) &vs->dirty, height * VNC_DIRTY_BPL(vs), y * VNC_DIRTY_BPL(vs)); if (offset == height * VNC_DIRTY_BPL(vs)) { /* no more dirty bits */ break; } y = offset / VNC_DIRTY_BPL(vs); x = offset % VNC_DIRTY_BPL(vs); x2 = find_next_zero_bit((unsigned long *) &vs->dirty[y], VNC_DIRTY_BPL(vs), x); bitmap_clear(vs->dirty[y], x, x2 - x); h = find_and_clear_dirty_height(vs, y, x, x2, height); x2 = MIN(x2, width / VNC_DIRTY_PIXELS_PER_BIT); if (x2 > x) { n += vnc_job_add_rect(job, x * VNC_DIRTY_PIXELS_PER_BIT, y, (x2 - x) * VNC_DIRTY_PIXELS_PER_BIT, h); } if (!x && x2 == width / VNC_DIRTY_PIXELS_PER_BIT) { y += h; if (y == height) { break; } } } vnc_job_push(job); if (sync) { vnc_jobs_join(vs); } vs->force_update = 0; vs->has_dirty = 0; return n; } if (vs->disconnecting) { vnc_disconnect_finish(vs); } else if (sync) { vnc_jobs_join(vs); } return 0; } | 24,347 |
1 | static void vc1_mc_4mv_chroma4(VC1Context *v) { MpegEncContext *s = &v->s; DSPContext *dsp = &v->s.dsp; uint8_t *srcU, *srcV; int uvsrc_x, uvsrc_y; int uvmx_field[4], uvmy_field[4]; int i, off, tx, ty; int fieldmv = v->blk_mv_type[s->block_index[0]]; static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 }; int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks int v_edge_pos = s->v_edge_pos >> 1; if (!v->s.last_picture.f.data[0]) return; if (s->flags & CODEC_FLAG_GRAY) return; for (i = 0; i < 4; i++) { tx = s->mv[0][i][0]; uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1; ty = s->mv[0][i][1]; if (fieldmv) uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF]; else uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1; } for (i = 0; i < 4; i++) { off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0); uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2); uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2); // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack()) uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1); uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1); srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x; srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x; uvmx_field[i] = (uvmx_field[i] & 3) << 1; uvmy_field[i] = (uvmy_field[i] & 3) << 1; if (fieldmv && !(uvsrc_y & 1)) v_edge_pos--; if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2) uvsrc_y--; if ((v->mv_mode == MV_PMODE_INTENSITY_COMP) || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv) || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) { s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize, 5, (5 << fieldmv), uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos); s->dsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize, 5, (5 << fieldmv), uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos); srcU = s->edge_emu_buffer; srcV = s->edge_emu_buffer + 16; /* if we deal with intensity compensation we need to scale source blocks */ if (v->mv_mode == MV_PMODE_INTENSITY_COMP) { int i, j; uint8_t *src, *src2; src = srcU; src2 = srcV; for (j = 0; j < 5; j++) { for (i = 0; i < 5; i++) { src[i] = v->lutuv[src[i]]; src2[i] = v->lutuv[src2[i]]; } src += s->uvlinesize << 1; src2 += s->uvlinesize << 1; } } } if (!v->rnd) { dsp->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]); dsp->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]); } else { v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]); v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]); } } } | 24,348 |
1 | static void vga_invalidate_display(void *opaque) { VGAState *s = (VGAState *)opaque; s->last_width = -1; s->last_height = -1; } | 24,349 |
1 | static void virtio_setup(uint64_t dev_info) { struct schib schib; int ssid; bool found = false; uint16_t dev_no; /* * We unconditionally enable mss support. In every sane configuration, * this will succeed; and even if it doesn't, stsch_err() can deal * with the consequences. */ enable_mss_facility(); if (dev_info != -1) { dev_no = dev_info & 0xffff; debug_print_int("device no. ", dev_no); blk_schid.ssid = (dev_info >> 16) & 0x3; debug_print_int("ssid ", blk_schid.ssid); found = find_dev(&schib, dev_no); } else { for (ssid = 0; ssid < 0x3; ssid++) { blk_schid.ssid = ssid; found = find_dev(&schib, -1); if (found) { break; } } } if (!found) { virtio_panic("No virtio-blk device found!\n"); } virtio_setup_block(blk_schid); if (!virtio_ipl_disk_is_valid()) { virtio_panic("No valid hard disk detected.\n"); } } | 24,350 |
1 | static void qemu_cleanup_net_client(NetClientState *nc) { QTAILQ_REMOVE(&net_clients, nc, next); nc->info->cleanup(nc); } | 24,352 |
1 | static void tcg_out_op (TCGContext *s, int opc, const TCGArg *args, const int *const_args) { int c; switch (opc) { case INDEX_op_exit_tb: tcg_out_movi (s, TCG_TYPE_I64, TCG_REG_R3, args[0]); tcg_out_b (s, 0, (tcg_target_long) tb_ret_addr); break; case INDEX_op_goto_tb: if (s->tb_jmp_offset) { /* direct jump method */ s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf; s->code_ptr += 28; } else { tcg_abort (); } s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf; break; case INDEX_op_br: { TCGLabel *l = &s->labels[args[0]]; if (l->has_value) { tcg_out_b (s, 0, l->u.value); } else { uint32_t val = *(uint32_t *) s->code_ptr; /* Thanks to Andrzej Zaborowski */ tcg_out32 (s, B | (val & 0x3fffffc)); tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL24, args[0], 0); } } break; case INDEX_op_call: tcg_out_call (s, args[0], const_args[0]); break; case INDEX_op_jmp: if (const_args[0]) { tcg_out_b (s, 0, args[0]); } else { tcg_out32 (s, MTSPR | RS (args[0]) | CTR); tcg_out32 (s, BCCTR | BO_ALWAYS); } break; case INDEX_op_movi_i32: tcg_out_movi (s, TCG_TYPE_I32, args[0], args[1]); break; case INDEX_op_movi_i64: tcg_out_movi (s, TCG_TYPE_I64, args[0], args[1]); break; case INDEX_op_ld8u_i32: case INDEX_op_ld8u_i64: tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX); break; case INDEX_op_ld8s_i32: case INDEX_op_ld8s_i64: tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX); tcg_out32 (s, EXTSB | RS (args[0]) | RA (args[0])); break; case INDEX_op_ld16u_i32: case INDEX_op_ld16u_i64: tcg_out_ldst (s, args[0], args[1], args[2], LHZ, LHZX); break; case INDEX_op_ld16s_i32: case INDEX_op_ld16s_i64: tcg_out_ldst (s, args[0], args[1], args[2], LHA, LHAX); break; case INDEX_op_ld_i32: case INDEX_op_ld32u_i64: tcg_out_ldst (s, args[0], args[1], args[2], LWZ, LWZX); break; case INDEX_op_ld32s_i64: tcg_out_ldst (s, args[0], args[1], args[2], LWA, LWAX); break; case INDEX_op_ld_i64: tcg_out_ldst (s, args[0], args[1], args[2], LD, LDX); break; case INDEX_op_st8_i32: case INDEX_op_st8_i64: tcg_out_ldst (s, args[0], args[1], args[2], STB, STBX); break; case INDEX_op_st16_i32: case INDEX_op_st16_i64: tcg_out_ldst (s, args[0], args[1], args[2], STH, STHX); break; case INDEX_op_st_i32: case INDEX_op_st32_i64: tcg_out_ldst (s, args[0], args[1], args[2], STW, STWX); break; case INDEX_op_st_i64: tcg_out_ldst (s, args[0], args[1], args[2], STD, STDX); break; case INDEX_op_add_i32: if (const_args[2]) ppc_addi32 (s, args[0], args[1], args[2]); else tcg_out32 (s, ADD | TAB (args[0], args[1], args[2])); break; case INDEX_op_sub_i32: if (const_args[2]) ppc_addi32 (s, args[0], args[1], -args[2]); else tcg_out32 (s, SUBF | TAB (args[0], args[2], args[1])); break; case INDEX_op_and_i64: case INDEX_op_and_i32: if (const_args[2]) { if ((args[2] & 0xffff) == args[2]) tcg_out32 (s, ANDI | RS (args[1]) | RA (args[0]) | args[2]); else if ((args[2] & 0xffff0000) == args[2]) tcg_out32 (s, ANDIS | RS (args[1]) | RA (args[0]) | ((args[2] >> 16) & 0xffff)); else { tcg_out_movi (s, (opc == INDEX_op_and_i32 ? 
TCG_TYPE_I32 : TCG_TYPE_I64), 0, args[2]); tcg_out32 (s, AND | SAB (args[1], args[0], 0)); } } else tcg_out32 (s, AND | SAB (args[1], args[0], args[2])); break; case INDEX_op_or_i64: case INDEX_op_or_i32: if (const_args[2]) { if (args[2] & 0xffff) { tcg_out32 (s, ORI | RS (args[1]) | RA (args[0]) | (args[2] & 0xffff)); if (args[2] >> 16) tcg_out32 (s, ORIS | RS (args[0]) | RA (args[0]) | ((args[2] >> 16) & 0xffff)); } else { tcg_out32 (s, ORIS | RS (args[1]) | RA (args[0]) | ((args[2] >> 16) & 0xffff)); } } else tcg_out32 (s, OR | SAB (args[1], args[0], args[2])); break; case INDEX_op_xor_i64: case INDEX_op_xor_i32: if (const_args[2]) { if ((args[2] & 0xffff) == args[2]) tcg_out32 (s, XORI | RS (args[1]) | RA (args[0]) | (args[2] & 0xffff)); else if ((args[2] & 0xffff0000) == args[2]) tcg_out32 (s, XORIS | RS (args[1]) | RA (args[0]) | ((args[2] >> 16) & 0xffff)); else { tcg_out_movi (s, (opc == INDEX_op_and_i32 ? TCG_TYPE_I32 : TCG_TYPE_I64), 0, args[2]); tcg_out32 (s, XOR | SAB (args[1], args[0], 0)); } } else tcg_out32 (s, XOR | SAB (args[1], args[0], args[2])); break; case INDEX_op_mul_i32: if (const_args[2]) { if (args[2] == (int16_t) args[2]) tcg_out32 (s, MULLI | RT (args[0]) | RA (args[1]) | (args[2] & 0xffff)); else { tcg_out_movi (s, TCG_TYPE_I32, 0, args[2]); tcg_out32 (s, MULLW | TAB (args[0], args[1], 0)); } } else tcg_out32 (s, MULLW | TAB (args[0], args[1], args[2])); break; case INDEX_op_div_i32: tcg_out32 (s, DIVW | TAB (args[0], args[1], args[2])); break; case INDEX_op_divu_i32: tcg_out32 (s, DIVWU | TAB (args[0], args[1], args[2])); break; case INDEX_op_rem_i32: tcg_out32 (s, DIVW | TAB (0, args[1], args[2])); tcg_out32 (s, MULLW | TAB (0, 0, args[2])); tcg_out32 (s, SUBF | TAB (args[0], 0, args[1])); break; case INDEX_op_remu_i32: tcg_out32 (s, DIVWU | TAB (0, args[1], args[2])); tcg_out32 (s, MULLW | TAB (0, 0, args[2])); tcg_out32 (s, SUBF | TAB (args[0], 0, args[1])); break; case INDEX_op_shl_i32: if (const_args[2]) { tcg_out32 (s, (RLWINM | RA (args[0]) | RS (args[1]) | SH (args[2]) | MB (0) | ME (31 - args[2]) ) ); } else tcg_out32 (s, SLW | SAB (args[1], args[0], args[2])); break; case INDEX_op_shr_i32: if (const_args[2]) { tcg_out32 (s, (RLWINM | RA (args[0]) | RS (args[1]) | SH (32 - args[2]) | MB (args[2]) | ME (31) ) ); } else tcg_out32 (s, SRW | SAB (args[1], args[0], args[2])); break; case INDEX_op_sar_i32: if (const_args[2]) tcg_out32 (s, SRAWI | RS (args[1]) | RA (args[0]) | SH (args[2])); else tcg_out32 (s, SRAW | SAB (args[1], args[0], args[2])); break; case INDEX_op_brcond_i32: tcg_out_brcond (s, args[2], args[0], args[1], const_args[1], args[3], 0); break; case INDEX_op_brcond_i64: tcg_out_brcond (s, args[2], args[0], args[1], const_args[1], args[3], 1); break; case INDEX_op_neg_i32: case INDEX_op_neg_i64: tcg_out32 (s, NEG | RT (args[0]) | RA (args[1])); break; case INDEX_op_add_i64: if (const_args[2]) ppc_addi64 (s, args[0], args[1], args[2]); else tcg_out32 (s, ADD | TAB (args[0], args[1], args[2])); break; case INDEX_op_sub_i64: if (const_args[2]) ppc_addi64 (s, args[0], args[1], -args[2]); else tcg_out32 (s, SUBF | TAB (args[0], args[2], args[1])); break; case INDEX_op_shl_i64: if (const_args[2]) tcg_out_rld (s, RLDICR, args[0], args[1], args[2], 63 - args[2]); else tcg_out32 (s, SLD | SAB (args[1], args[0], args[2])); break; case INDEX_op_shr_i64: if (const_args[2]) tcg_out_rld (s, RLDICL, args[0], args[1], 64 - args[2], args[2]); else tcg_out32 (s, SRD | SAB (args[1], args[0], args[2])); break; case INDEX_op_sar_i64: if (const_args[2]) { int sh 
= SH (args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1); tcg_out32 (s, SRADI | RA (args[0]) | RS (args[1]) | sh); } else tcg_out32 (s, SRAD | SAB (args[1], args[0], args[2])); break; case INDEX_op_mul_i64: tcg_out32 (s, MULLD | TAB (args[0], args[1], args[2])); break; case INDEX_op_div_i64: tcg_out32 (s, DIVD | TAB (args[0], args[1], args[2])); break; case INDEX_op_divu_i64: tcg_out32 (s, DIVDU | TAB (args[0], args[1], args[2])); break; case INDEX_op_rem_i64: tcg_out32 (s, DIVD | TAB (0, args[1], args[2])); tcg_out32 (s, MULLD | TAB (0, 0, args[2])); tcg_out32 (s, SUBF | TAB (args[0], 0, args[1])); break; case INDEX_op_remu_i64: tcg_out32 (s, DIVDU | TAB (0, args[1], args[2])); tcg_out32 (s, MULLD | TAB (0, 0, args[2])); tcg_out32 (s, SUBF | TAB (args[0], 0, args[1])); break; case INDEX_op_qemu_ld8u: tcg_out_qemu_ld (s, args, 0); break; case INDEX_op_qemu_ld8s: tcg_out_qemu_ld (s, args, 0 | 4); break; case INDEX_op_qemu_ld16u: tcg_out_qemu_ld (s, args, 1); break; case INDEX_op_qemu_ld16s: tcg_out_qemu_ld (s, args, 1 | 4); break; case INDEX_op_qemu_ld32u: tcg_out_qemu_ld (s, args, 2); break; case INDEX_op_qemu_ld32s: tcg_out_qemu_ld (s, args, 2 | 4); break; case INDEX_op_qemu_ld64: tcg_out_qemu_ld (s, args, 3); break; case INDEX_op_qemu_st8: tcg_out_qemu_st (s, args, 0); break; case INDEX_op_qemu_st16: tcg_out_qemu_st (s, args, 1); break; case INDEX_op_qemu_st32: tcg_out_qemu_st (s, args, 2); break; case INDEX_op_qemu_st64: tcg_out_qemu_st (s, args, 3); break; case INDEX_op_ext8s_i32: case INDEX_op_ext8s_i64: c = EXTSB; goto gen_ext; case INDEX_op_ext16s_i32: case INDEX_op_ext16s_i64: c = EXTSH; goto gen_ext; case INDEX_op_ext32s_i64: c = EXTSW; goto gen_ext; gen_ext: tcg_out32 (s, c | RS (args[1]) | RA (args[0])); break; default: tcg_dump_ops (s, stderr); tcg_abort (); } } | 24,353 |
1 | static int socket_accept(int sock) { struct sockaddr_un addr; socklen_t addrlen; int ret; addrlen = sizeof(addr); do { ret = accept(sock, (struct sockaddr *)&addr, &addrlen); } while (ret == -1 && errno == EINTR); g_assert_no_errno(ret); close(sock); return ret; } | 24,354 |
1 | ivshmem_server_handle_new_conn(IvshmemServer *server) { IvshmemServerPeer *peer, *other_peer; struct sockaddr_un unaddr; socklen_t unaddr_len; int newfd; unsigned i; /* accept the incoming connection */ unaddr_len = sizeof(unaddr); newfd = qemu_accept(server->sock_fd, (struct sockaddr *)&unaddr, &unaddr_len); if (newfd < 0) { IVSHMEM_SERVER_DEBUG(server, "cannot accept() %s\n", strerror(errno)); return -1; } qemu_set_nonblock(newfd); IVSHMEM_SERVER_DEBUG(server, "accept()=%d\n", newfd); /* allocate new structure for this peer */ peer = g_malloc0(sizeof(*peer)); peer->sock_fd = newfd; /* get an unused peer id */ /* XXX: this could use id allocation such as Linux IDA, or simply * a free-list */ for (i = 0; i < G_MAXUINT16; i++) { if (ivshmem_server_search_peer(server, server->cur_id) == NULL) { break; } server->cur_id++; } if (i == G_MAXUINT16) { IVSHMEM_SERVER_DEBUG(server, "cannot allocate new client id\n"); goto fail; } peer->id = server->cur_id++; /* create eventfd, one per vector */ peer->vectors_count = server->n_vectors; for (i = 0; i < peer->vectors_count; i++) { if (event_notifier_init(&peer->vectors[i], FALSE) < 0) { IVSHMEM_SERVER_DEBUG(server, "cannot create eventfd\n"); goto fail; } } /* send peer id and shm fd */ if (ivshmem_server_send_initial_info(server, peer) < 0) { IVSHMEM_SERVER_DEBUG(server, "cannot send initial info\n"); goto fail; } /* advertise the new peer to others */ QTAILQ_FOREACH(other_peer, &server->peer_list, next) { for (i = 0; i < peer->vectors_count; i++) { ivshmem_server_send_one_msg(other_peer->sock_fd, peer->id, peer->vectors[i].wfd); } } /* advertise the other peers to the new one */ QTAILQ_FOREACH(other_peer, &server->peer_list, next) { for (i = 0; i < peer->vectors_count; i++) { ivshmem_server_send_one_msg(peer->sock_fd, other_peer->id, other_peer->vectors[i].wfd); } } /* advertise the new peer to itself */ for (i = 0; i < peer->vectors_count; i++) { ivshmem_server_send_one_msg(peer->sock_fd, peer->id, event_notifier_get_fd(&peer->vectors[i])); } QTAILQ_INSERT_TAIL(&server->peer_list, peer, next); IVSHMEM_SERVER_DEBUG(server, "new peer id = %" PRId64 "\n", peer->id); return 0; fail: while (i--) { event_notifier_cleanup(&peer->vectors[i]); } close(newfd); g_free(peer); return -1; } | 24,355 |
0 | static int mjpegb_decode_frame(AVCodecContext *avctx, void *data, int *data_size, UINT8 *buf, int buf_size) { MJpegDecodeContext *s = avctx->priv_data; UINT8 *buf_end, *buf_ptr; int i; AVPicture *picture = data; GetBitContext hgb; /* for the header */ uint32_t dqt_offs, dht_offs, sof_offs, sos_offs, second_field_offs; uint32_t field_size; *data_size = 0; /* no supplementary picture */ if (buf_size == 0) return 0; buf_ptr = buf; buf_end = buf + buf_size; read_header: /* reset on every SOI */ s->restart_interval = 0; init_get_bits(&hgb, buf_ptr, /*buf_size*/buf_end - buf_ptr); skip_bits(&hgb, 32); /* reserved zeros */ if (get_bits(&hgb, 32) != be2me_32(ff_get_fourcc("mjpg"))) { dprintf("not mjpeg-b (bad fourcc)\n"); return 0; } field_size = get_bits(&hgb, 32); /* field size */ dprintf("field size: 0x%x\n", field_size); skip_bits(&hgb, 32); /* padded field size */ second_field_offs = get_bits(&hgb, 32); dprintf("second field offs: 0x%x\n", second_field_offs); if (second_field_offs) s->interlaced = 1; dqt_offs = get_bits(&hgb, 32); dprintf("dqt offs: 0x%x\n", dqt_offs); if (dqt_offs) { init_get_bits(&s->gb, buf+dqt_offs, buf_end - (buf+dqt_offs)); s->start_code = DQT; mjpeg_decode_dqt(s); } dht_offs = get_bits(&hgb, 32); dprintf("dht offs: 0x%x\n", dht_offs); if (dht_offs) { init_get_bits(&s->gb, buf+dht_offs, buf_end - (buf+dht_offs)); s->start_code = DHT; mjpeg_decode_dht(s); } sof_offs = get_bits(&hgb, 32); dprintf("sof offs: 0x%x\n", sof_offs); if (sof_offs) { init_get_bits(&s->gb, buf+sof_offs, buf_end - (buf+sof_offs)); s->start_code = SOF0; if (mjpeg_decode_sof0(s) < 0) return -1; } sos_offs = get_bits(&hgb, 32); dprintf("sos offs: 0x%x\n", sos_offs); if (sos_offs) { // init_get_bits(&s->gb, buf+sos_offs, buf_end - (buf+sos_offs)); init_get_bits(&s->gb, buf+sos_offs, field_size); s->start_code = SOS; mjpeg_decode_sos(s); } skip_bits(&hgb, 32); /* start of data offset */ if (s->interlaced) { s->bottom_field ^= 1; /* if not bottom field, do not output image yet */ if (s->bottom_field && second_field_offs) { buf_ptr = buf + second_field_offs; second_field_offs = 0; goto read_header; } } for(i=0;i<3;i++) { picture->data[i] = s->current_picture[i]; picture->linesize[i] = (s->interlaced) ? s->linesize[i] >> 1 : s->linesize[i]; } *data_size = sizeof(AVPicture); avctx->height = s->height; if (s->interlaced) avctx->height *= 2; avctx->width = s->width; /* XXX: not complete test ! */ switch((s->h_count[0] << 4) | s->v_count[0]) { case 0x11: avctx->pix_fmt = PIX_FMT_YUV444P; break; case 0x21: avctx->pix_fmt = PIX_FMT_YUV422P; break; default: case 0x22: avctx->pix_fmt = PIX_FMT_YUV420P; break; } /* dummy quality */ /* XXX: infer it with matrix */ // avctx->quality = 3; return buf_ptr - buf; } | 24,357 |
1 | static int idcin_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { IdcinDemuxContext *idcin = s->priv_data; if (idcin->first_pkt_pos > 0) { int ret = avio_seek(s->pb, idcin->first_pkt_pos, SEEK_SET); if (ret < 0) return ret; ff_update_cur_dts(s, s->streams[idcin->video_stream_index], 0); idcin->next_chunk_is_video = 1; idcin->current_audio_chunk = 0; return 0; } return -1; } | 24,358 |
1 | static int multiwrite_req_compare(const void *a, const void *b) { return (((BlockRequest*) a)->sector - ((BlockRequest*) b)->sector); } | 24,359 |
1 | static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp) { AVFrame *src = srcp->f; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format); int i; int ret = av_frame_ref(dst, src); if (ret < 0) return ret; av_dict_set(&dst->metadata, "stereo_mode", ff_h264_sei_stereo_mode(h), 0); if (srcp->sei_recovery_frame_cnt == 0) dst->key_frame = 1; if (!srcp->crop) return 0; for (i = 0; i < desc->nb_components; i++) { int hshift = (i > 0) ? desc->log2_chroma_w : 0; int vshift = (i > 0) ? desc->log2_chroma_h : 0; int off = ((srcp->crop_left >> hshift) << h->pixel_shift) + (srcp->crop_top >> vshift) * dst->linesize[i]; dst->data[i] += off; } return 0; } | 24,360 |
1 | int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { AVFrame *frame = data; const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; MJpegDecodeContext *s = avctx->priv_data; const uint8_t *buf_end, *buf_ptr; const uint8_t *unescaped_buf_ptr; int hshift, vshift; int unescaped_buf_size; int start_code; int i, index; int ret = 0; int is16bit; av_dict_free(&s->exif_metadata); av_freep(&s->stereo3d); s->adobe_transform = -1; buf_ptr = buf; buf_end = buf + buf_size; while (buf_ptr < buf_end) { /* find start next marker */ start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end, &unescaped_buf_ptr, &unescaped_buf_size); /* EOF */ if (start_code < 0) { break; } else if (unescaped_buf_size > INT_MAX / 8) { av_log(avctx, AV_LOG_ERROR, "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n", start_code, unescaped_buf_size, buf_size); return AVERROR_INVALIDDATA; } av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n", start_code, buf_end - buf_ptr); ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "invalid buffer\n"); goto fail; } s->start_code = start_code; if (s->avctx->debug & FF_DEBUG_STARTCODE) av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code); /* process markers */ if (start_code >= 0xd0 && start_code <= 0xd7) av_log(avctx, AV_LOG_DEBUG, "restart marker: %d\n", start_code & 0x0f); /* APP fields */ else if (start_code >= APP0 && start_code <= APP15) mjpeg_decode_app(s); /* Comment */ else if (start_code == COM) mjpeg_decode_com(s); ret = -1; if (!CONFIG_JPEGLS_DECODER && (start_code == SOF48 || start_code == LSE)) { av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n"); return AVERROR(ENOSYS); } if (avctx->skip_frame == AVDISCARD_ALL) { switch(start_code) { case SOF0: case SOF1: case SOF2: case SOF3: case SOF48: case SOI: case SOS: case EOI: break; default: goto skip; } } switch (start_code) { case SOI: s->restart_interval = 0; s->restart_count = 0; /* nothing to do on SOI */ break; case DQT: ff_mjpeg_decode_dqt(s); break; case DHT: if ((ret = ff_mjpeg_decode_dht(s)) < 0) { av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n"); goto fail; } break; case SOF0: case SOF1: s->lossless = 0; s->ls = 0; s->progressive = 0; if ((ret = ff_mjpeg_decode_sof(s)) < 0) goto fail; break; case SOF2: s->lossless = 0; s->ls = 0; s->progressive = 1; if ((ret = ff_mjpeg_decode_sof(s)) < 0) goto fail; break; case SOF3: s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS; s->lossless = 1; s->ls = 0; s->progressive = 0; if ((ret = ff_mjpeg_decode_sof(s)) < 0) goto fail; break; case SOF48: s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS; s->lossless = 1; s->ls = 1; s->progressive = 0; if ((ret = ff_mjpeg_decode_sof(s)) < 0) goto fail; break; case LSE: if (!CONFIG_JPEGLS_DECODER || (ret = ff_jpegls_decode_lse(s)) < 0) goto fail; break; case EOI: eoi_parser: s->cur_scan = 0; if (!s->got_picture) { av_log(avctx, AV_LOG_WARNING, "Found EOI before any SOF, ignoring\n"); break; } if (s->interlaced) { s->bottom_field ^= 1; /* if not bottom field, do not output image yet */ if (s->bottom_field == !s->interlace_polarity) break; } if (avctx->skip_frame == AVDISCARD_ALL) { s->got_picture = 0; goto the_end_no_picture; } if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0) return ret; *got_frame = 1; s->got_picture = 0; if (!s->lossless) { int qp = FFMAX3(s->qscale[0], s->qscale[1], s->qscale[2]); int qpw = (s->width + 15) / 16; AVBufferRef *qp_table_buf = av_buffer_alloc(qpw); if (qp_table_buf) { memset(qp_table_buf->data, qp, qpw); av_frame_set_qp_table(data, qp_table_buf, 0, FF_QSCALE_TYPE_MPEG1); } if(avctx->debug & FF_DEBUG_QP) av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp); } goto the_end; case SOS: s->cur_scan++; if (avctx->skip_frame == AVDISCARD_ALL) break; if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) goto fail; break; case DRI: mjpeg_decode_dri(s); break; case SOF5: case SOF6: case SOF7: case SOF9: case SOF10: case SOF11: case SOF13: case SOF14: case SOF15: case JPG: av_log(avctx, AV_LOG_ERROR, "mjpeg: unsupported coding type (%x)\n", start_code); break; } skip: /* eof process start code */ buf_ptr += (get_bits_count(&s->gb) + 7) / 8; av_log(avctx, AV_LOG_DEBUG, "marker parser used %d bytes (%d bits)\n", (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb)); } if (s->got_picture && s->cur_scan) { av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n"); goto eoi_parser; } av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n"); return AVERROR_INVALIDDATA; fail: s->got_picture = 0; return ret; the_end: is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1; if (AV_RB32(s->upscale_h)) { int p; av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P || avctx->pix_fmt == AV_PIX_FMT_YUV444P || avctx->pix_fmt == AV_PIX_FMT_YUVJ440P || avctx->pix_fmt == AV_PIX_FMT_YUV440P || avctx->pix_fmt == AV_PIX_FMT_YUVA444P || avctx->pix_fmt == AV_PIX_FMT_YUVJ420P || avctx->pix_fmt == AV_PIX_FMT_YUV420P || avctx->pix_fmt == AV_PIX_FMT_YUV420P16|| avctx->pix_fmt == AV_PIX_FMT_YUVA420P || avctx->pix_fmt == AV_PIX_FMT_YUVA420P16|| avctx->pix_fmt == AV_PIX_FMT_GBRP || avctx->pix_fmt == AV_PIX_FMT_GBRAP ); avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift); for (p = 0; p<4; p++) { uint8_t *line = s->picture_ptr->data[p]; int w = s->width; int h = s->height; if (!s->upscale_h[p]) continue; if (p==1 || p==2) { w = AV_CEIL_RSHIFT(w, hshift); h = AV_CEIL_RSHIFT(h, vshift); } if (s->upscale_v[p]) h = (h+1)>>1; av_assert0(w > 0); for (i = 0; i < h; i++) { if (s->upscale_h[p] == 1) { if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2]; else line[w - 1] = line[(w - 1) / 2]; for (index = w - 2; index > 0; index--) { if (is16bit) ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1; else line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1; } } else if (s->upscale_h[p] == 2) { if (is16bit) { ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3]; if (w > 1) ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1]; } else { line[w - 1] = line[(w - 1) / 3]; if (w > 1) line[w - 2] = line[w - 1]; } for (index = w - 3; index > 0; index--) { line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3; } } line += s->linesize[p]; } } } if (AV_RB32(s->upscale_v)) { int p; av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P || avctx->pix_fmt == AV_PIX_FMT_YUV444P || avctx->pix_fmt == AV_PIX_FMT_YUVJ422P || avctx->pix_fmt == AV_PIX_FMT_YUV422P || avctx->pix_fmt == AV_PIX_FMT_YUVJ420P || avctx->pix_fmt == AV_PIX_FMT_YUV420P || avctx->pix_fmt == AV_PIX_FMT_YUV440P || avctx->pix_fmt == AV_PIX_FMT_YUVJ440P || avctx->pix_fmt == AV_PIX_FMT_YUVA444P || avctx->pix_fmt == AV_PIX_FMT_YUVA420P || avctx->pix_fmt == AV_PIX_FMT_YUVA420P16|| avctx->pix_fmt == AV_PIX_FMT_GBRP || avctx->pix_fmt == AV_PIX_FMT_GBRAP ); avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift); for (p = 0; p < 4; p++) { uint8_t *dst; int w = s->width; int h = s->height; if (!s->upscale_v[p]) continue; if (p==1 || p==2) { w = AV_CEIL_RSHIFT(w, hshift); h = AV_CEIL_RSHIFT(h, vshift); } dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]]; for (i = h - 1; i; i--) { uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i / 2 * s->linesize[p]]; uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) / 2 * s->linesize[p]]; if (src1 == src2 || i == h - 1) { memcpy(dst, src1, w); } else { for (index = 0; index < w; index++) dst[index] = (src1[index] + src2[index]) >> 1; } dst -= s->linesize[p]; } } } if (s->flipped) { int j; avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift); for (index=0; index<4; index++) { uint8_t *dst = s->picture_ptr->data[index]; int w = s->picture_ptr->width; int h = s->picture_ptr->height; if(index && index<3){ w = AV_CEIL_RSHIFT(w, hshift); h = AV_CEIL_RSHIFT(h, vshift); } if(dst){ uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1); for (i=0; i<h/2; i++) { for (j=0; j<w; j++) FFSWAP(int, dst[j], dst2[j]); dst += s->picture_ptr->linesize[index]; dst2 -= s->picture_ptr->linesize[index]; } } } } if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) { int w = s->picture_ptr->width; int h = s->picture_ptr->height; for (i=0; i<h; i++) { int j; uint8_t *dst[4]; for (index=0; index<4; index++) { dst[index] = s->picture_ptr->data[index] + s->picture_ptr->linesize[index]*i; } for (j=0; j<w; j++) { int k = dst[3][j]; int r = dst[0][j] * k; int g = dst[1][j] * k; int b = dst[2][j] * k; dst[0][j] = g*257 >> 16; dst[1][j] = b*257 >> 16; dst[2][j] = r*257 >> 16; dst[3][j] = 255; } } } if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) { int w = s->picture_ptr->width; int h = s->picture_ptr->height; for (i=0; i<h; i++) { int j; uint8_t *dst[4]; for (index=0; index<4; index++) { dst[index] = s->picture_ptr->data[index] + s->picture_ptr->linesize[index]*i; } for (j=0; j<w; j++) { int k = dst[3][j]; int r = (255 - dst[0][j]) * k; int g = (128 - dst[1][j]) * k; int b = (128 - dst[2][j]) * k; dst[0][j] = r*257 >> 16; dst[1][j] = (g*257 >> 16) + 128; dst[2][j] = (b*257 >> 16) + 128; dst[3][j] = 255; } } } if (s->stereo3d) { AVStereo3D *stereo = av_stereo3d_create_side_data(data); if (stereo) { stereo->type = s->stereo3d->type; stereo->flags = s->stereo3d->flags; } av_freep(&s->stereo3d); } av_dict_copy(avpriv_frame_get_metadatap(data), s->exif_metadata, 0); av_dict_free(&s->exif_metadata); the_end_no_picture: av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n", buf_end - buf_ptr); // return buf_end - buf_ptr; return buf_ptr - buf; } | 24,361
1 | static void a15_daughterboard_init(const VexpressMachineState *vms, ram_addr_t ram_size, const char *cpu_model, qemu_irq *pic) { MemoryRegion *sysmem = get_system_memory(); MemoryRegion *ram = g_new(MemoryRegion, 1); MemoryRegion *sram = g_new(MemoryRegion, 1); if (!cpu_model) { cpu_model = "cortex-a15"; } { /* We have to use a separate 64 bit variable here to avoid the gcc * "comparison is always false due to limited range of data type" * warning if we are on a host where ram_addr_t is 32 bits. */ uint64_t rsz = ram_size; if (rsz > (30ULL * 1024 * 1024 * 1024)) { fprintf(stderr, "vexpress-a15: cannot model more than 30GB RAM\n"); exit(1); } } memory_region_allocate_system_memory(ram, NULL, "vexpress.highmem", ram_size); /* RAM is from 0x80000000 upwards; there is no low-memory alias for it. */ memory_region_add_subregion(sysmem, 0x80000000, ram); /* 0x2c000000 A15MPCore private memory region (GIC) */ init_cpus(cpu_model, "a15mpcore_priv", 0x2c000000, pic, vms->secure); /* A15 daughterboard peripherals: */ /* 0x20000000: CoreSight interfaces: not modelled */ /* 0x2a000000: PL301 AXI interconnect: not modelled */ /* 0x2a420000: SCC: not modelled */ /* 0x2a430000: system counter: not modelled */ /* 0x2b000000: HDLCD controller: not modelled */ /* 0x2b060000: SP805 watchdog: not modelled */ /* 0x2b0a0000: PL341 dynamic memory controller: not modelled */ /* 0x2e000000: system SRAM */ memory_region_init_ram(sram, NULL, "vexpress.a15sram", 0x10000, &error_abort); vmstate_register_ram_global(sram); memory_region_add_subregion(sysmem, 0x2e000000, sram); /* 0x7ffb0000: DMA330 DMA controller: not modelled */ /* 0x7ffd0000: PL354 static memory controller: not modelled */ } | 24,362 |
1 | static void dec_sl(DisasContext *dc) { if (dc->format == OP_FMT_RI) { LOG_DIS("sli r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5); } else { LOG_DIS("sl r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1); } if (!(dc->env->features & LM32_FEATURE_SHIFT)) { cpu_abort(dc->env, "hardware shifter is not available\n"); } if (dc->format == OP_FMT_RI) { tcg_gen_shli_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5); } else { TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f); tcg_gen_shl_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0); tcg_temp_free(t0); } } | 24,363 |
1 | void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint) { qiov->iov = g_malloc(alloc_hint * sizeof(struct iovec)); qiov->niov = 0; qiov->nalloc = alloc_hint; qiov->size = 0; } | 24,365 |
1 | static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src) { Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data; int qps_changed = 0, i, err; #define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field) if (!s1->current_frame.data[0] ||s->width != s1->width ||s->height!= s1->height) { if (s != s1) copy_fields(s, s1, golden_frame, current_frame); return -1; } if (s != s1) { // init tables if the first frame hasn't been decoded if (!s->current_frame.data[0]) { int y_fragment_count, c_fragment_count; s->avctx = dst; err = allocate_tables(dst); if (err) return err; y_fragment_count = s->fragment_width[0] * s->fragment_height[0]; c_fragment_count = s->fragment_width[1] * s->fragment_height[1]; memcpy(s->motion_val[0], s1->motion_val[0], y_fragment_count * sizeof(*s->motion_val[0])); memcpy(s->motion_val[1], s1->motion_val[1], c_fragment_count * sizeof(*s->motion_val[1])); } // copy previous frame data copy_fields(s, s1, golden_frame, dsp); // copy qscale data if necessary for (i = 0; i < 3; i++) { if (s->qps[i] != s1->qps[1]) { qps_changed = 1; memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i])); } } if (s->qps[0] != s1->qps[0]) memcpy(&s->bounding_values_array, &s1->bounding_values_array, sizeof(s->bounding_values_array)); if (qps_changed) copy_fields(s, s1, qps, superblock_count); #undef copy_fields } update_frames(dst); return 0; } | 24,366 |
1 | static int write_reftable_entry(BlockDriverState *bs, int rt_index) { BDRVQcowState *s = bs->opaque; uint64_t buf[RT_ENTRIES_PER_SECTOR]; int rt_start_index; int i, ret; rt_start_index = rt_index & ~(RT_ENTRIES_PER_SECTOR - 1); for (i = 0; i < RT_ENTRIES_PER_SECTOR; i++) { buf[i] = cpu_to_be64(s->refcount_table[rt_start_index + i]); } ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT & ~QCOW2_OL_REFCOUNT_TABLE, s->refcount_table_offset + rt_start_index * sizeof(uint64_t), sizeof(buf)); if (ret < 0) { return ret; } BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset + rt_start_index * sizeof(uint64_t), buf, sizeof(buf)); if (ret < 0) { return ret; } return 0; } | 24,367 |