project (string, 2 classes) | commit_id (string, 40 chars) | target (int64, 0-1) | func (string, 26-142k chars) | idx (int64, 0-27.3k)
---|---|---|---|---|
qemu | 57285302af51a8bae334c03e1f8243e935373953 | 1 | static int spapr_vty_init(VIOsPAPRDevice *sdev)
{
VIOsPAPRVTYDevice *dev = (VIOsPAPRVTYDevice *)sdev;
qemu_chr_add_handlers(dev->chardev, vty_can_receive,
vty_receive, NULL, dev);
    return 0;
} | 22,496 |
qemu | c3a699be3c63f75b6ea5877080ea9b96b37524c4 | 1 | static void exynos4210_ltick_recalc_count(struct tick_timer *s)
{
uint64_t to_count;
if ((s->cnt_run && s->last_tcnto) || (s->int_run && s->last_icnto)) {
/*
* one or both timers run and not counted to the end;
* distance is not passed, recalculate with last_tcnto * last_icnto
*/
if (s->last_tcnto) {
to_count = s->last_tcnto * s->last_icnto;
} else {
to_count = s->last_icnto;
}
} else {
/* distance is passed, recalculate with tcnto * icnto */
if (s->icntb) {
s->distance = s->tcntb * s->icntb;
} else {
s->distance = s->tcntb;
}
to_count = s->distance;
s->progress = 0;
}
if (to_count > MCT_LT_COUNTER_STEP) {
/* count by step */
s->count = MCT_LT_COUNTER_STEP;
} else {
s->count = to_count;
}
}
| 22,497 |
qemu | d3c8c67469ee70fcae116d5abc277a7ebc8a19fd | 1 | static void mirror_start_job(const char *job_id, BlockDriverState *bs,
int creation_flags, BlockDriverState *target,
const char *replaces, int64_t speed,
uint32_t granularity, int64_t buf_size,
BlockMirrorBackingMode backing_mode,
BlockdevOnError on_source_error,
BlockdevOnError on_target_error,
bool unmap,
BlockCompletionFunc *cb,
void *opaque,
const BlockJobDriver *driver,
bool is_none_mode, BlockDriverState *base,
bool auto_complete, const char *filter_node_name,
Error **errp)
{
MirrorBlockJob *s;
BlockDriverState *mirror_top_bs;
bool target_graph_mod;
bool target_is_backing;
Error *local_err = NULL;
int ret;
if (granularity == 0) {
granularity = bdrv_get_default_bitmap_granularity(target);
    }
    assert ((granularity & (granularity - 1)) == 0);
/* Granularity must be large enough for sector-based dirty bitmap */
assert(granularity >= BDRV_SECTOR_SIZE);
if (buf_size < 0) {
error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }
if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }
/* In the case of active commit, add dummy driver to provide consistent
* reads on the top, while disabling it in the intermediate nodes, and make
* the backing chain writable. */
mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
BDRV_O_RDWR, errp);
if (mirror_top_bs == NULL) {
        return;
    }
mirror_top_bs->total_sectors = bs->total_sectors;
bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));
/* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
* it alive until block_job_create() succeeds even if bs has no parent. */
bdrv_ref(mirror_top_bs);
bdrv_drained_begin(bs);
bdrv_append(mirror_top_bs, bs, &local_err);
bdrv_drained_end(bs);
if (local_err) {
bdrv_unref(mirror_top_bs);
error_propagate(errp, local_err);
        return;
    }
/* Make sure that the source is not resized while the job is running */
s = block_job_create(job_id, driver, mirror_top_bs,
BLK_PERM_CONSISTENT_READ,
BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
creation_flags, cb, opaque, errp);
if (!s) {
        goto fail;
    }
/* The block job now has a reference to this node */
bdrv_unref(mirror_top_bs);
s->source = bs;
s->mirror_top_bs = mirror_top_bs;
/* No resize for the target either; while the mirror is still running, a
* consistent read isn't necessarily possible. We could possibly allow
* writes and graph modifications, though it would likely defeat the
* purpose of a mirror, so leave them blocked for now.
*
* In the case of active commit, things look a bit different, though,
* because the target is an already populated backing file in active use.
* We can allow anything except resize there.*/
target_is_backing = bdrv_chain_contains(bs, target);
target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
(target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
BLK_PERM_WRITE_UNCHANGED |
(target_is_backing ? BLK_PERM_CONSISTENT_READ |
BLK_PERM_WRITE |
BLK_PERM_GRAPH_MOD : 0));
ret = blk_insert_bs(s->target, target, errp);
if (ret < 0) {
        goto fail;
    }
s->replaces = g_strdup(replaces);
s->on_source_error = on_source_error;
s->on_target_error = on_target_error;
s->is_none_mode = is_none_mode;
s->backing_mode = backing_mode;
s->base = base;
s->granularity = granularity;
s->buf_size = ROUND_UP(buf_size, granularity);
s->unmap = unmap;
if (auto_complete) {
        s->should_complete = true;
    }
s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
if (!s->dirty_bitmap) {
        goto fail;
    }
/* Required permissions are already taken with blk_new() */
block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
&error_abort);
/* In commit_active_start() all intermediate nodes disappear, so
* any jobs in them must be blocked */
if (target_is_backing) {
BlockDriverState *iter;
for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
/* XXX BLK_PERM_WRITE needs to be allowed so we don't block
* ourselves at s->base (if writes are blocked for a node, they are
* also blocked for its backing file). The other options would be a
* second filter driver above s->base (== target). */
ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
errp);
if (ret < 0) {
                goto fail;
            }
        }
    }
trace_mirror_start(bs, s, opaque);
block_job_start(&s->common);
return;
fail:
if (s) {
/* Make sure this BDS does not go away until we have completed the graph
* changes below */
bdrv_ref(mirror_top_bs);
g_free(s->replaces);
blk_unref(s->target);
        block_job_early_fail(&s->common);
    }
bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
&error_abort);
bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
bdrv_unref(mirror_top_bs);
}
 | 22,498 |
FFmpeg | b6eaae39b4913db81d9e3d0ad6a2f6261757d83d | 1 | static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
ASFContext *asf = s->priv_data;
ASFStream *asf_st = 0;
ByteIOContext *pb = &s->pb;
//static int pc = 0;
for (;;) {
int rsize = 0;
if (asf->packet_size_left < FRAME_HEADER_SIZE
|| asf->packet_segments < 1) {
//asf->packet_size_left <= asf->packet_padsize) {
int ret = asf->packet_size_left + asf->packet_padsize;
//printf("PacketLeftSize:%d Pad:%d Pos:%Ld\n", asf->packet_size_left, asf->packet_padsize, url_ftell(pb));
if((url_ftell(&s->pb) + ret - s->data_offset) % asf->packet_size)
ret += asf->packet_size - ((url_ftell(&s->pb) + ret - s->data_offset) % asf->packet_size);
/* fail safe */
url_fskip(pb, ret);
asf->packet_pos= url_ftell(&s->pb);
ret = asf_get_packet(s);
//printf("READ ASF PACKET %d r:%d c:%d\n", ret, asf->packet_size_left, pc++);
if (ret < 0 || url_feof(pb))
return AVERROR_IO;
asf->packet_time_start = 0;
continue;
}
if (asf->packet_time_start == 0) {
/* read frame header */
int num = get_byte(pb);
asf->packet_segments--;
rsize++;
asf->packet_key_frame = (num & 0x80) >> 7;
asf->stream_index = asf->asfid2avid[num & 0x7f];
// sequence should be ignored!
DO_2BITS(asf->packet_property >> 4, asf->packet_seq, 0);
DO_2BITS(asf->packet_property >> 2, asf->packet_frag_offset, 0);
DO_2BITS(asf->packet_property, asf->packet_replic_size, 0);
//printf("key:%d stream:%d seq:%d offset:%d replic_size:%d\n", asf->packet_key_frame, asf->stream_index, asf->packet_seq, //asf->packet_frag_offset, asf->packet_replic_size);
if (asf->packet_replic_size > 1) {
assert(asf->packet_replic_size >= 8);
// it should be always at least 8 bytes - FIXME validate
asf->packet_obj_size = get_le32(pb);
asf->packet_frag_timestamp = get_le32(pb); // timestamp
if (asf->packet_replic_size > 8)
url_fskip(pb, asf->packet_replic_size - 8);
rsize += asf->packet_replic_size; // FIXME - check validity
} else if (asf->packet_replic_size==1){
// multipacket - frag_offset is begining timestamp
asf->packet_time_start = asf->packet_frag_offset;
asf->packet_frag_offset = 0;
asf->packet_frag_timestamp = asf->packet_timestamp;
asf->packet_time_delta = get_byte(pb);
rsize++;
}else{
assert(asf->packet_replic_size==0);
}
if (asf->packet_flags & 0x01) {
DO_2BITS(asf->packet_segsizetype >> 6, asf->packet_frag_size, 0); // 0 is illegal
#undef DO_2BITS
//printf("Fragsize %d\n", asf->packet_frag_size);
} else {
asf->packet_frag_size = asf->packet_size_left - rsize;
//printf("Using rest %d %d %d\n", asf->packet_frag_size, asf->packet_size_left, rsize);
}
if (asf->packet_replic_size == 1) {
asf->packet_multi_size = asf->packet_frag_size;
if (asf->packet_multi_size > asf->packet_size_left) {
asf->packet_segments = 0;
continue;
}
}
asf->packet_size_left -= rsize;
//printf("___objsize____ %d %d rs:%d\n", asf->packet_obj_size, asf->packet_frag_offset, rsize);
if (asf->stream_index < 0
|| s->streams[asf->stream_index]->discard >= AVDISCARD_ALL
|| (!asf->packet_key_frame && s->streams[asf->stream_index]->discard >= AVDISCARD_NONKEY)
) {
asf->packet_time_start = 0;
/* unhandled packet (should not happen) */
url_fskip(pb, asf->packet_frag_size);
asf->packet_size_left -= asf->packet_frag_size;
if(asf->stream_index < 0)
av_log(s, AV_LOG_ERROR, "ff asf skip %d %d\n", asf->packet_frag_size, num & 0x7f);
continue;
}
asf->asf_st = s->streams[asf->stream_index]->priv_data;
}
asf_st = asf->asf_st;
if ((asf->packet_frag_offset != asf_st->frag_offset
|| (asf->packet_frag_offset
&& asf->packet_seq != asf_st->seq)) // seq should be ignored
) {
/* cannot continue current packet: free it */
// FIXME better check if packet was already allocated
av_log(s, AV_LOG_INFO, "ff asf parser skips: %d - %d o:%d - %d %d %d fl:%d\n",
asf_st->pkt.size,
asf->packet_obj_size,
asf->packet_frag_offset, asf_st->frag_offset,
asf->packet_seq, asf_st->seq, asf->packet_frag_size);
if (asf_st->pkt.size)
av_free_packet(&asf_st->pkt);
asf_st->frag_offset = 0;
if (asf->packet_frag_offset != 0) {
url_fskip(pb, asf->packet_frag_size);
av_log(s, AV_LOG_INFO, "ff asf parser skipping %db\n", asf->packet_frag_size);
asf->packet_size_left -= asf->packet_frag_size;
continue;
}
}
if (asf->packet_replic_size == 1) {
// frag_offset is here used as the begining timestamp
asf->packet_frag_timestamp = asf->packet_time_start;
asf->packet_time_start += asf->packet_time_delta;
asf->packet_obj_size = asf->packet_frag_size = get_byte(pb);
asf->packet_size_left--;
asf->packet_multi_size--;
if (asf->packet_multi_size < asf->packet_obj_size)
{
asf->packet_time_start = 0;
url_fskip(pb, asf->packet_multi_size);
asf->packet_size_left -= asf->packet_multi_size;
continue;
}
asf->packet_multi_size -= asf->packet_obj_size;
//printf("COMPRESS size %d %d %d ms:%d\n", asf->packet_obj_size, asf->packet_frag_timestamp, asf->packet_size_left, asf->packet_multi_size);
}
if (asf_st->frag_offset == 0) {
/* new packet */
av_new_packet(&asf_st->pkt, asf->packet_obj_size);
asf_st->seq = asf->packet_seq;
asf_st->pkt.pts = asf->packet_frag_timestamp;
asf_st->pkt.stream_index = asf->stream_index;
asf_st->pkt.pos =
asf_st->packet_pos= asf->packet_pos;
//printf("new packet: stream:%d key:%d packet_key:%d audio:%d size:%d\n",
//asf->stream_index, asf->packet_key_frame, asf_st->pkt.flags & PKT_FLAG_KEY,
//s->streams[asf->stream_index]->codec->codec_type == CODEC_TYPE_AUDIO, asf->packet_obj_size);
if (s->streams[asf->stream_index]->codec->codec_type == CODEC_TYPE_AUDIO)
asf->packet_key_frame = 1;
if (asf->packet_key_frame)
asf_st->pkt.flags |= PKT_FLAG_KEY;
}
/* read data */
//printf("READ PACKET s:%d os:%d o:%d,%d l:%d DATA:%p\n",
// asf->packet_size, asf_st->pkt.size, asf->packet_frag_offset,
// asf_st->frag_offset, asf->packet_frag_size, asf_st->pkt.data);
asf->packet_size_left -= asf->packet_frag_size;
if (asf->packet_size_left < 0)
continue;
get_buffer(pb, asf_st->pkt.data + asf->packet_frag_offset,
asf->packet_frag_size);
asf_st->frag_offset += asf->packet_frag_size;
/* test if whole packet is read */
if (asf_st->frag_offset == asf_st->pkt.size) {
/* return packet */
if (asf_st->ds_span > 1) {
/* packet descrambling */
char* newdata = av_malloc(asf_st->pkt.size);
if (newdata) {
int offset = 0;
while (offset < asf_st->pkt.size) {
int off = offset / asf_st->ds_chunk_size;
int row = off / asf_st->ds_span;
int col = off % asf_st->ds_span;
int idx = row + col * asf_st->ds_packet_size / asf_st->ds_chunk_size;
//printf("off:%d row:%d col:%d idx:%d\n", off, row, col, idx);
memcpy(newdata + offset,
asf_st->pkt.data + idx * asf_st->ds_chunk_size,
asf_st->ds_chunk_size);
offset += asf_st->ds_chunk_size;
}
av_free(asf_st->pkt.data);
asf_st->pkt.data = newdata;
}
}
asf_st->frag_offset = 0;
memcpy(pkt, &asf_st->pkt, sizeof(AVPacket));
//printf("packet %d %d\n", asf_st->pkt.size, asf->packet_frag_size);
asf_st->pkt.size = 0;
asf_st->pkt.data = 0;
break; // packet completed
}
}
return 0;
} | 22,499 |
FFmpeg | 1ec83d9a9e472f485897ac92bad9631d551a8c5b | 0 | static int decode_frame(AVCodecContext *avctx,
void *data, int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TiffContext *const s = avctx->priv_data;
AVFrame *picture = data;
AVFrame *const p = &s->picture;
const uint8_t *orig_buf = buf, *end_buf = buf + buf_size;
unsigned off;
int id, le, ret;
int i, j, entries;
int stride;
unsigned soff, ssize;
uint8_t *dst;
//parse image header
if (end_buf - buf < 8)
return AVERROR_INVALIDDATA;
id = AV_RL16(buf);
buf += 2;
if (id == 0x4949)
le = 1;
else if (id == 0x4D4D)
le = 0;
else {
av_log(avctx, AV_LOG_ERROR, "TIFF header not found\n");
return -1;
}
s->le = le;
s->invert = 0;
s->compr = TIFF_RAW;
s->fill_order = 0;
free_geotags(s);
/* free existing metadata */
av_dict_free(&s->picture.metadata);
// As TIFF 6.0 specification puts it "An arbitrary but carefully chosen number
// that further identifies the file as a TIFF file"
if (tget_short(&buf, le) != 42) {
av_log(avctx, AV_LOG_ERROR,
"The answer to life, universe and everything is not correct!\n");
return -1;
}
// Reset these pointers so we can tell if they were set this frame
s->stripsizes = s->stripdata = NULL;
/* parse image file directory */
off = tget_long(&buf, le);
if (off >= UINT_MAX - 14 || end_buf - orig_buf < off + 14) {
av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
return AVERROR_INVALIDDATA;
}
buf = orig_buf + off;
entries = tget_short(&buf, le);
for (i = 0; i < entries; i++) {
if (tiff_decode_tag(s, orig_buf, buf, end_buf) < 0)
return -1;
buf += 12;
}
for (i = 0; i<s->geotag_count; i++) {
const char *keyname = get_geokey_name(s->geotags[i].key);
if (!keyname) {
av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
continue;
}
if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
continue;
}
ret = av_dict_set(&s->picture.metadata, keyname, s->geotags[i].val, 0);
if (ret<0) {
av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
return ret;
}
}
if (!s->stripdata && !s->stripoff) {
av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
return -1;
}
/* now we have the data and may start decoding */
if ((ret = init_image(s)) < 0)
return ret;
if (s->strips == 1 && !s->stripsize) {
av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
s->stripsize = buf_size - s->stripoff;
}
stride = p->linesize[0];
dst = p->data[0];
for (i = 0; i < s->height; i += s->rps) {
if (s->stripsizes) {
if (s->stripsizes >= end_buf)
return AVERROR_INVALIDDATA;
ssize = tget(&s->stripsizes, s->sstype, s->le);
} else
ssize = s->stripsize;
if (s->stripdata) {
if (s->stripdata >= end_buf)
return AVERROR_INVALIDDATA;
soff = tget(&s->stripdata, s->sot, s->le);
} else
soff = s->stripoff;
if (soff > buf_size || ssize > buf_size - soff) {
av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
return -1;
}
if (tiff_unpack_strip(s, dst, stride, orig_buf + soff, ssize,
FFMIN(s->rps, s->height - i)) < 0)
break;
dst += s->rps * stride;
}
if (s->predictor == 2) {
dst = p->data[0];
soff = s->bpp >> 3;
ssize = s->width * soff;
if (s->avctx->pix_fmt == PIX_FMT_RGB48LE ||
s->avctx->pix_fmt == PIX_FMT_RGBA64LE) {
for (i = 0; i < s->height; i++) {
for (j = soff; j < ssize; j += 2)
AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
dst += stride;
}
} else if (s->avctx->pix_fmt == PIX_FMT_RGB48BE ||
s->avctx->pix_fmt == PIX_FMT_RGBA64BE) {
for (i = 0; i < s->height; i++) {
for (j = soff; j < ssize; j += 2)
AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
dst += stride;
}
} else {
for (i = 0; i < s->height; i++) {
for (j = soff; j < ssize; j++)
dst[j] += dst[j - soff];
dst += stride;
}
}
}
if (s->invert) {
dst = s->picture.data[0];
for (i = 0; i < s->height; i++) {
for (j = 0; j < s->picture.linesize[0]; j++)
dst[j] = (s->avctx->pix_fmt == PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255) - dst[j];
dst += s->picture.linesize[0];
}
}
*picture = s->picture;
*data_size = sizeof(AVPicture);
return buf_size;
}
| 22,500 |
FFmpeg | 5688fd77b57f1dd454990dc6fe48c6a3a1729eca | 0 | void ff_limiter_init_x86(LimiterDSPContext *dsp, int bpp)
{
int cpu_flags = av_get_cpu_flags();
if (ARCH_X86_64 && EXTERNAL_SSE2(cpu_flags)) {
if (bpp <= 8) {
dsp->limiter = ff_limiter_8bit_sse2;
}
}
if (ARCH_X86_64 && EXTERNAL_SSE4(cpu_flags)) {
if (bpp > 8) {
dsp->limiter = ff_limiter_16bit_sse4;
}
}
}
| 22,501 |
qemu | afd9096eb1882f23929f5b5c177898ed231bac66 | 1 | void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
unsigned int i, head, max;
hwaddr desc_pa = vq->vring.desc;
VirtIODevice *vdev = vq->vdev;
VirtQueueElement *elem;
unsigned out_num, in_num;
hwaddr addr[VIRTQUEUE_MAX_SIZE];
struct iovec iov[VIRTQUEUE_MAX_SIZE];
VRingDesc desc;
if (virtio_queue_empty(vq)) {
        return NULL;
    }
/* Needed after virtio_queue_empty(), see comment in
* virtqueue_num_heads(). */
smp_rmb();
/* When we start there are none of either input nor output. */
out_num = in_num = 0;
max = vq->vring.num;
i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }
vring_desc_read(vdev, &desc, desc_pa, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
if (desc.len % sizeof(VRingDesc)) {
error_report("Invalid size for indirect buffer table");
/* loop over the indirect descriptor table */
max = desc.len / sizeof(VRingDesc);
desc_pa = desc.addr;
i = 0;
        vring_desc_read(vdev, &desc, desc_pa, i);
    }
/* Collect all the descriptors */
do {
if (desc.flags & VRING_DESC_F_WRITE) {
virtqueue_map_desc(&in_num, addr + out_num, iov + out_num,
VIRTQUEUE_MAX_SIZE - out_num, true, desc.addr, desc.len);
} else {
if (in_num) {
error_report("Incorrect order for descriptors");
virtqueue_map_desc(&out_num, addr, iov,
                               VIRTQUEUE_MAX_SIZE, false, desc.addr, desc.len);
        }
/* If we've got too many, that implies a descriptor loop. */
if ((in_num + out_num) > max) {
error_report("Looped descriptor");
} while ((i = virtqueue_read_next_desc(vdev, &desc, desc_pa, max)) != max);
/* Now copy what we have collected and mapped */
elem = virtqueue_alloc_element(sz, out_num, in_num);
elem->index = head;
for (i = 0; i < out_num; i++) {
elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
for (i = 0; i < in_num; i++) {
elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }
vq->inuse++;
trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem;
} | 22,502 |
qemu | 788cf9f8c8cbda53843e060540f3e91a060eb744 | 1 | static int img_open_password(BlockBackend *blk, const char *filename,
int flags, bool quiet)
{
BlockDriverState *bs;
char password[256];
bs = blk_bs(blk);
if (bdrv_is_encrypted(bs) && bdrv_key_required(bs) &&
!(flags & BDRV_O_NO_IO)) {
qprintf(quiet, "Disk image '%s' is encrypted.\n", filename);
if (qemu_read_password(password, sizeof(password)) < 0) {
error_report("No password given");
return -1;
}
if (bdrv_set_key(bs, password) < 0) {
error_report("invalid password");
return -1;
}
}
return 0;
}
| 22,504 |
FFmpeg | f19af812a32c1398d48c3550d11dbc6aafbb2bfc | 1 | static void dump(unsigned char *buf,size_t len)
{
int i;
for(i=0;i<len;i++) {
if ((i&15)==0) printf("%04x ",i);
printf("%02x ",buf[i]);
if ((i&15)==15) printf("\n");
}
printf("\n");
}
| 22,505 |
FFmpeg | 74e4948235bc8f8946eeca20525258bbf383f75d | 1 | static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
{
HEVCContext *s1 = avctxt->priv_data, *s;
HEVCLocalContext *lc;
int ctb_size = 1<< s1->ps.sps->log2_ctb_size;
int more_data = 1;
int *ctb_row_p = input_ctb_row;
int ctb_row = ctb_row_p[job];
int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->ps.sps->width + ctb_size - 1) >> s1->ps.sps->log2_ctb_size);
int ctb_addr_ts = s1->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
int thread = ctb_row % s1->threads_number;
int ret;
s = s1->sList[self_id];
lc = s->HEVClc;
if(ctb_row) {
ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
if (ret < 0)
return ret;
ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
}
while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
if (avpriv_atomic_int_get(&s1->wpp_err)){
ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
return 0;
}
ff_hevc_cabac_init(s, ctb_addr_ts);
hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
if (more_data < 0) {
s->tab_slice_address[ctb_addr_rs] = -1;
return more_data;
}
ctb_addr_ts++;
ff_hevc_save_states(s, ctb_addr_ts);
ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
return 0;
}
if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
return ctb_addr_ts;
}
ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
x_ctb+=ctb_size;
if(x_ctb >= s->ps.sps->width) {
break;
}
}
return 0;
} | 22,506 |
FFmpeg | 5afe1d27912be9b643ffb4ddc21f6d920260dbb0 | 1 | static int mpegts_raw_read_packet(AVFormatContext *s,
AVPacket *pkt)
{
MpegTSContext *ts = s->priv_data;
int ret, i;
int64_t pcr_h, next_pcr_h, pos;
int pcr_l, next_pcr_l;
uint8_t pcr_buf[12];
uint8_t *data;
if (av_new_packet(pkt, TS_PACKET_SIZE) < 0)
return AVERROR(ENOMEM);
pkt->pos= avio_tell(s->pb);
ret = read_packet(s, pkt->data, ts->raw_packet_size, &data);
if (ret < 0) {
av_free_packet(pkt);
return ret;
}
if (data != pkt->data)
memcpy(pkt->data, data, ts->raw_packet_size);
finished_reading_packet(s, ts->raw_packet_size);
if (ts->mpeg2ts_compute_pcr) {
/* compute exact PCR for each packet */
if (parse_pcr(&pcr_h, &pcr_l, pkt->data) == 0) {
/* we read the next PCR (XXX: optimize it by using a bigger buffer */
pos = avio_tell(s->pb);
for(i = 0; i < MAX_PACKET_READAHEAD; i++) {
avio_seek(s->pb, pos + i * ts->raw_packet_size, SEEK_SET);
avio_read(s->pb, pcr_buf, 12);
if (parse_pcr(&next_pcr_h, &next_pcr_l, pcr_buf) == 0) {
/* XXX: not precise enough */
ts->pcr_incr = ((next_pcr_h - pcr_h) * 300 + (next_pcr_l - pcr_l)) /
(i + 1);
break;
}
}
avio_seek(s->pb, pos, SEEK_SET);
/* no next PCR found: we use previous increment */
ts->cur_pcr = pcr_h * 300 + pcr_l;
}
pkt->pts = ts->cur_pcr;
pkt->duration = ts->pcr_incr;
ts->cur_pcr += ts->pcr_incr;
}
pkt->stream_index = 0;
return 0;
}
| 22,508 |
FFmpeg | 90540c2d5ace46a1e9789c75fde0b1f7dbb12a9b | 1 | static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, int src_size)
{
const uint8_t *s = src;
const uint8_t *end;
const uint8_t *mm_end;
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile(
"movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t"
::"m"(red_16mask),"m"(green_16mask));
mm_end = end - 11;
while (s < mm_end) {
__asm__ volatile(
PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t"
"movd 3%1, %%mm3 \n\t"
"punpckldq 6%1, %%mm0 \n\t"
"punpckldq 9%1, %%mm3 \n\t"
"movq %%mm0, %%mm1 \n\t"
"movq %%mm0, %%mm2 \n\t"
"movq %%mm3, %%mm4 \n\t"
"movq %%mm3, %%mm5 \n\t"
"psrlq $3, %%mm0 \n\t"
"psrlq $3, %%mm3 \n\t"
"pand %2, %%mm0 \n\t"
"pand %2, %%mm3 \n\t"
"psrlq $5, %%mm1 \n\t"
"psrlq $5, %%mm4 \n\t"
"pand %%mm6, %%mm1 \n\t"
"pand %%mm6, %%mm4 \n\t"
"psrlq $8, %%mm2 \n\t"
"psrlq $8, %%mm5 \n\t"
"pand %%mm7, %%mm2 \n\t"
"pand %%mm7, %%mm5 \n\t"
"por %%mm1, %%mm0 \n\t"
"por %%mm4, %%mm3 \n\t"
"por %%mm2, %%mm0 \n\t"
"por %%mm5, %%mm3 \n\t"
"psllq $16, %%mm3 \n\t"
"por %%mm3, %%mm0 \n\t"
MOVNTQ" %%mm0, %0 \n\t"
:"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
d += 4;
s += 12;
}
__asm__ volatile(SFENCE:::"memory");
__asm__ volatile(EMMS:::"memory");
while (s < end) {
const int b = *s++;
const int g = *s++;
const int r = *s++;
*d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
}
}
| 22,509 |
qemu | 9b2fadda3e0196ffd485adde4fe9cdd6fae35300 | 1 | static void gen_mfdcrx(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
#else
if (unlikely(ctx->pr)) {
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
return;
}
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
cpu_gpr[rA(ctx->opcode)]);
/* Note: Rc update flag set leads to undefined state of Rc0 */
#endif
}
| 22,510 |
FFmpeg | e1c48b7aaedc5deb6f22ced02dfe4f356bf3f421 | 0 | static void png_save2(const char *filename, uint32_t *bitmap, int w, int h)
{
int x, y, v;
FILE *f;
char fname[40], fname2[40];
char command[1024];
snprintf(fname, 40, "%s.ppm", filename);
f = fopen(fname, "w");
if (!f) {
perror(fname);
exit(1);
}
fprintf(f, "P6\n"
"%d %d\n"
"%d\n",
w, h, 255);
for(y = 0; y < h; y++) {
for(x = 0; x < w; x++) {
v = bitmap[y * w + x];
putc((v >> 16) & 0xff, f);
putc((v >> 8) & 0xff, f);
putc((v >> 0) & 0xff, f);
}
}
fclose(f);
snprintf(fname2, 40, "%s-a.pgm", filename);
f = fopen(fname2, "w");
if (!f) {
perror(fname2);
exit(1);
}
fprintf(f, "P5\n"
"%d %d\n"
"%d\n",
w, h, 255);
for(y = 0; y < h; y++) {
for(x = 0; x < w; x++) {
v = bitmap[y * w + x];
putc((v >> 24) & 0xff, f);
}
}
fclose(f);
snprintf(command, 1024, "pnmtopng -alpha %s %s > %s.png 2> /dev/null", fname2, fname, filename);
system(command);
snprintf(command, 1024, "rm %s %s", fname, fname2);
system(command);
}
| 22,512 |
FFmpeg | af1e8ffdad4ae0a6d73e8d26d5893739e3c7a389 | 0 | static int spdif_write_packet(struct AVFormatContext *s, AVPacket *pkt)
{
IEC61937Context *ctx = s->priv_data;
int ret, padding;
ctx->out_buf = pkt->data;
ctx->out_bytes = pkt->size;
ctx->length_code = FFALIGN(pkt->size, 2) << 3;
ctx->use_preamble = 1;
ctx->extra_bswap = 0;
ret = ctx->header_info(s, pkt);
if (ret < 0)
return ret;
if (!ctx->pkt_offset)
return 0;
padding = (ctx->pkt_offset - ctx->use_preamble * BURST_HEADER_SIZE - ctx->out_bytes) & ~1;
if (padding < 0) {
av_log(s, AV_LOG_ERROR, "bitrate is too high\n");
return AVERROR(EINVAL);
}
if (ctx->use_preamble) {
put_le16(s->pb, SYNCWORD1); //Pa
put_le16(s->pb, SYNCWORD2); //Pb
put_le16(s->pb, ctx->data_type); //Pc
put_le16(s->pb, ctx->length_code);//Pd
}
if (HAVE_BIGENDIAN ^ ctx->extra_bswap) {
put_buffer(s->pb, ctx->out_buf, ctx->out_bytes & ~1);
} else {
av_fast_malloc(&ctx->buffer, &ctx->buffer_size, ctx->out_bytes + FF_INPUT_BUFFER_PADDING_SIZE);
if (!ctx->buffer)
return AVERROR(ENOMEM);
ff_spdif_bswap_buf16((uint16_t *)ctx->buffer, (uint16_t *)ctx->out_buf, ctx->out_bytes >> 1);
put_buffer(s->pb, ctx->buffer, ctx->out_bytes & ~1);
}
if (ctx->out_bytes & 1)
put_be16(s->pb, ctx->out_buf[ctx->out_bytes - 1]);
put_nbyte(s->pb, 0, padding);
av_log(s, AV_LOG_DEBUG, "type=%x len=%i pkt_offset=%i\n",
ctx->data_type, ctx->out_bytes, ctx->pkt_offset);
put_flush_packet(s->pb);
return 0;
}
| 22,513 |
FFmpeg | 1b1bb2c4efc126d74d44d8c421860c85f932ecb1 | 0 | av_cold void ff_rl_init(RLTable *rl,
uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
{
int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
uint8_t index_run[MAX_RUN + 1];
int last, run, level, start, end, i;
/* If table is static, we can quit if rl->max_level[0] is not NULL */
if (static_store && rl->max_level[0])
return;
/* compute max_level[], max_run[] and index_run[] */
for (last = 0; last < 2; last++) {
if (last == 0) {
start = 0;
end = rl->last;
} else {
start = rl->last;
end = rl->n;
}
memset(max_level, 0, MAX_RUN + 1);
memset(max_run, 0, MAX_LEVEL + 1);
memset(index_run, rl->n, MAX_RUN + 1);
for (i = start; i < end; i++) {
run = rl->table_run[i];
level = rl->table_level[i];
if (index_run[run] == rl->n)
index_run[run] = i;
if (level > max_level[run])
max_level[run] = level;
if (run > max_run[level])
max_run[level] = run;
}
if (static_store)
rl->max_level[last] = static_store[last];
else
rl->max_level[last] = av_malloc(MAX_RUN + 1);
memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
if (static_store)
rl->max_run[last] = static_store[last] + MAX_RUN + 1;
else
rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
if (static_store)
rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
else
rl->index_run[last] = av_malloc(MAX_RUN + 1);
memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
}
}
| 22,514 |
qemu | 9b2fadda3e0196ffd485adde4fe9cdd6fae35300 | 1 | static void gen_slbie(DisasContext *ctx)
{
#if defined(CONFIG_USER_ONLY)
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
#else
if (unlikely(ctx->pr)) {
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_OPC);
return;
}
gen_helper_slbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
#endif
}
| 22,515 |
qemu | 4f4321c11ff6e98583846bfd6f0e81954924b003 | 1 | static int usb_hid_handle_data(USBDevice *dev, USBPacket *p)
{
USBHIDState *s = (USBHIDState *)dev;
int ret = 0;
switch(p->pid) {
case USB_TOKEN_IN:
if (p->devep == 1) {
int64_t curtime = qemu_get_clock_ns(vm_clock);
if (!s->changed && (!s->idle || s->next_idle_clock - curtime > 0))
return USB_RET_NAK;
usb_hid_set_next_idle(s, curtime);
if (s->kind == USB_MOUSE || s->kind == USB_TABLET) {
ret = usb_pointer_poll(s, p->data, p->len);
}
else if (s->kind == USB_KEYBOARD) {
ret = usb_keyboard_poll(s, p->data, p->len);
}
s->changed = s->n > 0;
} else {
goto fail;
}
break;
case USB_TOKEN_OUT:
default:
fail:
ret = USB_RET_STALL;
break;
}
return ret;
}
| 22,516 |
FFmpeg | 322428c851980396485d4c6bb4cfe79db43467f8 | 1 | int av_opencl_init(AVDictionary *options, AVOpenCLExternalEnv *ext_opencl_env)
{
int ret = 0;
AVDictionaryEntry *opt_build_entry;
AVDictionaryEntry *opt_platform_entry;
AVDictionaryEntry *opt_device_entry;
LOCK_OPENCL
if (!gpu_env.init_count) {
opt_platform_entry = av_dict_get(options, "platform_idx", NULL, 0);
opt_device_entry = av_dict_get(options, "device_idx", NULL, 0);
/* initialize devices, context, command_queue */
gpu_env.usr_spec_dev_info.platform_idx = -1;
gpu_env.usr_spec_dev_info.dev_idx = -1;
if (opt_platform_entry) {
gpu_env.usr_spec_dev_info.platform_idx = strtol(opt_platform_entry->value, NULL, 10);
}
if (opt_device_entry) {
gpu_env.usr_spec_dev_info.dev_idx = strtol(opt_device_entry->value, NULL, 10);
}
ret = init_opencl_env(&gpu_env, ext_opencl_env);
if (ret < 0)
goto end;
}
/*initialize program, kernel_name, kernel_count*/
opt_build_entry = av_dict_get(options, "build_options", NULL, 0);
if (opt_build_entry)
ret = compile_kernel_file(&gpu_env, opt_build_entry->value);
else
ret = compile_kernel_file(&gpu_env, NULL);
if (ret < 0)
goto end;
av_assert1(gpu_env.kernel_code_count > 0);
gpu_env.init_count++;
end:
UNLOCK_OPENCL
return ret;
}
| 22,517 |
FFmpeg | 689a8674131c3852fc78eff1d7c044850d263e22 | 1 | static int msf_read_header(AVFormatContext *s)
{
unsigned codec, align, size;
AVStream *st;
avio_skip(s->pb, 4);
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
codec = avio_rb32(s->pb);
st->codec->channels = avio_rb32(s->pb);
if (st->codec->channels <= 0)
return AVERROR_INVALIDDATA;
size = avio_rb32(s->pb);
st->codec->sample_rate = avio_rb32(s->pb);
if (st->codec->sample_rate <= 0)
return AVERROR_INVALIDDATA;
align = avio_rb32(s->pb) ;
if (align > INT_MAX / st->codec->channels)
return AVERROR_INVALIDDATA;
st->codec->block_align = align;
switch (codec) {
case 0: st->codec->codec_id = AV_CODEC_ID_PCM_S16BE; break;
case 3: st->codec->block_align = 16 * st->codec->channels;
st->codec->codec_id = AV_CODEC_ID_ADPCM_PSX; break;
case 7: st->need_parsing = AVSTREAM_PARSE_FULL_RAW;
st->codec->codec_id = AV_CODEC_ID_MP3; break;
default:
avpriv_request_sample(s, "Codec %d", codec);
return AVERROR_PATCHWELCOME;
}
st->duration = av_get_audio_frame_duration(st->codec, size);
avio_skip(s->pb, 0x40 - avio_tell(s->pb));
avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
return 0;
}
| 22,518 |
qemu | b125f9dc7bd68cd4c57189db4da83b0620b28a72 | 1 | void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
int i, target_code_size, max_target_code_size;
int direct_jmp_count, direct_jmp2_count, cross_page;
TranslationBlock *tb;
target_code_size = 0;
max_target_code_size = 0;
cross_page = 0;
direct_jmp_count = 0;
direct_jmp2_count = 0;
for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
tb = &tcg_ctx.tb_ctx.tbs[i];
target_code_size += tb->size;
if (tb->size > max_target_code_size) {
max_target_code_size = tb->size;
}
if (tb->page_addr[1] != -1) {
cross_page++;
}
if (tb->tb_next_offset[0] != 0xffff) {
direct_jmp_count++;
if (tb->tb_next_offset[1] != 0xffff) {
direct_jmp2_count++;
}
}
}
/* XXX: avoid using doubles ? */
cpu_fprintf(f, "Translation buffer state:\n");
cpu_fprintf(f, "gen code size %td/%zd\n",
tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
tcg_ctx.code_gen_buffer_max_size);
cpu_fprintf(f, "TB count %d/%d\n",
tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
tcg_ctx.tb_ctx.nb_tbs : 0,
max_target_code_size);
cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
tcg_ctx.code_gen_buffer) /
tcg_ctx.tb_ctx.nb_tbs : 0,
target_code_size ? (double) (tcg_ctx.code_gen_ptr -
tcg_ctx.code_gen_buffer) /
target_code_size : 0);
cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
tcg_ctx.tb_ctx.nb_tbs : 0);
cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
direct_jmp_count,
tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
tcg_ctx.tb_ctx.nb_tbs : 0,
direct_jmp2_count,
tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
tcg_ctx.tb_ctx.nb_tbs : 0);
cpu_fprintf(f, "\nStatistics:\n");
cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count);
cpu_fprintf(f, "TB invalidate count %d\n",
tcg_ctx.tb_ctx.tb_phys_invalidate_count);
cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
tcg_dump_info(f, cpu_fprintf);
}
| 22,519 |
qemu | 39e594dbcd897849f2ca95b3310ea00fff29ea99 | 1 | static sd_rsp_type_t sd_normal_command(SDState *sd,
SDRequest req)
{
uint32_t rca = 0x0000;
uint64_t addr = (sd->ocr & (1 << 30)) ? (uint64_t) req.arg << 9 : req.arg;
if (sd_cmd_type[req.cmd] == sd_ac || sd_cmd_type[req.cmd] == sd_adtc)
rca = req.arg >> 16;
DPRINTF("CMD%d 0x%08x state %d\n", req.cmd, req.arg, sd->state);
switch (req.cmd) {
/* Basic commands (Class 0 and Class 1) */
case 0: /* CMD0: GO_IDLE_STATE */
switch (sd->state) {
case sd_inactive_state:
return sd->spi ? sd_r1 : sd_r0;
default:
sd->state = sd_idle_state;
sd_reset(sd, sd->bdrv);
return sd->spi ? sd_r1 : sd_r0;
}
break;
case 1: /* CMD1: SEND_OP_CMD */
if (!sd->spi)
goto bad_cmd;
sd->state = sd_transfer_state;
return sd_r1;
case 2: /* CMD2: ALL_SEND_CID */
if (sd->spi)
goto bad_cmd;
switch (sd->state) {
case sd_ready_state:
sd->state = sd_identification_state;
return sd_r2_i;
default:
break;
}
break;
case 3: /* CMD3: SEND_RELATIVE_ADDR */
if (sd->spi)
goto bad_cmd;
switch (sd->state) {
case sd_identification_state:
case sd_standby_state:
sd->state = sd_standby_state;
sd_set_rca(sd);
return sd_r6;
default:
break;
}
break;
case 4: /* CMD4: SEND_DSR */
if (sd->spi)
goto bad_cmd;
switch (sd->state) {
case sd_standby_state:
break;
default:
break;
}
break;
case 5: /* CMD5: reserved for SDIO cards */
case 6: /* CMD6: SWITCH_FUNCTION */
if (sd->spi)
goto bad_cmd;
switch (sd->mode) {
case sd_data_transfer_mode:
sd_function_switch(sd, req.arg);
sd->state = sd_sendingdata_state;
sd->data_start = 0;
sd->data_offset = 0;
return sd_r1;
default:
break;
}
break;
case 7: /* CMD7: SELECT/DESELECT_CARD */
if (sd->spi)
goto bad_cmd;
switch (sd->state) {
case sd_standby_state:
        if (sd->rca != rca)
            return sd_r0;
sd->state = sd_transfer_state;
return sd_r1b;
case sd_transfer_state:
case sd_sendingdata_state:
if (sd->rca == rca)
break;
sd->state = sd_standby_state;
return sd_r1b;
case sd_disconnect_state:
        if (sd->rca != rca)
            return sd_r0;
sd->state = sd_programming_state;
return sd_r1b;
case sd_programming_state:
if (sd->rca == rca)
break;
sd->state = sd_disconnect_state;
return sd_r1b;
default:
break;
}
break;
case 8: /* CMD8: SEND_IF_COND */
/* Physical Layer Specification Version 2.00 command */
switch (sd->state) {
case sd_idle_state:
sd->vhs = 0;
/* No response if not exactly one VHS bit is set. */
if (!(req.arg >> 8) || (req.arg >> ffs(req.arg & ~0xff)))
return sd->spi ? sd_r7 : sd_r0;
/* Accept. */
sd->vhs = req.arg;
return sd_r7;
default:
break;
}
break;
case 9: /* CMD9: SEND_CSD */
switch (sd->state) {
case sd_standby_state:
        if (sd->rca != rca)
            return sd_r0;
return sd_r2_s;
case sd_transfer_state:
if (!sd->spi)
break;
sd->state = sd_sendingdata_state;
memcpy(sd->data, sd->csd, 16);
sd->data_start = addr;
sd->data_offset = 0;
return sd_r1;
default:
break;
}
break;
case 10: /* CMD10: SEND_CID */
switch (sd->state) {
case sd_standby_state:
        if (sd->rca != rca)
            return sd_r0;
return sd_r2_i;
case sd_transfer_state:
if (!sd->spi)
break;
sd->state = sd_sendingdata_state;
memcpy(sd->data, sd->cid, 16);
sd->data_start = addr;
sd->data_offset = 0;
return sd_r1;
default:
break;
}
break;
case 11: /* CMD11: READ_DAT_UNTIL_STOP */
if (sd->spi)
goto bad_cmd;
switch (sd->state) {
case sd_transfer_state:
sd->state = sd_sendingdata_state;
sd->data_start = req.arg;
sd->data_offset = 0;
if (sd->data_start + sd->blk_len > sd->size)
sd->card_status |= ADDRESS_ERROR;
default:
break;
}
break;
case 12: /* CMD12: STOP_TRANSMISSION */
switch (sd->state) {
case sd_sendingdata_state:
sd->state = sd_transfer_state;
return sd_r1b;
case sd_receivingdata_state:
sd->state = sd_programming_state;
/* Bzzzzzzztt .... Operation complete. */
sd->state = sd_transfer_state;
return sd_r1b;
default:
break;
}
break;
case 13: /* CMD13: SEND_STATUS */
switch (sd->mode) {
case sd_data_transfer_mode:
        if (sd->rca != rca)
            return sd_r0;
return sd_r1;
default:
break;
}
break;
case 15: /* CMD15: GO_INACTIVE_STATE */
if (sd->spi)
goto bad_cmd;
switch (sd->mode) {
case sd_data_transfer_mode:
        if (sd->rca != rca)
            return sd_r0;
        sd->state = sd_inactive_state;
        return sd_r0;
default:
break;
}
break;
/* Block read commands (Classs 2) */
case 16: /* CMD16: SET_BLOCKLEN */
switch (sd->state) {
case sd_transfer_state:
if (req.arg > (1 << HWBLOCK_SHIFT))
sd->card_status |= BLOCK_LEN_ERROR;
else
sd->blk_len = req.arg;
return sd_r1;
default:
break;
}
break;
case 17: /* CMD17: READ_SINGLE_BLOCK */
switch (sd->state) {
case sd_transfer_state:
sd->state = sd_sendingdata_state;
sd->data_start = addr;
sd->data_offset = 0;
if (sd->data_start + sd->blk_len > sd->size)
sd->card_status |= ADDRESS_ERROR;
return sd_r1;
default:
break;
}
break;
case 18: /* CMD18: READ_MULTIPLE_BLOCK */
switch (sd->state) {
case sd_transfer_state:
sd->state = sd_sendingdata_state;
sd->data_start = addr;
sd->data_offset = 0;
if (sd->data_start + sd->blk_len > sd->size)
sd->card_status |= ADDRESS_ERROR;
return sd_r1;
default:
break;
}
break;
/* Block write commands (Class 4) */
case 24: /* CMD24: WRITE_SINGLE_BLOCK */
if (sd->spi)
goto unimplemented_cmd;
switch (sd->state) {
case sd_transfer_state:
/* Writing in SPI mode not implemented. */
if (sd->spi)
break;
sd->state = sd_receivingdata_state;
sd->data_start = addr;
sd->data_offset = 0;
sd->blk_written = 0;
if (sd->data_start + sd->blk_len > sd->size)
sd->card_status |= ADDRESS_ERROR;
if (sd_wp_addr(sd, sd->data_start))
sd->card_status |= WP_VIOLATION;
if (sd->csd[14] & 0x30)
sd->card_status |= WP_VIOLATION;
return sd_r1;
default:
break;
}
break;
case 25: /* CMD25: WRITE_MULTIPLE_BLOCK */
if (sd->spi)
goto unimplemented_cmd;
switch (sd->state) {
case sd_transfer_state:
/* Writing in SPI mode not implemented. */
if (sd->spi)
break;
sd->state = sd_receivingdata_state;
sd->data_start = addr;
sd->data_offset = 0;
sd->blk_written = 0;
if (sd->data_start + sd->blk_len > sd->size)
sd->card_status |= ADDRESS_ERROR;
if (sd_wp_addr(sd, sd->data_start))
sd->card_status |= WP_VIOLATION;
if (sd->csd[14] & 0x30)
sd->card_status |= WP_VIOLATION;
return sd_r1;
default:
break;
}
break;
case 26: /* CMD26: PROGRAM_CID */
if (sd->spi)
goto bad_cmd;
switch (sd->state) {
case sd_transfer_state:
sd->state = sd_receivingdata_state;
sd->data_start = 0;
sd->data_offset = 0;
return sd_r1;
default:
break;
}
break;
case 27: /* CMD27: PROGRAM_CSD */
if (sd->spi)
goto unimplemented_cmd;
switch (sd->state) {
case sd_transfer_state:
sd->state = sd_receivingdata_state;
sd->data_start = 0;
sd->data_offset = 0;
return sd_r1;
default:
break;
}
break;
/* Write protection (Class 6) */
case 28: /* CMD28: SET_WRITE_PROT */
switch (sd->state) {
case sd_transfer_state:
if (addr >= sd->size) {
sd->card_status = ADDRESS_ERROR;
return sd_r1b;
}
sd->state = sd_programming_state;
sd->wp_groups[addr >> (HWBLOCK_SHIFT +
SECTOR_SHIFT + WPGROUP_SHIFT)] = 1;
/* Bzzzzzzztt .... Operation complete. */
sd->state = sd_transfer_state;
return sd_r1b;
default:
break;
}
break;
case 29: /* CMD29: CLR_WRITE_PROT */
switch (sd->state) {
case sd_transfer_state:
if (addr >= sd->size) {
sd->card_status = ADDRESS_ERROR;
return sd_r1b;
}
sd->state = sd_programming_state;
sd->wp_groups[addr >> (HWBLOCK_SHIFT +
SECTOR_SHIFT + WPGROUP_SHIFT)] = 0;
/* Bzzzzzzztt .... Operation complete. */
sd->state = sd_transfer_state;
return sd_r1b;
default:
break;
}
break;
case 30: /* CMD30: SEND_WRITE_PROT */
switch (sd->state) {
case sd_transfer_state:
sd->state = sd_sendingdata_state;
*(uint32_t *) sd->data = sd_wpbits(sd, req.arg);
sd->data_start = addr;
sd->data_offset = 0;
return sd_r1b;
default:
break;
}
break;
/* Erase commands (Class 5) */
case 32: /* CMD32: ERASE_WR_BLK_START */
switch (sd->state) {
case sd_transfer_state:
sd->erase_start = req.arg;
return sd_r1;
default:
break;
}
break;
case 33: /* CMD33: ERASE_WR_BLK_END */
switch (sd->state) {
case sd_transfer_state:
sd->erase_end = req.arg;
return sd_r1;
default:
break;
}
break;
case 38: /* CMD38: ERASE */
switch (sd->state) {
case sd_transfer_state:
if (sd->csd[14] & 0x30) {
sd->card_status |= WP_VIOLATION;
return sd_r1b;
}
sd->state = sd_programming_state;
sd_erase(sd);
/* Bzzzzzzztt .... Operation complete. */
sd->state = sd_transfer_state;
return sd_r1b;
default:
break;
}
break;
/* Lock card commands (Class 7) */
case 42: /* CMD42: LOCK_UNLOCK */
if (sd->spi)
goto unimplemented_cmd;
switch (sd->state) {
case sd_transfer_state:
sd->state = sd_receivingdata_state;
sd->data_start = 0;
sd->data_offset = 0;
return sd_r1;
default:
break;
}
break;
/* Application specific commands (Class 8) */
case 55: /* CMD55: APP_CMD */
        if (sd->rca != rca)
            return sd_r0;
sd->card_status |= APP_CMD;
return sd_r1;
case 56: /* CMD56: GEN_CMD */
fprintf(stderr, "SD: GEN_CMD 0x%08x\n", req.arg);
switch (sd->state) {
case sd_transfer_state:
sd->data_offset = 0;
if (req.arg & 1)
sd->state = sd_sendingdata_state;
else
sd->state = sd_receivingdata_state;
return sd_r1;
default:
break;
}
break;
default:
bad_cmd:
fprintf(stderr, "SD: Unknown CMD%i\n", req.cmd);
unimplemented_cmd:
/* Commands that are recognised but not yet implemented in SPI mode. */
fprintf(stderr, "SD: CMD%i not implemented in SPI mode\n", req.cmd);
}
fprintf(stderr, "SD: CMD%i in a wrong state\n", req.cmd);
} | 22,520 |
FFmpeg | 84a6bc23570c17ce91071e41431103f709c0d595 | 0 | static int32_t tag_tree_size(uint16_t w, uint16_t h)
{
uint32_t res = 0;
while (w > 1 || h > 1) {
res += w * h;
if (res + 1 >= INT32_MAX)
return -1;
w = (w + 1) >> 1;
h = (h + 1) >> 1;
}
return (int32_t)(res + 1);
}
| 22,521 |
FFmpeg | e95580e70a8c0102cc2a399dff25307211a9b7ca | 0 | static int mov_write_packet(AVFormatContext *s, AVPacket *pkt)
{
MOVContext *mov = s->priv_data;
ByteIOContext *pb = s->pb;
MOVTrack *trk = &mov->tracks[pkt->stream_index];
AVCodecContext *enc = trk->enc;
unsigned int samplesInChunk = 0;
int size= pkt->size;
if (url_is_streamed(s->pb)) return 0; /* Can't handle that */
if (!size) return 0; /* Discard 0 sized packets */
if (enc->codec_id == CODEC_ID_AMR_NB) {
/* We must find out how many AMR blocks there are in one packet */
static uint16_t packed_size[16] =
{13, 14, 16, 18, 20, 21, 27, 32, 6, 0, 0, 0, 0, 0, 0, 0};
int len = 0;
while (len < size && samplesInChunk < 100) {
len += packed_size[(pkt->data[len] >> 3) & 0x0F];
samplesInChunk++;
}
if(samplesInChunk > 1){
av_log(s, AV_LOG_ERROR, "fatal error, input is not a single packet, implement a AVParser for it\n");
return -1;
}
} else if (trk->sampleSize)
samplesInChunk = size/trk->sampleSize;
else
samplesInChunk = 1;
/* copy extradata if it exists */
if (trk->vosLen == 0 && enc->extradata_size > 0) {
trk->vosLen = enc->extradata_size;
trk->vosData = av_malloc(trk->vosLen);
memcpy(trk->vosData, enc->extradata, trk->vosLen);
}
if (enc->codec_id == CODEC_ID_H264 && trk->vosLen > 0 && *(uint8_t *)trk->vosData != 1) {
/* from x264 or from bytestream h264 */
/* nal reformating needed */
int ret = ff_avc_parse_nal_units(pkt->data, &pkt->data, &pkt->size);
if (ret < 0)
return ret;
assert(pkt->size);
size = pkt->size;
} else if (enc->codec_id == CODEC_ID_DNXHD && !trk->vosLen) {
/* copy frame header to create needed atoms */
if (size < 640)
return -1;
trk->vosLen = 640;
trk->vosData = av_malloc(trk->vosLen);
memcpy(trk->vosData, pkt->data, 640);
}
if (!(trk->entry % MOV_INDEX_CLUSTER_SIZE)) {
trk->cluster = av_realloc(trk->cluster, (trk->entry + MOV_INDEX_CLUSTER_SIZE) * sizeof(*trk->cluster));
if (!trk->cluster)
return -1;
}
trk->cluster[trk->entry].pos = url_ftell(pb);
trk->cluster[trk->entry].samplesInChunk = samplesInChunk;
trk->cluster[trk->entry].size = size;
trk->cluster[trk->entry].entries = samplesInChunk;
trk->cluster[trk->entry].dts = pkt->dts;
trk->trackDuration = pkt->dts - trk->cluster[0].dts + pkt->duration;
if(enc->codec_type == CODEC_TYPE_VIDEO) {
if (pkt->dts != pkt->pts)
trk->hasBframes = 1;
trk->cluster[trk->entry].cts = pkt->pts - pkt->dts;
trk->cluster[trk->entry].key_frame = !!(pkt->flags & PKT_FLAG_KEY);
if(trk->cluster[trk->entry].key_frame)
trk->hasKeyframes++;
}
trk->entry++;
trk->sampleCount += samplesInChunk;
mov->mdat_size += size;
put_buffer(pb, pkt->data, size);
put_flush_packet(pb);
return 0;
}
| 22,522 |
FFmpeg | d1adad3cca407f493c3637e20ecd4f7124e69212 | 0 | static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
{
unsigned i;
#if COMPILE_TEMPLATE_MMX
x86_reg mmx_size= 23 - src_size;
__asm__ volatile (
"test %%"REG_a", %%"REG_a" \n\t"
"jns 2f \n\t"
"movq "MANGLE(mask24r)", %%mm5 \n\t"
"movq "MANGLE(mask24g)", %%mm6 \n\t"
"movq "MANGLE(mask24b)", %%mm7 \n\t"
".p2align 4 \n\t"
"1: \n\t"
PREFETCH" 32(%1, %%"REG_a") \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
"movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG
"movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B
"psllq $16, %%mm0 \n\t" // 00 BGR BGR
"pand %%mm5, %%mm0 \n\t"
"pand %%mm6, %%mm1 \n\t"
"pand %%mm7, %%mm2 \n\t"
"por %%mm0, %%mm1 \n\t"
"por %%mm2, %%mm1 \n\t"
"movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
MOVNTQ" %%mm1, (%2, %%"REG_a") \n\t" // RGB RGB RG
"movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B
"movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR
"pand %%mm7, %%mm0 \n\t"
"pand %%mm5, %%mm1 \n\t"
"pand %%mm6, %%mm2 \n\t"
"por %%mm0, %%mm1 \n\t"
"por %%mm2, %%mm1 \n\t"
"movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B
MOVNTQ" %%mm1, 8(%2, %%"REG_a") \n\t" // B RGB RGB R
"movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR
"movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG
"pand %%mm6, %%mm0 \n\t"
"pand %%mm7, %%mm1 \n\t"
"pand %%mm5, %%mm2 \n\t"
"por %%mm0, %%mm1 \n\t"
"por %%mm2, %%mm1 \n\t"
MOVNTQ" %%mm1, 16(%2, %%"REG_a") \n\t"
"add $24, %%"REG_a" \n\t"
" js 1b \n\t"
"2: \n\t"
: "+a" (mmx_size)
: "r" (src-mmx_size), "r"(dst-mmx_size)
);
__asm__ volatile(SFENCE:::"memory");
__asm__ volatile(EMMS:::"memory");
if (mmx_size==23) return; //finished, was multiple of 8
src+= src_size;
dst+= src_size;
src_size= 23-mmx_size;
src-= src_size;
dst-= src_size;
#endif
for (i=0; i<src_size; i+=3) {
register uint8_t x;
x = src[i + 2];
dst[i + 1] = src[i + 1];
dst[i + 2] = src[i + 0];
dst[i + 0] = x;
}
}
| 22,523 |
FFmpeg | 486637af8ef29ec215e0e0b7ecd3b5470f0e04e5 | 0 | static inline void mix_2f_2r_to_mono(AC3DecodeContext *ctx)
{
int i;
float (*output)[256] = ctx->audio_block.block_output;
for (i = 0; i < 256; i++)
output[1][i] = (output[2][i] + output[3][i] + output[4][i]);
memset(output[2], 0, sizeof(output[2]));
memset(output[3], 0, sizeof(output[3]));
memset(output[4], 0, sizeof(output[4]));
}
| 22,524 |
qemu | c3e10c7b4377c1cbc0a4fbc12312c2cf41c0cda7 | 1 | void OPPROTO op_405_check_ov (void)
{
do_405_check_ov();
RETURN();
}
| 22,525 |
qemu | 60fe637bf0e4d7989e21e50f52526444765c63b4 | 1 | static void put_int32(QEMUFile *f, void *pv, size_t size)
{
int32_t *v = pv;
qemu_put_sbe32s(f, v);
}
| 22,526 |
qemu | eff235eb2bcd7092901f4698a7907e742f3b7f2f | 1 | static ExitStatus trans_fop_wew_0c(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
unsigned rt = extract32(insn, 0, 5);
unsigned ra = extract32(insn, 21, 5);
return do_fop_wew(ctx, rt, ra, di->f_wew);
}
| 22,527 |
FFmpeg | 18b94669372d3d4b6c51e347587ea64acef9dbb8 | 1 | static void ebml_free(EbmlSyntax *syntax, void *data)
{
int i, j;
for (i = 0; syntax[i].id; i++) {
void *data_off = (char *) data + syntax[i].data_offset;
switch (syntax[i].type) {
case EBML_STR:
case EBML_UTF8:
av_freep(data_off);
break;
case EBML_BIN:
av_freep(&((EbmlBin *) data_off)->data);
break;
case EBML_LEVEL1:
case EBML_NEST:
if (syntax[i].list_elem_size) {
EbmlList *list = data_off;
char *ptr = list->elem;
for (j = 0; j < list->nb_elem;
j++, ptr += syntax[i].list_elem_size)
ebml_free(syntax[i].def.n, ptr);
av_freep(&list->elem);
} else
ebml_free(syntax[i].def.n, data_off);
default:
break;
}
}
} | 22,528 |
qemu | 9ed257d1d1c65dbe5a08f207e5106e98384e1860 | 1 | static int common_bind(struct common *c)
{
uint64_t mfn;
if (xenstore_read_fe_uint64(&c->xendev, "page-ref", &mfn) == -1)
return -1;
assert(mfn == (xen_pfn_t)mfn);
if (xenstore_read_fe_int(&c->xendev, "event-channel", &c->xendev.remote_port) == -1)
return -1;
c->page = xc_map_foreign_range(xen_xc, c->xendev.dom,
XC_PAGE_SIZE,
PROT_READ | PROT_WRITE, mfn);
if (c->page == NULL)
return -1;
xen_be_bind_evtchn(&c->xendev);
xen_be_printf(&c->xendev, 1, "ring mfn %"PRIx64", remote-port %d, local-port %d\n",
mfn, c->xendev.remote_port, c->xendev.local_port);
return 0;
}
| 22,529 |
FFmpeg | bd5c860fdbc33d19d2ff0f6d1f06de07c17560dd | 1 | int av_thread_message_queue_alloc(AVThreadMessageQueue **mq,
unsigned nelem,
unsigned elsize)
{
#if HAVE_THREADS
AVThreadMessageQueue *rmq;
int ret = 0;
if (nelem > INT_MAX / elsize)
return AVERROR(EINVAL);
if (!(rmq = av_mallocz(sizeof(*rmq))))
return AVERROR(ENOMEM);
if ((ret = pthread_mutex_init(&rmq->lock, NULL))) {
av_free(rmq);
return AVERROR(ret);
}
if ((ret = pthread_cond_init(&rmq->cond, NULL))) {
pthread_mutex_destroy(&rmq->lock);
av_free(rmq);
return AVERROR(ret);
}
if (!(rmq->fifo = av_fifo_alloc(elsize * nelem))) {
pthread_cond_destroy(&rmq->cond);
pthread_mutex_destroy(&rmq->lock);
av_free(rmq);
return AVERROR(ret);
}
rmq->elsize = elsize;
*mq = rmq;
return 0;
#else
*mq = NULL;
return AVERROR(ENOSYS);
#endif /* HAVE_THREADS */
}
| 22,530 |
FFmpeg | 32baeafeee4f8446c2c3720b9223ad2166ca9d30 | 1 | static void xvid_idct_put(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
{
ff_xvid_idct(block);
ff_put_pixels_clamped(block, dest, line_size);
}
| 22,532 |
FFmpeg | 3c6607eb6f946ed3e108db3f0694cab7e5a5df7e | 1 | int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
AVPacket *avpkt,
const AVFrame *frame,
int *got_packet_ptr)
{
int ret;
int user_packet = !!avpkt->data;
*got_packet_ptr = 0;
if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) {
av_free_packet(avpkt);
av_init_packet(avpkt);
avpkt->size = 0;
return 0;
}
if (av_image_check_size(avctx->width, avctx->height, 0, avctx))
return AVERROR(EINVAL);
av_assert0(avctx->codec->encode2);
ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
if (!ret) {
if (!*got_packet_ptr)
avpkt->size = 0;
else if (!(avctx->codec->capabilities & CODEC_CAP_DELAY))
avpkt->pts = avpkt->dts = frame->pts;
if (!user_packet && avpkt->data) {
uint8_t *new_data = av_realloc(avpkt->data, avpkt->size);
if (new_data)
avpkt->data = new_data;
}
avctx->frame_number++;
}
if (ret < 0 || !*got_packet_ptr)
av_free_packet(avpkt);
emms_c();
return ret;
}
| 22,533 |
FFmpeg | 568e18b15e2ddf494fd8926707d34ca08c8edce5 | 1 | static int get_str(ByteIOContext *bc, char *string, int maxlen){
int len= get_v(bc);
if(len && maxlen)
get_buffer(bc, string, FFMIN(len, maxlen));
while(len > maxlen){
get_byte(bc);
len--;
}
if(maxlen)
string[FFMIN(len, maxlen-1)]= 0;
if(maxlen == len)
return -1;
else
return 0;
}
| 22,535 |
FFmpeg | 493aa30adf88baf5bc734072592a22db586f0cfb | 1 | static int dvbsub_decode(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
DVBSubContext *ctx = avctx->priv_data;
AVSubtitle *sub = data;
const uint8_t *p, *p_end;
int segment_type;
int page_id;
int segment_length;
#ifdef DEBUG_PACKET_CONTENTS
int i;
av_log(avctx, AV_LOG_INFO, "DVB sub packet:\n");
for (i=0; i < buf_size; i++) {
av_log(avctx, AV_LOG_INFO, "%02x ", buf[i]);
if (i % 16 == 15)
av_log(avctx, AV_LOG_INFO, "\n");
}
if (i % 16)
av_log(avctx, AV_LOG_INFO, "\n");
#endif
if (buf_size <= 2 || *buf != 0x0f)
return -1;
p = buf;
p_end = buf + buf_size;
while (p < p_end && *p == 0x0f) {
p += 1;
segment_type = *p++;
page_id = AV_RB16(p);
p += 2;
segment_length = AV_RB16(p);
p += 2;
if (page_id == ctx->composition_id || page_id == ctx->ancillary_id ||
ctx->composition_id == -1 || ctx->ancillary_id == -1) {
switch (segment_type) {
case DVBSUB_PAGE_SEGMENT:
dvbsub_parse_page_segment(avctx, p, segment_length);
break;
case DVBSUB_REGION_SEGMENT:
dvbsub_parse_region_segment(avctx, p, segment_length);
break;
case DVBSUB_CLUT_SEGMENT:
dvbsub_parse_clut_segment(avctx, p, segment_length);
break;
case DVBSUB_OBJECT_SEGMENT:
dvbsub_parse_object_segment(avctx, p, segment_length);
break;
case DVBSUB_DISPLAYDEFINITION_SEGMENT:
dvbsub_parse_display_definition_segment(avctx, p, segment_length);
case DVBSUB_DISPLAY_SEGMENT:
*data_size = dvbsub_display_end_segment(avctx, p, segment_length, sub);
break;
default:
av_dlog(avctx, "Subtitling segment type 0x%x, page id %d, length %d\n",
segment_type, page_id, segment_length);
break;
}
}
p += segment_length;
}
return p - buf;
}
| 22,536 |
FFmpeg | 09d5929f3721613fbb9ac9e74265c89c70df2ce0 | 1 | static int tag_tree_decode(Jpeg2000DecoderContext *s, Jpeg2000TgtNode *node,
int threshold)
{
Jpeg2000TgtNode *stack[30];
int sp = -1, curval = 0;
while (node && !node->vis) {
stack[++sp] = node;
node = node->parent;
}
if (node)
curval = node->val;
else
curval = stack[sp]->val;
while (curval < threshold && sp >= 0) {
if (curval < stack[sp]->val)
curval = stack[sp]->val;
while (curval < threshold) {
int ret;
if ((ret = get_bits(s, 1)) > 0) {
stack[sp]->vis++;
break;
} else if (!ret)
curval++;
else
return ret;
}
stack[sp]->val = curval;
sp--;
}
return curval;
} | 22,537 |
FFmpeg | 590863876d1478547640304a31c15809c3618090 | 1 | static int output_configure(AACContext *ac,
uint8_t layout_map[MAX_ELEM_ID * 4][3], int tags,
enum OCStatus oc_type, int get_new_frame)
{
AVCodecContext *avctx = ac->avctx;
int i, channels = 0, ret;
uint64_t layout = 0;
uint8_t id_map[TYPE_END][MAX_ELEM_ID] = {{ 0 }};
uint8_t type_counts[TYPE_END] = { 0 };
if (ac->oc[1].layout_map != layout_map) {
memcpy(ac->oc[1].layout_map, layout_map, tags * sizeof(layout_map[0]));
        ac->oc[1].layout_map_tags = tags;
    }
for (i = 0; i < tags; i++) {
int type = layout_map[i][0];
int id = layout_map[i][1];
        id_map[type][id] = type_counts[type]++;
    }
// Try to sniff a reasonable channel order, otherwise output the
// channels in the order the PCE declared them.
if (avctx->request_channel_layout != AV_CH_LAYOUT_NATIVE)
layout = sniff_channel_order(layout_map, tags);
for (i = 0; i < tags; i++) {
int type = layout_map[i][0];
int id = layout_map[i][1];
int iid = id_map[type][id];
int position = layout_map[i][2];
// Allocate or free elements depending on if they are in the
// current program configuration.
ret = che_configure(ac, position, type, iid, &channels);
if (ret < 0)
return ret;
ac->tag_che_map[type][id] = ac->che[type][iid];
if (ac->oc[1].m4ac.ps == 1 && channels == 2) {
if (layout == AV_CH_FRONT_CENTER) {
layout = AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT;
} else {
                layout = 0;
            }
        }
    }
if (layout) avctx->channel_layout = layout;
ac->oc[1].channel_layout = layout;
avctx->channels = ac->oc[1].channels = channels;
ac->oc[1].status = oc_type;
if (get_new_frame) {
if ((ret = frame_configure_elements(ac->avctx)) < 0)
            return ret;
    }
    return 0;
}
| 22,538 |
qemu | 9ef91a677110ec200d7b2904fc4bcae5a77329ad | 0 | static int qemu_paio_submit(struct qemu_paiocb *aiocb, int type)
{
aiocb->aio_type = type;
aiocb->ret = -EINPROGRESS;
aiocb->active = 0;
mutex_lock(&lock);
if (idle_threads == 0 && cur_threads < max_threads)
spawn_thread();
TAILQ_INSERT_TAIL(&request_list, aiocb, node);
mutex_unlock(&lock);
cond_signal(&cond);
return 0;
}
| 22,539 |
qemu | 59800ec8e52bcfa271fa61fb0aae19205ef1b7f1 | 0 | uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
uint64_t arg3)
{
CPU_DoubleU farg1, farg2, farg3;
farg1.ll = arg1;
farg2.ll = arg2;
farg3.ll = arg3;
if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
(float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
/* Multiplication of zero by infinity */
farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ);
} else {
if (unlikely(float64_is_signaling_nan(farg1.d) ||
float64_is_signaling_nan(farg2.d) ||
float64_is_signaling_nan(farg3.d))) {
/* sNaN operation */
fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN);
}
/* This is the way the PowerPC specification defines it */
float128 ft0_128, ft1_128;
ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
if (unlikely(float128_is_infinity(ft0_128) &&
float64_is_infinity(farg3.d) &&
float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
/* Magnitude subtraction of infinities */
farg1.ll = fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI);
} else {
ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
farg1.d = float128_to_float64(ft0_128, &env->fp_status);
}
if (likely(!float64_is_any_nan(farg1.d))) {
farg1.d = float64_chs(farg1.d);
}
}
return farg1.ll;
}
| 22,540 |
qemu | 61007b316cd71ee7333ff7a0a749a8949527575f | 0 | static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
BlockCompletionFunc *cb, void *opaque)
{
return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
| 22,541 |
qemu | b02ef3d92b19ad304a84433d3817f0903296ebc7 | 0 | static void attach_storage_element(SCLPDevice *sclp, SCCB *sccb,
uint16_t element)
{
int i, assigned, subincrement_id;
AttachStorageElement *attach_info = (AttachStorageElement *) sccb;
sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
assert(mhd);
if (element != 1) {
sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
return;
}
assigned = mhd->standby_mem_size >> mhd->increment_size;
attach_info->assigned = cpu_to_be16(assigned);
subincrement_id = ((ram_size >> mhd->increment_size) << 16)
+ SCLP_STARTING_SUBINCREMENT_ID;
for (i = 0; i < assigned; i++) {
attach_info->entries[i] = cpu_to_be32(subincrement_id);
subincrement_id += SCLP_INCREMENT_UNIT;
}
sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
}
| 22,542 |
qemu | 1ec26c757d5996468afcc0dced4fad04139574b3 | 0 | static void kvmppc_pivot_hpt_cpu(CPUState *cs, run_on_cpu_data arg)
{
target_ulong sdr1 = arg.target_ptr;
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
/* This is just for the benefit of PR KVM */
cpu_synchronize_state(cs);
env->spr[SPR_SDR1] = sdr1;
if (kvmppc_put_books_sregs(cpu) < 0) {
error_report("Unable to update SDR1 in KVM");
exit(1);
}
}
| 22,543 |
qemu | 245f7b51c0ea04fb2224b1127430a096c91aee70 | 0 | static int send_rect_simple(VncState *vs, int x, int y, int w, int h)
{
int max_size, max_width;
int max_sub_width, max_sub_height;
int dx, dy;
int rw, rh;
int n = 0;
max_size = tight_conf[vs->tight_compression].max_rect_size;
max_width = tight_conf[vs->tight_compression].max_rect_width;
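    /* rectangles larger than the configured limits are split into sub-rectangles below */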
if (w > max_width || w * h > max_size) {
max_sub_width = (w > max_width) ? max_width : w;
max_sub_height = max_size / max_sub_width;
for (dy = 0; dy < h; dy += max_sub_height) {
for (dx = 0; dx < w; dx += max_width) {
rw = MIN(max_sub_width, w - dx);
rh = MIN(max_sub_height, h - dy);
n += send_sub_rect(vs, x+dx, y+dy, rw, rh);
}
}
} else {
n += send_sub_rect(vs, x, y, w, h);
}
return n;
}
| 22,544 |
FFmpeg | fd7af82c53ea8a2577ea8952d35fb158db594592 | 0 | static int decompress_i(AVCodecContext *avctx, uint32_t *dst, int linesize)
{
SCPRContext *s = avctx->priv_data;
GetByteContext *gb = &s->gb;
int cx = 0, cx1 = 0, k = 0, clr = 0;
int run, r, g, b, off, y = 0, x = 0, ret;
const int cxshift = s->cxshift;
unsigned lx, ly, ptype;
reinit_tables(s);
bytestream2_skip(gb, 2);
init_rangecoder(&s->rc, gb);
while (k < avctx->width + 1) {
ret = decode_unit(s, &s->pixel_model[0][cx + cx1], 400, &r);
if (ret < 0)
return ret;
cx1 = (cx << 6) & 0xFC0;
cx = r >> cxshift;
ret = decode_unit(s, &s->pixel_model[1][cx + cx1], 400, &g);
if (ret < 0)
return ret;
cx1 = (cx << 6) & 0xFC0;
cx = g >> cxshift;
ret = decode_unit(s, &s->pixel_model[2][cx + cx1], 400, &b);
if (ret < 0)
return ret;
cx1 = (cx << 6) & 0xFC0;
cx = b >> cxshift;
ret = decode_value(s, s->run_model[0], 256, 400, &run);
if (ret < 0)
return ret;
clr = (b << 16) + (g << 8) + r;
k += run;
while (run-- > 0) {
dst[y * linesize + x] = clr;
lx = x;
ly = y;
x++;
if (x >= avctx->width) {
x = 0;
y++;
}
}
}
off = -linesize - 1;
ptype = 0;
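    /* ptype selects the predictor for the next run: 0 = literal RGB triplet,
     * 1 = repeat the previous pixel, 2 = copy the pixel above,
     * 4 = per-component gradient, 5 = copy the pixel above-left */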
while (x < avctx->width && y < avctx->height) {
ret = decode_value(s, s->op_model[ptype], 6, 1000, &ptype);
if (ret < 0)
return ret;
if (ptype == 0) {
ret = decode_unit(s, &s->pixel_model[0][cx + cx1], 400, &r);
if (ret < 0)
return ret;
cx1 = (cx << 6) & 0xFC0;
cx = r >> cxshift;
ret = decode_unit(s, &s->pixel_model[1][cx + cx1], 400, &g);
if (ret < 0)
return ret;
cx1 = (cx << 6) & 0xFC0;
cx = g >> cxshift;
ret = decode_unit(s, &s->pixel_model[2][cx + cx1], 400, &b);
if (ret < 0)
return ret;
cx1 = (cx << 6) & 0xFC0;
cx = b >> cxshift;
clr = (b << 16) + (g << 8) + r;
}
if (ptype > 5)
return AVERROR_INVALIDDATA;
ret = decode_value(s, s->run_model[ptype], 256, 400, &run);
if (ret < 0)
return ret;
switch (ptype) {
case 0:
while (run-- > 0) {
dst[y * linesize + x] = clr;
lx = x;
ly = y;
x++;
if (x >= avctx->width) {
x = 0;
y++;
}
}
break;
case 1:
while (run-- > 0) {
dst[y * linesize + x] = dst[ly * linesize + lx];
lx = x;
ly = y;
x++;
if (x >= avctx->width) {
x = 0;
y++;
}
}
clr = dst[ly * linesize + lx];
break;
case 2:
while (run-- > 0) {
clr = dst[y * linesize + x + off + 1];
dst[y * linesize + x] = clr;
lx = x;
ly = y;
x++;
if (x >= avctx->width) {
x = 0;
y++;
}
}
break;
case 4:
while (run-- > 0) {
uint8_t *odst = (uint8_t *)dst;
r = odst[(ly * linesize + lx) * 4] +
odst[((y * linesize + x) + off) * 4 + 4] -
odst[((y * linesize + x) + off) * 4];
g = odst[(ly * linesize + lx) * 4 + 1] +
odst[((y * linesize + x) + off) * 4 + 5] -
odst[((y * linesize + x) + off) * 4 + 1];
b = odst[(ly * linesize + lx) * 4 + 2] +
odst[((y * linesize + x) + off) * 4 + 6] -
odst[((y * linesize + x) + off) * 4 + 2];
clr = ((b & 0xFF) << 16) + ((g & 0xFF) << 8) + (r & 0xFF);
dst[y * linesize + x] = clr;
lx = x;
ly = y;
x++;
if (x >= avctx->width) {
x = 0;
y++;
}
}
break;
case 5:
while (run-- > 0) {
clr = dst[y * linesize + x + off];
dst[y * linesize + x] = clr;
lx = x;
ly = y;
x++;
if (x >= avctx->width) {
x = 0;
y++;
}
}
break;
}
if (avctx->bits_per_coded_sample == 16) {
cx1 = (clr & 0xFF00) >> 2;
cx = (clr & 0xFFFFFF) >> 16;
} else {
cx1 = (clr & 0xFC00) >> 4;
cx = (clr & 0xFFFFFF) >> 18;
}
}
return 0;
}
| 22,548 |
qemu | 6e0d8677cb443e7408c0b7a25a93c6596d7fa380 | 0 | static void gen_extu(int ot, TCGv reg)
{
switch(ot) {
case OT_BYTE:
tcg_gen_ext8u_tl(reg, reg);
break;
case OT_WORD:
tcg_gen_ext16u_tl(reg, reg);
break;
case OT_LONG:
tcg_gen_ext32u_tl(reg, reg);
break;
default:
break;
}
}
| 22,549 |
qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c | 0 | ssize_t read_targphys(const char *name,
int fd, target_phys_addr_t dst_addr, size_t nbytes)
{
uint8_t *buf;
ssize_t did;
buf = g_malloc(nbytes);
did = read(fd, buf, nbytes);
if (did > 0)
rom_add_blob_fixed("read", buf, did, dst_addr);
g_free(buf);
return did;
}
| 22,550 |
qemu | 6acbe4c6f18e7de00481ff30574262b58526de45 | 0 | DeviceState *qdev_device_add(QemuOpts *opts)
{
DeviceClass *k;
const char *driver, *path, *id;
DeviceState *qdev;
BusState *bus;
driver = qemu_opt_get(opts, "driver");
if (!driver) {
qerror_report(QERR_MISSING_PARAMETER, "driver");
return NULL;
}
/* find driver */
k = DEVICE_CLASS(object_class_by_name(driver));
/* find bus */
path = qemu_opt_get(opts, "bus");
if (path != NULL) {
bus = qbus_find(path);
if (!bus) {
return NULL;
}
if (bus->info != k->bus_info) {
qerror_report(QERR_BAD_BUS_FOR_DEVICE,
driver, bus->info->name);
return NULL;
}
} else {
bus = qbus_find_recursive(main_system_bus, NULL, k->bus_info);
if (!bus) {
qerror_report(QERR_NO_BUS_FOR_DEVICE,
driver, k->bus_info->name);
return NULL;
}
}
if (qdev_hotplug && !bus->allow_hotplug) {
qerror_report(QERR_BUS_NO_HOTPLUG, bus->name);
return NULL;
}
/* create device, set properties */
qdev = qdev_create_from_info(bus, driver);
id = qemu_opts_id(opts);
if (id) {
qdev->id = id;
qdev_property_add_child(qdev_get_peripheral(), qdev->id, qdev, NULL);
} else {
static int anon_count;
gchar *name = g_strdup_printf("device[%d]", anon_count++);
qdev_property_add_child(qdev_get_peripheral_anon(), name,
qdev, NULL);
g_free(name);
}
if (qemu_opt_foreach(opts, set_property, qdev, 1) != 0) {
qdev_free(qdev);
return NULL;
}
if (qdev_init(qdev) < 0) {
qerror_report(QERR_DEVICE_INIT_FAILED, driver);
return NULL;
}
qdev->opts = opts;
return qdev;
}
| 22,551 |
qemu | ac531cb6e542b1e61d668604adf9dc5306a948c0 | 0 | static void qdict_teardown(void)
{
QDECREF(tests_dict);
tests_dict = NULL;
}
| 22,554 |
qemu | bec1631100323fac0900aea71043d5c4e22fc2fa | 0 | static void tcg_out_brcond(TCGContext *s, TCGCond cond,
TCGArg arg1, TCGArg arg2, int const_arg2,
int label_index, TCGType type)
{
tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
tcg_out_bc(s, tcg_to_bc[cond], label_index);
}
| 22,555 |
qemu | fae2afb10e3fdceab612c62a2b1e8b944ff578d9 | 0 | static void qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
{
switch (le32_to_cpu(ext->cmd.type)) {
case QXL_CMD_SURFACE:
{
QXLSurfaceCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
uint32_t id = le32_to_cpu(cmd->surface_id);
PANIC_ON(id >= NUM_SURFACES);
qemu_mutex_lock(&qxl->track_lock);
if (cmd->type == QXL_SURFACE_CMD_CREATE) {
qxl->guest_surfaces.cmds[id] = ext->cmd.data;
qxl->guest_surfaces.count++;
if (qxl->guest_surfaces.max < qxl->guest_surfaces.count)
qxl->guest_surfaces.max = qxl->guest_surfaces.count;
}
if (cmd->type == QXL_SURFACE_CMD_DESTROY) {
qxl->guest_surfaces.cmds[id] = 0;
qxl->guest_surfaces.count--;
}
qemu_mutex_unlock(&qxl->track_lock);
break;
}
case QXL_CMD_CURSOR:
{
QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
if (cmd->type == QXL_CURSOR_SET) {
qemu_mutex_lock(&qxl->track_lock);
qxl->guest_cursor = ext->cmd.data;
qemu_mutex_unlock(&qxl->track_lock);
}
break;
}
}
}
| 22,556 |
qemu | c2fa30757a2ba1bb5b053883773a9a61fbdd7082 | 0 | static void nvdimm_dsm_label_size(NVDIMMDevice *nvdimm, hwaddr dsm_mem_addr)
{
NvdimmFuncGetLabelSizeOut label_size_out = {
.len = cpu_to_le32(sizeof(label_size_out)),
};
uint32_t label_size, mxfer;
label_size = nvdimm->label_size;
mxfer = nvdimm_get_max_xfer_label_size();
nvdimm_debug("label_size %#x, max_xfer %#x.\n", label_size, mxfer);
label_size_out.func_ret_status = cpu_to_le32(0 /* Success */);
label_size_out.label_size = cpu_to_le32(label_size);
label_size_out.max_xfer = cpu_to_le32(mxfer);
cpu_physical_memory_write(dsm_mem_addr, &label_size_out,
sizeof(label_size_out));
}
| 22,557 |
qemu | bdb11366b9370e97fb436444c697c01fe839dc11 | 0 | qemu_irq *armv7m_init(int flash_size, int sram_size,
const char *kernel_filename, const char *cpu_model)
{
CPUState *env;
DeviceState *nvic;
/* FIXME: make this local state. */
static qemu_irq pic[64];
qemu_irq *cpu_pic;
uint32_t pc;
int image_size;
uint64_t entry;
uint64_t lowaddr;
int i;
flash_size *= 1024;
sram_size *= 1024;
if (!cpu_model)
cpu_model = "cortex-m3";
env = cpu_init(cpu_model);
if (!env) {
fprintf(stderr, "Unable to find CPU definition\n");
exit(1);
}
#if 0
/* > 32Mb SRAM gets complicated because it overlaps the bitband area.
We don't have proper commandline options, so allocate half of memory
as SRAM, up to a maximum of 32Mb, and the rest as code. */
if (ram_size > (512 + 32) * 1024 * 1024)
ram_size = (512 + 32) * 1024 * 1024;
sram_size = (ram_size / 2) & TARGET_PAGE_MASK;
if (sram_size > 32 * 1024 * 1024)
sram_size = 32 * 1024 * 1024;
code_size = ram_size - sram_size;
#endif
/* Flash programming is done via the SCU, so pretend it is ROM. */
cpu_register_physical_memory(0, flash_size,
qemu_ram_alloc(flash_size) | IO_MEM_ROM);
cpu_register_physical_memory(0x20000000, sram_size,
qemu_ram_alloc(sram_size) | IO_MEM_RAM);
armv7m_bitband_init();
nvic = qdev_create(NULL, "armv7m_nvic");
qdev_set_prop_ptr(nvic, "cpu", env);
qdev_init(nvic);
cpu_pic = arm_pic_init_cpu(env);
sysbus_connect_irq(sysbus_from_qdev(nvic), 0, cpu_pic[ARM_PIC_CPU_IRQ]);
for (i = 0; i < 64; i++) {
pic[i] = qdev_get_gpio_in(nvic, i);
}
image_size = load_elf(kernel_filename, 0, &entry, &lowaddr, NULL);
if (image_size < 0) {
image_size = load_image_targphys(kernel_filename, 0, flash_size);
lowaddr = 0;
}
if (image_size < 0) {
fprintf(stderr, "qemu: could not load kernel '%s'\n",
kernel_filename);
exit(1);
}
/* If the image was loaded at address zero then assume it is a
regular ROM image and perform the normal CPU reset sequence.
Otherwise jump directly to the entry point. */
if (lowaddr == 0) {
env->regs[13] = ldl_phys(0);
pc = ldl_phys(4);
} else {
pc = entry;
}
env->thumb = pc & 1;
env->regs[15] = pc & ~1;
/* Hack to map an additional page of ram at the top of the address
space. This stops qemu complaining about executing code outside RAM
when returning from an exception. */
cpu_register_physical_memory(0xfffff000, 0x1000,
qemu_ram_alloc(0x1000) | IO_MEM_RAM);
return pic;
}
| 22,558 |
qemu | 72cf2d4f0e181d0d3a3122e04129c58a95da713e | 0 | int monitor_get_fd(Monitor *mon, const char *fdname)
{
mon_fd_t *monfd;
LIST_FOREACH(monfd, &mon->fds, next) {
int fd;
if (strcmp(monfd->name, fdname) != 0) {
continue;
}
fd = monfd->fd;
/* caller takes ownership of fd */
LIST_REMOVE(monfd, next);
qemu_free(monfd->name);
qemu_free(monfd);
return fd;
}
return -1;
}
| 22,560 |
qemu | fdfab37dfeffefbd4533b4158055c9b82d7c3e69 | 0 | static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
uint64_t *l2_table, int l2_index)
{
int i;
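    /* count contiguous clusters that still need copy-on-write, stopping at the
     * first normal cluster that is already writable in place (COPIED flag set) */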
for (i = 0; i < nb_clusters; i++) {
uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);
switch(cluster_type) {
case QCOW2_CLUSTER_NORMAL:
if (l2_entry & QCOW_OFLAG_COPIED) {
goto out;
}
break;
case QCOW2_CLUSTER_UNALLOCATED:
case QCOW2_CLUSTER_COMPRESSED:
case QCOW2_CLUSTER_ZERO:
break;
default:
abort();
}
}
out:
assert(i <= nb_clusters);
return i;
}
| 22,561 |
qemu | 74c85296dc880568005b8e7572e08a39d66bcdca | 0 | VirtIOS390Device *s390_virtio_bus_find_vring(VirtIOS390Bus *bus,
ram_addr_t mem,
int *vq_num)
{
BusChild *kid;
int i;
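    /* scan every device on the bus for a virtqueue whose ring is located at 'mem' */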
QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
VirtIOS390Device *dev = (VirtIOS390Device *)kid->child;
for(i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
if (!virtio_queue_get_addr(dev->vdev, i))
break;
if (virtio_queue_get_addr(dev->vdev, i) == mem) {
if (vq_num) {
*vq_num = i;
}
return dev;
}
}
}
return NULL;
}
| 22,562 |
FFmpeg | 1c088632e98af96f9cbe8129c5d7eb7274f8d4ed | 0 | static inline int parse_nal_units(AVCodecParserContext *s, const uint8_t *buf,
int buf_size, AVCodecContext *avctx)
{
HEVCParserContext *ctx = s->priv_data;
HEVCContext *h = &ctx->h;
GetBitContext *gb;
SliceHeader *sh = &h->sh;
HEVCParamSets *ps = &h->ps;
HEVCSEIContext *sei = &h->sei;
int is_global = buf == avctx->extradata;
int i, ret;
if (!h->HEVClc)
h->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
if (!h->HEVClc)
return AVERROR(ENOMEM);
gb = &h->HEVClc->gb;
/* set some sane default values */
s->pict_type = AV_PICTURE_TYPE_I;
s->key_frame = 0;
s->picture_structure = AV_PICTURE_STRUCTURE_UNKNOWN;
h->avctx = avctx;
ff_hevc_reset_sei(sei);
ret = ff_h2645_packet_split(&ctx->pkt, buf, buf_size, avctx, 0, 0,
AV_CODEC_ID_HEVC, 1);
if (ret < 0)
return ret;
for (i = 0; i < ctx->pkt.nb_nals; i++) {
H2645NAL *nal = &ctx->pkt.nals[i];
int num = 0, den = 0;
h->nal_unit_type = nal->type;
h->temporal_id = nal->temporal_id;
*gb = nal->gb;
switch (h->nal_unit_type) {
case HEVC_NAL_VPS:
ff_hevc_decode_nal_vps(gb, avctx, ps);
break;
case HEVC_NAL_SPS:
ff_hevc_decode_nal_sps(gb, avctx, ps, 1);
break;
case HEVC_NAL_PPS:
ff_hevc_decode_nal_pps(gb, avctx, ps);
break;
case HEVC_NAL_SEI_PREFIX:
case HEVC_NAL_SEI_SUFFIX:
ff_hevc_decode_nal_sei(gb, avctx, sei, ps, h->nal_unit_type);
break;
case HEVC_NAL_TRAIL_N:
case HEVC_NAL_TRAIL_R:
case HEVC_NAL_TSA_N:
case HEVC_NAL_TSA_R:
case HEVC_NAL_STSA_N:
case HEVC_NAL_STSA_R:
case HEVC_NAL_RADL_N:
case HEVC_NAL_RADL_R:
case HEVC_NAL_RASL_N:
case HEVC_NAL_RASL_R:
case HEVC_NAL_BLA_W_LP:
case HEVC_NAL_BLA_W_RADL:
case HEVC_NAL_BLA_N_LP:
case HEVC_NAL_IDR_W_RADL:
case HEVC_NAL_IDR_N_LP:
case HEVC_NAL_CRA_NUT:
if (is_global) {
av_log(avctx, AV_LOG_ERROR, "Invalid NAL unit: %d\n", h->nal_unit_type);
return AVERROR_INVALIDDATA;
}
sh->first_slice_in_pic_flag = get_bits1(gb);
s->picture_structure = h->sei.picture_timing.picture_struct;
s->field_order = h->sei.picture_timing.picture_struct;
if (IS_IRAP(h)) {
s->key_frame = 1;
sh->no_output_of_prior_pics_flag = get_bits1(gb);
}
sh->pps_id = get_ue_golomb(gb);
if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !ps->pps_list[sh->pps_id]) {
av_log(avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
return AVERROR_INVALIDDATA;
}
ps->pps = (HEVCPPS*)ps->pps_list[sh->pps_id]->data;
if (ps->pps->sps_id >= HEVC_MAX_SPS_COUNT || !ps->sps_list[ps->pps->sps_id]) {
av_log(avctx, AV_LOG_ERROR, "SPS id out of range: %d\n", ps->pps->sps_id);
return AVERROR_INVALIDDATA;
}
if (ps->sps != (HEVCSPS*)ps->sps_list[ps->pps->sps_id]->data) {
ps->sps = (HEVCSPS*)ps->sps_list[ps->pps->sps_id]->data;
ps->vps = (HEVCVPS*)ps->vps_list[ps->sps->vps_id]->data;
}
s->coded_width = ps->sps->width;
s->coded_height = ps->sps->height;
s->width = ps->sps->output_width;
s->height = ps->sps->output_height;
s->format = ps->sps->pix_fmt;
avctx->profile = ps->sps->ptl.general_ptl.profile_idc;
avctx->level = ps->sps->ptl.general_ptl.level_idc;
if (ps->vps->vps_timing_info_present_flag) {
num = ps->vps->vps_num_units_in_tick;
den = ps->vps->vps_time_scale;
} else if (ps->sps->vui.vui_timing_info_present_flag) {
num = ps->sps->vui.vui_num_units_in_tick;
den = ps->sps->vui.vui_time_scale;
}
if (num != 0 && den != 0)
av_reduce(&avctx->framerate.den, &avctx->framerate.num,
num, den, 1 << 30);
if (!sh->first_slice_in_pic_flag) {
int slice_address_length;
if (ps->pps->dependent_slice_segments_enabled_flag)
sh->dependent_slice_segment_flag = get_bits1(gb);
else
sh->dependent_slice_segment_flag = 0;
slice_address_length = av_ceil_log2_c(ps->sps->ctb_width *
ps->sps->ctb_height);
sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
if (sh->slice_segment_addr >= ps->sps->ctb_width * ps->sps->ctb_height) {
av_log(avctx, AV_LOG_ERROR, "Invalid slice segment address: %u.\n",
sh->slice_segment_addr);
return AVERROR_INVALIDDATA;
}
} else
sh->dependent_slice_segment_flag = 0;
if (sh->dependent_slice_segment_flag)
break;
for (i = 0; i < ps->pps->num_extra_slice_header_bits; i++)
skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
sh->slice_type = get_ue_golomb(gb);
if (!(sh->slice_type == HEVC_SLICE_I || sh->slice_type == HEVC_SLICE_P ||
sh->slice_type == HEVC_SLICE_B)) {
av_log(avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
sh->slice_type);
return AVERROR_INVALIDDATA;
}
s->pict_type = sh->slice_type == HEVC_SLICE_B ? AV_PICTURE_TYPE_B :
sh->slice_type == HEVC_SLICE_P ? AV_PICTURE_TYPE_P :
AV_PICTURE_TYPE_I;
if (ps->pps->output_flag_present_flag)
sh->pic_output_flag = get_bits1(gb);
if (ps->sps->separate_colour_plane_flag)
sh->colour_plane_id = get_bits(gb, 2);
if (!IS_IDR(h)) {
sh->pic_order_cnt_lsb = get_bits(gb, ps->sps->log2_max_poc_lsb);
s->output_picture_number = h->poc = ff_hevc_compute_poc(h->ps.sps, h->pocTid0, sh->pic_order_cnt_lsb, h->nal_unit_type);
} else
s->output_picture_number = h->poc = 0;
if (h->temporal_id == 0 &&
h->nal_unit_type != HEVC_NAL_TRAIL_N &&
h->nal_unit_type != HEVC_NAL_TSA_N &&
h->nal_unit_type != HEVC_NAL_STSA_N &&
h->nal_unit_type != HEVC_NAL_RADL_N &&
h->nal_unit_type != HEVC_NAL_RASL_N &&
h->nal_unit_type != HEVC_NAL_RADL_R &&
h->nal_unit_type != HEVC_NAL_RASL_R)
h->pocTid0 = h->poc;
return 0; /* no need to evaluate the rest */
}
}
/* didn't find a picture! */
if (!is_global)
av_log(h->avctx, AV_LOG_ERROR, "missing picture in access unit\n");
return -1;
}
| 22,563 |
FFmpeg | e549933a270dd2cfc36f2cf9bb6b29acf3dc6d08 | 0 | void ff_put_h264_qpel4_mc22_msa(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
avc_luma_mid_4w_msa(src - (2 * stride) - 2, stride, dst, stride, 4);
}
| 22,564 |
FFmpeg | 61bd0ed781b56eea1e8e851aab34a2ee3b59fbac | 0 | int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length,
void *logctx, int is_nalff, int nal_length_size,
enum AVCodecID codec_id)
{
int consumed, ret = 0;
const uint8_t *next_avc = buf + (is_nalff ? 0 : length);
pkt->nb_nals = 0;
while (length >= 4) {
H2645NAL *nal;
int extract_length = 0;
int skip_trailing_zeros = 1;
/*
* Only parse an AVC1 length field if one is expected at the current
* buffer position. There are unfortunately streams with multiple
* NAL units covered by the length field. Those NAL units are delimited
* by Annex B start code prefixes. ff_h2645_extract_rbsp() detects it
* correctly and consumes only the first NAL unit. The additional NAL
* units are handled here in the Annex B parsing code.
*/
if (buf == next_avc) {
int i;
for (i = 0; i < nal_length_size; i++)
extract_length = (extract_length << 8) | buf[i];
if (extract_length > length) {
av_log(logctx, AV_LOG_ERROR, "Invalid NAL unit size.\n");
return AVERROR_INVALIDDATA;
}
buf += nal_length_size;
length -= nal_length_size;
// keep track of the next AVC1 length field
next_avc = buf + extract_length;
} else {
/*
* expected to return immediately except for streams with mixed
* NAL unit coding
*/
int buf_index = find_next_start_code(buf, next_avc);
buf += buf_index;
length -= buf_index;
/*
* break if an AVC1 length field is expected at the current buffer
* position
*/
if (buf == next_avc)
continue;
if (length > 0) {
extract_length = length;
} else if (pkt->nb_nals == 0) {
av_log(logctx, AV_LOG_ERROR, "No NAL unit found\n");
return AVERROR_INVALIDDATA;
} else {
break;
}
}
if (pkt->nals_allocated < pkt->nb_nals + 1) {
int new_size = pkt->nals_allocated + 1;
H2645NAL *tmp = av_realloc_array(pkt->nals, new_size, sizeof(*tmp));
if (!tmp)
return AVERROR(ENOMEM);
pkt->nals = tmp;
memset(pkt->nals + pkt->nals_allocated, 0,
(new_size - pkt->nals_allocated) * sizeof(*tmp));
pkt->nals_allocated = new_size;
}
nal = &pkt->nals[pkt->nb_nals++];
consumed = ff_h2645_extract_rbsp(buf, extract_length, nal);
if (consumed < 0)
return consumed;
/* see commit 3566042a0 */
if (consumed < length - 3 &&
buf[consumed] == 0x00 && buf[consumed + 1] == 0x00 &&
buf[consumed + 2] == 0x01 && buf[consumed + 3] == 0xE0)
skip_trailing_zeros = 0;
nal->size_bits = get_bit_length(nal, skip_trailing_zeros);
ret = init_get_bits(&nal->gb, nal->data, nal->size_bits);
if (ret < 0)
return ret;
if (codec_id == AV_CODEC_ID_HEVC)
ret = hevc_parse_nal_header(nal, logctx);
else
ret = h264_parse_nal_header(nal, logctx);
if (ret <= 0) {
if (ret < 0) {
av_log(logctx, AV_LOG_ERROR, "Invalid NAL unit %d, skipping.\n",
nal->type);
}
pkt->nb_nals--;
}
buf += consumed;
length -= consumed;
}
return 0;
}
| 22,566 |
FFmpeg | 3c895fc098f7637f6d5ec3a9d6766e724a8b9e41 | 0 | static int output_packet(AVInputStream *ist, int ist_index,
AVOutputStream **ost_table, int nb_ostreams,
const AVPacket *pkt)
{
AVFormatContext *os;
AVOutputStream *ost;
uint8_t *ptr;
int len, ret, i;
uint8_t *data_buf;
int data_size, got_picture;
AVFrame picture;
short samples[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2];
void *buffer_to_free;
if (pkt && pkt->pts != AV_NOPTS_VALUE) { //FIXME seems redundant, as libavformat does this too
ist->next_pts = ist->pts = pkt->dts;
} else {
ist->pts = ist->next_pts;
}
if (pkt == NULL) {
/* EOF handling */
ptr = NULL;
len = 0;
goto handle_eof;
}
len = pkt->size;
ptr = pkt->data;
while (len > 0) {
handle_eof:
/* decode the packet if needed */
data_buf = NULL; /* fail safe */
data_size = 0;
if (ist->decoding_needed) {
switch(ist->st->codec.codec_type) {
case CODEC_TYPE_AUDIO:
/* XXX: could avoid copy if PCM 16 bits with same
endianness as CPU */
ret = avcodec_decode_audio(&ist->st->codec, samples, &data_size,
ptr, len);
if (ret < 0)
goto fail_decode;
ptr += ret;
len -= ret;
/* Some bug in mpeg audio decoder gives */
/* data_size < 0, it seems they are overflows */
if (data_size <= 0) {
/* no audio frame */
continue;
}
data_buf = (uint8_t *)samples;
ist->next_pts += ((int64_t)AV_TIME_BASE/2 * data_size) /
(ist->st->codec.sample_rate * ist->st->codec.channels);
break;
case CODEC_TYPE_VIDEO:
data_size = (ist->st->codec.width * ist->st->codec.height * 3) / 2;
/* XXX: allocate picture correctly */
avcodec_get_frame_defaults(&picture);
ret = avcodec_decode_video(&ist->st->codec,
&picture, &got_picture, ptr, len);
ist->st->quality= picture.quality;
if (ret < 0)
goto fail_decode;
if (!got_picture) {
/* no picture yet */
goto discard_packet;
}
if (ist->st->codec.frame_rate_base != 0) {
ist->next_pts += ((int64_t)AV_TIME_BASE *
ist->st->codec.frame_rate_base) /
ist->st->codec.frame_rate;
}
len = 0;
break;
default:
goto fail_decode;
}
} else {
data_buf = ptr;
data_size = len;
ret = len;
len = 0;
}
buffer_to_free = NULL;
if (ist->st->codec.codec_type == CODEC_TYPE_VIDEO) {
pre_process_video_frame(ist, (AVPicture *)&picture,
&buffer_to_free);
}
/* frame rate emulation */
if (ist->st->codec.rate_emu) {
int64_t pts = av_rescale((int64_t) ist->frame * ist->st->codec.frame_rate_base, 1000000, ist->st->codec.frame_rate);
int64_t now = av_gettime() - ist->start;
if (pts > now)
usleep(pts - now);
ist->frame++;
}
#if 0
/* mpeg PTS deordering : if it is a P or I frame, the PTS
is the one of the next displayed one */
/* XXX: add mpeg4 too ? */
if (ist->st->codec.codec_id == CODEC_ID_MPEG1VIDEO) {
if (ist->st->codec.pict_type != B_TYPE) {
int64_t tmp;
tmp = ist->last_ip_pts;
ist->last_ip_pts = ist->frac_pts.val;
ist->frac_pts.val = tmp;
}
}
#endif
/* if output time reached then transcode raw format,
encode packets and output them */
if (start_time == 0 || ist->pts >= start_time)
for(i=0;i<nb_ostreams;i++) {
int frame_size;
ost = ost_table[i];
if (ost->source_index == ist_index) {
os = output_files[ost->file_index];
#if 0
printf("%d: got pts=%0.3f %0.3f\n", i,
(double)pkt->pts / AV_TIME_BASE,
((double)ist->pts / AV_TIME_BASE) -
((double)ost->st->pts.val * ost->time_base.num / ost->time_base.den));
#endif
/* set the input output pts pairs */
ost->sync_ipts = (double)ist->pts / AV_TIME_BASE;
if (ost->encoding_needed) {
switch(ost->st->codec.codec_type) {
case CODEC_TYPE_AUDIO:
do_audio_out(os, ost, ist, data_buf, data_size);
break;
case CODEC_TYPE_VIDEO:
/* find an audio stream for synchro */
{
int i;
AVOutputStream *audio_sync, *ost1;
audio_sync = NULL;
for(i=0;i<nb_ostreams;i++) {
ost1 = ost_table[i];
if (ost1->file_index == ost->file_index &&
ost1->st->codec.codec_type == CODEC_TYPE_AUDIO) {
audio_sync = ost1;
break;
}
}
do_video_out(os, ost, ist, &picture, &frame_size, audio_sync);
video_size += frame_size;
if (do_vstats && frame_size)
do_video_stats(os, ost, frame_size);
}
break;
default:
av_abort();
}
} else {
AVFrame avframe; //FIXME/XXX remove this
AVPacket opkt;
av_init_packet(&opkt);
/* no reencoding needed : output the packet directly */
/* force the input stream PTS */
avcodec_get_frame_defaults(&avframe);
ost->st->codec.coded_frame= &avframe;
avframe.key_frame = pkt->flags & PKT_FLAG_KEY;
if(ost->st->codec.codec_type == CODEC_TYPE_AUDIO)
audio_size += data_size;
else if (ost->st->codec.codec_type == CODEC_TYPE_VIDEO)
video_size += data_size;
opkt.stream_index= ost->index;
opkt.data= data_buf;
opkt.size= data_size;
opkt.pts= ist->pts; //FIXME dts vs. pts
opkt.flags= pkt->flags;
av_write_frame(os, &opkt);
ost->st->codec.frame_number++;
ost->frame_number++;
}
}
}
av_free(buffer_to_free);
}
discard_packet:
return 0;
fail_decode:
return -1;
}
| 22,567 |
FFmpeg | 08a747afb98c11da48b89339c2f1c5fdc56ced7e | 0 | static void count_frame_bits(AC3EncodeContext *s)
{
AC3EncOptions *opt = &s->options;
int blk, ch;
int frame_bits = 0;
/* header */
if (s->eac3) {
/* coupling */
if (s->channel_mode > AC3_CHMODE_MONO) {
frame_bits++;
for (blk = 1; blk < AC3_MAX_BLOCKS; blk++) {
AC3Block *block = &s->blocks[blk];
frame_bits++;
if (block->new_cpl_strategy)
frame_bits++;
}
}
/* coupling exponent strategy */
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++)
frame_bits += 2 * s->blocks[blk].cpl_in_use;
} else {
if (opt->audio_production_info)
frame_bits += 7;
if (s->bitstream_id == 6) {
if (opt->extended_bsi_1)
frame_bits += 14;
if (opt->extended_bsi_2)
frame_bits += 14;
}
}
/* audio blocks */
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
AC3Block *block = &s->blocks[blk];
/* coupling strategy */
if (!s->eac3)
frame_bits++;
if (block->new_cpl_strategy) {
if (!s->eac3)
frame_bits++;
if (block->cpl_in_use) {
if (s->eac3)
frame_bits++;
if (!s->eac3 || s->channel_mode != AC3_CHMODE_STEREO)
frame_bits += s->fbw_channels;
if (s->channel_mode == AC3_CHMODE_STEREO)
frame_bits++;
frame_bits += 4 + 4;
if (s->eac3)
frame_bits++;
else
frame_bits += s->num_cpl_subbands - 1;
}
}
/* coupling coordinates */
if (block->cpl_in_use) {
for (ch = 1; ch <= s->fbw_channels; ch++) {
if (block->channel_in_cpl[ch]) {
if (!s->eac3 || block->new_cpl_coords != 2)
frame_bits++;
if (block->new_cpl_coords) {
frame_bits += 2;
frame_bits += (4 + 4) * s->num_cpl_bands;
}
}
}
}
/* stereo rematrixing */
if (s->channel_mode == AC3_CHMODE_STEREO) {
if (!s->eac3 || blk > 0)
frame_bits++;
if (s->blocks[blk].new_rematrixing_strategy)
frame_bits += block->num_rematrixing_bands;
}
/* bandwidth codes & gain range */
for (ch = 1; ch <= s->fbw_channels; ch++) {
if (s->exp_strategy[ch][blk] != EXP_REUSE) {
if (!block->channel_in_cpl[ch])
frame_bits += 6;
frame_bits += 2;
}
}
/* coupling exponent strategy */
if (!s->eac3 && block->cpl_in_use)
frame_bits += 2;
/* snr offsets and fast gain codes */
if (!s->eac3) {
frame_bits++;
if (block->new_snr_offsets)
frame_bits += 6 + (s->channels + block->cpl_in_use) * (4 + 3);
}
/* coupling leak info */
if (block->cpl_in_use) {
if (!s->eac3 || block->new_cpl_leak != 2)
frame_bits++;
if (block->new_cpl_leak)
frame_bits += 3 + 3;
}
}
s->frame_bits = s->frame_bits_fixed + frame_bits;
}
| 22,568 |
qemu | 187337f8b0ec0813dd3876d1efe37d415fb81c2e | 1 | void sp804_init(uint32_t base, qemu_irq irq)
{
int iomemtype;
sp804_state *s;
qemu_irq *qi;
s = (sp804_state *)qemu_mallocz(sizeof(sp804_state));
qi = qemu_allocate_irqs(sp804_set_irq, s, 2);
s->base = base;
s->irq = irq;
/* ??? The timers are actually configurable between 32kHz and 1MHz, but
we don't implement that. */
s->timer[0] = arm_timer_init(1000000, qi[0]);
s->timer[1] = arm_timer_init(1000000, qi[1]);
iomemtype = cpu_register_io_memory(0, sp804_readfn,
sp804_writefn, s);
cpu_register_physical_memory(base, 0x00000fff, iomemtype);
/* ??? Save/restore. */
}
| 22,569 |
qemu | 07caea315a85ebfe90851f9c2e4ef3fdd24117b5 | 1 | static void network_init(void)
{
int i;
for(i = 0; i < nb_nics; i++) {
NICInfo *nd = &nd_table[i];
const char *default_devaddr = NULL;
if (i == 0 && (!nd->model || strcmp(nd->model, "pcnet") == 0))
/* The malta board has a PCNet card using PCI SLOT 11 */
default_devaddr = "0b";
pci_nic_init(nd, "pcnet", default_devaddr);
}
}
| 22,570 |
qemu | 2f9606b3736c3be4dbd606c46525c7b770ced119 | 1 | static uint8_t *buffer_end(Buffer *buffer)
{
return buffer->buffer + buffer->offset;
}
| 22,571 |
qemu | b45c03f585ea9bb1af76c73e82195418c294919d | 1 | PXA2xxState *pxa270_init(MemoryRegion *address_space,
unsigned int sdram_size, const char *revision)
{
PXA2xxState *s;
int i;
DriveInfo *dinfo;
s = (PXA2xxState *) g_malloc0(sizeof(PXA2xxState));
if (revision && strncmp(revision, "pxa27", 5)) {
fprintf(stderr, "Machine requires a PXA27x processor.\n");
exit(1);
}
if (!revision)
revision = "pxa270";
s->cpu = cpu_arm_init(revision);
if (s->cpu == NULL) {
fprintf(stderr, "Unable to find CPU definition\n");
exit(1);
}
s->reset = qemu_allocate_irq(pxa2xx_reset, s, 0);
/* SDRAM & Internal Memory Storage */
memory_region_init_ram(&s->sdram, NULL, "pxa270.sdram", sdram_size,
&error_abort);
vmstate_register_ram_global(&s->sdram);
memory_region_add_subregion(address_space, PXA2XX_SDRAM_BASE, &s->sdram);
memory_region_init_ram(&s->internal, NULL, "pxa270.internal", 0x40000,
&error_abort);
vmstate_register_ram_global(&s->internal);
memory_region_add_subregion(address_space, PXA2XX_INTERNAL_BASE,
&s->internal);
s->pic = pxa2xx_pic_init(0x40d00000, s->cpu);
s->dma = pxa27x_dma_init(0x40000000,
qdev_get_gpio_in(s->pic, PXA2XX_PIC_DMA));
sysbus_create_varargs("pxa27x-timer", 0x40a00000,
qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 0),
qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 1),
qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 2),
qdev_get_gpio_in(s->pic, PXA2XX_PIC_OST_0 + 3),
qdev_get_gpio_in(s->pic, PXA27X_PIC_OST_4_11),
NULL);
s->gpio = pxa2xx_gpio_init(0x40e00000, s->cpu, s->pic, 121);
dinfo = drive_get(IF_SD, 0, 0);
if (!dinfo) {
fprintf(stderr, "qemu: missing SecureDigital device\n");
exit(1);
}
s->mmc = pxa2xx_mmci_init(address_space, 0x41100000,
blk_by_legacy_dinfo(dinfo),
qdev_get_gpio_in(s->pic, PXA2XX_PIC_MMC),
qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_MMCI),
qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_MMCI));
for (i = 0; pxa270_serial[i].io_base; i++) {
if (serial_hds[i]) {
serial_mm_init(address_space, pxa270_serial[i].io_base, 2,
qdev_get_gpio_in(s->pic, pxa270_serial[i].irqn),
14857000 / 16, serial_hds[i],
DEVICE_NATIVE_ENDIAN);
} else {
break;
}
}
if (serial_hds[i])
s->fir = pxa2xx_fir_init(address_space, 0x40800000,
qdev_get_gpio_in(s->pic, PXA2XX_PIC_ICP),
qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_ICP),
qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_ICP),
serial_hds[i]);
s->lcd = pxa2xx_lcdc_init(address_space, 0x44000000,
qdev_get_gpio_in(s->pic, PXA2XX_PIC_LCD));
s->cm_base = 0x41300000;
s->cm_regs[CCCR >> 2] = 0x02000210; /* 416.0 MHz */
s->clkcfg = 0x00000009; /* Turbo mode active */
memory_region_init_io(&s->cm_iomem, NULL, &pxa2xx_cm_ops, s, "pxa2xx-cm", 0x1000);
memory_region_add_subregion(address_space, s->cm_base, &s->cm_iomem);
vmstate_register(NULL, 0, &vmstate_pxa2xx_cm, s);
pxa2xx_setup_cp14(s);
s->mm_base = 0x48000000;
s->mm_regs[MDMRS >> 2] = 0x00020002;
s->mm_regs[MDREFR >> 2] = 0x03ca4000;
s->mm_regs[MECR >> 2] = 0x00000001; /* Two PC Card sockets */
memory_region_init_io(&s->mm_iomem, NULL, &pxa2xx_mm_ops, s, "pxa2xx-mm", 0x1000);
memory_region_add_subregion(address_space, s->mm_base, &s->mm_iomem);
vmstate_register(NULL, 0, &vmstate_pxa2xx_mm, s);
s->pm_base = 0x40f00000;
memory_region_init_io(&s->pm_iomem, NULL, &pxa2xx_pm_ops, s, "pxa2xx-pm", 0x100);
memory_region_add_subregion(address_space, s->pm_base, &s->pm_iomem);
vmstate_register(NULL, 0, &vmstate_pxa2xx_pm, s);
for (i = 0; pxa27x_ssp[i].io_base; i ++);
s->ssp = (SSIBus **)g_malloc0(sizeof(SSIBus *) * i);
for (i = 0; pxa27x_ssp[i].io_base; i ++) {
DeviceState *dev;
dev = sysbus_create_simple(TYPE_PXA2XX_SSP, pxa27x_ssp[i].io_base,
qdev_get_gpio_in(s->pic, pxa27x_ssp[i].irqn));
s->ssp[i] = (SSIBus *)qdev_get_child_bus(dev, "ssi");
}
if (usb_enabled()) {
sysbus_create_simple("sysbus-ohci", 0x4c000000,
qdev_get_gpio_in(s->pic, PXA2XX_PIC_USBH1));
}
s->pcmcia[0] = pxa2xx_pcmcia_init(address_space, 0x20000000);
s->pcmcia[1] = pxa2xx_pcmcia_init(address_space, 0x30000000);
sysbus_create_simple(TYPE_PXA2XX_RTC, 0x40900000,
qdev_get_gpio_in(s->pic, PXA2XX_PIC_RTCALARM));
s->i2c[0] = pxa2xx_i2c_init(0x40301600,
qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2C), 0xffff);
s->i2c[1] = pxa2xx_i2c_init(0x40f00100,
qdev_get_gpio_in(s->pic, PXA2XX_PIC_PWRI2C), 0xff);
s->i2s = pxa2xx_i2s_init(address_space, 0x40400000,
qdev_get_gpio_in(s->pic, PXA2XX_PIC_I2S),
qdev_get_gpio_in(s->dma, PXA2XX_RX_RQ_I2S),
qdev_get_gpio_in(s->dma, PXA2XX_TX_RQ_I2S));
s->kp = pxa27x_keypad_init(address_space, 0x41500000,
qdev_get_gpio_in(s->pic, PXA2XX_PIC_KEYPAD));
/* GPIO1 resets the processor */
/* The handler can be overridden by board-specific code */
qdev_connect_gpio_out(s->gpio, 1, s->reset);
return s;
}
| 22,572 |
qemu | 51cc2e783af5586b2e742ce9e5b2762dc50ad325 | 1 | static int cpu_mips_register (CPUMIPSState *env, const mips_def_t *def)
{
env->CP0_PRid = def->CP0_PRid;
env->CP0_Config0 = def->CP0_Config0;
#ifdef TARGET_WORDS_BIGENDIAN
env->CP0_Config0 |= (1 << CP0C0_BE);
#endif
env->CP0_Config1 = def->CP0_Config1;
env->CP0_Config2 = def->CP0_Config2;
env->CP0_Config3 = def->CP0_Config3;
env->CP0_Config6 = def->CP0_Config6;
env->CP0_Config7 = def->CP0_Config7;
env->SYNCI_Step = def->SYNCI_Step;
env->CCRes = def->CCRes;
env->CP0_Status_rw_bitmask = def->CP0_Status_rw_bitmask;
env->CP0_TCStatus_rw_bitmask = def->CP0_TCStatus_rw_bitmask;
env->CP0_SRSCtl = def->CP0_SRSCtl;
env->current_tc = 0;
env->SEGBITS = def->SEGBITS;
env->SEGMask = (target_ulong)((1ULL << def->SEGBITS) - 1);
#if defined(TARGET_MIPS64)
if (def->insn_flags & ISA_MIPS3) {
env->hflags |= MIPS_HFLAG_64;
env->SEGMask |= 3ULL << 62;
}
#endif
env->PABITS = def->PABITS;
env->PAMask = (target_ulong)((1ULL << def->PABITS) - 1);
env->CP0_SRSConf0_rw_bitmask = def->CP0_SRSConf0_rw_bitmask;
env->CP0_SRSConf0 = def->CP0_SRSConf0;
env->CP0_SRSConf1_rw_bitmask = def->CP0_SRSConf1_rw_bitmask;
env->CP0_SRSConf1 = def->CP0_SRSConf1;
env->CP0_SRSConf2_rw_bitmask = def->CP0_SRSConf2_rw_bitmask;
env->CP0_SRSConf2 = def->CP0_SRSConf2;
env->CP0_SRSConf3_rw_bitmask = def->CP0_SRSConf3_rw_bitmask;
env->CP0_SRSConf3 = def->CP0_SRSConf3;
env->CP0_SRSConf4_rw_bitmask = def->CP0_SRSConf4_rw_bitmask;
env->CP0_SRSConf4 = def->CP0_SRSConf4;
env->insn_flags = def->insn_flags;
#ifndef CONFIG_USER_ONLY
mmu_init(env, def);
#endif
fpu_init(env, def);
mvp_init(env, def);
return 0;
}
| 22,574 |
qemu | 21ef45d71221b4577330fe3aacfb06afad91ad46 | 1 | static void gd_update(DisplayChangeListener *dcl,
DisplayState *ds, int x, int y, int w, int h)
{
GtkDisplayState *s = ds->opaque;
int x1, x2, y1, y2;
int mx, my;
int fbw, fbh;
int ww, wh;
DPRINTF("update(x=%d, y=%d, w=%d, h=%d)\n", x, y, w, h);
x1 = floor(x * s->scale_x);
y1 = floor(y * s->scale_y);
x2 = ceil(x * s->scale_x + w * s->scale_x);
y2 = ceil(y * s->scale_y + h * s->scale_y);
fbw = ds_get_width(s->ds) * s->scale_x;
fbh = ds_get_height(s->ds) * s->scale_y;
gdk_drawable_get_size(gtk_widget_get_window(s->drawing_area), &ww, &wh);
mx = my = 0;
if (ww > fbw) {
mx = (ww - fbw) / 2;
}
if (wh > fbh) {
my = (wh - fbh) / 2;
}
gtk_widget_queue_draw_area(s->drawing_area, mx + x1, my + y1, (x2 - x1), (y2 - y1));
}
| 22,575 |
FFmpeg | 2192f89368d837a4d960a1cabf5475fdeff697e7 | 1 | static void load_module(const char *filename)
{
void *dll;
void (*init_func)(void);
dll = dlopen(filename, RTLD_NOW);
if (!dll) {
fprintf(stderr, "Could not load module '%s' - %s\n",
filename, dlerror());
}
init_func = dlsym(dll, "ffserver_module_init");
if (!init_func) {
fprintf(stderr,
"%s: init function 'ffserver_module_init()' not found\n",
filename);
dlclose(dll);
}
init_func();
} | 22,576 |
FFmpeg | 0a41f47dc17b49acaff6fe469a6ab358986cc449 | 0 | DVDemuxContext* avpriv_dv_init_demux(AVFormatContext *s)
{
DVDemuxContext *c;
c = av_mallocz(sizeof(DVDemuxContext));
if (!c)
return NULL;
c->vst = avformat_new_stream(s, NULL);
if (!c->vst) {
av_free(c);
return NULL;
}
c->sys = NULL;
c->fctx = s;
memset(c->ast, 0, sizeof(c->ast));
c->ach = 0;
c->frames = 0;
c->abytes = 0;
c->vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
c->vst->codec->codec_id = CODEC_ID_DVVIDEO;
c->vst->codec->bit_rate = 25000000;
c->vst->start_time = 0;
return c;
}
| 22,577 |
FFmpeg | 15cea3695daf3f6363794594982e3816ddc8d90b | 1 | int ff_read_riff_info(AVFormatContext *s, int64_t size)
{
int64_t start, end, cur;
AVIOContext *pb = s->pb;
start = avio_tell(pb);
end = start + size;
while ((cur = avio_tell(pb)) >= 0 && cur <= end - 8 /* = tag + size */) {
uint32_t chunk_code;
int64_t chunk_size;
char key[5] = {0};
char *value;
chunk_code = avio_rl32(pb);
chunk_size = avio_rl32(pb);
if (chunk_size > end || end - chunk_size < cur || chunk_size == UINT_MAX) {
av_log(s, AV_LOG_ERROR, "too big INFO subchunk\n");
return AVERROR_INVALIDDATA;
}
chunk_size += (chunk_size & 1);
value = av_malloc(chunk_size + 1);
if (!value) {
av_log(s, AV_LOG_ERROR, "out of memory, unable to read INFO tag\n");
return AVERROR(ENOMEM);
}
AV_WL32(key, chunk_code);
if (avio_read(pb, value, chunk_size) != chunk_size) {
av_freep(key);
av_freep(value);
av_log(s, AV_LOG_ERROR, "premature end of file while reading INFO tag\n");
return AVERROR_INVALIDDATA;
}
value[chunk_size] = 0;
av_dict_set(&s->metadata, key, value, AV_DICT_DONT_STRDUP_VAL);
}
return 0;
}
| 22,578 |
FFmpeg | f6774f905fb3cfdc319523ac640be30b14c1bc55 | 1 | static int h261_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
H261Context *h = avctx->priv_data;
MpegEncContext *s = &h->s;
int ret;
AVFrame *pict = data;
av_dlog(avctx, "*****frame %d size=%d\n", avctx->frame_number, buf_size);
av_dlog(avctx, "bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]);
s->flags = avctx->flags;
s->flags2 = avctx->flags2;
h->gob_start_code_skipped = 0;
retry:
init_get_bits(&s->gb, buf, buf_size * 8);
if (!s->context_initialized)
// we need the IDCT permutaton for reading a custom matrix
if (ff_MPV_common_init(s) < 0)
return -1;
ret = h261_decode_picture_header(h);
/* skip if the header was thrashed */
if (ret < 0) {
av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
return -1;
}
if (s->width != avctx->coded_width || s->height != avctx->coded_height) {
ParseContext pc = s->parse_context; // FIXME move this demuxing hack to libavformat
s->parse_context.buffer = 0;
ff_MPV_common_end(s);
s->parse_context = pc;
}
if (!s->context_initialized) {
ret = ff_set_dimensions(avctx, s->width, s->height);
if (ret < 0)
return ret;
goto retry;
}
// for skipping the frame
s->current_picture.f.pict_type = s->pict_type;
s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
avctx->skip_frame >= AVDISCARD_ALL)
return get_consumed_bytes(s, buf_size);
if (ff_MPV_frame_start(s, avctx) < 0)
return -1;
ff_mpeg_er_frame_start(s);
/* decode each macroblock */
s->mb_x = 0;
s->mb_y = 0;
while (h->gob_number < (s->mb_height == 18 ? 12 : 5)) {
if (h261_resync(h) < 0)
break;
h261_decode_gob(h);
}
ff_MPV_frame_end(s);
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type);
if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
return ret;
ff_print_debug_info(s, s->current_picture_ptr);
*got_frame = 1;
return get_consumed_bytes(s, buf_size);
}
| 22,579 |
qemu | 54e6814360ab2110ed3ed07b2b9a3f9907e1202a | 1 | int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
int n_start, int n_end, int *num, QCowL2Meta *m)
{
BDRVQcowState *s = bs->opaque;
int l2_index, ret, sectors;
uint64_t *l2_table;
unsigned int nb_clusters, keep_clusters;
uint64_t cluster_offset;
trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
n_start, n_end);
/* Find L2 entry for the first involved cluster */
again:
ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
if (ret < 0) {
return ret;
}
/*
* Calculate the number of clusters to look for. We stop at L2 table
* boundaries to keep things simple.
*/
nb_clusters = MIN(size_to_clusters(s, n_end << BDRV_SECTOR_BITS),
s->l2_size - l2_index);
cluster_offset = be64_to_cpu(l2_table[l2_index]);
/*
* Check how many clusters are already allocated and don't need COW, and how
* many need a new allocation.
*/
if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
&& (cluster_offset & QCOW_OFLAG_COPIED))
{
/* We keep all QCOW_OFLAG_COPIED clusters */
keep_clusters =
count_contiguous_clusters(nb_clusters, s->cluster_size,
&l2_table[l2_index], 0,
QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
assert(keep_clusters <= nb_clusters);
nb_clusters -= keep_clusters;
} else {
/* For the moment, overwrite compressed clusters one by one */
if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
nb_clusters = 1;
} else {
nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
}
keep_clusters = 0;
cluster_offset = 0;
}
cluster_offset &= L2E_OFFSET_MASK;
/*
* The L2 table isn't used any more after this. As long as the cache works
* synchronously, it's important to release it before calling
* do_alloc_cluster_offset, which may yield if we need to wait for another
* request to complete. If we still had the reference, we could use up the
* whole cache with sleeping requests.
*/
ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
if (ret < 0) {
return ret;
}
/* If there is something left to allocate, do that now */
*m = (QCowL2Meta) {
.cluster_offset = cluster_offset,
.nb_clusters = 0,
};
qemu_co_queue_init(&m->dependent_requests);
if (nb_clusters > 0) {
uint64_t alloc_offset;
uint64_t alloc_cluster_offset;
uint64_t keep_bytes = keep_clusters * s->cluster_size;
/* Calculate start and size of allocation */
alloc_offset = offset + keep_bytes;
if (keep_clusters == 0) {
alloc_cluster_offset = 0;
} else {
alloc_cluster_offset = cluster_offset + keep_bytes;
}
/* Allocate, if necessary at a given offset in the image file */
ret = do_alloc_cluster_offset(bs, alloc_offset, &alloc_cluster_offset,
&nb_clusters);
if (ret == -EAGAIN) {
goto again;
} else if (ret < 0) {
goto fail;
}
/* save info needed for meta data update */
if (nb_clusters > 0) {
int requested_sectors = n_end - keep_clusters * s->cluster_sectors;
int avail_sectors = (keep_clusters + nb_clusters)
<< (s->cluster_bits - BDRV_SECTOR_BITS);
*m = (QCowL2Meta) {
.cluster_offset = keep_clusters == 0 ?
alloc_cluster_offset : cluster_offset,
.alloc_offset = alloc_cluster_offset,
.offset = alloc_offset,
.n_start = keep_clusters == 0 ? n_start : 0,
.nb_clusters = nb_clusters,
.nb_available = MIN(requested_sectors, avail_sectors),
};
qemu_co_queue_init(&m->dependent_requests);
QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);
}
}
/* Some cleanup work */
sectors = (keep_clusters + nb_clusters) << (s->cluster_bits - 9);
if (sectors > n_end) {
sectors = n_end;
}
assert(sectors > n_start);
*num = sectors - n_start;
return 0;
fail:
if (m->nb_clusters > 0) {
QLIST_REMOVE(m, next_in_flight);
}
return ret;
}
| 22,580 |
qemu | d07bde88a52bf293c3f8846cfd162e0a57e1557c | 1 | static void tb_gen_code(CPUState *env,
target_ulong pc, target_ulong cs_base, int flags,
int cflags)
{
TranslationBlock *tb;
uint8_t *tc_ptr;
target_ulong phys_pc, phys_page2, virt_page2;
int code_gen_size;
phys_pc = get_phys_addr_code(env, pc);
tb = tb_alloc(pc);
if (!tb) {
/* flush must be done */
tb_flush(env);
/* cannot fail at this point */
tb = tb_alloc(pc);
}
tc_ptr = code_gen_ptr;
tb->tc_ptr = tc_ptr;
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
/* check next page if needed */
virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
phys_page2 = -1;
if ((pc & TARGET_PAGE_MASK) != virt_page2) {
phys_page2 = get_phys_addr_code(env, virt_page2);
}
tb_link_phys(tb, phys_pc, phys_page2);
}
| 22,581 |
qemu | c0532a76b407af4b276dc5a62d8178db59857ea6 | 1 | void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
uint64_t mcg_cap = cenv->mcg_cap;
unsigned bank_num = mcg_cap & 0xff;
uint64_t *banks = cenv->mce_banks;
if (bank >= bank_num || !(status & MCI_STATUS_VAL))
return;
if (kvm_enabled()) {
kvm_inject_x86_mce(cenv, bank, status, mcg_status, addr, misc);
return;
}
/*
* if MSR_MCG_CTL is not all 1s, the uncorrected error
* reporting is disabled
*/
if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
cenv->mcg_ctl != ~(uint64_t)0)
return;
banks += 4 * bank;
/*
* if MSR_MCi_CTL is not all 1s, the uncorrected error
* reporting is disabled for the bank
*/
if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
return;
if (status & MCI_STATUS_UC) {
if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
!(cenv->cr[4] & CR4_MCE_MASK)) {
fprintf(stderr, "injects mce exception while previous "
"one is in progress!\n");
qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
qemu_system_reset_request();
return;
}
if (banks[1] & MCI_STATUS_VAL)
status |= MCI_STATUS_OVER;
banks[2] = addr;
banks[3] = misc;
cenv->mcg_status = mcg_status;
banks[1] = status;
cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
} else if (!(banks[1] & MCI_STATUS_VAL)
|| !(banks[1] & MCI_STATUS_UC)) {
if (banks[1] & MCI_STATUS_VAL)
status |= MCI_STATUS_OVER;
banks[2] = addr;
banks[3] = misc;
banks[1] = status;
} else
banks[1] |= MCI_STATUS_OVER;
}
| 22,582 |
qemu | baf35cb90204d75404892aa4e52628ae7a00669b | 1 | void qemu_aio_poll(void)
{
}
| 22,583 |
qemu | 8059feee004111534c4c0652e2f0715e9b4e0754 | 1 | void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
size_t num_sg, int is_write)
{
unsigned int i;
hwaddr len;
if (num_sg > VIRTQUEUE_MAX_SIZE) {
error_report("virtio: map attempt out of bounds: %zd > %d",
num_sg, VIRTQUEUE_MAX_SIZE);
exit(1);
}
for (i = 0; i < num_sg; i++) {
len = sg[i].iov_len;
sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
error_report("virtio: error trying to map MMIO memory");
exit(1);
}
}
}
| 22,584 |
FFmpeg | 62b1e3b1031e901105d78e831120de8e4c3e0013 | 1 | static int aasc_decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
AascContext *s = avctx->priv_data;
int compr, i, stride, ret;
if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
compr = AV_RL32(buf);
buf += 4;
buf_size -= 4;
switch (compr) {
case 0:
stride = (avctx->width * 3 + 3) & ~3;
        if (buf_size < stride * avctx->height)
            return AVERROR_INVALIDDATA;
for (i = avctx->height - 1; i >= 0; i--) {
memcpy(s->frame->data[0] + i * s->frame->linesize[0], buf, avctx->width * 3);
buf += stride;
}
break;
case 1:
bytestream2_init(&s->gb, buf, buf_size);
ff_msrle_decode(avctx, (AVPicture*)s->frame, 8, &s->gb);
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr);
}
*got_frame = 1;
if ((ret = av_frame_ref(data, s->frame)) < 0)
return ret;
/* report that the buffer was completely consumed */
return buf_size;
} | 22,585 |
FFmpeg | ac4b32df71bd932838043a4838b86d11e169707f | 1 | static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride,
int is4tap)
{
LOAD_PIXELS
int a, f1, f2;
const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
a = 3 * (q0 - p0);
if (is4tap)
a += clip_int8(p1 - q1);
a = clip_int8(a);
// We deviate from the spec here with c(a+3) >> 3
// since that's what libvpx does.
f1 = FFMIN(a + 4, 127) >> 3;
f2 = FFMIN(a + 3, 127) >> 3;
// Despite what the spec says, we do need to clamp here to
// be bitexact with libvpx.
p[-1 * stride] = cm[p0 + f2];
p[ 0 * stride] = cm[q0 - f1];
// only used for _inner on blocks without high edge variance
if (!is4tap) {
a = (f1 + 1) >> 1;
p[-2 * stride] = cm[p1 + a];
p[ 1 * stride] = cm[q1 - a];
}
}
| 22,586 |
qemu | ac0c14d71b68ac18f03a876028e332534e1e6a3e | 1 | static int virtio_blk_init_pci(PCIDevice *pci_dev)
{
VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
VirtIODevice *vdev;
if (proxy->class_code != PCI_CLASS_STORAGE_SCSI &&
proxy->class_code != PCI_CLASS_STORAGE_OTHER)
proxy->class_code = PCI_CLASS_STORAGE_SCSI;
if (!proxy->block.bs) {
        error_report("virtio-blk-pci: drive property not set");
        return -1;
    }
vdev = virtio_blk_init(&pci_dev->qdev, &proxy->block);
vdev->nvectors = proxy->nvectors;
virtio_init_pci(proxy, vdev,
PCI_VENDOR_ID_REDHAT_QUMRANET,
PCI_DEVICE_ID_VIRTIO_BLOCK,
proxy->class_code, 0x00);
/* make the actual value visible */
proxy->nvectors = vdev->nvectors;
    return 0;
}
| 22,587 |
qemu | 6a5b69a959483c7404576a7dc54221ced41e6515 | 1 | static target_ulong helper_sdiv_common(CPUSPARCState *env, target_ulong a,
target_ulong b, int cc)
{
SPARCCPU *cpu = sparc_env_get_cpu(env);
int overflow = 0;
int64_t x0;
int32_t x1;
x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
x1 = (b & 0xffffffff);
if (x1 == 0) {
cpu_restore_state(CPU(cpu), GETPC());
helper_raise_exception(env, TT_DIV_ZERO);
}
x0 = x0 / x1;
if ((int32_t) x0 != x0) {
x0 = x0 < 0 ? 0x80000000 : 0x7fffffff;
overflow = 1;
}
if (cc) {
env->cc_dst = x0;
env->cc_src2 = overflow;
env->cc_op = CC_OP_DIV;
}
return x0;
}
| 22,588 |
FFmpeg | c2c4cee866926cb95b2b1a4b28fff9caa4177c7e | 0 | static int mkv_write_track(AVFormatContext *s, MatroskaMuxContext *mkv,
int i, AVIOContext *pb, int default_stream_exists)
{
AVStream *st = s->streams[i];
AVCodecContext *codec = st->codec;
ebml_master subinfo, track;
int native_id = 0;
int qt_id = 0;
int bit_depth = av_get_bits_per_sample(codec->codec_id);
int sample_rate = codec->sample_rate;
int output_sample_rate = 0;
int display_width_div = 1;
int display_height_div = 1;
int j, ret;
AVDictionaryEntry *tag;
// ms precision is the de-facto standard timescale for mkv files
avpriv_set_pts_info(st, 64, 1, 1000);
if (codec->codec_type == AVMEDIA_TYPE_ATTACHMENT) {
mkv->have_attachments = 1;
return 0;
}
if (!bit_depth && codec->codec_id != AV_CODEC_ID_ADPCM_G726)
bit_depth = av_get_bytes_per_sample(codec->sample_fmt) << 3;
if (!bit_depth)
bit_depth = codec->bits_per_coded_sample;
if (codec->codec_id == AV_CODEC_ID_AAC)
get_aac_sample_rates(s, codec, &sample_rate, &output_sample_rate);
track = start_ebml_master(pb, MATROSKA_ID_TRACKENTRY, 0);
put_ebml_uint (pb, MATROSKA_ID_TRACKNUMBER,
mkv->is_dash ? mkv->dash_track_number : i + 1);
put_ebml_uint (pb, MATROSKA_ID_TRACKUID,
mkv->is_dash ? mkv->dash_track_number : i + 1);
put_ebml_uint (pb, MATROSKA_ID_TRACKFLAGLACING , 0); // no lacing (yet)
if ((tag = av_dict_get(st->metadata, "title", NULL, 0)))
put_ebml_string(pb, MATROSKA_ID_TRACKNAME, tag->value);
tag = av_dict_get(st->metadata, "language", NULL, 0);
if (mkv->mode != MODE_WEBM || codec->codec_id != AV_CODEC_ID_WEBVTT) {
put_ebml_string(pb, MATROSKA_ID_TRACKLANGUAGE, tag && tag->value ? tag->value:"und");
} else if (tag && tag->value) {
put_ebml_string(pb, MATROSKA_ID_TRACKLANGUAGE, tag->value);
}
// The default value for TRACKFLAGDEFAULT is 1, so add element
// if we need to clear it.
if (default_stream_exists && !(st->disposition & AV_DISPOSITION_DEFAULT))
put_ebml_uint(pb, MATROSKA_ID_TRACKFLAGDEFAULT, !!(st->disposition & AV_DISPOSITION_DEFAULT));
if (st->disposition & AV_DISPOSITION_FORCED)
put_ebml_uint(pb, MATROSKA_ID_TRACKFLAGFORCED, 1);
if (mkv->mode == MODE_WEBM && codec->codec_id == AV_CODEC_ID_WEBVTT) {
const char *codec_id;
if (st->disposition & AV_DISPOSITION_CAPTIONS) {
codec_id = "D_WEBVTT/CAPTIONS";
native_id = MATROSKA_TRACK_TYPE_SUBTITLE;
} else if (st->disposition & AV_DISPOSITION_DESCRIPTIONS) {
codec_id = "D_WEBVTT/DESCRIPTIONS";
native_id = MATROSKA_TRACK_TYPE_METADATA;
} else if (st->disposition & AV_DISPOSITION_METADATA) {
codec_id = "D_WEBVTT/METADATA";
native_id = MATROSKA_TRACK_TYPE_METADATA;
} else {
codec_id = "D_WEBVTT/SUBTITLES";
native_id = MATROSKA_TRACK_TYPE_SUBTITLE;
}
put_ebml_string(pb, MATROSKA_ID_CODECID, codec_id);
} else {
// look for a codec ID string specific to mkv to use,
// if none are found, use AVI codes
for (j = 0; ff_mkv_codec_tags[j].id != AV_CODEC_ID_NONE; j++) {
if (ff_mkv_codec_tags[j].id == codec->codec_id) {
put_ebml_string(pb, MATROSKA_ID_CODECID, ff_mkv_codec_tags[j].str);
native_id = 1;
break;
}
}
}
if (codec->codec_type == AVMEDIA_TYPE_AUDIO && codec->delay && codec->codec_id == AV_CODEC_ID_OPUS) {
// mkv->tracks[i].ts_offset = av_rescale_q(codec->delay,
// (AVRational){ 1, codec->sample_rate },
// st->time_base);
put_ebml_uint(pb, MATROSKA_ID_CODECDELAY,
av_rescale_q(codec->delay, (AVRational){ 1, codec->sample_rate },
(AVRational){ 1, 1000000000 }));
}
if (codec->codec_id == AV_CODEC_ID_OPUS) {
put_ebml_uint(pb, MATROSKA_ID_SEEKPREROLL, OPUS_SEEK_PREROLL);
}
if (mkv->mode == MODE_WEBM && !(codec->codec_id == AV_CODEC_ID_VP8 ||
codec->codec_id == AV_CODEC_ID_VP9 ||
codec->codec_id == AV_CODEC_ID_OPUS ||
codec->codec_id == AV_CODEC_ID_VORBIS ||
codec->codec_id == AV_CODEC_ID_WEBVTT)) {
av_log(s, AV_LOG_ERROR,
"Only VP8 or VP9 video and Vorbis or Opus audio and WebVTT subtitles are supported for WebM.\n");
return AVERROR(EINVAL);
}
switch (codec->codec_type) {
case AVMEDIA_TYPE_VIDEO:
put_ebml_uint(pb, MATROSKA_ID_TRACKTYPE, MATROSKA_TRACK_TYPE_VIDEO);
if( st->avg_frame_rate.num > 0 && st->avg_frame_rate.den > 0
&& 1.0/av_q2d(st->avg_frame_rate) > av_q2d(codec->time_base))
put_ebml_uint(pb, MATROSKA_ID_TRACKDEFAULTDURATION, 1E9 / av_q2d(st->avg_frame_rate));
else
put_ebml_uint(pb, MATROSKA_ID_TRACKDEFAULTDURATION, av_q2d(codec->time_base)*1E9);
if (!native_id &&
ff_codec_get_tag(ff_codec_movvideo_tags, codec->codec_id) &&
(!ff_codec_get_tag(ff_codec_bmp_tags, codec->codec_id) ||
codec->codec_id == AV_CODEC_ID_SVQ1 ||
codec->codec_id == AV_CODEC_ID_SVQ3 ||
codec->codec_id == AV_CODEC_ID_CINEPAK))
qt_id = 1;
if (qt_id)
put_ebml_string(pb, MATROSKA_ID_CODECID, "V_QUICKTIME");
else if (!native_id) {
// if there is no mkv-specific codec ID, use VFW mode
put_ebml_string(pb, MATROSKA_ID_CODECID, "V_MS/VFW/FOURCC");
mkv->tracks[i].write_dts = 1;
}
subinfo = start_ebml_master(pb, MATROSKA_ID_TRACKVIDEO, 0);
// XXX: interlace flag?
put_ebml_uint (pb, MATROSKA_ID_VIDEOPIXELWIDTH , codec->width);
put_ebml_uint (pb, MATROSKA_ID_VIDEOPIXELHEIGHT, codec->height);
if ((tag = av_dict_get(st->metadata, "stereo_mode", NULL, 0)) ||
(tag = av_dict_get( s->metadata, "stereo_mode", NULL, 0))) {
int st_mode = MATROSKA_VIDEO_STEREO_MODE_COUNT;
for (j=0; j<MATROSKA_VIDEO_STEREO_MODE_COUNT; j++)
if (!strcmp(tag->value, ff_matroska_video_stereo_mode[j])){
st_mode = j;
break;
}
if (mkv_write_stereo_mode(s, pb, st_mode, mkv->mode) < 0)
return AVERROR(EINVAL);
switch (st_mode) {
case 1:
case 8:
case 9:
case 11:
display_width_div = 2;
break;
case 2:
case 3:
case 6:
case 7:
display_height_div = 2;
break;
}
}
if ((tag = av_dict_get(st->metadata, "alpha_mode", NULL, 0)) ||
(tag = av_dict_get( s->metadata, "alpha_mode", NULL, 0)) ||
(codec->pix_fmt == AV_PIX_FMT_YUVA420P)) {
put_ebml_uint(pb, MATROSKA_ID_VIDEOALPHAMODE, 1);
}
if (st->sample_aspect_ratio.num) {
int64_t d_width = av_rescale(codec->width, st->sample_aspect_ratio.num, st->sample_aspect_ratio.den);
if (d_width > INT_MAX) {
av_log(s, AV_LOG_ERROR, "Overflow in display width\n");
return AVERROR(EINVAL);
}
put_ebml_uint(pb, MATROSKA_ID_VIDEODISPLAYWIDTH , d_width / display_width_div);
put_ebml_uint(pb, MATROSKA_ID_VIDEODISPLAYHEIGHT, codec->height / display_height_div);
} else if (display_width_div != 1 || display_height_div != 1) {
put_ebml_uint(pb, MATROSKA_ID_VIDEODISPLAYWIDTH , codec->width / display_width_div);
put_ebml_uint(pb, MATROSKA_ID_VIDEODISPLAYHEIGHT, codec->height / display_height_div);
}
if (codec->codec_id == AV_CODEC_ID_RAWVIDEO) {
uint32_t color_space = av_le2ne32(codec->codec_tag);
put_ebml_binary(pb, MATROSKA_ID_VIDEOCOLORSPACE, &color_space, sizeof(color_space));
}
end_ebml_master(pb, subinfo);
break;
case AVMEDIA_TYPE_AUDIO:
put_ebml_uint(pb, MATROSKA_ID_TRACKTYPE, MATROSKA_TRACK_TYPE_AUDIO);
if (!native_id)
// no mkv-specific ID, use ACM mode
put_ebml_string(pb, MATROSKA_ID_CODECID, "A_MS/ACM");
subinfo = start_ebml_master(pb, MATROSKA_ID_TRACKAUDIO, 0);
put_ebml_uint (pb, MATROSKA_ID_AUDIOCHANNELS , codec->channels);
put_ebml_float (pb, MATROSKA_ID_AUDIOSAMPLINGFREQ, sample_rate);
if (output_sample_rate)
put_ebml_float(pb, MATROSKA_ID_AUDIOOUTSAMPLINGFREQ, output_sample_rate);
if (bit_depth)
put_ebml_uint(pb, MATROSKA_ID_AUDIOBITDEPTH, bit_depth);
end_ebml_master(pb, subinfo);
break;
case AVMEDIA_TYPE_SUBTITLE:
if (!native_id) {
av_log(s, AV_LOG_ERROR, "Subtitle codec %d is not supported.\n", codec->codec_id);
return AVERROR(ENOSYS);
}
if (mkv->mode != MODE_WEBM || codec->codec_id != AV_CODEC_ID_WEBVTT)
native_id = MATROSKA_TRACK_TYPE_SUBTITLE;
put_ebml_uint(pb, MATROSKA_ID_TRACKTYPE, native_id);
break;
default:
av_log(s, AV_LOG_ERROR, "Only audio, video, and subtitles are supported for Matroska.\n");
return AVERROR(EINVAL);
}
if (mkv->mode != MODE_WEBM || codec->codec_id != AV_CODEC_ID_WEBVTT) {
ret = mkv_write_codecprivate(s, pb, codec, native_id, qt_id);
if (ret < 0)
return ret;
}
end_ebml_master(pb, track);
return 0;
}
| 22,589 |
FFmpeg | 87e8788680e16c51f6048af26f3f7830c35207a5 | 0 | static int ffm_probe(AVProbeData *p)
{
if (p->buf_size >= 4 &&
p->buf[0] == 'F' && p->buf[1] == 'F' && p->buf[2] == 'M' &&
p->buf[3] == '1')
return AVPROBE_SCORE_MAX + 1;
return 0;
}
| 22,590 |
qemu | e8bccad5ac6095b5af7946cd72d9aacb57f7c0a3 | 1 | static void win32_aio_process_completion(QEMUWin32AIOState *s,
QEMUWin32AIOCB *waiocb, DWORD count)
{
int ret;
s->count--;
if (waiocb->ov.Internal != 0) {
ret = -EIO;
} else {
ret = 0;
if (count < waiocb->nbytes) {
/* Short reads mean EOF, pad with zeros. */
if (waiocb->is_read) {
qemu_iovec_memset(waiocb->qiov, count, 0,
waiocb->qiov->size - count);
} else {
ret = -EINVAL;
}
}
}
if (!waiocb->is_linear) {
if (ret == 0 && waiocb->is_read) {
QEMUIOVector *qiov = waiocb->qiov;
char *p = waiocb->buf;
int i;
for (i = 0; i < qiov->niov; ++i) {
memcpy(qiov->iov[i].iov_base, p, qiov->iov[i].iov_len);
p += qiov->iov[i].iov_len;
}
qemu_vfree(waiocb->buf);
}
}
waiocb->common.cb(waiocb->common.opaque, ret);
qemu_aio_release(waiocb);
}
| 22,592 |
FFmpeg | a5e5959d52860678d028df07ad1351a11aaf47f7 | 1 | static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
AVCodecParserContext *pc, AVPacket *pkt,
int64_t next_dts, int64_t next_pts)
{
int num, den, presentation_delayed, delay, i;
int64_t offset;
AVRational duration;
int onein_oneout = st->codec->codec_id != AV_CODEC_ID_H264 &&
st->codec->codec_id != AV_CODEC_ID_HEVC;
if (s->flags & AVFMT_FLAG_NOFILLIN)
return;
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && pkt->dts != AV_NOPTS_VALUE) {
if (pkt->dts == pkt->pts && st->last_dts_for_order_check != AV_NOPTS_VALUE) {
if (st->last_dts_for_order_check <= pkt->dts) {
st->dts_ordered++;
} else {
av_log(s, st->dts_misordered ? AV_LOG_DEBUG : AV_LOG_WARNING,
"DTS %"PRIi64" < %"PRIi64" out of order\n",
pkt->dts,
st->last_dts_for_order_check);
st->dts_misordered++;
}
if (st->dts_ordered + st->dts_misordered > 250) {
st->dts_ordered >>= 1;
st->dts_misordered >>= 1;
}
}
st->last_dts_for_order_check = pkt->dts;
if (st->dts_ordered < 8*st->dts_misordered && pkt->dts == pkt->pts)
pkt->dts = AV_NOPTS_VALUE;
}
if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
pkt->dts = AV_NOPTS_VALUE;
if (pc && pc->pict_type == AV_PICTURE_TYPE_B
&& !st->codec->has_b_frames)
//FIXME Set low_delay = 0 when has_b_frames = 1
st->codec->has_b_frames = 1;
/* do we have a video B-frame ? */
delay = st->codec->has_b_frames;
presentation_delayed = 0;
/* XXX: need has_b_frame, but cannot get it if the codec is
* not initialized */
if (delay &&
pc && pc->pict_type != AV_PICTURE_TYPE_B)
presentation_delayed = 1;
if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
st->pts_wrap_bits < 63 &&
pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
if (is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits - 1)) > st->cur_dts) {
pkt->dts -= 1LL << st->pts_wrap_bits;
} else
pkt->pts += 1LL << st->pts_wrap_bits;
}
/* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
* We take the conservative approach and discard both.
* Note: If this is misbehaving for an H.264 file, then possibly
* presentation_delayed is not set correctly. */
if (delay == 1 && pkt->dts == pkt->pts &&
pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
if ( strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")
&& strcmp(s->iformat->name, "flv")) // otherwise we discard correct timestamps for vc1-wmapro.ism
pkt->dts = AV_NOPTS_VALUE;
}
duration = av_mul_q((AVRational) {pkt->duration, 1}, st->time_base);
if (pkt->duration == 0) {
ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
if (den && num) {
duration = (AVRational) {num, den};
pkt->duration = av_rescale_rnd(1,
num * (int64_t) st->time_base.den,
den * (int64_t) st->time_base.num,
AV_ROUND_DOWN);
}
}
if (pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
update_initial_durations(s, st, pkt->stream_index, pkt->duration);
/* Correct timestamps with byte offset if demuxers only have timestamps
* on packet boundaries */
if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
/* this will estimate bitrate based on this frame's duration and size */
offset = av_rescale(pc->offset, pkt->duration, pkt->size);
if (pkt->pts != AV_NOPTS_VALUE)
pkt->pts += offset;
if (pkt->dts != AV_NOPTS_VALUE)
pkt->dts += offset;
}
/* This may be redundant, but it should not hurt. */
if (pkt->dts != AV_NOPTS_VALUE &&
pkt->pts != AV_NOPTS_VALUE &&
pkt->pts > pkt->dts)
presentation_delayed = 1;
av_dlog(NULL,
"IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d delay:%d onein_oneout:%d\n",
presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts),
pkt->stream_index, pc, pkt->duration, delay, onein_oneout);
/* Interpolate PTS and DTS if they are not present. We skip H264
* currently because delay and has_b_frames are not reliably set. */
if ((delay == 0 || (delay == 1 && pc)) &&
onein_oneout) {
if (presentation_delayed) {
/* DTS = decompression timestamp */
/* PTS = presentation timestamp */
if (pkt->dts == AV_NOPTS_VALUE)
pkt->dts = st->last_IP_pts;
update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
if (pkt->dts == AV_NOPTS_VALUE)
pkt->dts = st->cur_dts;
/* This is tricky: the dts must be incremented by the duration
* of the frame we are displaying, i.e. the last I- or P-frame. */
if (st->last_IP_duration == 0)
st->last_IP_duration = pkt->duration;
if (pkt->dts != AV_NOPTS_VALUE)
st->cur_dts = pkt->dts + st->last_IP_duration;
if (pkt->dts != AV_NOPTS_VALUE &&
pkt->pts == AV_NOPTS_VALUE &&
st->last_IP_duration > 0 &&
(st->cur_dts - next_dts) <= 1 &&
next_dts != next_pts &&
next_pts != AV_NOPTS_VALUE)
pkt->pts = next_dts;
st->last_IP_duration = pkt->duration;
st->last_IP_pts = pkt->pts;
/* Cannot compute PTS if not present (we can compute it only
* by knowing the future. */
} else if (pkt->pts != AV_NOPTS_VALUE ||
pkt->dts != AV_NOPTS_VALUE ||
pkt->duration ) {
/* presentation is not delayed : PTS and DTS are the same */
if (pkt->pts == AV_NOPTS_VALUE)
pkt->pts = pkt->dts;
update_initial_timestamps(s, pkt->stream_index, pkt->pts,
pkt->pts, pkt);
if (pkt->pts == AV_NOPTS_VALUE)
pkt->pts = st->cur_dts;
pkt->dts = pkt->pts;
if (pkt->pts != AV_NOPTS_VALUE)
st->cur_dts = av_add_stable(st->time_base, pkt->pts, duration, 1);
}
}
if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)) {
st->pts_buffer[0] = pkt->pts;
for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
pkt->dts = select_from_pts_buffer(st, st->pts_buffer, pkt->dts);
}
// We skipped it above so we try here.
if (!onein_oneout)
// This should happen on the first packet
update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
if (pkt->dts > st->cur_dts)
st->cur_dts = pkt->dts;
av_dlog(NULL, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
/* update flags */
if (is_intra_only(st->codec))
pkt->flags |= AV_PKT_FLAG_KEY;
if (pc)
pkt->convergence_duration = pc->convergence_duration;
}
| 22,593 |
FFmpeg | a2a17d3f879436182bcc52c2986a56acd81e7e92 | 1 | static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
/*
this code makes no assumption on src or stride.
One could remove the recomputation of the perm
vector by assuming (stride % 16) == 0, unfortunately
this is not always true.
*/
DECLARE_ALIGNED(16, short, data)[8] =
{
((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1,
data[0] * 2 + 1,
c->QP * 2,
c->QP * 4
};
int numEq;
uint8_t *src2 = src;
vector signed short v_dcOffset;
vector signed short v2QP;
vector unsigned short v4QP;
vector unsigned short v_dcThreshold;
const int properStride = (stride % 16);
const int srcAlign = ((unsigned long)src2 % 16);
const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
const vector signed int zero = vec_splat_s32(0);
const vector signed short mask = vec_splat_s16(1);
vector signed int v_numEq = vec_splat_s32(0);
vector signed short v_data = vec_ld(0, data);
vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3,
v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;
//FIXME avoid this mess if possible
register int j0 = 0,
j1 = stride,
j2 = 2 * stride,
j3 = 3 * stride,
j4 = 4 * stride,
j5 = 5 * stride,
j6 = 6 * stride,
j7 = 7 * stride;
vector unsigned char v_srcA0, v_srcA1, v_srcA2, v_srcA3,
v_srcA4, v_srcA5, v_srcA6, v_srcA7;
v_dcOffset = vec_splat(v_data, 0);
v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
v2QP = vec_splat(v_data, 2);
v4QP = (vector unsigned short)vec_splat(v_data, 3);
src2 += stride * 4;
#define LOAD_LINE(i) \
{ \
vector unsigned char perm##i = vec_lvsl(j##i, src2); \
vector unsigned char v_srcA2##i; \
vector unsigned char v_srcA1##i = vec_ld(j##i, src2); \
if (two_vectors) \
v_srcA2##i = vec_ld(j##i + 16, src2); \
v_srcA##i = \
vec_perm(v_srcA1##i, v_srcA2##i, perm##i); \
v_srcAss##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)v_srcA##i); }
#define LOAD_LINE_ALIGNED(i) \
v_srcA##i = vec_ld(j##i, src2); \
v_srcAss##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)v_srcA##i)
/* Special-casing the aligned case is worthwhile, as all calls from
* the (transposed) horizontable deblocks will be aligned, in addition
* to the naturally aligned vertical deblocks. */
if (properStride && srcAlign) {
LOAD_LINE_ALIGNED(0);
LOAD_LINE_ALIGNED(1);
LOAD_LINE_ALIGNED(2);
LOAD_LINE_ALIGNED(3);
LOAD_LINE_ALIGNED(4);
LOAD_LINE_ALIGNED(5);
LOAD_LINE_ALIGNED(6);
LOAD_LINE_ALIGNED(7);
} else {
LOAD_LINE(0);
LOAD_LINE(1);
LOAD_LINE(2);
LOAD_LINE(3);
LOAD_LINE(4);
LOAD_LINE(5);
LOAD_LINE(6);
LOAD_LINE(7);
}
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
#define ITER(i, j) \
const vector signed short v_diff##i = \
vec_sub(v_srcAss##i, v_srcAss##j); \
const vector signed short v_sum##i = \
vec_add(v_diff##i, v_dcOffset); \
const vector signed short v_comp##i = \
(vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
v_dcThreshold); \
const vector signed short v_part##i = vec_and(mask, v_comp##i);
{
ITER(0, 1)
ITER(1, 2)
ITER(2, 3)
ITER(3, 4)
ITER(4, 5)
ITER(5, 6)
ITER(6, 7)
v_numEq = vec_sum4s(v_part0, v_numEq);
v_numEq = vec_sum4s(v_part1, v_numEq);
v_numEq = vec_sum4s(v_part2, v_numEq);
v_numEq = vec_sum4s(v_part3, v_numEq);
v_numEq = vec_sum4s(v_part4, v_numEq);
v_numEq = vec_sum4s(v_part5, v_numEq);
v_numEq = vec_sum4s(v_part6, v_numEq);
}
#undef ITER
v_numEq = vec_sums(v_numEq, zero);
v_numEq = vec_splat(v_numEq, 3);
vec_ste(v_numEq, 0, &numEq);
if (numEq > c->ppMode.flatnessThreshold){
const vector unsigned char mmoP1 = (const vector unsigned char)
{0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B};
const vector unsigned char mmoP2 = (const vector unsigned char)
{0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f};
const vector unsigned char mmoP = (const vector unsigned char)
vec_lvsl(8, (unsigned char*)0);
vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
vector signed short mmoDiff = vec_sub(mmoL, mmoR);
vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);
if (vec_any_gt(mmoSum, v4QP))
return 0;
else
return 1;
}
else return 2;
}
| 22,594 |
FFmpeg | f6774f905fb3cfdc319523ac640be30b14c1bc55 | 1 | static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
{
VC1Context * const v = avctx->priv_data;
MpegEncContext * const s = &v->s;
struct vaapi_context * const vactx = avctx->hwaccel_context;
VAPictureParameterBufferVC1 *pic_param;
av_dlog(avctx, "vaapi_vc1_start_frame()\n");
vactx->slice_param_size = sizeof(VASliceParameterBufferVC1);
/* Fill in VAPictureParameterBufferVC1 */
pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferVC1));
if (!pic_param)
return -1;
pic_param->forward_reference_picture = VA_INVALID_ID;
pic_param->backward_reference_picture = VA_INVALID_ID;
pic_param->inloop_decoded_picture = VA_INVALID_ID;
pic_param->sequence_fields.value = 0; /* reset all bits */
pic_param->sequence_fields.bits.pulldown = v->broadcast;
pic_param->sequence_fields.bits.interlace = v->interlace;
pic_param->sequence_fields.bits.tfcntrflag = v->tfcntrflag;
pic_param->sequence_fields.bits.finterpflag = v->finterpflag;
pic_param->sequence_fields.bits.psf = v->psf;
pic_param->sequence_fields.bits.multires = v->multires;
pic_param->sequence_fields.bits.overlap = v->overlap;
pic_param->sequence_fields.bits.syncmarker = v->resync_marker;
pic_param->sequence_fields.bits.rangered = v->rangered;
pic_param->sequence_fields.bits.max_b_frames = s->avctx->max_b_frames;
#if VA_CHECK_VERSION(0,32,0)
pic_param->sequence_fields.bits.profile = v->profile;
#endif
pic_param->coded_width = s->avctx->coded_width;
pic_param->coded_height = s->avctx->coded_height;
pic_param->entrypoint_fields.value = 0; /* reset all bits */
pic_param->entrypoint_fields.bits.broken_link = v->broken_link;
pic_param->entrypoint_fields.bits.closed_entry = v->closed_entry;
pic_param->entrypoint_fields.bits.panscan_flag = v->panscanflag;
pic_param->entrypoint_fields.bits.loopfilter = s->loop_filter;
pic_param->conditional_overlap_flag = v->condover;
pic_param->fast_uvmc_flag = v->fastuvmc;
pic_param->range_mapping_fields.value = 0; /* reset all bits */
pic_param->range_mapping_fields.bits.luma_flag = v->range_mapy_flag;
pic_param->range_mapping_fields.bits.luma = v->range_mapy;
pic_param->range_mapping_fields.bits.chroma_flag = v->range_mapuv_flag;
pic_param->range_mapping_fields.bits.chroma = v->range_mapuv;
pic_param->b_picture_fraction = v->bfraction_lut_index;
pic_param->cbp_table = v->cbpcy_vlc ? v->cbpcy_vlc - ff_vc1_cbpcy_p_vlc : 0;
pic_param->mb_mode_table = 0; /* XXX: interlaced frame */
pic_param->range_reduction_frame = v->rangeredfrm;
pic_param->rounding_control = v->rnd;
pic_param->post_processing = v->postproc;
pic_param->picture_resolution_index = v->respic;
pic_param->luma_scale = v->lumscale;
pic_param->luma_shift = v->lumshift;
pic_param->picture_fields.value = 0; /* reset all bits */
pic_param->picture_fields.bits.picture_type = vc1_get_PTYPE(v);
pic_param->picture_fields.bits.frame_coding_mode = v->fcm;
pic_param->picture_fields.bits.top_field_first = v->tff;
pic_param->picture_fields.bits.is_first_field = v->fcm == 0; /* XXX: interlaced frame */
pic_param->picture_fields.bits.intensity_compensation = v->mv_mode == MV_PMODE_INTENSITY_COMP;
pic_param->raw_coding.value = 0; /* reset all bits */
pic_param->raw_coding.flags.mv_type_mb = v->mv_type_is_raw;
pic_param->raw_coding.flags.direct_mb = v->dmb_is_raw;
pic_param->raw_coding.flags.skip_mb = v->skip_is_raw;
pic_param->raw_coding.flags.field_tx = 0; /* XXX: interlaced frame */
pic_param->raw_coding.flags.forward_mb = 0; /* XXX: interlaced frame */
pic_param->raw_coding.flags.ac_pred = v->acpred_is_raw;
pic_param->raw_coding.flags.overflags = v->overflg_is_raw;
pic_param->bitplane_present.value = 0; /* reset all bits */
pic_param->bitplane_present.flags.bp_mv_type_mb = vc1_has_MVTYPEMB_bitplane(v);
pic_param->bitplane_present.flags.bp_direct_mb = vc1_has_DIRECTMB_bitplane(v);
pic_param->bitplane_present.flags.bp_skip_mb = vc1_has_SKIPMB_bitplane(v);
pic_param->bitplane_present.flags.bp_field_tx = 0; /* XXX: interlaced frame */
pic_param->bitplane_present.flags.bp_forward_mb = 0; /* XXX: interlaced frame */
pic_param->bitplane_present.flags.bp_ac_pred = vc1_has_ACPRED_bitplane(v);
pic_param->bitplane_present.flags.bp_overflags = vc1_has_OVERFLAGS_bitplane(v);
pic_param->reference_fields.value = 0; /* reset all bits */
pic_param->reference_fields.bits.reference_distance_flag = v->refdist_flag;
pic_param->reference_fields.bits.reference_distance = 0; /* XXX: interlaced frame */
pic_param->reference_fields.bits.num_reference_pictures = 0; /* XXX: interlaced frame */
pic_param->reference_fields.bits.reference_field_pic_indicator = 0; /* XXX: interlaced frame */
pic_param->mv_fields.value = 0; /* reset all bits */
pic_param->mv_fields.bits.mv_mode = vc1_get_MVMODE(v);
pic_param->mv_fields.bits.mv_mode2 = vc1_get_MVMODE2(v);
pic_param->mv_fields.bits.mv_table = s->mv_table_index;
pic_param->mv_fields.bits.two_mv_block_pattern_table = 0; /* XXX: interlaced frame */
pic_param->mv_fields.bits.four_mv_switch = 0; /* XXX: interlaced frame */
pic_param->mv_fields.bits.four_mv_block_pattern_table = 0; /* XXX: interlaced frame */
pic_param->mv_fields.bits.extended_mv_flag = v->extended_mv;
pic_param->mv_fields.bits.extended_mv_range = v->mvrange;
pic_param->mv_fields.bits.extended_dmv_flag = v->extended_dmv;
pic_param->mv_fields.bits.extended_dmv_range = 0; /* XXX: interlaced frame */
pic_param->pic_quantizer_fields.value = 0; /* reset all bits */
pic_param->pic_quantizer_fields.bits.dquant = v->dquant;
pic_param->pic_quantizer_fields.bits.quantizer = v->quantizer_mode;
pic_param->pic_quantizer_fields.bits.half_qp = v->halfpq;
pic_param->pic_quantizer_fields.bits.pic_quantizer_scale = v->pq;
pic_param->pic_quantizer_fields.bits.pic_quantizer_type = v->pquantizer;
pic_param->pic_quantizer_fields.bits.dq_frame = v->dquantfrm;
pic_param->pic_quantizer_fields.bits.dq_profile = v->dqprofile;
pic_param->pic_quantizer_fields.bits.dq_sb_edge = v->dqprofile == DQPROFILE_SINGLE_EDGE ? v->dqsbedge : 0;
pic_param->pic_quantizer_fields.bits.dq_db_edge = v->dqprofile == DQPROFILE_DOUBLE_EDGES ? v->dqsbedge : 0;
pic_param->pic_quantizer_fields.bits.dq_binary_level = v->dqbilevel;
pic_param->pic_quantizer_fields.bits.alt_pic_quantizer = v->altpq;
pic_param->transform_fields.value = 0; /* reset all bits */
pic_param->transform_fields.bits.variable_sized_transform_flag = v->vstransform;
pic_param->transform_fields.bits.mb_level_transform_type_flag = v->ttmbf;
pic_param->transform_fields.bits.frame_level_transform_type = vc1_get_TTFRM(v);
pic_param->transform_fields.bits.transform_ac_codingset_idx1 = v->c_ac_table_index;
pic_param->transform_fields.bits.transform_ac_codingset_idx2 = v->y_ac_table_index;
pic_param->transform_fields.bits.intra_transform_dc_table = v->s.dc_table_index;
switch (s->pict_type) {
case AV_PICTURE_TYPE_B:
pic_param->backward_reference_picture = ff_vaapi_get_surface_id(&s->next_picture.f);
// fall-through
case AV_PICTURE_TYPE_P:
pic_param->forward_reference_picture = ff_vaapi_get_surface_id(&s->last_picture.f);
break;
}
if (pic_param->bitplane_present.value) {
uint8_t *bitplane;
const uint8_t *ff_bp[3];
int x, y, n;
switch (s->pict_type) {
case AV_PICTURE_TYPE_P:
ff_bp[0] = pic_param->bitplane_present.flags.bp_direct_mb ? v->direct_mb_plane : NULL;
ff_bp[1] = pic_param->bitplane_present.flags.bp_skip_mb ? s->mbskip_table : NULL;
ff_bp[2] = pic_param->bitplane_present.flags.bp_mv_type_mb ? v->mv_type_mb_plane : NULL;
break;
case AV_PICTURE_TYPE_B:
if (!v->bi_type) {
ff_bp[0] = pic_param->bitplane_present.flags.bp_direct_mb ? v->direct_mb_plane : NULL;
ff_bp[1] = pic_param->bitplane_present.flags.bp_skip_mb ? s->mbskip_table : NULL;
ff_bp[2] = NULL; /* XXX: interlaced frame (FORWARD plane) */
break;
}
/* fall-through (BI-type) */
case AV_PICTURE_TYPE_I:
ff_bp[0] = NULL; /* XXX: interlaced frame (FIELDTX plane) */
ff_bp[1] = pic_param->bitplane_present.flags.bp_ac_pred ? v->acpred_plane : NULL;
ff_bp[2] = pic_param->bitplane_present.flags.bp_overflags ? v->over_flags_plane : NULL;
break;
default:
ff_bp[0] = NULL;
ff_bp[1] = NULL;
ff_bp[2] = NULL;
break;
}
bitplane = ff_vaapi_alloc_bitplane(vactx, (s->mb_width * s->mb_height + 1) / 2);
if (!bitplane)
return -1;
n = 0;
for (y = 0; y < s->mb_height; y++)
for (x = 0; x < s->mb_width; x++, n++)
vc1_pack_bitplanes(bitplane, n, ff_bp, x, y, s->mb_stride);
if (n & 1) /* move last nibble to the high order */
bitplane[n/2] <<= 4;
}
return 0;
}
| 22,595 |
FFmpeg | eb5049227033d946add93c0714bb8a28d94166f1 | 1 | static int dxv_decompress_raw(AVCodecContext *avctx)
{
DXVContext *ctx = avctx->priv_data;
GetByteContext *gbc = &ctx->gbc;
bytestream2_get_buffer(gbc, ctx->tex_data, ctx->tex_size);
return 0;
}
 | 22,596
qemu | 107e4b352cc309f9bd7588ef1a44549200620078 | 1 | static int of_dpa_cmd_group_add(OfDpa *of_dpa, uint32_t group_id,
RockerTlv **group_tlvs)
{
OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
int err;
if (group) {
return -ROCKER_EEXIST;
}
group = of_dpa_group_alloc(group_id);
if (!group) {
return -ROCKER_ENOMEM;
}
err = of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
if (err) {
goto err_cmd_add;
}
err = of_dpa_group_add(of_dpa, group);
if (err) {
goto err_cmd_add;
}
return ROCKER_OK;
err_cmd_add:
g_free(group);
return err;
}
| 22,598 |
FFmpeg | 6e3ea4461fa9a77964efd2fa7ed1250dd1c8d43d | 0 | static int mxf_read_local_tags(MXFContext *mxf, KLVPacket *klv, int (*read_child)(), int ctx_size, enum MXFMetadataSetType type)
{
ByteIOContext *pb = mxf->fc->pb;
MXFMetadataSet *ctx = ctx_size ? av_mallocz(ctx_size) : mxf;
uint64_t klv_end = url_ftell(pb) + klv->length;
if (!ctx)
return -1;
while (url_ftell(pb) + 4 < klv_end) {
int tag = get_be16(pb);
int size = get_be16(pb); /* KLV specified by 0x53 */
uint64_t next = url_ftell(pb) + size;
UID uid;
if (!size) { /* ignore empty tag, needed for some files with empty UMID tag */
av_log(mxf->fc, AV_LOG_ERROR, "local tag 0x%04X with 0 size\n", tag);
continue;
}
if (tag > 0x7FFF) { /* dynamic tag */
int i;
for (i = 0; i < mxf->local_tags_count; i++) {
int local_tag = AV_RB16(mxf->local_tags+i*18);
if (local_tag == tag) {
memcpy(uid, mxf->local_tags+i*18+2, 16);
dprintf(mxf->fc, "local tag 0x%04X\n", local_tag);
#ifdef DEBUG
PRINT_KEY(mxf->fc, "uid", uid);
#endif
}
}
}
if (ctx_size && tag == 0x3C0A)
get_buffer(pb, ctx->uid, 16);
else
read_child(ctx, pb, tag, size, uid);
url_fseek(pb, next, SEEK_SET);
}
if (ctx_size) ctx->type = type;
return ctx_size ? mxf_add_metadata_set(mxf, ctx) : 0;
}
| 22,599 |
FFmpeg | 89f704cabab446afc8ba6ecea76714a51b1df32b | 0 | static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame,
int *got_packet)
{
X264Context *x4 = ctx->priv_data;
x264_nal_t *nal;
int nnal, i, ret;
x264_picture_t pic_out = {0};
int pict_type;
x264_picture_init( &x4->pic );
x4->pic.img.i_csp = x4->params.i_csp;
if (x264_bit_depth > 8)
x4->pic.img.i_csp |= X264_CSP_HIGH_DEPTH;
x4->pic.img.i_plane = avfmt2_num_planes(ctx->pix_fmt);
if (frame) {
for (i = 0; i < x4->pic.img.i_plane; i++) {
x4->pic.img.plane[i] = frame->data[i];
x4->pic.img.i_stride[i] = frame->linesize[i];
}
x4->pic.i_pts = frame->pts;
switch (frame->pict_type) {
case AV_PICTURE_TYPE_I:
x4->pic.i_type = x4->forced_idr > 0 ? X264_TYPE_IDR
: X264_TYPE_KEYFRAME;
break;
case AV_PICTURE_TYPE_P:
x4->pic.i_type = X264_TYPE_P;
break;
case AV_PICTURE_TYPE_B:
x4->pic.i_type = X264_TYPE_B;
break;
default:
x4->pic.i_type = X264_TYPE_AUTO;
break;
}
reconfig_encoder(ctx, frame);
if (x4->a53_cc) {
void *sei_data;
size_t sei_size;
ret = ff_alloc_a53_sei(frame, 0, &sei_data, &sei_size);
if (ret < 0) {
av_log(ctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
} else if (sei_data) {
x4->pic.extra_sei.payloads = av_mallocz(sizeof(x4->pic.extra_sei.payloads[0]));
if (x4->pic.extra_sei.payloads == NULL) {
av_log(ctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
av_free(sei_data);
} else {
x4->pic.extra_sei.sei_free = av_free;
x4->pic.extra_sei.payloads[0].payload_size = sei_size;
x4->pic.extra_sei.payloads[0].payload = sei_data;
x4->pic.extra_sei.num_payloads = 1;
x4->pic.extra_sei.payloads[0].payload_type = 4;
}
}
}
}
do {
if (x264_encoder_encode(x4->enc, &nal, &nnal, frame? &x4->pic: NULL, &pic_out) < 0)
return AVERROR_EXTERNAL;
ret = encode_nals(ctx, pkt, nal, nnal);
if (ret < 0)
return ret;
} while (!ret && !frame && x264_encoder_delayed_frames(x4->enc));
pkt->pts = pic_out.i_pts;
pkt->dts = pic_out.i_dts;
switch (pic_out.i_type) {
case X264_TYPE_IDR:
case X264_TYPE_I:
pict_type = AV_PICTURE_TYPE_I;
break;
case X264_TYPE_P:
pict_type = AV_PICTURE_TYPE_P;
break;
case X264_TYPE_B:
case X264_TYPE_BREF:
pict_type = AV_PICTURE_TYPE_B;
break;
default:
pict_type = AV_PICTURE_TYPE_NONE;
}
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
ctx->coded_frame->pict_type = pict_type;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
pkt->flags |= AV_PKT_FLAG_KEY*pic_out.b_keyframe;
if (ret) {
ff_side_data_set_encoder_stats(pkt, (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA, NULL, 0, pict_type);
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
ctx->coded_frame->quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
}
*got_packet = ret;
return 0;
}
| 22,602 |
FFmpeg | 332f9ac4e31ce5e6d0c42ac9e0229d7d1b2b4d60 | 0 | int flv_h263_decode_picture_header(MpegEncContext *s)
{
int format, width, height;
/* picture header */
if (get_bits_long(&s->gb, 17) != 1) {
av_log(s->avctx, AV_LOG_ERROR, "Bad picture start code\n");
return -1;
}
format = get_bits(&s->gb, 5);
if (format != 0 && format != 1) {
av_log(s->avctx, AV_LOG_ERROR, "Bad picture format\n");
return -1;
}
s->h263_flv = format+1;
s->picture_number = get_bits(&s->gb, 8); /* picture timestamp */
format = get_bits(&s->gb, 3);
switch (format) {
case 0:
width = get_bits(&s->gb, 8);
height = get_bits(&s->gb, 8);
break;
case 1:
width = get_bits(&s->gb, 16);
height = get_bits(&s->gb, 16);
break;
case 2:
width = 352;
height = 288;
break;
case 3:
width = 176;
height = 144;
break;
case 4:
width = 128;
height = 96;
break;
case 5:
width = 320;
height = 240;
break;
case 6:
width = 160;
height = 120;
break;
default:
width = height = 0;
break;
}
if ((width == 0) || (height == 0))
return -1;
s->width = width;
s->height = height;
s->pict_type = I_TYPE + get_bits(&s->gb, 2);
if (s->pict_type > P_TYPE)
s->pict_type = P_TYPE;
skip_bits1(&s->gb); /* deblocking flag */
s->qscale = get_bits(&s->gb, 5);
s->h263_plus = 0;
s->unrestricted_mv = 1;
s->h263_long_vectors = 0;
/* PEI */
while (get_bits1(&s->gb) != 0) {
skip_bits(&s->gb, 8);
}
s->f_code = 1;
if(s->avctx->debug & FF_DEBUG_PICT_INFO){
av_log(s->avctx, AV_LOG_DEBUG, "%c esc_type:%d, qp:%d num:%d\n",
av_get_pict_type_char(s->pict_type), s->h263_flv-1, s->qscale, s->picture_number);
}
s->y_dc_scale_table=
s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
return 0;
}
| 22,603 |
qemu | f1d3b99154138741161fc52f5a8c373bf71613c6 | 1 | static void pci_msix(void)
{
QVirtioPCIDevice *dev;
QPCIBus *bus;
QVirtQueuePCI *vqpci;
QGuestAllocator *alloc;
QVirtioBlkReq req;
int n_size = TEST_IMAGE_SIZE / 2;
void *addr;
uint64_t req_addr;
uint64_t capacity;
uint32_t features;
uint32_t free_head;
uint8_t status;
char *data;
bus = pci_test_start();
alloc = pc_alloc_init();
dev = virtio_blk_pci_init(bus, PCI_SLOT);
qpci_msix_enable(dev->pdev);
qvirtio_pci_set_msix_configuration_vector(dev, alloc, 0);
/* MSI-X is enabled */
addr = dev->addr + VIRTIO_PCI_CONFIG_OFF(true);
capacity = qvirtio_config_readq(&qvirtio_pci, &dev->vdev,
(uint64_t)(uintptr_t)addr);
g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512);
features = qvirtio_get_features(&qvirtio_pci, &dev->vdev);
features = features & ~(QVIRTIO_F_BAD_FEATURE |
(1u << VIRTIO_RING_F_INDIRECT_DESC) |
(1u << VIRTIO_RING_F_EVENT_IDX) |
(1u << VIRTIO_BLK_F_SCSI));
qvirtio_set_features(&qvirtio_pci, &dev->vdev, features);
vqpci = (QVirtQueuePCI *)qvirtqueue_setup(&qvirtio_pci, &dev->vdev,
alloc, 0);
qvirtqueue_pci_msix_setup(dev, vqpci, alloc, 1);
qvirtio_set_driver_ok(&qvirtio_pci, &dev->vdev);
qmp("{ 'execute': 'block_resize', 'arguments': { 'device': 'drive0', "
" 'size': %d } }", n_size);
qvirtio_wait_config_isr(&qvirtio_pci, &dev->vdev, QVIRTIO_BLK_TIMEOUT_US);
capacity = qvirtio_config_readq(&qvirtio_pci, &dev->vdev,
(uint64_t)(uintptr_t)addr);
g_assert_cmpint(capacity, ==, n_size / 512);
/* Write request */
req.type = VIRTIO_BLK_T_OUT;
req.ioprio = 1;
req.sector = 0;
req.data = g_malloc0(512);
strcpy(req.data, "TEST");
req_addr = virtio_blk_request(alloc, &req, 512);
g_free(req.data);
free_head = qvirtqueue_add(&vqpci->vq, req_addr, 16, false, true);
qvirtqueue_add(&vqpci->vq, req_addr + 16, 512, false, true);
qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false);
qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 528);
g_assert_cmpint(status, ==, 0);
guest_free(alloc, req_addr);
/* Read request */
req.type = VIRTIO_BLK_T_IN;
req.ioprio = 1;
req.sector = 0;
req.data = g_malloc0(512);
req_addr = virtio_blk_request(alloc, &req, 512);
g_free(req.data);
free_head = qvirtqueue_add(&vqpci->vq, req_addr, 16, false, true);
qvirtqueue_add(&vqpci->vq, req_addr + 16, 512, true, true);
qvirtqueue_add(&vqpci->vq, req_addr + 528, 1, true, false);
qvirtqueue_kick(&qvirtio_pci, &dev->vdev, &vqpci->vq, free_head);
qvirtio_wait_queue_isr(&qvirtio_pci, &dev->vdev, &vqpci->vq,
QVIRTIO_BLK_TIMEOUT_US);
status = readb(req_addr + 528);
g_assert_cmpint(status, ==, 0);
data = g_malloc0(512);
memread(req_addr + 16, data, 512);
g_assert_cmpstr(data, ==, "TEST");
g_free(data);
guest_free(alloc, req_addr);
/* End test */
guest_free(alloc, vqpci->vq.desc);
pc_alloc_uninit(alloc);
qpci_msix_disable(dev->pdev);
qvirtio_pci_device_disable(dev);
g_free(dev);
qpci_free_pc(bus);
test_end();
}
| 22,604 |
qemu | 0b8b8753e4d94901627b3e86431230f2319215c4 | 1 | static void test_co_queue(void)
{
Coroutine *c1;
Coroutine *c2;
c1 = qemu_coroutine_create(c1_fn);
c2 = qemu_coroutine_create(c2_fn);
qemu_coroutine_enter(c1, c2);
memset(c1, 0xff, sizeof(Coroutine));
qemu_coroutine_enter(c2, NULL);
}
| 22,605 |
FFmpeg | 56706ac0d5723cb549fec2602e798ab1bf6004cd | 1 | static int libopenjpeg_copy_unpacked8(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
int compno;
int x;
int y;
int width;
int height;
int *image_line;
int frame_index;
const int numcomps = image->numcomps;
for (compno = 0; compno < numcomps; ++compno) {
if (image->comps[compno].w > frame->linesize[compno]) {
av_log(avctx, AV_LOG_ERROR, "Error: frame's linesize is too small for the image\n");
return 0;
}
}
for (compno = 0; compno < numcomps; ++compno) {
width = avctx->width / image->comps[compno].dx;
height = avctx->height / image->comps[compno].dy;
for (y = 0; y < height; ++y) {
image_line = image->comps[compno].data + y * image->comps[compno].w;
frame_index = y * frame->linesize[compno];
for (x = 0; x < width; ++x)
image_line[x] = frame->data[compno][frame_index++];
for (; x < image->comps[compno].w; ++x) {
image_line[x] = image_line[x - 1];
}
}
for (; y < image->comps[compno].h; ++y) {
image_line = image->comps[compno].data + y * image->comps[compno].w;
for (x = 0; x < image->comps[compno].w; ++x) {
image_line[x] = image_line[x - image->comps[compno].w];
}
}
}
return 1;
}
| 22,606 |
FFmpeg | 984d58a3440d513f66344b5332f6b589c0a6bbc6 | 1 | static int url_alloc_for_protocol(URLContext **puc, struct URLProtocol *up,
const char *filename, int flags,
const AVIOInterruptCB *int_cb)
{
URLContext *uc;
int err;
#if CONFIG_NETWORK
if (up->flags & URL_PROTOCOL_FLAG_NETWORK && !ff_network_init())
return AVERROR(EIO);
#endif
if ((flags & AVIO_FLAG_READ) && !up->url_read) {
av_log(NULL, AV_LOG_ERROR,
"Impossible to open the '%s' protocol for reading\n", up->name);
return AVERROR(EIO);
}
if ((flags & AVIO_FLAG_WRITE) && !up->url_write) {
av_log(NULL, AV_LOG_ERROR,
"Impossible to open the '%s' protocol for writing\n", up->name);
return AVERROR(EIO);
}
uc = av_mallocz(sizeof(URLContext) + strlen(filename) + 1);
if (!uc) {
err = AVERROR(ENOMEM);
goto fail;
}
uc->av_class = &ffurl_context_class;
uc->filename = (char *)&uc[1];
strcpy(uc->filename, filename);
uc->prot = up;
uc->flags = flags;
uc->is_streamed = 0; /* default = not streamed */
uc->max_packet_size = 0; /* default: stream file */
if (up->priv_data_size) {
uc->priv_data = av_mallocz(up->priv_data_size);
if (!uc->priv_data) {
err = AVERROR(ENOMEM);
goto fail;
}
if (up->priv_data_class) {
int proto_len= strlen(up->name);
char *start = strchr(uc->filename, ',');
*(const AVClass **)uc->priv_data = up->priv_data_class;
av_opt_set_defaults(uc->priv_data);
if(!strncmp(up->name, uc->filename, proto_len) && uc->filename + proto_len == start){
int ret= 0;
char *p= start;
char sep= *++p;
char *key, *val;
p++;
while(ret >= 0 && (key= strchr(p, sep)) && p<key && (val = strchr(key+1, sep))){
*val= *key= 0;
ret= av_opt_set(uc->priv_data, p, key+1, 0);
if (ret == AVERROR_OPTION_NOT_FOUND)
av_log(uc, AV_LOG_ERROR, "Key '%s' not found.\n", p);
*val= *key= sep;
p= val+1;
}
if(ret<0 || p!=key){
av_log(uc, AV_LOG_ERROR, "Error parsing options string %s\n", start);
av_freep(&uc->priv_data);
av_freep(&uc);
err = AVERROR(EINVAL);
goto fail;
}
memmove(start, key+1, strlen(key));
}
}
}
if (int_cb)
uc->interrupt_callback = *int_cb;
*puc = uc;
return 0;
fail:
*puc = NULL;
if (uc)
av_freep(&uc->priv_data);
av_freep(&uc);
#if CONFIG_NETWORK
if (up->flags & URL_PROTOCOL_FLAG_NETWORK)
ff_network_close();
#endif
return err;
}
| 22,607 |
FFmpeg | e92a78a4095d69d876bef189225608a35166dc4a | 1 | void ff_write_pass1_stats(MpegEncContext *s)
{
snprintf(s->avctx->stats_out, 256,
"in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d "
"fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d hbits:%d;\n",
s->current_picture_ptr->f.display_picture_number,
s->current_picture_ptr->f.coded_picture_number,
s->pict_type,
s->current_picture.f.quality,
s->i_tex_bits,
s->p_tex_bits,
s->mv_bits,
s->misc_bits,
s->f_code,
s->b_code,
s->current_picture.mc_mb_var_sum,
s->current_picture.mb_var_sum,
s->i_count, s->skip_count,
s->header_bits);
}
| 22,608 |
qemu | f575f145f4fa97fdbb9bbb4df62dfeada3f15dc4 | 1 | static bool is_zero_cluster(BlockDriverState *bs, int64_t start)
{
BDRVQcow2State *s = bs->opaque;
int nr;
BlockDriverState *file;
int64_t res = bdrv_get_block_status_above(bs, NULL, start,
s->cluster_sectors, &nr, &file);
return res >= 0 && ((res & BDRV_BLOCK_ZERO) || !(res & BDRV_BLOCK_DATA));
}
| 22,609 |
qemu | cdeaf1f15909e2e8af38f45aea7cfa467a729c52 | 1 | static int get_cluster_offset(BlockDriverState *bs,
VmdkExtent *extent,
VmdkMetaData *m_data,
uint64_t offset,
int allocate,
uint64_t *cluster_offset)
{
unsigned int l1_index, l2_offset, l2_index;
int min_index, i, j;
uint32_t min_count, *l2_table;
bool zeroed = false;
if (m_data) {
m_data->valid = 0;
}
if (extent->flat) {
*cluster_offset = extent->flat_start_offset;
return VMDK_OK;
}
offset -= (extent->end_sector - extent->sectors) * SECTOR_SIZE;
l1_index = (offset >> 9) / extent->l1_entry_sectors;
if (l1_index >= extent->l1_size) {
return VMDK_ERROR;
}
l2_offset = extent->l1_table[l1_index];
if (!l2_offset) {
return VMDK_UNALLOC;
}
for (i = 0; i < L2_CACHE_SIZE; i++) {
if (l2_offset == extent->l2_cache_offsets[i]) {
/* increment the hit count */
if (++extent->l2_cache_counts[i] == 0xffffffff) {
for (j = 0; j < L2_CACHE_SIZE; j++) {
extent->l2_cache_counts[j] >>= 1;
}
}
l2_table = extent->l2_cache + (i * extent->l2_size);
goto found;
}
}
/* not found: load a new entry in the least used one */
min_index = 0;
min_count = 0xffffffff;
for (i = 0; i < L2_CACHE_SIZE; i++) {
if (extent->l2_cache_counts[i] < min_count) {
min_count = extent->l2_cache_counts[i];
min_index = i;
}
}
l2_table = extent->l2_cache + (min_index * extent->l2_size);
if (bdrv_pread(
extent->file,
(int64_t)l2_offset * 512,
l2_table,
extent->l2_size * sizeof(uint32_t)
) != extent->l2_size * sizeof(uint32_t)) {
return VMDK_ERROR;
}
extent->l2_cache_offsets[min_index] = l2_offset;
extent->l2_cache_counts[min_index] = 1;
found:
l2_index = ((offset >> 9) / extent->cluster_sectors) % extent->l2_size;
*cluster_offset = le32_to_cpu(l2_table[l2_index]);
if (extent->has_zero_grain && *cluster_offset == VMDK_GTE_ZEROED) {
zeroed = true;
}
if (!*cluster_offset || zeroed) {
if (!allocate) {
return zeroed ? VMDK_ZEROED : VMDK_UNALLOC;
}
/* Avoid the L2 tables update for the images that have snapshots. */
*cluster_offset = bdrv_getlength(extent->file);
if (!extent->compressed) {
bdrv_truncate(
extent->file,
*cluster_offset + (extent->cluster_sectors << 9)
);
}
*cluster_offset >>= 9;
l2_table[l2_index] = cpu_to_le32(*cluster_offset);
/* First of all we write grain itself, to avoid race condition
* that may to corrupt the image.
* This problem may occur because of insufficient space on host disk
* or inappropriate VM shutdown.
*/
if (get_whole_cluster(
bs, extent, *cluster_offset, offset, allocate) == -1) {
return VMDK_ERROR;
}
if (m_data) {
m_data->offset = *cluster_offset;
m_data->l1_index = l1_index;
m_data->l2_index = l2_index;
m_data->l2_offset = l2_offset;
m_data->valid = 1;
}
}
*cluster_offset <<= 9;
return VMDK_OK;
}
| 22,610 |
qemu | 357d1e3bc7d2d80e5271bc4f3ac8537e30dc8046 | 1 | QPCIBus *qpci_init_spapr(QGuestAllocator *alloc)
{
QPCIBusSPAPR *ret;
ret = g_malloc(sizeof(*ret));
ret->alloc = alloc;
ret->bus.io_readb = qpci_spapr_io_readb;
ret->bus.io_readw = qpci_spapr_io_readw;
ret->bus.io_readl = qpci_spapr_io_readl;
ret->bus.io_writeb = qpci_spapr_io_writeb;
ret->bus.io_writew = qpci_spapr_io_writew;
ret->bus.io_writel = qpci_spapr_io_writel;
ret->bus.config_readb = qpci_spapr_config_readb;
ret->bus.config_readw = qpci_spapr_config_readw;
ret->bus.config_readl = qpci_spapr_config_readl;
ret->bus.config_writeb = qpci_spapr_config_writeb;
ret->bus.config_writew = qpci_spapr_config_writew;
ret->bus.config_writel = qpci_spapr_config_writel;
ret->bus.iomap = qpci_spapr_iomap;
ret->bus.iounmap = qpci_spapr_iounmap;
/* FIXME: We assume the default location of the PHB for now.
* Ideally we'd parse the device tree deposited in the guest to
* get the window locations */
ret->buid = 0x800000020000000ULL;
ret->pio_cpu_base = SPAPR_PCI_WINDOW_BASE + SPAPR_PCI_IO_WIN_OFF;
ret->pio.pci_base = 0;
ret->pio.size = SPAPR_PCI_IO_WIN_SIZE;
/* 32-bit portion of the MMIO window is at PCI address 2..4 GiB */
ret->mmio32_cpu_base = SPAPR_PCI_WINDOW_BASE + SPAPR_PCI_MMIO32_WIN_OFF;
ret->mmio32.pci_base = 0x80000000; /* 2 GiB */
ret->mmio32.size = SPAPR_PCI_MMIO32_WIN_SIZE;
ret->pci_hole_start = 0xC0000000;
ret->pci_hole_size =
ret->mmio32.pci_base + ret->mmio32.size - ret->pci_hole_start;
ret->pci_hole_alloc = 0;
ret->pci_iohole_start = 0xc000;
ret->pci_iohole_size =
ret->pio.pci_base + ret->pio.size - ret->pci_iohole_start;
ret->pci_iohole_alloc = 0;
return &ret->bus;
}
| 22,611 |
FFmpeg | 2ce4f28431623cdde4aa496fd10430f6c7bdef63 | 1 | static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *data,
int jobnr, int threadnr)
{
VP56Context *s0 = avctx->priv_data;
int is_alpha = (jobnr == 1);
VP56Context *s = is_alpha ? s0->alpha_context : s0;
AVFrame *const p = s->frames[VP56_FRAME_CURRENT];
int mb_row, mb_col, mb_row_flip, mb_offset = 0;
int block, y, uv;
ptrdiff_t stride_y, stride_uv;
int res;
int damaged = 0;
if (p->key_frame) {
p->pict_type = AV_PICTURE_TYPE_I;
s->default_models_init(s);
for (block=0; block<s->mb_height*s->mb_width; block++)
s->macroblocks[block].type = VP56_MB_INTRA;
} else {
p->pict_type = AV_PICTURE_TYPE_P;
vp56_parse_mb_type_models(s);
s->parse_vector_models(s);
s->mb_type = VP56_MB_INTER_NOVEC_PF;
}
if (s->parse_coeff_models(s))
goto next;
memset(s->prev_dc, 0, sizeof(s->prev_dc));
s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
s->prev_dc[2][VP56_FRAME_CURRENT] = 128;
for (block=0; block < 4*s->mb_width+6; block++) {
s->above_blocks[block].ref_frame = VP56_FRAME_NONE;
s->above_blocks[block].dc_coeff = 0;
s->above_blocks[block].not_null_dc = 0;
}
s->above_blocks[2*s->mb_width + 2].ref_frame = VP56_FRAME_CURRENT;
s->above_blocks[3*s->mb_width + 4].ref_frame = VP56_FRAME_CURRENT;
stride_y = p->linesize[0];
stride_uv = p->linesize[1];
if (s->flip < 0)
mb_offset = 7;
/* main macroblocks loop */
for (mb_row=0; mb_row<s->mb_height; mb_row++) {
if (s->flip < 0)
mb_row_flip = s->mb_height - mb_row - 1;
else
mb_row_flip = mb_row;
for (block=0; block<4; block++) {
s->left_block[block].ref_frame = VP56_FRAME_NONE;
s->left_block[block].dc_coeff = 0;
s->left_block[block].not_null_dc = 0;
}
memset(s->coeff_ctx, 0, sizeof(s->coeff_ctx));
memset(s->coeff_ctx_last, 24, sizeof(s->coeff_ctx_last));
s->above_block_idx[0] = 1;
s->above_block_idx[1] = 2;
s->above_block_idx[2] = 1;
s->above_block_idx[3] = 2;
s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
s->above_block_idx[5] = 3*s->mb_width + 4 + 1;
s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
s->block_offset[1] = s->block_offset[0] + 8;
s->block_offset[3] = s->block_offset[2] + 8;
s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
s->block_offset[5] = s->block_offset[4];
for (mb_col=0; mb_col<s->mb_width; mb_col++) {
if (!damaged) {
int ret = vp56_decode_mb(s, mb_row, mb_col, is_alpha);
if (ret < 0)
damaged = 1;
}
if (damaged)
vp56_conceal_mb(s, mb_row, mb_col, is_alpha);
for (y=0; y<4; y++) {
s->above_block_idx[y] += 2;
s->block_offset[y] += 16;
}
for (uv=4; uv<6; uv++) {
s->above_block_idx[uv] += 1;
s->block_offset[uv] += 8;
}
}
}
next:
if (p->key_frame || s->golden_frame) {
av_frame_unref(s->frames[VP56_FRAME_GOLDEN]);
if ((res = av_frame_ref(s->frames[VP56_FRAME_GOLDEN], p)) < 0)
return res;
}
av_frame_unref(s->frames[VP56_FRAME_PREVIOUS]);
FFSWAP(AVFrame *, s->frames[VP56_FRAME_CURRENT],
s->frames[VP56_FRAME_PREVIOUS]);
return 0;
}
| 22,612 |