project (stringclasses, 2 values) | commit_id (stringlengths, 40) | target (int64, 0-1) | func (stringlengths, 26-142k) | idx (int64, 0-27.3k) |
---|---|---|---|---|
FFmpeg | 82f19afefe4f28db0e2eefbc2d06eee0def74a53 | 0 | static int parse_tag(AVFormatContext *s, const uint8_t *buf)
{
int genre;
if (!(buf[0] == 'T' &&
buf[1] == 'A' &&
buf[2] == 'G'))
return -1;
get_string(s, "title", buf + 3, 30);
get_string(s, "artist", buf + 33, 30);
get_string(s, "album", buf + 63, 30);
get_string(s, "date", buf + 93, 4);
get_string(s, "comment", buf + 97, 30);
if (buf[125] == 0 && buf[126] != 0)
av_metadata_set2(&s->metadata, "track", av_d2str(buf[126]), AV_METADATA_DONT_STRDUP_VAL);
genre = buf[127];
if (genre <= ID3v1_GENRE_MAX)
av_metadata_set2(&s->metadata, "genre", ff_id3v1_genre_str[genre], 0);
return 0;
}
| 18,752 |
qemu | 6baebed7698a37a0ac5168faf26023426b0ac940 | 1 | static void vnc_update(VncState *vs, int x, int y, int w, int h)
{
int i;
h += y;
/* round x down to ensure the loop only spans one 16-pixel block per
iteration. otherwise, if (x % 16) != 0, the last iteration may span
two 16-pixel blocks but we only mark the first as dirty
*/
w += (x % 16);
x -= (x % 16);
x = MIN(x, vs->serverds.width);
y = MIN(y, vs->serverds.height);
w = MIN(x + w, vs->serverds.width) - x;
h = MIN(h, vs->serverds.height);
for (; y < h; y++)
for (i = 0; i < w; i += 16)
vnc_set_bit(vs->dirty_row[y], (x + i) / 16);
}
| 18,754 |
qemu | 2c993ec294893af31deed27e5d79610ce71642e1 | 1 | int qemu_ftruncate64(int fd, int64_t length)
{
LARGE_INTEGER li;
LONG high;
HANDLE h;
BOOL res;
if ((GetVersion() & 0x80000000UL) && (length >> 32) != 0)
return -1;
h = (HANDLE)_get_osfhandle(fd);
/* get current position, ftruncate do not change position */
li.HighPart = 0;
li.LowPart = SetFilePointer (h, 0, &li.HighPart, FILE_CURRENT);
if (li.LowPart == 0xffffffffUL && GetLastError() != NO_ERROR)
return -1;
high = length >> 32;
if (!SetFilePointer(h, (DWORD) length, &high, FILE_BEGIN))
return -1;
res = SetEndOfFile(h);
/* back to old position */
SetFilePointer(h, li.LowPart, &li.HighPart, FILE_BEGIN);
return res ? 0 : -1;
}
| 18,755 |
qemu | e0dadc1e9ef1f35208e5d2af9c7740c18a0b769f | 1 | I2CBus *aux_get_i2c_bus(AUXBus *bus)
{
return aux_bridge_get_i2c_bus(bus->bridge);
}
| 18,757 |
FFmpeg | 276df9d8210121f6d59b7876e1724ebe7a11f5e5 | 1 | static void lame_window_init(AacPsyContext *ctx, AVCodecContext *avctx) {
int i;
for (i = 0; i < avctx->channels; i++) {
AacPsyChannel *pch = &ctx->ch[i];
if (avctx->flags & CODEC_FLAG_QSCALE)
pch->attack_threshold = psy_vbr_map[avctx->global_quality / FF_QP2LAMBDA].st_lrm;
else
pch->attack_threshold = lame_calc_attack_threshold(avctx->bit_rate / avctx->channels / 1000);
for (i = 0; i < AAC_NUM_BLOCKS_SHORT * PSY_LAME_NUM_SUBBLOCKS; i++)
pch->prev_energy_subshort[i] = 10.0f;
}
}
| 18,758 |
qemu | e4f4fb1eca795e36f363b4647724221e774523c1 | 1 | static void pflash_cfi01_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = pflash_cfi01_realize;
dc->props = pflash_cfi01_properties;
dc->vmsd = &vmstate_pflash;
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
} | 18,760 |
FFmpeg | 8c2515bbb9800a3cdcc28aa19133302ba5f2da45 | 1 | static void mc_block(uint8_t *dst, uint8_t *src, uint8_t *tmp, int stride, int b_w, int b_h, int dx, int dy){
int x, y;
START_TIMER
for(y=0; y < b_h+5; y++){
for(x=0; x < b_w; x++){
int a0= src[x ];
int a1= src[x + 1];
int a2= src[x + 2];
int a3= src[x + 3];
int a4= src[x + 4];
int a5= src[x + 5];
// int am= 9*(a1+a2) - (a0+a3);
int am= 20*(a2+a3) - 5*(a1+a4) + (a0+a5);
// int am= 18*(a2+a3) - 2*(a1+a4);
// int aL= (-7*a0 + 105*a1 + 35*a2 - 5*a3)>>3;
// int aR= (-7*a3 + 105*a2 + 35*a1 - 5*a0)>>3;
// if(b_w==16) am= 8*(a1+a2);
if(dx<8) tmp[x]= (32*a2*( 8-dx) + am* dx + 128)>>8;
else tmp[x]= ( am*(16-dx) + 32*a3*(dx-8) + 128)>>8;
/* if (dx< 4) tmp[x + y*stride]= (16*a1*( 4-dx) + aL* dx + 32)>>6;
else if(dx< 8) tmp[x + y*stride]= ( aL*( 8-dx) + am*(dx- 4) + 32)>>6;
else if(dx<12) tmp[x + y*stride]= ( am*(12-dx) + aR*(dx- 8) + 32)>>6;
else tmp[x + y*stride]= ( aR*(16-dx) + 16*a2*(dx-12) + 32)>>6;*/
}
tmp += stride;
src += stride;
}
tmp -= (b_h+5)*stride;
for(y=0; y < b_h; y++){
for(x=0; x < b_w; x++){
int a0= tmp[x + 0*stride];
int a1= tmp[x + 1*stride];
int a2= tmp[x + 2*stride];
int a3= tmp[x + 3*stride];
int a4= tmp[x + 4*stride];
int a5= tmp[x + 5*stride];
int am= 20*(a2+a3) - 5*(a1+a4) + (a0+a5);
// int am= 18*(a2+a3) - 2*(a1+a4);
/* int aL= (-7*a0 + 105*a1 + 35*a2 - 5*a3)>>3;
int aR= (-7*a3 + 105*a2 + 35*a1 - 5*a0)>>3;*/
// if(b_w==16) am= 8*(a1+a2);
if(dy<8) dst[x]= (32*a2*( 8-dy) + am* dy + 128)>>8;
else dst[x]= ( am*(16-dy) + 32*a3*(dy-8) + 128)>>8;
/* if (dy< 4) tmp[x + y*stride]= (16*a1*( 4-dy) + aL* dy + 32)>>6;
else if(dy< 8) tmp[x + y*stride]= ( aL*( 8-dy) + am*(dy- 4) + 32)>>6;
else if(dy<12) tmp[x + y*stride]= ( am*(12-dy) + aR*(dy- 8) + 32)>>6;
else tmp[x + y*stride]= ( aR*(16-dy) + 16*a2*(dy-12) + 32)>>6;*/
}
dst += stride;
tmp += stride;
}
STOP_TIMER("mc_block")
}
| 18,761 |
qemu | 405a27640b33c31ccef4001b3f3936b8c9d2218f | 1 | static BlockDriverAIOCB *rbd_start_aio(BlockDriverState *bs,
int64_t sector_num,
QEMUIOVector *qiov,
int nb_sectors,
BlockDriverCompletionFunc *cb,
void *opaque,
RBDAIOCmd cmd)
{
RBDAIOCB *acb;
RADOSCB *rcb;
rbd_completion_t c;
int64_t off, size;
char *buf;
int r;
BDRVRBDState *s = bs->opaque;
acb = qemu_aio_get(&rbd_aiocb_info, bs, cb, opaque);
acb->cmd = cmd;
acb->qiov = qiov;
if (cmd == RBD_AIO_DISCARD || cmd == RBD_AIO_FLUSH) {
acb->bounce = NULL;
} else {
acb->bounce = qemu_blockalign(bs, qiov->size);
}
acb->ret = 0;
acb->error = 0;
acb->s = s;
acb->cancelled = 0;
acb->bh = NULL;
acb->status = -EINPROGRESS;
if (cmd == RBD_AIO_WRITE) {
qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
}
buf = acb->bounce;
off = sector_num * BDRV_SECTOR_SIZE;
size = nb_sectors * BDRV_SECTOR_SIZE;
rcb = g_malloc(sizeof(RADOSCB));
rcb->done = 0;
rcb->acb = acb;
rcb->buf = buf;
rcb->s = acb->s;
rcb->size = size;
r = rbd_aio_create_completion(rcb, (rbd_callback_t) rbd_finish_aiocb, &c);
if (r < 0) {
goto failed;
}
switch (cmd) {
case RBD_AIO_WRITE:
r = rbd_aio_write(s->image, off, size, buf, c);
break;
case RBD_AIO_READ:
r = rbd_aio_read(s->image, off, size, buf, c);
break;
case RBD_AIO_DISCARD:
r = rbd_aio_discard_wrapper(s->image, off, size, c);
break;
case RBD_AIO_FLUSH:
r = rbd_aio_flush_wrapper(s->image, c);
break;
default:
r = -EINVAL;
}
if (r < 0) {
goto failed;
}
return &acb->common;
failed:
g_free(rcb);
qemu_aio_release(acb);
return NULL;
}
| 18,762 |
FFmpeg | e3d2500fe498289a878b956f6efb4995438c9515 | 1 | static inline void RENAME(initFilter)(int16_t *filter, int16_t *filterPos, int *filterSize, int xInc,
int srcW, int dstW, int filterAlign, int one)
{
int i;
#ifdef HAVE_MMX
asm volatile("emms\n\t"::: "memory"); //FIXME this shouldnt be required but it IS (even for non mmx versions)
#endif
if(ABS(xInc - 0x10000) <10) // unscaled
{
int i;
*filterSize= (1 +(filterAlign-1)) & (~(filterAlign-1)); // 1 or 4 normaly
for(i=0; i<dstW*(*filterSize); i++) filter[i]=0;
for(i=0; i<dstW; i++)
{
filter[i*(*filterSize)]=1;
filterPos[i]=i;
}
}
else if(xInc <= (1<<16) || sws_flags==SWS_FAST_BILINEAR) // upscale
{
int i;
int xDstInSrc;
if(sws_flags==SWS_BICUBIC) *filterSize= 4;
else *filterSize= 2;
// printf("%d %d %d\n", filterSize, srcW, dstW);
*filterSize= (*filterSize +(filterAlign-1)) & (~(filterAlign-1));
xDstInSrc= xInc - 0x8000;
for(i=0; i<dstW; i++)
{
int xx= (xDstInSrc>>16) - (*filterSize>>1) + 1;
int j;
filterPos[i]= xx;
if(sws_flags == SWS_BICUBIC)
{
double d= ABS(((xx+1)<<16) - xDstInSrc)/(double)(1<<16);
// int coeff;
int y1,y2,y3,y4;
double A= -0.75;
// Equation is from VirtualDub
y1 = (int)floor(0.5 + ( + A*d - 2.0*A*d*d + A*d*d*d) * 16384.0);
y2 = (int)floor(0.5 + (+ 1.0 - (A+3.0)*d*d + (A+2.0)*d*d*d) * 16384.0);
y3 = (int)floor(0.5 + ( - A*d + (2.0*A+3.0)*d*d - (A+2.0)*d*d*d) * 16384.0);
y4 = (int)floor(0.5 + ( + A*d*d - A*d*d*d) * 16384.0);
// printf("%d %d %d \n", coeff, (int)d, xDstInSrc);
filter[i*(*filterSize) + 0]= y1;
filter[i*(*filterSize) + 1]= y2;
filter[i*(*filterSize) + 2]= y3;
filter[i*(*filterSize) + 3]= y4;
// printf("%1.3f %d, %d, %d, %d\n",d , y1, y2, y3, y4);
}
else
{
for(j=0; j<*filterSize; j++)
{
double d= ABS((xx<<16) - xDstInSrc)/(double)(1<<16);
int coeff;
coeff= (int)(0.5 + (1.0 - d)*(1<<14));
if(coeff<0) coeff=0;
// printf("%d %d %d \n", coeff, (int)d, xDstInSrc);
filter[i*(*filterSize) + j]= coeff;
xx++;
}
}
xDstInSrc+= xInc;
}
}
else // downscale
{
int xDstInSrc;
if(sws_flags==SWS_BICUBIC) *filterSize= (int)ceil(1 + 4.0*srcW / (double)dstW);
else *filterSize= (int)ceil(1 + 2.0*srcW / (double)dstW);
// printf("%d %d %d\n", *filterSize, srcW, dstW);
*filterSize= (*filterSize +(filterAlign-1)) & (~(filterAlign-1));
xDstInSrc= xInc - 0x8000;
for(i=0; i<dstW; i++)
{
int xx= (int)((double)xDstInSrc/(double)(1<<16) - *filterSize*0.5 + 0.5);
int j;
filterPos[i]= xx;
for(j=0; j<*filterSize; j++)
{
double d= ABS((xx<<16) - xDstInSrc)/(double)xInc;
int coeff;
if(sws_flags == SWS_BICUBIC)
{
double A= -0.75;
// d*=2;
// Equation is from VirtualDub
if(d<1.0)
coeff = (int)floor(0.5 + (1.0 - (A+3.0)*d*d
+ (A+2.0)*d*d*d) * (1<<14));
else if(d<2.0)
coeff = (int)floor(0.5 + (-4.0*A + 8.0*A*d
- 5.0*A*d*d + A*d*d*d) * (1<<14));
else
coeff=0;
}
else
{
coeff= (int)(0.5 + (1.0 - d)*(1<<14));
if(coeff<0) coeff=0;
}
// if(filterAlign==1) printf("%d %d %d \n", coeff, (int)d, xDstInSrc);
filter[i*(*filterSize) + j]= coeff;
xx++;
}
xDstInSrc+= xInc;
}
}
//fix borders
for(i=0; i<dstW; i++)
{
int j;
if(filterPos[i] < 0)
{
// Move filter coeffs left to compensate for filterPos
for(j=1; j<*filterSize; j++)
{
int left= MAX(j + filterPos[i], 0);
filter[i*(*filterSize) + left] += filter[i*(*filterSize) + j];
filter[i*(*filterSize) + j]=0;
}
filterPos[i]= 0;
}
if(filterPos[i] + *filterSize > srcW)
{
int shift= filterPos[i] + *filterSize - srcW;
// Move filter coeffs right to compensate for filterPos
for(j=*filterSize-2; j>=0; j--)
{
int right= MIN(j + shift, *filterSize-1);
filter[i*(*filterSize) +right] += filter[i*(*filterSize) +j];
filter[i*(*filterSize) +j]=0;
}
filterPos[i]= srcW - *filterSize;
}
}
//FIXME try to align filterpos if possible / try to shift filterpos to put zeros at the end
// and skip these than later
//Normalize
for(i=0; i<dstW; i++)
{
int j;
double sum=0;
double scale= one;
for(j=0; j<*filterSize; j++)
{
sum+= filter[i*(*filterSize) + j];
}
scale/= sum;
for(j=0; j<*filterSize; j++)
{
filter[i*(*filterSize) + j]= (int)(filter[i*(*filterSize) + j]*scale);
}
}
}
| 18,763 |
FFmpeg | a736eb4a605f46d5ff96c7b32e55710ecd9cce89 | 1 | static void latm_write_frame_header(AVFormatContext *s, PutBitContext *bs)
{
LATMContext *ctx = s->priv_data;
AVCodecContext *avctx = s->streams[0]->codec;
GetBitContext gb;
int header_size;
/* AudioMuxElement */
put_bits(bs, 1, !!ctx->counter);
if (!ctx->counter) {
init_get_bits(&gb, avctx->extradata, avctx->extradata_size * 8);
/* StreamMuxConfig */
put_bits(bs, 1, 0); /* audioMuxVersion */
put_bits(bs, 1, 1); /* allStreamsSameTimeFraming */
put_bits(bs, 6, 0); /* numSubFrames */
put_bits(bs, 4, 0); /* numProgram */
put_bits(bs, 3, 0); /* numLayer */
/* AudioSpecificConfig */
if (ctx->object_type == AOT_ALS) {
header_size = avctx->extradata_size-(ctx->off + 7) >> 3;
avpriv_copy_bits(bs, &avctx->extradata[ctx->off], header_size);
} else {
avpriv_copy_bits(bs, avctx->extradata, ctx->off + 3);
if (!ctx->channel_conf) {
avpriv_copy_pce_data(bs, &gb);
}
}
put_bits(bs, 3, 0); /* frameLengthType */
put_bits(bs, 8, 0xff); /* latmBufferFullness */
put_bits(bs, 1, 0); /* otherDataPresent */
put_bits(bs, 1, 0); /* crcCheckPresent */
}
ctx->counter++;
ctx->counter %= ctx->mod;
}
| 18,764 |
FFmpeg | b89f4fb1908f26d2704b9496952131fffd4dafae | 1 | static int decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TM2Context * const l = avctx->priv_data;
AVFrame * const p= (AVFrame*)&l->pic;
int i, skip, t;
uint8_t *swbuf;
swbuf = av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
if(!swbuf){
av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
return -1;
}
p->reference = 1;
p->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
if(avctx->reget_buffer(avctx, p) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
av_free(swbuf);
return -1;
}
l->dsp.bswap_buf((uint32_t*)swbuf, (const uint32_t*)buf, buf_size >> 2);
skip = tm2_read_header(l, swbuf);
if(skip == -1){
av_free(swbuf);
return -1;
}
for(i = 0; i < TM2_NUM_STREAMS; i++){
t = tm2_read_stream(l, swbuf + skip, tm2_stream_order[i]);
if(t == -1){
av_free(swbuf);
return -1;
}
skip += t;
}
p->key_frame = tm2_decode_blocks(l, p);
if(p->key_frame)
p->pict_type = FF_I_TYPE;
else
p->pict_type = FF_P_TYPE;
l->cur = !l->cur;
*data_size = sizeof(AVFrame);
*(AVFrame*)data = l->pic;
av_free(swbuf);
return buf_size;
}
| 18,765 |
FFmpeg | 28f9ab7029bd1a02f659995919f899f84ee7361b | 0 | static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
{
int x, y;
int *bounding_values= s->bounding_values_array+127;
int width = s->fragment_width[!!plane];
int height = s->fragment_height[!!plane];
int fragment = s->fragment_start [plane] + ystart * width;
int stride = s->current_frame.linesize[plane];
uint8_t *plane_data = s->current_frame.data [plane];
if (!s->flipped_image) stride = -stride;
plane_data += s->data_offset[plane] + 8*ystart*stride;
for (y = ystart; y < yend; y++) {
for (x = 0; x < width; x++) {
/* This code basically just deblocks on the edges of coded blocks.
* However, it has to be much more complicated because of the
* braindamaged deblock ordering used in VP3/Theora. Order matters
* because some pixels get filtered twice. */
if( s->all_fragments[fragment].coding_method != MODE_COPY )
{
/* do not perform left edge filter for left columns frags */
if (x > 0) {
s->dsp.vp3_h_loop_filter(
plane_data + 8*x,
stride, bounding_values);
}
/* do not perform top edge filter for top row fragments */
if (y > 0) {
s->dsp.vp3_v_loop_filter(
plane_data + 8*x,
stride, bounding_values);
}
/* do not perform right edge filter for right column
* fragments or if right fragment neighbor is also coded
* in this frame (it will be filtered in next iteration) */
if ((x < width - 1) &&
(s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
s->dsp.vp3_h_loop_filter(
plane_data + 8*x + 8,
stride, bounding_values);
}
/* do not perform bottom edge filter for bottom row
* fragments or if bottom fragment neighbor is also coded
* in this frame (it will be filtered in the next row) */
if ((y < height - 1) &&
(s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
s->dsp.vp3_v_loop_filter(
plane_data + 8*x + 8*stride,
stride, bounding_values);
}
}
fragment++;
}
plane_data += 8*stride;
}
}
| 18,767 |
FFmpeg | 69619a13c3fef940cba545cf0a283ff22771dd71 | 1 | static int matroska_probe(AVProbeData *p)
{
uint64_t total = 0;
int len_mask = 0x80, size = 1, n = 1, i;
/* EBML header? */
if (AV_RB32(p->buf) != EBML_ID_HEADER)
return 0;
/* length of header */
total = p->buf[4];
while (size <= 8 && !(total & len_mask)) {
size++;
len_mask >>= 1;
}
if (size > 8)
return 0;
total &= (len_mask - 1);
while (n < size)
total = (total << 8) | p->buf[4 + n++];
/* Does the probe data contain the whole header? */
if (p->buf_size < 4 + size + total)
return 0;
/* The header should contain a known document type. For now,
* we don't parse the whole header but simply check for the
* availability of that array of characters inside the header.
* Not fully fool-proof, but good enough. */
for (i = 0; i < FF_ARRAY_ELEMS(matroska_doctypes); i++) {
int probelen = strlen(matroska_doctypes[i]);
for (n = 4+size; n <= 4+size+total-probelen; n++)
if (!memcmp(p->buf+n, matroska_doctypes[i], probelen))
return AVPROBE_SCORE_MAX;
}
// probably valid EBML header but no recognized doctype
return AVPROBE_SCORE_MAX/2;
} | 18,768 |
FFmpeg | 7f526efd17973ec6d2204f7a47b6923e2be31363 | 1 | void palette8torgb32(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette)
{
unsigned i;
/*
for(i=0; i<num_pixels; i++)
((unsigned *)dst)[i] = ((unsigned *)palette)[ src[i] ];
*/
for(i=0; i<num_pixels; i++)
{
#ifdef WORDS_BIGENDIAN
dst[3]= palette[ src[i]*4+2 ];
dst[2]= palette[ src[i]*4+1 ];
dst[1]= palette[ src[i]*4+0 ];
#else
//FIXME slow?
dst[0]= palette[ src[i]*4+2 ];
dst[1]= palette[ src[i]*4+1 ];
dst[2]= palette[ src[i]*4+0 ];
// dst[3]= 0; /* do we need this cleansing? */
#endif
dst+= 4;
}
}
| 18,769 |
qemu | 0426d53c6530606bf7641b83f2b755fe61c280ee | 1 | void visit_get_next_type(Visitor *v, int *obj, const int *qtypes,
const char *name, Error **errp)
{
if (v->get_next_type) {
v->get_next_type(v, obj, qtypes, name, errp);
}
}
| 18,770 |
qemu | 10eacda787ac9990dc22d4437b289200c819712c | 1 | static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
    /* Return true if it is not valid for us to switch to
     * this CPU mode (ie all the UNPREDICTABLE cases in
     * the ARM ARM CPSRWriteByInstr pseudocode).
     */
    /* Changes to or from Hyp via MSR and CPS are illegal. */
    if (write_type == CPSRWriteByInstr &&
        ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
         mode == ARM_CPU_MODE_HYP)) {
        return 1;
    }
    switch (mode) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_SYS:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_FIQ:
        /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
         * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
         */
        return 0;
    case ARM_CPU_MODE_HYP:
        return !arm_feature(env, ARM_FEATURE_EL2)
            || arm_current_el(env) < 2 || arm_is_secure(env);
    case ARM_CPU_MODE_MON:
        return arm_current_el(env) < 3;
    default:
        return 1;
    }
}
| 18,771 |
FFmpeg | fd34dbea58e097609ff09cf7dcc59f74930195d3 | 1 | static int mxf_read_source_clip(void *arg, AVIOContext *pb, int tag, int size, UID uid)
{
MXFStructuralComponent *source_clip = arg;
switch(tag) {
case 0x0202:
source_clip->duration = avio_rb64(pb);
break;
case 0x1201:
source_clip->start_position = avio_rb64(pb);
break;
case 0x1101:
/* UMID, only get last 16 bytes */
avio_skip(pb, 16);
avio_read(pb, source_clip->source_package_uid, 16);
break;
case 0x1102:
source_clip->source_track_id = avio_rb32(pb);
break;
}
return 0;
}
| 18,772 |
FFmpeg | a956840cbcf89d709c4bd5980808ac0b5c8aeedf | 1 | static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
char buf[1024];
AVBPrint buf_script;
OutputStream *ost;
AVFormatContext *oc;
int64_t total_size;
AVCodecContext *enc;
int frame_number, vid, i;
double bitrate;
double speed;
int64_t pts = INT64_MIN + 1;
static int64_t last_time = -1;
static int qp_histogram[52];
int hours, mins, secs, us;
float t;
if (!print_stats && !is_last_report && !progress_avio)
return;
if (!is_last_report) {
if (last_time == -1) {
last_time = cur_time;
return;
}
if ((cur_time - last_time) < 500000)
return;
last_time = cur_time;
}
t = (cur_time-timer_start) / 1000000.0;
oc = output_files[0]->ctx;
total_size = avio_size(oc->pb);
if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
total_size = avio_tell(oc->pb);
buf[0] = '\0';
vid = 0;
av_bprint_init(&buf_script, 0, 1);
for (i = 0; i < nb_output_streams; i++) {
float q = -1;
ost = output_streams[i];
enc = ost->enc_ctx;
if (!ost->stream_copy)
q = ost->quality / (float) FF_QP2LAMBDA;
if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
ost->file_index, ost->index, q);
}
if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
float fps;
frame_number = ost->frame_number;
fps = t > 1 ? frame_number / t : 0;
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
frame_number, fps < 9.95, fps, q);
av_bprintf(&buf_script, "frame=%d\n", frame_number);
av_bprintf(&buf_script, "fps=%.1f\n", fps);
av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
ost->file_index, ost->index, q);
if (is_last_report)
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
if (qp_hist) {
int j;
int qp = lrintf(q);
if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
qp_histogram[qp]++;
for (j = 0; j < 32; j++)
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
}
if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
int j;
double error, error_sum = 0;
double scale, scale_sum = 0;
double p;
char type[3] = { 'Y','U','V' };
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
for (j = 0; j < 3; j++) {
if (is_last_report) {
error = enc->error[j];
scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
} else {
error = ost->error[j];
scale = enc->width * enc->height * 255.0 * 255.0;
}
if (j)
scale /= 4;
error_sum += error;
scale_sum += scale;
p = psnr(error / scale);
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
ost->file_index, ost->index, type[j] | 32, p);
}
p = psnr(error_sum / scale_sum);
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
ost->file_index, ost->index, p);
}
vid = 1;
}
/* compute min output value */
if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
ost->st->time_base, AV_TIME_BASE_Q));
if (is_last_report)
nb_frames_drop += ost->last_dropped;
}
secs = FFABS(pts) / AV_TIME_BASE;
us = FFABS(pts) % AV_TIME_BASE;
mins = secs / 60;
secs %= 60;
hours = mins / 60;
mins %= 60;
bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
"size=N/A time=");
else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
"size=%8.0fkB time=", total_size / 1024.0);
if (pts < 0)
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
"%02d:%02d:%02d.%02d ", hours, mins, secs,
(100 * us) / AV_TIME_BASE);
if (bitrate < 0) {
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
av_bprintf(&buf_script, "bitrate=N/A\n");
}else{
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
}
if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
hours, mins, secs, us);
if (nb_frames_dup || nb_frames_drop)
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
nb_frames_dup, nb_frames_drop);
av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
if (speed < 0) {
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
av_bprintf(&buf_script, "speed=N/A\n");
} else {
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
}
if (print_stats || is_last_report) {
const char end = is_last_report ? '\n' : '\r';
if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
fprintf(stderr, "%s %c", buf, end);
} else
av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
fflush(stderr);
}
if (progress_avio) {
av_bprintf(&buf_script, "progress=%s\n",
is_last_report ? "end" : "continue");
avio_write(progress_avio, buf_script.str,
FFMIN(buf_script.len, buf_script.size - 1));
avio_flush(progress_avio);
av_bprint_finalize(&buf_script, NULL);
if (is_last_report) {
avio_closep(&progress_avio);
}
}
if (is_last_report)
print_final_stats(total_size);
}
| 18,774 |
qemu | 9897e462645fb8d91a4cef2a1ea3a74cc9149c13 | 1 | static int htab_load(QEMUFile *f, void *opaque, int version_id)
{
sPAPRMachineState *spapr = opaque;
uint32_t section_hdr;
int fd = -1;
if (version_id < 1 || version_id > 1) {
error_report("htab_load() bad version");
return -EINVAL;
}
section_hdr = qemu_get_be32(f);
if (section_hdr) {
Error *local_err;
/* First section gives the htab size */
spapr_reallocate_hpt(spapr, section_hdr, &local_err);
if (local_err) {
error_report_err(local_err);
return -EINVAL;
}
return 0;
}
if (!spapr->htab) {
assert(kvm_enabled());
fd = kvmppc_get_htab_fd(true);
if (fd < 0) {
error_report("Unable to open fd to restore KVM hash table: %s",
strerror(errno));
}
}
while (true) {
uint32_t index;
uint16_t n_valid, n_invalid;
index = qemu_get_be32(f);
n_valid = qemu_get_be16(f);
n_invalid = qemu_get_be16(f);
if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
/* End of Stream */
break;
}
if ((index + n_valid + n_invalid) >
(HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
/* Bad index in stream */
error_report(
"htab_load() bad index %d (%hd+%hd entries) in htab stream (htab_shift=%d)",
index, n_valid, n_invalid, spapr->htab_shift);
return -EINVAL;
}
if (spapr->htab) {
if (n_valid) {
qemu_get_buffer(f, HPTE(spapr->htab, index),
HASH_PTE_SIZE_64 * n_valid);
}
if (n_invalid) {
memset(HPTE(spapr->htab, index + n_valid), 0,
HASH_PTE_SIZE_64 * n_invalid);
}
} else {
int rc;
assert(fd >= 0);
rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid);
if (rc < 0) {
return rc;
}
}
}
if (!spapr->htab) {
assert(fd >= 0);
close(fd);
}
return 0;
}
| 18,775 |
FFmpeg | 753074721bd414874d18c372c491bdc6323fa3bf | 0 | static av_cold void dump_enc_cfg(AVCodecContext *avctx,
const struct vpx_codec_enc_cfg *cfg)
{
int width = -30;
int level = AV_LOG_DEBUG;
av_log(avctx, level, "vpx_codec_enc_cfg\n");
av_log(avctx, level, "generic settings\n"
" %*s%u\n %*s%u\n %*s%u\n %*s%u\n %*s%u\n"
#if CONFIG_LIBVPX_VP9_ENCODER && defined(VPX_IMG_FMT_HIGHBITDEPTH)
" %*s%u\n %*s%u\n"
#endif
" %*s{%u/%u}\n %*s%u\n %*s%d\n %*s%u\n",
width, "g_usage:", cfg->g_usage,
width, "g_threads:", cfg->g_threads,
width, "g_profile:", cfg->g_profile,
width, "g_w:", cfg->g_w,
width, "g_h:", cfg->g_h,
#if CONFIG_LIBVPX_VP9_ENCODER && defined(VPX_IMG_FMT_HIGHBITDEPTH)
width, "g_bit_depth:", cfg->g_bit_depth,
width, "g_input_bit_depth:", cfg->g_input_bit_depth,
#endif
width, "g_timebase:", cfg->g_timebase.num, cfg->g_timebase.den,
width, "g_error_resilient:", cfg->g_error_resilient,
width, "g_pass:", cfg->g_pass,
width, "g_lag_in_frames:", cfg->g_lag_in_frames);
av_log(avctx, level, "rate control settings\n"
" %*s%u\n %*s%u\n %*s%u\n %*s%u\n"
" %*s%d\n %*s%p(%"SIZE_SPECIFIER")\n %*s%u\n",
width, "rc_dropframe_thresh:", cfg->rc_dropframe_thresh,
width, "rc_resize_allowed:", cfg->rc_resize_allowed,
width, "rc_resize_up_thresh:", cfg->rc_resize_up_thresh,
width, "rc_resize_down_thresh:", cfg->rc_resize_down_thresh,
width, "rc_end_usage:", cfg->rc_end_usage,
width, "rc_twopass_stats_in:", cfg->rc_twopass_stats_in.buf, cfg->rc_twopass_stats_in.sz,
width, "rc_target_bitrate:", cfg->rc_target_bitrate);
av_log(avctx, level, "quantizer settings\n"
" %*s%u\n %*s%u\n",
width, "rc_min_quantizer:", cfg->rc_min_quantizer,
width, "rc_max_quantizer:", cfg->rc_max_quantizer);
av_log(avctx, level, "bitrate tolerance\n"
" %*s%u\n %*s%u\n",
width, "rc_undershoot_pct:", cfg->rc_undershoot_pct,
width, "rc_overshoot_pct:", cfg->rc_overshoot_pct);
av_log(avctx, level, "decoder buffer model\n"
" %*s%u\n %*s%u\n %*s%u\n",
width, "rc_buf_sz:", cfg->rc_buf_sz,
width, "rc_buf_initial_sz:", cfg->rc_buf_initial_sz,
width, "rc_buf_optimal_sz:", cfg->rc_buf_optimal_sz);
av_log(avctx, level, "2 pass rate control settings\n"
" %*s%u\n %*s%u\n %*s%u\n",
width, "rc_2pass_vbr_bias_pct:", cfg->rc_2pass_vbr_bias_pct,
width, "rc_2pass_vbr_minsection_pct:", cfg->rc_2pass_vbr_minsection_pct,
width, "rc_2pass_vbr_maxsection_pct:", cfg->rc_2pass_vbr_maxsection_pct);
av_log(avctx, level, "keyframing settings\n"
" %*s%d\n %*s%u\n %*s%u\n",
width, "kf_mode:", cfg->kf_mode,
width, "kf_min_dist:", cfg->kf_min_dist,
width, "kf_max_dist:", cfg->kf_max_dist);
av_log(avctx, level, "\n");
}
| 18,777 |
FFmpeg | b754978a3b0aa17e7794f64c69bf4491762797fd | 0 | static int mpegps_read_packet(AVFormatContext *s,
AVPacket *pkt)
{
AVStream *st;
int len, startcode, i, type, codec_id;
int64_t pts, dts;
redo:
len = mpegps_read_pes_header(s, NULL, &startcode, &pts, &dts, 1);
if (len < 0)
return len;
/* now find stream */
for(i=0;i<s->nb_streams;i++) {
st = s->streams[i];
if (st->id == startcode)
goto found;
}
if (startcode >= 0x1e0 && startcode <= 0x1ef) {
type = CODEC_TYPE_VIDEO;
codec_id = CODEC_ID_MPEG2VIDEO;
} else if (startcode >= 0x1c0 && startcode <= 0x1df) {
type = CODEC_TYPE_AUDIO;
codec_id = CODEC_ID_MP2;
} else if (startcode >= 0x80 && startcode <= 0x9f) {
type = CODEC_TYPE_AUDIO;
codec_id = CODEC_ID_AC3;
} else if (startcode >= 0xa0 && startcode <= 0xbf) {
type = CODEC_TYPE_AUDIO;
codec_id = CODEC_ID_PCM_S16BE;
} else {
skip:
/* skip packet */
url_fskip(&s->pb, len);
goto redo;
}
/* no stream found: add a new stream */
st = av_new_stream(s, startcode);
if (!st)
goto skip;
st->codec.codec_type = type;
st->codec.codec_id = codec_id;
if (codec_id != CODEC_ID_PCM_S16BE)
st->need_parsing = 1;
found:
if (startcode >= 0xa0 && startcode <= 0xbf) {
int b1, freq;
/* for LPCM, we just skip the header and consider it is raw
audio data */
if (len <= 3)
goto skip;
get_byte(&s->pb); /* emphasis (1), muse(1), reserved(1), frame number(5) */
b1 = get_byte(&s->pb); /* quant (2), freq(2), reserved(1), channels(3) */
get_byte(&s->pb); /* dynamic range control (0x80 = off) */
len -= 3;
freq = (b1 >> 4) & 3;
st->codec.sample_rate = lpcm_freq_tab[freq];
st->codec.channels = 1 + (b1 & 7);
st->codec.bit_rate = st->codec.channels * st->codec.sample_rate * 2;
}
av_new_packet(pkt, len);
get_buffer(&s->pb, pkt->data, pkt->size);
pkt->pts = pts;
pkt->dts = dts;
pkt->stream_index = st->index;
#if 0
printf("%d: pts=%0.3f dts=%0.3f\n",
pkt->stream_index, pkt->pts / 90000.0, pkt->dts / 90000.0);
#endif
return 0;
}
| 18,778 |
FFmpeg | 0058584580b87feb47898e60e4b80c7f425882ad | 0 | static void get_downmix_coeffs(AC3DecodeContext *ctx)
{
int from = ctx->bsi.acmod;
int to = ctx->output;
float clev = clevs[ctx->bsi.cmixlev];
float slev = slevs[ctx->bsi.surmixlev];
ac3_audio_block *ab = &ctx->audio_block;
if (to == AC3_OUTPUT_UNMODIFIED)
return;
switch (from) {
case AC3_INPUT_DUALMONO:
switch (to) {
case AC3_OUTPUT_MONO:
case AC3_OUTPUT_STEREO: /* We Assume that sum of both mono channels is requested */
ab->chcoeffs[0] *= LEVEL_MINUS_6DB;
ab->chcoeffs[1] *= LEVEL_MINUS_6DB;
break;
}
break;
case AC3_INPUT_MONO:
switch (to) {
case AC3_OUTPUT_STEREO:
ab->chcoeffs[0] *= LEVEL_MINUS_3DB;
break;
}
break;
case AC3_INPUT_STEREO:
switch (to) {
case AC3_OUTPUT_MONO:
ab->chcoeffs[0] *= LEVEL_MINUS_3DB;
ab->chcoeffs[1] *= LEVEL_MINUS_3DB;
break;
}
break;
case AC3_INPUT_3F:
switch (to) {
case AC3_OUTPUT_MONO:
ab->chcoeffs[0] *= LEVEL_MINUS_3DB;
ab->chcoeffs[2] *= LEVEL_MINUS_3DB;
ab->chcoeffs[1] *= clev * LEVEL_PLUS_3DB;
break;
case AC3_OUTPUT_STEREO:
ab->chcoeffs[1] *= clev;
break;
}
break;
case AC3_INPUT_2F_1R:
switch (to) {
case AC3_OUTPUT_MONO:
ab->chcoeffs[0] *= LEVEL_MINUS_3DB;
ab->chcoeffs[1] *= LEVEL_MINUS_3DB;
ab->chcoeffs[2] *= slev * LEVEL_MINUS_3DB;
break;
case AC3_OUTPUT_STEREO:
ab->chcoeffs[2] *= slev * LEVEL_MINUS_3DB;
break;
case AC3_OUTPUT_DOLBY:
ab->chcoeffs[2] *= LEVEL_MINUS_3DB;
break;
}
break;
case AC3_INPUT_3F_1R:
switch (to) {
case AC3_OUTPUT_MONO:
ab->chcoeffs[0] *= LEVEL_MINUS_3DB;
ab->chcoeffs[2] *= LEVEL_MINUS_3DB;
ab->chcoeffs[1] *= clev * LEVEL_PLUS_3DB;
ab->chcoeffs[3] *= slev * LEVEL_MINUS_3DB;
break;
case AC3_OUTPUT_STEREO:
ab->chcoeffs[1] *= clev;
ab->chcoeffs[3] *= slev * LEVEL_MINUS_3DB;
break;
case AC3_OUTPUT_DOLBY:
ab->chcoeffs[1] *= LEVEL_MINUS_3DB;
ab->chcoeffs[3] *= LEVEL_MINUS_3DB;
break;
}
break;
case AC3_INPUT_2F_2R:
switch (to) {
case AC3_OUTPUT_MONO:
ab->chcoeffs[0] *= LEVEL_MINUS_3DB;
ab->chcoeffs[1] *= LEVEL_MINUS_3DB;
ab->chcoeffs[2] *= slev * LEVEL_MINUS_3DB;
ab->chcoeffs[3] *= slev * LEVEL_MINUS_3DB;
break;
case AC3_OUTPUT_STEREO:
ab->chcoeffs[2] *= slev;
ab->chcoeffs[3] *= slev;
break;
case AC3_OUTPUT_DOLBY:
ab->chcoeffs[2] *= LEVEL_MINUS_3DB;
ab->chcoeffs[3] *= LEVEL_MINUS_3DB;
break;
}
break;
case AC3_INPUT_3F_2R:
switch (to) {
case AC3_OUTPUT_MONO:
ab->chcoeffs[0] *= LEVEL_MINUS_3DB;
ab->chcoeffs[2] *= LEVEL_MINUS_3DB;
ab->chcoeffs[1] *= clev * LEVEL_PLUS_3DB;
ab->chcoeffs[3] *= slev * LEVEL_MINUS_3DB;
ab->chcoeffs[4] *= slev * LEVEL_MINUS_3DB;
break;
case AC3_OUTPUT_STEREO:
ab->chcoeffs[1] *= clev;
ab->chcoeffs[3] *= slev;
ab->chcoeffs[4] *= slev;
break;
case AC3_OUTPUT_DOLBY:
ab->chcoeffs[1] *= LEVEL_MINUS_3DB;
ab->chcoeffs[3] *= LEVEL_MINUS_3DB;
ab->chcoeffs[4] *= LEVEL_MINUS_3DB;
break;
}
break;
}
}
| 18,779 |
FFmpeg | 659d4ba5af5d72716ee370bb367c741bd15e75b4 | 0 | static av_cold void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx,
int mm_flags)
{
const int high_bit_depth = avctx->bits_per_raw_sample > 8;
#if HAVE_INLINE_ASM
c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
if (!high_bit_depth) {
c->clear_block = clear_block_mmx;
c->clear_blocks = clear_blocks_mmx;
c->draw_edges = draw_edges_mmx;
SET_HPEL_FUNCS(put, [0], 16, mmx);
SET_HPEL_FUNCS(put_no_rnd, [0], 16, mmx);
SET_HPEL_FUNCS(avg, [0], 16, mmx);
SET_HPEL_FUNCS(avg_no_rnd, , 16, mmx);
SET_HPEL_FUNCS(put, [1], 8, mmx);
SET_HPEL_FUNCS(put_no_rnd, [1], 8, mmx);
SET_HPEL_FUNCS(avg, [1], 8, mmx);
switch (avctx->idct_algo) {
case FF_IDCT_AUTO:
case FF_IDCT_SIMPLEMMX:
c->idct_put = ff_simple_idct_put_mmx;
c->idct_add = ff_simple_idct_add_mmx;
c->idct = ff_simple_idct_mmx;
c->idct_permutation_type = FF_SIMPLE_IDCT_PERM;
break;
case FF_IDCT_XVIDMMX:
c->idct_put = ff_idct_xvid_mmx_put;
c->idct_add = ff_idct_xvid_mmx_add;
c->idct = ff_idct_xvid_mmx;
break;
}
}
c->gmc = gmc_mmx;
c->add_bytes = add_bytes_mmx;
if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
c->h263_v_loop_filter = h263_v_loop_filter_mmx;
c->h263_h_loop_filter = h263_h_loop_filter_mmx;
}
#endif /* HAVE_INLINE_ASM */
#if HAVE_YASM
c->vector_clip_int32 = ff_vector_clip_int32_mmx;
#endif
}
| 18,780 |
qemu | 273a2142176098fe2c27f263d86ad66b133b43cb | 0 | static int pci_nic_uninit(PCIDevice *dev)
{
PCIEEPRO100State *d = DO_UPCAST(PCIEEPRO100State, dev, dev);
EEPRO100State *s = &d->eepro100;
cpu_unregister_io_memory(s->mmio_index);
return 0;
}
| 18,782 |
qemu | b9bec74bcb16519a876ec21cd5277c526a9b512d | 0 | static int kvm_getput_regs(CPUState *env, int set)
{
struct kvm_regs regs;
int ret = 0;
if (!set) {
ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
if (ret < 0)
return ret;
}
kvm_getput_reg(®s.rax, &env->regs[R_EAX], set);
kvm_getput_reg(®s.rbx, &env->regs[R_EBX], set);
kvm_getput_reg(®s.rcx, &env->regs[R_ECX], set);
kvm_getput_reg(®s.rdx, &env->regs[R_EDX], set);
kvm_getput_reg(®s.rsi, &env->regs[R_ESI], set);
kvm_getput_reg(®s.rdi, &env->regs[R_EDI], set);
kvm_getput_reg(®s.rsp, &env->regs[R_ESP], set);
kvm_getput_reg(®s.rbp, &env->regs[R_EBP], set);
#ifdef TARGET_X86_64
kvm_getput_reg(®s.r8, &env->regs[8], set);
kvm_getput_reg(®s.r9, &env->regs[9], set);
kvm_getput_reg(®s.r10, &env->regs[10], set);
kvm_getput_reg(®s.r11, &env->regs[11], set);
kvm_getput_reg(®s.r12, &env->regs[12], set);
kvm_getput_reg(®s.r13, &env->regs[13], set);
kvm_getput_reg(®s.r14, &env->regs[14], set);
kvm_getput_reg(®s.r15, &env->regs[15], set);
#endif
kvm_getput_reg(®s.rflags, &env->eflags, set);
kvm_getput_reg(®s.rip, &env->eip, set);
if (set)
ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, ®s);
return ret;
}
| 18,783 |
qemu | b3db211f3c80bb996a704d665fe275619f728bd4 | 0 | static void visitor_reset(TestOutputVisitorData *data)
{
visitor_output_teardown(data, NULL);
visitor_output_setup(data, NULL);
}
| 18,784 |
qemu | 8bd7f71d794b93ce027b856f5b79a98f4f82e44c | 0 | static int qemu_reset_requested(void)
{
int r = reset_requested;
reset_requested = 0;
return r;
}
| 18,785 |
qemu | 83479e770d31e171232a82f4eee7dab06d3b219c | 0 | void cpu_interrupt(CPUState *s)
{
s->interrupt_request = 1;
}
| 18,786 |
qemu | bd269ebc82fbaa5fe7ce5bc7c1770ac8acecd884 | 0 | static void test_io_channel_setup_async(SocketAddressLegacy *listen_addr,
SocketAddressLegacy *connect_addr,
QIOChannel **src,
QIOChannel **dst)
{
QIOChannelSocket *lioc;
struct TestIOChannelData data;
data.loop = g_main_loop_new(g_main_context_default(),
TRUE);
lioc = qio_channel_socket_new();
qio_channel_socket_listen_async(
lioc, listen_addr,
test_io_channel_complete, &data, NULL);
g_main_loop_run(data.loop);
g_main_context_iteration(g_main_context_default(), FALSE);
g_assert(!data.err);
if (listen_addr->type == SOCKET_ADDRESS_LEGACY_KIND_INET) {
SocketAddressLegacy *laddr = qio_channel_socket_get_local_address(
lioc, &error_abort);
g_free(connect_addr->u.inet.data->port);
connect_addr->u.inet.data->port = g_strdup(laddr->u.inet.data->port);
qapi_free_SocketAddressLegacy(laddr);
}
*src = QIO_CHANNEL(qio_channel_socket_new());
qio_channel_socket_connect_async(
QIO_CHANNEL_SOCKET(*src), connect_addr,
test_io_channel_complete, &data, NULL);
g_main_loop_run(data.loop);
g_main_context_iteration(g_main_context_default(), FALSE);
g_assert(!data.err);
qio_channel_wait(QIO_CHANNEL(lioc), G_IO_IN);
*dst = QIO_CHANNEL(qio_channel_socket_accept(lioc, &error_abort));
g_assert(*dst);
qio_channel_set_delay(*src, false);
test_io_channel_set_socket_bufs(*src, *dst);
object_unref(OBJECT(lioc));
g_main_loop_unref(data.loop);
}
| 18,787 |
qemu | 245f7b51c0ea04fb2224b1127430a096c91aee70 | 0 | static int send_palette_rect(VncState *vs, int w, int h, struct QDict *palette)
{
int stream = 2;
int level = tight_conf[vs->tight_compression].idx_zlib_level;
int colors;
size_t bytes;
colors = qdict_size(palette);
vnc_write_u8(vs, (stream | VNC_TIGHT_EXPLICIT_FILTER) << 4);
vnc_write_u8(vs, VNC_TIGHT_FILTER_PALETTE);
vnc_write_u8(vs, colors - 1);
switch(vs->clientds.pf.bytes_per_pixel) {
case 4:
{
size_t old_offset, offset;
uint32_t header[qdict_size(palette)];
struct palette_cb_priv priv = { vs, (uint8_t *)header };
old_offset = vs->output.offset;
qdict_iter(palette, write_palette, &priv);
vnc_write(vs, header, sizeof(header));
if (vs->tight_pixel24) {
tight_pack24(vs, vs->output.buffer + old_offset, colors, &offset);
vs->output.offset = old_offset + offset;
}
tight_encode_indexed_rect32(vs->tight.buffer, w * h, palette);
break;
}
case 2:
{
uint16_t header[qdict_size(palette)];
struct palette_cb_priv priv = { vs, (uint8_t *)header };
qdict_iter(palette, write_palette, &priv);
vnc_write(vs, header, sizeof(header));
tight_encode_indexed_rect16(vs->tight.buffer, w * h, palette);
break;
}
default:
return -1; /* No palette for 8bits colors */
break;
}
bytes = w * h;
vs->tight.offset = bytes;
bytes = tight_compress_data(vs, stream, bytes,
level, Z_DEFAULT_STRATEGY);
return (bytes >= 0);
}
| 18,788 |
qemu | 47d3df2387ed6927732584ffa4159c26d9f4dee8 | 0 | static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
return &shared_page->vcpu_iodata[vcpu].vp_ioreq;
}
| 18,789 |
qemu | 026aeffcb4752054830ba203020ed6eb05bcaba8 | 0 | static void cirrus_bitblt_fill_nop(CirrusVGAState *s,
uint8_t *dst,
int dstpitch, int bltwidth,int bltheight)
{
}
| 18,790 |
FFmpeg | 3176217c60ca7828712985092d9102d331ea4f3d | 0 | static int copy_parameter_set(void **to, void **from, int count, int size)
{
int i;
for (i = 0; i < count; i++) {
if (to[i] && !from[i]) {
av_freep(&to[i]);
} else if (from[i] && !to[i]) {
to[i] = av_malloc(size);
if (!to[i])
return AVERROR(ENOMEM);
}
if (from[i])
memcpy(to[i], from[i], size);
}
return 0;
}
| 18,791 |
qemu | 9e41bade85ef338afd983c109368d1bbbe931f80 | 0 | static int i2c_slave_qdev_init(DeviceState *dev)
{
I2CSlave *s = I2C_SLAVE(dev);
I2CSlaveClass *sc = I2C_SLAVE_GET_CLASS(s);
return sc->init(s);
}
| 18,792 |
qemu | dd4d607e40dcd2cb7646b510504880a70939d91b | 0 | static void vtd_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n)
{
VTDAddressSpace *vtd_as = container_of(mr, VTDAddressSpace, iommu);
IntelIOMMUState *s = vtd_as->iommu_state;
uint8_t bus_n = pci_bus_num(vtd_as->bus);
VTDContextEntry ce;
if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
/*
* Scanned a valid context entry, walk over the pages and
* notify when needed.
*/
trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn),
PCI_FUNC(vtd_as->devfn),
VTD_CONTEXT_ENTRY_DID(ce.hi),
ce.hi, ce.lo);
vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n);
} else {
trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
PCI_FUNC(vtd_as->devfn));
}
return;
}
| 18,793 |
qemu | 710aec915d208246891b68e2ba61b54951edc508 | 0 | static void monitor_protocol_emitter(Monitor *mon, QObject *data,
QError *err)
{
QDict *qmp;
trace_monitor_protocol_emitter(mon);
if (!err) {
/* success response */
qmp = qdict_new();
if (data) {
qobject_incref(data);
qdict_put_obj(qmp, "return", data);
} else {
/* return an empty QDict by default */
qdict_put(qmp, "return", qdict_new());
}
} else {
/* error response */
qmp = build_qmp_error_dict(err);
}
if (mon->mc->id) {
qdict_put_obj(qmp, "id", mon->mc->id);
mon->mc->id = NULL;
}
monitor_json_emitter(mon, QOBJECT(qmp));
QDECREF(qmp);
}
| 18,794 |
qemu | 428c3ece97179557f2753071fb0ca97a03437267 | 0 | static int msi_msix_setup(XenPCIPassthroughState *s,
uint64_t addr,
uint32_t data,
int *ppirq,
bool is_msix,
int msix_entry,
bool is_not_mapped)
{
uint8_t gvec = msi_vector(data);
int rc = 0;
assert((!is_msix && msix_entry == 0) || is_msix);
if (gvec == 0) {
/* if gvec is 0, the guest is asking for a particular pirq that
* is passed as dest_id */
*ppirq = msi_ext_dest_id(addr >> 32) | msi_dest_id(addr);
if (!*ppirq) {
/* this probably identifies an misconfiguration of the guest,
* try the emulated path */
*ppirq = XEN_PT_UNASSIGNED_PIRQ;
} else {
XEN_PT_LOG(&s->dev, "requested pirq %d for MSI%s"
" (vec: %#x, entry: %#x)\n",
*ppirq, is_msix ? "-X" : "", gvec, msix_entry);
}
}
if (is_not_mapped) {
uint64_t table_base = 0;
if (is_msix) {
table_base = s->msix->table_base;
}
rc = xc_physdev_map_pirq_msi(xen_xc, xen_domid, XEN_PT_AUTO_ASSIGN,
ppirq, PCI_DEVFN(s->real_device.dev,
s->real_device.func),
s->real_device.bus,
msix_entry, table_base);
if (rc) {
XEN_PT_ERR(&s->dev,
"Mapping of MSI%s (err: %i, vec: %#x, entry %#x)\n",
is_msix ? "-X" : "", errno, gvec, msix_entry);
return rc;
}
}
return 0;
}
| 18,796 |
qemu | 0cd09c3a6cc2230ba38c462fc410b4acce59eb6f | 0 | void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
int i, config_size = 0;
host_features |= (1 << VIRTIO_NET_F_MAC);
for (i = 0; feature_sizes[i].flags != 0; i++) {
if (host_features & feature_sizes[i].flags) {
config_size = MAX(feature_sizes[i].end, config_size);
}
}
n->config_size = config_size;
}
| 18,797 |
qemu | 4d43d3f3c8147ade184df9a1e9e82826edd39e19 | 0 | static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
if (running) {
/* Try to find out if the guest has bus master disabled, but is
in ready state. Then we have a buggy guest OS. */
if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
!(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
}
virtio_pci_start_ioeventfd(proxy);
} else {
virtio_pci_stop_ioeventfd(proxy);
}
}
| 18,798 |
qemu | ef0e64a9838c0a20b5cb8a0bd2dcbcc59b0b812d | 0 | static void ide_dma_cb(void *opaque, int ret)
{
IDEState *s = opaque;
int n;
int64_t sector_num;
uint64_t offset;
bool stay_active = false;
if (ret == -ECANCELED) {
return;
}
if (ret < 0) {
if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
s->bus->dma->aiocb = NULL;
dma_buf_commit(s, 0);
return;
}
}
n = s->io_buffer_size >> 9;
if (n > s->nsector) {
/* The PRDs were longer than needed for this request. Shorten them so
* we don't get a negative remainder. The Active bit must remain set
* after the request completes. */
n = s->nsector;
stay_active = true;
}
sector_num = ide_get_sector(s);
if (n > 0) {
assert(n * 512 == s->sg.size);
dma_buf_commit(s, s->sg.size);
sector_num += n;
ide_set_sector(s, sector_num);
s->nsector -= n;
}
/* end of transfer ? */
if (s->nsector == 0) {
s->status = READY_STAT | SEEK_STAT;
ide_set_irq(s->bus);
goto eot;
}
/* launch next transfer */
n = s->nsector;
s->io_buffer_index = 0;
s->io_buffer_size = n * 512;
if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) {
/* The PRDs were too short. Reset the Active bit, but don't raise an
* interrupt. */
s->status = READY_STAT | SEEK_STAT;
dma_buf_commit(s, 0);
goto eot;
}
trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));
if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
!ide_sect_range_ok(s, sector_num, n)) {
ide_dma_error(s);
block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
return;
}
offset = sector_num << BDRV_SECTOR_BITS;
switch (s->dma_cmd) {
case IDE_DMA_READ:
s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
BDRV_SECTOR_SIZE, ide_dma_cb, s);
break;
case IDE_DMA_WRITE:
s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
BDRV_SECTOR_SIZE, ide_dma_cb, s);
break;
case IDE_DMA_TRIM:
s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
&s->sg, offset, BDRV_SECTOR_SIZE,
ide_issue_trim, s->blk, ide_dma_cb, s,
DMA_DIRECTION_TO_DEVICE);
break;
default:
abort();
}
return;
eot:
if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
block_acct_done(blk_get_stats(s->blk), &s->acct);
}
ide_set_inactive(s, stay_active);
}
| 18,800 |
qemu | 10c4c98ab7dc18169b37b76f6ea5e60ebe65222b | 0 | SSIBus *ssi_create_bus(DeviceState *parent, const char *name)
{
BusState *bus;
bus = qbus_create(BUS_TYPE_SSI, sizeof(SSIBus), parent, name);
return FROM_QBUS(SSIBus, bus);
}
| 18,801 |
FFmpeg | b1ade3d1821a29174963b28cd0caa5f7ed394998 | 0 | int ff_celp_lp_synthesis_filter(int16_t *out,
const int16_t* filter_coeffs,
const int16_t* in,
int buffer_length,
int filter_length,
int stop_on_overflow,
int rounder)
{
int i,n;
// Avoids a +1 in the inner loop.
filter_length++;
for (n = 0; n < buffer_length; n++) {
int sum = rounder;
for (i = 1; i < filter_length; i++)
sum -= filter_coeffs[i-1] * out[n-i];
sum = (sum >> 12) + in[n];
if (sum + 0x8000 > 0xFFFFU) {
if (stop_on_overflow)
return 1;
sum = (sum >> 31) ^ 32767;
}
out[n] = sum;
}
return 0;
}
| 18,802 |
FFmpeg | 65b83ce01b94eae944290c11257799a9288f2d4c | 0 | ff_rm_parse_packet (AVFormatContext *s, AVIOContext *pb,
AVStream *st, RMStream *ast, int len, AVPacket *pkt,
int *seq, int flags, int64_t timestamp)
{
RMDemuxContext *rm = s->priv_data;
int ret;
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
rm->current_stream= st->id;
ret = rm_assemble_video_frame(s, pb, rm, ast, pkt, len, seq, &timestamp);
if(ret)
return ret < 0 ? ret : -1; //got partial frame or error
} else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
if ((ast->deint_id == DEINT_ID_GENR) ||
(ast->deint_id == DEINT_ID_INT4) ||
(ast->deint_id == DEINT_ID_SIPR)) {
int x;
int sps = ast->sub_packet_size;
int cfs = ast->coded_framesize;
int h = ast->sub_packet_h;
int y = ast->sub_packet_cnt;
int w = ast->audio_framesize;
if (flags & 2)
y = ast->sub_packet_cnt = 0;
if (!y)
ast->audiotimestamp = timestamp;
switch (ast->deint_id) {
case DEINT_ID_INT4:
for (x = 0; x < h/2; x++)
readfull(s, pb, ast->pkt.data+x*2*w+y*cfs, cfs);
break;
case DEINT_ID_GENR:
for (x = 0; x < w/sps; x++)
readfull(s, pb, ast->pkt.data+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), sps);
break;
case DEINT_ID_SIPR:
readfull(s, pb, ast->pkt.data + y * w, w);
break;
}
if (++(ast->sub_packet_cnt) < h)
return -1;
if (ast->deint_id == DEINT_ID_SIPR)
ff_rm_reorder_sipr_data(ast->pkt.data, h, w);
ast->sub_packet_cnt = 0;
rm->audio_stream_num = st->index;
rm->audio_pkt_cnt = h * w / st->codec->block_align;
} else if ((ast->deint_id == DEINT_ID_VBRF) ||
(ast->deint_id == DEINT_ID_VBRS)) {
int x;
rm->audio_stream_num = st->index;
ast->sub_packet_cnt = (avio_rb16(pb) & 0xf0) >> 4;
if (ast->sub_packet_cnt) {
for (x = 0; x < ast->sub_packet_cnt; x++)
ast->sub_packet_lengths[x] = avio_rb16(pb);
rm->audio_pkt_cnt = ast->sub_packet_cnt;
ast->audiotimestamp = timestamp;
} else
return -1;
} else {
av_get_packet(pb, pkt, len);
rm_ac3_swap_bytes(st, pkt);
}
} else
av_get_packet(pb, pkt, len);
pkt->stream_index = st->index;
#if 0
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
if(st->codec->codec_id == AV_CODEC_ID_RV20){
int seq= 128*(pkt->data[2]&0x7F) + (pkt->data[3]>>1);
av_log(s, AV_LOG_DEBUG, "%d %"PRId64" %d\n", *timestamp, *timestamp*512LL/25, seq);
seq |= (timestamp&~0x3FFF);
if(seq - timestamp > 0x2000) seq -= 0x4000;
if(seq - timestamp < -0x2000) seq += 0x4000;
}
}
#endif
pkt->pts = timestamp;
if (flags & 2)
pkt->flags |= AV_PKT_FLAG_KEY;
return st->codec->codec_type == AVMEDIA_TYPE_AUDIO ? rm->audio_pkt_cnt : 0;
}
| 18,803 |
FFmpeg | 114315994bc08812eeef51eaace455d86b5098d5 | 0 | static int read_var_block_data(ALSDecContext *ctx, ALSBlockData *bd)
{
ALSSpecificConfig *sconf = &ctx->sconf;
AVCodecContext *avctx = ctx->avctx;
GetBitContext *gb = &ctx->gb;
unsigned int k;
unsigned int s[8];
unsigned int sx[8];
unsigned int sub_blocks, log2_sub_blocks, sb_length;
unsigned int start = 0;
unsigned int opt_order;
int sb;
int32_t *quant_cof = bd->quant_cof;
int32_t *current_res;
// ensure variable block decoding by reusing this field
bd->const_block = 0;
bd->opt_order = 1;
bd->js_blocks = get_bits1(gb);
opt_order = bd->opt_order;
// determine the number of subblocks for entropy decoding
if (!sconf->bgmc && !sconf->sb_part) {
log2_sub_blocks = 0;
} else {
if (sconf->bgmc && sconf->sb_part)
log2_sub_blocks = get_bits(gb, 2);
else
log2_sub_blocks = 2 * get_bits1(gb);
}
sub_blocks = 1 << log2_sub_blocks;
// do not continue in case of a damaged stream since
// block_length must be evenly divisible by sub_blocks
if (bd->block_length & (sub_blocks - 1)) {
av_log(avctx, AV_LOG_WARNING,
"Block length is not evenly divisible by the number of subblocks.\n");
return -1;
}
sb_length = bd->block_length >> log2_sub_blocks;
if (sconf->bgmc) {
s[0] = get_bits(gb, 8 + (sconf->resolution > 1));
for (k = 1; k < sub_blocks; k++)
s[k] = s[k - 1] + decode_rice(gb, 2);
for (k = 0; k < sub_blocks; k++) {
sx[k] = s[k] & 0x0F;
s [k] >>= 4;
}
} else {
s[0] = get_bits(gb, 4 + (sconf->resolution > 1));
for (k = 1; k < sub_blocks; k++)
s[k] = s[k - 1] + decode_rice(gb, 0);
}
if (get_bits1(gb))
bd->shift_lsbs = get_bits(gb, 4) + 1;
bd->store_prev_samples = (bd->js_blocks && bd->raw_other) || bd->shift_lsbs;
if (!sconf->rlslms) {
if (sconf->adapt_order) {
int opt_order_length = av_ceil_log2(av_clip((bd->block_length >> 3) - 1,
2, sconf->max_order + 1));
bd->opt_order = get_bits(gb, opt_order_length);
} else {
bd->opt_order = sconf->max_order;
}
opt_order = bd->opt_order;
if (opt_order) {
int add_base;
if (sconf->coef_table == 3) {
add_base = 0x7F;
// read coefficient 0
quant_cof[0] = 32 * parcor_scaled_values[get_bits(gb, 7)];
// read coefficient 1
if (opt_order > 1)
quant_cof[1] = -32 * parcor_scaled_values[get_bits(gb, 7)];
// read coefficients 2 to opt_order
for (k = 2; k < opt_order; k++)
quant_cof[k] = get_bits(gb, 7);
} else {
int k_max;
add_base = 1;
// read coefficient 0 to 19
k_max = FFMIN(opt_order, 20);
for (k = 0; k < k_max; k++) {
int rice_param = parcor_rice_table[sconf->coef_table][k][1];
int offset = parcor_rice_table[sconf->coef_table][k][0];
quant_cof[k] = decode_rice(gb, rice_param) + offset;
}
// read coefficients 20 to 126
k_max = FFMIN(opt_order, 127);
for (; k < k_max; k++)
quant_cof[k] = decode_rice(gb, 2) + (k & 1);
// read coefficients 127 to opt_order
for (; k < opt_order; k++)
quant_cof[k] = decode_rice(gb, 1);
quant_cof[0] = 32 * parcor_scaled_values[quant_cof[0] + 64];
if (opt_order > 1)
quant_cof[1] = -32 * parcor_scaled_values[quant_cof[1] + 64];
}
for (k = 2; k < opt_order; k++)
quant_cof[k] = (quant_cof[k] << 14) + (add_base << 13);
}
}
// read LTP gain and lag values
if (sconf->long_term_prediction) {
*bd->use_ltp = get_bits1(gb);
if (*bd->use_ltp) {
bd->ltp_gain[0] = decode_rice(gb, 1) << 3;
bd->ltp_gain[1] = decode_rice(gb, 2) << 3;
bd->ltp_gain[2] = ltp_gain_values[get_unary(gb, 0, 4)][get_bits(gb, 2)];
bd->ltp_gain[3] = decode_rice(gb, 2) << 3;
bd->ltp_gain[4] = decode_rice(gb, 1) << 3;
*bd->ltp_lag = get_bits(gb, ctx->ltp_lag_length);
*bd->ltp_lag += FFMAX(4, opt_order + 1);
}
}
// read first value and residuals in case of a random access block
if (bd->ra_block) {
if (opt_order)
bd->raw_samples[0] = decode_rice(gb, avctx->bits_per_raw_sample - 4);
if (opt_order > 1)
bd->raw_samples[1] = decode_rice(gb, s[0] + 3);
if (opt_order > 2)
bd->raw_samples[2] = decode_rice(gb, s[0] + 1);
start = FFMIN(opt_order, 3);
}
// read all residuals
if (sconf->bgmc) {
unsigned int delta[sub_blocks];
unsigned int k [sub_blocks];
unsigned int b = av_clip((av_ceil_log2(bd->block_length) - 3) >> 1, 0, 5);
unsigned int i = start;
// read most significant bits
unsigned int high;
unsigned int low;
unsigned int value;
ff_bgmc_decode_init(gb, &high, &low, &value);
current_res = bd->raw_samples + start;
for (sb = 0; sb < sub_blocks; sb++, i = 0) {
k [sb] = s[sb] > b ? s[sb] - b : 0;
delta[sb] = 5 - s[sb] + k[sb];
ff_bgmc_decode(gb, sb_length, current_res,
delta[sb], sx[sb], &high, &low, &value, ctx->bgmc_lut, ctx->bgmc_lut_status);
current_res += sb_length;
}
ff_bgmc_decode_end(gb);
// read least significant bits and tails
i = start;
current_res = bd->raw_samples + start;
for (sb = 0; sb < sub_blocks; sb++, i = 0) {
unsigned int cur_tail_code = tail_code[sx[sb]][delta[sb]];
unsigned int cur_k = k[sb];
unsigned int cur_s = s[sb];
for (; i < sb_length; i++) {
int32_t res = *current_res;
if (res == cur_tail_code) {
unsigned int max_msb = (2 + (sx[sb] > 2) + (sx[sb] > 10))
<< (5 - delta[sb]);
res = decode_rice(gb, cur_s);
if (res >= 0) {
res += (max_msb ) << cur_k;
} else {
res -= (max_msb - 1) << cur_k;
}
} else {
if (res > cur_tail_code)
res--;
if (res & 1)
res = -res;
res >>= 1;
if (cur_k) {
res <<= cur_k;
res |= get_bits_long(gb, cur_k);
}
}
*current_res++ = res;
}
}
} else {
current_res = bd->raw_samples + start;
for (sb = 0; sb < sub_blocks; sb++, start = 0)
for (; start < sb_length; start++)
*current_res++ = decode_rice(gb, s[sb]);
}
if (!sconf->mc_coding || ctx->js_switch)
align_get_bits(gb);
return 0;
}
| 18,804 |
FFmpeg | d1adad3cca407f493c3637e20ecd4f7124e69212 | 0 | static inline void RENAME(yuv2packed1)(SwsContext *c, const uint16_t *buf0, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y)
{
const int yalpha1=0;
int i;
const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
const int yalpha= 4096; //FIXME ...
if (flags&SWS_FULL_CHR_H_INT) {
c->yuv2packed2(c, buf0, buf0, uvbuf0, uvbuf1, abuf0, abuf0, dest, dstW, 0, uvalpha, y);
return;
}
#if COMPILE_TEMPLATE_MMX
if(!(flags & SWS_BITEXACT)) {
if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
switch(dstFormat) {
case PIX_FMT_RGB32:
if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1(%%REGBP, %5)
YSCALEYUV2RGB1_ALPHA(%%REGBP)
WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
} else {
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1(%%REGBP, %5)
"pcmpeqd %%mm7, %%mm7 \n\t"
WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
}
return;
case PIX_FMT_BGR24:
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1(%%REGBP, %5)
"pxor %%mm7, %%mm7 \n\t"
WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
return;
case PIX_FMT_RGB555:
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1(%%REGBP, %5)
"pxor %%mm7, %%mm7 \n\t"
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
"paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
"paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
WRITERGB15(%%REGb, 8280(%5), %%REGBP)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
return;
case PIX_FMT_RGB565:
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1(%%REGBP, %5)
"pxor %%mm7, %%mm7 \n\t"
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
"paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
"paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
WRITERGB16(%%REGb, 8280(%5), %%REGBP)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
return;
case PIX_FMT_YUYV422:
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2PACKED1(%%REGBP, %5)
WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
return;
}
} else {
switch(dstFormat) {
case PIX_FMT_RGB32:
if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1b(%%REGBP, %5)
YSCALEYUV2RGB1_ALPHA(%%REGBP)
WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
} else {
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1b(%%REGBP, %5)
"pcmpeqd %%mm7, %%mm7 \n\t"
WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
}
return;
case PIX_FMT_BGR24:
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1b(%%REGBP, %5)
"pxor %%mm7, %%mm7 \n\t"
WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
return;
case PIX_FMT_RGB555:
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1b(%%REGBP, %5)
"pxor %%mm7, %%mm7 \n\t"
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
"paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
"paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
WRITERGB15(%%REGb, 8280(%5), %%REGBP)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
return;
case PIX_FMT_RGB565:
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1b(%%REGBP, %5)
"pxor %%mm7, %%mm7 \n\t"
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
"paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
"paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
WRITERGB16(%%REGb, 8280(%5), %%REGBP)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
return;
case PIX_FMT_YUYV422:
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2PACKED1b(%%REGBP, %5)
WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
"a" (&c->redDither)
);
return;
}
}
}
#endif /* COMPILE_TEMPLATE_MMX */
if (uvalpha < 2048) {
YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C(void,0), YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
} else {
YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C(void,0), YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
}
}
| 18,805 |
FFmpeg | 77cb22fa7b3f632c16aa3d4e7aa7d47f9cd99f2c | 0 | static int flic_read_header(AVFormatContext *s,
AVFormatParameters *ap)
{
FlicDemuxContext *flic = s->priv_data;
ByteIOContext *pb = &s->pb;
unsigned char header[FLIC_HEADER_SIZE];
AVStream *st;
int speed;
int magic_number;
flic->pts = 0;
/* load the whole header and pull out the width and height */
if (get_buffer(pb, header, FLIC_HEADER_SIZE) != FLIC_HEADER_SIZE)
return AVERROR(EIO);
magic_number = AV_RL16(&header[4]);
speed = AV_RL32(&header[0x10]);
/* initialize the decoder streams */
st = av_new_stream(s, 0);
if (!st)
return AVERROR(ENOMEM);
flic->video_stream_index = st->index;
st->codec->codec_type = CODEC_TYPE_VIDEO;
st->codec->codec_id = CODEC_ID_FLIC;
st->codec->codec_tag = 0; /* no fourcc */
st->codec->width = AV_RL16(&header[0x08]);
st->codec->height = AV_RL16(&header[0x0A]);
if (!st->codec->width || !st->codec->height)
return AVERROR_INVALIDDATA;
/* send over the whole 128-byte FLIC header */
st->codec->extradata_size = FLIC_HEADER_SIZE;
st->codec->extradata = av_malloc(FLIC_HEADER_SIZE);
memcpy(st->codec->extradata, header, FLIC_HEADER_SIZE);
av_set_pts_info(st, 33, 1, 90000);
/* Time to figure out the framerate: If there is a FLIC chunk magic
* number at offset 0x10, assume this is from the Bullfrog game,
* Magic Carpet. */
if (AV_RL16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {
flic->frame_pts_inc = FLIC_MC_PTS_INC;
/* rewind the stream since the first chunk is at offset 12 */
url_fseek(pb, 12, SEEK_SET);
/* send over abbreviated FLIC header chunk */
av_free(st->codec->extradata);
st->codec->extradata_size = 12;
st->codec->extradata = av_malloc(12);
memcpy(st->codec->extradata, header, 12);
} else if (magic_number == FLIC_FILE_MAGIC_1) {
/*
* in this case, the speed (n) is number of 1/70s ticks between frames:
*
* pts n * frame #
* -------- = ----------- => pts = n * (90000/70) * frame #
* 90000 70
*
* therefore, the frame pts increment = n * 1285.7
*/
flic->frame_pts_inc = speed * 1285.7;
} else if ((magic_number == FLIC_FILE_MAGIC_2) ||
(magic_number == FLIC_FILE_MAGIC_3)) {
/*
* in this case, the speed (n) is number of milliseconds between frames:
*
* pts n * frame #
* -------- = ----------- => pts = n * 90 * frame #
* 90000 1000
*
* therefore, the frame pts increment = n * 90
*/
flic->frame_pts_inc = speed * 90;
} else {
av_log(s, AV_LOG_INFO, "Invalid or unsupported magic chunk in file\n");
return AVERROR_INVALIDDATA;
}
if (flic->frame_pts_inc == 0)
flic->frame_pts_inc = FLIC_DEFAULT_PTS_INC;
return 0;
}
| 18,807 |
FFmpeg | e87190f5d20d380608f792ceb14d0def1d80e24b | 0 | static void show_format(WriterContext *w, AVFormatContext *fmt_ctx)
{
char val_str[128];
int64_t size = fmt_ctx->pb ? avio_size(fmt_ctx->pb) : -1;
writer_print_section_header(w, SECTION_ID_FORMAT);
print_str("filename", fmt_ctx->filename);
print_int("nb_streams", fmt_ctx->nb_streams);
print_int("nb_programs", fmt_ctx->nb_programs);
print_str("format_name", fmt_ctx->iformat->name);
if (!do_bitexact) {
if (fmt_ctx->iformat->long_name) print_str ("format_long_name", fmt_ctx->iformat->long_name);
else print_str_opt("format_long_name", "unknown");
}
print_time("start_time", fmt_ctx->start_time, &AV_TIME_BASE_Q);
print_time("duration", fmt_ctx->duration, &AV_TIME_BASE_Q);
if (size >= 0) print_val ("size", size, unit_byte_str);
else print_str_opt("size", "N/A");
if (fmt_ctx->bit_rate > 0) print_val ("bit_rate", fmt_ctx->bit_rate, unit_bit_per_second_str);
else print_str_opt("bit_rate", "N/A");
print_int("probe_score", av_format_get_probe_score(fmt_ctx));
show_tags(w, fmt_ctx->metadata, SECTION_ID_FORMAT_TAGS);
writer_print_section_footer(w);
fflush(stdout);
}
| 18,808 |
FFmpeg | 150ddbc1482c65b9aac803f011d7fcd734f776ec | 0 | static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
{
int i, ret;
for (i = 0; i < nb_ostreams; i++) {
OutputStream *ost = &ost_table[i];
AVCodecContext *enc = ost->st->codec;
AVFormatContext *os = output_files[ost->file_index].ctx;
if (!ost->encoding_needed)
continue;
if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
continue;
if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
continue;
for(;;) {
AVPacket pkt;
int fifo_bytes;
av_init_packet(&pkt);
pkt.stream_index= ost->index;
switch (ost->st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
fifo_bytes = av_fifo_size(ost->fifo);
ret = 0;
/* encode any samples remaining in fifo */
if (fifo_bytes > 0) {
int osize = av_get_bytes_per_sample(enc->sample_fmt);
int fs_tmp = enc->frame_size;
av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
enc->frame_size = fifo_bytes / (osize * enc->channels);
} else { /* pad */
int frame_bytes = enc->frame_size*osize*enc->channels;
if (allocated_audio_buf_size < frame_bytes)
exit_program(1);
generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
}
ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf);
pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
ost->st->time_base.num, enc->sample_rate);
enc->frame_size = fs_tmp;
}
if (ret <= 0) {
ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
}
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
exit_program(1);
}
audio_size += ret;
pkt.flags |= AV_PKT_FLAG_KEY;
break;
case AVMEDIA_TYPE_VIDEO:
ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
exit_program(1);
}
video_size += ret;
if(enc->coded_frame && enc->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
if (ost->logfile && enc->stats_out) {
fprintf(ost->logfile, "%s", enc->stats_out);
}
break;
default:
ret=-1;
}
if (ret <= 0)
break;
pkt.data = bit_buffer;
pkt.size = ret;
if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters);
}
}
}
| 18,809 |
FFmpeg | 5dc47a2bd52e375ed742c45d08356b45098f458d | 0 | static int mkv_write_chapters(AVFormatContext *s)
{
MatroskaMuxContext *mkv = s->priv_data;
AVIOContext *pb = s->pb;
ebml_master chapters, editionentry;
AVRational scale = {1, 1E9};
int i, ret;
if (!s->nb_chapters || mkv->wrote_chapters)
return 0;
ret = mkv_add_seekhead_entry(mkv->main_seekhead, MATROSKA_ID_CHAPTERS, avio_tell(pb));
if (ret < 0) return ret;
chapters = start_ebml_master(pb, MATROSKA_ID_CHAPTERS , 0);
editionentry = start_ebml_master(pb, MATROSKA_ID_EDITIONENTRY, 0);
put_ebml_uint(pb, MATROSKA_ID_EDITIONFLAGDEFAULT, 1);
put_ebml_uint(pb, MATROSKA_ID_EDITIONFLAGHIDDEN , 0);
for (i = 0; i < s->nb_chapters; i++) {
ebml_master chapteratom, chapterdisplay;
AVChapter *c = s->chapters[i];
AVDictionaryEntry *t = NULL;
chapteratom = start_ebml_master(pb, MATROSKA_ID_CHAPTERATOM, 0);
put_ebml_uint(pb, MATROSKA_ID_CHAPTERUID, c->id);
put_ebml_uint(pb, MATROSKA_ID_CHAPTERTIMESTART,
av_rescale_q(c->start, c->time_base, scale));
put_ebml_uint(pb, MATROSKA_ID_CHAPTERTIMEEND,
av_rescale_q(c->end, c->time_base, scale));
put_ebml_uint(pb, MATROSKA_ID_CHAPTERFLAGHIDDEN , 0);
put_ebml_uint(pb, MATROSKA_ID_CHAPTERFLAGENABLED, 1);
if ((t = av_dict_get(c->metadata, "title", NULL, 0))) {
chapterdisplay = start_ebml_master(pb, MATROSKA_ID_CHAPTERDISPLAY, 0);
put_ebml_string(pb, MATROSKA_ID_CHAPSTRING, t->value);
put_ebml_string(pb, MATROSKA_ID_CHAPLANG , "und");
end_ebml_master(pb, chapterdisplay);
}
end_ebml_master(pb, chapteratom);
}
end_ebml_master(pb, editionentry);
end_ebml_master(pb, chapters);
mkv->wrote_chapters = 1;
return 0;
}
| 18,810 |
qemu | cd6a9bb6e977864b1b7ec21b983fa0678b4b82e9 | 1 | static uint64_t ppc_hash64_page_shift(ppc_slb_t *slb)
{
uint64_t epnshift;
/* Page size according to the SLB, which we use to generate the
* EPN for hash table lookup.. When we implement more recent MMU
* extensions this might be different from the actual page size
* encoded in the PTE */
if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_4K) {
epnshift = TARGET_PAGE_BITS;
} else if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_64K) {
epnshift = TARGET_PAGE_BITS_64K;
} else {
epnshift = TARGET_PAGE_BITS_16M;
}
return epnshift;
}
| 18,811 |
FFmpeg | 9a54c6f243412f62bae498ddcac337cb18ae6290 | 1 | static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata,
int jobnr, int threadnr, int is_vp7)
{
VP8Context *s = avctx->priv_data;
VP8ThreadData *td = &s->thread_data[threadnr];
int mb_x, mb_y = td->thread_mb_pos >> 16, num_jobs = s->num_jobs;
AVFrame *curframe = s->curframe->tf.f;
VP8Macroblock *mb;
VP8ThreadData *prev_td, *next_td;
uint8_t *dst[3] = {
curframe->data[0] + 16 * mb_y * s->linesize,
curframe->data[1] + 8 * mb_y * s->uvlinesize,
curframe->data[2] + 8 * mb_y * s->uvlinesize
};
if (s->mb_layout == 1)
mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
else
mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
if (mb_y == 0)
prev_td = td;
else
prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
if (mb_y == s->mb_height - 1)
next_td = td;
else
next_td = &s->thread_data[(jobnr + 1) % num_jobs];
for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
VP8FilterStrength *f = &td->filter_strength[mb_x];
if (prev_td != td)
check_thread_pos(td, prev_td,
(mb_x + 1) + (s->mb_width + 3), mb_y - 1);
if (next_td != td)
if (next_td != &s->thread_data[0])
check_thread_pos(td, next_td, mb_x + 1, mb_y + 1);
if (num_jobs == 1) {
if (s->filter.simple)
backup_mb_border(s->top_border[mb_x + 1], dst[0],
NULL, NULL, s->linesize, 0, 1);
else
backup_mb_border(s->top_border[mb_x + 1], dst[0],
dst[1], dst[2], s->linesize, s->uvlinesize, 0);
}
if (s->filter.simple)
filter_mb_simple(s, dst[0], f, mb_x, mb_y);
else
filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
dst[0] += 16;
dst[1] += 8;
dst[2] += 8;
update_pos(td, mb_y, (s->mb_width + 3) + mb_x);
}
}
| 18,813 |
FFmpeg | e5c01ccdf5a9a330d4c51a9b9ea721fd8f1fb70b | 0 | static int decode_frame(FLACContext *s)
{
int i, ret;
GetBitContext *gb = &s->gb;
FLACFrameInfo fi;
if ((ret = ff_flac_decode_frame_header(s->avctx, gb, &fi, 0)) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "invalid frame header\n");
return ret;
}
if (s->channels && fi.channels != s->channels && s->got_streaminfo) {
s->channels = s->avctx->channels = fi.channels;
ff_flac_set_channel_layout(s->avctx);
ret = allocate_buffers(s);
if (ret < 0)
return ret;
}
s->channels = s->avctx->channels = fi.channels;
if (!s->avctx->channel_layout)
ff_flac_set_channel_layout(s->avctx);
s->ch_mode = fi.ch_mode;
if (!s->bps && !fi.bps) {
av_log(s->avctx, AV_LOG_ERROR, "bps not found in STREAMINFO or frame header\n");
return AVERROR_INVALIDDATA;
}
if (!fi.bps) {
fi.bps = s->bps;
} else if (s->bps && fi.bps != s->bps) {
av_log(s->avctx, AV_LOG_ERROR, "switching bps mid-stream is not "
"supported\n");
return AVERROR_INVALIDDATA;
}
if (!s->bps) {
s->bps = s->avctx->bits_per_raw_sample = fi.bps;
flac_set_bps(s);
}
if (!s->max_blocksize)
s->max_blocksize = FLAC_MAX_BLOCKSIZE;
if (fi.blocksize > s->max_blocksize) {
av_log(s->avctx, AV_LOG_ERROR, "blocksize %d > %d\n", fi.blocksize,
s->max_blocksize);
return AVERROR_INVALIDDATA;
}
s->blocksize = fi.blocksize;
if (!s->samplerate && !fi.samplerate) {
av_log(s->avctx, AV_LOG_ERROR, "sample rate not found in STREAMINFO"
" or frame header\n");
return AVERROR_INVALIDDATA;
}
if (fi.samplerate == 0)
fi.samplerate = s->samplerate;
s->samplerate = s->avctx->sample_rate = fi.samplerate;
if (!s->got_streaminfo) {
ret = allocate_buffers(s);
if (ret < 0)
return ret;
ff_flacdsp_init(&s->dsp, s->avctx->sample_fmt, s->channels, s->bps);
s->got_streaminfo = 1;
dump_headers(s->avctx, (FLACStreaminfo *)s);
}
// dump_headers(s->avctx, (FLACStreaminfo *)s);
/* subframes */
for (i = 0; i < s->channels; i++) {
if ((ret = decode_subframe(s, i)) < 0)
return ret;
}
align_get_bits(gb);
/* frame footer */
skip_bits(gb, 16); /* data crc */
return 0;
}
| 18,815 |
qemu | aec4b054ea36c53c8b887da99f20010133b84378 | 1 | static void invalid_array_comma(void)
{
QObject *obj = qobject_from_json("[32,}", NULL);
g_assert(obj == NULL);
}
| 18,816 |
qemu | c508277335e3b6b20cf18e6ea3a35c1fa835c64a | 1 | static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val)
{
uint32_t events;
VMW_CBPRN("Clearing events: 0x%x", val);
events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) & ~val;
VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
}
| 18,817 |
qemu | 9d5614d582d23ec96b167583557bf3f25f64f050 | 1 | static void grlib_gptimer_enable(GPTimer *timer)
{
assert(timer != NULL);
ptimer_stop(timer->ptimer);
if (!(timer->config & GPTIMER_ENABLE)) {
/* Timer disabled */
trace_grlib_gptimer_disabled(timer->id, timer->config);
return;
}
    /* ptimer is triggered when the counter reaches 0, but GPTimer is triggered at
       underflow. Set count + 1 to simulate the GPTimer behavior. */
trace_grlib_gptimer_enable(timer->id, timer->counter + 1);
ptimer_set_count(timer->ptimer, timer->counter + 1);
ptimer_run(timer->ptimer, 1);
}
| 18,818 |
FFmpeg | 46191a2da16f751e53d93646ae1388d421d12bee | 1 | static int mov_read_mac_string(MOVContext *c, AVIOContext *pb, int len,
char *dst, int dstlen)
{
char *p = dst;
char *end = dst+dstlen-1;
int i;
for (i = 0; i < len; i++) {
uint8_t t, c = avio_r8(pb);
if (c < 0x80 && p < end)
*p++ = c;
else
PUT_UTF8(mac_to_unicode[c-0x80], t, if (p < end) *p++ = t;);
}
*p = 0;
return p - dst;
}
| 18,821 |
FFmpeg | 428098165de4c3edfe42c1b7f00627d287015863 | 1 | altivec_yuv2packedX (SwsContext *c,
int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
uint8_t *dest, int dstW, int dstY)
{
int i,j;
vector signed short X,X0,X1,Y0,U0,V0,Y1,U1,V1,U,V;
vector signed short R0,G0,B0,R1,G1,B1;
vector unsigned char R,G,B;
vector unsigned char *out,*nout;
vector signed short RND = vec_splat_s16(1<<3);
vector unsigned short SCL = vec_splat_u16(4);
unsigned long scratch[16] __attribute__ ((aligned (16)));
vector signed short *YCoeffs, *CCoeffs;
YCoeffs = c->vYCoeffsBank+dstY*lumFilterSize;
CCoeffs = c->vCCoeffsBank+dstY*chrFilterSize;
out = (vector unsigned char *)dest;
for(i=0; i<dstW; i+=16){
Y0 = RND;
Y1 = RND;
/* extract 16 coeffs from lumSrc */
for(j=0; j<lumFilterSize; j++) {
X0 = vec_ld (0, &lumSrc[j][i]);
X1 = vec_ld (16, &lumSrc[j][i]);
Y0 = vec_mradds (X0, YCoeffs[j], Y0);
Y1 = vec_mradds (X1, YCoeffs[j], Y1);
}
U = RND;
V = RND;
/* extract 8 coeffs from U,V */
for(j=0; j<chrFilterSize; j++) {
X = vec_ld (0, &chrSrc[j][i/2]);
U = vec_mradds (X, CCoeffs[j], U);
X = vec_ld (0, &chrSrc[j][i/2+2048]);
V = vec_mradds (X, CCoeffs[j], V);
}
/* scale and clip signals */
Y0 = vec_sra (Y0, SCL);
Y1 = vec_sra (Y1, SCL);
U = vec_sra (U, SCL);
V = vec_sra (V, SCL);
Y0 = vec_clip_s16 (Y0);
Y1 = vec_clip_s16 (Y1);
U = vec_clip_s16 (U);
V = vec_clip_s16 (V);
/* now we have
Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
U= u0 u1 u2 u3 u4 u5 u6 u7 V= v0 v1 v2 v3 v4 v5 v6 v7
Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7
V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7
*/
U0 = vec_mergeh (U,U);
V0 = vec_mergeh (V,V);
U1 = vec_mergel (U,U);
V1 = vec_mergel (V,V);
cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
R = vec_packclp (R0,R1);
G = vec_packclp (G0,G1);
B = vec_packclp (B0,B1);
switch(c->dstFormat) {
case PIX_FMT_ABGR: out_abgr (R,G,B,out); break;
case PIX_FMT_BGRA: out_bgra (R,G,B,out); break;
case PIX_FMT_RGBA: out_rgba (R,G,B,out); break;
case PIX_FMT_ARGB: out_argb (R,G,B,out); break;
case PIX_FMT_RGB24: out_rgb24 (R,G,B,out); break;
case PIX_FMT_BGR24: out_bgr24 (R,G,B,out); break;
default:
{
/* If this is reached, the caller should have called yuv2packedXinC
instead. */
static int printed_error_message;
if(!printed_error_message) {
av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n",
sws_format_name(c->dstFormat));
printed_error_message=1;
}
return;
}
}
}
if (i < dstW) {
i -= 16;
Y0 = RND;
Y1 = RND;
/* extract 16 coeffs from lumSrc */
for(j=0; j<lumFilterSize; j++) {
X0 = vec_ld (0, &lumSrc[j][i]);
X1 = vec_ld (16, &lumSrc[j][i]);
Y0 = vec_mradds (X0, YCoeffs[j], Y0);
Y1 = vec_mradds (X1, YCoeffs[j], Y1);
}
U = RND;
V = RND;
/* extract 8 coeffs from U,V */
for(j=0; j<chrFilterSize; j++) {
X = vec_ld (0, &chrSrc[j][i/2]);
U = vec_mradds (X, CCoeffs[j], U);
X = vec_ld (0, &chrSrc[j][i/2+2048]);
V = vec_mradds (X, CCoeffs[j], V);
}
/* scale and clip signals */
Y0 = vec_sra (Y0, SCL);
Y1 = vec_sra (Y1, SCL);
U = vec_sra (U, SCL);
V = vec_sra (V, SCL);
Y0 = vec_clip_s16 (Y0);
Y1 = vec_clip_s16 (Y1);
U = vec_clip_s16 (U);
V = vec_clip_s16 (V);
/* now we have
Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
U= u0 u1 u2 u3 u4 u5 u6 u7 V= v0 v1 v2 v3 v4 v5 v6 v7
Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7
V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7
*/
U0 = vec_mergeh (U,U);
V0 = vec_mergeh (V,V);
U1 = vec_mergel (U,U);
V1 = vec_mergel (V,V);
cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
R = vec_packclp (R0,R1);
G = vec_packclp (G0,G1);
B = vec_packclp (B0,B1);
nout = (vector unsigned char *)scratch;
switch(c->dstFormat) {
case PIX_FMT_ABGR: out_abgr (R,G,B,nout); break;
case PIX_FMT_BGRA: out_bgra (R,G,B,nout); break;
case PIX_FMT_RGBA: out_rgba (R,G,B,nout); break;
case PIX_FMT_ARGB: out_argb (R,G,B,nout); break;
case PIX_FMT_RGB24: out_rgb24 (R,G,B,nout); break;
case PIX_FMT_BGR24: out_bgr24 (R,G,B,nout); break;
default:
/* Unreachable, I think. */
av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n",
sws_format_name(c->dstFormat));
return;
}
memcpy (&((uint32_t*)dest)[i], scratch, (dstW-i)/4);
}
}
| 18,822 |
qemu | a2f31f180499593b5edb8ac5ab8ac1b92f0abcd4 | 1 | int unix_listen_opts(QemuOpts *opts, Error **errp)
{
struct sockaddr_un un;
const char *path = qemu_opt_get(opts, "path");
int sock, fd;
sock = qemu_socket(PF_UNIX, SOCK_STREAM, 0);
if (sock < 0) {
error_setg_errno(errp, errno, "Failed to create Unix socket");
return -1;
}
memset(&un, 0, sizeof(un));
un.sun_family = AF_UNIX;
if (path && strlen(path)) {
snprintf(un.sun_path, sizeof(un.sun_path), "%s", path);
} else {
const char *tmpdir = getenv("TMPDIR");
tmpdir = tmpdir ? tmpdir : "/tmp";
if (snprintf(un.sun_path, sizeof(un.sun_path), "%s/qemu-socket-XXXXXX",
tmpdir) >= sizeof(un.sun_path)) {
error_setg_errno(errp, errno,
"TMPDIR environment variable (%s) too large", tmpdir);
goto err;
}
/*
* This dummy fd usage silences the mktemp() unsecure warning.
* Using mkstemp() doesn't make things more secure here
* though. bind() complains about existing files, so we have
* to unlink first and thus re-open the race window. The
* worst case possible is bind() failing, i.e. a DoS attack.
*/
fd = mkstemp(un.sun_path);
if (fd < 0) {
error_setg_errno(errp, errno,
"Failed to make a temporary socket name in %s", tmpdir);
goto err;
}
close(fd);
qemu_opt_set(opts, "path", un.sun_path, &error_abort);
}
if ((access(un.sun_path, F_OK) == 0) &&
unlink(un.sun_path) < 0) {
error_setg_errno(errp, errno,
"Failed to unlink socket %s", un.sun_path);
goto err;
}
if (bind(sock, (struct sockaddr*) &un, sizeof(un)) < 0) {
error_setg_errno(errp, errno, "Failed to bind socket to %s", un.sun_path);
goto err;
}
if (listen(sock, 1) < 0) {
error_setg_errno(errp, errno, "Failed to listen on socket");
goto err;
}
return sock;
err:
closesocket(sock);
return -1;
}
| 18,823 |
FFmpeg | 3ab9a2a5577d445252724af4067d2a7c8a378efa | 1 | static void rv40_h_strong_loop_filter(uint8_t *src, const int stride,
const int alpha, const int lims,
const int dmode, const int chroma)
{
rv40_strong_loop_filter(src, stride, 1, alpha, lims, dmode, chroma);
}
| 18,824 |
FFmpeg | e6aed5f404a0983b2971c90e9022f6964a75de0b | 1 | static void copy_frame(J2kEncoderContext *s)
{
int tileno, compno, i, y, x;
uint8_t *line;
for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
J2kTile *tile = s->tile + tileno;
if (s->planar){
for (compno = 0; compno < s->ncomponents; compno++){
J2kComponent *comp = tile->comp + compno;
int *dst = comp->data;
line = s->picture->data[compno]
+ comp->coord[1][0] * s->picture->linesize[compno]
+ comp->coord[0][0];
for (y = comp->coord[1][0]; y < comp->coord[1][1]; y++){
uint8_t *ptr = line;
for (x = comp->coord[0][0]; x < comp->coord[0][1]; x++)
*dst++ = *ptr++ - (1 << 7);
line += s->picture->linesize[compno];
}
}
} else{
line = s->picture->data[0] + tile->comp[0].coord[1][0] * s->picture->linesize[0]
+ tile->comp[0].coord[0][0] * s->ncomponents;
i = 0;
for (y = tile->comp[0].coord[1][0]; y < tile->comp[0].coord[1][1]; y++){
uint8_t *ptr = line;
for (x = tile->comp[0].coord[0][0]; x < tile->comp[0].coord[0][1]; x++, i++){
for (compno = 0; compno < s->ncomponents; compno++){
tile->comp[compno].data[i] = *ptr++ - (1 << 7);
}
}
line += s->picture->linesize[0];
}
}
}
}
| 18,825 |
qemu | 60fe637bf0e4d7989e21e50f52526444765c63b4 | 1 | static void migrate_fd_cleanup(void *opaque)
{
MigrationState *s = opaque;
qemu_bh_delete(s->cleanup_bh);
s->cleanup_bh = NULL;
if (s->file) {
trace_migrate_fd_cleanup();
qemu_mutex_unlock_iothread();
qemu_thread_join(&s->thread);
qemu_mutex_lock_iothread();
qemu_fclose(s->file);
s->file = NULL;
}
assert(s->state != MIG_STATE_ACTIVE);
if (s->state != MIG_STATE_COMPLETED) {
qemu_savevm_state_cancel();
if (s->state == MIG_STATE_CANCELLING) {
migrate_set_state(s, MIG_STATE_CANCELLING, MIG_STATE_CANCELLED);
}
}
notifier_list_notify(&migration_state_notifiers, s);
}
| 18,826 |
qemu | f53c398aa603cea135ee58fd15249aeff7b9c7ea | 1 | static void uhci_async_cancel_device(UHCIState *s, USBDevice *dev)
{
UHCIAsync *curr, *n;
QTAILQ_FOREACH_SAFE(curr, &s->async_pending, next, n) {
if (curr->packet.owner == NULL ||
curr->packet.owner->dev != dev) {
continue;
}
uhci_async_unlink(s, curr);
uhci_async_cancel(s, curr);
}
}
| 18,827 |
qemu | 0b5538c300a56c3cfb33022840fe0b4968147e7a | 1 | static TraceEvent* find_trace_event_by_name(const char *tname)
{
unsigned int i;
if (!tname) {
return NULL;
}
for (i = 0; i < NR_TRACE_EVENTS; i++) {
if (!strcmp(trace_list[i].tp_name, tname)) {
return &trace_list[i];
}
}
return NULL; /* indicates end of list reached without a match */
}
| 18,829 |
qemu | b0f74c87a1dbd6b0c5e4de7f1c5cb40197e3fbe9 | 1 | i2c_slave *twl92230_init(i2c_bus *bus, qemu_irq irq)
{
struct menelaus_s *s = (struct menelaus_s *)
i2c_slave_init(bus, 0, sizeof(struct menelaus_s));
s->i2c.event = menelaus_event;
s->i2c.recv = menelaus_rx;
s->i2c.send = menelaus_tx;
s->irq = irq;
s->rtc.hz = qemu_new_timer(rt_clock, menelaus_rtc_hz, s);
s->in = qemu_allocate_irqs(menelaus_gpio_set, s, 3);
s->pwrbtn = qemu_allocate_irqs(menelaus_pwrbtn_set, s, 1)[0];
menelaus_reset(&s->i2c);
register_savevm("menelaus", -1, 0, menelaus_save, menelaus_load, s);
return &s->i2c;
}
| 18,831 |
FFmpeg | aaec4e03e91f3f301d5720429a5da99b44e7e1bb | 0 | static int av_seek_frame_generic(AVFormatContext *s,
int stream_index, int64_t timestamp, int flags)
{
int index;
AVStream *st;
AVIndexEntry *ie;
st = s->streams[stream_index];
index = av_index_search_timestamp(st, timestamp, flags);
if(index < 0 || index==st->nb_index_entries-1){
int i;
AVPacket pkt;
if(st->nb_index_entries){
assert(st->index_entries);
ie= &st->index_entries[st->nb_index_entries-1];
url_fseek(s->pb, ie->pos, SEEK_SET);
av_update_cur_dts(s, st, ie->timestamp);
}else
url_fseek(s->pb, 0, SEEK_SET);
for(i=0;; i++) {
int ret = av_read_frame(s, &pkt);
if(ret<0)
break;
av_free_packet(&pkt);
if(stream_index == pkt.stream_index){
if((pkt.flags & PKT_FLAG_KEY) && pkt.dts > timestamp)
break;
}
}
index = av_index_search_timestamp(st, timestamp, flags);
}
if (index < 0)
return -1;
av_read_frame_flush(s);
if (s->iformat->read_seek){
if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
return 0;
}
ie = &st->index_entries[index];
url_fseek(s->pb, ie->pos, SEEK_SET);
av_update_cur_dts(s, st, ie->timestamp);
return 0;
}
| 18,832 |
FFmpeg | c1b24ca762cba20236190cd7b24e84a9ad788bcb | 1 | static int ogg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
struct ogg *ogg;
struct ogg_stream *os;
int idx, ret;
int pstart, psize;
int64_t fpos, pts, dts;
    if (s->io_repositioned) {
        ogg_reset(s);
        s->io_repositioned = 0;
    }
//Get an ogg packet
retry:
do {
ret = ogg_packet(s, &idx, &pstart, &psize, &fpos);
if (ret < 0)
return ret;
} while (idx < 0 || !s->streams[idx]);
ogg = s->priv_data;
os = ogg->streams + idx;
// pflags might not be set until after this
pts = ogg_calc_pts(s, idx, &dts);
ogg_validate_keyframe(s, idx, pstart, psize);
if (os->keyframe_seek && !(os->pflags & AV_PKT_FLAG_KEY))
goto retry;
os->keyframe_seek = 0;
//Alloc a pkt
ret = av_new_packet(pkt, psize);
if (ret < 0)
return ret;
pkt->stream_index = idx;
memcpy(pkt->data, os->buf + pstart, psize);
pkt->pts = pts;
pkt->dts = dts;
pkt->flags = os->pflags;
pkt->duration = os->pduration;
pkt->pos = fpos;
    if (os->end_trimming) {
        uint8_t *side_data = av_packet_new_side_data(pkt,
                                                     AV_PKT_DATA_SKIP_SAMPLES,
                                                     10);
        AV_WL32(side_data + 4, os->end_trimming);
        os->end_trimming = 0;
    }
    if (os->new_metadata) {
        uint8_t *side_data = av_packet_new_side_data(pkt,
                                                     AV_PKT_DATA_METADATA_UPDATE,
                                                     os->new_metadata_size);
        memcpy(side_data, os->new_metadata, os->new_metadata_size);
        av_freep(&os->new_metadata);
        os->new_metadata_size = 0;
    }
    return psize;
}
| 18,835 |
qemu | df46189412567906312684eb72ba87c6a86a4cdb | 1 | static void slirp_cleanup(void)
{
WSACleanup();
}
| 18,836 |
FFmpeg | 27f6da292118850ca7900de64d06b56e0ebb5070 | 1 | static int read_sl_header(PESContext *pes, SLConfigDescr *sl,
const uint8_t *buf, int buf_size)
{
GetBitContext gb;
int au_start_flag = 0, au_end_flag = 0, ocr_flag = 0, idle_flag = 0;
int padding_flag = 0, padding_bits = 0, inst_bitrate_flag = 0;
int dts_flag = -1, cts_flag = -1;
int64_t dts = AV_NOPTS_VALUE, cts = AV_NOPTS_VALUE;
init_get_bits(&gb, buf, buf_size * 8);
if (sl->use_au_start)
au_start_flag = get_bits1(&gb);
if (sl->use_au_end)
au_end_flag = get_bits1(&gb);
if (!sl->use_au_start && !sl->use_au_end)
au_start_flag = au_end_flag = 1;
if (sl->ocr_len > 0)
ocr_flag = get_bits1(&gb);
if (sl->use_idle)
idle_flag = get_bits1(&gb);
if (sl->use_padding)
padding_flag = get_bits1(&gb);
if (padding_flag)
padding_bits = get_bits(&gb, 3);
if (!idle_flag && (!padding_flag || padding_bits != 0)) {
if (sl->packet_seq_num_len)
skip_bits_long(&gb, sl->packet_seq_num_len);
if (sl->degr_prior_len)
if (get_bits1(&gb))
skip_bits(&gb, sl->degr_prior_len);
if (ocr_flag)
skip_bits_long(&gb, sl->ocr_len);
if (au_start_flag) {
if (sl->use_rand_acc_pt)
get_bits1(&gb);
if (sl->au_seq_num_len > 0)
skip_bits_long(&gb, sl->au_seq_num_len);
if (sl->use_timestamps) {
dts_flag = get_bits1(&gb);
cts_flag = get_bits1(&gb);
}
}
if (sl->inst_bitrate_len)
inst_bitrate_flag = get_bits1(&gb);
if (dts_flag == 1)
dts = get_ts64(&gb, sl->timestamp_len);
if (cts_flag == 1)
cts = get_ts64(&gb, sl->timestamp_len);
if (sl->au_len > 0)
skip_bits_long(&gb, sl->au_len);
if (inst_bitrate_flag)
skip_bits_long(&gb, sl->inst_bitrate_len);
}
if (dts != AV_NOPTS_VALUE)
pes->dts = dts;
if (cts != AV_NOPTS_VALUE)
pes->pts = cts;
if (sl->timestamp_len && sl->timestamp_res)
avpriv_set_pts_info(pes->st, sl->timestamp_len, 1, sl->timestamp_res);
return (get_bits_count(&gb) + 7) >> 3;
}
| 18,837 |
FFmpeg | 1f50baa2b2da7fdbfccf0662883f38a763ff6619 | 1 | static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
{
HEVCLocalContext *lc = s->HEVClc;
GetBitContext *gb = &lc->gb;
int ctb_addr_ts, ret;
*gb = nal->gb;
s->nal_unit_type = nal->type;
s->temporal_id = nal->temporal_id;
switch (s->nal_unit_type) {
case HEVC_NAL_VPS:
ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
if (ret < 0)
goto fail;
break;
case HEVC_NAL_SPS:
ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
s->apply_defdispwin);
if (ret < 0)
goto fail;
break;
case HEVC_NAL_PPS:
ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
if (ret < 0)
goto fail;
break;
case HEVC_NAL_SEI_PREFIX:
case HEVC_NAL_SEI_SUFFIX:
ret = ff_hevc_decode_nal_sei(s);
if (ret < 0)
goto fail;
break;
case HEVC_NAL_TRAIL_R:
case HEVC_NAL_TRAIL_N:
case HEVC_NAL_TSA_N:
case HEVC_NAL_TSA_R:
case HEVC_NAL_STSA_N:
case HEVC_NAL_STSA_R:
case HEVC_NAL_BLA_W_LP:
case HEVC_NAL_BLA_W_RADL:
case HEVC_NAL_BLA_N_LP:
case HEVC_NAL_IDR_W_RADL:
case HEVC_NAL_IDR_N_LP:
case HEVC_NAL_CRA_NUT:
case HEVC_NAL_RADL_N:
case HEVC_NAL_RADL_R:
case HEVC_NAL_RASL_N:
case HEVC_NAL_RASL_R:
ret = hls_slice_header(s);
if (ret < 0)
return ret;
if (s->max_ra == INT_MAX) {
if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
s->max_ra = s->poc;
} else {
if (IS_IDR(s))
s->max_ra = INT_MIN;
}
}
if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
s->poc <= s->max_ra) {
s->is_decoded = 0;
break;
} else {
if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
s->max_ra = INT_MIN;
}
if (s->sh.first_slice_in_pic_flag) {
ret = hevc_frame_start(s);
if (ret < 0)
return ret;
} else if (!s->ref) {
av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
goto fail;
}
if (s->nal_unit_type != s->first_nal_type) {
av_log(s->avctx, AV_LOG_ERROR,
"Non-matching NAL types of the VCL NALUs: %d %d\n",
s->first_nal_type, s->nal_unit_type);
return AVERROR_INVALIDDATA;
}
if (!s->sh.dependent_slice_segment_flag &&
s->sh.slice_type != HEVC_SLICE_I) {
ret = ff_hevc_slice_rpl(s);
if (ret < 0) {
av_log(s->avctx, AV_LOG_WARNING,
"Error constructing the reference lists for the current slice.\n");
goto fail;
}
}
if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
if (ret < 0)
goto fail;
}
if (s->avctx->hwaccel) {
ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
if (ret < 0)
goto fail;
} else {
if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
ctb_addr_ts = hls_slice_data_wpp(s, nal);
else
ctb_addr_ts = hls_slice_data(s);
if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
s->is_decoded = 1;
}
if (ctb_addr_ts < 0) {
ret = ctb_addr_ts;
goto fail;
}
}
break;
case HEVC_NAL_EOS_NUT:
case HEVC_NAL_EOB_NUT:
s->seq_decode = (s->seq_decode + 1) & 0xff;
s->max_ra = INT_MAX;
break;
case HEVC_NAL_AUD:
case HEVC_NAL_FD_NUT:
break;
default:
av_log(s->avctx, AV_LOG_INFO,
"Skipping NAL unit %d\n", s->nal_unit_type);
}
return 0;
fail:
if (s->avctx->err_recognition & AV_EF_EXPLODE)
return ret;
return 0;
}
| 18,839 |
qemu | c5a49c63fa26e8825ad101dfe86339ae4c216539 | 1 | static bool gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
if (dc->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_update_ccount(cpu_env);
tcg_gen_mov_i32(d, cpu_SR[sr]);
if (dc->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
return true;
}
return false;
}
| 18,840 |
FFmpeg | 86020073dbb9a3a9d1fbb76345b2ca29ba1f13d2 | 1 | static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
ADPCMDecodeContext *c = avctx->priv_data;
ADPCMChannelStatus *cs;
int n, m, channel, i;
short *samples;
const uint8_t *src;
int st; /* stereo */
int count1, count2;
int nb_samples, coded_samples, ret;
nb_samples = get_nb_samples(avctx, buf, buf_size, &coded_samples);
if (nb_samples <= 0) {
av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
return AVERROR_INVALIDDATA;
}
/* get output buffer */
c->frame.nb_samples = nb_samples;
if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
samples = (short *)c->frame.data[0];
/* use coded_samples when applicable */
/* it is always <= nb_samples, so the output buffer will be large enough */
if (coded_samples) {
if (coded_samples != nb_samples)
av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
c->frame.nb_samples = nb_samples = coded_samples;
}
src = buf;
st = avctx->channels == 2 ? 1 : 0;
switch(avctx->codec->id) {
case CODEC_ID_ADPCM_IMA_QT:
/* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
Channel data is interleaved per-chunk. */
for (channel = 0; channel < avctx->channels; channel++) {
int16_t predictor;
int step_index;
cs = &(c->status[channel]);
/* (pppppp) (piiiiiii) */
/* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
predictor = AV_RB16(src);
step_index = predictor & 0x7F;
predictor &= 0xFF80;
src += 2;
if (cs->step_index == step_index) {
int diff = (int)predictor - cs->predictor;
if (diff < 0)
diff = - diff;
if (diff > 0x7f)
goto update;
} else {
update:
cs->step_index = step_index;
cs->predictor = predictor;
}
if (cs->step_index > 88){
av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
cs->step_index = 88;
}
samples = (short *)c->frame.data[0] + channel;
for (m = 0; m < 32; m++) {
*samples = adpcm_ima_qt_expand_nibble(cs, src[0] & 0x0F, 3);
samples += avctx->channels;
*samples = adpcm_ima_qt_expand_nibble(cs, src[0] >> 4 , 3);
samples += avctx->channels;
src ++;
}
}
break;
case CODEC_ID_ADPCM_IMA_WAV:
if (avctx->block_align != 0 && buf_size > avctx->block_align)
buf_size = avctx->block_align;
for(i=0; i<avctx->channels; i++){
cs = &(c->status[i]);
cs->predictor = *samples++ = (int16_t)bytestream_get_le16(&src);
cs->step_index = *src++;
if (cs->step_index > 88){
av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n", cs->step_index);
cs->step_index = 88;
}
if (*src++) av_log(avctx, AV_LOG_ERROR, "unused byte should be null but is %d!!\n", src[-1]); /* unused */
}
for (n = (nb_samples - 1) / 8; n > 0; n--) {
for (i = 0; i < avctx->channels; i++) {
cs = &c->status[i];
for (m = 0; m < 4; m++) {
uint8_t v = *src++;
*samples = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
samples += avctx->channels;
*samples = adpcm_ima_expand_nibble(cs, v >> 4 , 3);
samples += avctx->channels;
}
samples -= 8 * avctx->channels - 1;
}
samples += 7 * avctx->channels;
}
break;
case CODEC_ID_ADPCM_4XM:
for (i = 0; i < avctx->channels; i++)
c->status[i].predictor= (int16_t)bytestream_get_le16(&src);
for (i = 0; i < avctx->channels; i++) {
c->status[i].step_index= (int16_t)bytestream_get_le16(&src);
c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
}
for (i = 0; i < avctx->channels; i++) {
samples = (short *)c->frame.data[0] + i;
cs = &c->status[i];
for (n = nb_samples >> 1; n > 0; n--, src++) {
uint8_t v = *src;
*samples = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
samples += avctx->channels;
*samples = adpcm_ima_expand_nibble(cs, v >> 4 , 4);
samples += avctx->channels;
}
}
break;
case CODEC_ID_ADPCM_MS:
{
int block_predictor;
if (avctx->block_align != 0 && buf_size > avctx->block_align)
buf_size = avctx->block_align;
block_predictor = av_clip(*src++, 0, 6);
c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
if (st) {
block_predictor = av_clip(*src++, 0, 6);
c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
}
c->status[0].idelta = (int16_t)bytestream_get_le16(&src);
if (st){
c->status[1].idelta = (int16_t)bytestream_get_le16(&src);
}
c->status[0].sample1 = bytestream_get_le16(&src);
if (st) c->status[1].sample1 = bytestream_get_le16(&src);
c->status[0].sample2 = bytestream_get_le16(&src);
if (st) c->status[1].sample2 = bytestream_get_le16(&src);
*samples++ = c->status[0].sample2;
if (st) *samples++ = c->status[1].sample2;
*samples++ = c->status[0].sample1;
if (st) *samples++ = c->status[1].sample1;
for(n = (nb_samples - 2) >> (1 - st); n > 0; n--, src++) {
*samples++ = adpcm_ms_expand_nibble(&c->status[0 ], src[0] >> 4 );
*samples++ = adpcm_ms_expand_nibble(&c->status[st], src[0] & 0x0F);
}
break;
}
case CODEC_ID_ADPCM_IMA_DK4:
if (avctx->block_align != 0 && buf_size > avctx->block_align)
buf_size = avctx->block_align;
for (channel = 0; channel < avctx->channels; channel++) {
cs = &c->status[channel];
cs->predictor = (int16_t)bytestream_get_le16(&src);
cs->step_index = av_clip(*src++, 0, 88);
src++;
*samples++ = cs->predictor;
}
for (n = nb_samples >> (1 - st); n > 0; n--, src++) {
uint8_t v = *src;
*samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v >> 4 , 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
}
break;
case CODEC_ID_ADPCM_IMA_DK3:
{
unsigned char last_byte = 0;
unsigned char nibble;
int decode_top_nibble_next = 0;
int end_of_packet = 0;
int diff_channel;
if (avctx->block_align != 0 && buf_size > avctx->block_align)
buf_size = avctx->block_align;
c->status[0].predictor = (int16_t)AV_RL16(src + 10);
c->status[1].predictor = (int16_t)AV_RL16(src + 12);
c->status[0].step_index = av_clip(src[14], 0, 88);
c->status[1].step_index = av_clip(src[15], 0, 88);
/* sign extend the predictors */
src += 16;
diff_channel = c->status[1].predictor;
/* the DK3_GET_NEXT_NIBBLE macro issues the break statement when
* the buffer is consumed */
while (1) {
/* for this algorithm, c->status[0] is the sum channel and
* c->status[1] is the diff channel */
/* process the first predictor of the sum channel */
DK3_GET_NEXT_NIBBLE();
adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
/* process the diff channel predictor */
DK3_GET_NEXT_NIBBLE();
adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
/* process the first pair of stereo PCM samples */
diff_channel = (diff_channel + c->status[1].predictor) / 2;
*samples++ = c->status[0].predictor + c->status[1].predictor;
*samples++ = c->status[0].predictor - c->status[1].predictor;
/* process the second predictor of the sum channel */
DK3_GET_NEXT_NIBBLE();
adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
/* process the second pair of stereo PCM samples */
diff_channel = (diff_channel + c->status[1].predictor) / 2;
*samples++ = c->status[0].predictor + c->status[1].predictor;
*samples++ = c->status[0].predictor - c->status[1].predictor;
}
break;
}
case CODEC_ID_ADPCM_IMA_ISS:
for (channel = 0; channel < avctx->channels; channel++) {
cs = &c->status[channel];
cs->predictor = (int16_t)bytestream_get_le16(&src);
cs->step_index = av_clip(*src++, 0, 88);
src++;
}
for (n = nb_samples >> (1 - st); n > 0; n--, src++) {
uint8_t v1, v2;
uint8_t v = *src;
/* nibbles are swapped for mono */
if (st) {
v1 = v >> 4;
v2 = v & 0x0F;
} else {
v2 = v >> 4;
v1 = v & 0x0F;
}
*samples++ = adpcm_ima_expand_nibble(&c->status[0 ], v1, 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
}
break;
case CODEC_ID_ADPCM_IMA_APC:
while (src < buf + buf_size) {
uint8_t v = *src++;
*samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4 , 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
}
break;
case CODEC_ID_ADPCM_IMA_WS:
for (channel = 0; channel < avctx->channels; channel++) {
const uint8_t *src0;
int src_stride;
int16_t *smp = samples + channel;
if (c->vqa_version == 3) {
src0 = src + channel * buf_size / 2;
src_stride = 1;
} else {
src0 = src + channel;
src_stride = avctx->channels;
}
for (n = nb_samples / 2; n > 0; n--) {
uint8_t v = *src0;
src0 += src_stride;
*smp = adpcm_ima_expand_nibble(&c->status[channel], v >> 4 , 3);
smp += avctx->channels;
*smp = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
smp += avctx->channels;
}
}
src = buf + buf_size;
break;
case CODEC_ID_ADPCM_XA:
while (buf_size >= 128) {
xa_decode(samples, src, &c->status[0], &c->status[1],
avctx->channels);
src += 128;
samples += 28 * 8;
buf_size -= 128;
}
break;
case CODEC_ID_ADPCM_IMA_EA_EACS:
src += 4; // skip sample count (already read)
for (i=0; i<=st; i++)
c->status[i].step_index = av_clip(bytestream_get_le32(&src), 0, 88);
for (i=0; i<=st; i++)
c->status[i].predictor = bytestream_get_le32(&src);
for (n = nb_samples >> (1 - st); n > 0; n--, src++) {
*samples++ = adpcm_ima_expand_nibble(&c->status[0], *src>>4, 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[st], *src&0x0F, 3);
}
break;
case CODEC_ID_ADPCM_IMA_EA_SEAD:
for (n = nb_samples >> (1 - st); n > 0; n--, src++) {
*samples++ = adpcm_ima_expand_nibble(&c->status[0], src[0] >> 4, 6);
*samples++ = adpcm_ima_expand_nibble(&c->status[st],src[0]&0x0F, 6);
}
break;
case CODEC_ID_ADPCM_EA:
{
int32_t previous_left_sample, previous_right_sample;
int32_t current_left_sample, current_right_sample;
int32_t next_left_sample, next_right_sample;
int32_t coeff1l, coeff2l, coeff1r, coeff2r;
uint8_t shift_left, shift_right;
/* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
each coding 28 stereo samples. */
src += 4; // skip sample count (already read)
current_left_sample = (int16_t)bytestream_get_le16(&src);
previous_left_sample = (int16_t)bytestream_get_le16(&src);
current_right_sample = (int16_t)bytestream_get_le16(&src);
previous_right_sample = (int16_t)bytestream_get_le16(&src);
for (count1 = 0; count1 < nb_samples / 28; count1++) {
coeff1l = ea_adpcm_table[ *src >> 4 ];
coeff2l = ea_adpcm_table[(*src >> 4 ) + 4];
coeff1r = ea_adpcm_table[*src & 0x0F];
coeff2r = ea_adpcm_table[(*src & 0x0F) + 4];
src++;
shift_left = 20 - (*src >> 4);
shift_right = 20 - (*src & 0x0F);
src++;
for (count2 = 0; count2 < 28; count2++) {
next_left_sample = sign_extend(*src >> 4, 4) << shift_left;
next_right_sample = sign_extend(*src, 4) << shift_right;
src++;
next_left_sample = (next_left_sample +
(current_left_sample * coeff1l) +
(previous_left_sample * coeff2l) + 0x80) >> 8;
next_right_sample = (next_right_sample +
(current_right_sample * coeff1r) +
(previous_right_sample * coeff2r) + 0x80) >> 8;
previous_left_sample = current_left_sample;
current_left_sample = av_clip_int16(next_left_sample);
previous_right_sample = current_right_sample;
current_right_sample = av_clip_int16(next_right_sample);
*samples++ = (unsigned short)current_left_sample;
*samples++ = (unsigned short)current_right_sample;
}
}
if (src - buf == buf_size - 2)
src += 2; // Skip terminating 0x0000
break;
}
case CODEC_ID_ADPCM_EA_MAXIS_XA:
{
int coeff[2][2], shift[2];
for(channel = 0; channel < avctx->channels; channel++) {
for (i=0; i<2; i++)
coeff[channel][i] = ea_adpcm_table[(*src >> 4) + 4*i];
shift[channel] = 20 - (*src & 0x0F);
src++;
}
for (count1 = 0; count1 < nb_samples / 2; count1++) {
for(i = 4; i >= 0; i-=4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
for(channel = 0; channel < avctx->channels; channel++) {
int32_t sample = sign_extend(src[channel] >> i, 4) << shift[channel];
sample = (sample +
c->status[channel].sample1 * coeff[channel][0] +
c->status[channel].sample2 * coeff[channel][1] + 0x80) >> 8;
c->status[channel].sample2 = c->status[channel].sample1;
c->status[channel].sample1 = av_clip_int16(sample);
*samples++ = c->status[channel].sample1;
}
}
src+=avctx->channels;
}
/* consume whole packet */
src = buf + buf_size;
break;
}
case CODEC_ID_ADPCM_EA_R1:
case CODEC_ID_ADPCM_EA_R2:
case CODEC_ID_ADPCM_EA_R3: {
/* channel numbering
2chan: 0=fl, 1=fr
4chan: 0=fl, 1=rl, 2=fr, 3=rr
6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
const int big_endian = avctx->codec->id == CODEC_ID_ADPCM_EA_R3;
int32_t previous_sample, current_sample, next_sample;
int32_t coeff1, coeff2;
uint8_t shift;
unsigned int channel;
uint16_t *samplesC;
const uint8_t *srcC;
const uint8_t *src_end = buf + buf_size;
int count = 0;
src += 4; // skip sample count (already read)
for (channel=0; channel<avctx->channels; channel++) {
int32_t offset = (big_endian ? bytestream_get_be32(&src)
: bytestream_get_le32(&src))
+ (avctx->channels-channel-1) * 4;
if ((offset < 0) || (offset >= src_end - src - 4)) break;
srcC = src + offset;
samplesC = samples + channel;
if (avctx->codec->id == CODEC_ID_ADPCM_EA_R1) {
current_sample = (int16_t)bytestream_get_le16(&srcC);
previous_sample = (int16_t)bytestream_get_le16(&srcC);
} else {
current_sample = c->status[channel].predictor;
previous_sample = c->status[channel].prev_sample;
}
for (count1 = 0; count1 < nb_samples / 28; count1++) {
if (*srcC == 0xEE) { /* only seen in R2 and R3 */
srcC++;
if (srcC > src_end - 30*2) break;
current_sample = (int16_t)bytestream_get_be16(&srcC);
previous_sample = (int16_t)bytestream_get_be16(&srcC);
for (count2=0; count2<28; count2++) {
*samplesC = (int16_t)bytestream_get_be16(&srcC);
samplesC += avctx->channels;
}
} else {
coeff1 = ea_adpcm_table[ *srcC>>4 ];
coeff2 = ea_adpcm_table[(*srcC>>4) + 4];
shift = 20 - (*srcC++ & 0x0F);
if (srcC > src_end - 14) break;
for (count2=0; count2<28; count2++) {
if (count2 & 1)
next_sample = sign_extend(*srcC++, 4) << shift;
else
next_sample = sign_extend(*srcC >> 4, 4) << shift;
next_sample += (current_sample * coeff1) +
(previous_sample * coeff2);
next_sample = av_clip_int16(next_sample >> 8);
previous_sample = current_sample;
current_sample = next_sample;
*samplesC = current_sample;
samplesC += avctx->channels;
}
}
}
if (!count) {
count = count1;
} else if (count != count1) {
av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
count = FFMAX(count, count1);
}
if (avctx->codec->id != CODEC_ID_ADPCM_EA_R1) {
c->status[channel].predictor = current_sample;
c->status[channel].prev_sample = previous_sample;
}
}
c->frame.nb_samples = count * 28;
src = src_end;
break;
}
case CODEC_ID_ADPCM_EA_XAS:
for (channel=0; channel<avctx->channels; channel++) {
int coeff[2][4], shift[4];
short *s2, *s = &samples[channel];
for (n=0; n<4; n++, s+=32*avctx->channels) {
for (i=0; i<2; i++)
coeff[i][n] = ea_adpcm_table[(src[0]&0x0F)+4*i];
shift[n] = 20 - (src[2] & 0x0F);
for (s2=s, i=0; i<2; i++, src+=2, s2+=avctx->channels)
s2[0] = (src[0]&0xF0) + (src[1]<<8);
}
for (m=2; m<32; m+=2) {
s = &samples[m*avctx->channels + channel];
for (n=0; n<4; n++, src++, s+=32*avctx->channels) {
for (s2=s, i=0; i<8; i+=4, s2+=avctx->channels) {
int level = sign_extend(*src >> (4 - i), 4) << shift[n];
int pred = s2[-1*avctx->channels] * coeff[0][n]
+ s2[-2*avctx->channels] * coeff[1][n];
s2[0] = av_clip_int16((level + pred + 0x80) >> 8);
}
}
}
}
break;
case CODEC_ID_ADPCM_IMA_AMV:
case CODEC_ID_ADPCM_IMA_SMJPEG:
if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV) {
c->status[0].predictor = sign_extend(bytestream_get_le16(&src), 16);
c->status[0].step_index = av_clip(bytestream_get_le16(&src), 0, 88);
src += 4;
} else {
c->status[0].predictor = sign_extend(bytestream_get_be16(&src), 16);
c->status[0].step_index = av_clip(bytestream_get_byte(&src), 0, 88);
src += 1;
}
for (n = nb_samples >> (1 - st); n > 0; n--, src++) {
char hi, lo;
lo = *src & 0x0F;
hi = *src >> 4;
if (avctx->codec->id == CODEC_ID_ADPCM_IMA_AMV)
FFSWAP(char, hi, lo);
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
lo, 3);
*samples++ = adpcm_ima_expand_nibble(&c->status[0],
hi, 3);
}
break;
case CODEC_ID_ADPCM_CT:
for (n = nb_samples >> (1 - st); n > 0; n--, src++) {
uint8_t v = *src;
*samples++ = adpcm_ct_expand_nibble(&c->status[0 ], v >> 4 );
*samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
}
break;
case CODEC_ID_ADPCM_SBPRO_4:
case CODEC_ID_ADPCM_SBPRO_3:
case CODEC_ID_ADPCM_SBPRO_2:
if (!c->status[0].step_index) {
/* the first byte is a raw sample */
*samples++ = 128 * (*src++ - 0x80);
if (st)
*samples++ = 128 * (*src++ - 0x80);
c->status[0].step_index = 1;
nb_samples--;
}
if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_4) {
for (n = nb_samples >> (1 - st); n > 0; n--, src++) {
*samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
src[0] >> 4, 4, 0);
*samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
src[0] & 0x0F, 4, 0);
}
} else if (avctx->codec->id == CODEC_ID_ADPCM_SBPRO_3) {
for (n = nb_samples / 3; n > 0; n--, src++) {
*samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
src[0] >> 5 , 3, 0);
*samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
(src[0] >> 2) & 0x07, 3, 0);
*samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
src[0] & 0x03, 2, 0);
}
} else {
for (n = nb_samples >> (2 - st); n > 0; n--, src++) {
*samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
src[0] >> 6 , 2, 2);
*samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
(src[0] >> 4) & 0x03, 2, 2);
*samples++ = adpcm_sbpro_expand_nibble(&c->status[0],
(src[0] >> 2) & 0x03, 2, 2);
*samples++ = adpcm_sbpro_expand_nibble(&c->status[st],
src[0] & 0x03, 2, 2);
}
}
break;
case CODEC_ID_ADPCM_SWF:
{
GetBitContext gb;
const int *table;
int k0, signmask, nb_bits, count;
int size = buf_size*8;
init_get_bits(&gb, buf, size);
//read bits & initial values
nb_bits = get_bits(&gb, 2)+2;
//av_log(NULL,AV_LOG_INFO,"nb_bits: %d\n", nb_bits);
table = swf_index_tables[nb_bits-2];
k0 = 1 << (nb_bits-2);
signmask = 1 << (nb_bits-1);
while (get_bits_count(&gb) <= size - 22*avctx->channels) {
for (i = 0; i < avctx->channels; i++) {
*samples++ = c->status[i].predictor = get_sbits(&gb, 16);
c->status[i].step_index = get_bits(&gb, 6);
}
for (count = 0; get_bits_count(&gb) <= size - nb_bits*avctx->channels && count < 4095; count++) {
int i;
for (i = 0; i < avctx->channels; i++) {
// similar to IMA adpcm
int delta = get_bits(&gb, nb_bits);
int step = ff_adpcm_step_table[c->status[i].step_index];
long vpdiff = 0; // vpdiff = (delta+0.5)*step/4
int k = k0;
do {
if (delta & k)
vpdiff += step;
step >>= 1;
k >>= 1;
} while(k);
vpdiff += step;
if (delta & signmask)
c->status[i].predictor -= vpdiff;
else
c->status[i].predictor += vpdiff;
c->status[i].step_index += table[delta & (~signmask)];
c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
c->status[i].predictor = av_clip_int16(c->status[i].predictor);
*samples++ = c->status[i].predictor;
}
}
}
src += buf_size;
break;
}
case CODEC_ID_ADPCM_YAMAHA:
for (n = nb_samples >> (1 - st); n > 0; n--, src++) {
uint8_t v = *src;
*samples++ = adpcm_yamaha_expand_nibble(&c->status[0 ], v & 0x0F);
*samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4 );
}
break;
case CODEC_ID_ADPCM_THP:
{
int table[2][16];
int prev[2][2];
int ch;
src += 4; // skip channel size
src += 4; // skip number of samples (already read)
for (i = 0; i < 32; i++)
table[0][i] = (int16_t)bytestream_get_be16(&src);
/* Initialize the previous sample. */
for (i = 0; i < 4; i++)
prev[0][i] = (int16_t)bytestream_get_be16(&src);
for (ch = 0; ch <= st; ch++) {
samples = (short *)c->frame.data[0] + ch;
/* Read in every sample for this channel. */
for (i = 0; i < nb_samples / 14; i++) {
int index = (*src >> 4) & 7;
unsigned int exp = *src++ & 15;
int factor1 = table[ch][index * 2];
int factor2 = table[ch][index * 2 + 1];
/* Decode 14 samples. */
for (n = 0; n < 14; n++) {
int32_t sampledat;
if(n&1) sampledat = sign_extend(*src++, 4);
else sampledat = sign_extend(*src >> 4, 4);
sampledat = ((prev[ch][0]*factor1
+ prev[ch][1]*factor2) >> 11) + (sampledat << exp);
*samples = av_clip_int16(sampledat);
prev[ch][1] = prev[ch][0];
prev[ch][0] = *samples++;
/* In case of stereo, skip one sample, this sample
is for the other channel. */
samples += st;
}
}
}
break;
}
default:
return -1;
}
*got_frame_ptr = 1;
*(AVFrame *)data = c->frame;
return src - buf;
}
| 18,841 |
FFmpeg | b5228e44c7f3a5eba537c8a39a45cfbf2961a28d | 1 | static void noise_scale(int *coefs, int scale, int band_energy, int len)
{
int ssign = scale < 0 ? -1 : 1;
int s = FFABS(scale);
unsigned int round;
int i, out, c = exp2tab[s & 3];
int nlz = 0;
while (band_energy > 0x7fff) {
band_energy >>= 1;
nlz++;
}
c /= band_energy;
s = 21 + nlz - (s >> 2);
if (s > 0) {
round = 1 << (s-1);
for (i=0; i<len; i++) {
out = (int)(((int64_t)coefs[i] * c) >> 32);
coefs[i] = ((int)(out+round) >> s) * ssign;
}
}
else {
s = s + 32;
round = 1 << (s-1);
for (i=0; i<len; i++) {
out = (int)((int64_t)((int64_t)coefs[i] * c + round) >> s);
coefs[i] = out * ssign;
}
}
}
| 18,842 |
qemu | 88365d17d586bcf0d9f4432447db345f72278a2a | 1 | int kvm_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len, int type)
{
return -EINVAL;
}
| 18,843 |
qemu | b7b5233ad7fdd9985bb6d05b7919f3a20723ff2c | 1 | void *g_malloc(size_t size)
{
char * p;
size += 16;
p = bsd_vmalloc(size);
*(size_t *)p = size;
return p + 16;
}
| 18,844 |
FFmpeg | e24c31b656254b2516befbde78aeaca0122a6010 | 1 | int ff_dirac_golomb_read_16bit(DiracGolombLUT *lut_ctx, const uint8_t *buf,
int bytes, uint8_t *_dst, int coeffs)
{
int i, b, c_idx = 0;
int16_t *dst = (int16_t *)_dst;
DiracGolombLUT *future[4], *l = &lut_ctx[2*LUT_SIZE + buf[0]];
INIT_RESIDUE(res, 0, 0);
#define APPEND_RESIDUE(N, M) \
N |= M >> (N ## _bits); \
N ## _bits += (M ## _bits)
for (b = 1; b <= bytes; b++) {
future[0] = &lut_ctx[buf[b]];
future[1] = future[0] + 1*LUT_SIZE;
future[2] = future[0] + 2*LUT_SIZE;
future[3] = future[0] + 3*LUT_SIZE;
if ((c_idx + 1) > coeffs)
return c_idx;
if (res_bits && l->sign) {
int32_t coeff = 1;
APPEND_RESIDUE(res, l->preamble);
for (i = 0; i < (res_bits >> 1) - 1; i++) {
coeff <<= 1;
coeff |= (res >> (RSIZE_BITS - 2*i - 2)) & 1;
}
dst[c_idx++] = l->sign * (coeff - 1);
res_bits = res = 0;
}
for (i = 0; i < LUT_BITS; i++)
dst[c_idx + i] = l->ready[i];
c_idx += l->ready_num;
APPEND_RESIDUE(res, l->leftover);
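        /* pick the LUT bank for the next byte from the decoder state: pending sign, empty residue, or residue parity */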
l = future[l->need_s ? 3 : !res_bits ? 2 : res_bits & 1];
}
return c_idx;
}
| 18,845 |
qemu | 5f9490de566c5b092a6cfedc3c7a37a9c9dee917 | 1 | static void spapr_tce_table_finalize(Object *obj)
{
sPAPRTCETable *tcet = SPAPR_TCE_TABLE(obj);
QLIST_REMOVE(tcet, list);
if (!kvm_enabled() ||
(kvmppc_remove_spapr_tce(tcet->table, tcet->fd,
tcet->nb_table) != 0)) {
g_free(tcet->table);
}
}
| 18,846 |
FFmpeg | 8ef740ce01abff3bc7df0f79803a3dc781d89daa | 0 | int ff_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{
uint8_t *src[4], *dst[4];
int i, j, vsub, ret;
int (*draw_slice)(AVFilterLink *, int, int, int);
FF_TPRINTF_START(NULL, draw_slice); ff_tlog_link(NULL, link, 0); ff_tlog(NULL, " y:%d h:%d dir:%d\n", y, h, slice_dir);
/* copy the slice if needed for permission reasons */
if (link->src_buf) {
vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;
for (i = 0; i < 4; i++) {
if (link->src_buf->data[i]) {
src[i] = link->src_buf-> data[i] +
(y >> (i==1 || i==2 ? vsub : 0)) * link->src_buf-> linesize[i];
dst[i] = link->cur_buf->data[i] +
(y >> (i==1 || i==2 ? vsub : 0)) * link->cur_buf->linesize[i];
} else
src[i] = dst[i] = NULL;
}
for (i = 0; i < 4; i++) {
int planew =
av_image_get_linesize(link->format, link->cur_buf->video->w, i);
if (!src[i]) continue;
for (j = 0; j < h >> (i==1 || i==2 ? vsub : 0); j++) {
memcpy(dst[i], src[i], planew);
src[i] += link->src_buf->linesize[i];
dst[i] += link->cur_buf->linesize[i];
}
}
}
if (!(draw_slice = link->dstpad->draw_slice))
draw_slice = default_draw_slice;
ret = draw_slice(link, y, h, slice_dir);
if (ret < 0)
clear_link(link);
return ret;
}
| 18,847 |
qemu | 3a661f1eabf7e8db66e28489884d9b54aacb94ea | 1 | int qcrypto_cipher_encrypt(QCryptoCipher *cipher,
const void *in,
void *out,
size_t len,
Error **errp)
{
QCryptoCipherBuiltin *ctxt = cipher->opaque;
    return ctxt->encrypt(cipher, in, out, len, errp);
}
 | 18,848
qemu | 448fe3c1349b540c66e048788dd98b9c80775c53 | 1 | static void e1000e_pci_foreach_callback(QPCIDevice *dev, int devfn, void *data)
{
*(QPCIDevice **) data = dev;
}
| 18,849 |
FFmpeg | aac07a7a4c2c7a4a29cf6dbc88c1b9fdd191b99d | 0 | static int rm_read_index(AVFormatContext *s)
{
AVIOContext *pb = s->pb;
unsigned int size, n_pkts, str_id, next_off, n, pos, pts;
AVStream *st;
do {
if (avio_rl32(pb) != MKTAG('I','N','D','X'))
return -1;
size = avio_rb32(pb);
if (size < 20)
return -1;
avio_skip(pb, 2);
n_pkts = avio_rb32(pb);
str_id = avio_rb16(pb);
next_off = avio_rb32(pb);
for (n = 0; n < s->nb_streams; n++)
if (s->streams[n]->id == str_id) {
st = s->streams[n];
break;
}
if (n == s->nb_streams)
goto skip;
for (n = 0; n < n_pkts; n++) {
avio_skip(pb, 2);
pts = avio_rb32(pb);
pos = avio_rb32(pb);
avio_skip(pb, 4); /* packet no. */
av_add_index_entry(st, pos, pts, 0, 0, AVINDEX_KEYFRAME);
}
skip:
if (next_off && avio_tell(pb) != next_off &&
avio_seek(pb, next_off, SEEK_SET) < 0)
return -1;
} while (next_off);
return 0;
}
| 18,850 |
FFmpeg | fd6768a4b8e2ce9e25a447aec9194b2bcdd1ac35 | 0 | int ff_huff_gen_len_table(uint8_t *dst, const uint64_t *stats, int stats_size, int skip0)
{
HeapElem *h = av_malloc_array(sizeof(*h), stats_size);
int *up = av_malloc_array(sizeof(*up) * 2, stats_size);
uint8_t *len = av_malloc_array(sizeof(*len) * 2, stats_size);
uint16_t *map= av_malloc_array(sizeof(*map), stats_size);
int offset, i, next;
int size = 0;
int ret = 0;
if (!h || !up || !len) {
ret = AVERROR(ENOMEM);
goto end;
}
for (i = 0; i<stats_size; i++) {
dst[i] = 255;
if (stats[i] || !skip0)
map[size++] = i;
}
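    /* build codes with a bias added to every symbol count; if any code length reaches 32 bits, double the bias and rebuild */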
for (offset = 1; ; offset <<= 1) {
for (i=0; i < size; i++) {
h[i].name = i;
h[i].val = (stats[map[i]] << 14) + offset;
}
for (i = size / 2 - 1; i >= 0; i--)
heap_sift(h, i, size);
for (next = size; next < size * 2 - 1; next++) {
// merge the two smallest entries, and put it back in the heap
uint64_t min1v = h[0].val;
up[h[0].name] = next;
h[0].val = INT64_MAX;
heap_sift(h, 0, size);
up[h[0].name] = next;
h[0].name = next;
h[0].val += min1v;
heap_sift(h, 0, size);
}
len[2 * size - 2] = 0;
for (i = 2 * size - 3; i >= size; i--)
len[i] = len[up[i]] + 1;
for (i = 0; i < size; i++) {
dst[map[i]] = len[up[i]] + 1;
if (dst[map[i]] >= 32) break;
}
if (i==size) break;
}
end:
av_free(h);
av_free(up);
av_free(len);
av_free(map);
return ret;
}
| 18,851 |
qemu | 42a268c241183877192c376d03bd9b6d527407c7 | 0 | static int dec_scc_r(CPUCRISState *env, DisasContext *dc)
{
int cond = dc->op2;
LOG_DIS("s%s $r%u\n",
cc_name(cond), dc->op1);
if (cond != CC_A) {
int l1;
gen_tst_cc(dc, cpu_R[dc->op1], cond);
l1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_R[dc->op1], 0, l1);
tcg_gen_movi_tl(cpu_R[dc->op1], 1);
gen_set_label(l1);
} else {
tcg_gen_movi_tl(cpu_R[dc->op1], 1);
}
cris_cc_mask(dc, 0);
return 2;
}
| 18,854 |
qemu | 61007b316cd71ee7333ff7a0a749a8949527575f | 0 | static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
int64_t sector_num,
QEMUIOVector *qiov,
int nb_sectors,
BlockCompletionFunc *cb,
void *opaque,
int is_write)
{
BlockAIOCBSync *acb;
acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
acb->is_write = is_write;
acb->qiov = qiov;
acb->bounce = qemu_try_blockalign(bs, qiov->size);
acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
if (acb->bounce == NULL) {
acb->ret = -ENOMEM;
} else if (is_write) {
qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
} else {
acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
}
qemu_bh_schedule(acb->bh);
return &acb->common;
}
| 18,855 |
qemu | 80f5ce758ac277e76c016dd7c0b246e40d4fca2d | 0 | static int elf_core_dump(int signr, const CPUState *env)
{
const TaskState *ts = (const TaskState *)env->opaque;
struct vm_area_struct *vma = NULL;
char corefile[PATH_MAX];
struct elf_note_info info;
struct elfhdr elf;
struct elf_phdr phdr;
struct rlimit dumpsize;
struct mm_struct *mm = NULL;
off_t offset = 0, data_offset = 0;
int segs = 0;
int fd = -1;
errno = 0;
getrlimit(RLIMIT_CORE, &dumpsize);
if (dumpsize.rlim_cur == 0)
return 0;
if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
return (-errno);
if ((fd = open(corefile, O_WRONLY | O_CREAT,
S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
return (-errno);
/*
* Walk through target process memory mappings and
* set up structure containing this information. After
* this point vma_xxx functions can be used.
*/
if ((mm = vma_init()) == NULL)
goto out;
walk_memory_regions(mm, vma_walker);
segs = vma_get_mapping_count(mm);
/*
* Construct valid coredump ELF header. We also
* add one more segment for notes.
*/
fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
if (dump_write(fd, &elf, sizeof (elf)) != 0)
goto out;
/* fill in in-memory version of notes */
if (fill_note_info(&info, signr, env) < 0)
goto out;
offset += sizeof (elf); /* elf header */
offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */
/* write out notes program header */
fill_elf_note_phdr(&phdr, info.notes_size, offset);
offset += info.notes_size;
if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
goto out;
/*
* ELF specification wants data to start at page boundary so
* we align it here.
*/
offset = roundup(offset, ELF_EXEC_PAGESIZE);
/*
* Write program headers for memory regions mapped in
* the target process.
*/
for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
(void) memset(&phdr, 0, sizeof (phdr));
phdr.p_type = PT_LOAD;
phdr.p_offset = offset;
phdr.p_vaddr = vma->vma_start;
phdr.p_paddr = 0;
phdr.p_filesz = vma_dump_size(vma);
offset += phdr.p_filesz;
phdr.p_memsz = vma->vma_end - vma->vma_start;
phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
if (vma->vma_flags & PROT_WRITE)
phdr.p_flags |= PF_W;
if (vma->vma_flags & PROT_EXEC)
phdr.p_flags |= PF_X;
phdr.p_align = ELF_EXEC_PAGESIZE;
dump_write(fd, &phdr, sizeof (phdr));
}
/*
* Next we write notes just after program headers. No
* alignment needed here.
*/
if (write_note_info(&info, fd) < 0)
goto out;
/* align data to page boundary */
data_offset = lseek(fd, 0, SEEK_CUR);
data_offset = TARGET_PAGE_ALIGN(data_offset);
if (lseek(fd, data_offset, SEEK_SET) != data_offset)
goto out;
/*
* Finally we can dump process memory into corefile as well.
*/
for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
abi_ulong addr;
abi_ulong end;
end = vma->vma_start + vma_dump_size(vma);
for (addr = vma->vma_start; addr < end;
addr += TARGET_PAGE_SIZE) {
char page[TARGET_PAGE_SIZE];
int error;
/*
* Read in page from target process memory and
* write it to coredump file.
*/
error = copy_from_user(page, addr, sizeof (page));
if (error != 0) {
(void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
addr);
errno = -error;
goto out;
}
if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
goto out;
}
}
out:
free_note_info(&info);
if (mm != NULL)
vma_delete(mm);
(void) close(fd);
if (errno != 0)
return (-errno);
return (0);
}
| 18,856 |
qemu | b931bfbf042983f311b3b09894d8030b2755a638 | 0 | int net_init_vhost_user(const NetClientOptions *opts, const char *name,
NetClientState *peer, Error **errp)
{
const NetdevVhostUserOptions *vhost_user_opts;
CharDriverState *chr;
assert(opts->kind == NET_CLIENT_OPTIONS_KIND_VHOST_USER);
vhost_user_opts = opts->vhost_user;
chr = net_vhost_parse_chardev(vhost_user_opts, errp);
if (!chr) {
return -1;
}
/* verify net frontend */
if (qemu_opts_foreach(qemu_find_opts("device"), net_vhost_check_net,
(char *)name, errp)) {
return -1;
}
return net_vhost_user_init(peer, "vhost_user", name, chr);
}
| 18,857 |
qemu | 7e97cd88148876bad36ee7c66d526dcaed328d0d | 0 | static void *tcg_cpu_thread_fn(void *arg)
{
CPUState *env = arg;
qemu_tcg_init_cpu_signals();
qemu_thread_self(env->thread);
/* signal CPU creation */
qemu_mutex_lock(&qemu_global_mutex);
for (env = first_cpu; env != NULL; env = env->next_cpu)
env->created = 1;
qemu_cond_signal(&qemu_cpu_cond);
/* and wait for machine initialization */
while (!qemu_system_ready)
qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
while (1) {
cpu_exec_all();
qemu_tcg_wait_io_event();
}
return NULL;
}
| 18,858 |
qemu | de9e9d9f17a36ff76c1a02a5348835e5e0a081b0 | 0 | static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
TCGv r_temp, zero;
r_temp = tcg_temp_new();
/* old op:
if (!(env->y & 1))
T1 = 0;
*/
zero = tcg_const_tl(0);
tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
zero, cpu_cc_src2);
tcg_temp_free(zero);
// b2 = T0 & 1;
// env->y = (b2 << 31) | (env->y >> 1);
tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
tcg_gen_shli_tl(r_temp, r_temp, 31);
tcg_gen_shri_tl(cpu_tmp0, cpu_y, 1);
tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0x7fffffff);
tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, r_temp);
tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
// b1 = N ^ V;
gen_mov_reg_N(cpu_tmp0, cpu_psr);
gen_mov_reg_V(r_temp, cpu_psr);
tcg_gen_xor_tl(cpu_tmp0, cpu_tmp0, r_temp);
tcg_temp_free(r_temp);
// T0 = (b1 << 31) | (T0 >> 1);
// src1 = T0;
tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, 31);
tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
tcg_gen_mov_tl(dst, cpu_cc_dst);
}
| 18,859 |
FFmpeg | d0da310317cae25c0c533f6862fb25dc81ec3d4b | 0 | dshow_cycle_pins(AVFormatContext *avctx, enum dshowDeviceType devtype,
IBaseFilter *device_filter, IPin **ppin)
{
IEnumPins *pins = 0;
IPin *device_pin = NULL;
IPin *pin;
int r;
const GUID *mediatype[2] = { &MEDIATYPE_Video, &MEDIATYPE_Audio };
const char *devtypename = (devtype == VideoDevice) ? "video" : "audio";
r = IBaseFilter_EnumPins(device_filter, &pins);
if (r != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not enumerate pins.\n");
return AVERROR(EIO);
}
while (IEnumPins_Next(pins, 1, &pin, NULL) == S_OK && !device_pin) {
IKsPropertySet *p = NULL;
IEnumMediaTypes *types;
PIN_INFO info = {0};
AM_MEDIA_TYPE *type;
GUID category;
DWORD r2;
IPin_QueryPinInfo(pin, &info);
IBaseFilter_Release(info.pFilter);
if (info.dir != PINDIR_OUTPUT)
goto next;
if (IPin_QueryInterface(pin, &IID_IKsPropertySet, (void **) &p) != S_OK)
goto next;
if (IKsPropertySet_Get(p, &ROPSETID_Pin, AMPROPERTY_PIN_CATEGORY,
NULL, 0, &category, sizeof(GUID), &r2) != S_OK)
goto next;
if (!IsEqualGUID(&category, &PIN_CATEGORY_CAPTURE))
goto next;
if (IPin_EnumMediaTypes(pin, &types) != S_OK)
goto next;
IEnumMediaTypes_Reset(types);
while (IEnumMediaTypes_Next(types, 1, &type, NULL) == S_OK && !device_pin) {
if (IsEqualGUID(&type->majortype, mediatype[devtype])) {
device_pin = pin;
goto next;
}
CoTaskMemFree(type);
}
next:
if (types)
IEnumMediaTypes_Release(types);
if (p)
IKsPropertySet_Release(p);
if (device_pin != pin)
IPin_Release(pin);
}
IEnumPins_Release(pins);
if (!device_pin) {
av_log(avctx, AV_LOG_ERROR,
"Could not find output pin from %s capture device.\n", devtypename);
return AVERROR(EIO);
}
*ppin = device_pin;
return 0;
}
| 18,860 |
qemu | a03ef88f77af045a2eb9629b5ce774a3fb973c5e | 0 | static int coroutine_fn blkreplay_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int count, BdrvRequestFlags flags)
{
uint64_t reqid = request_id++;
int ret = bdrv_co_pwrite_zeroes(bs->file->bs, offset, count, flags);
block_request_create(reqid, bs, qemu_coroutine_self());
qemu_coroutine_yield();
return ret;
}
| 18,861 |
qemu | cea5f9a28faa528b6b1b117c9ab2d8828f473fef | 0 | static void tcg_target_qemu_prologue (TCGContext *s)
{
int i, frame_size;
#ifndef __APPLE__
uint64_t addr;
#endif
frame_size = 0
+ 8 /* back chain */
+ 8 /* CR */
+ 8 /* LR */
+ 8 /* compiler doubleword */
+ 8 /* link editor doubleword */
+ 8 /* TOC save area */
+ TCG_STATIC_CALL_ARGS_SIZE
+ ARRAY_SIZE (tcg_target_callee_save_regs) * 8
;
frame_size = (frame_size + 15) & ~15;
#ifndef __APPLE__
/* First emit adhoc function descriptor */
addr = (uint64_t) s->code_ptr + 24;
tcg_out32 (s, addr >> 32); tcg_out32 (s, addr); /* entry point */
s->code_ptr += 16; /* skip TOC and environment pointer */
#endif
/* Prologue */
tcg_out32 (s, MFSPR | RT (0) | LR);
tcg_out32 (s, STDU | RS (1) | RA (1) | (-frame_size & 0xffff));
for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
tcg_out32 (s, (STD
| RS (tcg_target_callee_save_regs[i])
| RA (1)
| (i * 8 + 48 + TCG_STATIC_CALL_ARGS_SIZE)
)
);
tcg_out32 (s, STD | RS (0) | RA (1) | (frame_size + 16));
#ifdef CONFIG_USE_GUEST_BASE
if (GUEST_BASE) {
tcg_out_movi (s, TCG_TYPE_I64, TCG_GUEST_BASE_REG, GUEST_BASE);
tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
}
#endif
tcg_out32 (s, MTSPR | RS (3) | CTR);
tcg_out32 (s, BCCTR | BO_ALWAYS);
/* Epilogue */
tb_ret_addr = s->code_ptr;
for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
tcg_out32 (s, (LD
| RT (tcg_target_callee_save_regs[i])
| RA (1)
| (i * 8 + 48 + TCG_STATIC_CALL_ARGS_SIZE)
)
);
tcg_out32 (s, LD | RT (0) | RA (1) | (frame_size + 16));
tcg_out32 (s, MTSPR | RS (0) | LR);
tcg_out32 (s, ADDI | RT (1) | RA (1) | frame_size);
tcg_out32 (s, BCLR | BO_ALWAYS);
}
| 18,862 |
qemu | 9f1963b3f72521f75a549f8afd61b19e7da63c6f | 0 | int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
int ret = blk_check_request(blk, sector_num, nb_sectors);
if (ret < 0) {
return ret;
}
return bdrv_co_discard(blk_bs(blk), sector_num, nb_sectors);
}
| 18,863 |
qemu | a0efbf16604770b9d805bcf210ec29942321134f | 0 | GList *range_list_insert(GList *list, Range *data)
{
GList *l;
/* Range lists require no empty ranges */
assert(data->begin < data->end || (data->begin && !data->end));
/* Skip all list elements strictly less than data */
for (l = list; l && range_compare(l->data, data) < 0; l = l->next) {
}
if (!l || range_compare(l->data, data) > 0) {
/* Rest of the list (if any) is strictly greater than @data */
return g_list_insert_before(list, l, data);
}
/* Current list element overlaps @data, merge the two */
range_extend(l->data, data);
g_free(data);
/* Merge any subsequent list elements that now also overlap */
while (l->next && range_compare(l->data, l->next->data) == 0) {
GList *new_l;
range_extend(l->data, l->next->data);
g_free(l->next->data);
new_l = g_list_delete_link(list, l->next);
assert(new_l == list);
}
return list;
}
| 18,864 |
qemu | c39ce112b60ffafbaf700853e32bea74cbb2c148 | 0 | SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
void *hba_private)
{
return d->info->alloc_req(d, tag, lun, hba_private);
}
| 18,865 |
qemu | 185b43386ad999c80bdc58e41b87f05e5b3e8463 | 0 | static int nbd_send_negotiate(int csock, off_t size, uint32_t flags)
{
char buf[8 + 8 + 8 + 128];
/* Negotiate
[ 0 .. 7] passwd ("NBDMAGIC")
[ 8 .. 15] magic (0x00420281861253)
[16 .. 23] size
[24 .. 27] flags
[28 .. 151] reserved (0)
*/
TRACE("Beginning negotiation.");
memcpy(buf, "NBDMAGIC", 8);
cpu_to_be64w((uint64_t*)(buf + 8), 0x00420281861253LL);
cpu_to_be64w((uint64_t*)(buf + 16), size);
cpu_to_be32w((uint32_t*)(buf + 24),
flags | NBD_FLAG_HAS_FLAGS | NBD_FLAG_SEND_TRIM |
NBD_FLAG_SEND_FLUSH | NBD_FLAG_SEND_FUA);
memset(buf + 28, 0, 124);
if (write_sync(csock, buf, sizeof(buf)) != sizeof(buf)) {
LOG("write failed");
errno = EINVAL;
return -1;
}
TRACE("Negotiation succeeded.");
return 0;
}
| 18,866 |
qemu | 91cda45b69e45a089f9989979a65db3f710c9925 | 0 | static int find_pte32(CPUPPCState *env, struct mmu_ctx_hash32 *ctx, int h,
int rw, int type, int target_page_bits)
{
hwaddr pteg_off;
target_ulong pte0, pte1;
int i, good = -1;
int ret, r;
ret = -1; /* No entry found */
pteg_off = get_pteg_offset32(env, ctx->hash[h]);
for (i = 0; i < HPTES_PER_GROUP; i++) {
pte0 = ppc_hash32_load_hpte0(env, pteg_off + i*HASH_PTE_SIZE_32);
pte1 = ppc_hash32_load_hpte1(env, pteg_off + i*HASH_PTE_SIZE_32);
r = pte_check_hash32(ctx, pte0, pte1, h, rw, type);
LOG_MMU("Load pte from %08" HWADDR_PRIx " => " TARGET_FMT_lx " "
TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
pteg_off + (i * 8), pte0, pte1, (int)(pte0 >> 31), h,
(int)((pte0 >> 6) & 1), ctx->ptem);
switch (r) {
case -3:
/* PTE inconsistency */
return -1;
case -2:
/* Access violation */
ret = -2;
good = i;
break;
case -1:
default:
/* No PTE match */
break;
case 0:
/* access granted */
/* XXX: we should go on looping to check all PTEs consistency
* but if we can speed-up the whole thing as the
* result would be undefined if PTEs are not consistent.
*/
ret = 0;
good = i;
goto done;
}
}
if (good != -1) {
done:
LOG_MMU("found PTE at addr %08" HWADDR_PRIx " prot=%01x ret=%d\n",
ctx->raddr, ctx->prot, ret);
/* Update page flags */
pte1 = ctx->raddr;
if (ppc_hash32_pte_update_flags(ctx, &pte1, ret, rw) == 1) {
ppc_hash32_store_hpte1(env, pteg_off + good * HASH_PTE_SIZE_32,
pte1);
}
}
/* We have a TLB that saves 4K pages, so let's
* split a huge page to 4k chunks */
if (target_page_bits != TARGET_PAGE_BITS) {
ctx->raddr |= (ctx->eaddr & ((1 << target_page_bits) - 1))
& TARGET_PAGE_MASK;
}
return ret;
}
| 18,867 |
qemu | 4d68e86bb10159099da0798f74e7512955f15eec | 0 | static void __attribute__((constructor)) coroutine_pool_init(void)
{
qemu_mutex_init(&pool_lock);
}
| 18,868 |
qemu | c2fa30757a2ba1bb5b053883773a9a61fbdd7082 | 0 | static void nvdimm_dsm_root(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
{
/*
* function 0 is called to inquire which functions are supported by
* OSPM
*/
if (!in->function) {
nvdimm_dsm_function0(0 /* No function supported other than
function 0 */, dsm_mem_addr);
return;
}
/* No function except function 0 is supported yet. */
nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
}
| 18,869 |
qemu | d76548a98f4e18d3c65a3d921bbb70caf9be6138 | 0 | static int spapr_phb_vfio_eeh_set_option(sPAPRPHBState *sphb,
unsigned int addr, int option)
{
sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
int ret;
switch (option) {
case RTAS_EEH_DISABLE:
op.op = VFIO_EEH_PE_DISABLE;
break;
case RTAS_EEH_ENABLE: {
PCIHostState *phb;
PCIDevice *pdev;
/*
* The EEH functionality is enabled on basis of PCI device,
* instead of PE. We need check the validity of the PCI
* device address.
*/
phb = PCI_HOST_BRIDGE(sphb);
pdev = pci_find_device(phb->bus,
(addr >> 16) & 0xFF, (addr >> 8) & 0xFF);
if (!pdev) {
return RTAS_OUT_PARAM_ERROR;
}
op.op = VFIO_EEH_PE_ENABLE;
break;
}
case RTAS_EEH_THAW_IO:
op.op = VFIO_EEH_PE_UNFREEZE_IO;
break;
case RTAS_EEH_THAW_DMA:
op.op = VFIO_EEH_PE_UNFREEZE_DMA;
break;
default:
return RTAS_OUT_PARAM_ERROR;
}
ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
VFIO_EEH_PE_OP, &op);
if (ret < 0) {
return RTAS_OUT_HW_ERROR;
}
return RTAS_OUT_SUCCESS;
}
| 18,870 |
FFmpeg | 3176217c60ca7828712985092d9102d331ea4f3d | 0 | static int vaapi_h264_start_frame(AVCodecContext *avctx,
av_unused const uint8_t *buffer,
av_unused uint32_t size)
{
H264Context * const h = avctx->priv_data;
struct vaapi_context * const vactx = avctx->hwaccel_context;
VAPictureParameterBufferH264 *pic_param;
VAIQMatrixBufferH264 *iq_matrix;
ff_dlog(avctx, "vaapi_h264_start_frame()\n");
vactx->slice_param_size = sizeof(VASliceParameterBufferH264);
/* Fill in VAPictureParameterBufferH264. */
pic_param = ff_vaapi_alloc_pic_param(vactx, sizeof(VAPictureParameterBufferH264));
if (!pic_param)
return -1;
fill_vaapi_pic(&pic_param->CurrPic, h->cur_pic_ptr, h->picture_structure);
if (fill_vaapi_ReferenceFrames(pic_param, h) < 0)
return -1;
pic_param->picture_width_in_mbs_minus1 = h->mb_width - 1;
pic_param->picture_height_in_mbs_minus1 = h->mb_height - 1;
pic_param->bit_depth_luma_minus8 = h->sps.bit_depth_luma - 8;
pic_param->bit_depth_chroma_minus8 = h->sps.bit_depth_chroma - 8;
pic_param->num_ref_frames = h->sps.ref_frame_count;
pic_param->seq_fields.value = 0; /* reset all bits */
pic_param->seq_fields.bits.chroma_format_idc = h->sps.chroma_format_idc;
pic_param->seq_fields.bits.residual_colour_transform_flag = h->sps.residual_color_transform_flag; /* XXX: only for 4:4:4 high profile? */
pic_param->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = h->sps.gaps_in_frame_num_allowed_flag;
pic_param->seq_fields.bits.frame_mbs_only_flag = h->sps.frame_mbs_only_flag;
pic_param->seq_fields.bits.mb_adaptive_frame_field_flag = h->sps.mb_aff;
pic_param->seq_fields.bits.direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag;
pic_param->seq_fields.bits.MinLumaBiPredSize8x8 = h->sps.level_idc >= 31; /* A.3.3.2 */
pic_param->seq_fields.bits.log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4;
pic_param->seq_fields.bits.pic_order_cnt_type = h->sps.poc_type;
pic_param->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4;
pic_param->seq_fields.bits.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
pic_param->num_slice_groups_minus1 = h->pps.slice_group_count - 1;
pic_param->slice_group_map_type = h->pps.mb_slice_group_map_type;
pic_param->slice_group_change_rate_minus1 = 0; /* XXX: unimplemented in Libav */
pic_param->pic_init_qp_minus26 = h->pps.init_qp - 26;
pic_param->pic_init_qs_minus26 = h->pps.init_qs - 26;
pic_param->chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0];
pic_param->second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1];
pic_param->pic_fields.value = 0; /* reset all bits */
pic_param->pic_fields.bits.entropy_coding_mode_flag = h->pps.cabac;
pic_param->pic_fields.bits.weighted_pred_flag = h->pps.weighted_pred;
pic_param->pic_fields.bits.weighted_bipred_idc = h->pps.weighted_bipred_idc;
pic_param->pic_fields.bits.transform_8x8_mode_flag = h->pps.transform_8x8_mode;
pic_param->pic_fields.bits.field_pic_flag = h->picture_structure != PICT_FRAME;
pic_param->pic_fields.bits.constrained_intra_pred_flag = h->pps.constrained_intra_pred;
pic_param->pic_fields.bits.pic_order_present_flag = h->pps.pic_order_present;
pic_param->pic_fields.bits.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
pic_param->pic_fields.bits.redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present;
pic_param->pic_fields.bits.reference_pic_flag = h->nal_ref_idc != 0;
pic_param->frame_num = h->frame_num;
/* Fill in VAIQMatrixBufferH264. */
iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferH264));
if (!iq_matrix)
return -1;
memcpy(iq_matrix->ScalingList4x4, h->pps.scaling_matrix4, sizeof(iq_matrix->ScalingList4x4));
memcpy(iq_matrix->ScalingList8x8[0], h->pps.scaling_matrix8[0], sizeof(iq_matrix->ScalingList8x8[0]));
memcpy(iq_matrix->ScalingList8x8[1], h->pps.scaling_matrix8[3], sizeof(iq_matrix->ScalingList8x8[0]));
return 0;
}
| 18,871 |
qemu | b40acf99bef69fa8ab0f9092ff162fde945eec12 | 0 | void cpu_outb(pio_addr_t addr, uint8_t val)
{
LOG_IOPORT("outb: %04"FMT_pioaddr" %02"PRIx8"\n", addr, val);
trace_cpu_out(addr, val);
ioport_write(0, addr, val);
}
| 18,873 |