id (int32, 0-27.3k) | func (string, 26-142k chars) | target (bool, 2 classes) | project (string, 2 values) | commit_id (string, 40 chars) |
---|---|---|---|---|
15,322 |
static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
ASFContext *asf = s->priv_data;
ASFStream *asf_st = 0;
ByteIOContext *pb = &s->pb;
static int pc = 0;
for (;;) {
int rsize = 0;
if (asf->packet_size_left < FRAME_HEADER_SIZE
|| asf->packet_segments < 0) {
//asf->packet_size_left <= asf->packet_padsize) {
int ret;
//printf("PACKETLEFTSIZE %d\n", asf->packet_size_left);
/* fail safe */
if (asf->packet_size_left)
url_fskip(pb, asf->packet_size_left);
if (asf->packet_padsize)
url_fskip(pb, asf->packet_padsize);
ret = asf_get_packet(s);
//printf("READ ASF PACKET %d r:%d c:%d\n", ret, asf->packet_size_left, pc++);
if (ret < 0)
return -EIO;
asf->packet_time_start = 0;
continue;
}
if (asf->packet_time_start == 0) {
int num;
if (--asf->packet_segments < 0)
continue;
/* read frame header */
num = get_byte(pb);
rsize++;
asf->packet_key_frame = (num & 0x80) >> 7;
asf->stream_index = asf->asfid2avid[num & 0x7f];
if (asf->stream_index < 0)
{
/* unhandled packet (should not happen) */
url_fskip(pb, asf->packet_frag_size);
asf->packet_size_left -= asf->packet_frag_size;
printf("ff asf skip %d %d\n", asf->packet_frag_size, num &0x7f);
continue;
}
asf->asf_st = s->streams[asf->stream_index]->priv_data;
//printf("ASFST %p %d <> %d\n", asf->asf_st, asf->stream_index, num & 0x7f);
// sequence should be ignored!
DO_2BITS(asf->packet_property >> 4, asf->packet_seq, 0);
DO_2BITS(asf->packet_property >> 2, asf->packet_frag_offset, 0);
DO_2BITS(asf->packet_property, asf->packet_replic_size, 0);
if (asf->packet_replic_size > 1) {
// it should always be at least 8 bytes - FIXME validate
asf->packet_obj_size = get_le32(pb);
asf->packet_frag_timestamp = get_le32(pb); // timestamp
rsize += asf->packet_replic_size; // FIXME - check validity
} else {
// multipacket - frag_offset is beginning timestamp
asf->packet_time_start = asf->packet_frag_offset;
asf->packet_frag_offset = 0;
asf->packet_frag_timestamp = asf->packet_timestamp;
if (asf->packet_replic_size == 1) {
asf->packet_time_delta = get_byte(pb);
rsize++;
}
}
if (asf->packet_flags & 0x01) {
DO_2BITS(asf->packet_segsizetype >> 6, asf->packet_frag_size, 0); // 0 is illegal
//printf("Fragsize %d\n", asf->packet_frag_size);
} else {
asf->packet_frag_size = asf->packet_size_left - rsize;
//printf("Using rest %d %d %d\n", asf->packet_frag_size, asf->packet_size_left, rsize);
}
if (asf->packet_replic_size == 1)
{
asf->packet_multi_size = asf->packet_frag_size;
if (asf->packet_multi_size > asf->packet_size_left)
{
asf->packet_segments = 0;
continue;
}
}
#undef DO_2BITS
asf->packet_size_left -= rsize;
//printf("___objsize____ %d %d rs:%d\n", asf->packet_obj_size, asf->packet_frag_offset, rsize);
}
asf_st = asf->asf_st;
if ((asf->packet_frag_offset != asf_st->frag_offset
|| (asf->packet_frag_offset
&& asf->packet_seq != asf_st->seq)) // seq should be ignored
) {
/* cannot continue current packet: free it */
// FIXME better check if packet was already allocated
printf("ff asf parser skips: %d - %d o:%d - %d %d %d fl:%d\n",
asf_st->pkt.size,
asf->packet_obj_size,
asf->packet_frag_offset, asf_st->frag_offset,
asf->packet_seq, asf_st->seq, asf->packet_frag_size);
if (asf_st->pkt.size)
av_free_packet(&asf_st->pkt);
asf_st->frag_offset = 0;
if (asf->packet_frag_offset != 0) {
url_fskip(pb, asf->packet_frag_size);
printf("ff asf parser skiping %db\n", asf->packet_frag_size);
asf->packet_size_left -= asf->packet_frag_size;
continue;
}
}
if (asf->packet_replic_size == 1) {
// frag_offset is here used as the beginning timestamp
asf->packet_frag_timestamp = asf->packet_time_start;
asf->packet_time_start += asf->packet_time_delta;
asf->packet_obj_size = asf->packet_frag_size = get_byte(pb);
asf->packet_size_left--;
asf->packet_multi_size--;
if (asf->packet_multi_size < asf->packet_obj_size)
{
asf->packet_time_start = 0;
url_fskip(pb, asf->packet_multi_size);
asf->packet_size_left -= asf->packet_multi_size;
continue;
}
asf->packet_multi_size -= asf->packet_obj_size;
//printf("COMPRESS size %d %d %d ms:%d\n", asf->packet_obj_size, asf->packet_frag_timestamp, asf->packet_size_left, asf->packet_multi_size);
}
if (asf_st->frag_offset == 0) {
/* new packet */
av_new_packet(&asf_st->pkt, asf->packet_obj_size);
asf_st->seq = asf->packet_seq;
asf_st->pkt.pts = asf->packet_frag_timestamp - asf->hdr.preroll;
asf_st->pkt.stream_index = asf->stream_index;
if (asf->packet_key_frame)
asf_st->pkt.flags |= PKT_FLAG_KEY;
}
/* read data */
//printf("READ PACKET s:%d os:%d o:%d,%d l:%d DATA:%p\n",
// asf->packet_size, asf_st->pkt.size, asf->packet_frag_offset,
// asf_st->frag_offset, asf->packet_frag_size, asf_st->pkt.data);
asf->packet_size_left -= asf->packet_frag_size;
if (asf->packet_size_left < 0)
continue;
get_buffer(pb, asf_st->pkt.data + asf->packet_frag_offset,
asf->packet_frag_size);
asf_st->frag_offset += asf->packet_frag_size;
/* test if whole packet is read */
if (asf_st->frag_offset == asf_st->pkt.size) {
/* return packet */
if (asf_st->ds_span > 1) {
/* packet descrambling */
char* newdata = av_malloc(asf_st->pkt.size);
if (newdata) {
int offset = 0;
while (offset < asf_st->pkt.size) {
int off = offset / asf_st->ds_chunk_size;
int row = off / asf_st->ds_span;
int col = off % asf_st->ds_span;
int idx = row + col * asf_st->ds_packet_size / asf_st->ds_chunk_size;
//printf("off:%d row:%d col:%d idx:%d\n", off, row, col, idx);
memcpy(newdata + offset,
asf_st->pkt.data + idx * asf_st->ds_chunk_size,
asf_st->ds_chunk_size);
offset += asf_st->ds_chunk_size;
}
av_free(asf_st->pkt.data);
asf_st->pkt.data = newdata;
}
}
asf_st->frag_offset = 0;
memcpy(pkt, &asf_st->pkt, sizeof(AVPacket));
//printf("packet %d %d\n", asf_st->pkt.size, asf->packet_frag_size);
asf_st->pkt.size = 0;
asf_st->pkt.data = 0;
break; // packet completed
}
}
return 0;
}
| false | FFmpeg | 1359c2d43e8b13f09ad46526261fe00d7834d411 |
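The descrambling loop near the end of asf_read_packet above undoes a column-major chunk interleave. Below is a minimal standalone sketch of that index arithmetic; the ds_span/ds_chunk_size/ds_packet_size values and the sample payload are invented for illustration (the real demuxer takes them from the stream header).

```c
/* Sketch of the chunk de-interleave in asf_read_packet; all sizes invented. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    const int ds_span        = 2;   /* interleaved chunk streams            */
    const int ds_chunk_size  = 4;   /* bytes per chunk                      */
    const int ds_packet_size = 8;   /* bytes per interleave row             */
    const int total          = 16;  /* scrambled payload size               */

    /* input chunks A0 B0 A1 B1 should come out as A0 A1 B0 B1 */
    const char scrambled[16 + 1] = "AAAABBBBaaaabbbb";
    char descrambled[16 + 1] = {0};

    for (int offset = 0; offset < total; offset += ds_chunk_size) {
        int off = offset / ds_chunk_size;                      /* output chunk index */
        int row = off / ds_span;
        int col = off % ds_span;
        int idx = row + col * ds_packet_size / ds_chunk_size;  /* input chunk index  */
        memcpy(descrambled + offset, scrambled + idx * ds_chunk_size, ds_chunk_size);
    }
    printf("%s -> %s\n", scrambled, descrambled);
    return 0;
}
```

Running it prints `AAAABBBBaaaabbbb -> AAAAaaaaBBBBbbbb`, i.e. the two interleaved chunk streams come out contiguous.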
15,323 |
static int qdm2_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
QDM2Context *s = avctx->priv_data;
int16_t *out = data;
int i;
if(!buf)
return 0;
if(buf_size < s->checksum_size)
return -1;
av_log(avctx, AV_LOG_DEBUG, "decode(%d): %p[%d] -> %p[%d]\n",
buf_size, buf, s->checksum_size, data, *data_size);
for (i = 0; i < 16; i++) {
if (qdm2_decode(s, buf, out) < 0)
return -1;
out += s->channels * s->frame_size;
}
*data_size = (uint8_t*)out - (uint8_t*)data;
return s->checksum_size;
}
| false | FFmpeg | 7d49f79f1cd47783a963a757a6563b9cac29db62 |
15,324 |
static av_cold int xcbgrab_read_header(AVFormatContext *s)
{
XCBGrabContext *c = s->priv_data;
int screen_num, ret;
const xcb_setup_t *setup;
char *display_name = av_strdup(s->filename);
if (s->filename) {
if (!display_name)
return AVERROR(ENOMEM);
if (!sscanf(s->filename, "%[^+]+%d,%d", display_name, &c->x, &c->y)) {
*display_name = 0;
sscanf(s->filename, "+%d,%d", &c->x, &c->y);
}
}
c->conn = xcb_connect(display_name, &screen_num);
av_freep(&display_name);
if ((ret = xcb_connection_has_error(c->conn))) {
av_log(s, AV_LOG_ERROR, "Cannot open display %s, error %d.\n",
s->filename ? s->filename : "default", ret);
return AVERROR(EIO);
}
setup = xcb_get_setup(c->conn);
c->screen = get_screen(setup, screen_num);
if (!c->screen) {
av_log(s, AV_LOG_ERROR, "The screen %d does not exist.\n",
screen_num);
xcbgrab_read_close(s);
return AVERROR(EIO);
}
#if CONFIG_LIBXCB_SHM
c->segment = xcb_generate_id(c->conn);
#endif
ret = create_stream(s);
if (ret < 0) {
xcbgrab_read_close(s);
return ret;
}
#if CONFIG_LIBXCB_SHM
c->has_shm = check_shm(c->conn);
#endif
#if CONFIG_LIBXCB_XFIXES
if (c->draw_mouse) {
if (!(c->draw_mouse = check_xfixes(c->conn))) {
av_log(s, AV_LOG_WARNING,
"XFixes not available, cannot draw the mouse.\n");
}
if (c->bpp < 24) {
avpriv_report_missing_feature(s, "%d bits per pixel screen",
c->bpp);
c->draw_mouse = 0;
}
}
#endif
if (c->show_region)
setup_window(s);
return 0;
}
| false | FFmpeg | 62eca2f827d441f52125191fd78c96b67a7ba30c |
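The sscanf pair in xcbgrab_read_header above splits the input into an optional X display name and a "+x,y" grab offset. The sketch below reproduces that parsing with invented input strings; a bounded field width is added only because this standalone buffer is fixed-size (the original writes into an av_strdup of the filename, which is always large enough).

```c
/* Sketch of the display-name/offset parsing; inputs are illustrative. */
#include <stdio.h>

static void parse(const char *filename)
{
    char display[256] = "";
    int x = 0, y = 0;

    /* "%[^+]" grabs everything up to the first '+', then "+%d,%d" the offset */
    if (!sscanf(filename, "%255[^+]+%d,%d", display, &x, &y)) {
        display[0] = '\0';
        sscanf(filename, "+%d,%d", &x, &y);
    }
    printf("input=\"%s\" display=\"%s\" x=%d y=%d\n", filename, display, x, y);
}

int main(void)
{
    parse(":0.0+10,20");   /* explicit display plus offset   */
    parse("+30,40");       /* offset only, default display   */
    return 0;
}
```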
15,325 |
H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
#define H264_LOWPASS(OPNAME, OP, OP2) \
static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int h=2;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
\
static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int w=2;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
const int srcA= src[-1*srcStride];\
const int src0= src[0 *srcStride];\
const int src1= src[1 *srcStride];\
const int src2= src[2 *srcStride];\
const int src3= src[3 *srcStride];\
const int src4= src[4 *srcStride];\
OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
dst++;\
src++;\
}\
}\
\
static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *p_dst, pixeltmp *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
const int h=2;\
const int w=2;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
{\
tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
tmp+=tmpStride;\
src+=srcStride;\
}\
tmp -= tmpStride*(h+5-2);\
for(i=0; i<w; i++)\
{\
const int tmpB= tmp[-2*tmpStride] - pad;\
const int tmpA= tmp[-1*tmpStride] - pad;\
const int tmp0= tmp[0 *tmpStride] - pad;\
const int tmp1= tmp[1 *tmpStride] - pad;\
const int tmp2= tmp[2 *tmpStride] - pad;\
const int tmp3= tmp[3 *tmpStride] - pad;\
const int tmp4= tmp[4 *tmpStride] - pad;\
OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
dst++;\
tmp++;\
}\
}\
static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int h=4;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
\
static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int w=4;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
const int srcA= src[-1*srcStride];\
const int src0= src[0 *srcStride];\
const int src1= src[1 *srcStride];\
const int src2= src[2 *srcStride];\
const int src3= src[3 *srcStride];\
const int src4= src[4 *srcStride];\
const int src5= src[5 *srcStride];\
const int src6= src[6 *srcStride];\
OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
dst++;\
src++;\
}\
}\
\
static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *p_dst, pixeltmp *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
const int h=4;\
const int w=4;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
{\
tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]) + pad;\
tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]) + pad;\
tmp+=tmpStride;\
src+=srcStride;\
}\
tmp -= tmpStride*(h+5-2);\
for(i=0; i<w; i++)\
{\
const int tmpB= tmp[-2*tmpStride] - pad;\
const int tmpA= tmp[-1*tmpStride] - pad;\
const int tmp0= tmp[0 *tmpStride] - pad;\
const int tmp1= tmp[1 *tmpStride] - pad;\
const int tmp2= tmp[2 *tmpStride] - pad;\
const int tmp3= tmp[3 *tmpStride] - pad;\
const int tmp4= tmp[4 *tmpStride] - pad;\
const int tmp5= tmp[5 *tmpStride] - pad;\
const int tmp6= tmp[6 *tmpStride] - pad;\
OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
dst++;\
tmp++;\
}\
}\
\
static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int h=8;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
\
static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int w=8;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
const int srcA= src[-1*srcStride];\
const int src0= src[0 *srcStride];\
const int src1= src[1 *srcStride];\
const int src2= src[2 *srcStride];\
const int src3= src[3 *srcStride];\
const int src4= src[4 *srcStride];\
const int src5= src[5 *srcStride];\
const int src6= src[6 *srcStride];\
const int src7= src[7 *srcStride];\
const int src8= src[8 *srcStride];\
const int src9= src[9 *srcStride];\
const int src10=src[10*srcStride];\
OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
dst++;\
src++;\
}\
}\
\
static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *p_dst, pixeltmp *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
const int h=8;\
const int w=8;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
{\
tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]) + pad;\
tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]) + pad;\
tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]) + pad;\
tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]) + pad;\
tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]) + pad;\
tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]) + pad;\
tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]) + pad;\
tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]) + pad;\
tmp+=tmpStride;\
src+=srcStride;\
}\
tmp -= tmpStride*(h+5-2);\
for(i=0; i<w; i++)\
{\
const int tmpB= tmp[-2*tmpStride] - pad;\
const int tmpA= tmp[-1*tmpStride] - pad;\
const int tmp0= tmp[0 *tmpStride] - pad;\
const int tmp1= tmp[1 *tmpStride] - pad;\
const int tmp2= tmp[2 *tmpStride] - pad;\
const int tmp3= tmp[3 *tmpStride] - pad;\
const int tmp4= tmp[4 *tmpStride] - pad;\
const int tmp5= tmp[5 *tmpStride] - pad;\
const int tmp6= tmp[6 *tmpStride] - pad;\
const int tmp7= tmp[7 *tmpStride] - pad;\
const int tmp8= tmp[8 *tmpStride] - pad;\
const int tmp9= tmp[9 *tmpStride] - pad;\
const int tmp10=tmp[10*tmpStride] - pad;\
OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
dst++;\
tmp++;\
}\
}\
\
static void FUNC(OPNAME ## h264_qpel16_v_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
}\
\
static void FUNC(OPNAME ## h264_qpel16_h_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
}\
\
static void FUNC(OPNAME ## h264_qpel16_hv_lowpass)(uint8_t *dst, pixeltmp *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst , tmp , src , dstStride, tmpStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst , tmp , src , dstStride, tmpStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
}\
#define H264_MC(OPNAME, SIZE) \
static av_unused void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc00)(uint8_t *dst, uint8_t *src, int stride){\
FUNCC(OPNAME ## pixels ## SIZE)(dst, src, stride, SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc10)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src, half, stride, stride, SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc20)(uint8_t *dst, uint8_t *src, int stride){\
FUNC(OPNAME ## h264_qpel ## SIZE ## _h_lowpass)(dst, src, stride, stride);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc30)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src+sizeof(pixel), half, stride, stride, SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc01)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid, half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc02)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(OPNAME ## h264_qpel ## SIZE ## _v_lowpass)(dst, full_mid, stride, SIZE*sizeof(pixel));\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc03)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid+SIZE*sizeof(pixel), half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc11)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc31)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc13)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc33)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc22)(uint8_t *dst, uint8_t *src, int stride){\
pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
FUNC(OPNAME ## h264_qpel ## SIZE ## _hv_lowpass)(dst, tmp, src, stride, SIZE*sizeof(pixel), stride);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc21)(uint8_t *dst, uint8_t *src, int stride){\
pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc23)(uint8_t *dst, uint8_t *src, int stride){\
pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc12)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc32)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
pixeltmp tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
#define op_avg(a, b) a = (((a)+CLIP(((b) + 16)>>5)+1)>>1)
//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
#define op_put(a, b) a = CLIP(((b) + 16)>>5)
#define op2_avg(a, b) a = (((a)+CLIP(((b) + 512)>>10)+1)>>1)
#define op2_put(a, b) a = CLIP(((b) + 512)>>10)
H264_LOWPASS(put_ , op_put, op2_put)
H264_LOWPASS(avg_ , op_avg, op2_avg)
H264_MC(put_, 2)
H264_MC(put_, 4)
H264_MC(put_, 8)
H264_MC(put_, 16)
H264_MC(avg_, 4)
H264_MC(avg_, 8)
H264_MC(avg_, 16)
#undef op_avg
#undef op_put
#undef op2_avg
#undef op2_put
#if BIT_DEPTH == 8
# define put_h264_qpel8_mc00_8_c ff_put_pixels8x8_8_c
# define avg_h264_qpel8_mc00_8_c ff_avg_pixels8x8_8_c
# define put_h264_qpel16_mc00_8_c ff_put_pixels16x16_8_c
# define avg_h264_qpel16_mc00_8_c ff_avg_pixels16x16_8_c
#elif BIT_DEPTH == 9
# define put_h264_qpel8_mc00_9_c ff_put_pixels8x8_9_c
# define avg_h264_qpel8_mc00_9_c ff_avg_pixels8x8_9_c
# define put_h264_qpel16_mc00_9_c ff_put_pixels16x16_9_c
# define avg_h264_qpel16_mc00_9_c ff_avg_pixels16x16_9_c
#elif BIT_DEPTH == 10
# define put_h264_qpel8_mc00_10_c ff_put_pixels8x8_10_c
# define avg_h264_qpel8_mc00_10_c ff_avg_pixels8x8_10_c
# define put_h264_qpel16_mc00_10_c ff_put_pixels16x16_10_c
# define avg_h264_qpel16_mc00_10_c ff_avg_pixels16x16_10_c
#elif BIT_DEPTH == 12
# define put_h264_qpel8_mc00_12_c ff_put_pixels8x8_12_c
# define avg_h264_qpel8_mc00_12_c ff_avg_pixels8x8_12_c
# define put_h264_qpel16_mc00_12_c ff_put_pixels16x16_12_c
# define avg_h264_qpel16_mc00_12_c ff_avg_pixels16x16_12_c
#elif BIT_DEPTH == 14
# define put_h264_qpel8_mc00_14_c ff_put_pixels8x8_14_c
# define avg_h264_qpel8_mc00_14_c ff_avg_pixels8x8_14_c
# define put_h264_qpel16_mc00_14_c ff_put_pixels16x16_14_c
# define avg_h264_qpel16_mc00_14_c ff_avg_pixels16x16_14_c
#endif
void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(put_pixels8)(dst, src, stride, 8);
}
| false | FFmpeg | 6ff6a51b863733f2a3a737cdbfe492fa9cefe16b |
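Each OP(...) line generated by H264_LOWPASS above applies the H.264 six-tap half-pel filter with taps (1, -5, 20, 20, -5, 1), and op_put then rounds, shifts by 5 and clips. A self-contained 8-bit sketch of that arithmetic, with an invented pixel row:

```c
/* Sketch of one half-pel sample through the 6-tap filter plus op_put. */
#include <stdio.h>
#include <stdint.h>

static uint8_t clip_uint8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : v;
}

/* half-pel sample between src[i] and src[i+1];
 * needs 2 valid samples left of i and 3 to the right */
static uint8_t qpel_h_sample(const uint8_t *src, int i)
{
    int sum = (src[i] + src[i + 1]) * 20
            - (src[i - 1] + src[i + 2]) * 5
            + (src[i - 2] + src[i + 3]);
    return clip_uint8((sum + 16) >> 5);   /* op_put: round, >>5, clip */
}

int main(void)
{
    const uint8_t row[8] = { 10, 10, 10, 200, 200, 200, 200, 200 };
    printf("half-pel value: %d\n", qpel_h_sample(row, 3));  /* prints 224 */
    return 0;
}
```

The 224 it prints for a 10/200 step edge shows the slight overshoot (ringing) characteristic of this filter.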
15,326 |
yuv2yuvX16_c_template(const int16_t *lumFilter, const int16_t **lumSrc,
int lumFilterSize, const int16_t *chrFilter,
const int16_t **chrUSrc, const int16_t **chrVSrc,
int chrFilterSize, const int16_t **alpSrc,
uint16_t *dest, uint16_t *uDest, uint16_t *vDest,
uint16_t *aDest, int dstW, int chrDstW,
int big_endian, int output_bits)
{
//FIXME Optimize (just quickly written not optimized..)
int i;
int shift = 11 + 16 - output_bits;
#define output_pixel(pos, val) \
if (big_endian) { \
if (output_bits == 16) { \
AV_WB16(pos, av_clip_uint16(val >> shift)); \
} else { \
AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
} \
} else { \
if (output_bits == 16) { \
AV_WL16(pos, av_clip_uint16(val >> shift)); \
} else { \
AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
} \
}
for (i = 0; i < dstW; i++) {
int val = 1 << (26-output_bits);
int j;
for (j = 0; j < lumFilterSize; j++)
val += lumSrc[j][i] * lumFilter[j];
output_pixel(&dest[i], val);
}
if (uDest) {
for (i = 0; i < chrDstW; i++) {
int u = 1 << (26-output_bits);
int v = 1 << (26-output_bits);
int j;
for (j = 0; j < chrFilterSize; j++) {
u += chrUSrc[j][i] * chrFilter[j];
v += chrVSrc[j][i] * chrFilter[j];
}
output_pixel(&uDest[i], u);
output_pixel(&vDest[i], v);
}
}
if (CONFIG_SWSCALE_ALPHA && aDest) {
for (i = 0; i < dstW; i++) {
int val = 1 << (26-output_bits);
int j;
for (j = 0; j < lumFilterSize; j++)
val += alpSrc[j][i] * lumFilter[j];
output_pixel(&aDest[i], val);
}
}
#undef output_pixel
}
| false | FFmpeg | 13a099799e89a76eb921ca452e1b04a7a28a9855 |
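yuv2yuvX16_c_template above accumulates Q12 filter coefficients against int16_t intermediates, adds a rounding bias of 1 << (26 - output_bits), then shifts by 11 + 16 - output_bits and clips to output_bits. A minimal sketch of one output sample; the coefficients and sample values are invented.

```c
/* Sketch of the fixed-point accumulation for one 10-bit output sample. */
#include <stdio.h>
#include <stdint.h>

static int clip_uintp2(int v, int bits)
{
    int max = (1 << bits) - 1;
    return v < 0 ? 0 : v > max ? max : v;
}

int main(void)
{
    const int output_bits = 10;
    const int shift = 11 + 16 - output_bits;    /* 17 for 10-bit output */

    /* two-tap vertical filter, coefficients sum to 4096 (Q12) */
    const int16_t filter[2] = { 3072, 1024 };
    const int16_t src[2]    = { 20000, 24000 }; /* two source lines, one column */

    int val = 1 << (26 - output_bits);          /* rounding bias = 1 << (shift - 1) */
    for (int j = 0; j < 2; j++)
        val += src[j] * filter[j];

    printf("10-bit output sample: %d\n", clip_uintp2(val >> shift, output_bits));
    return 0;
}
```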
15,327 |
void ff_h264_pred_direct_motion(H264Context * const h, int *mb_type){
MpegEncContext * const s = &h->s;
int b8_stride = h->b8_stride;
int b4_stride = h->b_stride;
int mb_xy = h->mb_xy;
int mb_type_col[2];
const int16_t (*l1mv0)[2], (*l1mv1)[2];
const int8_t *l1ref0, *l1ref1;
const int is_b8x8 = IS_8X8(*mb_type);
unsigned int sub_mb_type;
int i8, i4;
assert(h->ref_list[1][0].reference&3);
#define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16|MB_TYPE_INTRA4x4|MB_TYPE_INTRA16x16|MB_TYPE_INTRA_PCM)
if(IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])){ // AFL/AFR/FR/FL -> AFL/FL
if(!IS_INTERLACED(*mb_type)){ // AFR/FR -> AFL/FL
mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
b8_stride = 0;
}else{
mb_xy += h->col_fieldoff; // non zero for FL -> FL & differ parity
}
goto single_col;
}else{ // AFL/AFR/FR/FL -> AFR/FR
if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + s->mb_stride];
b8_stride *= 3;
b4_stride *= 6;
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
if( (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)
&& (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA)
&& !is_b8x8){
*mb_type |= MB_TYPE_16x8 |MB_TYPE_L0L1|MB_TYPE_DIRECT2; /* B_16x8 */
}else{
*mb_type |= MB_TYPE_8x8|MB_TYPE_L0L1;
}
}else{ // AFR/FR -> AFR/FR
single_col:
mb_type_col[0] =
mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
if(IS_8X8(mb_type_col[0]) && !h->sps.direct_8x8_inference_flag){
/* FIXME save sub mb types from previous frames (or derive from MVs)
* so we know exactly what block size to use */
sub_mb_type = MB_TYPE_8x8|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_4x4 */
*mb_type |= MB_TYPE_8x8|MB_TYPE_L0L1;
}else if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
*mb_type |= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_16x16 */
}else if(!is_b8x8 && (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16))){
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
*mb_type |= MB_TYPE_L0L1|MB_TYPE_DIRECT2 | (mb_type_col[0] & (MB_TYPE_16x8|MB_TYPE_8x16));
}else{
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
*mb_type |= MB_TYPE_8x8|MB_TYPE_L0L1;
}
}
}
l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
l1ref0 = &h->ref_list[1][0].ref_index [0][h->mb2b8_xy[mb_xy]];
l1ref1 = &h->ref_list[1][0].ref_index [1][h->mb2b8_xy[mb_xy]];
if(!b8_stride){
if(s->mb_y&1){
l1ref0 += h->b8_stride;
l1ref1 += h->b8_stride;
l1mv0 += 2*b4_stride;
l1mv1 += 2*b4_stride;
}
}
if(h->direct_spatial_mv_pred){
int ref[2];
int mv[2][2];
int list;
/* FIXME interlacing + spatial direct uses wrong colocated block positions */
/* ref = min(neighbors) */
for(list=0; list<2; list++){
int refa = h->ref_cache[list][scan8[0] - 1];
int refb = h->ref_cache[list][scan8[0] - 8];
int refc = h->ref_cache[list][scan8[0] - 8 + 4];
if(refc == PART_NOT_AVAILABLE)
refc = h->ref_cache[list][scan8[0] - 8 - 1];
ref[list] = FFMIN3((unsigned)refa, (unsigned)refb, (unsigned)refc);
if(ref[list] >= 0){
pred_motion(h, 0, 4, list, ref[list], &mv[list][0], &mv[list][1]);
}else{
int mask= ~(MB_TYPE_L0 << (2*list));
mv[list][0] = mv[list][1] = 0;
ref[list] = -1;
if(!is_b8x8)
*mb_type &= mask;
sub_mb_type &= mask;
}
}
if(ref[0] < 0 && ref[1] < 0){
ref[0] = ref[1] = 0;
if(!is_b8x8)
*mb_type |= MB_TYPE_L0L1;
sub_mb_type |= MB_TYPE_L0L1;
}
if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
for(i8=0; i8<4; i8++){
int x8 = i8&1;
int y8 = i8>>1;
int xy8 = x8+y8*b8_stride;
int xy4 = 3*x8+y8*b4_stride;
int a,b;
if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
continue;
h->sub_mb_type[i8] = sub_mb_type;
fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);
if(!IS_INTRA(mb_type_col[y8]) && !h->ref_list[1][0].long_ref
&& ( (l1ref0[xy8] == 0 && FFABS(l1mv0[xy4][0]) <= 1 && FFABS(l1mv0[xy4][1]) <= 1)
|| (l1ref0[xy8] < 0 && l1ref1[xy8] == 0 && FFABS(l1mv1[xy4][0]) <= 1 && FFABS(l1mv1[xy4][1]) <= 1))){
a=b=0;
if(ref[0] > 0)
a= pack16to32(mv[0][0],mv[0][1]);
if(ref[1] > 0)
b= pack16to32(mv[1][0],mv[1][1]);
}else{
a= pack16to32(mv[0][0],mv[0][1]);
b= pack16to32(mv[1][0],mv[1][1]);
}
fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, a, 4);
fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, b, 4);
}
}else if(IS_16X16(*mb_type)){
int a,b;
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
if(!IS_INTRA(mb_type_col[0]) && !h->ref_list[1][0].long_ref
&& ( (l1ref0[0] == 0 && FFABS(l1mv0[0][0]) <= 1 && FFABS(l1mv0[0][1]) <= 1)
|| (l1ref0[0] < 0 && l1ref1[0] == 0 && FFABS(l1mv1[0][0]) <= 1 && FFABS(l1mv1[0][1]) <= 1
&& (h->x264_build>33 || !h->x264_build)))){
a=b=0;
if(ref[0] > 0)
a= pack16to32(mv[0][0],mv[0][1]);
if(ref[1] > 0)
b= pack16to32(mv[1][0],mv[1][1]);
}else{
a= pack16to32(mv[0][0],mv[0][1]);
b= pack16to32(mv[1][0],mv[1][1]);
}
fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, a, 4);
fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, b, 4);
}else{
for(i8=0; i8<4; i8++){
const int x8 = i8&1;
const int y8 = i8>>1;
if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
continue;
h->sub_mb_type[i8] = sub_mb_type;
fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mv[0][0],mv[0][1]), 4);
fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mv[1][0],mv[1][1]), 4);
fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[0], 1);
fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, (uint8_t)ref[1], 1);
/* col_zero_flag */
if(!IS_INTRA(mb_type_col[0]) && !h->ref_list[1][0].long_ref && ( l1ref0[x8 + y8*b8_stride] == 0
|| (l1ref0[x8 + y8*b8_stride] < 0 && l1ref1[x8 + y8*b8_stride] == 0
&& (h->x264_build>33 || !h->x264_build)))){
const int16_t (*l1mv)[2]= l1ref0[x8 + y8*b8_stride] == 0 ? l1mv0 : l1mv1;
if(IS_SUB_8X8(sub_mb_type)){
const int16_t *mv_col = l1mv[x8*3 + y8*3*b4_stride];
if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
if(ref[0] == 0)
fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
if(ref[1] == 0)
fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
}
}else
for(i4=0; i4<4; i4++){
const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride];
if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
if(ref[0] == 0)
*(uint32_t*)h->mv_cache[0][scan8[i8*4+i4]] = 0;
if(ref[1] == 0)
*(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] = 0;
}
}
}
}
}
}else{ /* direct temporal mv pred */
const int *map_col_to_list0[2] = {h->map_col_to_list0[0], h->map_col_to_list0[1]};
const int *dist_scale_factor = h->dist_scale_factor;
int ref_offset= 0;
if(FRAME_MBAFF && IS_INTERLACED(*mb_type)){
map_col_to_list0[0] = h->map_col_to_list0_field[s->mb_y&1][0];
map_col_to_list0[1] = h->map_col_to_list0_field[s->mb_y&1][1];
dist_scale_factor =h->dist_scale_factor_field[s->mb_y&1];
}
if(h->ref_list[1][0].mbaff && IS_INTERLACED(mb_type_col[0]))
ref_offset += 16;
if(IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])){
int y_shift = 2*!IS_INTERLACED(*mb_type);
assert(h->sps.direct_8x8_inference_flag);
for(i8=0; i8<4; i8++){
const int x8 = i8&1;
const int y8 = i8>>1;
int ref0, scale;
const int16_t (*l1mv)[2]= l1mv0;
if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
continue;
h->sub_mb_type[i8] = sub_mb_type;
fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
if(IS_INTRA(mb_type_col[y8])){
fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
continue;
}
ref0 = l1ref0[x8 + y8*b8_stride];
if(ref0 >= 0)
ref0 = map_col_to_list0[0][ref0 + ref_offset];
else{
ref0 = map_col_to_list0[1][l1ref1[x8 + y8*b8_stride] + ref_offset];
l1mv= l1mv1;
}
scale = dist_scale_factor[ref0];
fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
{
const int16_t *mv_col = l1mv[x8*3 + y8*b4_stride];
int my_col = (mv_col[1]<<y_shift)/2;
int mx = (scale * mv_col[0] + 128) >> 8;
int my = (scale * my_col + 128) >> 8;
fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-my_col), 4);
}
}
return;
}
/* one-to-one mv scaling */
if(IS_16X16(*mb_type)){
int ref, mv0, mv1;
fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
if(IS_INTRA(mb_type_col[0])){
ref=mv0=mv1=0;
}else{
const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0] + ref_offset]
: map_col_to_list0[1][l1ref1[0] + ref_offset];
const int scale = dist_scale_factor[ref0];
const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
int mv_l0[2];
mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
ref= ref0;
mv0= pack16to32(mv_l0[0],mv_l0[1]);
mv1= pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
}
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4);
fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4);
}else{
for(i8=0; i8<4; i8++){
const int x8 = i8&1;
const int y8 = i8>>1;
int ref0, scale;
const int16_t (*l1mv)[2]= l1mv0;
if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
continue;
h->sub_mb_type[i8] = sub_mb_type;
fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
if(IS_INTRA(mb_type_col[0])){
fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
continue;
}
ref0 = l1ref0[x8 + y8*b8_stride];
if(ref0 >= 0)
ref0 = map_col_to_list0[0][ref0 + ref_offset];
else{
ref0 = map_col_to_list0[1][l1ref1[x8 + y8*b8_stride] + ref_offset];
l1mv= l1mv1;
}
scale = dist_scale_factor[ref0];
fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
if(IS_SUB_8X8(sub_mb_type)){
const int16_t *mv_col = l1mv[x8*3 + y8*3*b4_stride];
int mx = (scale * mv_col[0] + 128) >> 8;
int my = (scale * mv_col[1] + 128) >> 8;
fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mx,my), 4);
fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mx-mv_col[0],my-mv_col[1]), 4);
}else
for(i4=0; i4<4; i4++){
const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*b4_stride];
int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]];
mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
*(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] =
pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
}
}
}
}
}
| false | FFmpeg | 055a6aa76a84ced5bebf988ef67e22ac3c5763c5 |
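In the temporal direct branch of ff_h264_pred_direct_motion above, the co-located list-1 motion vector is scaled by dist_scale_factor (a Q8 ratio derived from picture-order distances) to get the list-0 vector, and the list-1 vector is its complement. A tiny sketch with an invented scale and motion vector:

```c
/* Sketch of the temporal-direct MV scaling; scale and mv_col are invented. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const int scale = 160;                 /* Q8 value, roughly 0.625 */
    const int16_t mv_col[2] = { 24, -10 }; /* co-located list-1 MV    */
    int16_t mv_l0[2], mv_l1[2];

    for (int c = 0; c < 2; c++) {
        mv_l0[c] = (scale * mv_col[c] + 128) >> 8;  /* Q8 scale with rounding    */
        mv_l1[c] = mv_l0[c] - mv_col[c];            /* complement toward list 1  */
    }
    printf("l0 = (%d,%d), l1 = (%d,%d)\n", mv_l0[0], mv_l0[1], mv_l1[0], mv_l1[1]);
    return 0;
}
```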
15,328 |
static void avc_luma_mid_and_aver_dst_4x4_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst, int32_t dst_stride)
{
v16i8 src0, src1, src2, src3, src4;
v16i8 mask0, mask1, mask2;
v8i16 hz_out0, hz_out1, hz_out2, hz_out3;
v8i16 hz_out4, hz_out5, hz_out6, hz_out7, hz_out8;
v8i16 res0, res1, res2, res3;
v16u8 dst0, dst1, dst2, dst3;
v16u8 tmp0, tmp1, tmp2, tmp3;
LD_SB3(&luma_mask_arr[48], 16, mask0, mask1, mask2);
LD_SB5(src, src_stride, src0, src1, src2, src3, src4);
src += (5 * src_stride);
XORI_B5_128_SB(src0, src1, src2, src3, src4);
hz_out0 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src0, src1,
mask0, mask1, mask2);
hz_out2 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src2, src3,
mask0, mask1, mask2);
PCKOD_D2_SH(hz_out0, hz_out0, hz_out2, hz_out2, hz_out1, hz_out3);
hz_out4 = AVC_HORZ_FILTER_SH(src4, src4, mask0, mask1, mask2);
LD_SB4(src, src_stride, src0, src1, src2, src3);
XORI_B4_128_SB(src0, src1, src2, src3);
hz_out5 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src0, src1,
mask0, mask1, mask2);
hz_out7 = AVC_XOR_VSHF_B_AND_APPLY_6TAP_HORIZ_FILT_SH(src2, src3,
mask0, mask1, mask2);
PCKOD_D2_SH(hz_out5, hz_out5, hz_out7, hz_out7, hz_out6, hz_out8);
res0 = AVC_CALC_DPADD_H_6PIX_2COEFF_R_SH(hz_out0, hz_out1, hz_out2,
hz_out3, hz_out4, hz_out5);
res1 = AVC_CALC_DPADD_H_6PIX_2COEFF_R_SH(hz_out1, hz_out2, hz_out3,
hz_out4, hz_out5, hz_out6);
res2 = AVC_CALC_DPADD_H_6PIX_2COEFF_R_SH(hz_out2, hz_out3, hz_out4,
hz_out5, hz_out6, hz_out7);
res3 = AVC_CALC_DPADD_H_6PIX_2COEFF_R_SH(hz_out3, hz_out4, hz_out5,
hz_out6, hz_out7, hz_out8);
LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
tmp0 = PCKEV_XORI128_UB(res0, res1);
tmp1 = PCKEV_XORI128_UB(res2, res3);
PCKEV_D2_UB(dst1, dst0, dst3, dst2, tmp2, tmp3);
AVER_UB2_UB(tmp0, tmp2, tmp1, tmp3, tmp0, tmp1);
ST4x4_UB(tmp0, tmp1, 0, 2, 0, 2, dst, dst_stride);
}
| false | FFmpeg | 1181d93231e9b807965724587d363c1cfd5a1d0d |
15,329 |
static av_always_inline void dnxhd_decode_dct_block(DNXHDContext *ctx,
DCTELEM *block, int n,
int qscale,
int index_bits,
int level_bias,
int level_shift)
{
int i, j, index1, index2, len;
int level, component, sign;
const uint8_t *weight_matrix;
OPEN_READER(bs, &ctx->gb);
if (n&2) {
component = 1 + (n&1);
weight_matrix = ctx->cid_table->chroma_weight;
} else {
component = 0;
weight_matrix = ctx->cid_table->luma_weight;
}
UPDATE_CACHE(bs, &ctx->gb);
GET_VLC(len, bs, &ctx->gb, ctx->dc_vlc.table, DNXHD_DC_VLC_BITS, 1);
if (len) {
level = GET_CACHE(bs, &ctx->gb);
LAST_SKIP_BITS(bs, &ctx->gb, len);
sign = ~level >> 31;
level = (NEG_USR32(sign ^ level, len) ^ sign) - sign;
ctx->last_dc[component] += level;
}
block[0] = ctx->last_dc[component];
//av_log(ctx->avctx, AV_LOG_DEBUG, "dc %d\n", block[0]);
for (i = 1; ; i++) {
UPDATE_CACHE(bs, &ctx->gb);
GET_VLC(index1, bs, &ctx->gb, ctx->ac_vlc.table,
DNXHD_VLC_BITS, 2);
//av_log(ctx->avctx, AV_LOG_DEBUG, "index %d\n", index1);
level = ctx->cid_table->ac_level[index1];
if (!level) { /* EOB */
//av_log(ctx->avctx, AV_LOG_DEBUG, "EOB\n");
break;
}
sign = SHOW_SBITS(bs, &ctx->gb, 1);
SKIP_BITS(bs, &ctx->gb, 1);
if (ctx->cid_table->ac_index_flag[index1]) {
level += SHOW_UBITS(bs, &ctx->gb, index_bits) << 6;
SKIP_BITS(bs, &ctx->gb, index_bits);
}
if (ctx->cid_table->ac_run_flag[index1]) {
UPDATE_CACHE(bs, &ctx->gb);
GET_VLC(index2, bs, &ctx->gb, ctx->run_vlc.table,
DNXHD_VLC_BITS, 2);
i += ctx->cid_table->run[index2];
}
if (i > 63) {
av_log(ctx->avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", n, i);
break;
}
j = ctx->scantable.permutated[i];
//av_log(ctx->avctx, AV_LOG_DEBUG, "j %d\n", j);
//av_log(ctx->avctx, AV_LOG_DEBUG, "level %d, weight %d\n", level, weight_matrix[i]);
level = (2*level+1) * qscale * weight_matrix[i];
if (level_bias < 32 || weight_matrix[i] != level_bias)
level += level_bias;
level >>= level_shift;
//av_log(NULL, AV_LOG_DEBUG, "i %d, j %d, end level %d\n", i, j, level);
block[j] = (level^sign) - sign;
}
CLOSE_READER(bs, &ctx->gb);
}
| false | FFmpeg | b297c881d6b968bbb2bb7a3a0979429ee03b594c |
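dnxhd_decode_dct_block above applies signs branchlessly: sign is either 0 or -1 (SHOW_SBITS sign-extends the single bit to all ones), and (level ^ sign) - sign negates level exactly when sign is -1. A two-line sketch of the trick:

```c
/* Sketch of the branchless conditional negation used for AC levels. */
#include <stdio.h>

static int apply_sign(int level, int sign /* 0 or -1 */)
{
    return (level ^ sign) - sign;
}

int main(void)
{
    printf("%d %d\n", apply_sign(42, 0), apply_sign(42, -1));  /* 42 -42 */
    return 0;
}
```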
15,331 |
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
VideoState *is;
is = av_mallocz(sizeof(VideoState));
if (!is)
return NULL;
av_strlcpy(is->filename, filename, sizeof(is->filename));
is->iformat = iformat;
is->ytop = 0;
is->xleft = 0;
/* start video display */
is->pictq_mutex = SDL_CreateMutex();
is->pictq_cond = SDL_CreateCond();
is->subpq_mutex = SDL_CreateMutex();
is->subpq_cond = SDL_CreateCond();
packet_queue_init(&is->videoq);
packet_queue_init(&is->audioq);
packet_queue_init(&is->subtitleq);
is->continue_read_thread = SDL_CreateCond();
//FIXME: use a cleaner way to signal obsolete external clock...
update_external_clock_pts(is, (double)AV_NOPTS_VALUE);
update_external_clock_speed(is, 1.0);
is->audio_current_pts_drift = -av_gettime() / 1000000.0;
is->video_current_pts_drift = is->audio_current_pts_drift;
is->audio_clock_serial = -1;
is->video_clock_serial = -1;
is->av_sync_type = av_sync_type;
is->read_tid = SDL_CreateThread(read_thread, is);
if (!is->read_tid) {
av_free(is);
return NULL;
}
return is;
}
| false | FFmpeg | daece4c6745b42e8b1e171fb4bf485d5d64fc53f |
15,332 |
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
{
int i;
PutBitContext *pbc = &s->pb;
int mb_y = s->mb_y - !s->mb_x;
int ret;
MJpegContext *m;
m = s->mjpeg_ctx;
if (s->huffman == HUFFMAN_TABLE_OPTIMAL) {
ff_mjpeg_build_optimal_huffman(m);
// Replace the VLCs with the optimal ones.
// The default ones may be used for trellis during quantization.
ff_init_uni_ac_vlc(m->huff_size_ac_luminance, m->uni_ac_vlc_len);
ff_init_uni_ac_vlc(m->huff_size_ac_chrominance, m->uni_chroma_ac_vlc_len);
s->intra_ac_vlc_length =
s->intra_ac_vlc_last_length = m->uni_ac_vlc_len;
s->intra_chroma_ac_vlc_length =
s->intra_chroma_ac_vlc_last_length = m->uni_chroma_ac_vlc_len;
}
ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
s->pred, s->intra_matrix, s->chroma_intra_matrix);
ff_mjpeg_encode_picture_frame(s);
ret = ff_mpv_reallocate_putbitbuffer(s, put_bits_count(&s->pb) / 8 + 100,
put_bits_count(&s->pb) / 4 + 1000);
if (ret < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Buffer reallocation failed\n");
goto fail;
}
ff_mjpeg_escape_FF(pbc, s->esc_pos);
if((s->avctx->active_thread_type & FF_THREAD_SLICE) && mb_y < s->mb_height)
put_marker(pbc, RST0 + (mb_y&7));
s->esc_pos = put_bits_count(pbc) >> 3;
fail:
for(i=0; i<3; i++)
s->last_dc[i] = 128 << s->intra_dc_precision;
return ret;
}
| false | FFmpeg | 3e1507a9547ac09b6ff4372123cde09f19218f3d |
15,333 |
void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
void (*start_frame)(AVFilterLink *, AVFilterBufferRef *);
AVFilterPad *dst = link->dstpad;
FF_DPRINTF_START(NULL, start_frame); ff_dprintf_link(NULL, link, 0); dprintf(NULL, " "); ff_dprintf_ref(NULL, picref, 1);
if (!(start_frame = dst->start_frame))
start_frame = avfilter_default_start_frame;
/* prepare to copy the picture if it has insufficient permissions */
if ((dst->min_perms & picref->perms) != dst->min_perms ||
dst->rej_perms & picref->perms) {
av_log(link->dst, AV_LOG_DEBUG,
"frame copy needed (have perms %x, need %x, reject %x)\n",
picref->perms,
link->dstpad->min_perms, link->dstpad->rej_perms);
link->cur_buf = avfilter_get_video_buffer(link, dst->min_perms, link->w, link->h);
link->src_buf = picref;
avfilter_copy_buffer_ref_props(link->cur_buf, link->src_buf);
}
else
link->cur_buf = picref;
start_frame(link, link->cur_buf);
}
| false | FFmpeg | 0ccabeeaef77e240f2a44f78271a8914a23e239b |
15,334 |
static void decorrelation(PSContext *ps, float (*out)[32][2], const float (*s)[32][2], int is34)
{
float power[34][PS_QMF_TIME_SLOTS] = {{0}};
float transient_gain[34][PS_QMF_TIME_SLOTS];
float *peak_decay_nrg = ps->peak_decay_nrg;
float *power_smooth = ps->power_smooth;
float *peak_decay_diff_smooth = ps->peak_decay_diff_smooth;
float (*delay)[PS_QMF_TIME_SLOTS + PS_MAX_DELAY][2] = ps->delay;
float (*ap_delay)[PS_AP_LINKS][PS_QMF_TIME_SLOTS + PS_MAX_AP_DELAY][2] = ps->ap_delay;
const int8_t *k_to_i = is34 ? k_to_i_34 : k_to_i_20;
const float peak_decay_factor = 0.76592833836465f;
const float transient_impact = 1.5f;
const float a_smooth = 0.25f; ///< Smoothing coefficient
int i, k, m, n;
int n0 = 0, nL = 32;
static const int link_delay[] = { 3, 4, 5 };
static const float a[] = { 0.65143905753106f,
0.56471812200776f,
0.48954165955695f };
if (is34 != ps->is34bands_old) {
memset(ps->peak_decay_nrg, 0, sizeof(ps->peak_decay_nrg));
memset(ps->power_smooth, 0, sizeof(ps->power_smooth));
memset(ps->peak_decay_diff_smooth, 0, sizeof(ps->peak_decay_diff_smooth));
memset(ps->delay, 0, sizeof(ps->delay));
memset(ps->ap_delay, 0, sizeof(ps->ap_delay));
}
for (n = n0; n < nL; n++) {
for (k = 0; k < NR_BANDS[is34]; k++) {
int i = k_to_i[k];
power[i][n] += s[k][n][0] * s[k][n][0] + s[k][n][1] * s[k][n][1];
}
}
//Transient detection
for (i = 0; i < NR_PAR_BANDS[is34]; i++) {
for (n = n0; n < nL; n++) {
float decayed_peak = peak_decay_factor * peak_decay_nrg[i];
float denom;
peak_decay_nrg[i] = FFMAX(decayed_peak, power[i][n]);
power_smooth[i] += a_smooth * (power[i][n] - power_smooth[i]);
peak_decay_diff_smooth[i] += a_smooth * (peak_decay_nrg[i] - power[i][n] - peak_decay_diff_smooth[i]);
denom = transient_impact * peak_decay_diff_smooth[i];
transient_gain[i][n] = (denom > power_smooth[i]) ?
power_smooth[i] / denom : 1.0f;
}
}
//Decorrelation and transient reduction
// PS_AP_LINKS - 1
// -----
// | | Q_fract_allpass[k][m]*z^-link_delay[m] - a[m]*g_decay_slope[k]
//H[k][z] = z^-2 * phi_fract[k] * | | ----------------------------------------------------------------
// | | 1 - a[m]*g_decay_slope[k]*Q_fract_allpass[k][m]*z^-link_delay[m]
// m = 0
//d[k][z] (out) = transient_gain_mapped[k][z] * H[k][z] * s[k][z]
for (k = 0; k < NR_ALLPASS_BANDS[is34]; k++) {
int b = k_to_i[k];
float g_decay_slope = 1.f - DECAY_SLOPE * (k - DECAY_CUTOFF[is34]);
float ag[PS_AP_LINKS];
g_decay_slope = av_clipf(g_decay_slope, 0.f, 1.f);
memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
for (m = 0; m < PS_AP_LINKS; m++) {
memcpy(ap_delay[k][m], ap_delay[k][m]+numQMFSlots, 5*sizeof(ap_delay[k][m][0]));
ag[m] = a[m] * g_decay_slope;
}
for (n = n0; n < nL; n++) {
float in_re = delay[k][n+PS_MAX_DELAY-2][0] * phi_fract[is34][k][0] -
delay[k][n+PS_MAX_DELAY-2][1] * phi_fract[is34][k][1];
float in_im = delay[k][n+PS_MAX_DELAY-2][0] * phi_fract[is34][k][1] +
delay[k][n+PS_MAX_DELAY-2][1] * phi_fract[is34][k][0];
for (m = 0; m < PS_AP_LINKS; m++) {
float a_re = ag[m] * in_re;
float a_im = ag[m] * in_im;
float link_delay_re = ap_delay[k][m][n+5-link_delay[m]][0];
float link_delay_im = ap_delay[k][m][n+5-link_delay[m]][1];
float fractional_delay_re = Q_fract_allpass[is34][k][m][0];
float fractional_delay_im = Q_fract_allpass[is34][k][m][1];
ap_delay[k][m][n+5][0] = in_re;
ap_delay[k][m][n+5][1] = in_im;
in_re = link_delay_re * fractional_delay_re - link_delay_im * fractional_delay_im - a_re;
in_im = link_delay_re * fractional_delay_im + link_delay_im * fractional_delay_re - a_im;
ap_delay[k][m][n+5][0] += ag[m] * in_re;
ap_delay[k][m][n+5][1] += ag[m] * in_im;
}
out[k][n][0] = transient_gain[b][n] * in_re;
out[k][n][1] = transient_gain[b][n] * in_im;
}
}
for (; k < SHORT_DELAY_BAND[is34]; k++) {
memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
for (n = n0; n < nL; n++) {
//H = delay 14
out[k][n][0] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-14][0];
out[k][n][1] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-14][1];
}
}
for (; k < NR_BANDS[is34]; k++) {
memcpy(delay[k], delay[k]+nL, PS_MAX_DELAY*sizeof(delay[k][0]));
memcpy(delay[k]+PS_MAX_DELAY, s[k], numQMFSlots*sizeof(delay[k][0]));
for (n = n0; n < nL; n++) {
//H = delay 1
out[k][n][0] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-1][0];
out[k][n][1] = transient_gain[k_to_i[k]][n] * delay[k][n+PS_MAX_DELAY-1][1];
}
}
}
| false | FFmpeg | bf1945af301aff54c33352e75f17aec6cb5269d7 |
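The in_re/in_im computation in decorrelation above is a complex multiply: the delayed QMF sample is rotated by phi_fract[k] (and, inside the all-pass links, by Q_fract_allpass[k][m]). A minimal sketch with invented sample and rotation values, treating each float[2] as {real, imaginary}:

```c
/* Sketch of the complex rotation applied to a delayed QMF sample. */
#include <stdio.h>

static void cmul(float out[2], const float a[2], const float b[2])
{
    out[0] = a[0] * b[0] - a[1] * b[1];   /* real part      */
    out[1] = a[0] * b[1] + a[1] * b[0];   /* imaginary part */
}

int main(void)
{
    const float sample[2]    = { 0.5f, -0.25f };
    const float phi_fract[2] = { 0.8f,  0.6f };   /* unit-magnitude rotation */
    float rotated[2];

    cmul(rotated, sample, phi_fract);
    printf("rotated = (%f, %f)\n", rotated[0], rotated[1]);
    return 0;
}
```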
15,335 |
rdt_new_context (void)
{
PayloadContext *rdt = av_mallocz(sizeof(PayloadContext));
int ret = avformat_open_input(&rdt->rmctx, "", &ff_rdt_demuxer, NULL);
if (ret < 0) {
av_free(rdt);
return NULL;
}
return rdt;
}
| false | FFmpeg | 8692e6284f5169257a537c8fc25addf32fc67c87 |
15,336 |
void bdrv_img_create(const char *filename, const char *fmt,
const char *base_filename, const char *base_fmt,
char *options, uint64_t img_size, int flags, bool quiet,
Error **errp)
{
QemuOptsList *create_opts = NULL;
QemuOpts *opts = NULL;
const char *backing_fmt, *backing_file;
int64_t size;
BlockDriver *drv, *proto_drv;
Error *local_err = NULL;
int ret = 0;
/* Find driver and parse its options */
drv = bdrv_find_format(fmt);
if (!drv) {
error_setg(errp, "Unknown file format '%s'", fmt);
return;
}
proto_drv = bdrv_find_protocol(filename, true, errp);
if (!proto_drv) {
return;
}
if (!drv->create_opts) {
error_setg(errp, "Format driver '%s' does not support image creation",
drv->format_name);
return;
}
if (!proto_drv->create_opts) {
error_setg(errp, "Protocol driver '%s' does not support image creation",
proto_drv->format_name);
return;
}
create_opts = qemu_opts_append(create_opts, drv->create_opts);
create_opts = qemu_opts_append(create_opts, proto_drv->create_opts);
/* Create parameter list with default values */
opts = qemu_opts_create(create_opts, NULL, 0, &error_abort);
qemu_opt_set_number(opts, BLOCK_OPT_SIZE, img_size, &error_abort);
/* Parse -o options */
if (options) {
qemu_opts_do_parse(opts, options, NULL, &local_err);
if (local_err) {
error_report_err(local_err);
local_err = NULL;
error_setg(errp, "Invalid options for file format '%s'", fmt);
goto out;
}
}
if (base_filename) {
qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, base_filename, &local_err);
if (local_err) {
error_setg(errp, "Backing file not supported for file format '%s'",
fmt);
goto out;
}
}
if (base_fmt) {
qemu_opt_set(opts, BLOCK_OPT_BACKING_FMT, base_fmt, &local_err);
if (local_err) {
error_setg(errp, "Backing file format not supported for file "
"format '%s'", fmt);
goto out;
}
}
backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
if (backing_file) {
if (!strcmp(filename, backing_file)) {
error_setg(errp, "Error: Trying to create an image with the "
"same filename as the backing file");
goto out;
}
}
backing_fmt = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
/* The size for the image must always be specified, unless we have a backing
* file and we have not been forbidden from opening it. */
size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
if (backing_file && !(flags & BDRV_O_NO_BACKING)) {
BlockDriverState *bs;
char *full_backing = g_new0(char, PATH_MAX);
int back_flags;
QDict *backing_options = NULL;
bdrv_get_full_backing_filename_from_filename(filename, backing_file,
full_backing, PATH_MAX,
&local_err);
if (local_err) {
g_free(full_backing);
goto out;
}
/* backing files always opened read-only */
back_flags = flags;
back_flags &= ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
if (backing_fmt) {
backing_options = qdict_new();
qdict_put_str(backing_options, "driver", backing_fmt);
}
bs = bdrv_open(full_backing, NULL, backing_options, back_flags,
&local_err);
g_free(full_backing);
if (!bs && size != -1) {
/* Couldn't open BS, but we have a size, so it's nonfatal */
warn_reportf_err(local_err,
"Could not verify backing image. "
"This may become an error in future versions.\n");
local_err = NULL;
} else if (!bs) {
/* Couldn't open bs, do not have size */
error_append_hint(&local_err,
"Could not open backing image to determine size.\n");
goto out;
} else {
if (size == -1) {
/* Opened BS, have no size */
size = bdrv_getlength(bs);
if (size < 0) {
error_setg_errno(errp, -size, "Could not get size of '%s'",
backing_file);
bdrv_unref(bs);
goto out;
}
qemu_opt_set_number(opts, BLOCK_OPT_SIZE, size, &error_abort);
}
bdrv_unref(bs);
}
} /* (backing_file && !(flags & BDRV_O_NO_BACKING)) */
if (size == -1) {
error_setg(errp, "Image creation needs a size parameter");
goto out;
}
if (!quiet) {
printf("Formatting '%s', fmt=%s ", filename, fmt);
qemu_opts_print(opts, " ");
puts("");
}
ret = bdrv_create(drv, filename, opts, &local_err);
if (ret == -EFBIG) {
/* This is generally a better message than whatever the driver would
* deliver (especially because of the cluster_size_hint), since that
* is most probably not much different from "image too large". */
const char *cluster_size_hint = "";
if (qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 0)) {
cluster_size_hint = " (try using a larger cluster size)";
}
error_setg(errp, "The image size is too large for file format '%s'"
"%s", fmt, cluster_size_hint);
error_free(local_err);
local_err = NULL;
}
out:
qemu_opts_del(opts);
qemu_opts_free(create_opts);
error_propagate(errp, local_err);
}
| false |
qemu
|
a8b42a1c09e751b9f921a1a73756411fc118020b
|
15,338 |
static void virtio_ccw_post_plugged(DeviceState *d, Error **errp)
{
VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
if (!virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1)) {
/* A backend didn't support modern virtio. */
dev->max_rev = 0;
}
}
| false |
qemu
|
d1b4259f1ab18af24e6a297edb6a8f71691f3256
|
15,339 |
static void sun4m_load_kernel(long vram_size, int ram_size, int boot_device,
const char *kernel_filename,
const char *kernel_cmdline,
const char *initrd_filename,
int machine_id)
{
int ret, linux_boot;
char buf[1024];
unsigned int i;
long prom_offset, initrd_size, kernel_size;
linux_boot = (kernel_filename != NULL);
prom_offset = ram_size + vram_size;
cpu_register_physical_memory(PROM_ADDR,
(PROM_SIZE_MAX + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK,
prom_offset | IO_MEM_ROM);
snprintf(buf, sizeof(buf), "%s/%s", bios_dir, PROM_FILENAME);
ret = load_elf(buf, 0, NULL, NULL, NULL);
if (ret < 0) {
fprintf(stderr, "qemu: could not load prom '%s'\n",
buf);
exit(1);
}
kernel_size = 0;
if (linux_boot) {
kernel_size = load_elf(kernel_filename, -0xf0000000, NULL, NULL, NULL);
if (kernel_size < 0)
kernel_size = load_aout(kernel_filename, phys_ram_base + KERNEL_LOAD_ADDR);
if (kernel_size < 0)
kernel_size = load_image(kernel_filename, phys_ram_base + KERNEL_LOAD_ADDR);
if (kernel_size < 0) {
fprintf(stderr, "qemu: could not load kernel '%s'\n",
kernel_filename);
exit(1);
}
/* load initrd */
initrd_size = 0;
if (initrd_filename) {
initrd_size = load_image(initrd_filename, phys_ram_base + INITRD_LOAD_ADDR);
if (initrd_size < 0) {
fprintf(stderr, "qemu: could not load initial ram disk '%s'\n",
initrd_filename);
exit(1);
}
}
if (initrd_size > 0) {
for (i = 0; i < 64 * TARGET_PAGE_SIZE; i += TARGET_PAGE_SIZE) {
if (ldl_raw(phys_ram_base + KERNEL_LOAD_ADDR + i)
== 0x48647253) { // HdrS
stl_raw(phys_ram_base + KERNEL_LOAD_ADDR + i + 16, INITRD_LOAD_ADDR);
stl_raw(phys_ram_base + KERNEL_LOAD_ADDR + i + 20, initrd_size);
break;
}
}
}
}
nvram_init(nvram, (uint8_t *)&nd_table[0].macaddr, kernel_cmdline,
boot_device, ram_size, kernel_size, graphic_width,
graphic_height, graphic_depth, machine_id);
}
| false |
qemu
|
b3ceef24f4fee8d5ed96b8c4a5d3e80c0a651f0b
|
15,340 |
static void gen_srq(DisasContext *ctx)
{
int l1 = gen_new_label();
TCGv t0 = tcg_temp_new();
TCGv t1 = tcg_temp_new();
tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
tcg_gen_subfi_tl(t1, 32, t1);
tcg_gen_shl_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
tcg_gen_or_tl(t1, t0, t1);
gen_store_spr(SPR_MQ, t1);
tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x20);
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
gen_set_label(l1);
tcg_temp_free(t0);
tcg_temp_free(t1);
if (unlikely(Rc(ctx->opcode) != 0))
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
| false |
qemu
|
42a268c241183877192c376d03bd9b6d527407c7
|
15,342 |
static void local_mapped_file_attr(int dirfd, const char *name,
struct stat *stbuf)
{
FILE *fp;
char buf[ATTR_MAX];
int map_dirfd;
map_dirfd = openat_dir(dirfd, VIRTFS_META_DIR);
if (map_dirfd == -1) {
return;
}
fp = local_fopenat(map_dirfd, name, "r");
close_preserve_errno(map_dirfd);
if (!fp) {
return;
}
memset(buf, 0, ATTR_MAX);
while (fgets(buf, ATTR_MAX, fp)) {
if (!strncmp(buf, "virtfs.uid", 10)) {
stbuf->st_uid = atoi(buf+11);
} else if (!strncmp(buf, "virtfs.gid", 10)) {
stbuf->st_gid = atoi(buf+11);
} else if (!strncmp(buf, "virtfs.mode", 11)) {
stbuf->st_mode = atoi(buf+12);
} else if (!strncmp(buf, "virtfs.rdev", 11)) {
stbuf->st_rdev = atoi(buf+12);
}
memset(buf, 0, ATTR_MAX);
}
fclose(fp);
}
| false |
qemu
|
81ffbf5ab1458e357a761f1272105a55829b351e
|
15,343 |
static int save_user_regs(CPUPPCState *env, struct target_mcontext *frame,
int sigret)
{
target_ulong msr = env->msr;
int i;
target_ulong ccr = 0;
/* In general, the kernel attempts to be intelligent about what it
needs to save for Altivec/FP/SPE registers. We don't care that
much, so we just go ahead and save everything. */
/* Save general registers. */
for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
if (__put_user(env->gpr[i], &frame->mc_gregs[i])) {
return 1;
}
}
if (__put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP])
|| __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR])
|| __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK])
|| __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]))
return 1;
for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
ccr |= env->crf[i] << (32 - ((i + 1) * 4));
}
if (__put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]))
return 1;
/* Save Altivec registers if necessary. */
if (env->insns_flags & PPC_ALTIVEC) {
for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
ppc_avr_t *avr = &env->avr[i];
ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
if (__put_user(avr->u64[0], &vreg->u64[0]) ||
__put_user(avr->u64[1], &vreg->u64[1])) {
return 1;
}
}
/* Set MSR_VR in the saved MSR value to indicate that
frame->mc_vregs contains valid data. */
msr |= MSR_VR;
if (__put_user((uint32_t)env->spr[SPR_VRSAVE],
&frame->mc_vregs.altivec[32].u32[3]))
return 1;
}
/* Save floating point registers. */
if (env->insns_flags & PPC_FLOAT) {
for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
if (__put_user(env->fpr[i], &frame->mc_fregs[i])) {
return 1;
}
}
if (__put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]))
return 1;
}
/* Save SPE registers. The kernel only saves the high half. */
if (env->insns_flags & PPC_SPE) {
#if defined(TARGET_PPC64)
for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
if (__put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i])) {
return 1;
}
}
#else
for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
if (__put_user(env->gprh[i], &frame->mc_vregs.spe[i])) {
return 1;
}
}
#endif
/* Set MSR_SPE in the saved MSR value to indicate that
frame->mc_vregs contains valid data. */
msr |= MSR_SPE;
if (__put_user(env->spe_fscr, &frame->mc_vregs.spe[32]))
return 1;
}
/* Store MSR. */
if (__put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]))
return 1;
/* Set up the sigreturn trampoline: li r0,sigret; sc. */
if (sigret) {
if (__put_user(0x38000000UL | sigret, &frame->tramp[0]) ||
__put_user(0x44000002UL, &frame->tramp[1])) {
return 1;
}
}
return 0;
}
| false |
qemu
|
c650c008e326f3a1e84083bc269265456057a212
|
15,344 |
static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code, int frame_type){
AVFormatContext *s= nut->avf;
StreamContext *stream;
ByteIOContext *bc = &s->pb;
int size, flags, size_mul, size_lsb, stream_id;
int key_frame = 0;
int64_t pts = 0;
const int prefix_len= frame_type == 2 ? 8+1 : 1;
const int64_t frame_start= url_ftell(bc) - prefix_len;
flags= nut->frame_code[frame_code].flags;
size_mul= nut->frame_code[frame_code].size_mul;
size_lsb= nut->frame_code[frame_code].size_lsb;
stream_id= nut->frame_code[frame_code].stream_id_plus1 - 1;
if(flags & FLAG_FRAME_TYPE){
reset(s);
if(get_packetheader(nut, bc, prefix_len, 0) < 0)
return -1;
if(frame_type!=2)
frame_type= 1;
}
if(stream_id==-1)
stream_id= get_v(bc);
if(stream_id >= s->nb_streams){
av_log(s, AV_LOG_ERROR, "illegal stream_id\n");
return -1;
}
stream= &nut->stream[stream_id];
// av_log(s, AV_LOG_DEBUG, "ft:%d ppts:%d %d %d\n", frame_type, stream->lru_pts_delta[0], stream->lru_pts_delta[1], stream->lru_pts_delta[2]);
if(flags & FLAG_PRED_KEY_FRAME){
if(flags & FLAG_KEY_FRAME)
key_frame= !stream->last_key_frame;
else
key_frame= stream->last_key_frame;
}else{
key_frame= !!(flags & FLAG_KEY_FRAME);
}
if(flags & FLAG_PTS){
if(flags & FLAG_FULL_PTS){
pts= get_v(bc);
if(frame_type && key_frame){
av_add_index_entry(
s->streams[stream_id],
frame_start,
pts,
frame_start - nut->stream[stream_id].last_sync_pos,
AVINDEX_KEYFRAME);
nut->stream[stream_id].last_sync_pos= frame_start;
assert(nut->packet_start == frame_start);
}
}else{
int64_t mask = (1<<stream->msb_timestamp_shift)-1;
int64_t delta= stream->last_pts - mask/2;
pts= ((get_v(bc) - delta)&mask) + delta;
}
}else{
pts= stream->last_pts + stream->lru_pts_delta[(flags&12)>>2];
}
if(size_mul <= size_lsb){
size= stream->lru_size[size_lsb - size_mul];
}else{
if(flags & FLAG_DATA_SIZE)
size= size_mul*get_v(bc) + size_lsb;
else
size= size_lsb;
}
//av_log(s, AV_LOG_DEBUG, "fs:%lld fc:%d ft:%d kf:%d pts:%lld size:%d\n", frame_start, frame_code, frame_type, key_frame, pts, size);
if(url_ftell(bc) - nut->packet_start + size > nut->written_packet_size){
av_log(s, AV_LOG_ERROR, "frame size too large\n");
return -1;
}
av_new_packet(pkt, size);
get_buffer(bc, pkt->data, size);
pkt->stream_index = stream_id;
if (key_frame)
pkt->flags |= PKT_FLAG_KEY;
pkt->pts = pts * AV_TIME_BASE * stream->rate_den / stream->rate_num;
update(nut, stream_id, frame_start, frame_type, frame_code, key_frame, size, pts);
return 0;
}
| false |
FFmpeg
|
aec8f88a9eb0ef8d684a2e76a152c9090da4af51
|
15,345 |
static void dec_sr(DisasContext *dc)
{
if (dc->format == OP_FMT_RI) {
LOG_DIS("sri r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
} else {
LOG_DIS("sr r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
}
/* The real CPU (w/o hardware shifter) only supports right shift by exactly
* one bit */
if (dc->format == OP_FMT_RI) {
if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
qemu_log_mask(LOG_GUEST_ERROR,
"hardware shifter is not available\n");
t_gen_illegal_insn(dc);
return;
}
tcg_gen_sari_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
} else {
int l1 = gen_new_label();
int l2 = gen_new_label();
TCGv t0 = tcg_temp_local_new();
tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
if (!(dc->features & LM32_FEATURE_SHIFT)) {
tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
t_gen_illegal_insn(dc);
tcg_gen_br(l2);
}
gen_set_label(l1);
tcg_gen_sar_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
gen_set_label(l2);
tcg_temp_free(t0);
}
}
| false |
qemu
|
42a268c241183877192c376d03bd9b6d527407c7
|
15,346 |
static ssize_t handle_aiocb_discard(RawPosixAIOData *aiocb)
{
int ret = -EOPNOTSUPP;
BDRVRawState *s = aiocb->bs->opaque;
if (s->has_discard == 0) {
return 0;
}
if (aiocb->aio_type & QEMU_AIO_BLKDEV) {
#ifdef BLKDISCARD
do {
uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes };
if (ioctl(aiocb->aio_fildes, BLKDISCARD, range) == 0) {
return 0;
}
} while (errno == EINTR);
ret = -errno;
#endif
} else {
#ifdef CONFIG_XFS
if (s->is_xfs) {
return xfs_discard(s, aiocb->aio_offset, aiocb->aio_nbytes);
}
#endif
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
do {
if (fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
aiocb->aio_offset, aiocb->aio_nbytes) == 0) {
return 0;
}
} while (errno == EINTR);
ret = -errno;
#endif
}
if (ret == -ENODEV || ret == -ENOSYS || ret == -EOPNOTSUPP ||
ret == -ENOTTY) {
s->has_discard = 0;
ret = 0;
}
return ret;
}
| false |
qemu
|
7ce21016b69b512bf4777965a4292318f2bc7544
|
15,347 |
void AUD_register_card (const char *name, QEMUSoundCard *card)
{
audio_init ();
card->name = qemu_strdup (name);
memset (&card->entries, 0, sizeof (card->entries));
LIST_INSERT_HEAD (&glob_audio_state.card_head, card, entries);
}
| false |
qemu
|
72cf2d4f0e181d0d3a3122e04129c58a95da713e
|
15,348 |
static void rtc_get_date(Object *obj, Visitor *v, void *opaque,
const char *name, Error **errp)
{
Error *err = NULL;
RTCState *s = MC146818_RTC(obj);
struct tm current_tm;
rtc_update_time(s);
rtc_get_time(s, &current_tm);
visit_start_struct(v, NULL, "struct tm", name, 0, &err);
if (err) {
goto out;
}
visit_type_int32(v, &current_tm.tm_year, "tm_year", &err);
if (err) {
goto out_end;
}
visit_type_int32(v, &current_tm.tm_mon, "tm_mon", &err);
if (err) {
goto out_end;
}
visit_type_int32(v, &current_tm.tm_mday, "tm_mday", &err);
if (err) {
goto out_end;
}
visit_type_int32(v, &current_tm.tm_hour, "tm_hour", &err);
if (err) {
goto out_end;
}
visit_type_int32(v, &current_tm.tm_min, "tm_min", &err);
if (err) {
goto out_end;
}
visit_type_int32(v, &current_tm.tm_sec, "tm_sec", &err);
if (err) {
goto out_end;
}
out_end:
error_propagate(errp, err);
err = NULL;
visit_end_struct(v, errp);
out:
error_propagate(errp, err);
}
| false |
qemu
|
8e099d14f5233f330c4a6f03ff655219cd789c8f
|
15,349 |
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
int i;
vaddr &= TARGET_PAGE_MASK;
i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#endif
#if (NB_MMU_MODES >= 4)
tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#if (NB_MMU_MODES >= 5)
tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
#endif
}
| false |
qemu
|
cfde4bd93100c58c0bfaed76deefb144caac488f
|
15,350 |
ioapic_mem_write(void *opaque, target_phys_addr_t addr, uint64_t val,
unsigned int size)
{
IOAPICCommonState *s = opaque;
int index;
switch (addr & 0xff) {
case IOAPIC_IOREGSEL:
s->ioregsel = val;
break;
case IOAPIC_IOWIN:
if (size != 4) {
break;
}
DPRINTF("write: %08x = %08" PRIx64 "\n", s->ioregsel, val);
switch (s->ioregsel) {
case IOAPIC_REG_ID:
s->id = (val >> IOAPIC_ID_SHIFT) & IOAPIC_ID_MASK;
break;
case IOAPIC_REG_VER:
case IOAPIC_REG_ARB:
break;
default:
index = (s->ioregsel - IOAPIC_REG_REDTBL_BASE) >> 1;
if (index >= 0 && index < IOAPIC_NUM_PINS) {
if (s->ioregsel & 1) {
s->ioredtbl[index] &= 0xffffffff;
s->ioredtbl[index] |= (uint64_t)val << 32;
} else {
s->ioredtbl[index] &= ~0xffffffffULL;
s->ioredtbl[index] |= val;
}
ioapic_service(s);
}
}
break;
}
}
| false |
qemu
|
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
|
15,351 |
int qemu_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
{
int ret;
#ifdef CONFIG_ACCEPT4
ret = accept4(s, addr, addrlen, SOCK_CLOEXEC);
if (ret != -1 || errno != EINVAL) {
return ret;
}
#endif
ret = accept(s, addr, addrlen);
if (ret >= 0) {
qemu_set_cloexec(ret);
}
return ret;
}
| false |
qemu
|
347ed55cd109864b02dd29bb7e6cda1622e8019e
|
15,352 |
static void ich9_lpc_config_write(PCIDevice *d,
uint32_t addr, uint32_t val, int len)
{
ICH9LPCState *lpc = ICH9_LPC_DEVICE(d);
uint32_t rcba_old = pci_get_long(d->config + ICH9_LPC_RCBA);
pci_default_write_config(d, addr, val, len);
if (ranges_overlap(addr, len, ICH9_LPC_PMBASE, 4) ||
ranges_overlap(addr, len, ICH9_LPC_ACPI_CTRL, 1)) {
ich9_lpc_pmbase_sci_update(lpc);
}
if (ranges_overlap(addr, len, ICH9_LPC_RCBA, 4)) {
ich9_lpc_rcba_update(lpc, rcba_old);
}
if (ranges_overlap(addr, len, ICH9_LPC_PIRQA_ROUT, 4)) {
pci_bus_fire_intx_routing_notifier(lpc->d.bus);
}
if (ranges_overlap(addr, len, ICH9_LPC_PIRQE_ROUT, 4)) {
pci_bus_fire_intx_routing_notifier(lpc->d.bus);
}
if (ranges_overlap(addr, len, ICH9_LPC_GEN_PMCON_1, 8)) {
ich9_lpc_pmcon_update(lpc);
}
}
| false |
qemu
|
fd56e0612b6454a282fa6a953fdb09281a98c589
|
15,353 |
static uint64_t omap_mpui_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
struct omap_mpu_state_s *s = (struct omap_mpu_state_s *) opaque;
if (size != 4) {
return omap_badwidth_read32(opaque, addr);
}
switch (addr) {
case 0x00: /* CTRL */
return s->mpui_ctrl;
case 0x04: /* DEBUG_ADDR */
return 0x01ffffff;
case 0x08: /* DEBUG_DATA */
return 0xffffffff;
case 0x0c: /* DEBUG_FLAG */
return 0x00000800;
case 0x10: /* STATUS */
return 0x00000000;
/* Not in OMAP310 */
case 0x14: /* DSP_STATUS */
case 0x18: /* DSP_BOOT_CONFIG */
return 0x00000000;
case 0x1c: /* DSP_MPUI_CONFIG */
return 0x0000ffff;
}
OMAP_BAD_REG(addr);
return 0;
}
| false |
qemu
|
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
|
15,354 |
GuestFsfreezeStatus qmp_guest_fsfreeze_status(Error **err)
{
return guest_fsfreeze_state.status;
}
| false |
qemu
|
f22d85e9e67262db34504f4079745f9843da6a92
|
15,355 |
static av_cold int nvenc_setup_surfaces(AVCodecContext *avctx)
{
NvencContext *ctx = avctx->priv_data;
int i, res;
ctx->surfaces = av_mallocz_array(ctx->nb_surfaces, sizeof(*ctx->surfaces));
if (!ctx->surfaces)
return AVERROR(ENOMEM);
ctx->timestamp_list = av_fifo_alloc(ctx->nb_surfaces * sizeof(int64_t));
if (!ctx->timestamp_list)
return AVERROR(ENOMEM);
ctx->unused_surface_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(NvencSurface*));
if (!ctx->unused_surface_queue)
return AVERROR(ENOMEM);
ctx->output_surface_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(NvencSurface*));
if (!ctx->output_surface_queue)
return AVERROR(ENOMEM);
ctx->output_surface_ready_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(NvencSurface*));
if (!ctx->output_surface_ready_queue)
return AVERROR(ENOMEM);
res = nvenc_push_context(avctx);
if (res < 0)
return res;
for (i = 0; i < ctx->nb_surfaces; i++) {
if ((res = nvenc_alloc_surface(avctx, i)) < 0)
{
nvenc_pop_context(avctx);
return res;
}
}
res = nvenc_pop_context(avctx);
if (res < 0)
return res;
return 0;
}
| false |
FFmpeg
|
4e93f00b06d9623ea9639ba7185cdacdf11c2672
|
15,357 |
static void do_transmit_packets(dp8393xState *s)
{
uint16_t data[12];
int width, size;
int tx_len, len;
uint16_t i;
width = (s->regs[SONIC_DCR] & SONIC_DCR_DW) ? 2 : 1;
while (1) {
/* Read memory */
DPRINTF("Transmit packet at %08x\n",
(s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_CTDA]);
size = sizeof(uint16_t) * 6 * width;
s->regs[SONIC_TTDA] = s->regs[SONIC_CTDA];
s->memory_rw(s->mem_opaque,
((s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_TTDA]) + sizeof(uint16_t) * width,
(uint8_t *)data, size, 0);
tx_len = 0;
/* Update registers */
s->regs[SONIC_TCR] = data[0 * width] & 0xf000;
s->regs[SONIC_TPS] = data[1 * width];
s->regs[SONIC_TFC] = data[2 * width];
s->regs[SONIC_TSA0] = data[3 * width];
s->regs[SONIC_TSA1] = data[4 * width];
s->regs[SONIC_TFS] = data[5 * width];
/* Handle programmable interrupt */
if (s->regs[SONIC_TCR] & SONIC_TCR_PINT) {
s->regs[SONIC_ISR] |= SONIC_ISR_PINT;
} else {
s->regs[SONIC_ISR] &= ~SONIC_ISR_PINT;
}
for (i = 0; i < s->regs[SONIC_TFC]; ) {
/* Append fragment */
len = s->regs[SONIC_TFS];
if (tx_len + len > sizeof(s->tx_buffer)) {
len = sizeof(s->tx_buffer) - tx_len;
}
s->memory_rw(s->mem_opaque,
(s->regs[SONIC_TSA1] << 16) | s->regs[SONIC_TSA0],
&s->tx_buffer[tx_len], len, 0);
tx_len += len;
i++;
if (i != s->regs[SONIC_TFC]) {
/* Read next fragment details */
size = sizeof(uint16_t) * 3 * width;
s->memory_rw(s->mem_opaque,
((s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_TTDA]) + sizeof(uint16_t) * (4 + 3 * i) * width,
(uint8_t *)data, size, 0);
s->regs[SONIC_TSA0] = data[0 * width];
s->regs[SONIC_TSA1] = data[1 * width];
s->regs[SONIC_TFS] = data[2 * width];
}
}
/* Handle Ethernet checksum */
if (!(s->regs[SONIC_TCR] & SONIC_TCR_CRCI)) {
/* Don't append FCS there, to look like slirp packets
* which don't have one */
} else {
/* Remove existing FCS */
tx_len -= 4;
}
if (s->regs[SONIC_RCR] & (SONIC_RCR_LB1 | SONIC_RCR_LB0)) {
/* Loopback */
s->regs[SONIC_TCR] |= SONIC_TCR_CRSL;
if (s->vc->fd_can_read(s)) {
s->loopback_packet = 1;
s->vc->receive(s, s->tx_buffer, tx_len);
}
} else {
/* Transmit packet */
qemu_send_packet(s->vc, s->tx_buffer, tx_len);
}
s->regs[SONIC_TCR] |= SONIC_TCR_PTX;
/* Write status */
data[0 * width] = s->regs[SONIC_TCR] & 0x0fff; /* status */
size = sizeof(uint16_t) * width;
s->memory_rw(s->mem_opaque,
(s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_TTDA],
(uint8_t *)data, size, 1);
if (!(s->regs[SONIC_CR] & SONIC_CR_HTX)) {
/* Read footer of packet */
size = sizeof(uint16_t) * width;
s->memory_rw(s->mem_opaque,
((s->regs[SONIC_UTDA] << 16) | s->regs[SONIC_TTDA]) + sizeof(uint16_t) * (4 + 3 * s->regs[SONIC_TFC]) * width,
(uint8_t *)data, size, 0);
s->regs[SONIC_CTDA] = data[0 * width] & ~0x1;
if (data[0 * width] & 0x1) {
/* EOL detected */
break;
}
}
}
/* Done */
s->regs[SONIC_CR] &= ~SONIC_CR_TXP;
s->regs[SONIC_ISR] |= SONIC_ISR_TXDN;
dp8393x_update_irq(s);
}
| false |
qemu
|
e3f5ec2b5e92706e3b807059f79b1fb5d936e567
|
15,359 |
static void msrle_decode_pal8(MsrleContext *s)
{
int stream_ptr = 0;
unsigned char rle_code;
unsigned char extra_byte;
unsigned char stream_byte;
int pixel_ptr = 0;
int row_dec = s->frame.linesize[0];
int row_ptr = (s->avctx->height - 1) * row_dec;
int frame_size = row_dec * s->avctx->height;
while (row_ptr >= 0) {
FETCH_NEXT_STREAM_BYTE();
rle_code = stream_byte;
if (rle_code == 0) {
/* fetch the next byte to see how to handle escape code */
FETCH_NEXT_STREAM_BYTE();
if (stream_byte == 0) {
/* line is done, goto the next one */
row_ptr -= row_dec;
pixel_ptr = 0;
} else if (stream_byte == 1) {
/* decode is done */
return;
} else if (stream_byte == 2) {
/* reposition frame decode coordinates */
FETCH_NEXT_STREAM_BYTE();
pixel_ptr += stream_byte;
FETCH_NEXT_STREAM_BYTE();
row_ptr -= stream_byte * row_dec;
} else {
/* copy pixels from encoded stream */
if ((row_ptr + pixel_ptr + stream_byte > frame_size) ||
(row_ptr < 0)) {
printf(" MS RLE: frame ptr just went out of bounds (1)\n");
return;
}
rle_code = stream_byte;
extra_byte = stream_byte & 0x01;
if (stream_ptr + rle_code + extra_byte > s->size) {
printf(" MS RLE: stream ptr just went out of bounds (2)\n");
return;
}
while (rle_code--) {
FETCH_NEXT_STREAM_BYTE();
s->frame.data[0][row_ptr + pixel_ptr] = stream_byte;
pixel_ptr++;
}
/* if the RLE code is odd, skip a byte in the stream */
if (extra_byte)
stream_ptr++;
}
} else {
/* decode a run of data */
if ((row_ptr + pixel_ptr + stream_byte > frame_size) ||
(row_ptr < 0)) {
printf(" MS RLE: frame ptr just went out of bounds (2)\n");
return;
}
FETCH_NEXT_STREAM_BYTE();
while(rle_code--) {
s->frame.data[0][row_ptr + pixel_ptr] = stream_byte;
pixel_ptr++;
}
}
}
/* make the palette available */
memcpy(s->frame.data[1], s->palette, 256 * 4);
/* one last sanity check on the way out */
if (stream_ptr < s->size)
printf(" MS RLE: ended frame decode with bytes left over (%d < %d)\n",
stream_ptr, s->size);
}
| false |
FFmpeg
|
875efafac8afe22971c87fc7dfee83d27364ab50
|
15,360 |
static int rv30_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst)
{
int i, j, k;
for(i = 0; i < 4; i++, dst += r->intra_types_stride - 4){
for(j = 0; j < 4; j+= 2){
int code = svq3_get_ue_golomb(gb) << 1;
if(code >= 81*2){
av_log(r->s.avctx, AV_LOG_ERROR, "Incorrect intra prediction code\n");
return -1;
}
for(k = 0; k < 2; k++){
int A = dst[-r->intra_types_stride] + 1;
int B = dst[-1] + 1;
*dst++ = rv30_itype_from_context[A * 90 + B * 9 + rv30_itype_code[code + k]];
if(dst[-1] == 9){
av_log(r->s.avctx, AV_LOG_ERROR, "Incorrect intra prediction mode\n");
return -1;
}
}
}
}
return 0;
}
| false |
FFmpeg
|
979bea13003ef489d95d2538ac2fb1c26c6f103b
|
15,361 |
static void put_cabac(CABACContext *c, uint8_t * const state, int bit){
int RangeLPS= ff_h264_lps_range[2*(c->range&0xC0) + *state];
if(bit == ((*state)&1)){
c->range -= RangeLPS;
*state= ff_h264_mps_state[*state];
}else{
c->low += c->range - RangeLPS;
c->range = RangeLPS;
*state= ff_h264_lps_state[*state];
}
renorm_cabac_encoder(c);
}
| false |
FFmpeg
|
64d779f2f7607070a87b0a70edeba5e51834ce85
|
15,362 |
static int wv_read_header(AVFormatContext *s,
AVFormatParameters *ap)
{
ByteIOContext *pb = s->pb;
WVContext *wc = s->priv_data;
AVStream *st;
if(wv_read_block_header(s, pb) < 0)
return -1;
wc->block_parsed = 0;
/* now we are ready: build format streams */
st = av_new_stream(s, 0);
if (!st)
return -1;
st->codec->codec_type = CODEC_TYPE_AUDIO;
st->codec->codec_id = CODEC_ID_WAVPACK;
st->codec->channels = wc->chan;
st->codec->sample_rate = wc->rate;
st->codec->bits_per_coded_sample = wc->bpp;
av_set_pts_info(st, 64, 1, wc->rate);
s->start_time = 0;
s->duration = (int64_t)wc->samples * AV_TIME_BASE / st->codec->sample_rate;
if(!url_is_streamed(s->pb)) {
int64_t cur = url_ftell(s->pb);
ff_ape_parse_tag(s);
if(!av_metadata_get(s->metadata, "", NULL, AV_METADATA_IGNORE_SUFFIX))
ff_id3v1_read(s);
url_fseek(s->pb, cur, SEEK_SET);
}
return 0;
}
| false |
FFmpeg
|
aa926a480f2f84a87dc1fbf42d40fe23bae82ae1
|
15,363 |
static int mov_read_esds(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
return ff_mov_read_esds(c->fc, pb, atom);
}
| true |
FFmpeg
|
86dfcfd0e30d6645eea2c63c1c60a0550e7c97ea
|
15,364 |
void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
assert(cpu_single_env);
raise(SIG_IPI);
#else
abort();
#endif
}
| true |
qemu
|
12d4536f7d911b6d87a766ad7300482ea663cea2
|
15,365 |
static uint64_t get_cluster_offset(BlockDriverState *bs,
uint64_t offset, int *num)
{
BDRVQcowState *s = bs->opaque;
int l1_index, l2_index;
uint64_t l2_offset, *l2_table, cluster_offset;
int l1_bits, c;
int index_in_cluster, nb_available, nb_needed, nb_clusters;
index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
nb_needed = *num + index_in_cluster;
l1_bits = s->l2_bits + s->cluster_bits;
/* compute how many bytes there are between the offset and
* the end of the l1 entry
*/
nb_available = (1 << l1_bits) - (offset & ((1 << l1_bits) - 1));
/* compute the number of available sectors */
nb_available = (nb_available >> 9) + index_in_cluster;
cluster_offset = 0;
/* seek the l2 offset in the l1 table */
l1_index = offset >> l1_bits;
if (l1_index >= s->l1_size)
goto out;
l2_offset = s->l1_table[l1_index];
/* seek the l2 table of the given l2 offset */
if (!l2_offset)
goto out;
/* load the l2 table in memory */
l2_offset &= ~QCOW_OFLAG_COPIED;
l2_table = l2_load(bs, l2_offset);
if (l2_table == NULL)
return 0;
/* find the cluster offset for the given disk offset */
l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
cluster_offset = be64_to_cpu(l2_table[l2_index]);
nb_clusters = size_to_clusters(s, nb_needed << 9);
if (!cluster_offset) {
/* how many empty clusters ? */
c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
} else {
/* how many allocated clusters ? */
c = count_contiguous_clusters(nb_clusters, s->cluster_size,
&l2_table[l2_index], QCOW_OFLAG_COPIED);
}
nb_available = (c * s->cluster_sectors);
out:
if (nb_available > nb_needed)
nb_available = nb_needed;
*num = nb_available - index_in_cluster;
return cluster_offset & ~QCOW_OFLAG_COPIED;
}
| true |
qemu
|
ff4b91c2f7e51dab148aba4bf43c2f39f219e495
|
15,366 |
bool migration_has_finished(MigrationState *s)
{
return s->state == MIG_STATE_COMPLETED;
}
| true |
qemu
|
60fe637bf0e4d7989e21e50f52526444765c63b4
|
15,367 |
static void microdrive_realize(DeviceState *dev, Error **errp)
{
MicroDriveState *md = MICRODRIVE(dev);
ide_init2(&md->bus, qemu_allocate_irqs(md_set_irq, md, 1)[0]);
}
| true |
qemu
|
f3c7d0389fe8a2792fd4c1cf151b885de03c8f62
|
15,369 |
static int decode_tsw1(uint8_t *frame, int width, int height,
const uint8_t *src, const uint8_t *src_end)
{
const uint8_t *frame_start = frame;
const uint8_t *frame_end = frame + width * height;
int mask = 0x10000, bitbuf = 0;
int v, offset, count, segments;
segments = bytestream_get_le32(&src);
frame += bytestream_get_le32(&src);
if (frame < frame_start || frame > frame_end)
return -1;
while (segments--) {
if (mask == 0x10000) {
if (src >= src_end)
return -1;
bitbuf = bytestream_get_le16(&src);
mask = 1;
}
if (src_end - src < 2 || frame_end - frame < 2)
return -1;
if (bitbuf & mask) {
v = bytestream_get_le16(&src);
offset = (v & 0x1FFF) << 1;
count = ((v >> 13) + 2) << 1;
if (frame - frame_start < offset || frame_end - frame < count)
return -1;
av_memcpy_backptr(frame, offset, count);
frame += count;
} else {
*frame++ = *src++;
*frame++ = *src++;
}
mask <<= 1;
}
return 0;
}
| true |
FFmpeg
|
2c9a5172d328259c5d76e588f2ddc12f439ffcd0
|
15,370 |
static CharDriverState *net_vhost_parse_chardev(
const NetdevVhostUserOptions *opts, Error **errp)
{
CharDriverState *chr = qemu_chr_find(opts->chardev);
VhostUserChardevProps props;
if (chr == NULL) {
error_setg(errp, "chardev \"%s\" not found", opts->chardev);
return NULL;
}
/* inspect chardev opts */
memset(&props, 0, sizeof(props));
if (qemu_opt_foreach(chr->opts, net_vhost_chardev_opts, &props, errp)) {
return NULL;
}
if (!props.is_socket || !props.is_unix) {
error_setg(errp, "chardev \"%s\" is not a unix socket",
opts->chardev);
return NULL;
}
qemu_chr_fe_claim_no_fail(chr);
return chr;
}
| true |
qemu
|
0a73336d96397c80881219d080518fac6f1ecacb
|
15,371 |
static int qemu_rdma_drain_cq(QEMUFile *f, RDMAContext *rdma)
{
int ret;
if (qemu_rdma_write_flush(f, rdma) < 0) {
return -EIO;
}
while (rdma->nb_sent) {
ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
if (ret < 0) {
fprintf(stderr, "rdma migration: complete polling error!\n");
return -EIO;
}
}
qemu_rdma_unregister_waiting(rdma);
return 0;
}
| true |
qemu
|
60fe637bf0e4d7989e21e50f52526444765c63b4
|
15,372 |
static int smacker_decode_header_tree(SmackVContext *smk, GetBitContext *gb, int **recodes, int *last, int size)
{
int res;
HuffContext huff;
HuffContext tmp1, tmp2;
VLC vlc[2];
int escapes[3];
DBCtx ctx;
tmp1.length = 256;
tmp1.maxlength = 0;
tmp1.current = 0;
tmp1.bits = av_mallocz(256 * 4);
tmp1.lengths = av_mallocz(256 * sizeof(int));
tmp1.values = av_mallocz(256 * sizeof(int));
tmp2.length = 256;
tmp2.maxlength = 0;
tmp2.current = 0;
tmp2.bits = av_mallocz(256 * 4);
tmp2.lengths = av_mallocz(256 * sizeof(int));
tmp2.values = av_mallocz(256 * sizeof(int));
memset(&vlc[0], 0, sizeof(VLC));
memset(&vlc[1], 0, sizeof(VLC));
if(get_bits1(gb)) {
smacker_decode_tree(gb, &tmp1, 0, 0);
get_bits1(gb);
res = init_vlc(&vlc[0], SMKTREE_BITS, tmp1.length,
tmp1.lengths, sizeof(int), sizeof(int),
tmp1.bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE);
if(res < 0) {
av_log(smk->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
} else {
av_log(smk->avctx, AV_LOG_ERROR, "Skipping low bytes tree\n");
if(get_bits1(gb)){
smacker_decode_tree(gb, &tmp2, 0, 0);
get_bits1(gb);
res = init_vlc(&vlc[1], SMKTREE_BITS, tmp2.length,
tmp2.lengths, sizeof(int), sizeof(int),
tmp2.bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE);
if(res < 0) {
av_log(smk->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
} else {
av_log(smk->avctx, AV_LOG_ERROR, "Skipping high bytes tree\n");
escapes[0] = get_bits(gb, 8);
escapes[0] |= get_bits(gb, 8) << 8;
escapes[1] = get_bits(gb, 8);
escapes[1] |= get_bits(gb, 8) << 8;
escapes[2] = get_bits(gb, 8);
escapes[2] |= get_bits(gb, 8) << 8;
last[0] = last[1] = last[2] = -1;
ctx.escapes[0] = escapes[0];
ctx.escapes[1] = escapes[1];
ctx.escapes[2] = escapes[2];
ctx.v1 = &vlc[0];
ctx.v2 = &vlc[1];
ctx.recode1 = tmp1.values;
ctx.recode2 = tmp2.values;
ctx.last = last;
huff.length = ((size + 3) >> 2) + 3;
huff.maxlength = 0;
huff.current = 0;
huff.values = av_mallocz(huff.length * sizeof(int));
smacker_decode_bigtree(gb, &huff, &ctx);
get_bits1(gb);
if(ctx.last[0] == -1) ctx.last[0] = huff.current++;
if(ctx.last[1] == -1) ctx.last[1] = huff.current++;
if(ctx.last[2] == -1) ctx.last[2] = huff.current++;
*recodes = huff.values;
if(vlc[0].table)
free_vlc(&vlc[0]);
if(vlc[1].table)
free_vlc(&vlc[1]);
av_free(tmp1.bits);
av_free(tmp1.lengths);
av_free(tmp1.values);
av_free(tmp2.bits);
av_free(tmp2.lengths);
av_free(tmp2.values);
return 0;
| true |
FFmpeg
|
3a1a7e32ace7af47de74e8ae779cb4e04c89aa97
|
15,374 |
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
lhs->selector = rhs->selector;
lhs->base = rhs->base;
lhs->limit = rhs->limit;
lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
(rhs->present * DESC_P_MASK) |
(rhs->dpl << DESC_DPL_SHIFT) |
(rhs->db << DESC_B_SHIFT) |
(rhs->s * DESC_S_MASK) |
(rhs->l << DESC_L_SHIFT) |
(rhs->g * DESC_G_MASK) |
(rhs->avl * DESC_AVL_MASK);
}
| true |
qemu
|
4cae9c97967a0e6311858285a30c44208210b277
|
15,375 |
int ff_rfps_add_frame(AVFormatContext *ic, AVStream *st, int64_t ts)
{
int i, j;
int64_t last = st->info->last_dts;
if( ts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && ts > last
&& ts - (uint64_t)last < INT64_MAX){
double dts= (is_relative(ts) ? ts - RELATIVE_TS_BASE : ts) * av_q2d(st->time_base);
int64_t duration= ts - last;
if (!st->info->duration_error)
st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);
if (!st->info->duration_error)
return AVERROR(ENOMEM);
// if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
// av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
for (i=0; i<MAX_STD_TIMEBASES; i++) {
if (st->info->duration_error[0][1][i] < 1e10) {
int framerate= get_std_framerate(i);
double sdts= dts*framerate/(1001*12);
for(j=0; j<2; j++){
int64_t ticks= llrint(sdts+j*0.5);
double error= sdts - ticks + j*0.5;
st->info->duration_error[j][0][i] += error;
st->info->duration_error[j][1][i] += error*error;
}
}
}
st->info->duration_count++;
if (st->info->duration_count % 10 == 0) {
int n = st->info->duration_count;
for (i=0; i<MAX_STD_TIMEBASES; i++) {
if (st->info->duration_error[0][1][i] < 1e10) {
double a0 = st->info->duration_error[0][0][i] / n;
double error0 = st->info->duration_error[0][1][i] / n - a0*a0;
double a1 = st->info->duration_error[1][0][i] / n;
double error1 = st->info->duration_error[1][1][i] / n - a1*a1;
if (error0 > 0.04 && error1 > 0.04) {
st->info->duration_error[0][1][i] = 2e10;
st->info->duration_error[1][1][i] = 2e10;
}
}
}
}
// ignore the first 4 values, they might have some random jitter
if (st->info->duration_count > 3 && is_relative(ts) == is_relative(last))
st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
}
if (ts != AV_NOPTS_VALUE)
st->info->last_dts = ts;
return 0;
}
| true |
FFmpeg
|
d600b18f224e02f8bfc6660bfa442e7ff3fb057c
|
15,376 |
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
uint64_t start_addr,
uint64_t size)
{
int i;
for (i = 0; i < dev->nvqs; ++i) {
struct vhost_virtqueue *vq = dev->vqs + i;
hwaddr l;
void *p;
if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
continue;
}
l = vq->ring_size;
p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
if (!p || l != vq->ring_size) {
fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
return -ENOMEM;
}
if (p != vq->ring) {
fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
return -EBUSY;
}
cpu_physical_memory_unmap(p, l, 0, 0);
}
return 0;
}
| true |
qemu
|
8617343faae6ba7e916137c6c9e3ef22c00565d8
|
15,377 |
static av_cold int vc2_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
const AVFrame *frame, int *got_packet)
{
int ret = 0;
int sig_size = 256;
VC2EncContext *s = avctx->priv_data;
const char aux_data[] = LIBAVCODEC_IDENT;
const int aux_data_size = sizeof(aux_data);
const int header_size = 100 + aux_data_size;
int64_t max_frame_bytes, r_bitrate = avctx->bit_rate >> (s->interlaced);
s->avctx = avctx;
s->size_scaler = 1;
s->prefix_bytes = 0;
s->last_parse_code = 0;
s->next_parse_offset = 0;
/* Rate control */
max_frame_bytes = (av_rescale(r_bitrate, s->avctx->time_base.num,
s->avctx->time_base.den) >> 3) - header_size;
/* Find an appropriate size scaler */
while (sig_size > 255) {
s->slice_max_bytes = FFALIGN(av_rescale(max_frame_bytes, 1,
s->num_x*s->num_y), s->size_scaler);
s->slice_max_bytes += 4 + s->prefix_bytes;
sig_size = s->slice_max_bytes/s->size_scaler; /* Signalled slice size */
s->size_scaler <<= 1;
}
s->slice_min_bytes = s->slice_max_bytes - s->slice_max_bytes*(s->tolerance/100.0f);
ret = encode_frame(s, avpkt, frame, aux_data, header_size, s->interlaced);
if (ret)
return ret;
if (s->interlaced) {
ret = encode_frame(s, avpkt, frame, aux_data, header_size, 2);
if (ret)
return ret;
}
flush_put_bits(&s->pb);
avpkt->size = put_bits_count(&s->pb) >> 3;
*got_packet = 1;
return 0;
}
| true |
FFmpeg
|
f4b30beac0c1a70d6da1e3ffe1e74e9e55397d8e
|
15,378 |
int ff_tak_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb,
TAKStreamInfo *ti, int log_level_offset)
{
if (get_bits(gb, TAK_FRAME_HEADER_SYNC_ID_BITS) != TAK_FRAME_HEADER_SYNC_ID) {
av_log(avctx, AV_LOG_ERROR + log_level_offset, "missing sync id\n");
return AVERROR_INVALIDDATA;
}
ti->flags = get_bits(gb, TAK_FRAME_HEADER_FLAGS_BITS);
ti->frame_num = get_bits(gb, TAK_FRAME_HEADER_NO_BITS);
if (ti->flags & TAK_FRAME_FLAG_IS_LAST) {
ti->last_frame_samples = get_bits(gb, TAK_FRAME_HEADER_SAMPLE_COUNT_BITS) + 1;
skip_bits(gb, 2);
} else {
ti->last_frame_samples = 0;
}
if (ti->flags & TAK_FRAME_FLAG_HAS_INFO) {
avpriv_tak_parse_streaminfo(gb, ti);
if (get_bits(gb, 6))
skip_bits(gb, 25);
align_get_bits(gb);
}
if (ti->flags & TAK_FRAME_FLAG_HAS_METADATA)
return AVERROR_INVALIDDATA;
skip_bits(gb, 24);
return 0;
}
| false |
FFmpeg
|
6bd665b7c5798803366b877903fa3bce7f129d05
|
15,379 |
static int dvbsub_read_2bit_string(uint8_t *destbuf, int dbuf_len,
const uint8_t **srcbuf, int buf_size,
int non_mod, uint8_t *map_table)
{
GetBitContext gb;
int bits;
int run_length;
int pixels_read = 0;
init_get_bits(&gb, *srcbuf, buf_size << 3);
while (get_bits_count(&gb) < buf_size << 3 && pixels_read < dbuf_len) {
bits = get_bits(&gb, 2);
if (bits) {
if (non_mod != 1 || bits != 1) {
if (map_table)
*destbuf++ = map_table[bits];
else
*destbuf++ = bits;
}
pixels_read++;
} else {
bits = get_bits1(&gb);
if (bits == 1) {
run_length = get_bits(&gb, 3) + 3;
bits = get_bits(&gb, 2);
if (non_mod == 1 && bits == 1)
pixels_read += run_length;
else {
if (map_table)
bits = map_table[bits];
while (run_length-- > 0 && pixels_read < dbuf_len) {
*destbuf++ = bits;
pixels_read++;
}
}
} else {
bits = get_bits1(&gb);
if (bits == 0) {
bits = get_bits(&gb, 2);
if (bits == 2) {
run_length = get_bits(&gb, 4) + 12;
bits = get_bits(&gb, 2);
if (non_mod == 1 && bits == 1)
pixels_read += run_length;
else {
if (map_table)
bits = map_table[bits];
while (run_length-- > 0 && pixels_read < dbuf_len) {
*destbuf++ = bits;
pixels_read++;
}
}
} else if (bits == 3) {
run_length = get_bits(&gb, 8) + 29;
bits = get_bits(&gb, 2);
if (non_mod == 1 && bits == 1)
pixels_read += run_length;
else {
if (map_table)
bits = map_table[bits];
while (run_length-- > 0 && pixels_read < dbuf_len) {
*destbuf++ = bits;
pixels_read++;
}
}
} else if (bits == 1) {
pixels_read += 2;
if (map_table)
bits = map_table[0];
else
bits = 0;
if (pixels_read <= dbuf_len) {
*destbuf++ = bits;
*destbuf++ = bits;
}
} else {
(*srcbuf) += (get_bits_count(&gb) + 7) >> 3;
return pixels_read;
}
} else {
if (map_table)
bits = map_table[0];
else
bits = 0;
*destbuf++ = bits;
pixels_read++;
}
}
}
}
if (get_bits(&gb, 6))
av_log(0, AV_LOG_ERROR, "DVBSub error: line overflow\n");
(*srcbuf) += (get_bits_count(&gb) + 7) >> 3;
return pixels_read;
}
| false |
FFmpeg
|
eea064aea610ea41b5bda0b62dac56be536af9aa
|
15,380 |
D(float, sse)
D(float, avx)
D(int16, mmx)
D(int16, sse2)
av_cold void swri_rematrix_init_x86(struct SwrContext *s){
int mm_flags = av_get_cpu_flags();
int nb_in = av_get_channel_layout_nb_channels(s->in_ch_layout);
int nb_out = av_get_channel_layout_nb_channels(s->out_ch_layout);
int num = nb_in * nb_out;
int i,j;
s->mix_1_1_simd = NULL;
s->mix_2_1_simd = NULL;
if (s->midbuf.fmt == AV_SAMPLE_FMT_S16P){
if(mm_flags & AV_CPU_FLAG_MMX) {
s->mix_1_1_simd = ff_mix_1_1_a_int16_mmx;
s->mix_2_1_simd = ff_mix_2_1_a_int16_mmx;
}
if(mm_flags & AV_CPU_FLAG_SSE2) {
s->mix_1_1_simd = ff_mix_1_1_a_int16_sse2;
s->mix_2_1_simd = ff_mix_2_1_a_int16_sse2;
}
s->native_simd_matrix = av_mallocz(2 * num * sizeof(int16_t));
s->native_simd_one = av_mallocz(2 * sizeof(int16_t));
for(i=0; i<nb_out; i++){
int sh = 0;
for(j=0; j<nb_in; j++)
sh = FFMAX(sh, FFABS(((int*)s->native_matrix)[i * nb_in + j]));
sh = FFMAX(av_log2(sh) - 14, 0);
for(j=0; j<nb_in; j++) {
((int16_t*)s->native_simd_matrix)[2*(i * nb_in + j)+1] = 15 - sh;
((int16_t*)s->native_simd_matrix)[2*(i * nb_in + j)] =
((((int*)s->native_matrix)[i * nb_in + j]) + (1<<sh>>1)) >> sh;
}
}
((int16_t*)s->native_simd_one)[1] = 14;
((int16_t*)s->native_simd_one)[0] = 16384;
} else if(s->midbuf.fmt == AV_SAMPLE_FMT_FLTP){
if(mm_flags & AV_CPU_FLAG_SSE) {
s->mix_1_1_simd = ff_mix_1_1_a_float_sse;
s->mix_2_1_simd = ff_mix_2_1_a_float_sse;
}
if(HAVE_AVX_EXTERNAL && mm_flags & AV_CPU_FLAG_AVX) {
s->mix_1_1_simd = ff_mix_1_1_a_float_avx;
s->mix_2_1_simd = ff_mix_2_1_a_float_avx;
}
s->native_simd_matrix = av_mallocz(num * sizeof(float));
memcpy(s->native_simd_matrix, s->native_matrix, num * sizeof(float));
s->native_simd_one = av_mallocz(sizeof(float));
memcpy(s->native_simd_one, s->native_one, sizeof(float));
}
}
| false |
FFmpeg
|
9937362c54be085e75c90c55dad443329be59e69
|
15,381 |
static int mov_read_wave(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
if (c->fc->nb_streams < 1)
return 0;
st = c->fc->streams[c->fc->nb_streams-1];
if ((uint64_t)atom.size > (1<<30))
return AVERROR_INVALIDDATA;
if (st->codec->codec_id == AV_CODEC_ID_QDM2 || st->codec->codec_id == AV_CODEC_ID_QDMC) {
// pass all frma atom to codec, needed at least for QDMC and QDM2
av_free(st->codec->extradata);
st->codec->extradata = av_mallocz(atom.size + FF_INPUT_BUFFER_PADDING_SIZE);
if (!st->codec->extradata)
return AVERROR(ENOMEM);
st->codec->extradata_size = atom.size;
avio_read(pb, st->codec->extradata, atom.size);
} else if (atom.size > 8) { /* to read frma, esds atoms */
int ret;
if ((ret = mov_read_default(c, pb, atom)) < 0)
return ret;
} else
avio_skip(pb, atom.size);
return 0;
}
| true |
FFmpeg
|
5c720657c23afd798ae0db7c7362eb859a89ab3d
|
15,382 |
static void rng_egd_finalize(Object *obj)
{
RngEgd *s = RNG_EGD(obj);
if (s->chr) {
qemu_chr_add_handlers(s->chr, NULL, NULL, NULL, NULL);
}
g_free(s->chr_name);
rng_egd_free_requests(s);
}
| true |
qemu
|
456d60692310e7ac25cf822cc1e98192ad636ece
|
15,383 |
static int mov_read_chan(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
if (c->fc->nb_streams < 1)
return 0;
st = c->fc->streams[c->fc->nb_streams-1];
if (atom.size < 16)
return 0;
ff_mov_read_chan(c->fc, st, atom.size - 4);
return 0;
}
| true |
FFmpeg
|
9afb7061f938831248942050cfdb449e014ed427
|
15,384 |
static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
uint64_t sector, size_t size)
{
if (sector & dev->sector_mask) {
if (size % dev->conf->logical_block_size) {
return true;
| true |
qemu
|
3c2daac0b98952a858277878cb11294256b39e43
|
15,385 |
static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
BC_STATUS ret;
BC_DTS_STATUS decoder_status = { 0, };
CopyRet rec_ret;
CHDContext *priv = avctx->priv_data;
HANDLE dev = priv->dev;
uint8_t *in_data = avpkt->data;
int len = avpkt->size;
int free_data = 0;
uint8_t pic_type = 0;
av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: decode_frame\n");
if (avpkt->size == 7 && !priv->bframe_bug) {
/*
* The use of a drop frame triggers the bug
*/
av_log(avctx, AV_LOG_INFO,
"CrystalHD: Enabling work-around for packed b-frame bug\n");
priv->bframe_bug = 1;
} else if (avpkt->size == 8 && priv->bframe_bug) {
/*
* Delay frames don't trigger the bug
*/
av_log(avctx, AV_LOG_INFO,
"CrystalHD: Disabling work-around for packed b-frame bug\n");
priv->bframe_bug = 0;
}
if (len) {
int32_t tx_free = (int32_t)DtsTxFreeSize(dev);
if (priv->bsfc) {
int ret = 0;
AVPacket filter_packet = { 0 };
AVPacket filtered_packet = { 0 };
ret = av_packet_ref(&filter_packet, avpkt);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "CrystalHD: mpv4toannexb filter "
"failed to ref input packet\n");
return ret;
}
ret = av_bsf_send_packet(priv->bsfc, &filter_packet);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "CrystalHD: mpv4toannexb filter "
"failed to send input packet\n");
return ret;
}
ret = av_bsf_receive_packet(priv->bsfc, &filtered_packet);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "CrystalHD: mpv4toannexb filter "
"failed to receive output packet\n");
return ret;
}
in_data = filtered_packet.data;
len = filtered_packet.size;
av_packet_unref(&filter_packet);
}
if (priv->parser) {
int ret = 0;
free_data = ret > 0;
if (ret >= 0) {
uint8_t *pout;
int psize;
int index;
H264Context *h = priv->parser->priv_data;
index = av_parser_parse2(priv->parser, avctx, &pout, &psize,
in_data, len, avctx->internal->pkt->pts,
avctx->internal->pkt->dts, 0);
if (index < 0) {
av_log(avctx, AV_LOG_WARNING,
"CrystalHD: Failed to parse h.264 packet to "
"detect interlacing.\n");
} else if (index != len) {
av_log(avctx, AV_LOG_WARNING,
"CrystalHD: Failed to parse h.264 packet "
"completely. Interlaced frames may be "
"incorrectly detected.\n");
} else {
av_log(avctx, AV_LOG_VERBOSE,
"CrystalHD: parser picture type %d\n",
h->picture_structure);
pic_type = h->picture_structure;
}
} else {
av_log(avctx, AV_LOG_WARNING,
"CrystalHD: mp4toannexb filter failed to filter "
"packet. Interlaced frames may be incorrectly "
"detected.\n");
}
}
if (len < tx_free - 1024) {
/*
* Despite being notionally opaque, either libcrystalhd or
* the hardware itself will mangle pts values that are too
* small or too large. The docs claim it should be in units
* of 100ns. Given that we're nominally dealing with a black
* box on both sides, any transform we do has no guarantee of
* avoiding mangling so we need to build a mapping to values
* we know will not be mangled.
*/
uint64_t pts = opaque_list_push(priv, avctx->internal->pkt->pts, pic_type);
if (!pts) {
if (free_data) {
av_freep(&in_data);
}
return AVERROR(ENOMEM);
}
av_log(priv->avctx, AV_LOG_VERBOSE,
"input \"pts\": %"PRIu64"\n", pts);
ret = DtsProcInput(dev, in_data, len, pts, 0);
if (free_data) {
av_freep(&in_data);
}
if (ret == BC_STS_BUSY) {
av_log(avctx, AV_LOG_WARNING,
"CrystalHD: ProcInput returned busy\n");
usleep(BASE_WAIT);
return AVERROR(EBUSY);
} else if (ret != BC_STS_SUCCESS) {
av_log(avctx, AV_LOG_ERROR,
"CrystalHD: ProcInput failed: %u\n", ret);
return -1;
}
avctx->has_b_frames++;
} else {
av_log(avctx, AV_LOG_WARNING, "CrystalHD: Input buffer full\n");
len = 0; // We didn't consume any bytes.
}
} else {
av_log(avctx, AV_LOG_INFO, "CrystalHD: No more input data\n");
}
if (priv->skip_next_output) {
av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Skipping next output.\n");
priv->skip_next_output = 0;
avctx->has_b_frames--;
return len;
}
ret = DtsGetDriverStatus(dev, &decoder_status);
if (ret != BC_STS_SUCCESS) {
av_log(avctx, AV_LOG_ERROR, "CrystalHD: GetDriverStatus failed\n");
return -1;
}
/*
* No frames ready. Don't try to extract.
*
* Empirical testing shows that ReadyListCount can be a damn lie,
* and ProcOut still fails when count > 0. The same testing showed
* that two more iterations were needed before ProcOutput would
* succeed.
*/
if (priv->output_ready < 2) {
if (decoder_status.ReadyListCount != 0)
priv->output_ready++;
usleep(BASE_WAIT);
av_log(avctx, AV_LOG_INFO, "CrystalHD: Filling pipeline.\n");
return len;
} else if (decoder_status.ReadyListCount == 0) {
/*
* After the pipeline is established, if we encounter a lack of frames
* that probably means we're not giving the hardware enough time to
* decode them, so start increasing the wait time at the end of a
* decode call.
*/
usleep(BASE_WAIT);
priv->decode_wait += WAIT_UNIT;
av_log(avctx, AV_LOG_INFO, "CrystalHD: No frames ready. Returning\n");
return len;
}
do {
rec_ret = receive_frame(avctx, data, got_frame);
if (rec_ret == RET_OK && *got_frame == 0) {
/*
* This case is for when the encoded fields are stored
* separately and we get a separate avpkt for each one. To keep
* the pipeline stable, we should return nothing and wait for
* the next time round to grab the second field.
* H.264 PAFF is an example of this.
*/
av_log(avctx, AV_LOG_VERBOSE, "Returning after first field.\n");
avctx->has_b_frames--;
} else if (rec_ret == RET_COPY_NEXT_FIELD) {
/*
* This case is for when the encoded fields are stored in a
* single avpkt but the hardware returns then separately. Unless
* we grab the second field before returning, we'll slip another
* frame in the pipeline and if that happens a lot, we're sunk.
* So we have to get that second field now.
* Interlaced mpeg2 and vc1 are examples of this.
*/
av_log(avctx, AV_LOG_VERBOSE, "Trying to get second field.\n");
while (1) {
usleep(priv->decode_wait);
ret = DtsGetDriverStatus(dev, &decoder_status);
if (ret == BC_STS_SUCCESS &&
decoder_status.ReadyListCount > 0) {
rec_ret = receive_frame(avctx, data, got_frame);
if ((rec_ret == RET_OK && *got_frame > 0) ||
rec_ret == RET_ERROR)
break;
}
}
av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Got second field.\n");
} else if (rec_ret == RET_SKIP_NEXT_COPY) {
/*
* Two input packets got turned into a field pair. Gawd.
*/
av_log(avctx, AV_LOG_VERBOSE,
"Don't output on next decode call.\n");
priv->skip_next_output = 1;
}
/*
* If rec_ret == RET_COPY_AGAIN, that means that either we just handled
* a FMT_CHANGE event and need to go around again for the actual frame,
* we got a busy status and need to try again, or we're dealing with
* packed b-frames, where the hardware strangely returns the packed
* p-frame twice. We choose to keep the second copy as it carries the
* valid pts.
*/
} while (rec_ret == RET_COPY_AGAIN);
usleep(priv->decode_wait);
return len;
}
| true |
FFmpeg
|
b5f45208fbe5373c7f9112a8169933b73a8478e1
|
15,386 |
static int theora_decode_header(AVCodecContext *avctx, GetBitContext gb)
{
Vp3DecodeContext *s = avctx->priv_data;
int major, minor, micro;
major = get_bits(&gb, 8); /* version major */
minor = get_bits(&gb, 8); /* version minor */
micro = get_bits(&gb, 8); /* version micro */
av_log(avctx, AV_LOG_INFO, "Theora bitstream version %d.%d.%d\n",
major, minor, micro);
/* FIXME: endianness? */
s->theora = (major << 16) | (minor << 8) | micro;
/* 3.3.0 aka alpha3 has the same frame orientation as original vp3 */
/* but previous versions have the image flipped relative to vp3 */
if (s->theora < 0x030300)
{
s->flipped_image = 1;
av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
s->width = get_bits(&gb, 16) << 4;
s->height = get_bits(&gb, 16) << 4;
skip_bits(&gb, 24); /* frame width */
skip_bits(&gb, 24); /* frame height */
skip_bits(&gb, 8); /* offset x */
skip_bits(&gb, 8); /* offset y */
skip_bits(&gb, 32); /* fps numerator */
skip_bits(&gb, 32); /* fps denominator */
skip_bits(&gb, 24); /* aspect numerator */
skip_bits(&gb, 24); /* aspect denominator */
if (s->theora < 0x030300)
skip_bits(&gb, 5); /* keyframe frequency force */
skip_bits(&gb, 8); /* colorspace */
skip_bits(&gb, 24); /* bitrate */
skip_bits(&gb, 6); /* last(?) quality index */
if (s->theora >= 0x030300)
{
skip_bits(&gb, 5); /* keyframe frequency force */
skip_bits(&gb, 5); /* spare bits */
// align_get_bits(&gb);
avctx->width = s->width;
avctx->height = s->height;
vp3_decode_init(avctx);
return 0;
| true |
FFmpeg
|
0ecca7a49f8e254c12a3a1de048d738bfbb614c6
|
15,387 |
static void pci_update_mappings(PCIDevice *d)
{
PCIIORegion *r;
int cmd, i;
uint32_t last_addr, new_addr, config_ofs;
cmd = le16_to_cpu(*(uint16_t *)(d->config + PCI_COMMAND));
for(i = 0; i < PCI_NUM_REGIONS; i++) {
r = &d->io_regions[i];
if (i == PCI_ROM_SLOT) {
config_ofs = 0x30;
} else {
config_ofs = 0x10 + i * 4;
}
if (r->size != 0) {
if (r->type & PCI_ADDRESS_SPACE_IO) {
if (cmd & PCI_COMMAND_IO) {
new_addr = le32_to_cpu(*(uint32_t *)(d->config +
config_ofs));
new_addr = new_addr & ~(r->size - 1);
last_addr = new_addr + r->size - 1;
/* NOTE: we have only 64K ioports on PC */
if (last_addr <= new_addr || new_addr == 0 ||
last_addr >= 0x10000) {
new_addr = -1;
}
} else {
new_addr = -1;
}
} else {
if (cmd & PCI_COMMAND_MEMORY) {
new_addr = le32_to_cpu(*(uint32_t *)(d->config +
config_ofs));
/* the ROM slot has a specific enable bit */
if (i == PCI_ROM_SLOT && !(new_addr & 1))
goto no_mem_map;
new_addr = new_addr & ~(r->size - 1);
last_addr = new_addr + r->size - 1;
/* NOTE: we do not support wrapping */
/* XXX: as we cannot support really dynamic
mappings, we handle specific values as invalid
mappings. */
if (last_addr <= new_addr || new_addr == 0 ||
last_addr == -1) {
new_addr = -1;
}
} else {
no_mem_map:
new_addr = -1;
}
}
/* now do the real mapping */
if (new_addr != r->addr) {
if (r->addr != -1) {
if (r->type & PCI_ADDRESS_SPACE_IO) {
int class;
/* NOTE: specific hack for IDE in PC case:
only one byte must be mapped. */
class = d->config[0x0a] | (d->config[0x0b] << 8);
if (class == 0x0101 && r->size == 4) {
isa_unassign_ioport(r->addr + 2, 1);
} else {
isa_unassign_ioport(r->addr, r->size);
}
} else {
cpu_register_physical_memory(pci_to_cpu_addr(r->addr),
r->size,
IO_MEM_UNASSIGNED);
}
}
r->addr = new_addr;
if (r->addr != -1) {
r->map_func(d, i, r->addr, r->size, r->type);
}
}
}
}
}
| true |
qemu
|
f65ed4c1529f29a7d62d6733eaa50bed24a4b2ed
|
15,390 |
static int make_completely_empty(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
int ret, l1_clusters;
int64_t offset;
uint64_t *new_reftable = NULL;
uint64_t rt_entry, l1_size2;
struct {
uint64_t l1_offset;
uint64_t reftable_offset;
uint32_t reftable_clusters;
} QEMU_PACKED l1_ofs_rt_ofs_cls;
ret = qcow2_cache_empty(bs, s->l2_table_cache);
if (ret < 0) {
goto fail;
}
ret = qcow2_cache_empty(bs, s->refcount_block_cache);
if (ret < 0) {
goto fail;
}
/* Refcounts will be broken utterly */
ret = qcow2_mark_dirty(bs);
if (ret < 0) {
goto fail;
}
BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t));
l1_size2 = (uint64_t)s->l1_size * sizeof(uint64_t);
/* After this call, neither the in-memory nor the on-disk refcount
* information accurately describe the actual references */
ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset,
l1_clusters * s->cluster_size, 0);
if (ret < 0) {
goto fail_broken_refcounts;
}
memset(s->l1_table, 0, l1_size2);
BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE);
/* Overwrite enough clusters at the beginning of the sectors to place
* the refcount table, a refcount block and the L1 table in; this may
* overwrite parts of the existing refcount and L1 table, which is not
* an issue because the dirty flag is set, complete data loss is in fact
* desired and partial data loss is consequently fine as well */
ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size,
(2 + l1_clusters) * s->cluster_size, 0);
/* This call (even if it failed overall) may have overwritten on-disk
* refcount structures; in that case, the in-memory refcount information
* will probably differ from the on-disk information which makes the BDS
* unusable */
if (ret < 0) {
goto fail_broken_refcounts;
}
BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE);
/* "Create" an empty reftable (one cluster) directly after the image
* header and an empty L1 table three clusters after the image header;
* the cluster between those two will be used as the first refblock */
l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size);
l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size);
l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1);
ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset),
&l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls));
if (ret < 0) {
goto fail_broken_refcounts;
}
s->l1_table_offset = 3 * s->cluster_size;
new_reftable = g_try_new0(uint64_t, s->cluster_size / sizeof(uint64_t));
if (!new_reftable) {
ret = -ENOMEM;
goto fail_broken_refcounts;
}
s->refcount_table_offset = s->cluster_size;
s->refcount_table_size = s->cluster_size / sizeof(uint64_t);
g_free(s->refcount_table);
s->refcount_table = new_reftable;
new_reftable = NULL;
/* Now the in-memory refcount information again corresponds to the on-disk
* information (reftable is empty and no refblocks (the refblock cache is
* empty)); however, this means some clusters (e.g. the image header) are
* referenced, but not refcounted, but the normal qcow2 code assumes that
* the in-memory information is always correct */
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);
/* Enter the first refblock into the reftable */
rt_entry = cpu_to_be64(2 * s->cluster_size);
ret = bdrv_pwrite_sync(bs->file, s->cluster_size,
&rt_entry, sizeof(rt_entry));
if (ret < 0) {
goto fail_broken_refcounts;
}
s->refcount_table[0] = 2 * s->cluster_size;
s->free_cluster_index = 0;
assert(3 + l1_clusters <= s->refcount_block_size);
offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2);
if (offset < 0) {
ret = offset;
goto fail_broken_refcounts;
} else if (offset > 0) {
error_report("First cluster in emptied image is in use");
abort();
}
/* Now finally the in-memory information corresponds to the on-disk
* structures and is correct */
ret = qcow2_mark_clean(bs);
if (ret < 0) {
goto fail;
}
ret = bdrv_truncate(bs->file->bs, (3 + l1_clusters) * s->cluster_size);
if (ret < 0) {
goto fail;
}
return 0;
fail_broken_refcounts:
/* The BDS is unusable at this point. If we wanted to make it usable, we
* would have to call qcow2_refcount_close(), qcow2_refcount_init(),
* qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init()
* again. However, because the functions which could have caused this error
* path to be taken are used by those functions as well, it's very likely
* that that sequence will fail as well. Therefore, just eject the BDS. */
bs->drv = NULL;
fail:
g_free(new_reftable);
return ret;
}
| true |
qemu
|
7061a078984ba7d08b8b80686ad98c5162e56fbd
|
15,391 |
bool gs_allowed(void)
{
/* for "none" machine this results in true */
return get_machine_class()->gs_allowed;
}
| true |
qemu
|
0280b3eb7c0519b43452c05cf51f8777d9e38975
|
15,392 |
void qmp_block_dirty_bitmap_remove(const char *node, const char *name,
Error **errp)
{
AioContext *aio_context;
BlockDriverState *bs;
BdrvDirtyBitmap *bitmap;
bitmap = block_dirty_bitmap_lookup(node, name, &bs, &aio_context, errp);
if (!bitmap || !bs) {
        return;
    }
    bdrv_dirty_bitmap_make_anon(bs, bitmap);
    bdrv_release_dirty_bitmap(bs, bitmap);
    aio_context_release(aio_context);
}
| true |
qemu
|
9bd2b08f27b9c27bb40d73b6466321b8c635086e
|
15,393 |
void av_opencl_uninit(void)
{
cl_int status;
int i;
LOCK_OPENCL
gpu_env.init_count--;
if (gpu_env.is_user_created)
goto end;
if ((gpu_env.init_count > 0) || (gpu_env.kernel_count > 0))
goto end;
for (i = 0; i < gpu_env.program_count; i++) {
if (gpu_env.programs[i]) {
status = clReleaseProgram(gpu_env.programs[i]);
if (status != CL_SUCCESS) {
av_log(&openclutils, AV_LOG_ERROR, "Could not release OpenCL program: %s\n", opencl_errstr(status));
}
gpu_env.programs[i] = NULL;
}
}
if (gpu_env.command_queue) {
status = clReleaseCommandQueue(gpu_env.command_queue);
if (status != CL_SUCCESS) {
av_log(&openclutils, AV_LOG_ERROR, "Could not release OpenCL command queue: %s\n", opencl_errstr(status));
}
gpu_env.command_queue = NULL;
}
if (gpu_env.context) {
status = clReleaseContext(gpu_env.context);
if (status != CL_SUCCESS) {
av_log(&openclutils, AV_LOG_ERROR, "Could not release OpenCL context: %s\n", opencl_errstr(status));
}
gpu_env.context = NULL;
}
av_freep(&(gpu_env.device_ids));
end:
UNLOCK_OPENCL
}
| false |
FFmpeg
|
57d77b3963ce1023eaf5ada8cba58b9379405cc8
|
15,394 |
static void s390_cpu_plug(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
MachineState *ms = MACHINE(hotplug_dev);
S390CPU *cpu = S390_CPU(dev);
g_assert(!ms->possible_cpus->cpus[cpu->env.core_id].cpu);
    ms->possible_cpus->cpus[cpu->env.core_id].cpu = OBJECT(dev);
}
| true |
qemu
|
c5b934303cf83fe3dda31e8d3e5778458c8a9eeb
|
15,395 |
static void usbredir_handle_data(USBDevice *udev, USBPacket *p)
{
USBRedirDevice *dev = DO_UPCAST(USBRedirDevice, dev, udev);
uint8_t ep;
ep = p->ep->nr;
if (p->pid == USB_TOKEN_IN) {
ep |= USB_DIR_IN;
}
switch (dev->endpoint[EP2I(ep)].type) {
case USB_ENDPOINT_XFER_CONTROL:
ERROR("handle_data called for control transfer on ep %02X\n", ep);
p->status = USB_RET_NAK;
break;
case USB_ENDPOINT_XFER_ISOC:
usbredir_handle_iso_data(dev, p, ep);
break;
case USB_ENDPOINT_XFER_BULK:
if (p->state == USB_PACKET_SETUP && p->pid == USB_TOKEN_IN &&
p->ep->pipeline) {
p->status = USB_RET_ADD_TO_QUEUE;
break;
}
usbredir_handle_bulk_data(dev, p, ep);
break;
case USB_ENDPOINT_XFER_INT:
if (ep & USB_DIR_IN) {
usbredir_handle_interrupt_in_data(dev, p, ep);
} else {
usbredir_handle_interrupt_out_data(dev, p, ep);
}
break;
default:
ERROR("handle_data ep %02X has unknown type %d\n", ep,
dev->endpoint[EP2I(ep)].type);
p->status = USB_RET_NAK;
}
}
| true |
qemu
|
b2d1fe67d09d2b6c7da647fbcea6ca0148c206d3
|
15,396 |
static int sdl_init_out (HWVoiceOut *hw, struct audsettings *as)
{
SDLVoiceOut *sdl = (SDLVoiceOut *) hw;
SDLAudioState *s = &glob_sdl;
SDL_AudioSpec req, obt;
int endianness;
int err;
audfmt_e effective_fmt;
struct audsettings obt_as;
req.freq = as->freq;
req.format = aud_to_sdlfmt (as->fmt);
req.channels = as->nchannels;
req.samples = conf.nb_samples;
req.callback = sdl_callback;
req.userdata = sdl;
if (sdl_open (&req, &obt)) {
return -1;
}
err = sdl_to_audfmt(obt.format, &effective_fmt, &endianness);
if (err) {
sdl_close (s);
return -1;
}
obt_as.freq = obt.freq;
obt_as.nchannels = obt.channels;
obt_as.fmt = effective_fmt;
obt_as.endianness = endianness;
audio_pcm_init_info (&hw->info, &obt_as);
hw->samples = obt.samples;
s->initialized = 1;
s->exit = 0;
SDL_PauseAudio (0);
return 0;
}
| true |
qemu
|
5706db1deb061ee9affdcea81e59c4c2cad7c41e
|
15,398 |
static int dirac_probe(AVProbeData *p)
{
if (AV_RL32(p->buf) == MKTAG('B', 'B', 'C', 'D'))
return AVPROBE_SCORE_MAX;
else
return 0;
}
| true |
FFmpeg
|
2fbc759d08cae97f9361e464a685a149c9d12c72
|
15,399 |
static void apply_mdct(AC3EncodeContext *s)
{
int blk, ch;
for (ch = 0; ch < s->channels; ch++) {
for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) {
AC3Block *block = &s->blocks[blk];
const SampleType *input_samples = &s->planar_samples[ch][blk * AC3_BLOCK_SIZE];
apply_window(&s->dsp, s->windowed_samples, input_samples, s->mdct.window, AC3_WINDOW_SIZE);
block->exp_shift[ch] = normalize_samples(s);
mdct512(&s->mdct, block->mdct_coef[ch], s->windowed_samples);
}
}
}
| true |
FFmpeg
|
323e6fead07c75f418e4b60704a4f437bb3483b2
|
15,400 |
static void read_id3(AVFormatContext *s, uint64_t id3pos)
{
ID3v2ExtraMeta *id3v2_extra_meta = NULL;
if (avio_seek(s->pb, id3pos, SEEK_SET) < 0)
return;
ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
if (id3v2_extra_meta)
ff_id3v2_parse_apic(s, &id3v2_extra_meta);
ff_id3v2_free_extra_meta(&id3v2_extra_meta);
}
| false |
FFmpeg
|
5331773cc33ba26b9e26ace643d926219e46a17b
|
15,402 |
static int mov_read_wave(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
{
AVStream *st = c->fc->streams[c->fc->nb_streams-1];
if((uint64_t)atom.size > (1<<30))
return -1;
if (st->codec->codec_id == CODEC_ID_QDM2) {
// pass all frma atom to codec, needed at least for QDM2
av_free(st->codec->extradata);
st->codec->extradata_size = atom.size;
st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
if (st->codec->extradata) {
get_buffer(pb, st->codec->extradata, atom.size);
} else
url_fskip(pb, atom.size);
} else if (atom.size > 8) { /* to read frma, esds atoms */
mov_read_default(c, pb, atom);
} else
url_fskip(pb, atom.size);
return 0;
}
| false |
FFmpeg
|
9cf0419bb1a2cf929dcf458d435ae3c3bfb5d3ab
|
15,404 |
static void unix_accept_incoming_migration(void *opaque)
{
struct sockaddr_un addr;
socklen_t addrlen = sizeof(addr);
int s = (unsigned long)opaque;
QEMUFile *f;
int c, ret;
do {
c = accept(s, (struct sockaddr *)&addr, &addrlen);
} while (c == -1 && socket_error() == EINTR);
dprintf("accepted migration\n");
if (c == -1) {
fprintf(stderr, "could not accept migration connection\n");
return;
}
f = qemu_fopen_socket(c);
if (f == NULL) {
fprintf(stderr, "could not qemu_fopen socket\n");
goto out;
}
ret = qemu_loadvm_state(f);
if (ret < 0) {
fprintf(stderr, "load of migration failed\n");
goto out_fopen;
}
qemu_announce_self();
dprintf("successfully loaded vm state\n");
/* we've successfully migrated, close the server socket */
qemu_set_fd_handler2(s, NULL, NULL, NULL, NULL);
close(s);
out_fopen:
qemu_fclose(f);
out:
close(c);
}
| true |
qemu
|
40ff6d7e8dceca227e7f8a3e8e0d58b2c66d19b4
|
15,405 |
int qemu_uuid_parse(const char *str, QemuUUID *uuid)
{
unsigned char *uu = &uuid->data[0];
int ret;
if (strlen(str) != 36) {
return -1;
}
ret = sscanf(str, UUID_FMT, &uu[0], &uu[1], &uu[2], &uu[3],
&uu[4], &uu[5], &uu[6], &uu[7], &uu[8], &uu[9],
&uu[10], &uu[11], &uu[12], &uu[13], &uu[14],
&uu[15]);
if (ret != 16) {
return -1;
}
return 0;
}
| true |
qemu
|
0d6ae94783b35a5c42d88872d1adb523f5fcc6f3
|
15,406 |
static int targa_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *p, int *got_packet)
{
TargaContext *s = avctx->priv_data;
int bpp, picsize, datasize = -1, ret;
uint8_t *out;
if(avctx->width > 0xffff || avctx->height > 0xffff) {
av_log(avctx, AV_LOG_ERROR, "image dimensions too large\n");
return AVERROR(EINVAL);
}
picsize = av_image_get_buffer_size(avctx->pix_fmt,
avctx->width, avctx->height, 1);
if ((ret = ff_alloc_packet(pkt, picsize + 45)) < 0) {
av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
return ret;
}
/* zero out the header and only set applicable fields */
memset(pkt->data, 0, 12);
AV_WL16(pkt->data+12, avctx->width);
AV_WL16(pkt->data+14, avctx->height);
/* image descriptor byte: origin is always top-left, bits 0-3 specify alpha */
pkt->data[17] = 0x20 | (avctx->pix_fmt == AV_PIX_FMT_BGRA ? 8 : 0);
switch(avctx->pix_fmt) {
case AV_PIX_FMT_GRAY8:
pkt->data[2] = TGA_BW; /* uncompressed grayscale image */
pkt->data[16] = 8; /* bpp */
break;
case AV_PIX_FMT_RGB555LE:
        pkt->data[2] = TGA_RGB; /* uncompressed true-color image */
pkt->data[16] = 16; /* bpp */
break;
case AV_PIX_FMT_BGR24:
pkt->data[2] = TGA_RGB; /* uncompressed true-color image */
pkt->data[16] = 24; /* bpp */
break;
case AV_PIX_FMT_BGRA:
pkt->data[2] = TGA_RGB; /* uncompressed true-color image */
pkt->data[16] = 32; /* bpp */
break;
default:
av_log(avctx, AV_LOG_ERROR, "Pixel format '%s' not supported.\n",
av_get_pix_fmt_name(avctx->pix_fmt));
return AVERROR(EINVAL);
}
bpp = pkt->data[16] >> 3;
out = pkt->data + 18; /* skip past the header we just output */
#if FF_API_CODER_TYPE
FF_DISABLE_DEPRECATION_WARNINGS
if (avctx->coder_type == FF_CODER_TYPE_RAW)
s->rle = 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
/* try RLE compression */
if (s->rle)
datasize = targa_encode_rle(out, picsize, p, bpp, avctx->width, avctx->height);
/* if that worked well, mark the picture as RLE compressed */
if(datasize >= 0)
pkt->data[2] |= 8;
/* if RLE didn't make it smaller, go back to no compression */
else datasize = targa_encode_normal(out, p, bpp, avctx->width, avctx->height);
out += datasize;
/* The standard recommends including this section, even if we don't use
* any of the features it affords. TODO: take advantage of the pixel
* aspect ratio and encoder ID fields available? */
memcpy(out, "\0\0\0\0\0\0\0\0TRUEVISION-XFILE.", 26);
pkt->size = out + 26 - pkt->data;
pkt->flags |= AV_PKT_FLAG_KEY;
*got_packet = 1;
return 0;
}
| true |
FFmpeg
|
d8f3b0fb584677d4882e3a2d7c28f8b15c7319f5
|
15,407 |
static void vp8_idct_dc_add_c(uint8_t *dst, DCTELEM block[16], ptrdiff_t stride)
{
int i, dc = (block[0] + 4) >> 3;
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP + dc;
block[0] = 0;
for (i = 0; i < 4; i++) {
dst[0] = cm[dst[0]];
dst[1] = cm[dst[1]];
dst[2] = cm[dst[2]];
dst[3] = cm[dst[3]];
dst += stride;
}
}
| true |
FFmpeg
|
c23acbaed40101c677dfcfbbfe0d2c230a8e8f44
|
15,408 |
static av_cold void set_bandwidth(AC3EncodeContext *s)
{
int blk, ch, cpl_start;
if (s->cutoff) {
/* calculate bandwidth based on user-specified cutoff frequency */
int fbw_coeffs;
fbw_coeffs = s->cutoff * 2 * AC3_MAX_COEFS / s->sample_rate;
s->bandwidth_code = av_clip((fbw_coeffs - 73) / 3, 0, 60);
} else {
/* use default bandwidth setting */
s->bandwidth_code = ac3_bandwidth_tab[s->fbw_channels-1][s->bit_alloc.sr_code][s->frame_size_code/2];
}
/* set number of coefficients for each channel */
for (ch = 1; ch <= s->fbw_channels; ch++) {
s->start_freq[ch] = 0;
for (blk = 0; blk < s->num_blocks; blk++)
s->blocks[blk].end_freq[ch] = s->bandwidth_code * 3 + 73;
}
/* LFE channel always has 7 coefs */
if (s->lfe_on) {
s->start_freq[s->lfe_channel] = 0;
for (blk = 0; blk < s->num_blocks; blk++)
s->blocks[blk].end_freq[ch] = 7;
}
/* initialize coupling strategy */
if (s->cpl_enabled) {
if (s->options.cpl_start != AC3ENC_OPT_AUTO) {
cpl_start = s->options.cpl_start;
} else {
cpl_start = ac3_coupling_start_tab[s->channel_mode-2][s->bit_alloc.sr_code][s->frame_size_code/2];
if (cpl_start < 0) {
if (s->options.channel_coupling == AC3ENC_OPT_AUTO)
s->cpl_enabled = 0;
else
cpl_start = 15;
}
}
}
if (s->cpl_enabled) {
int i, cpl_start_band, cpl_end_band;
uint8_t *cpl_band_sizes = s->cpl_band_sizes;
cpl_end_band = s->bandwidth_code / 4 + 3;
cpl_start_band = av_clip(cpl_start, 0, FFMIN(cpl_end_band-1, 15));
s->num_cpl_subbands = cpl_end_band - cpl_start_band;
s->num_cpl_bands = 1;
*cpl_band_sizes = 12;
for (i = cpl_start_band + 1; i < cpl_end_band; i++) {
if (ff_eac3_default_cpl_band_struct[i]) {
*cpl_band_sizes += 12;
} else {
s->num_cpl_bands++;
cpl_band_sizes++;
*cpl_band_sizes = 12;
}
}
s->start_freq[CPL_CH] = cpl_start_band * 12 + 37;
s->cpl_end_freq = cpl_end_band * 12 + 37;
for (blk = 0; blk < s->num_blocks; blk++)
s->blocks[blk].end_freq[CPL_CH] = s->cpl_end_freq;
}
}
| true |
FFmpeg
|
eeb48353abaab335a1fc9cc448a5691330325a09
|
15,409 |
static void test_tco_defaults(void)
{
TestData d;
d.args = NULL;
d.noreboot = true;
test_init(&d);
g_assert_cmpint(qpci_io_readw(d.dev, d.tco_io_bar, TCO_RLD), ==,
TCO_RLD_DEFAULT);
/* TCO_DAT_IN & TCO_DAT_OUT */
g_assert_cmpint(qpci_io_readw(d.dev, d.tco_io_bar, TCO_DAT_IN), ==,
(TCO_DAT_OUT_DEFAULT << 8) | TCO_DAT_IN_DEFAULT);
/* TCO1_STS & TCO2_STS */
g_assert_cmpint(qpci_io_readl(d.dev, d.tco_io_bar, TCO1_STS), ==,
(TCO2_STS_DEFAULT << 16) | TCO1_STS_DEFAULT);
/* TCO1_CNT & TCO2_CNT */
g_assert_cmpint(qpci_io_readl(d.dev, d.tco_io_bar, TCO1_CNT), ==,
(TCO2_CNT_DEFAULT << 16) | TCO1_CNT_DEFAULT);
/* TCO_MESSAGE1 & TCO_MESSAGE2 */
g_assert_cmpint(qpci_io_readw(d.dev, d.tco_io_bar, TCO_MESSAGE1), ==,
(TCO_MESSAGE2_DEFAULT << 8) | TCO_MESSAGE1_DEFAULT);
g_assert_cmpint(qpci_io_readb(d.dev, d.tco_io_bar, TCO_WDCNT), ==,
TCO_WDCNT_DEFAULT);
g_assert_cmpint(qpci_io_readb(d.dev, d.tco_io_bar, SW_IRQ_GEN), ==,
SW_IRQ_GEN_DEFAULT);
g_assert_cmpint(qpci_io_readw(d.dev, d.tco_io_bar, TCO_TMR), ==,
TCO_TMR_DEFAULT);
qtest_end();
}
| true |
qemu
|
34779e8c3991f7fcd74b2045478abcef67dbeb15
|
15,410 |
static void print_sdp(void)
{
char sdp[16384];
int i;
AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
if (!avc)
exit(1);
for (i = 0; i < nb_output_files; i++)
avc[i] = output_files[i]->ctx;
av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
printf("SDP:\n%s\n", sdp);
fflush(stdout);
av_freep(&avc);
}
| true |
FFmpeg
|
636ced8e1dc8248a1353b416240b93d70ad03edb
|
15,412 |
static void pc_xen_hvm_init(QEMUMachineInitArgs *args)
{
if (xen_hvm_init() != 0) {
hw_error("xen hardware virtual machine initialisation failed");
}
pc_init_pci(args);
}
| false |
qemu
|
a97d6fe6fbb97630d77253d20bdce78f76d01850
|
15,413 |
static void cond_broadcast(pthread_cond_t *cond)
{
int ret = pthread_cond_broadcast(cond);
if (ret) die2(ret, "pthread_cond_broadcast");
}
| false |
qemu
|
5d47e3728bbd589701f74bb494c9c9825ba23c88
|
15,414 |
void vnc_tight_clear(VncState *vs)
{
int i;
for (i=0; i<ARRAY_SIZE(vs->tight_stream); i++) {
if (vs->tight_stream[i].opaque) {
deflateEnd(&vs->tight_stream[i]);
}
}
buffer_free(&vs->tight);
buffer_free(&vs->tight_zlib);
buffer_free(&vs->tight_gradient);
#ifdef CONFIG_VNC_JPEG
buffer_free(&vs->tight_jpeg);
#endif
}
| false |
qemu
|
245f7b51c0ea04fb2224b1127430a096c91aee70
|
15,415 |
static void default_drive(int enable, int snapshot, BlockInterfaceType type,
int index, const char *optstr)
{
QemuOpts *opts;
if (!enable || drive_get_by_index(type, index)) {
return;
}
opts = drive_add(type, index, NULL, optstr);
if (snapshot) {
drive_enable_snapshot(opts, NULL);
}
if (!drive_new(opts, type)) {
exit(1);
}
}
| false |
qemu
|
a66c9dc734fb30de1e18e9dc217f2d37e16c492a
|
15,416 |
static void gic_set_irq(void *opaque, int irq, int level)
{
gic_state *s = (gic_state *)opaque;
/* The first external input line is internal interrupt 32. */
irq += GIC_INTERNAL;
if (level == GIC_TEST_LEVEL(irq, ALL_CPU_MASK))
return;
if (level) {
GIC_SET_LEVEL(irq, ALL_CPU_MASK);
if (GIC_TEST_TRIGGER(irq) || GIC_TEST_ENABLED(irq, ALL_CPU_MASK)) {
DPRINTF("Set %d pending mask %x\n", irq, GIC_TARGET(irq));
GIC_SET_PENDING(irq, GIC_TARGET(irq));
}
} else {
GIC_CLEAR_LEVEL(irq, ALL_CPU_MASK);
}
gic_update(s);
}
| false |
qemu
|
544d1afa7013fce155f5afbbc24737f2fc0c0f26
|
15,417 |
static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
{
VirtIODevice *vdev;
int n, r;
if (!dev->ioeventfd_started) {
return;
}
vdev = virtio_bus_get_device(&dev->bus);
for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
if (!virtio_queue_get_num(vdev, n)) {
continue;
}
r = virtio_ccw_set_guest2host_notifier(dev, n, false, false);
assert(r >= 0);
}
dev->ioeventfd_started = false;
}
| false |
qemu
|
8dfbaa6ac450c4ec2646b1ca08a4017052a90c1d
|
15,418 |
static void pl181_fifo_run(pl181_state *s)
{
uint32_t bits;
uint32_t value;
int n;
int limit;
int is_read;
is_read = (s->datactrl & PL181_DATA_DIRECTION) != 0;
if (s->datacnt != 0 && (!is_read || sd_data_ready(s->card))
&& !s->linux_hack) {
limit = is_read ? PL181_FIFO_LEN : 0;
n = 0;
value = 0;
while (s->datacnt && s->fifo_len != limit) {
if (is_read) {
value |= (uint32_t)sd_read_data(s->card) << (n * 8);
n++;
if (n == 4) {
pl181_fifo_push(s, value);
value = 0;
n = 0;
}
} else {
if (n == 0) {
value = pl181_fifo_pop(s);
n = 4;
}
sd_write_data(s->card, value & 0xff);
value >>= 8;
n--;
}
s->datacnt--;
}
if (n && is_read) {
pl181_fifo_push(s, value);
}
}
s->status &= ~(PL181_STATUS_RX_FIFO | PL181_STATUS_TX_FIFO);
if (s->datacnt == 0) {
s->status |= PL181_STATUS_DATAEND;
/* HACK: */
s->status |= PL181_STATUS_DATABLOCKEND;
DPRINTF("Transfer Complete\n");
}
if (s->datacnt == 0 && s->fifo_len == 0) {
s->datactrl &= ~PL181_DATA_ENABLE;
DPRINTF("Data engine idle\n");
} else {
/* Update FIFO bits. */
bits = PL181_STATUS_TXACTIVE | PL181_STATUS_RXACTIVE;
if (s->fifo_len == 0) {
bits |= PL181_STATUS_TXFIFOEMPTY;
bits |= PL181_STATUS_RXFIFOEMPTY;
} else {
bits |= PL181_STATUS_TXDATAAVLBL;
bits |= PL181_STATUS_RXDATAAVLBL;
}
if (s->fifo_len == 16) {
bits |= PL181_STATUS_TXFIFOFULL;
bits |= PL181_STATUS_RXFIFOFULL;
}
if (s->fifo_len <= 8) {
bits |= PL181_STATUS_TXFIFOHALFEMPTY;
}
if (s->fifo_len >= 8) {
bits |= PL181_STATUS_RXFIFOHALFFULL;
}
if (s->datactrl & PL181_DATA_DIRECTION) {
bits &= PL181_STATUS_RX_FIFO;
} else {
bits &= PL181_STATUS_TX_FIFO;
}
s->status |= bits;
}
}
| false |
qemu
|
bc3b26f5356c6de13f887c865c98dcdfac143514
|
15,419 |
envlist_setenv(envlist_t *envlist, const char *env)
{
struct envlist_entry *entry = NULL;
const char *eq_sign;
size_t envname_len;
if ((envlist == NULL) || (env == NULL))
return (EINVAL);
/* find out first equals sign in given env */
if ((eq_sign = strchr(env, '=')) == NULL)
return (EINVAL);
envname_len = eq_sign - env + 1;
/*
* If there already exists variable with given name
* we remove and release it before allocating a whole
* new entry.
*/
for (entry = envlist->el_entries.lh_first; entry != NULL;
entry = entry->ev_link.le_next) {
if (strncmp(entry->ev_var, env, envname_len) == 0)
break;
}
if (entry != NULL) {
LIST_REMOVE(entry, ev_link);
free((char *)entry->ev_var);
free(entry);
} else {
envlist->el_count++;
}
if ((entry = malloc(sizeof (*entry))) == NULL)
return (errno);
if ((entry->ev_var = strdup(env)) == NULL) {
free(entry);
return (errno);
}
LIST_INSERT_HEAD(&envlist->el_entries, entry, ev_link);
return (0);
}
| false |
qemu
|
72cf2d4f0e181d0d3a3122e04129c58a95da713e
|
15,422 |
int sclp_service_call(CPUS390XState *env, uint32_t sccb, uint64_t code)
{
int r = 0;
int shift = 0;
#ifdef DEBUG_HELPER
printf("sclp(0x%x, 0x%" PRIx64 ")\n", sccb, code);
#endif
if (sccb & ~0x7ffffff8ul) {
fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb);
r = -1;
goto out;
}
switch(code) {
case SCLP_CMDW_READ_SCP_INFO:
case SCLP_CMDW_READ_SCP_INFO_FORCED:
while ((ram_size >> (20 + shift)) > 65535) {
shift++;
}
stw_phys(sccb + SCP_MEM_CODE, ram_size >> (20 + shift));
stb_phys(sccb + SCP_INCREMENT, 1 << shift);
stw_phys(sccb + SCP_RESPONSE_CODE, 0x10);
if (kvm_enabled()) {
#ifdef CONFIG_KVM
kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE,
sccb & ~3, 0, 1);
#endif
} else {
env->psw.addr += 4;
ext_interrupt(env, EXT_SERVICE, sccb & ~3, 0);
}
break;
default:
#ifdef DEBUG_HELPER
printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64 "x\n", sccb, code);
#endif
r = -1;
break;
}
out:
return r;
}
| false |
qemu
|
9abf567d95a4e840df868ca993219175fbef8c22
|
15,423 |
static int coroutine_fn do_perform_cow(BlockDriverState *bs,
uint64_t src_cluster_offset,
uint64_t cluster_offset,
int offset_in_cluster,
int bytes)
{
BDRVQcow2State *s = bs->opaque;
QEMUIOVector qiov;
struct iovec iov;
int ret;
iov.iov_len = bytes;
iov.iov_base = qemu_try_blockalign(bs, iov.iov_len);
if (iov.iov_base == NULL) {
return -ENOMEM;
}
qemu_iovec_init_external(&qiov, &iov, 1);
BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);
if (!bs->drv) {
ret = -ENOMEDIUM;
goto out;
}
/* Call .bdrv_co_readv() directly instead of using the public block-layer
* interface. This avoids double I/O throttling and request tracking,
* which can lead to deadlock when block layer copy-on-read is enabled.
*/
ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
bytes, &qiov, 0);
if (ret < 0) {
goto out;
}
if (bs->encrypted) {
Error *err = NULL;
int64_t sector = (cluster_offset + offset_in_cluster)
>> BDRV_SECTOR_BITS;
assert(s->cipher);
assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
assert((bytes & ~BDRV_SECTOR_MASK) == 0);
if (qcow2_encrypt_sectors(s, sector, iov.iov_base, iov.iov_base,
bytes >> BDRV_SECTOR_BITS, true, &err) < 0) {
ret = -EIO;
error_free(err);
goto out;
}
}
ret = qcow2_pre_write_overlap_check(bs, 0,
cluster_offset + offset_in_cluster, bytes);
if (ret < 0) {
goto out;
}
BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
ret = bdrv_co_pwritev(bs->file->bs, cluster_offset + offset_in_cluster,
bytes, &qiov, 0);
if (ret < 0) {
goto out;
}
ret = 0;
out:
qemu_vfree(iov.iov_base);
return ret;
}
| false |
qemu
|
a03ef88f77af045a2eb9629b5ce774a3fb973c5e
|
15,424 |
static int get_physical_address_code(CPUState *env,
target_phys_addr_t *physical, int *prot,
target_ulong address, int is_user)
{
target_ulong mask;
unsigned int i;
if ((env->lsu & IMMU_E) == 0) { /* IMMU disabled */
*physical = address;
*prot = PAGE_EXEC;
return 0;
}
for (i = 0; i < 64; i++) {
switch ((env->itlb_tte[i] >> 61) & 3) {
default:
case 0x0: // 8k
mask = 0xffffffffffffe000ULL;
break;
case 0x1: // 64k
mask = 0xffffffffffff0000ULL;
break;
case 0x2: // 512k
mask = 0xfffffffffff80000ULL;
break;
case 0x3: // 4M
mask = 0xffffffffffc00000ULL;
break;
}
// ctx match, vaddr match, valid?
if (env->dmmuregs[1] == (env->itlb_tag[i] & 0x1fff) &&
(address & mask) == (env->itlb_tag[i] & mask) &&
(env->itlb_tte[i] & 0x8000000000000000ULL)) {
// access ok?
if ((env->itlb_tte[i] & 0x4) && is_user) {
if (env->immuregs[3]) /* Fault status register */
env->immuregs[3] = 2; /* overflow (not read before
another fault) */
env->immuregs[3] |= (is_user << 3) | 1;
env->exception_index = TT_TFAULT;
#ifdef DEBUG_MMU
printf("TFAULT at 0x%" PRIx64 "\n", address);
#endif
return 1;
}
*physical = ((env->itlb_tte[i] & mask) | (address & ~mask)) &
0x1ffffffe000ULL;
*prot = PAGE_EXEC;
return 0;
}
}
#ifdef DEBUG_MMU
printf("TMISS at 0x%" PRIx64 "\n", address);
#endif
/* Context is stored in DMMU (dmmuregs[1]) also for IMMU */
env->immuregs[6] = (address & ~0x1fffULL) | (env->dmmuregs[1] & 0x1fff);
env->exception_index = TT_TMISS;
return 1;
}
| false |
qemu
|
e8807b14cc8c12c0e14c08fa396d9da043b48209
|
15,425 |
BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
ThreadPoolFunc *func, void *arg,
BlockCompletionFunc *cb, void *opaque)
{
ThreadPoolElement *req;
req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque);
req->func = func;
req->arg = arg;
req->state = THREAD_QUEUED;
req->pool = pool;
QLIST_INSERT_HEAD(&pool->head, req, all);
trace_thread_pool_submit(pool, req, arg);
qemu_mutex_lock(&pool->lock);
if (pool->idle_threads == 0 && pool->cur_threads < pool->max_threads) {
spawn_thread(pool);
}
QTAILQ_INSERT_TAIL(&pool->request_list, req, reqs);
qemu_mutex_unlock(&pool->lock);
qemu_sem_post(&pool->sem);
return &req->common;
}
| false |
qemu
|
c2b38b277a7882a592f4f2ec955084b2b756daaa
|
15,426 |
static int colo_packet_compare_tcp(Packet *spkt, Packet *ppkt)
{
struct tcphdr *ptcp, *stcp;
int res;
trace_colo_compare_main("compare tcp");
ptcp = (struct tcphdr *)ppkt->transport_header;
stcp = (struct tcphdr *)spkt->transport_header;
/*
* The 'identification' field in the IP header is *very* random
* it almost never matches. Fudge this by ignoring differences in
* unfragmented packets; they'll normally sort themselves out if different
* anyway, and it should recover at the TCP level.
* An alternative would be to get both the primary and secondary to rewrite
* somehow; but that would need some sync traffic to sync the state
*/
if (ntohs(ppkt->ip->ip_off) & IP_DF) {
spkt->ip->ip_id = ppkt->ip->ip_id;
/* and the sum will be different if the IDs were different */
spkt->ip->ip_sum = ppkt->ip->ip_sum;
}
if (ptcp->th_sum == stcp->th_sum) {
res = colo_packet_compare_common(ppkt, spkt, ETH_HLEN);
} else {
res = -1;
}
if (res != 0 && trace_event_get_state(TRACE_COLO_COMPARE_MISCOMPARE)) {
trace_colo_compare_pkt_info_src(inet_ntoa(ppkt->ip->ip_src),
ntohl(stcp->th_seq),
ntohl(stcp->th_ack),
res, stcp->th_flags,
spkt->size);
trace_colo_compare_pkt_info_dst(inet_ntoa(ppkt->ip->ip_dst),
ntohl(ptcp->th_seq),
ntohl(ptcp->th_ack),
res, ptcp->th_flags,
ppkt->size);
qemu_hexdump((char *)ppkt->data, stderr,
"colo-compare ppkt", ppkt->size);
qemu_hexdump((char *)spkt->data, stderr,
"colo-compare spkt", spkt->size);
}
return res;
}
| false |
qemu
|
184d4d42033d5c111276e4eef9ea273c2e114d18
|
15,427 |
static void data_plane_blk_insert_notifier(Notifier *n, void *data)
{
VirtIOBlockDataPlane *s = container_of(n, VirtIOBlockDataPlane,
insert_notifier);
assert(s->conf->conf.blk == data);
data_plane_set_up_op_blockers(s);
}
| false |
qemu
|
348295838384941d1e5420d10e57366c4e303d45
|
15,428 |
static void vapic_write(void *opaque, target_phys_addr_t addr, uint64_t data,
unsigned int size)
{
CPUX86State *env = cpu_single_env;
target_phys_addr_t rom_paddr;
VAPICROMState *s = opaque;
cpu_synchronize_state(env);
/*
* The VAPIC supports two PIO-based hypercalls, both via port 0x7E.
* o 16-bit write access:
* Reports the option ROM initialization to the hypervisor. Written
* value is the offset of the state structure in the ROM.
* o 8-bit write access:
* Reactivates the VAPIC after a guest hibernation, i.e. after the
* option ROM content has been re-initialized by a guest power cycle.
* o 32-bit write access:
* Poll for pending IRQs, considering the current VAPIC state.
*/
switch (size) {
case 2:
if (s->state == VAPIC_INACTIVE) {
rom_paddr = (env->segs[R_CS].base + env->eip) & ROM_BLOCK_MASK;
s->rom_state_paddr = rom_paddr + data;
s->state = VAPIC_STANDBY;
}
if (vapic_prepare(s) < 0) {
s->state = VAPIC_INACTIVE;
break;
}
break;
case 1:
if (kvm_enabled()) {
/*
* Disable triggering instruction in ROM by writing a NOP.
*
* We cannot do this in TCG mode as the reported IP is not
* accurate.
*/
pause_all_vcpus();
patch_byte(env, env->eip - 2, 0x66);
patch_byte(env, env->eip - 1, 0x90);
resume_all_vcpus();
}
if (s->state == VAPIC_ACTIVE) {
break;
}
if (update_rom_mapping(s, env, env->eip) < 0) {
break;
}
if (find_real_tpr_addr(s, env) < 0) {
break;
}
vapic_enable(s, env);
break;
default:
case 4:
if (!kvm_irqchip_in_kernel()) {
apic_poll_irq(env->apic_state);
}
break;
}
}
| false |
qemu
|
a8170e5e97ad17ca169c64ba87ae2f53850dab4c
|
15,429 |
long do_rt_sigreturn(CPUM68KState *env)
{
struct target_rt_sigframe *frame;
abi_ulong frame_addr = env->aregs[7] - 4;
target_sigset_t target_set;
sigset_t set;
int d0;
if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
goto badframe;
target_to_host_sigset_internal(&set, &target_set);
sigprocmask(SIG_SETMASK, &set, NULL);
/* restore registers */
if (target_rt_restore_ucontext(env, &frame->uc, &d0))
goto badframe;
if (do_sigaltstack(frame_addr +
offsetof(struct target_rt_sigframe, uc.tuc_stack),
0, get_sp_from_cpustate(env)) == -EFAULT)
goto badframe;
unlock_user_struct(frame, frame_addr, 0);
return d0;
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
return 0;
}
| false |
qemu
|
1c275925bfbbc2de84a8f0e09d1dd70bbefb6da3
|
15,430 |
static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
{
if (req->dev && req->dev->unit_attention.key == UNIT_ATTENTION) {
scsi_req_build_sense(req, req->dev->unit_attention);
} else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
scsi_req_build_sense(req, req->bus->unit_attention);
}
scsi_req_complete(req, CHECK_CONDITION);
return 0;
}
| false |
qemu
|
0bf8264e2d2bd19c1eecf9bde0e59284ef47eabb
|
15,431 |
static void vc1_decode_skip_blocks(VC1Context *v)
{
MpegEncContext *s = &v->s;
ff_er_add_slice(s, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
s->first_slice_line = 1;
for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
s->mb_x = 0;
ff_init_block_index(s);
ff_update_block_index(s);
if (s->last_picture.f.data[0]) {
memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
}
ff_draw_horiz_band(s, s->mb_y * 16, 16);
s->first_slice_line = 0;
}
s->pict_type = AV_PICTURE_TYPE_P;
}
| false |
FFmpeg
|
0d194ee51ed477f843900e657a7edbcbecdffa42
|
15,432 |
static ExitStatus gen_mfpr(TCGv va, int regno)
{
int data = cpu_pr_data(regno);
/* Special help for VMTIME and WALLTIME. */
if (regno == 250 || regno == 249) {
void (*helper)(TCGv) = gen_helper_get_walltime;
if (regno == 249) {
helper = gen_helper_get_vmtime;
}
if (use_icount) {
gen_io_start();
helper(va);
gen_io_end();
return EXIT_PC_STALE;
} else {
helper(va);
return NO_EXIT;
}
}
/* The basic registers are data only, and unknown registers
are read-zero, write-ignore. */
if (data == 0) {
tcg_gen_movi_i64(va, 0);
} else if (data & PR_BYTE) {
tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
} else if (data & PR_LONG) {
tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
} else {
tcg_gen_ld_i64(va, cpu_env, data);
}
return NO_EXIT;
}
| false |
qemu
|
bd79255d2571a3c68820117caf94ea9afe1d527e
|
15,433 |
static char *SocketAddress_to_str(const char *prefix, SocketAddress *addr,
bool is_listen, bool is_telnet)
{
switch (addr->type) {
case SOCKET_ADDRESS_KIND_INET:
return g_strdup_printf("%s%s:%s:%s%s", prefix,
is_telnet ? "telnet" : "tcp", addr->u.inet->host,
addr->u.inet->port, is_listen ? ",server" : "");
break;
case SOCKET_ADDRESS_KIND_UNIX:
return g_strdup_printf("%sunix:%s%s", prefix,
addr->u.q_unix->path,
is_listen ? ",server" : "");
break;
case SOCKET_ADDRESS_KIND_FD:
return g_strdup_printf("%sfd:%s%s", prefix, addr->u.fd->str,
is_listen ? ",server" : "");
break;
default:
abort();
}
}
| false |
qemu
|
32bafa8fdd098d52fbf1102d5a5e48d29398c0aa
|
15,435 |
static void blkdebug_refresh_limits(BlockDriverState *bs, Error **errp)
{
BDRVBlkdebugState *s = bs->opaque;
if (s->align) {
bs->request_alignment = s->align;
}
}
| false |
qemu
|
a5b8dd2ce83208cd7d6eb4562339ecf5aae13574
|
15,436 |
static int add_rule(QemuOpts *opts, void *opaque)
{
struct add_rule_data *d = opaque;
BDRVBlkdebugState *s = d->s;
const char* event_name;
BlkDebugEvent event;
struct BlkdebugRule *rule;
/* Find the right event for the rule */
event_name = qemu_opt_get(opts, "event");
if (!event_name || get_event_by_name(event_name, &event) < 0) {
return -1;
}
/* Set attributes common for all actions */
rule = g_malloc0(sizeof(*rule));
*rule = (struct BlkdebugRule) {
.event = event,
.action = d->action,
.state = qemu_opt_get_number(opts, "state", 0),
};
/* Parse action-specific options */
switch (d->action) {
case ACTION_INJECT_ERROR:
rule->options.inject.error = qemu_opt_get_number(opts, "errno", EIO);
rule->options.inject.once = qemu_opt_get_bool(opts, "once", 0);
rule->options.inject.immediately =
qemu_opt_get_bool(opts, "immediately", 0);
rule->options.inject.sector = qemu_opt_get_number(opts, "sector", -1);
break;
case ACTION_SET_STATE:
rule->options.set_state.new_state =
qemu_opt_get_number(opts, "new_state", 0);
break;
case ACTION_SUSPEND:
rule->options.suspend.tag =
g_strdup(qemu_opt_get(opts, "tag"));
break;
};
/* Add the rule */
QLIST_INSERT_HEAD(&s->rules[event], rule, next);
return 0;
}
| false |
qemu
|
d4362d642e5cfd50671eba250b5888a89a88691a
|
15,437 |
static int cmos_get_fd_drive_type(FloppyDriveType fd0)
{
int val;
switch (fd0) {
case FLOPPY_DRIVE_TYPE_144:
/* 1.44 Mb 3"5 drive */
val = 4;
break;
case FLOPPY_DRIVE_TYPE_288:
/* 2.88 Mb 3"5 drive */
val = 5;
break;
case FLOPPY_DRIVE_TYPE_120:
/* 1.2 Mb 5"5 drive */
val = 2;
break;
case FLOPPY_DRIVE_TYPE_NONE:
default:
val = 0;
break;
}
return val;
}
| false |
qemu
|
bda055096be9a91d602af457f8bedeede86eb3f6
|